Merge branch 'snmp-stats-update'

Eric Dumazet says:

====================
net: snmp: update SNMP methods

In the old days (before linux-3.0), SNMP counters were duplicated:
one set for user context, and another one for BH context.

After commit 8f0ea0fe3a ("snmp: reduce percpu needs by 50%")
we have a single copy, and what really matters is preemption being
enabled or disabled, since we use this_cpu_inc() or __this_cpu_inc()
respectively.

This patch series kills the obsolete STATS_USER() helpers,
and renames all XXX_BH() helpers to __XXX() ones, to more
closely match the conventions used to update per-cpu variables.

This is probably going to make maintainers' jobs harder for a while,
since cherry-picks will no longer apply cleanly, but this had to be
cleaned up at some point. I am so sorry, guys.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Commit 210732d16d, committed by David S. Miller on 2016-04-27 22:48:25 -04:00.
54 changed files with 511 additions and 530 deletions
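
To make the convention concrete before the diffs below: the plain helpers stay usable from any context because they pin the CPU themselves, while the new __ prefixed helpers assume the caller already runs with preemption (or BHs) disabled, exactly like this_cpu_inc() versus __this_cpu_inc(). A minimal standalone C sketch of that contract (not kernel code: the per-CPU MIB block is reduced to a plain array and the preemption calls are no-op stubs):

#include <stdio.h>

static unsigned long mibs[2];          /* stand-in for one CPU's MIB block */

static void preempt_disable(void) { }  /* no-ops here; the kernel really   */
static void preempt_enable(void)  { }  /* disables preemption              */

/* safe from any context: pins the CPU around the raw update */
#define SNMP_INC_STATS(field) \
        do { preempt_disable(); mibs[field]++; preempt_enable(); } while (0)

/* caller guarantees preemption is already off (e.g. softirq/BH context) */
#define __SNMP_INC_STATS(field) (mibs[field]++)

enum { MIB_INSEGS, MIB_OUTSEGS };

int main(void)
{
        SNMP_INC_STATS(MIB_INSEGS);    /* process-context caller */
        __SNMP_INC_STATS(MIB_OUTSEGS); /* BH-context caller      */
        printf("in=%lu out=%lu\n", mibs[MIB_INSEGS], mibs[MIB_OUTSEGS]);
        return 0;
}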

View file

@@ -30,9 +30,9 @@ struct icmp_err {
 extern const struct icmp_err icmp_err_convert[];
 #define ICMP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.icmp_statistics, field)
-#define ICMP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.icmp_statistics, field)
+#define __ICMP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.icmp_statistics, field)
 #define ICMPMSGOUT_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field+256)
-#define ICMPMSGIN_INC_STATS_BH(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
+#define ICMPMSGIN_INC_STATS(net, field) SNMP_INC_STATS_ATOMIC_LONG((net)->mib.icmpmsg_statistics, field)
 struct dst_entry;
 struct net_proto_family;

View file

@@ -187,17 +187,15 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
 unsigned int len);
 #define IP_INC_STATS(net, field) SNMP_INC_STATS64((net)->mib.ip_statistics, field)
-#define IP_INC_STATS_BH(net, field) SNMP_INC_STATS64_BH((net)->mib.ip_statistics, field)
+#define __IP_INC_STATS(net, field) __SNMP_INC_STATS64((net)->mib.ip_statistics, field)
 #define IP_ADD_STATS(net, field, val) SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_ADD_STATS_BH(net, field, val) SNMP_ADD_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_ADD_STATS(net, field, val) __SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
 #define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
-#define IP_UPD_PO_STATS_BH(net, field, val) SNMP_UPD_PO_STATS64_BH((net)->mib.ip_statistics, field, val)
+#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
 #define NET_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.net_statistics, field)
+#define __NET_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.net_statistics, field)
-#define NET_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->mib.net_statistics, field)
 #define NET_ADD_STATS(net, field, adnd) SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_BH(net, field, adnd) SNMP_ADD_STATS_BH((net)->mib.net_statistics, field, adnd)
+#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
-#define NET_ADD_STATS_USER(net, field, adnd) SNMP_ADD_STATS_USER((net)->mib.net_statistics, field, adnd)
 u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
 unsigned long snmp_fold_field(void __percpu *mib, int offt);

View file

@@ -121,21 +121,21 @@ struct frag_hdr {
 extern int sysctl_mld_max_msf;
 extern int sysctl_mld_qrv;
-#define _DEVINC(net, statname, modifier, idev, field) \
+#define _DEVINC(net, statname, mod, idev, field) \
 ({ \
 struct inet6_dev *_idev = (idev); \
 if (likely(_idev != NULL)) \
-SNMP_INC_STATS##modifier((_idev)->stats.statname, (field)); \
+mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
-SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
 })
 /* per device counters are atomic_long_t */
-#define _DEVINCATOMIC(net, statname, modifier, idev, field) \
+#define _DEVINCATOMIC(net, statname, mod, idev, field) \
 ({ \
 struct inet6_dev *_idev = (idev); \
 if (likely(_idev != NULL)) \
 SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
-SNMP_INC_STATS##modifier((net)->mib.statname##_statistics, (field));\
+mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
 })
 /* per device and per net counters are atomic_long_t */
@@ -147,46 +147,44 @@ extern int sysctl_mld_qrv;
 SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
 })
-#define _DEVADD(net, statname, modifier, idev, field, val) \
+#define _DEVADD(net, statname, mod, idev, field, val) \
 ({ \
 struct inet6_dev *_idev = (idev); \
 if (likely(_idev != NULL)) \
-SNMP_ADD_STATS##modifier((_idev)->stats.statname, (field), (val)); \
+mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
-SNMP_ADD_STATS##modifier((net)->mib.statname##_statistics, (field), (val));\
+mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
 })
-#define _DEVUPD(net, statname, modifier, idev, field, val) \
+#define _DEVUPD(net, statname, mod, idev, field, val) \
 ({ \
 struct inet6_dev *_idev = (idev); \
 if (likely(_idev != NULL)) \
-SNMP_UPD_PO_STATS##modifier((_idev)->stats.statname, field, (val)); \
+mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
-SNMP_UPD_PO_STATS##modifier((net)->mib.statname##_statistics, field, (val));\
+mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
 })
 /* MIBs */
 #define IP6_INC_STATS(net, idev,field) \
-_DEVINC(net, ipv6, 64, idev, field)
+_DEVINC(net, ipv6, , idev, field)
-#define IP6_INC_STATS_BH(net, idev,field) \
+#define __IP6_INC_STATS(net, idev,field) \
-_DEVINC(net, ipv6, 64_BH, idev, field)
+_DEVINC(net, ipv6, __, idev, field)
 #define IP6_ADD_STATS(net, idev,field,val) \
-_DEVADD(net, ipv6, 64, idev, field, val)
+_DEVADD(net, ipv6, , idev, field, val)
-#define IP6_ADD_STATS_BH(net, idev,field,val) \
+#define __IP6_ADD_STATS(net, idev,field,val) \
-_DEVADD(net, ipv6, 64_BH, idev, field, val)
+_DEVADD(net, ipv6, __, idev, field, val)
 #define IP6_UPD_PO_STATS(net, idev,field,val) \
-_DEVUPD(net, ipv6, 64, idev, field, val)
+_DEVUPD(net, ipv6, , idev, field, val)
-#define IP6_UPD_PO_STATS_BH(net, idev,field,val) \
+#define __IP6_UPD_PO_STATS(net, idev,field,val) \
-_DEVUPD(net, ipv6, 64_BH, idev, field, val)
+_DEVUPD(net, ipv6, __, idev, field, val)
 #define ICMP6_INC_STATS(net, idev, field) \
 _DEVINCATOMIC(net, icmpv6, , idev, field)
-#define ICMP6_INC_STATS_BH(net, idev, field) \
+#define __ICMP6_INC_STATS(net, idev, field) \
-_DEVINCATOMIC(net, icmpv6, _BH, idev, field)
+_DEVINCATOMIC(net, icmpv6, __, idev, field)
 #define ICMP6MSGOUT_INC_STATS(net, idev, field) \
 _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGOUT_INC_STATS_BH(net, idev, field) \
+#define ICMP6MSGIN_INC_STATS(net, idev, field) \
-_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
-#define ICMP6MSGIN_INC_STATS_BH(net, idev, field) \
 _DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)
 struct ip6_ra_chain {
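
The wrappers above also switch from pasting a suffix (SNMP_INC_STATS##modifier, which produced the old ..._BH names) to pasting a prefix (mod##SNMP_INC_STATS64, which produces __SNMP_INC_STATS64 when mod is __, or the plain name when mod is empty). A standalone sketch of that preprocessor trick (not kernel code; the two helpers only print what they stand for):

#include <stdio.h>

static void SNMP_INC_STATS(const char *mib)   { printf("this_cpu_inc(%s)\n", mib); }
static void __SNMP_INC_STATS(const char *mib) { printf("__this_cpu_inc(%s)\n", mib); }

/* prefix-pasting wrapper, shaped like the new _DEVINC() */
#define _DEVINC(mod, mib) mod##SNMP_INC_STATS(mib)

int main(void)
{
        _DEVINC(, "ipv6");   /* empty mod -> SNMP_INC_STATS("ipv6")   */
        _DEVINC(__, "ipv6"); /* mod == __ -> __SNMP_INC_STATS("ipv6") */
        return 0;
}

Pasting against an empty macro argument is standard C99 placemarker behavior, which is what lets a single wrapper emit both flavors.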

View file

@@ -205,10 +205,9 @@ extern int sysctl_sctp_wmem[3];
 */
 /* SCTP SNMP MIB stats handlers */
 #define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
+#define __SCTP_INC_STATS(net, field) __SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
-#define SCTP_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)->sctp.sctp_statistics, field)
 #define SCTP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->sctp.sctp_statistics, field)
 /* sctp mib definitions */
 enum {

View file

@@ -123,12 +123,9 @@ struct linux_xfrm_mib {
 #define DECLARE_SNMP_STAT(type, name) \
 extern __typeof__(type) __percpu *name
-#define SNMP_INC_STATS_BH(mib, field) \
+#define __SNMP_INC_STATS(mib, field) \
 __this_cpu_inc(mib->mibs[field])
-#define SNMP_INC_STATS_USER(mib, field) \
-this_cpu_inc(mib->mibs[field])
 #define SNMP_INC_STATS_ATOMIC_LONG(mib, field) \
 atomic_long_inc(&mib->mibs[field])
@@ -138,12 +135,9 @@ struct linux_xfrm_mib {
 #define SNMP_DEC_STATS(mib, field) \
 this_cpu_dec(mib->mibs[field])
-#define SNMP_ADD_STATS_BH(mib, field, addend) \
+#define __SNMP_ADD_STATS(mib, field, addend) \
 __this_cpu_add(mib->mibs[field], addend)
-#define SNMP_ADD_STATS_USER(mib, field, addend) \
-this_cpu_add(mib->mibs[field], addend)
 #define SNMP_ADD_STATS(mib, field, addend) \
 this_cpu_add(mib->mibs[field], addend)
 #define SNMP_UPD_PO_STATS(mib, basefield, addend) \
@@ -152,7 +146,7 @@ struct linux_xfrm_mib {
 this_cpu_inc(ptr[basefield##PKTS]); \
 this_cpu_add(ptr[basefield##OCTETS], addend); \
 } while (0)
-#define SNMP_UPD_PO_STATS_BH(mib, basefield, addend) \
+#define __SNMP_UPD_PO_STATS(mib, basefield, addend) \
 do { \
 __typeof__((mib->mibs) + 0) ptr = mib->mibs; \
 __this_cpu_inc(ptr[basefield##PKTS]); \
@@ -162,7 +156,7 @@ struct linux_xfrm_mib {
 #if BITS_PER_LONG==32
-#define SNMP_ADD_STATS64_BH(mib, field, addend) \
+#define __SNMP_ADD_STATS64(mib, field, addend) \
 do { \
 __typeof__(*mib) *ptr = raw_cpu_ptr(mib); \
 u64_stats_update_begin(&ptr->syncp); \
@@ -170,20 +164,16 @@ struct linux_xfrm_mib {
 u64_stats_update_end(&ptr->syncp); \
 } while (0)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) \
+#define SNMP_ADD_STATS64(mib, field, addend) \
 do { \
-local_bh_disable(); \
+preempt_disable(); \
-SNMP_ADD_STATS64_BH(mib, field, addend); \
+__SNMP_ADD_STATS64(mib, field, addend); \
-local_bh_enable(); \
+preempt_enable(); \
 } while (0)
-#define SNMP_ADD_STATS64(mib, field, addend) \
-SNMP_ADD_STATS64_USER(mib, field, addend)
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_ADD_STATS64_BH(mib, field, 1)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_ADD_STATS64_USER(mib, field, 1)
+#define __SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) \
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) \
 do { \
 __typeof__(*mib) *ptr; \
 ptr = raw_cpu_ptr((mib)); \
@@ -194,20 +184,18 @@ struct linux_xfrm_mib {
 } while (0)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend) \
 do { \
-local_bh_disable(); \
+preempt_disable(); \
-SNMP_UPD_PO_STATS64_BH(mib, basefield, addend); \
+__SNMP_UPD_PO_STATS64(mib, basefield, addend); \
-local_bh_enable(); \
+preempt_enable(); \
 } while (0)
 #else
-#define SNMP_INC_STATS64_BH(mib, field) SNMP_INC_STATS_BH(mib, field)
+#define __SNMP_INC_STATS64(mib, field) __SNMP_INC_STATS(mib, field)
-#define SNMP_INC_STATS64_USER(mib, field) SNMP_INC_STATS_USER(mib, field)
 #define SNMP_INC_STATS64(mib, field) SNMP_INC_STATS(mib, field)
 #define SNMP_DEC_STATS64(mib, field) SNMP_DEC_STATS(mib, field)
-#define SNMP_ADD_STATS64_BH(mib, field, addend) SNMP_ADD_STATS_BH(mib, field, addend)
+#define __SNMP_ADD_STATS64(mib, field, addend) __SNMP_ADD_STATS(mib, field, addend)
-#define SNMP_ADD_STATS64_USER(mib, field, addend) SNMP_ADD_STATS_USER(mib, field, addend)
 #define SNMP_ADD_STATS64(mib, field, addend) SNMP_ADD_STATS(mib, field, addend)
 #define SNMP_UPD_PO_STATS64(mib, basefield, addend) SNMP_UPD_PO_STATS(mib, basefield, addend)
-#define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend) SNMP_UPD_PO_STATS_BH(mib, basefield, addend)
+#define __SNMP_UPD_PO_STATS64(mib, basefield, addend) __SNMP_UPD_PO_STATS(mib, basefield, addend)
 #endif
 #endif
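
On 32-bit builds (the BITS_PER_LONG==32 branch above), a 64-bit counter cannot be updated atomically, so each write is bracketed by the per-CPU u64_stats_sync sequence counter; the only behavioral change this series makes to the preemptible wrapper is using preempt_disable()/preempt_enable() instead of local_bh_disable()/local_bh_enable(). A rough userspace sketch of that wrapper pattern (not kernel code: one static struct stands in for the per-CPU data and the preemption calls are stubs):

#include <stdint.h>
#include <stdio.h>

struct mib64 {
        unsigned int seq; /* stand-in for u64_stats_sync */
        uint64_t octets;
};

static struct mib64 mib;              /* real code keeps one copy per CPU */

static void preempt_disable(void) { } /* stubs; the kernel really */
static void preempt_enable(void)  { } /* disables preemption      */

/* raw update: caller must already be non-preemptible */
static void __snmp_add_stats64(uint64_t addend)
{
        mib.seq++;            /* u64_stats_update_begin() */
        mib.octets += addend;
        mib.seq++;            /* u64_stats_update_end()   */
}

/* safe-anywhere wrapper, shaped like SNMP_ADD_STATS64() after this series */
static void snmp_add_stats64(uint64_t addend)
{
        preempt_disable();
        __snmp_add_stats64(addend);
        preempt_enable();
}

int main(void)
{
        snmp_add_stats64(1500);
        printf("octets=%llu\n", (unsigned long long)mib.octets);
        return 0;
}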

View file

@@ -332,9 +332,8 @@ bool tcp_check_oom(struct sock *sk, int shift);
 extern struct proto tcp_prot;
 #define TCP_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
+#define __TCP_INC_STATS(net, field) __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
 #define TCP_DEC_STATS(net, field) SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
-#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 void tcp_tasklet_init(void);
@@ -1298,10 +1297,10 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 static inline void tcp_mib_init(struct net *net)
 {
 /* See RFC 2012 */
-TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
+TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
-TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
+TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
-TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
+TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
-TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
+TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
 }
 /* from STCP */
@@ -1744,7 +1743,7 @@ static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
 __u16 *mss)
 {
 tcp_synq_overflow(sk);
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
 return ops->cookie_init_seq(skb, mss);
 }
 #else
@@ -1853,7 +1852,7 @@ static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
 static inline void tcp_listendrop(const struct sock *sk)
 {
 atomic_inc(&((struct sock *)sk)->sk_drops);
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 }
 #endif /* _TCP_H */

View file

@@ -289,32 +289,32 @@ struct sock *udp6_lib_lookup_skb(struct sk_buff *skb,
 /*
 * SNMP statistics for UDP and UDP-Lite
 */
-#define UDP_INC_STATS_USER(net, field, is_udplite) do { \
+#define UDP_INC_STATS(net, field, is_udplite) do { \
-if (is_udplite) SNMP_INC_STATS_USER((net)->mib.udplite_statistics, field); \
+if (is_udplite) SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
-else SNMP_INC_STATS_USER((net)->mib.udp_statistics, field); } while(0)
+else SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
-#define UDP_INC_STATS_BH(net, field, is_udplite) do { \
+#define __UDP_INC_STATS(net, field, is_udplite) do { \
-if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_statistics, field); \
+if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_statistics, field); \
-else SNMP_INC_STATS_BH((net)->mib.udp_statistics, field); } while(0)
+else __SNMP_INC_STATS((net)->mib.udp_statistics, field); } while(0)
-#define UDP6_INC_STATS_BH(net, field, is_udplite) do { \
+#define __UDP6_INC_STATS(net, field, is_udplite) do { \
-if (is_udplite) SNMP_INC_STATS_BH((net)->mib.udplite_stats_in6, field);\
+if (is_udplite) __SNMP_INC_STATS((net)->mib.udplite_stats_in6, field);\
-else SNMP_INC_STATS_BH((net)->mib.udp_stats_in6, field); \
+else __SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
 } while(0)
-#define UDP6_INC_STATS_USER(net, field, __lite) do { \
+#define UDP6_INC_STATS(net, field, __lite) do { \
-if (__lite) SNMP_INC_STATS_USER((net)->mib.udplite_stats_in6, field); \
+if (__lite) SNMP_INC_STATS((net)->mib.udplite_stats_in6, field); \
-else SNMP_INC_STATS_USER((net)->mib.udp_stats_in6, field); \
+else SNMP_INC_STATS((net)->mib.udp_stats_in6, field); \
 } while(0)
 #if IS_ENABLED(CONFIG_IPV6)
-#define UDPX_INC_STATS_BH(sk, field) \
+#define __UDPX_INC_STATS(sk, field) \
 do { \
 if ((sk)->sk_family == AF_INET) \
-UDP_INC_STATS_BH(sock_net(sk), field, 0); \
+__UDP_INC_STATS(sock_net(sk), field, 0); \
 else \
-UDP6_INC_STATS_BH(sock_net(sk), field, 0); \
+__UDP6_INC_STATS(sock_net(sk), field, 0); \
 } while (0)
 #else
-#define UDPX_INC_STATS_BH(sk, field) UDP_INC_STATS_BH(sock_net(sk), field, 0)
+#define __UDPX_INC_STATS(sk, field) __UDP_INC_STATS(sock_net(sk), field, 0)
 #endif
 /* /proc */

View file

@@ -45,12 +45,8 @@
 #ifdef CONFIG_XFRM_STATISTICS
 #define XFRM_INC_STATS(net, field) SNMP_INC_STATS((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->mib.xfrm_statistics, field)
-#define XFRM_INC_STATS_USER(net, field) SNMP_INC_STATS_USER((net)-mib.xfrm_statistics, field)
 #else
 #define XFRM_INC_STATS(net, field) ((void)(net))
-#define XFRM_INC_STATS_BH(net, field) ((void)(net))
-#define XFRM_INC_STATS_USER(net, field) ((void)(net))
 #endif

View file

@@ -217,13 +217,13 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 len = ntohs(iph->tot_len);
 if (skb->len < len) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 goto drop;
 } else if (len < (iph->ihl*4))
 goto inhdr_error;
 if (pskb_trim_rcsum(skb, len)) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 goto drop;
 }
@@ -236,7 +236,7 @@ static int br_validate_ipv4(struct net *net, struct sk_buff *skb)
 return 0;
 inhdr_error:
-IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 return -1;
 }

View file

@@ -122,13 +122,13 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 if (pkt_len + ip6h_len > skb->len) {
-IP6_INC_STATS_BH(net, idev,
+__IP6_INC_STATS(net, idev,
 IPSTATS_MIB_INTRUNCATEDPKTS);
 goto drop;
 }
 if (pskb_trim_rcsum(skb, pkt_len + ip6h_len)) {
-IP6_INC_STATS_BH(net, idev,
+__IP6_INC_STATS(net, idev,
 IPSTATS_MIB_INDISCARDS);
 goto drop;
 }
 }
@@ -142,7 +142,7 @@ int br_validate_ipv6(struct net *net, struct sk_buff *skb)
 return 0;
 inhdr_error:
-IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 drop:
 return -1;
 }

View file

@@ -4982,8 +4982,8 @@ bool sk_busy_loop(struct sock *sk, int nonblock)
 netpoll_poll_unlock(have);
 }
 if (rc > 0)
-NET_ADD_STATS_BH(sock_net(sk),
+__NET_ADD_STATS(sock_net(sk),
 LINUX_MIB_BUSYPOLLRXPACKETS, rc);
 local_bh_enable();
 if (rc == LL_FLUSH_FAILED)

View file

@@ -198,9 +198,9 @@ struct dccp_mib {
 };
 DECLARE_SNMP_STAT(struct dccp_mib, dccp_statistics);
 #define DCCP_INC_STATS(field) SNMP_INC_STATS(dccp_statistics, field)
-#define DCCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(dccp_statistics, field)
+#define __DCCP_INC_STATS(field) __SNMP_INC_STATS(dccp_statistics, field)
 #define DCCP_DEC_STATS(field) SNMP_DEC_STATS(dccp_statistics, field)
 /*
 * Checksumming routines

View file

@@ -359,7 +359,7 @@ send_sync:
 goto discard;
 }
-DCCP_INC_STATS_BH(DCCP_MIB_INERRS);
+__DCCP_INC_STATS(DCCP_MIB_INERRS);
 discard:
 __kfree_skb(skb);
 return 0;

View file

@@ -205,7 +205,7 @@ void dccp_req_err(struct sock *sk, u64 seq)
 * socket here.
 */
 if (!between48(seq, dccp_rsk(req)->dreq_iss, dccp_rsk(req)->dreq_gss)) {
-NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 } else {
 /*
 * Still in RESPOND, just remove it silently.
@@ -247,7 +247,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 if (skb->len < offset + sizeof(*dh) ||
 skb->len < offset + __dccp_basic_hdr_len(dh)) {
-ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 return;
 }
@@ -256,7 +256,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 iph->saddr, ntohs(dh->dccph_sport),
 inet_iif(skb));
 if (!sk) {
-ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 return;
 }
@@ -273,7 +273,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 * servers this needs to be solved differently.
 */
 if (sock_owned_by_user(sk))
-NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 if (sk->sk_state == DCCP_CLOSED)
 goto out;
@@ -281,7 +281,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 dp = dccp_sk(sk);
 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 goto out;
 }
@@ -318,7 +318,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
 case DCCP_REQUESTING:
 case DCCP_RESPOND:
 if (!sock_owned_by_user(sk)) {
-DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 sk->sk_err = err;
 sk->sk_error_report(sk);
@@ -431,11 +431,11 @@ struct sock *dccp_v4_request_recv_sock(const struct sock *sk,
 return newsk;
 exit_overflow:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
 dst_release(dst);
 exit:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 return NULL;
 put_and_exit:
 inet_csk_prepare_forced_close(newsk);
@@ -462,7 +462,7 @@ static struct dst_entry* dccp_v4_route_skb(struct net *net, struct sock *sk,
 security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
 rt = ip_route_output_flow(net, &fl4, sk);
 if (IS_ERR(rt)) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 return NULL;
 }
@@ -533,8 +533,8 @@ static void dccp_v4_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 bh_unlock_sock(ctl_sk);
 if (net_xmit_eval(err) == 0) {
-DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
+__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
-DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 }
 out:
 dst_release(dst);
@@ -637,7 +637,7 @@ int dccp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 drop_and_free:
 reqsk_free(req);
 drop:
-DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 return -1;
 }
 EXPORT_SYMBOL_GPL(dccp_v4_conn_request);

View file

@@ -80,8 +80,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 if (skb->len < offset + sizeof(*dh) ||
 skb->len < offset + __dccp_basic_hdr_len(dh)) {
-ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
+__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 ICMP6_MIB_INERRORS);
 return;
 }
@@ -91,8 +91,8 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 inet6_iif(skb));
 if (!sk) {
-ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
+__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 ICMP6_MIB_INERRORS);
 return;
 }
@@ -106,7 +106,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 bh_lock_sock(sk);
 if (sock_owned_by_user(sk))
-NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 if (sk->sk_state == DCCP_CLOSED)
 goto out;
@@ -114,7 +114,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 dp = dccp_sk(sk);
 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_LISTEN) &&
 !between48(seq, dp->dccps_awl, dp->dccps_awh)) {
-NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 goto out;
 }
@@ -156,7 +156,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 case DCCP_RESPOND: /* Cannot happen.
 It can, it SYNs are crossed. --ANK */
 if (!sock_owned_by_user(sk)) {
-DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 sk->sk_err = err;
 /*
 * Wake people up to see the error
@@ -277,8 +277,8 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
 if (!IS_ERR(dst)) {
 skb_dst_set(skb, dst);
 ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
-DCCP_INC_STATS_BH(DCCP_MIB_OUTSEGS);
+__DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
-DCCP_INC_STATS_BH(DCCP_MIB_OUTRSTS);
+__DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
 return;
 }
@@ -378,7 +378,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 drop_and_free:
 reqsk_free(req);
 drop:
-DCCP_INC_STATS_BH(DCCP_MIB_ATTEMPTFAILS);
+__DCCP_INC_STATS(DCCP_MIB_ATTEMPTFAILS);
 return -1;
 }
@@ -527,11 +527,11 @@ static struct sock *dccp_v6_request_recv_sock(const struct sock *sk,
 return newsk;
 out_overflow:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 dst_release(dst);
 out:
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
 return NULL;
 }

View file

@@ -127,7 +127,7 @@ struct sock *dccp_create_openreq_child(const struct sock *sk,
 }
 dccp_init_xmit_timers(newsk);
-DCCP_INC_STATS_BH(DCCP_MIB_PASSIVEOPENS);
+__DCCP_INC_STATS(DCCP_MIB_PASSIVEOPENS);
 }
 return newsk;
 }

View file

@@ -253,7 +253,7 @@ out_nonsensical_length:
 return 0;
 out_invalid_option:
-DCCP_INC_STATS_BH(DCCP_MIB_INVALIDOPT);
+__DCCP_INC_STATS(DCCP_MIB_INVALIDOPT);
 rc = DCCP_RESET_CODE_OPTION_ERROR;
 out_featneg_failed:
 DCCP_WARN("DCCP(%p): Option %d (len=%d) error=%u\n", sk, opt, len, rc);

View file

@@ -28,7 +28,7 @@ static void dccp_write_err(struct sock *sk)
 dccp_send_reset(sk, DCCP_RESET_CODE_ABORTED);
 dccp_done(sk);
-DCCP_INC_STATS_BH(DCCP_MIB_ABORTONTIMEOUT);
+__DCCP_INC_STATS(DCCP_MIB_ABORTONTIMEOUT);
 }
 /* A write timeout has occurred. Process the after effects. */
@@ -100,7 +100,7 @@ static void dccp_retransmit_timer(struct sock *sk)
 * total number of retransmissions of clones of original packets.
 */
 if (icsk->icsk_retransmits == 0)
-DCCP_INC_STATS_BH(DCCP_MIB_TIMEOUTS);
+__DCCP_INC_STATS(DCCP_MIB_TIMEOUTS);
 if (dccp_retransmit_skb(sk) != 0) {
 /*
@@ -179,7 +179,7 @@ static void dccp_delack_timer(unsigned long data)
 if (sock_owned_by_user(sk)) {
 /* Try again later. */
 icsk->icsk_ack.blocked = 1;
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 sk_reset_timer(sk, &icsk->icsk_delack_timer,
 jiffies + TCP_DELACK_MIN);
 goto out;
@@ -209,7 +209,7 @@ static void dccp_delack_timer(unsigned long data)
 icsk->icsk_ack.ato = TCP_ATO_MIN;
 }
 dccp_send_ack(sk);
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 }
 out:
 bh_unlock_sock(sk);

View file

@@ -436,7 +436,7 @@ static int arp_filter(__be32 sip, __be32 tip, struct net_device *dev)
 if (IS_ERR(rt))
 return 1;
 if (rt->dst.dev != dev) {
-NET_INC_STATS_BH(net, LINUX_MIB_ARPFILTER);
+__NET_INC_STATS(net, LINUX_MIB_ARPFILTER);
 flag = 1;
 }
 ip_rt_put(rt);

View file

@@ -363,7 +363,7 @@ static void icmp_push_reply(struct icmp_bxm *icmp_param,
 icmp_param->data_len+icmp_param->head_len,
 icmp_param->head_len,
 ipc, rt, MSG_DONTWAIT) < 0) {
-ICMP_INC_STATS_BH(sock_net(sk), ICMP_MIB_OUTERRORS);
+__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
 ip_flush_pending_frames(sk);
 } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
 struct icmphdr *icmph = icmp_hdr(skb);
@@ -744,7 +744,7 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
 * avoid additional coding at protocol handlers.
 */
 if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
-ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
 return;
 }
@@ -865,7 +865,7 @@ static bool icmp_unreach(struct sk_buff *skb)
 out:
 return true;
 out_err:
-ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 return false;
 }
@@ -877,7 +877,7 @@ out_err:
 static bool icmp_redirect(struct sk_buff *skb)
 {
 if (skb->len < sizeof(struct iphdr)) {
-ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(dev_net(skb->dev), ICMP_MIB_INERRORS);
 return false;
 }
@@ -956,7 +956,7 @@ static bool icmp_timestamp(struct sk_buff *skb)
 return true;
 out_err:
-ICMP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(dev_net(skb_dst(skb)->dev), ICMP_MIB_INERRORS);
 return false;
 }
@@ -996,7 +996,7 @@ int icmp_rcv(struct sk_buff *skb)
 skb_set_network_header(skb, nh);
 }
-ICMP_INC_STATS_BH(net, ICMP_MIB_INMSGS);
+__ICMP_INC_STATS(net, ICMP_MIB_INMSGS);
 if (skb_checksum_simple_validate(skb))
 goto csum_error;
@@ -1006,7 +1006,7 @@ int icmp_rcv(struct sk_buff *skb)
 icmph = icmp_hdr(skb);
-ICMPMSGIN_INC_STATS_BH(net, icmph->type);
+ICMPMSGIN_INC_STATS(net, icmph->type);
 /*
 * 18 is the highest 'known' ICMP type. Anything else is a mystery
 *
@@ -1052,9 +1052,9 @@ drop:
 kfree_skb(skb);
 return 0;
 csum_error:
-ICMP_INC_STATS_BH(net, ICMP_MIB_CSUMERRORS);
+__ICMP_INC_STATS(net, ICMP_MIB_CSUMERRORS);
 error:
-ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 goto drop;
 }

View file

@@ -427,7 +427,7 @@ struct dst_entry *inet_csk_route_req(const struct sock *sk,
 route_err:
 ip_rt_put(rt);
 no_route:
-IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_req);
@@ -466,7 +466,7 @@ route_err:
 ip_rt_put(rt);
 no_route:
 rcu_read_unlock();
-IP_INC_STATS_BH(net, IPSTATS_MIB_OUTNOROUTES);
+__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
 return NULL;
 }
 EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

View file

@@ -360,7 +360,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
 __sk_nulls_add_node_rcu(sk, &head->chain);
 if (tw) {
 sk_nulls_del_node_init_rcu((struct sock *)tw);
-NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 }
 spin_unlock(lock);
 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

View file

@@ -147,9 +147,9 @@ static void tw_timer_handler(unsigned long data)
 struct inet_timewait_sock *tw = (struct inet_timewait_sock *)data;
 if (tw->tw_kill)
-NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
+__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITKILLED);
 else
-NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_TIMEWAITED);
+__NET_INC_STATS(twsk_net(tw), LINUX_MIB_TIMEWAITED);
 inet_twsk_kill(tw);
 }

View file

@@ -65,8 +65,8 @@ static int ip_forward_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 struct ip_options *opt = &(IPCB(skb)->opt);
-IP_INC_STATS_BH(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
+__IP_INC_STATS(net, IPSTATS_MIB_OUTFORWDATAGRAMS);
-IP_ADD_STATS_BH(net, IPSTATS_MIB_OUTOCTETS, skb->len);
+__IP_ADD_STATS(net, IPSTATS_MIB_OUTOCTETS, skb->len);
 if (unlikely(opt->optlen))
 ip_forward_options(skb);
@@ -157,7 +157,7 @@ sr_failed:
 too_many_hops:
 /* Tell the sender its packet died... */
-IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
 drop:
 kfree_skb(skb);

View file

@@ -204,14 +204,14 @@ static void ip_expire(unsigned long arg)
 goto out;
 ipq_kill(qp);
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 if (!inet_frag_evicting(&qp->q)) {
 struct sk_buff *head = qp->q.fragments;
 const struct iphdr *iph;
 int err;
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
 if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
 goto out;
@@ -291,7 +291,7 @@ static int ip_frag_too_far(struct ipq *qp)
 struct net *net;
 net = container_of(qp->q.net, struct net, ipv4.frags);
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 }
 return rc;
@@ -635,7 +635,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
 ip_send_check(iph);
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
 qp->q.fragments = NULL;
 qp->q.fragments_tail = NULL;
 return 0;
@@ -647,7 +647,7 @@ out_nomem:
 out_oversize:
 net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
 out_fail:
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 return err;
 }
@@ -658,7 +658,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 int vif = l3mdev_master_ifindex_rcu(dev);
 struct ipq *qp;
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMREQDS);
 skb_orphan(skb);
 /* Lookup (or create) queue header */
@@ -675,7 +675,7 @@ int ip_defrag(struct net *net, struct sk_buff *skb, u32 user)
 return ret;
 }
-IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
+__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
 kfree_skb(skb);
 return -ENOMEM;
 }

View file

@@ -218,17 +218,17 @@ static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 protocol = -ret;
 goto resubmit;
 }
-IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
 } else {
 if (!raw) {
 if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INUNKNOWNPROTOS);
+__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
 icmp_send(skb, ICMP_DEST_UNREACH,
 ICMP_PROT_UNREACH, 0);
 }
 kfree_skb(skb);
 } else {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INDELIVERS);
+__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
 consume_skb(skb);
 }
 }
@@ -273,7 +273,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
 --ANK (980813)
 */
 if (skb_cow(skb, skb_headroom(skb))) {
-IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INDISCARDS);
+__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
 goto drop;
 }
@@ -282,7 +282,7 @@ static inline bool ip_rcv_options(struct sk_buff *skb)
 opt->optlen = iph->ihl*4 - sizeof(struct iphdr);
 if (ip_options_compile(dev_net(dev), opt, skb)) {
-IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
+__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
 goto drop;
 }
@@ -337,7 +337,7 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 iph->tos, skb->dev);
 if (unlikely(err)) {
 if (err == -EXDEV)
-NET_INC_STATS_BH(net, LINUX_MIB_IPRPFILTER);
+__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
 goto drop;
 }
 }
@@ -358,9 +358,9 @@ static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 rt = skb_rtable(skb);
 if (rt->rt_type == RTN_MULTICAST) {
-IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INMCAST, skb->len);
+__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
 } else if (rt->rt_type == RTN_BROADCAST) {
-IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_INBCAST, skb->len);
+__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
 } else if (skb->pkt_type == PACKET_BROADCAST ||
 skb->pkt_type == PACKET_MULTICAST) {
 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
@@ -409,11 +409,11 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 net = dev_net(dev);
-IP_UPD_PO_STATS_BH(net, IPSTATS_MIB_IN, skb->len);
+__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);
 skb = skb_share_check(skb, GFP_ATOMIC);
 if (!skb) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 goto out;
 }
@@ -439,9 +439,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
 BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
 BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
-IP_ADD_STATS_BH(net,
+__IP_ADD_STATS(net,
 IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
 max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 if (!pskb_may_pull(skb, iph->ihl*4))
 goto inhdr_error;
@@ -453,7 +453,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 len = ntohs(iph->tot_len);
 if (skb->len < len) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INTRUNCATEDPKTS);
+__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
 goto drop;
 } else if (len < (iph->ihl*4))
 goto inhdr_error;
@@ -463,7 +463,7 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 * Note this now means skb->len holds ntohs(iph->tot_len).
 */
 if (pskb_trim_rcsum(skb, len)) {
-IP_INC_STATS_BH(net, IPSTATS_MIB_INDISCARDS);
+__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
 goto drop;
 }
@@ -480,9 +480,9 @@ int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
 ip_rcv_finish);
 csum_error:
-IP_INC_STATS_BH(net, IPSTATS_MIB_CSUMERRORS);
+__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
 inhdr_error:
-IP_INC_STATS_BH(net, IPSTATS_MIB_INHDRERRORS);
+__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
 drop:
 kfree_skb(skb);
 out:

View file

@@ -915,11 +915,11 @@ static int ip_error(struct sk_buff *skb)
 if (!IN_DEV_FORWARD(in_dev)) {
 switch (rt->dst.error) {
 case EHOSTUNREACH:
-IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
 break;
 case ENETUNREACH:
-IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 break;
 }
 goto out;
@@ -934,7 +934,7 @@ static int ip_error(struct sk_buff *skb)
 break;
 case ENETUNREACH:
 code = ICMP_NET_UNREACH;
-IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
 break;
 case EACCES:
 code = ICMP_PKT_FILTERED;

View file

@@ -312,11 +312,11 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 mss = __cookie_v4_check(ip_hdr(skb), th, cookie);
 if (mss == 0) {
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 goto out;
 }
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 /* check for timestamp cookie support */
 memset(&tcp_opt, 0, sizeof(tcp_opt));

View file

@@ -1443,7 +1443,7 @@ static void tcp_prequeue_process(struct sock *sk)
 struct sk_buff *skb;
 struct tcp_sock *tp = tcp_sk(sk);
-NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
+NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
 /* RX process wants to run with disabled BHs, though it is not
 * necessary */
@@ -1777,7 +1777,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
 chunk = len - tp->ucopy.len;
 if (chunk != 0) {
-NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
+NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
 len -= chunk;
 copied += chunk;
 }
@@ -1789,7 +1789,7 @@ do_prequeue:
 chunk = len - tp->ucopy.len;
 if (chunk != 0) {
-NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 len -= chunk;
 copied += chunk;
 }
@@ -1875,7 +1875,7 @@ skip_copy:
 tcp_prequeue_process(sk);
 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
-NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
+NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
 len -= chunk;
 copied += chunk;
 }
@@ -2065,13 +2065,13 @@ void tcp_close(struct sock *sk, long timeout)
 sk->sk_prot->disconnect(sk, 0);
 } else if (data_was_unread) {
 /* Unread data was tossed, zap the connection. */
-NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
+NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
 tcp_set_state(sk, TCP_CLOSE);
 tcp_send_active_reset(sk, sk->sk_allocation);
 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
 /* Check zero linger _after_ checking for unread data. */
 sk->sk_prot->disconnect(sk, 0);
-NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 } else if (tcp_close_state(sk)) {
 /* We FIN if the application ate all the data before
 * zapping the connection.
@@ -2148,7 +2148,7 @@ adjudge_to_death:
 if (tp->linger2 < 0) {
 tcp_set_state(sk, TCP_CLOSE);
 tcp_send_active_reset(sk, GFP_ATOMIC);
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPABORTONLINGER);
 } else {
 const int tmo = tcp_fin_time(sk);
@@ -2167,7 +2167,7 @@ adjudge_to_death:
 if (tcp_check_oom(sk, 0)) {
 tcp_set_state(sk, TCP_CLOSE);
 tcp_send_active_reset(sk, GFP_ATOMIC);
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPABORTONMEMORY);
 }
 }
@@ -3091,7 +3091,7 @@ void tcp_done(struct sock *sk)
 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
-TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 tcp_set_state(sk, TCP_CLOSE);
 tcp_clear_xmit_timers(sk);

View file

@@ -155,11 +155,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 ca->last_ack = now_us;
 if (after(now_us, ca->round_start + base_owd)) {
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTTRAINDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
+__NET_ADD_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTTRAINCWND,
 tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 return;
 }
@@ -174,11 +174,11 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 125U);
 if (ca->rtt.min > thresh) {
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTDELAYDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
+__NET_ADD_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTDELAYCWND,
 tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 }
 }

View file

@@ -402,11 +402,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 ca->last_ack = now;
 if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
 ca->found |= HYSTART_ACK_TRAIN;
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTTRAINDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
+__NET_ADD_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTTRAINCWND,
 tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 }
 }
@@ -423,11 +423,11 @@ static void hystart_update(struct sock *sk, u32 delay)
 if (ca->curr_rtt > ca->delay_min +
 HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
 ca->found |= HYSTART_DELAY;
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTDELAYDETECT);
-NET_ADD_STATS_BH(sock_net(sk),
+__NET_ADD_STATS(sock_net(sk),
 LINUX_MIB_TCPHYSTARTDELAYCWND,
 tp->snd_cwnd);
 tp->snd_ssthresh = tp->snd_cwnd;
 }
 }

View file

@@ -256,8 +256,8 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
 req1 = fastopenq->rskq_rst_head;
 if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
 spin_unlock(&fastopenq->lock);
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
 return false;
 }
 fastopenq->rskq_rst_head = req1->dl_next;
@@ -282,7 +282,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 struct sock *child;
 if (foc->len == 0) /* Client requests a cookie */
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENCOOKIEREQD);
 if (!((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) &&
 (syn_data || foc->len >= 0) &&
@@ -311,13 +311,13 @@ fastopen:
 child = tcp_fastopen_create_child(sk, skb, dst, req);
 if (child) {
 foc->len = -1;
-NET_INC_STATS_BH(sock_net(sk),
+__NET_INC_STATS(sock_net(sk),
 LINUX_MIB_TCPFASTOPENPASSIVE);
 return child;
 }
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 } else if (foc->len > 0) /* Client presents an invalid cookie */
-NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
+__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
 valid_foc.exp = foc->exp;
 *foc = valid_foc;

View file

@@ -869,7 +869,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		else
 			mib_idx = LINUX_MIB_TCPSACKREORDER;
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -1062,7 +1062,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = true;
 		tcp_dsack_seen(tp);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq);
 		u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq);
@@ -1071,7 +1071,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = true;
 			tcp_dsack_seen(tp);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1289,7 +1289,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	if (skb->len > 0) {
 		BUG_ON(!tcp_skb_pcount(skb));
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED);
 		return false;
 	}
@@ -1313,7 +1313,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 	tcp_unlink_write_queue(skb, sk);
 	sk_wmem_free_skb(sk, skb);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKMERGED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED);
 	return true;
 }
@@ -1469,7 +1469,7 @@ noop:
 	return skb;
 fallback:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK);
 	return NULL;
 }
@@ -1657,7 +1657,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
 				mib_idx = LINUX_MIB_TCPSACKDISCARD;
 			}
-			NET_INC_STATS_BH(sock_net(sk), mib_idx);
+			__NET_INC_STATS(sock_net(sk), mib_idx);
 			if (i == 0)
 				first_sack_index = -1;
 			continue;
@@ -1909,7 +1909,7 @@ void tcp_enter_loss(struct sock *sk)
 	skb = tcp_write_queue_head(sk);
 	is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED);
 	if (is_reneg) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING);
 		tp->sacked_out = 0;
 		tp->fackets_out = 0;
 	}
@@ -2395,7 +2395,7 @@ static bool tcp_try_undo_recovery(struct sock *sk)
 		else
 			mib_idx = LINUX_MIB_TCPFULLUNDO;
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
@@ -2417,7 +2417,7 @@ static bool tcp_try_undo_dsack(struct sock *sk)
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwnd_reduction(sk, false);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 		return true;
 	}
 	return false;
@@ -2432,10 +2432,10 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 		tcp_undo_cwnd_reduction(sk, true);
 		DBGUNDO(sk, "partial loss");
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		if (frto_undo)
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPSPURIOUSRTOS);
 		inet_csk(sk)->icsk_retransmits = 0;
 		if (frto_undo || tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2559,7 +2559,7 @@ static void tcp_mtup_probe_failed(struct sock *sk)
 	icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1;
 	icsk->icsk_mtup.probe_size = 0;
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL);
 }
 static void tcp_mtup_probe_success(struct sock *sk)
@@ -2579,7 +2579,7 @@ static void tcp_mtup_probe_success(struct sock *sk)
 	icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size;
 	icsk->icsk_mtup.probe_size = 0;
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS);
 }
 /* Do a simple retransmit without using the backoff mechanisms in
@@ -2643,7 +2643,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	else
 		mib_idx = LINUX_MIB_TCPSACKRECOVERY;
-	NET_INC_STATS_BH(sock_net(sk), mib_idx);
+	__NET_INC_STATS(sock_net(sk), mib_idx);
 	tp->prior_ssthresh = 0;
 	tcp_init_undo(tp);
@@ -2736,7 +2736,7 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
 		DBGUNDO(sk, "partial recovery");
 		tcp_undo_cwnd_reduction(sk, true);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 		tcp_try_keep_open(sk);
 		return true;
 	}
@@ -3431,7 +3431,7 @@ bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
 		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);
 		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
-			NET_INC_STATS_BH(net, mib_idx);
+			__NET_INC_STATS(net, mib_idx);
 			return true;	/* rate-limited: don't send yet! */
 		}
 	}
@@ -3464,7 +3464,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
 		challenge_count = 0;
 	}
 	if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
 		tcp_send_ack(sk);
 	}
 }
@@ -3513,8 +3513,8 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 		tcp_end_cwnd_reduction(sk);
 		tcp_try_keep_open(sk);
-		NET_INC_STATS_BH(sock_net(sk),
+		__NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPLOSSPROBERECOVERY);
 	} else if (!(flag & (FLAG_SND_UNA_ADVANCED |
 			     FLAG_NOT_DUP | FLAG_DATA_SACKED))) {
 		/* Pure dupack: original and TLP probe arrived; no loss */
@@ -3618,14 +3618,14 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPACKS);
 	} else {
 		u32 ack_ev_flags = CA_ACK_SLOWPATH;
 		if (ack_seq != TCP_SKB_CB(skb)->end_seq)
 			flag |= FLAG_DATA;
 		else
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPUREACKS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPUREACKS);
 		flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
@@ -4128,7 +4128,7 @@ static void tcp_dsack_set(struct sock *sk, u32 seq, u32 end_seq)
 		else
 			mib_idx = LINUX_MIB_TCPDSACKOFOSENT;
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 		tp->rx_opt.dsack = 1;
 		tp->duplicate_sack[0].start_seq = seq;
@@ -4152,7 +4152,7 @@ static void tcp_send_dupack(struct sock *sk, const struct sk_buff *skb)
 	if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 	    before(TCP_SKB_CB(skb)->seq, tp->rcv_nxt)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
@@ -4302,7 +4302,7 @@ static bool tcp_try_coalesce(struct sock *sk,
 	atomic_add(delta, &sk->sk_rmem_alloc);
 	sk_mem_charge(sk, delta);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOALESCE);
 	TCP_SKB_CB(to)->end_seq = TCP_SKB_CB(from)->end_seq;
 	TCP_SKB_CB(to)->ack_seq = TCP_SKB_CB(from)->ack_seq;
 	TCP_SKB_CB(to)->tcp_flags |= TCP_SKB_CB(from)->tcp_flags;
@@ -4390,7 +4390,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tcp_ecn_check_ce(tp, skb);
 	if (unlikely(tcp_try_rmem_schedule(sk, skb, skb->truesize))) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFODROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFODROP);
 		tcp_drop(sk, skb);
 		return;
 	}
@@ -4399,7 +4399,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	tp->pred_flags = 0;
 	inet_csk_schedule_ack(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOQUEUE);
 	SOCK_DEBUG(sk, "out of order segment: rcv_next %X seq %X - %X\n",
 		   tp->rcv_nxt, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
@@ -4454,7 +4454,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 	if (skb1 && before(seq, TCP_SKB_CB(skb1)->end_seq)) {
 		if (!after(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
 			/* All the bits are present. Drop. */
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 			tcp_drop(sk, skb);
 			skb = NULL;
 			tcp_dsack_set(sk, seq, end_seq);
@@ -4493,7 +4493,7 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
 		__skb_unlink(skb1, &tp->out_of_order_queue);
 		tcp_dsack_extend(sk, TCP_SKB_CB(skb1)->seq,
 				 TCP_SKB_CB(skb1)->end_seq);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPOFOMERGE);
 		tcp_drop(sk, skb1);
 	}
@@ -4658,7 +4658,7 @@ queue_and_out:
 	if (!after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt)) {
 		/* A retransmit, 2nd most common case. Force an immediate ack. */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOST);
 		tcp_dsack_set(sk, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq);
 out_of_window:
@@ -4704,7 +4704,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
 	__skb_unlink(skb, list);
 	__kfree_skb(skb);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRCVCOLLAPSED);
 	return next;
 }
@@ -4863,7 +4863,7 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
 	bool res = false;
 	if (!skb_queue_empty(&tp->out_of_order_queue)) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_OFOPRUNED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
 		__skb_queue_purge(&tp->out_of_order_queue);
 		/* Reset SACK state. A conforming SACK implementation will
@@ -4892,7 +4892,7 @@ static int tcp_prune_queue(struct sock *sk)
 	SOCK_DEBUG(sk, "prune_queue: c=%x\n", tp->copied_seq);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PRUNECALLED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_PRUNECALLED);
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
@@ -4922,7 +4922,7 @@ static int tcp_prune_queue(struct sock *sk)
 	 * drop receive data on the floor. It will get retransmitted
 	 * and hopefully then we'll have sufficient space.
 	 */
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_RCVPRUNED);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_RCVPRUNED);
 	/* Massive buffer overcommit. */
 	tp->pred_flags = 0;
@@ -5181,7 +5181,7 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	if (tcp_fast_parse_options(skb, th, tp) && tp->rx_opt.saw_tstamp &&
 	    tcp_paws_discard(sk, skb)) {
 		if (!th->rst) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 			if (!tcp_oow_rate_limited(sock_net(sk), skb,
 						  LINUX_MIB_TCPACKSKIPPEDPAWS,
 						  &tp->last_oow_ack_time))
@@ -5233,8 +5233,8 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
 	if (th->syn) {
syn_challenge:
 		if (syn_inerr)
-			TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
+			__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNCHALLENGE);
 		tcp_send_challenge_ack(sk, skb);
 		goto discard;
 	}
@@ -5349,7 +5349,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				tcp_data_snd_check(sk);
 				return;
 			} else { /* Header too small */
-				TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+				__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 				goto discard;
 			}
 		} else {
@@ -5377,7 +5377,8 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 				__skb_pull(skb, tcp_header_len);
 				tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
+				__NET_INC_STATS(sock_net(sk),
+						LINUX_MIB_TCPHPHITSTOUSER);
 				eaten = 1;
 			}
 		}
@@ -5399,7 +5400,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			tcp_rcv_rtt_measure_ts(sk, skb);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPHPHITS);
 			/* Bulk data transfer: receiver */
 			eaten = tcp_queue_rcv(sk, skb, tcp_header_len,
@@ -5456,8 +5457,8 @@ step5:
 	return;
csum_error:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
discard:
 	tcp_drop(sk, skb);
@@ -5549,12 +5550,14 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 			break;
 		}
 		tcp_rearm_rto(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		return true;
 	}
 	tp->syn_data_acked = tp->syn_data;
 	if (tp->syn_data_acked)
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+		__NET_INC_STATS(sock_net(sk),
+				LINUX_MIB_TCPFASTOPENACTIVE);
 	tcp_fastopen_add_skb(sk, synack);
@@ -5589,7 +5592,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 		if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 		    !between(tp->rx_opt.rcv_tsecr, tp->retrans_stamp,
 			     tcp_time_stamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSACTIVEREJECTED);
+			__NET_INC_STATS(sock_net(sk),
+					LINUX_MIB_PAWSACTIVEREJECTED);
 			goto reset_and_undo;
 		}
@@ -5958,7 +5962,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		    (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 		     after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt))) {
 			tcp_done(sk);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 			return 1;
 		}
@@ -6015,7 +6019,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb)
 		if (sk->sk_shutdown & RCV_SHUTDOWN) {
 			if (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq &&
 			    after(TCP_SKB_CB(skb)->end_seq - th->fin, tp->rcv_nxt)) {
-				NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
+				__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
 				tcp_reset(sk);
 				return 1;
 			}
@@ -6153,10 +6157,10 @@ static bool tcp_syn_flood_action(const struct sock *sk,
 	if (net->ipv4.sysctl_tcp_syncookies) {
 		msg = "Sending cookies";
 		want_cookie = true;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
 	} else
#endif
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
 	if (!queue->synflood_warned &&
 	    net->ipv4.sysctl_tcp_syncookies != 2 &&
@@ -6217,7 +6221,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	 * timeout.
 	 */
 	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
 	}
@@ -6264,7 +6268,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		if (dst && strict &&
 		    !tcp_peer_is_proven(req, dst, true,
 					tmp_opt.saw_tstamp)) {
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
 			goto drop_and_release;
 		}
 	}

View file

@@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
 	 * an established socket here.
 	 */
 	if (seq != tcp_rsk(req)->snt_isn) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 	} else if (abort) {
 		/*
 		 * Still in SYN_RECV, just remove it silently.
@@ -372,7 +372,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 				       th->dest, iph->saddr, ntohs(th->source),
 				       inet_iif(icmp_skb));
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	if (sk->sk_state == TCP_TIME_WAIT) {
@@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	 */
 	if (sock_owned_by_user(sk)) {
 		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
-			NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	}
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
@@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
@@ -697,8 +697,8 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
-	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-	TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
#ifdef CONFIG_TCP_MD5SIG
out:
@@ -779,7 +779,7 @@ static void tcp_v4_send_ack(struct net *net,
 			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
 			      &arg, arg.iov[0].iov_len);
-	TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 }
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1151,12 +1151,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
 		return false;
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
@@ -1342,7 +1342,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
 	return newsk;
exit_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
 	dst_release(dst);
exit:
@@ -1432,8 +1432,8 @@ discard:
 	return 0;
csum_err:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
@@ -1513,8 +1513,8 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 		while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
 			sk_backlog_rcv(sk, skb1);
-			NET_INC_STATS_BH(sock_net(sk),
+			__NET_INC_STATS(sock_net(sk),
 					LINUX_MIB_TCPPREQUEUEDROPPED);
 		}
 		tp->ucopy.memory = 0;
@@ -1547,7 +1547,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 		goto discard_it;
 	/* Count it even if it's bad */
-	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
@@ -1629,7 +1629,7 @@ process:
 		}
 	}
 	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
@@ -1662,7 +1662,7 @@ process:
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
@@ -1679,9 +1679,9 @@ no_tcp_socket:
 	if (tcp_checksum_complete(skb)) {
csum_error:
-		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
-		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+		__TCP_INC_STATS(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v4_send_reset(NULL, skb);
 	}

View file

@@ -235,7 +235,7 @@ kill:
 		}
 		if (paws_reject)
-			NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);
 		if (!th->rst) {
 			/* In this case we must reset the TIMEWAIT timer.
@@ -337,7 +337,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
 		 * socket up. We've got bigger problems than
 		 * non-graceful socket closings.
 		 */
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
 	}
 	tcp_update_metrics(sk);
@@ -545,7 +545,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->rack.mstamp.v64 = 0;
 		newtp->rack.advanced = 0;
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
 	}
 	return newsk;
 }
@@ -710,7 +710,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 					  &tcp_rsk(req)->last_oow_ack_time))
 			req->rsk_ops->send_ack(sk, skb, req);
 		if (paws_reject)
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
 		return NULL;
 	}
@@ -729,7 +729,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	 * "fourth, check the SYN bit"
 	 */
 	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
 		goto embryonic_reset;
 	}
@@ -752,7 +752,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 	if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
 	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
 		inet_rsk(req)->acked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
 		return NULL;
 	}
@@ -791,7 +791,7 @@ embryonic_reset:
 	}
 	if (!fastopen) {
 		inet_csk_reqsk_queue_drop(sk, req);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
 	}
 	return NULL;
 }

View file

@@ -2212,8 +2212,8 @@ static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
 	if (unlikely(skb_fclone_busy(sk, skb))) {
-		NET_INC_STATS_BH(sock_net(sk),
+		__NET_INC_STATS(sock_net(sk),
 				LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
 	}
 	return false;
@@ -2275,7 +2275,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	tp->tlp_high_seq = tp->snd_nxt;
probe_sent:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSPROBES);
 	/* Reset s.t. tcp_rearm_rto will restart timer from now */
 	inet_csk(sk)->icsk_pending = 0;
rearm_timer:
@@ -2656,7 +2656,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		/* Update global TCP statistics. */
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
 		if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 		tp->total_retrans += segs;
 	}
 	return err;
@@ -2681,7 +2681,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 			tp->retrans_stamp = tcp_skb_timestamp(skb);
 	} else if (err != -EBUSY) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
 	}
 	if (tp->undo_retrans < 0)
@@ -2805,7 +2805,7 @@ begin_fwd:
 		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
@@ -3042,7 +3042,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	th->window = htons(min(req->rsk_rcv_wnd, 65535U));
 	tcp_options_write((__be32 *)(th + 1), NULL, &opts);
 	th->doff = (tcp_header_size >> 2);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_OUTSEGS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS);
#ifdef CONFIG_TCP_MD5SIG
 	/* Okay, we have all we need - do the md5 hash if needed */
@@ -3540,8 +3540,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	tcp_rsk(req)->txhash = net_tx_rndhash();
 	res = af_ops->send_synack(sk, NULL, &fl, req, NULL, TCP_SYNACK_NORMAL);
 	if (!res) {
-		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
 	}
 	return res;
 }

View file

@@ -65,8 +65,8 @@ int tcp_rack_mark_lost(struct sock *sk)
 			if (scb->sacked & TCPCB_SACKED_RETRANS) {
 				scb->sacked &= ~TCPCB_SACKED_RETRANS;
 				tp->retrans_out -= tcp_skb_pcount(skb);
-				NET_INC_STATS_BH(sock_net(sk),
+				__NET_INC_STATS(sock_net(sk),
 						LINUX_MIB_TCPLOSTRETRANSMIT);
 			}
 		} else if (!(scb->sacked & TCPCB_RETRANS)) {
 			/* Original data are sent sequentially so stop early

View file

@@ -30,7 +30,7 @@ static void tcp_write_err(struct sock *sk)
 	sk->sk_error_report(sk);
 	tcp_done(sk);
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
 }
 /* Do not allow orphaned sockets to eat all our resources.
@@ -68,7 +68,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 		if (do_reset)
 			tcp_send_active_reset(sk, GFP_ATOMIC);
 		tcp_done(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
 		return 1;
 	}
 	return 0;
@@ -162,8 +162,8 @@ static int tcp_write_timeout(struct sock *sk)
 			if (tp->syn_fastopen || tp->syn_data)
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
 			if (tp->syn_data && icsk->icsk_retransmits == 1)
-				NET_INC_STATS_BH(sock_net(sk),
+				__NET_INC_STATS(sock_net(sk),
 						LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 		}
 		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
 		syn_set = true;
@@ -178,8 +178,8 @@ static int tcp_write_timeout(struct sock *sk)
 			    tp->bytes_acked <= tp->rx_opt.mss_clamp) {
 				tcp_fastopen_cache_set(sk, 0, NULL, true, 0);
 				if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1)
-					NET_INC_STATS_BH(sock_net(sk),
+					__NET_INC_STATS(sock_net(sk),
 							LINUX_MIB_TCPFASTOPENACTIVEFAIL);
 			}
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);
@@ -228,7 +228,7 @@ void tcp_delack_timer_handler(struct sock *sk)
 	if (!skb_queue_empty(&tp->ucopy.prequeue)) {
 		struct sk_buff *skb;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSCHEDULERFAILED);
 		while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
 			sk_backlog_rcv(sk, skb);
@@ -248,7 +248,7 @@ void tcp_delack_timer_handler(struct sock *sk)
 			icsk->icsk_ack.ato = TCP_ATO_MIN;
 		}
 		tcp_send_ack(sk);
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKS);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
 	}
out:
@@ -265,7 +265,7 @@ static void tcp_delack_timer(unsigned long data)
 		tcp_delack_timer_handler(sk);
 	} else {
 		inet_csk(sk)->icsk_ack.blocked = 1;
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
 		/* deleguate our work to tcp_release_cb() */
 		if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags))
 			sock_hold(sk);
@@ -431,7 +431,7 @@ void tcp_retransmit_timer(struct sock *sk)
 		} else {
 			mib_idx = LINUX_MIB_TCPTIMEOUTS;
 		}
-		NET_INC_STATS_BH(sock_net(sk), mib_idx);
+		__NET_INC_STATS(sock_net(sk), mib_idx);
 	}
 	tcp_enter_loss(sk);
@@ -549,7 +549,7 @@ void tcp_syn_ack_timeout(const struct request_sock *req)
 {
 	struct net *net = read_pnet(&inet_rsk(req)->ireq_net);
-	NET_INC_STATS_BH(net, LINUX_MIB_TCPTIMEOUTS);
+	__NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
 }
 EXPORT_SYMBOL(tcp_syn_ack_timeout);

View file

@@ -688,7 +688,7 @@ void __udp4_lib_err(struct sk_buff *skb, u32 info, struct udp_table *udptable)
 			iph->saddr, uh->source, skb->dev->ifindex, udptable,
 			NULL);
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;	/* No socket for error */
 	}
@@ -882,13 +882,13 @@ send:
 	err = ip_send_skb(sock_net(sk), skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet->recverr) {
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 				       UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
 	} else
-		UDP_INC_STATS_USER(sock_net(sk),
+		UDP_INC_STATS(sock_net(sk),
 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
 	return err;
 }
@@ -1157,8 +1157,8 @@ out:
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP_INC_STATS_USER(sock_net(sk),
+		UDP_INC_STATS(sock_net(sk),
 				UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;
@@ -1242,10 +1242,10 @@ static unsigned int first_packet_length(struct sock *sk)
 	spin_lock_bh(&rcvq->lock);
 	while ((skb = skb_peek(rcvq)) != NULL &&
 		udp_lib_checksum_complete(skb)) {
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS,
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS,
 				IS_UDPLITE(sk));
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
 				IS_UDPLITE(sk));
 		atomic_inc(&sk->sk_drops);
 		__skb_unlink(skb, rcvq);
 		__skb_queue_tail(&list_kill, skb);
@@ -1352,16 +1352,16 @@ try_again:
 		trace_kfree_skb(skb, udp_recvmsg);
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 					UDP_MIB_INERRORS, is_udplite);
 		}
 		skb_free_datagram_locked(sk, skb);
 		return err;
 	}
 	if (!peeked)
-		UDP_INC_STATS_USER(sock_net(sk),
+		UDP_INC_STATS(sock_net(sk),
 				UDP_MIB_INDATAGRAMS, is_udplite);
 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -1386,8 +1386,8 @@ try_again:
csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
-		UDP_INC_STATS_USER(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+		UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	}
 	unlock_sock_fast(sk, slow);
@@ -1514,9 +1514,9 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+			__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 					is_udplite);
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		trace_udp_fail_queue_rcv_skb(rc, sk);
 		return -1;
@@ -1580,9 +1580,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			ret = encap_rcv(sk, skb);
 			if (ret <= 0) {
-				UDP_INC_STATS_BH(sock_net(sk),
+				__UDP_INC_STATS(sock_net(sk),
 						UDP_MIB_INDATAGRAMS,
 						is_udplite);
 				return -ret;
 			}
 		}
@@ -1633,8 +1633,8 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+		__UDP_INC_STATS(sock_net(sk), UDP_MIB_RCVBUFERRORS,
 				is_udplite);
 		goto drop;
 	}
@@ -1653,9 +1653,9 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return rc;
csum_error:
-	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+	__UDP_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
drop:
-	UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	__UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
@@ -1715,10 +1715,10 @@ start_lookup:
 		if (unlikely(!nskb)) {
 			atomic_inc(&sk->sk_drops);
-			UDP_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
+			__UDP_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
 					IS_UDPLITE(sk));
-			UDP_INC_STATS_BH(net, UDP_MIB_INERRORS,
+			__UDP_INC_STATS(net, UDP_MIB_INERRORS,
 					IS_UDPLITE(sk));
 			continue;
 		}
 		if (udp_queue_rcv_skb(sk, nskb) > 0)
@@ -1736,8 +1736,8 @@ start_lookup:
 		consume_skb(skb);
 	} else {
 		kfree_skb(skb);
-		UDP_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+		__UDP_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 				proto == IPPROTO_UDPLITE);
 	}
 	return 0;
 }
@@ -1851,7 +1851,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp_lib_checksum_complete(skb))
 		goto csum_error;
-	UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	__UDP_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 	/*
@@ -1878,9 +1878,9 @@ csum_error:
 		       proto == IPPROTO_UDPLITE ? "Lite" : "",
 		       &saddr, ntohs(uh->source), &daddr, ntohs(uh->dest),
 		       ulen);
-	UDP_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+	__UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
drop:
-	UDP_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+	__UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 	kfree_skb(skb);
 	return 0;
 }

View file

@@ -258,8 +258,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		IP6_INC_STATS_BH(dev_net(dst->dev), ip6_dst_idev(dst),
+		__IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
 				IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -280,8 +280,8 @@ static int ipv6_destopt_rcv(struct sk_buff *skb)
 		return 1;
 	}
-	IP6_INC_STATS_BH(dev_net(dst->dev),
+	__IP6_INC_STATS(dev_net(dst->dev),
 			ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);
 	return -1;
 }
@@ -309,8 +309,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
 	if (!pskb_may_pull(skb, skb_transport_offset(skb) + 8) ||
 	    !pskb_may_pull(skb, (skb_transport_offset(skb) +
 				 ((skb_transport_header(skb)[1] + 1) << 3)))) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -319,8 +319,8 @@ static int ipv6_rthdr_rcv(struct sk_buff *skb)
 	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr) ||
 	    skb->pkt_type != PACKET_HOST) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				IPSTATS_MIB_INADDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -334,8 +334,8 @@ looped_back:
 		 * processed by own
 		 */
 		if (!addr) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -360,8 +360,8 @@ looped_back:
 			goto unknown_rh;
 		/* Silently discard invalid RTH type 2 */
 		if (hdr->hdrlen != 2 || hdr->segments_left != 1) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					IPSTATS_MIB_INHDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -379,8 +379,8 @@ looped_back:
 		n = hdr->hdrlen >> 1;
 		if (hdr->segments_left > n) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 					  ((&hdr->segments_left) -
 					   skb_network_header(skb)));
@@ -393,8 +393,8 @@ looped_back:
 		if (skb_cloned(skb)) {
 			/* the copy is a forwarded packet */
 			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
-				IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+				__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 						IPSTATS_MIB_OUTDISCARDS);
 				kfree_skb(skb);
 				return -1;
 			}
@@ -416,14 +416,14 @@ looped_back:
 		if (xfrm6_input_addr(skb, (xfrm_address_t *)addr,
 				     (xfrm_address_t *)&ipv6_hdr(skb)->saddr,
 				     IPPROTO_ROUTING) < 0) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
 		if (!ipv6_chk_home_addr(dev_net(skb_dst(skb)->dev), addr)) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					IPSTATS_MIB_INADDRERRORS);
 			kfree_skb(skb);
 			return -1;
 		}
@@ -434,8 +434,8 @@ looped_back:
 	}
 	if (ipv6_addr_is_multicast(addr)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				IPSTATS_MIB_INADDRERRORS);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -454,8 +454,8 @@ looped_back:
 	if (skb_dst(skb)->dev->flags&IFF_LOOPBACK) {
 		if (ipv6_hdr(skb)->hop_limit <= 1) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					IPSTATS_MIB_INHDRERRORS);
 			icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT,
 				    0);
 			kfree_skb(skb);
@@ -470,7 +470,7 @@ looped_back:
 	return -1;
unknown_rh:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 			  (&hdr->type) - skb_network_header(skb));
 	return -1;
@@ -568,28 +568,28 @@ static bool ipv6_hop_jumbo(struct sk_buff *skb, int optoff)
 	if (nh[optoff + 1] != 4 || (optoff & 3) != 2) {
 		net_dbg_ratelimited("ipv6_hop_jumbo: wrong jumbo opt length/alignment %d\n",
 				    nh[optoff+1]);
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
 				IPSTATS_MIB_INHDRERRORS);
 		goto drop;
 	}
 	pkt_len = ntohl(*(__be32 *)(nh + optoff + 2));
 	if (pkt_len <= IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
 				IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff+2);
 		return false;
 	}
 	if (ipv6_hdr(skb)->payload_len) {
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
 				IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, optoff);
 		return false;
 	}
 	if (pkt_len > skb->len - sizeof(struct ipv6hdr)) {
-		IP6_INC_STATS_BH(net, ipv6_skb_idev(skb),
+		__IP6_INC_STATS(net, ipv6_skb_idev(skb),
 				IPSTATS_MIB_INTRUNCATEDPKTS);
 		goto drop;
 	}

View file

@@ -622,7 +622,7 @@ static void icmpv6_echo_reply(struct sk_buff *skb)
 			    np->dontfrag, &sockc_unused);
 	if (err) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTERRORS);
+		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTERRORS);
 		ip6_flush_pending_frames(sk);
 	} else {
 		err = icmpv6_push_pending_frames(sk, &fl6, &tmp_hdr,
@@ -674,7 +674,7 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
 	return;
out:
-	ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
+	__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
 }
 /*
@@ -710,7 +710,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
 		skb_set_network_header(skb, nh);
 	}
-	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INMSGS);
+	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INMSGS);
 	saddr = &ipv6_hdr(skb)->saddr;
 	daddr = &ipv6_hdr(skb)->daddr;
@@ -728,7 +728,7 @@ static int icmpv6_rcv(struct sk_buff *skb)
 	type = hdr->icmp6_type;
-	ICMP6MSGIN_INC_STATS_BH(dev_net(dev), idev, type);
+	ICMP6MSGIN_INC_STATS(dev_net(dev), idev, type);
 	switch (type) {
 	case ICMPV6_ECHO_REQUEST:
@@ -812,9 +812,9 @@ static int icmpv6_rcv(struct sk_buff *skb)
 	return 0;
csum_error:
-	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
+	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_CSUMERRORS);
discard_it:
-	ICMP6_INC_STATS_BH(dev_net(dev), idev, ICMP6_MIB_INERRORS);
+	__ICMP6_INC_STATS(dev_net(dev), idev, ICMP6_MIB_INERRORS);
drop_no_count:
 	kfree_skb(skb);
 	return 0;

View file

@@ -222,7 +222,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
 	__sk_nulls_add_node_rcu(sk, &head->chain);
 	if (tw) {
 		sk_nulls_del_node_init_rcu((struct sock *)tw);
-		NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
+		__NET_INC_STATS(net, LINUX_MIB_TIMEWAITRECYCLED);
 	}
 	spin_unlock(lock);
 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);

View file

@@ -78,11 +78,11 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	idev = __in6_dev_get(skb->dev);
-	IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len);
+	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
 	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
 	    !idev || unlikely(idev->cnf.disable_ipv6)) {
-		IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
@@ -109,10 +109,10 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	if (hdr->version != 6)
 		goto err;
-	IP6_ADD_STATS_BH(net, idev,
+	__IP6_ADD_STATS(net, idev,
 			IPSTATS_MIB_NOECTPKTS +
 				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
 			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
 	/*
 	 * RFC4291 2.5.3
 	 * A packet received on an interface with a destination address
@@ -169,12 +169,12 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	/* pkt_len may be zero if Jumbo payload option is present */
 	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
 		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
-			IP6_INC_STATS_BH(net,
+			__IP6_INC_STATS(net,
 					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
 			goto drop;
 		}
 		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 			goto drop;
 		}
 		hdr = ipv6_hdr(skb);
@@ -182,7 +182,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 	if (hdr->nexthdr == NEXTHDR_HOP) {
 		if (ipv6_parse_hopopts(skb) < 0) {
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
 			rcu_read_unlock();
 			return NET_RX_DROP;
 		}
@@ -197,7 +197,7 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt
 		       net, NULL, skb, dev, NULL,
 		       ip6_rcv_finish);
err:
-	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS);
+	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
 	rcu_read_unlock();
 	kfree_skb(skb);
@@ -259,18 +259,18 @@ resubmit:
 		if (ret > 0)
 			goto resubmit;
 		else if (ret == 0)
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
 	} else {
 		if (!raw) {
 			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
-				IP6_INC_STATS_BH(net, idev,
+				__IP6_INC_STATS(net, idev,
 						IPSTATS_MIB_INUNKNOWNPROTOS);
 				icmpv6_send(skb, ICMPV6_PARAMPROB,
 					    ICMPV6_UNK_NEXTHDR, nhoff);
 			}
 			kfree_skb(skb);
 		} else {
-			IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS);
+			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
 			consume_skb(skb);
 		}
 	}
@@ -278,7 +278,7 @@ resubmit:
 	return 0;
discard:
-	IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS);
+	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
 	rcu_read_unlock();
 	kfree_skb(skb);
 	return 0;
@@ -297,7 +297,7 @@ int ip6_mc_input(struct sk_buff *skb)
 	const struct ipv6hdr *hdr;
 	bool deliver;
-	IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev),
+	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
 			ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
 			skb->len);
View file

@@ -395,8 +395,8 @@ int ip6_forward(struct sk_buff *skb)
 		goto drop;
 	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				 IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
@@ -427,8 +427,8 @@ int ip6_forward(struct sk_buff *skb)
 		/* Force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				 IPSTATS_MIB_INHDRERRORS);
 		kfree_skb(skb);
 		return -ETIMEDOUT;
@@ -441,15 +441,15 @@ int ip6_forward(struct sk_buff *skb)
 		if (proxied > 0)
 			return ip6_input(skb);
 		else if (proxied < 0) {
-			IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+			__IP6_INC_STATS(net, ip6_dst_idev(dst),
 					 IPSTATS_MIB_INDISCARDS);
 			goto drop;
 		}
 	}
 	if (!xfrm6_route_forward(skb)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				 IPSTATS_MIB_INDISCARDS);
 		goto drop;
 	}
 	dst = skb_dst(skb);
@@ -505,17 +505,17 @@ int ip6_forward(struct sk_buff *skb)
 		/* Again, force OUTPUT device used as source address */
 		skb->dev = dst->dev;
 		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				 IPSTATS_MIB_INTOOBIGERRORS);
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				 IPSTATS_MIB_FRAGFAILS);
 		kfree_skb(skb);
 		return -EMSGSIZE;
 	}
 	if (skb_cow(skb, dst->dev->hard_header_len)) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(dst),
+		__IP6_INC_STATS(net, ip6_dst_idev(dst),
 				 IPSTATS_MIB_OUTDISCARDS);
 		goto drop;
 	}
@@ -525,14 +525,14 @@ int ip6_forward(struct sk_buff *skb)
 	hdr->hop_limit--;
-	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
+	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
 		       net, NULL, skb, skb->dev, dst->dev,
 		       ip6_forward_finish);
 error:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
+	__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
 drop:
 	kfree_skb(skb);
 	return -EINVAL;

View file

@@ -1984,10 +1984,10 @@ int ip6mr_compat_ioctl(struct sock *sk, unsigned int cmd, void __user *arg)
 static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTFORWDATAGRAMS);
-	IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+	__IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(net, sk, skb);
 }

View file

@@ -145,12 +145,12 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
 	if (!dev)
 		goto out_rcu_unlock;
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 	if (inet_frag_evicting(&fq->q))
 		goto out_rcu_unlock;
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
 	/* Don't send error if the first segment did not arrive. */
 	if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
@@ -223,8 +223,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1)));
 	if ((unsigned int)end > IPV6_MAXPLEN) {
-		IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+		__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 				 IPSTATS_MIB_INHDRERRORS);
 		icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 				  ((u8 *)&fhdr->frag_off -
 				   skb_network_header(skb)));
@@ -258,8 +258,8 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
 			/* RFC2460 says always send parameter problem in
 			 * this case. -DaveM
 			 */
-			IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+			__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 					 IPSTATS_MIB_INHDRERRORS);
 			icmpv6_param_prob(skb, ICMPV6_HDR_FIELD,
 					  offsetof(struct ipv6hdr, payload_len));
 			return -1;
@@ -361,8 +361,8 @@ found:
 discard_fq:
 	inet_frag_kill(&fq->q, &ip6_frags);
 err:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
 }
@@ -500,7 +500,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 					  skb_network_header_len(head));
 	rcu_read_lock();
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
 	rcu_read_unlock();
 	fq->q.fragments = NULL;
 	fq->q.fragments_tail = NULL;
@@ -513,7 +513,7 @@ out_oom:
 	net_dbg_ratelimited("ip6_frag_reasm: no memory for reassembly\n");
 out_fail:
 	rcu_read_lock();
-	IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 	rcu_read_unlock();
 	return -1;
 }
@@ -528,7 +528,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
 		goto fail_hdr;
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS);
 	/* Jumbo payload inhibits frag. header */
 	if (hdr->payload_len == 0)
@@ -544,8 +544,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 	if (!(fhdr->frag_off & htons(0xFFF9))) {
 		/* It is not a fragmented frame */
 		skb->transport_header += sizeof(struct frag_hdr);
-		IP6_INC_STATS_BH(net,
+		__IP6_INC_STATS(net,
 				 ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS);
 		IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb);
 		IP6CB(skb)->flags |= IP6SKB_FRAGMENTED;
@@ -566,13 +566,13 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
 		return ret;
 	}
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMFAILS);
 	kfree_skb(skb);
 	return -1;
 fail_hdr:
-	IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+	__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_INHDRERRORS);
 	icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
 	return -1;
 }

View file

@@ -155,11 +155,11 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	mss = __cookie_v6_check(ipv6_hdr(skb), th, cookie);
 	if (mss == 0) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED);
 		goto out;
 	}
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV);
 	/* check for timestamp cookie support */
 	memset(&tcp_opt, 0, sizeof(tcp_opt));

View file
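The hunks above and below follow a consistent rule: code that runs in softirq/BH context (receive handlers, timers, ICMP error callbacks) moves to the double-underscore form, while process-context paths keep the plain form. A hedged illustration of the calling convention (example_count_drop() is hypothetical and not part of this patch; the macros and the MIB field are real):

    /* Hypothetical helper showing which variant a caller should pick. */
    static void example_count_drop(struct net *net, bool in_softirq)
    {
    	if (in_softirq)
    		/* preemption already off, __this_cpu_inc() is safe */
    		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
    	else
    		/* process context, this_cpu_inc() handles preemption */
    		NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
    }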

@@ -336,8 +336,8 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 					skb->dev->ifindex);
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 				   ICMP6_MIB_INERRORS);
 		return;
 	}
@@ -352,13 +352,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	bh_lock_sock(sk);
 	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	if (sk->sk_state == TCP_CLOSE)
 		goto out;
 	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto out;
 	}
@@ -368,7 +368,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
 	if (sk->sk_state != TCP_LISTEN &&
 	    !between(seq, snd_una, tp->snd_nxt)) {
-		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
 		goto out;
 	}
@@ -649,12 +649,12 @@ static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
 		return false;
 	if (hash_expected && !hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
 		return true;
 	}
 	if (!hash_expected && hash_location) {
-		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
 		return true;
 	}
@@ -825,9 +825,9 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
 	if (!IS_ERR(dst)) {
 		skb_dst_set(buff, dst);
 		ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
-		TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+		__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
 		if (rst)
-			TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+			__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
 		return;
 	}
@@ -1165,7 +1165,7 @@ static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *
 	return newsk;
 out_overflow:
-	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 out_nonewsk:
 	dst_release(dst);
 out:
@@ -1276,8 +1276,8 @@ discard:
 	kfree_skb(skb);
 	return 0;
 csum_err:
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-	TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+	__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
 	goto discard;
@@ -1359,7 +1359,7 @@ static int tcp_v6_rcv(struct sk_buff *skb)
 	/*
 	 * Count it even if it's bad.
 	 */
-	TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+	__TCP_INC_STATS(net, TCP_MIB_INSEGS);
 	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
 		goto discard_it;
@@ -1421,7 +1421,7 @@ process:
 		}
 	}
 	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
 		goto discard_and_relse;
 	}
@@ -1454,7 +1454,7 @@ process:
 	} else if (unlikely(sk_add_backlog(sk, skb,
 					   sk->sk_rcvbuf + sk->sk_sndbuf))) {
 		bh_unlock_sock(sk);
-		NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+		__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
 		goto discard_and_relse;
 	}
 	bh_unlock_sock(sk);
@@ -1472,9 +1472,9 @@ no_tcp_socket:
 	if (tcp_checksum_complete(skb)) {
 csum_error:
-		TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
 bad_packet:
-		TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+		__TCP_INC_STATS(net, TCP_MIB_INERRS);
 	} else {
 		tcp_v6_send_reset(NULL, skb);
 	}

View file
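The UDP hunks below show the other half of the cleanup: the old *_USER helpers were not renamed but simply folded into the plain names, since the plain form is now the one that is safe with preemption enabled. A minimal sketch of a process-context caller (example_charge_rcv_error() is hypothetical; the macro and its (net, field, is_udplite) signature are real):

    /* Hypothetical process-context path, e.g. reached from recvmsg(). */
    static void example_charge_rcv_error(struct sock *sk, bool is_udplite)
    {
    	UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
    }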

@@ -423,24 +423,22 @@ try_again:
 		if (!peeked) {
 			atomic_inc(&sk->sk_drops);
 			if (is_udp4)
-				UDP_INC_STATS_USER(sock_net(sk),
-						   UDP_MIB_INERRORS,
-						   is_udplite);
+				UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					      is_udplite);
 			else
-				UDP6_INC_STATS_USER(sock_net(sk),
-						    UDP_MIB_INERRORS,
-						    is_udplite);
+				UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS,
+					       is_udplite);
 		}
 		skb_free_datagram_locked(sk, skb);
 		return err;
 	}
 	if (!peeked) {
 		if (is_udp4)
-			UDP_INC_STATS_USER(sock_net(sk),
-					   UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				      is_udplite);
 		else
-			UDP6_INC_STATS_USER(sock_net(sk),
-					    UDP_MIB_INDATAGRAMS, is_udplite);
+			UDP6_INC_STATS(sock_net(sk), UDP_MIB_INDATAGRAMS,
+				       is_udplite);
 	}
 	sock_recv_ts_and_drops(msg, sk, skb);
@@ -487,15 +485,15 @@ csum_copy_err:
 	slow = lock_sock_fast(sk);
 	if (!skb_kill_datagram(sk, skb, flags)) {
 		if (is_udp4) {
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_CSUMERRORS, is_udplite);
-			UDP_INC_STATS_USER(sock_net(sk),
+			UDP_INC_STATS(sock_net(sk),
 				      UDP_MIB_INERRORS, is_udplite);
 		} else {
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS(sock_net(sk),
 				       UDP_MIB_CSUMERRORS, is_udplite);
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS(sock_net(sk),
 				       UDP_MIB_INERRORS, is_udplite);
 		}
 	}
 	unlock_sock_fast(sk, slow);
@@ -523,8 +521,8 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	sk = __udp6_lib_lookup(net, daddr, uh->dest, saddr, uh->source,
 			       inet6_iif(skb), udptable, skb);
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
+		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
 				   ICMP6_MIB_INERRORS);
 		return;
 	}
@@ -572,9 +570,9 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 		/* Note that an ENOMEM error is charged twice */
 		if (rc == -ENOMEM)
-			UDP6_INC_STATS_BH(sock_net(sk),
+			__UDP6_INC_STATS(sock_net(sk),
 					 UDP_MIB_RCVBUFERRORS, is_udplite);
-		UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+		__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 		kfree_skb(skb);
 		return -1;
 	}
@@ -630,9 +628,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 			ret = encap_rcv(sk, skb);
 			if (ret <= 0) {
-				UDP_INC_STATS_BH(sock_net(sk),
+				__UDP_INC_STATS(sock_net(sk),
 						UDP_MIB_INDATAGRAMS,
 						is_udplite);
 				return -ret;
 			}
 		}
@@ -666,8 +664,8 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	udp_csum_pull_header(skb);
 	if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
-		UDP6_INC_STATS_BH(sock_net(sk),
+		__UDP6_INC_STATS(sock_net(sk),
 				 UDP_MIB_RCVBUFERRORS, is_udplite);
 		goto drop;
 	}
@@ -686,9 +684,9 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
 	return rc;
 csum_error:
-	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
+	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
-	UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+	__UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
 	atomic_inc(&sk->sk_drops);
 	kfree_skb(skb);
 	return -1;
@@ -771,10 +769,10 @@ start_lookup:
 		nskb = skb_clone(skb, GFP_ATOMIC);
 		if (unlikely(!nskb)) {
 			atomic_inc(&sk->sk_drops);
-			UDP6_INC_STATS_BH(net, UDP_MIB_RCVBUFERRORS,
+			__UDP6_INC_STATS(net, UDP_MIB_RCVBUFERRORS,
					 IS_UDPLITE(sk));
-			UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS,
+			__UDP6_INC_STATS(net, UDP_MIB_INERRORS,
					 IS_UDPLITE(sk));
 			continue;
 		}
@@ -793,8 +791,8 @@ start_lookup:
 		consume_skb(skb);
 	} else {
 		kfree_skb(skb);
-		UDP6_INC_STATS_BH(net, UDP_MIB_IGNOREDMULTI,
+		__UDP6_INC_STATS(net, UDP_MIB_IGNOREDMULTI,
 				 proto == IPPROTO_UDPLITE);
 	}
 	return 0;
 }
@@ -887,7 +885,7 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
 	if (udp_lib_checksum_complete(skb))
 		goto csum_error;
-	UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
 	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
 	kfree_skb(skb);
@@ -901,9 +899,9 @@ short_packet:
 			    daddr, ntohs(uh->dest));
 	goto discard;
 csum_error:
-	UDP6_INC_STATS_BH(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE);
 discard:
-	UDP6_INC_STATS_BH(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
+	__UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE);
 	kfree_skb(skb);
 	return 0;
 }
@@ -1015,13 +1013,14 @@ send:
 	err = ip6_send_skb(skb);
 	if (err) {
 		if (err == -ENOBUFS && !inet6_sk(sk)->recverr) {
-			UDP6_INC_STATS_USER(sock_net(sk),
+			UDP6_INC_STATS(sock_net(sk),
 				       UDP_MIB_SNDBUFERRORS, is_udplite);
 			err = 0;
 		}
-	} else
-		UDP6_INC_STATS_USER(sock_net(sk),
+	} else {
+		UDP6_INC_STATS(sock_net(sk),
 			       UDP_MIB_OUTDATAGRAMS, is_udplite);
+	}
 	return err;
 }
@@ -1342,8 +1341,8 @@ out:
 	 * seems like overkill.
 	 */
 	if (err == -ENOBUFS || test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
-		UDP6_INC_STATS_USER(sock_net(sk),
+		UDP6_INC_STATS(sock_net(sk),
 			       UDP_MIB_SNDBUFERRORS, is_udplite);
 	}
 	return err;

View file

@@ -698,12 +698,12 @@ void rxrpc_data_ready(struct sock *sk)
 	if (skb_checksum_complete(skb)) {
 		rxrpc_free_skb(skb);
 		rxrpc_put_local(local);
-		UDP_INC_STATS_BH(&init_net, UDP_MIB_INERRORS, 0);
+		__UDP_INC_STATS(&init_net, UDP_MIB_INERRORS, 0);
 		_leave(" [CSUM failed]");
 		return;
 	}
-	UDP_INC_STATS_BH(&init_net, UDP_MIB_INDATAGRAMS, 0);
+	__UDP_INC_STATS(&init_net, UDP_MIB_INDATAGRAMS, 0);
 	/* The socket buffer we have is owned by UDP, with UDP's data all over
 	 * it, but we really want our own data there.

View file

@@ -239,7 +239,7 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
 	offset = 0;
 	if ((whole > 1) || (whole && over))
-		SCTP_INC_STATS_USER(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
 	/* Create chunks for all the full sized DATA chunks. */
 	for (i = 0, len = first_len; i < whole; i++) {

View file

@@ -84,7 +84,7 @@ static inline int sctp_rcv_checksum(struct net *net, struct sk_buff *skb)
 	if (val != cmp) {
 		/* CRC failure, dump it. */
-		SCTP_INC_STATS_BH(net, SCTP_MIB_CHECKSUMERRORS);
+		__SCTP_INC_STATS(net, SCTP_MIB_CHECKSUMERRORS);
 		return -1;
 	}
 	return 0;
@@ -122,7 +122,7 @@ int sctp_rcv(struct sk_buff *skb)
 	if (skb->pkt_type != PACKET_HOST)
 		goto discard_it;
-	SCTP_INC_STATS_BH(net, SCTP_MIB_INSCTPPACKS);
+	__SCTP_INC_STATS(net, SCTP_MIB_INSCTPPACKS);
 	if (skb_linearize(skb))
 		goto discard_it;
@@ -208,7 +208,7 @@ int sctp_rcv(struct sk_buff *skb)
 	 */
 	if (!asoc) {
 		if (sctp_rcv_ootb(skb)) {
-			SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
+			__SCTP_INC_STATS(net, SCTP_MIB_OUTOFBLUES);
 			goto discard_release;
 		}
 	}
@@ -264,9 +264,9 @@ int sctp_rcv(struct sk_buff *skb)
 			skb = NULL; /* sctp_chunk_free already freed the skb */
 			goto discard_release;
 		}
-		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_BACKLOG);
+		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_BACKLOG);
 	} else {
-		SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_SOFTIRQ);
+		__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_SOFTIRQ);
 		sctp_inq_push(&chunk->rcvr->inqueue, chunk);
 	}
@@ -281,7 +281,7 @@ int sctp_rcv(struct sk_buff *skb)
 	return 0;
 discard_it:
-	SCTP_INC_STATS_BH(net, SCTP_MIB_IN_PKT_DISCARDS);
+	__SCTP_INC_STATS(net, SCTP_MIB_IN_PKT_DISCARDS);
 	kfree_skb(skb);
 	return 0;
@@ -532,7 +532,7 @@ struct sock *sctp_err_lookup(struct net *net, int family, struct sk_buff *skb,
 	 * servers this needs to be solved differently.
 	 */
 	if (sock_owned_by_user(sk))
-		NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
 	*app = asoc;
 	*tpp = transport;
@@ -589,7 +589,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
 	skb->network_header = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
 		return;
 	}
 	/* Warning: The sock lock is held. Remember to call

View file

@@ -162,7 +162,7 @@ static void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 	skb->network_header = saveip;
 	skb->transport_header = savesctp;
 	if (!sk) {
-		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_INERRORS);
+		__ICMP6_INC_STATS(net, idev, ICMP6_MIB_INERRORS);
 		goto out;
 	}

View file

@@ -1018,11 +1018,11 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
 	/* Suck it into the iovec, verify checksum if not done by hw. */
 	if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
-		UDPX_INC_STATS_BH(sk, UDP_MIB_INERRORS);
+		__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
 		goto out_unlock;
 	}
-	UDPX_INC_STATS_BH(sk, UDP_MIB_INDATAGRAMS);
+	__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
 	xprt_adjust_cwnd(xprt, task, copied);
 	xprt_complete_rqst(task, copied);