Merge branch 'net-refcount_t'

Elena Reshetova says:

====================
v3 net generic subsystem refcount conversions

Changes in v3:
Rebased on top of the net-next tree.

Changes in v2:
No changes in the patches apart from rebases, but refcount_t now
defaults to atomic_t (*) and uses the standard atomic operations
unless CONFIG_REFCOUNT_FULL is enabled. This is a compromise for
systems that are performance-critical (such as net) and cannot accept
even a slight delay in the refcounter operations.

This series, for core network subsystem components, replaces atomic_t reference
counters with the new refcount_t type and API (see include/linux/refcount.h).
By doing this we prevent intentional or accidental
underflows or overflows that can lead to use-after-free vulnerabilities.
These patches contain only the generic net pieces; other changes will be sent separately.

The patches are fully independent and can be cherry-picked separately.
The big patches, such as the conversions for the sock structure, need a very
detailed look from maintainers: refcount management in them is quite complex,
and while it seems they would benefit from the change, extra checking is needed.
The biggest corner issue is the fact that refcount_inc() does not increment
from zero.

If there are no objections to the patches, please merge them via the respective trees.

* The respective change is currently merged into -next as
  "locking/refcount: Create unchecked atomic_t implementation".
====================
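
The compromise described in the v2 notes means that, unless CONFIG_REFCOUNT_FULL
is selected, the refcount_t operations fall back to the plain atomic ones. A
minimal sketch of that unchecked fallback (an illustration of the idea behind
the patch cited above, not the patch itself):

typedef struct refcount_struct {
	atomic_t refs;
} refcount_t;

/* Without CONFIG_REFCOUNT_FULL, refcount ops compile down to the
 * corresponding atomic ops, so performance-critical paths pay no
 * extra cost for the conversion. */
static inline void refcount_inc(refcount_t *r)
{
	atomic_inc(&r->refs);
}

static inline bool refcount_dec_and_test(refcount_t *r)
{
	return atomic_dec_and_test(&r->refs);
}

With CONFIG_REFCOUNT_FULL enabled, the same calls are replaced by checked
variants that warn and saturate instead of wrapping.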

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2017-07-01 07:39:09 -07:00
commit 0b58e6db0f
105 changed files with 321 additions and 322 deletions
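
The corner issue called out above, that refcount_inc() does not increment from
zero, is the safety property preventing resurrection of an object whose last
reference is already gone. A self-contained userspace model of the two relevant
primitives (a sketch of the semantics only, using plain non-atomic ints; the
real API is in include/linux/refcount.h):

#include <stdbool.h>
#include <stdio.h>

struct ref { int val; };	/* stand-in for refcount_t */

/* Model of refcount_inc_not_zero(): take a reference only while at
 * least one other reference still exists. */
static bool ref_inc_not_zero(struct ref *r)
{
	if (r->val == 0)
		return false;	/* object already dead: refuse to resurrect */
	r->val++;
	return true;
}

/* Model of refcount_dec_and_test(): drop a reference and report
 * whether it was the last one. */
static bool ref_dec_and_test(struct ref *r)
{
	return --r->val == 0;
}

int main(void)
{
	struct ref r = { .val = 1 };

	if (ref_dec_and_test(&r))
		printf("last reference gone, object may be freed\n");

	/* A plain atomic_inc() would bump 0 -> 1 here and hand out a
	 * reference to freed memory; the checked API refuses. */
	if (!ref_inc_not_zero(&r))
		printf("refused to revive a dead object\n");
	return 0;
}

This is why lookup paths in the diff below use refcount_inc_not_zero() (for
example the __ipv4_neigh_lookup() and sk_refcnt hunks), and why counters that
legitimately start at zero, such as rsk_refcnt, needed the detailed review the
cover letter asks for.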


@ -877,7 +877,7 @@ static void aead_sock_destruct(struct sock *sk)
unsigned int ivlen = crypto_aead_ivsize(
crypto_aead_reqtfm(&ctx->aead_req));
WARN_ON(atomic_read(&sk->sk_refcnt) != 0);
WARN_ON(refcount_read(&sk->sk_refcnt) != 0);
aead_put_sgl(sk);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);


@ -924,12 +924,7 @@ fore200e_tx_irq(struct fore200e* fore200e)
else {
dev_kfree_skb_any(entry->skb);
}
#if 1
/* race fixed by the above incarnation mechanism, but... */
if (atomic_read(&sk_atm(vcc)->sk_wmem_alloc) < 0) {
atomic_set(&sk_atm(vcc)->sk_wmem_alloc, 0);
}
#endif
/* check error condition */
if (*entry->status & STATUS_ERROR)
atomic_inc(&vcc->stats->tx_err);
@ -1130,13 +1125,9 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
return -ENOMEM;
}
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
vcc->push(vcc, skb);
atomic_inc(&vcc->stats->rx);
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
return 0;
}
@ -1572,7 +1563,6 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
unsigned long flags;
ASSERT(vcc);
ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
ASSERT(fore200e);
ASSERT(fore200e_vcc);


@ -2395,7 +2395,7 @@ he_close(struct atm_vcc *vcc)
* TBRQ, the host issues the close command to the adapter.
*/
while (((tx_inuse = atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
while (((tx_inuse = refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) > 1) &&
(retry < MAX_RETRY)) {
msleep(sleep);
if (sleep < 250)


@ -724,7 +724,7 @@ push_on_scq(struct idt77252_dev *card, struct vc_map *vc, struct sk_buff *skb)
struct sock *sk = sk_atm(vcc);
vc->estimator->cells += (skb->len + 47) / 48;
if (atomic_read(&sk->sk_wmem_alloc) >
if (refcount_read(&sk->sk_wmem_alloc) >
(sk->sk_sndbuf >> 1)) {
u32 cps = vc->estimator->maxcps;
@ -2009,7 +2009,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
atomic_inc(&vcc->stats->tx_err);
return -ENOMEM;
}
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
skb_put_data(skb, cell, 52);


@ -742,7 +742,7 @@ int schedule_nes_timer(struct nes_cm_node *cm_node, struct sk_buff *skb,
if (type == NES_TIMER_TYPE_SEND) {
new_send->seq_num = ntohl(tcp_hdr(skb)->seq);
atomic_inc(&new_send->skb->users);
refcount_inc(&new_send->skb->users);
spin_lock_irqsave(&cm_node->retrans_list_lock, flags);
cm_node->send_entry = new_send;
add_ref_cm_node(cm_node);
@ -924,7 +924,7 @@ static void nes_cm_timer_tick(unsigned long pass)
flags);
break;
}
atomic_inc(&send_entry->skb->users);
refcount_inc(&send_entry->skb->users);
cm_packets_retrans++;
nes_debug(NES_DBG_CM, "Retransmitting send_entry %p "
"for node %p, jiffies = %lu, time to send = "


@ -155,7 +155,7 @@ mISDN_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
copied = skb->len + MISDN_HEADER_LEN;
if (len < copied) {
if (flags & MSG_PEEK)
atomic_dec(&skb->users);
refcount_dec(&skb->users);
else
skb_queue_head(&sk->sk_receive_queue, skb);
return -ENOSPC;


@ -201,7 +201,7 @@ static int rionet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
rionet_queue_tx_msg(skb, ndev,
nets[rnet->mport->id].active[i]);
if (count)
atomic_inc(&skb->users);
refcount_inc(&skb->users);
count++;
}
} else if (RIONET_MAC_MATCH(eth->h_dest)) {


@ -483,7 +483,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
spin_unlock_irqrestore(&ch->collect_lock, saveflags);
return -EBUSY;
} else {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
header.length = l;
header.type = be16_to_cpu(skb->protocol);
header.unused = 0;
@ -500,7 +500,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
* Protect skb against beeing free'd by upper
* layers.
*/
atomic_inc(&skb->users);
refcount_inc(&skb->users);
ch->prof.txlen += skb->len;
header.length = skb->len + LL_HEADER_LENGTH;
header.type = be16_to_cpu(skb->protocol);
@ -517,14 +517,14 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
if (hi) {
nskb = alloc_skb(skb->len, GFP_ATOMIC | GFP_DMA);
if (!nskb) {
atomic_dec(&skb->users);
refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
} else {
skb_put_data(nskb, skb->data, skb->len);
atomic_inc(&nskb->users);
atomic_dec(&skb->users);
refcount_inc(&nskb->users);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
@ -542,7 +542,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
* Remove our header. It gets added
* again on retransmit.
*/
atomic_dec(&skb->users);
refcount_dec(&skb->users);
skb_pull(skb, LL_HEADER_LENGTH + 2);
ctcm_clear_busy(ch->netdev);
return -ENOMEM;
@ -553,7 +553,7 @@ static int ctcm_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->ccw[1].count = skb->len;
skb_copy_from_linear_data(skb,
skb_put(ch->trans_skb, skb->len), skb->len);
atomic_dec(&skb->users);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
} else {
@ -679,7 +679,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
if ((fsm_getstate(ch->fsm) != CTC_STATE_TXIDLE) || grp->in_sweep) {
spin_lock_irqsave(&ch->collect_lock, saveflags);
atomic_inc(&skb->users);
refcount_inc(&skb->users);
p_header = kmalloc(PDU_HEADER_LENGTH, gfp_type());
if (!p_header) {
@ -716,7 +716,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
* Protect skb against beeing free'd by upper
* layers.
*/
atomic_inc(&skb->users);
refcount_inc(&skb->users);
/*
* IDAL support in CTCM is broken, so we have to
@ -729,8 +729,8 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
goto nomem_exit;
} else {
skb_put_data(nskb, skb->data, skb->len);
atomic_inc(&nskb->users);
atomic_dec(&skb->users);
refcount_inc(&nskb->users);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
skb = nskb;
}
@ -810,7 +810,7 @@ static int ctcmpc_transmit_skb(struct channel *ch, struct sk_buff *skb)
ch->trans_skb->len = 0;
ch->ccw[1].count = skb->len;
skb_put_data(ch->trans_skb, skb->data, skb->len);
atomic_dec(&skb->users);
refcount_dec(&skb->users);
dev_kfree_skb_irq(skb);
ccw_idx = 0;
CTCM_PR_DBGDATA("%s(%s): trans_skb len: %04x\n"
@ -855,7 +855,7 @@ nomem_exit:
"%s(%s): MEMORY allocation ERROR\n",
CTCM_FUNTAIL, ch->id);
rc = -ENOMEM;
atomic_dec(&skb->users);
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
done:


@ -743,7 +743,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
conn->prof.tx_pending--;
if (single_flag) {
if ((skb = skb_dequeue(&conn->commit_queue))) {
atomic_dec(&skb->users);
refcount_dec(&skb->users);
if (privptr) {
privptr->stats.tx_packets++;
privptr->stats.tx_bytes +=
@ -766,7 +766,7 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
txbytes += skb->len;
txpackets++;
stat_maxcq++;
atomic_dec(&skb->users);
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
if (conn->collect_len > conn->prof.maxmulti)
@ -958,7 +958,7 @@ static void netiucv_purge_skb_queue(struct sk_buff_head *q)
struct sk_buff *skb;
while ((skb = skb_dequeue(q))) {
atomic_dec(&skb->users);
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
}
}
@ -1176,7 +1176,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
IUCV_DBF_TEXT(data, 2,
"EBUSY from netiucv_transmit_skb\n");
} else {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
skb_queue_tail(&conn->collect_queue, skb);
conn->collect_len += l;
rc = 0;
@ -1245,7 +1245,7 @@ static int netiucv_transmit_skb(struct iucv_connection *conn,
} else {
if (copied)
dev_kfree_skb(skb);
atomic_inc(&nskb->users);
refcount_inc(&nskb->users);
skb_queue_tail(&conn->commit_queue, nskb);
}
}


@ -1242,7 +1242,7 @@ static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf)
iucv->sk_txnotify(skb, TX_NOTIFY_GENERALERROR);
}
}
atomic_dec(&skb->users);
refcount_dec(&skb->users);
dev_kfree_skb_any(skb);
skb = skb_dequeue(&buf->skb_list);
}
@ -3975,7 +3975,7 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue,
int flush_cnt = 0, hdr_len, large_send = 0;
buffer = buf->buffer;
atomic_inc(&skb->users);
refcount_inc(&skb->users);
skb_queue_tail(&buf->skb_list, skb);
/*check first on TSO ....*/


@ -254,7 +254,7 @@ static inline void atm_return(struct atm_vcc *vcc,int truesize)
static inline int atm_may_send(struct atm_vcc *vcc,unsigned int size)
{
return (size + atomic_read(&sk_atm(vcc)->sk_wmem_alloc)) <
return (size + refcount_read(&sk_atm(vcc)->sk_wmem_alloc)) <
sk_atm(vcc)->sk_sndbuf;
}


@ -18,6 +18,7 @@
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/in.h>
#include <linux/refcount.h>
#include <uapi/linux/igmp.h>
static inline struct igmphdr *igmp_hdr(const struct sk_buff *skb)
@ -84,7 +85,7 @@ struct ip_mc_list {
struct ip_mc_list __rcu *next_hash;
struct timer_list timer;
int users;
atomic_t refcnt;
refcount_t refcnt;
spinlock_t lock;
char tm_running;
char reporter;


@ -11,6 +11,7 @@
#include <linux/timer.h>
#include <linux/sysctl.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
struct ipv4_devconf {
void *sysctl;
@ -22,7 +23,7 @@ struct ipv4_devconf {
struct in_device {
struct net_device *dev;
atomic_t refcnt;
refcount_t refcnt;
int dead;
struct in_ifaddr *ifa_list; /* IP ifaddr chain */
@ -219,7 +220,7 @@ static inline struct in_device *in_dev_get(const struct net_device *dev)
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
atomic_inc(&in_dev->refcnt);
refcount_inc(&in_dev->refcnt);
rcu_read_unlock();
return in_dev;
}
@ -240,12 +241,12 @@ void in_dev_finish_destroy(struct in_device *idev);
static inline void in_dev_put(struct in_device *idev)
{
if (atomic_dec_and_test(&idev->refcnt))
if (refcount_dec_and_test(&idev->refcnt))
in_dev_finish_destroy(idev);
}
#define __in_dev_put(idev) atomic_dec(&(idev)->refcnt)
#define in_dev_hold(idev) atomic_inc(&(idev)->refcnt)
#define __in_dev_put(idev) refcount_dec(&(idev)->refcnt)
#define in_dev_hold(idev) refcount_inc(&(idev)->refcnt)
#endif /* __KERNEL__ */


@ -11,6 +11,7 @@
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/refcount.h>
union inet_addr {
__u32 all[4];
@ -34,7 +35,7 @@ struct netpoll {
};
struct netpoll_info {
atomic_t refcnt;
refcount_t refcnt;
struct semaphore dev_lock;


@ -252,7 +252,7 @@ struct nf_conntrack {
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
atomic_t use;
refcount_t use;
enum {
BRNF_PROTO_UNCHANGED,
BRNF_PROTO_8021Q,
@ -761,7 +761,7 @@ struct sk_buff {
unsigned char *head,
*data;
unsigned int truesize;
atomic_t users;
refcount_t users;
};
#ifdef __KERNEL__
@ -872,9 +872,9 @@ static inline bool skb_unref(struct sk_buff *skb)
{
if (unlikely(!skb))
return false;
if (likely(atomic_read(&skb->users) == 1))
if (likely(refcount_read(&skb->users) == 1))
smp_rmb();
else if (likely(!atomic_dec_and_test(&skb->users)))
else if (likely(!refcount_dec_and_test(&skb->users)))
return false;
return true;
@ -915,7 +915,7 @@ struct sk_buff_fclones {
struct sk_buff skb2;
atomic_t fclone_ref;
refcount_t fclone_ref;
};
/**
@ -935,7 +935,7 @@ static inline bool skb_fclone_busy(const struct sock *sk,
fclones = container_of(skb, struct sk_buff_fclones, skb1);
return skb->fclone == SKB_FCLONE_ORIG &&
atomic_read(&fclones->fclone_ref) > 1 &&
refcount_read(&fclones->fclone_ref) > 1 &&
fclones->skb2.sk == sk;
}
@ -1283,7 +1283,7 @@ static inline struct sk_buff *skb_queue_prev(const struct sk_buff_head *list,
*/
static inline struct sk_buff *skb_get(struct sk_buff *skb)
{
atomic_inc(&skb->users);
refcount_inc(&skb->users);
return skb;
}
@ -1384,7 +1384,7 @@ static inline void __skb_header_release(struct sk_buff *skb)
*/
static inline int skb_shared(const struct sk_buff *skb)
{
return atomic_read(&skb->users) != 1;
return refcount_read(&skb->users) != 1;
}
/**
@ -3589,13 +3589,13 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
static inline void nf_bridge_put(struct nf_bridge_info *nf_bridge)
{
if (nf_bridge && atomic_dec_and_test(&nf_bridge->use))
if (nf_bridge && refcount_dec_and_test(&nf_bridge->use))
kfree(nf_bridge);
}
static inline void nf_bridge_get(struct nf_bridge_info *nf_bridge)
{
if (nf_bridge)
atomic_inc(&nf_bridge->use);
refcount_inc(&nf_bridge->use);
}
#endif /* CONFIG_BRIDGE_NETFILTER */
static inline void nf_reset(struct sk_buff *skb)


@ -4,6 +4,7 @@
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <net/sock.h>
void unix_inflight(struct user_struct *user, struct file *fp);
@ -21,7 +22,7 @@ extern spinlock_t unix_table_lock;
extern struct hlist_head unix_socket_table[2 * UNIX_HASH_SIZE];
struct unix_address {
atomic_t refcnt;
refcount_t refcnt;
int len;
unsigned int hash;
struct sockaddr_un name[0];


@ -28,7 +28,7 @@ static inline struct neighbour *__ipv4_neigh_lookup(struct net_device *dev, u32
rcu_read_lock_bh();
n = __ipv4_neigh_lookup_noref(dev, key);
if (n && !atomic_inc_not_zero(&n->refcnt))
if (n && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
rcu_read_unlock_bh();


@ -5,6 +5,7 @@
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/fib_rules.h>
#include <linux/refcount.h>
#include <net/flow.h>
#include <net/rtnetlink.h>
@ -29,7 +30,7 @@ struct fib_rule {
struct fib_rule __rcu *ctarget;
struct net *fr_net;
atomic_t refcnt;
refcount_t refcnt;
u32 pref;
int suppress_ifgroup;
int suppress_prefixlen;
@ -103,12 +104,12 @@ struct fib_rules_ops {
static inline void fib_rule_get(struct fib_rule *rule)
{
atomic_inc(&rule->refcnt);
refcount_inc(&rule->refcnt);
}
static inline void fib_rule_put(struct fib_rule *rule)
{
if (atomic_dec_and_test(&rule->refcnt))
if (refcount_dec_and_test(&rule->refcnt))
kfree_rcu(rule, rcu);
}


@ -50,7 +50,7 @@ struct inet_frag_queue {
spinlock_t lock;
struct timer_list timer;
struct hlist_node list;
atomic_t refcnt;
refcount_t refcnt;
struct sk_buff *fragments;
struct sk_buff *fragments_tail;
ktime_t stamp;
@ -129,7 +129,7 @@ void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
if (atomic_dec_and_test(&q->refcnt))
if (refcount_dec_and_test(&q->refcnt))
inet_frag_destroy(q, f);
}


@ -32,7 +32,7 @@
#include <net/tcp_states.h>
#include <net/netns/hash.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <asm/byteorder.h>
/* This is for all connections with a full identity, no wildcards.
@ -334,7 +334,7 @@ static inline struct sock *inet_lookup(struct net *net,
sk = __inet_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
dport, dif, &refcounted);
if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
return sk;
}


@ -46,7 +46,7 @@ struct inet_peer {
struct rcu_head gc_rcu;
};
/*
* Once inet_peer is queued for deletion (refcnt == -1), following field
* Once inet_peer is queued for deletion (refcnt == 0), following field
* is not available: rid
* We can share memory with rcu_head to help keep inet_peer small.
*/
@ -60,7 +60,7 @@ struct inet_peer {
/* following fields might be frequently dirtied */
__u32 dtime; /* the time of last use of not referenced entries */
atomic_t refcnt;
refcount_t refcnt;
};
struct inet_peer_base {


@ -384,7 +384,7 @@ static inline struct neighbour *__ipv6_neigh_lookup(struct net_device *dev, cons
rcu_read_lock_bh();
n = __ipv6_neigh_lookup_noref(dev, pkey);
if (n && !atomic_inc_not_zero(&n->refcnt))
if (n && !refcount_inc_not_zero(&n->refcnt))
n = NULL;
rcu_read_unlock_bh();


@ -17,6 +17,7 @@
*/
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
@ -76,7 +77,7 @@ struct neigh_parms {
void *sysctl_table;
int dead;
atomic_t refcnt;
refcount_t refcnt;
struct rcu_head rcu_head;
int reachable_time;
@ -137,7 +138,7 @@ struct neighbour {
unsigned long confirmed;
unsigned long updated;
rwlock_t lock;
atomic_t refcnt;
refcount_t refcnt;
struct sk_buff_head arp_queue;
unsigned int arp_queue_len_bytes;
struct timer_list timer;
@ -395,12 +396,12 @@ void neigh_sysctl_unregister(struct neigh_parms *p);
static inline void __neigh_parms_put(struct neigh_parms *parms)
{
atomic_dec(&parms->refcnt);
refcount_dec(&parms->refcnt);
}
static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
{
atomic_inc(&parms->refcnt);
refcount_inc(&parms->refcnt);
return parms;
}
@ -410,18 +411,18 @@ static inline struct neigh_parms *neigh_parms_clone(struct neigh_parms *parms)
static inline void neigh_release(struct neighbour *neigh)
{
if (atomic_dec_and_test(&neigh->refcnt))
if (refcount_dec_and_test(&neigh->refcnt))
neigh_destroy(neigh);
}
static inline struct neighbour * neigh_clone(struct neighbour *neigh)
{
if (neigh)
atomic_inc(&neigh->refcnt);
refcount_inc(&neigh->refcnt);
return neigh;
}
#define neigh_hold(n) atomic_inc(&(n)->refcnt)
#define neigh_hold(n) refcount_inc(&(n)->refcnt)
static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
{


@ -5,6 +5,7 @@
#define __NET_NET_NAMESPACE_H
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
@ -46,7 +47,7 @@ struct netns_ipvs;
#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
struct net {
atomic_t passive; /* To decided when the network
refcount_t passive; /* To decided when the network
* namespace should be freed.
*/
atomic_t count; /* To decided when the network


@ -8,7 +8,7 @@ static inline struct nf_bridge_info *nf_bridge_alloc(struct sk_buff *skb)
skb->nf_bridge = kzalloc(sizeof(struct nf_bridge_info), GFP_ATOMIC);
if (likely(skb->nf_bridge))
atomic_set(&(skb->nf_bridge->use), 1);
refcount_set(&(skb->nf_bridge->use), 1);
return skb->nf_bridge;
}


@ -37,7 +37,7 @@
#include <linux/in6.h>
#include <net/netlink.h>
#include <net/request_sock.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
struct cipso_v4_doi;
struct calipso_doi;
@ -136,7 +136,7 @@ struct netlbl_audit {
*
*/
struct netlbl_lsm_cache {
atomic_t refcount;
refcount_t refcount;
void (*free) (const void *data);
void *data;
};
@ -295,7 +295,7 @@ static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
cache = kzalloc(sizeof(*cache), flags);
if (cache)
atomic_set(&cache->refcount, 1);
refcount_set(&cache->refcount, 1);
return cache;
}
@ -309,7 +309,7 @@ static inline struct netlbl_lsm_cache *netlbl_secattr_cache_alloc(gfp_t flags)
*/
static inline void netlbl_secattr_cache_free(struct netlbl_lsm_cache *cache)
{
if (!atomic_dec_and_test(&cache->refcount))
if (!refcount_dec_and_test(&cache->refcount))
return;
if (cache->free)


@ -19,6 +19,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/refcount.h>
#include <net/sock.h>
@ -89,7 +90,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
return NULL;
req->rsk_listener = NULL;
if (attach_listener) {
if (unlikely(!atomic_inc_not_zero(&sk_listener->sk_refcnt))) {
if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) {
kmem_cache_free(ops->slab, req);
return NULL;
}
@ -100,7 +101,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
sk_node_init(&req_to_sk(req)->sk_node);
sk_tx_queue_clear(req_to_sk(req));
req->saved_syn = NULL;
atomic_set(&req->rsk_refcnt, 0);
refcount_set(&req->rsk_refcnt, 0);
return req;
}
@ -108,7 +109,7 @@ reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener,
static inline void reqsk_free(struct request_sock *req)
{
/* temporary debugging */
WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0);
WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0);
req->rsk_ops->destructor(req);
if (req->rsk_listener)
@ -119,7 +120,7 @@ static inline void reqsk_free(struct request_sock *req)
static inline void reqsk_put(struct request_sock *req)
{
if (atomic_dec_and_test(&req->rsk_refcnt))
if (refcount_dec_and_test(&req->rsk_refcnt))
reqsk_free(req);
}


@ -66,6 +66,7 @@
#include <linux/poll.h>
#include <linux/atomic.h>
#include <linux/refcount.h>
#include <net/dst.h>
#include <net/checksum.h>
#include <net/tcp_states.h>
@ -219,7 +220,7 @@ struct sock_common {
u32 skc_tw_rcv_nxt; /* struct tcp_timewait_sock */
};
atomic_t skc_refcnt;
refcount_t skc_refcnt;
/* private: */
int skc_dontcopy_end[0];
union {
@ -390,7 +391,7 @@ struct sock {
/* ===== cache line for TX ===== */
int sk_wmem_queued;
atomic_t sk_wmem_alloc;
refcount_t sk_wmem_alloc;
unsigned long sk_tsq_flags;
struct sk_buff *sk_send_head;
struct sk_buff_head sk_write_queue;
@ -611,7 +612,7 @@ static inline bool __sk_del_node_init(struct sock *sk)
static __always_inline void sock_hold(struct sock *sk)
{
atomic_inc(&sk->sk_refcnt);
refcount_inc(&sk->sk_refcnt);
}
/* Ungrab socket in the context, which assumes that socket refcnt
@ -619,7 +620,7 @@ static __always_inline void sock_hold(struct sock *sk)
*/
static __always_inline void __sock_put(struct sock *sk)
{
atomic_dec(&sk->sk_refcnt);
refcount_dec(&sk->sk_refcnt);
}
static inline bool sk_del_node_init(struct sock *sk)
@ -628,7 +629,7 @@ static inline bool sk_del_node_init(struct sock *sk)
if (rc) {
/* paranoid for a while -acme */
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
return rc;
@ -650,7 +651,7 @@ static inline bool sk_nulls_del_node_init_rcu(struct sock *sk)
if (rc) {
/* paranoid for a while -acme */
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
return rc;
@ -1144,9 +1145,9 @@ static inline void sk_refcnt_debug_dec(struct sock *sk)
static inline void sk_refcnt_debug_release(const struct sock *sk)
{
if (atomic_read(&sk->sk_refcnt) != 1)
if (refcount_read(&sk->sk_refcnt) != 1)
printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
sk->sk_prot->name, sk, refcount_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
@ -1636,7 +1637,7 @@ void sock_init_data(struct socket *sock, struct sock *sk);
/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
if (atomic_dec_and_test(&sk->sk_refcnt))
if (refcount_dec_and_test(&sk->sk_refcnt))
sk_free(sk);
}
/* Generic version of sock_put(), dealing with all sockets
@ -1911,7 +1912,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
*/
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
return atomic_read(&sk->sk_wmem_alloc) - 1;
return refcount_read(&sk->sk_wmem_alloc) - 1;
}
/**
@ -2055,7 +2056,7 @@ static inline unsigned long sock_wspace(struct sock *sk)
int amt = 0;
if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
amt = sk->sk_sndbuf - refcount_read(&sk->sk_wmem_alloc);
if (amt < 0)
amt = 0;
}
@ -2136,7 +2137,7 @@ bool sk_page_frag_refill(struct sock *sk, struct page_frag *pfrag);
*/
static inline bool sock_writeable(const struct sock *sk)
{
return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
return refcount_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}
static inline gfp_t gfp_any(void)


@ -252,7 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
atomic_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = atmvcc->atm_options;
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;


@ -137,11 +137,11 @@ static int neigh_check_cb(struct neighbour *n)
if (entry->vccs || time_before(jiffies, entry->expires))
return 0;
if (atomic_read(&n->refcnt) > 1) {
if (refcount_read(&n->refcnt) > 1) {
struct sk_buff *skb;
pr_debug("destruction postponed with ref %d\n",
atomic_read(&n->refcnt));
refcount_read(&n->refcnt));
while ((skb = skb_dequeue(&n->arp_queue)) != NULL)
dev_kfree_skb(skb);
@ -381,7 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
memcpy(here, llc_oui, sizeof(llc_oui));
((__be16 *) here)[3] = skb->protocol;
}
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = vcc->atm_options;
entry->vccs->last_use = jiffies;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
@ -767,7 +767,7 @@ static void atmarp_info(struct seq_file *seq, struct neighbour *n,
seq_printf(seq, "(resolving)\n");
else
seq_printf(seq, "(expired, ref %d)\n",
atomic_read(&entry->neigh->refcnt));
refcount_read(&entry->neigh->refcnt));
} else if (!svc) {
seq_printf(seq, "%d.%d.%d\n",
clip_vcc->vcc->dev->number,


@ -80,9 +80,9 @@ static void vcc_sock_destruct(struct sock *sk)
printk(KERN_DEBUG "%s: rmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_rmem_alloc));
if (atomic_read(&sk->sk_wmem_alloc))
if (refcount_read(&sk->sk_wmem_alloc))
printk(KERN_DEBUG "%s: wmem leakage (%d bytes) detected.\n",
__func__, atomic_read(&sk->sk_wmem_alloc));
__func__, refcount_read(&sk->sk_wmem_alloc));
}
static void vcc_def_wakeup(struct sock *sk)
@ -101,7 +101,7 @@ static inline int vcc_writable(struct sock *sk)
struct atm_vcc *vcc = atm_sk(sk);
return (vcc->qos.txtp.max_sdu +
atomic_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
refcount_read(&sk->sk_wmem_alloc)) <= sk->sk_sndbuf;
}
static void vcc_write_space(struct sock *sk)
@ -156,7 +156,7 @@ int vcc_create(struct net *net, struct socket *sock, int protocol, int family, i
memset(&vcc->local, 0, sizeof(struct sockaddr_atmsvc));
memset(&vcc->remote, 0, sizeof(struct sockaddr_atmsvc));
vcc->qos.txtp.max_sdu = 1 << 16; /* for meta VCs */
atomic_set(&sk->sk_wmem_alloc, 1);
refcount_set(&sk->sk_wmem_alloc, 1);
atomic_set(&sk->sk_rmem_alloc, 0);
vcc->push = NULL;
vcc->pop = NULL;
@ -630,7 +630,7 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
goto out;
}
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb->dev = NULL; /* for paths shared with net_device interfaces */
ATM_SKB(skb)->atm_options = vcc->atm_options;


@ -181,7 +181,7 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
ATM_SKB(skb)->vcc = vcc;
ATM_SKB(skb)->atm_options = vcc->atm_options;
atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
if (vcc->send(vcc, skb) < 0) {
dev->stats.tx_dropped++;
return;
@ -345,7 +345,7 @@ static int lec_atm_send(struct atm_vcc *vcc, struct sk_buff *skb)
int i;
char *tmp; /* FIXME */
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
mesg = (struct atmlec_msg *)skb->data;
tmp = skb->data;
tmp += sizeof(struct atmlec_msg);


@ -555,7 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
sizeof(struct llc_snap_hdr));
}
atomic_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
entry->shortcut->send(entry->shortcut, skb);
entry->packets_fwded++;
@ -911,7 +911,7 @@ static int msg_from_mpoad(struct atm_vcc *vcc, struct sk_buff *skb)
struct mpoa_client *mpc = find_mpc_by_vcc(vcc);
struct k_message *mesg = (struct k_message *)skb->data;
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
if (mpc == NULL) {
pr_info("no mpc found\n");


@ -350,7 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
return 1;
}
atomic_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);


@ -211,7 +211,7 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
vcc->flags, sk->sk_err,
sk_wmem_alloc_get(sk), sk->sk_sndbuf,
sk_rmem_alloc_get(sk), sk->sk_rcvbuf,
atomic_read(&sk->sk_refcnt));
refcount_read(&sk->sk_refcnt));
}
static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)


@ -35,7 +35,7 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
pr_debug("(%d) %d -= %d\n",
vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}


@ -67,7 +67,7 @@ static int sigd_send(struct atm_vcc *vcc, struct sk_buff *skb)
struct sock *sk;
msg = (struct atmsvc_msg *) skb->data;
atomic_sub(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
WARN_ON(refcount_sub_and_test(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc));
vcc = *(struct atm_vcc **) &msg->vcc;
pr_debug("%d (0x%lx)\n", (int)msg->type, (unsigned long)vcc);
sk = sk_atm(vcc);


@ -657,7 +657,7 @@ static int bt_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%pK %-6d %-6u %-6u %-6u %-6lu %-6lu",
sk,
atomic_read(&sk->sk_refcnt),
refcount_read(&sk->sk_refcnt),
sk_rmem_alloc_get(sk),
sk_wmem_alloc_get(sk),
from_kuid(seq_user_ns(seq), sock_i_uid(sk)),


@ -197,7 +197,7 @@ static void rfcomm_sock_kill(struct sock *sk)
if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
return;
BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
/* Kill poor orphan */
bt_sock_unlink(&rfcomm_sk_list, sk);


@ -149,12 +149,12 @@ static inline struct nf_bridge_info *nf_bridge_unshare(struct sk_buff *skb)
{
struct nf_bridge_info *nf_bridge = skb->nf_bridge;
if (atomic_read(&nf_bridge->use) > 1) {
if (refcount_read(&nf_bridge->use) > 1) {
struct nf_bridge_info *tmp = nf_bridge_alloc(skb);
if (tmp) {
memcpy(tmp, nf_bridge, sizeof(struct nf_bridge_info));
atomic_set(&tmp->use, 1);
refcount_set(&tmp->use, 1);
}
nf_bridge_put(nf_bridge);
nf_bridge = tmp;


@ -1013,7 +1013,7 @@ static const struct proto_ops caif_stream_ops = {
static void caif_sock_destructor(struct sock *sk)
{
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
caif_assert(!atomic_read(&sk->sk_wmem_alloc));
caif_assert(!refcount_read(&sk->sk_wmem_alloc));
caif_assert(sk_unhashed(sk));
caif_assert(!sk->sk_socket);
if (!sock_flag(sk, SOCK_DEAD)) {


@ -188,7 +188,7 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
}
}
*peeked = 1;
atomic_inc(&skb->users);
refcount_inc(&skb->users);
} else {
__skb_unlink(skb, queue);
if (destructor)
@ -358,7 +358,7 @@ int __sk_queue_drop_skb(struct sock *sk, struct sk_buff_head *sk_queue,
spin_lock_bh(&sk_queue->lock);
if (skb == skb_peek(sk_queue)) {
__skb_unlink(skb, sk_queue);
atomic_dec(&skb->users);
refcount_dec(&skb->users);
if (destructor)
destructor(sk, skb);
err = 0;
@ -614,7 +614,7 @@ int zerocopy_sg_from_iter(struct sk_buff *skb, struct iov_iter *from)
skb->data_len += copied;
skb->len += copied;
skb->truesize += truesize;
atomic_add(truesize, &skb->sk->sk_wmem_alloc);
refcount_add(truesize, &skb->sk->sk_wmem_alloc);
while (copied) {
int size = min_t(int, copied, PAGE_SIZE - start);
skb_fill_page_desc(skb, frag++, pages[n], start, size);


@ -1862,7 +1862,7 @@ static inline int deliver_skb(struct sk_buff *skb,
{
if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
return -ENOMEM;
atomic_inc(&skb->users);
refcount_inc(&skb->users);
return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
}
@ -2484,10 +2484,10 @@ void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
if (unlikely(!skb))
return;
if (likely(atomic_read(&skb->users) == 1)) {
if (likely(refcount_read(&skb->users) == 1)) {
smp_rmb();
atomic_set(&skb->users, 0);
} else if (likely(!atomic_dec_and_test(&skb->users))) {
refcount_set(&skb->users, 0);
} else if (likely(!refcount_dec_and_test(&skb->users))) {
return;
}
get_kfree_skb_cb(skb)->reason = reason;
@ -3955,7 +3955,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
clist = clist->next;
WARN_ON(atomic_read(&skb->users));
WARN_ON(refcount_read(&skb->users));
if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
trace_consume_skb(skb);
else


@ -46,7 +46,7 @@ int fib_default_rule_add(struct fib_rules_ops *ops,
if (r == NULL)
return -ENOMEM;
atomic_set(&r->refcnt, 1);
refcount_set(&r->refcnt, 1);
r->action = FR_ACT_TO_TBL;
r->pref = pref;
r->table = table;
@ -283,7 +283,7 @@ jumped:
if (err != -EAGAIN) {
if ((arg->flags & FIB_LOOKUP_NOREF) ||
likely(atomic_inc_not_zero(&rule->refcnt))) {
likely(refcount_inc_not_zero(&rule->refcnt))) {
arg->rule = rule;
goto out;
}


@ -124,7 +124,7 @@ static bool neigh_del(struct neighbour *n, __u8 state,
bool retval = false;
write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
if (refcount_read(&n->refcnt) == 1 && !(n->nud_state & state)) {
struct neighbour *neigh;
neigh = rcu_dereference_protected(n->next,
@ -254,7 +254,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
neigh_del_timer(n);
n->dead = 1;
if (atomic_read(&n->refcnt) != 1) {
if (refcount_read(&n->refcnt) != 1) {
/* The most unpleasant situation.
We must destroy neighbour entry,
but someone still uses it.
@ -335,7 +335,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device
NEIGH_CACHE_STAT_INC(tbl, allocs);
n->tbl = tbl;
atomic_set(&n->refcnt, 1);
refcount_set(&n->refcnt, 1);
n->dead = 1;
out:
return n;
@ -444,7 +444,7 @@ struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
rcu_read_lock_bh();
n = __neigh_lookup_noref(tbl, pkey, dev);
if (n) {
if (!atomic_inc_not_zero(&n->refcnt))
if (!refcount_inc_not_zero(&n->refcnt))
n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
}
@ -473,7 +473,7 @@ struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
n = rcu_dereference_bh(n->next)) {
if (!memcmp(n->primary_key, pkey, key_len) &&
net_eq(dev_net(n->dev), net)) {
if (!atomic_inc_not_zero(&n->refcnt))
if (!refcount_inc_not_zero(&n->refcnt))
n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
@ -709,7 +709,7 @@ static void neigh_parms_destroy(struct neigh_parms *parms);
static inline void neigh_parms_put(struct neigh_parms *parms)
{
if (atomic_dec_and_test(&parms->refcnt))
if (refcount_dec_and_test(&parms->refcnt))
neigh_parms_destroy(parms);
}
@ -821,7 +821,7 @@ static void neigh_periodic_work(struct work_struct *work)
if (time_before(n->used, n->confirmed))
n->used = n->confirmed;
if (atomic_read(&n->refcnt) == 1 &&
if (refcount_read(&n->refcnt) == 1 &&
(state == NUD_FAILED ||
time_after(jiffies, n->used + NEIGH_VAR(n->parms, GC_STALETIME)))) {
*np = n->next;
@ -1479,7 +1479,7 @@ struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
if (p) {
p->tbl = tbl;
atomic_set(&p->refcnt, 1);
refcount_set(&p->refcnt, 1);
p->reachable_time =
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
dev_hold(dev);
@ -1542,7 +1542,7 @@ void neigh_table_init(int index, struct neigh_table *tbl)
INIT_LIST_HEAD(&tbl->parms_list);
list_add(&tbl->parms.list, &tbl->parms_list);
write_pnet(&tbl->parms.net, &init_net);
atomic_set(&tbl->parms.refcnt, 1);
refcount_set(&tbl->parms.refcnt, 1);
tbl->parms.reachable_time =
neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
@ -1796,7 +1796,7 @@ static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
if ((parms->dev &&
nla_put_u32(skb, NDTPA_IFINDEX, parms->dev->ifindex)) ||
nla_put_u32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt)) ||
nla_put_u32(skb, NDTPA_REFCNT, refcount_read(&parms->refcnt)) ||
nla_put_u32(skb, NDTPA_QUEUE_LENBYTES,
NEIGH_VAR(parms, QUEUE_LEN_BYTES)) ||
/* approximative value for deprecated QUEUE_LEN (in packets) */
@ -2234,7 +2234,7 @@ static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
read_unlock_bh(&neigh->lock);
if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||


@ -1448,7 +1448,7 @@ static void *net_grab_current_ns(void)
struct net *ns = current->nsproxy->net_ns;
#ifdef CONFIG_NET_NS
if (ns)
atomic_inc(&ns->passive);
refcount_inc(&ns->passive);
#endif
return ns;
}


@ -284,7 +284,7 @@ static __net_init int setup_net(struct net *net, struct user_namespace *user_ns)
LIST_HEAD(net_exit_list);
atomic_set(&net->count, 1);
atomic_set(&net->passive, 1);
refcount_set(&net->passive, 1);
net->dev_base_seq = 1;
net->user_ns = user_ns;
idr_init(&net->netns_ids);
@ -380,7 +380,7 @@ static void net_free(struct net *net)
void net_drop_ns(void *p)
{
struct net *ns = p;
if (ns && atomic_dec_and_test(&ns->passive))
if (ns && refcount_dec_and_test(&ns->passive))
net_free(ns);
}


@ -277,7 +277,7 @@ static void zap_completion_queue(void)
struct sk_buff *skb = clist;
clist = clist->next;
if (!skb_irq_freeable(skb)) {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
dev_kfree_skb_any(skb); /* put this one back */
} else {
__kfree_skb(skb);
@ -309,7 +309,7 @@ repeat:
return NULL;
}
atomic_set(&skb->users, 1);
refcount_set(&skb->users, 1);
skb_reserve(skb, reserve);
return skb;
}
@ -632,7 +632,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
skb_queue_head_init(&npinfo->txq);
INIT_DELAYED_WORK(&npinfo->tx_work, queue_process);
atomic_set(&npinfo->refcnt, 1);
refcount_set(&npinfo->refcnt, 1);
ops = np->dev->netdev_ops;
if (ops->ndo_netpoll_setup) {
@ -642,7 +642,7 @@ int __netpoll_setup(struct netpoll *np, struct net_device *ndev)
}
} else {
npinfo = rtnl_dereference(ndev->npinfo);
atomic_inc(&npinfo->refcnt);
refcount_inc(&npinfo->refcnt);
}
npinfo->netpoll = np;
@ -821,7 +821,7 @@ void __netpoll_cleanup(struct netpoll *np)
synchronize_srcu(&netpoll_srcu);
if (atomic_dec_and_test(&npinfo->refcnt)) {
if (refcount_dec_and_test(&npinfo->refcnt)) {
const struct net_device_ops *ops;
ops = np->dev->netdev_ops;


@ -3363,7 +3363,7 @@ static void pktgen_wait_for_skb(struct pktgen_dev *pkt_dev)
{
ktime_t idle_start = ktime_get();
while (atomic_read(&(pkt_dev->skb->users)) != 1) {
while (refcount_read(&(pkt_dev->skb->users)) != 1) {
if (signal_pending(current))
break;
@ -3420,7 +3420,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
if (pkt_dev->xmit_mode == M_NETIF_RECEIVE) {
skb = pkt_dev->skb;
skb->protocol = eth_type_trans(skb, skb->dev);
atomic_add(burst, &skb->users);
refcount_add(burst, &skb->users);
local_bh_disable();
do {
ret = netif_receive_skb(skb);
@ -3428,11 +3428,11 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->errors++;
pkt_dev->sofar++;
pkt_dev->seq_num++;
if (atomic_read(&skb->users) != burst) {
if (refcount_read(&skb->users) != burst) {
/* skb was queued by rps/rfs or taps,
* so cannot reuse this skb
*/
atomic_sub(burst - 1, &skb->users);
WARN_ON(refcount_sub_and_test(burst - 1, &skb->users));
/* get out of the loop and wait
* until skb is consumed
*/
@ -3446,7 +3446,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
goto out; /* Skips xmit_mode M_START_XMIT */
} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
local_bh_disable();
atomic_inc(&pkt_dev->skb->users);
refcount_inc(&pkt_dev->skb->users);
ret = dev_queue_xmit(pkt_dev->skb);
switch (ret) {
@ -3487,7 +3487,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
pkt_dev->last_ok = 0;
goto unlock;
}
atomic_add(burst, &pkt_dev->skb->users);
refcount_add(burst, &pkt_dev->skb->users);
xmit_more:
ret = netdev_start_xmit(pkt_dev->skb, odev, txq, --burst > 0);
@ -3513,11 +3513,11 @@ xmit_more:
/* fallthru */
case NETDEV_TX_BUSY:
/* Retry it next time */
atomic_dec(&(pkt_dev->skb->users));
refcount_dec(&(pkt_dev->skb->users));
pkt_dev->last_ok = 0;
}
if (unlikely(burst))
atomic_sub(burst, &pkt_dev->skb->users);
WARN_ON(refcount_sub_and_test(burst, &pkt_dev->skb->users));
unlock:
HARD_TX_UNLOCK(odev, txq);


@ -649,7 +649,7 @@ int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned int g
NETLINK_CB(skb).dst_group = group;
if (echo)
atomic_inc(&skb->users);
refcount_inc(&skb->users);
netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
if (echo)
err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);


@ -176,7 +176,7 @@ struct sk_buff *__alloc_skb_head(gfp_t gfp_mask, int node)
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->head = NULL;
skb->truesize = sizeof(struct sk_buff);
atomic_set(&skb->users, 1);
refcount_set(&skb->users, 1);
skb->mac_header = (typeof(skb->mac_header))~0U;
out:
@ -247,7 +247,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
/* Account for allocated memory : skb + skb->head */
skb->truesize = SKB_TRUESIZE(size);
skb->pfmemalloc = pfmemalloc;
atomic_set(&skb->users, 1);
refcount_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
@ -268,7 +268,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
kmemcheck_annotate_bitfield(&fclones->skb2, flags1);
skb->fclone = SKB_FCLONE_ORIG;
atomic_set(&fclones->fclone_ref, 1);
refcount_set(&fclones->fclone_ref, 1);
fclones->skb2.fclone = SKB_FCLONE_CLONE;
}
@ -314,7 +314,7 @@ struct sk_buff *__build_skb(void *data, unsigned int frag_size)
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
atomic_set(&skb->users, 1);
refcount_set(&skb->users, 1);
skb->head = data;
skb->data = data;
skb_reset_tail_pointer(skb);
@ -629,7 +629,7 @@ static void kfree_skbmem(struct sk_buff *skb)
* This test would have no chance to be true for the clone,
* while here, branch prediction will be good.
*/
if (atomic_read(&fclones->fclone_ref) == 1)
if (refcount_read(&fclones->fclone_ref) == 1)
goto fastpath;
break;
@ -637,7 +637,7 @@ static void kfree_skbmem(struct sk_buff *skb)
fclones = container_of(skb, struct sk_buff_fclones, skb2);
break;
}
if (!atomic_dec_and_test(&fclones->fclone_ref))
if (!refcount_dec_and_test(&fclones->fclone_ref))
return;
fastpath:
kmem_cache_free(skbuff_fclone_cache, fclones);
@ -915,7 +915,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
C(head_frag);
C(data);
C(truesize);
atomic_set(&n->users, 1);
refcount_set(&n->users, 1);
atomic_inc(&(skb_shinfo(skb)->dataref));
skb->cloned = 1;
@ -1027,9 +1027,9 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
return NULL;
if (skb->fclone == SKB_FCLONE_ORIG &&
atomic_read(&fclones->fclone_ref) == 1) {
refcount_read(&fclones->fclone_ref) == 1) {
n = &fclones->skb2;
atomic_set(&fclones->fclone_ref, 2);
refcount_set(&fclones->fclone_ref, 2);
} else {
if (skb_pfmemalloc(skb))
gfp_mask |= __GFP_MEMALLOC;
@ -3024,7 +3024,7 @@ int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
get_page(pfrag->page);
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
refcount_add(copy, &sk->sk_wmem_alloc);
skb->len += copy;
skb->data_len += copy;
offset += copy;
@ -3844,7 +3844,7 @@ struct sk_buff *skb_clone_sk(struct sk_buff *skb)
struct sock *sk = skb->sk;
struct sk_buff *clone;
if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt))
if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
return NULL;
clone = skb_clone(skb, GFP_ATOMIC);
@ -3915,7 +3915,7 @@ void skb_complete_tx_timestamp(struct sk_buff *skb,
/* Take a reference to prevent skb_orphan() from freeing the socket,
* but only if the socket refcount is not zero.
*/
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
*skb_hwtstamps(skb) = *hwtstamps;
__skb_complete_tx_timestamp(skb, sk, SCM_TSTAMP_SND, false);
sock_put(sk);
@ -3997,7 +3997,7 @@ void skb_complete_wifi_ack(struct sk_buff *skb, bool acked)
/* Take a reference to prevent skb_orphan() from freeing the socket,
* but only if the socket refcount is not zero.
*/
if (likely(atomic_inc_not_zero(&sk->sk_refcnt))) {
if (likely(refcount_inc_not_zero(&sk->sk_refcnt))) {
err = sock_queue_err_skb(sk, skb);
sock_put(sk);
}


@ -1528,7 +1528,7 @@ struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
if (likely(sk->sk_net_refcnt))
get_net(net);
sock_net_set(sk, net);
atomic_set(&sk->sk_wmem_alloc, 1);
refcount_set(&sk->sk_wmem_alloc, 1);
mem_cgroup_sk_alloc(sk);
cgroup_sk_alloc(&sk->sk_cgrp_data);
@ -1552,7 +1552,7 @@ static void __sk_destruct(struct rcu_head *head)
sk->sk_destruct(sk);
filter = rcu_dereference_check(sk->sk_filter,
atomic_read(&sk->sk_wmem_alloc) == 0);
refcount_read(&sk->sk_wmem_alloc) == 0);
if (filter) {
sk_filter_uncharge(sk, filter);
RCU_INIT_POINTER(sk->sk_filter, NULL);
@ -1602,7 +1602,7 @@ void sk_free(struct sock *sk)
* some packets are still in some tx queue.
* If not null, sock_wfree() will call __sk_free(sk) later
*/
if (atomic_dec_and_test(&sk->sk_wmem_alloc))
if (refcount_dec_and_test(&sk->sk_wmem_alloc))
__sk_free(sk);
}
EXPORT_SYMBOL(sk_free);
@ -1659,7 +1659,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
/*
* sk_wmem_alloc set to one (see sk_free() and sock_wfree())
*/
atomic_set(&newsk->sk_wmem_alloc, 1);
refcount_set(&newsk->sk_wmem_alloc, 1);
atomic_set(&newsk->sk_omem_alloc, 0);
sk_init_common(newsk);
@ -1708,7 +1708,7 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
* (Documentation/RCU/rculist_nulls.txt for details)
*/
smp_wmb();
atomic_set(&newsk->sk_refcnt, 2);
refcount_set(&newsk->sk_refcnt, 2);
/*
* Increment the counter in the same struct proto as the master
@ -1787,7 +1787,7 @@ void sock_wfree(struct sk_buff *skb)
* Keep a reference on sk_wmem_alloc, this will be released
* after sk_write_space() call
*/
atomic_sub(len - 1, &sk->sk_wmem_alloc);
WARN_ON(refcount_sub_and_test(len - 1, &sk->sk_wmem_alloc));
sk->sk_write_space(sk);
len = 1;
}
@ -1795,7 +1795,7 @@ void sock_wfree(struct sk_buff *skb)
* if sk_wmem_alloc reaches 0, we must finish what sk_free()
* could not do because of in-flight packets
*/
if (atomic_sub_and_test(len, &sk->sk_wmem_alloc))
if (refcount_sub_and_test(len, &sk->sk_wmem_alloc))
__sk_free(sk);
}
EXPORT_SYMBOL(sock_wfree);
@ -1807,7 +1807,7 @@ void __sock_wfree(struct sk_buff *skb)
{
struct sock *sk = skb->sk;
if (atomic_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
if (refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc))
__sk_free(sk);
}
@ -1829,7 +1829,7 @@ void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
* is enough to guarantee sk_free() wont free this sock until
* all in-flight packets are completed
*/
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
}
EXPORT_SYMBOL(skb_set_owner_w);
@ -1851,8 +1851,8 @@ void skb_orphan_partial(struct sk_buff *skb)
) {
struct sock *sk = skb->sk;
if (atomic_inc_not_zero(&sk->sk_refcnt)) {
atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
if (refcount_inc_not_zero(&sk->sk_refcnt)) {
WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
skb->destructor = sock_efree;
}
} else {
@ -1912,7 +1912,7 @@ EXPORT_SYMBOL(sock_i_ino);
struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
gfp_t priority)
{
if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
if (force || refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
struct sk_buff *skb = alloc_skb(size, priority);
if (skb) {
skb_set_owner_w(skb, sk);
@ -1987,7 +1987,7 @@ static long sock_wait_for_wmem(struct sock *sk, long timeo)
break;
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
if (refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
break;
if (sk->sk_shutdown & SEND_SHUTDOWN)
break;
@ -2310,7 +2310,7 @@ int __sk_mem_raise_allocated(struct sock *sk, int size, int amt, int kind)
if (sk->sk_type == SOCK_STREAM) {
if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
return 1;
} else if (atomic_read(&sk->sk_wmem_alloc) <
} else if (refcount_read(&sk->sk_wmem_alloc) <
prot->sysctl_wmem[0])
return 1;
}
@ -2577,7 +2577,7 @@ static void sock_def_write_space(struct sock *sk)
/* Do not wake up a writer until he can make "significant"
* progress. --DaveM
*/
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
wq = rcu_dereference(sk->sk_wq);
if (skwq_has_sleeper(wq))
wake_up_interruptible_sync_poll(&wq->wait, POLLOUT |
@ -2687,7 +2687,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
* (Documentation/RCU/rculist_nulls.txt for details)
*/
smp_wmb();
atomic_set(&sk->sk_refcnt, 1);
refcount_set(&sk->sk_refcnt, 1);
atomic_set(&sk->sk_drops, 0);
}
EXPORT_SYMBOL(sock_init_data);


@ -353,7 +353,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
if (ipv6_opt_accepted(sk, skb, IP6CB(skb)) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
ireq->pktopts = skb;
}
ireq->ir_iif = sk->sk_bound_dev_if;


@ -559,7 +559,7 @@ static inline void dn_neigh_format_entry(struct seq_file *seq,
(dn->flags&DN_NDFLAG_R2) ? "2" : "-",
(dn->flags&DN_NDFLAG_P3) ? "3" : "-",
dn->n.nud_state,
atomic_read(&dn->n.refcnt),
refcount_read(&dn->n.refcnt),
dn->blksize,
(dn->n.dev) ? dn->n.dev->name : "?");
read_unlock(&n->lock);


@ -150,7 +150,7 @@ void inet_sock_destruct(struct sock *sk)
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
WARN_ON(sk->sk_wmem_queued);
WARN_ON(sk->sk_forward_alloc);


@ -265,7 +265,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
entry->key_len == key_len &&
memcmp(entry->key, key, key_len) == 0) {
entry->activity += 1;
atomic_inc(&entry->lsm_data->refcount);
refcount_inc(&entry->lsm_data->refcount);
secattr->cache = entry->lsm_data;
secattr->flags |= NETLBL_SECATTR_CACHE;
secattr->type = NETLBL_NLTYPE_CIPSOV4;
@ -332,7 +332,7 @@ int cipso_v4_cache_add(const unsigned char *cipso_ptr,
}
entry->key_len = cipso_ptr_len;
entry->hash = cipso_v4_map_cache_hash(cipso_ptr, cipso_ptr_len);
atomic_inc(&secattr->cache->refcount);
refcount_inc(&secattr->cache->refcount);
entry->lsm_data = secattr->cache;
bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);


@ -252,7 +252,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
/* Reference in_dev->dev */
dev_hold(dev);
/* Account for reference dev->ip_ptr (below) */
in_dev_hold(in_dev);
refcount_set(&in_dev->refcnt, 1);
err = devinet_sysctl_register(in_dev);
if (err) {


@ -307,7 +307,7 @@ int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *
skb->data_len += tailen;
skb->truesize += tailen;
if (sk)
atomic_add(tailen, &sk->sk_wmem_alloc);
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
}


@ -173,7 +173,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
static void ip_ma_put(struct ip_mc_list *im)
{
if (atomic_dec_and_test(&im->refcnt)) {
if (refcount_dec_and_test(&im->refcnt)) {
in_dev_put(im->interface);
kfree_rcu(im, rcu);
}
@ -199,7 +199,7 @@ static void igmp_stop_timer(struct ip_mc_list *im)
{
spin_lock_bh(&im->lock);
if (del_timer(&im->timer))
atomic_dec(&im->refcnt);
refcount_dec(&im->refcnt);
im->tm_running = 0;
im->reporter = 0;
im->unsolicit_count = 0;
@ -213,7 +213,7 @@ static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
im->tm_running = 1;
if (!mod_timer(&im->timer, jiffies+tv+2))
atomic_inc(&im->refcnt);
refcount_inc(&im->refcnt);
}
static void igmp_gq_start_timer(struct in_device *in_dev)
@ -249,7 +249,7 @@ static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
spin_unlock_bh(&im->lock);
return;
}
atomic_dec(&im->refcnt);
refcount_dec(&im->refcnt);
}
igmp_start_timer(im, max_delay);
spin_unlock_bh(&im->lock);
@ -1374,7 +1374,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
/* initial mode is (EX, empty) */
im->sfmode = MCAST_EXCLUDE;
im->sfcount[MCAST_EXCLUDE] = 1;
atomic_set(&im->refcnt, 1);
refcount_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);


@ -756,7 +756,7 @@ static void reqsk_queue_hash_req(struct request_sock *req,
* are committed to memory and refcnt initialized.
*/
smp_wmb();
atomic_set(&req->rsk_refcnt, 2 + 1);
refcount_set(&req->rsk_refcnt, 2 + 1);
}
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,


@ -276,11 +276,11 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
if (del_timer(&fq->timer))
atomic_dec(&fq->refcnt);
refcount_dec(&fq->refcnt);
if (!(fq->flags & INET_FRAG_COMPLETE)) {
fq_unlink(fq, f);
atomic_dec(&fq->refcnt);
refcount_dec(&fq->refcnt);
}
}
EXPORT_SYMBOL(inet_frag_kill);
@ -329,7 +329,7 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
*/
hlist_for_each_entry(qp, &hb->chain, list) {
if (qp->net == nf && f->match(qp, arg)) {
atomic_inc(&qp->refcnt);
refcount_inc(&qp->refcnt);
spin_unlock(&hb->chain_lock);
qp_in->flags |= INET_FRAG_COMPLETE;
inet_frag_put(qp_in, f);
@ -339,9 +339,9 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
#endif
qp = qp_in;
if (!mod_timer(&qp->timer, jiffies + nf->timeout))
atomic_inc(&qp->refcnt);
refcount_inc(&qp->refcnt);
atomic_inc(&qp->refcnt);
refcount_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
spin_unlock(&hb->chain_lock);
@ -370,7 +370,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
atomic_set(&q->refcnt, 1);
refcount_set(&q->refcnt, 1);
return q;
}
@ -405,7 +405,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
spin_lock(&hb->chain_lock);
hlist_for_each_entry(q, &hb->chain, list) {
if (q->net == nf && f->match(q, key)) {
atomic_inc(&q->refcnt);
refcount_inc(&q->refcnt);
spin_unlock(&hb->chain_lock);
return q;
}


@ -246,7 +246,7 @@ EXPORT_SYMBOL_GPL(__inet_lookup_listener);
/* All sockets share common refcount, but have different destructors */
void sock_gen_put(struct sock *sk)
{
if (!atomic_dec_and_test(&sk->sk_refcnt))
if (!refcount_dec_and_test(&sk->sk_refcnt))
return;
if (sk->sk_state == TCP_TIME_WAIT)
@ -287,7 +287,7 @@ begin:
continue;
if (likely(INET_MATCH(sk, net, acookie,
saddr, daddr, ports, dif))) {
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
goto out;
if (unlikely(!INET_MATCH(sk, net, acookie,
saddr, daddr, ports, dif))) {
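
The lockless socket lookups converted here (and the llc ones later in this diff) all lean on refcount_inc_not_zero() as the guard: under RCU with SLAB_TYPESAFE_BY_RCU the slot may be recycled for a different socket, so a zero count means the object is dying, and the key must be re-checked after the reference is taken. A hedged sketch of the pattern:

	#include <linux/refcount.h>
	#include <linux/compiler.h>
	#include <linux/types.h>

	struct example_sock {
		u32 key;
		refcount_t refcnt;
	};

	static void example_put(struct example_sock *esk);

	/* Hypothetical lookup over SLAB_TYPESAFE_BY_RCU memory: take a
	 * reference only if the object is still live, then re-validate
	 * the key because the slot may have been reused under us.
	 */
	static struct example_sock *example_lookup(struct example_sock *cand, u32 key)
	{
		if (cand->key != key)
			return NULL;
		if (unlikely(!refcount_inc_not_zero(&cand->refcnt)))
			return NULL;			/* already being freed */
		if (unlikely(cand->key != key)) {	/* recycled meanwhile */
			example_put(cand);
			return NULL;
		}
		return cand;
	}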

View file

@ -76,7 +76,7 @@ void inet_twsk_free(struct inet_timewait_sock *tw)
void inet_twsk_put(struct inet_timewait_sock *tw)
{
if (atomic_dec_and_test(&tw->tw_refcnt))
if (refcount_dec_and_test(&tw->tw_refcnt))
inet_twsk_free(tw);
}
EXPORT_SYMBOL_GPL(inet_twsk_put);
@ -131,7 +131,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
* We can use refcount_set() because prior spin_lock()/spin_unlock()
* committed into memory all tw fields.
*/
atomic_set(&tw->tw_refcnt, 4);
refcount_set(&tw->tw_refcnt, 4);
inet_twsk_add_node_rcu(tw, &ehead->chain);
/* Step 3: Remove SK from hash chain */
@ -195,7 +195,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk,
* to a non-null value before everything is set up for this
* timewait socket.
*/
atomic_set(&tw->tw_refcnt, 0);
refcount_set(&tw->tw_refcnt, 0);
__module_get(tw->tw_prot->owner);
}
@ -278,7 +278,7 @@ restart:
atomic_read(&twsk_net(tw)->count))
continue;
if (unlikely(!atomic_inc_not_zero(&tw->tw_refcnt)))
if (unlikely(!refcount_inc_not_zero(&tw->tw_refcnt)))
continue;
if (unlikely((tw->tw_family != family) ||
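
The timewait socket is the corner case the cover letter warns about: tw_refcnt starts at 0 and only gets its real value once the object is fully hashed, so nothing may refcount_inc() it in between; lookups go through refcount_inc_not_zero() instead. A sketch of the two-phase initialisation, hypothetical names:

	#include <linux/refcount.h>

	struct example_tw {
		refcount_t refcnt;
	};

	/* Phase 1: allocated but not yet visible.  A count of 0 makes any
	 * racing refcount_inc_not_zero() back off.
	 */
	static void example_tw_init(struct example_tw *tw)
	{
		refcount_set(&tw->refcnt, 0);
	}

	/* Phase 2: all fields committed (the real code relies on the
	 * spin_lock()/spin_unlock() pair for ordering); the plain store
	 * in refcount_set() flips the object live in one step.
	 */
	static void example_tw_publish(struct example_tw *tw)
	{
		refcount_set(&tw->refcnt, 4);
	}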

View file

@ -115,7 +115,7 @@ static void inetpeer_gc_worker(struct work_struct *work)
n = list_entry(p->gc_list.next, struct inet_peer, gc_list);
if (!atomic_read(&p->refcnt)) {
if (refcount_read(&p->refcnt) == 1) {
list_del(&p->gc_list);
kmem_cache_free(peer_cachep, p);
}
@ -202,10 +202,11 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr,
int cmp = inetpeer_addr_cmp(daddr, &u->daddr);
if (cmp == 0) {
/* Before taking a reference, check if this entry was
* deleted (refcnt=-1)
* deleted (refcnt=0)
*/
if (!atomic_add_unless(&u->refcnt, 1, -1))
if (!refcount_inc_not_zero(&u->refcnt)) {
u = NULL;
}
return u;
}
if (cmp == -1)
@ -382,11 +383,10 @@ static int inet_peer_gc(struct inet_peer_base *base,
while (stackptr > stack) {
stackptr--;
p = rcu_deref_locked(**stackptr, base);
if (atomic_read(&p->refcnt) == 0) {
if (refcount_read(&p->refcnt) == 1) {
smp_rmb();
delta = (__u32)jiffies - p->dtime;
if (delta >= ttl &&
atomic_cmpxchg(&p->refcnt, 0, -1) == 0) {
if (delta >= ttl && refcount_dec_if_one(&p->refcnt)) {
p->gc_next = gchead;
gchead = p;
}
@ -432,7 +432,7 @@ struct inet_peer *inet_getpeer(struct inet_peer_base *base,
relookup:
p = lookup(daddr, stack, base);
if (p != peer_avl_empty) {
atomic_inc(&p->refcnt);
refcount_inc(&p->refcnt);
write_sequnlock_bh(&base->lock);
return p;
}
@ -444,7 +444,7 @@ relookup:
p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL;
if (p) {
p->daddr = *daddr;
atomic_set(&p->refcnt, 1);
refcount_set(&p->refcnt, 2);
atomic_set(&p->rid, 0);
p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
p->rate_tokens = 0;
@ -468,7 +468,7 @@ void inet_putpeer(struct inet_peer *p)
{
p->dtime = (__u32)jiffies;
smp_mb__before_atomic();
atomic_dec(&p->refcnt);
refcount_dec(&p->refcnt);
}
EXPORT_SYMBOL_GPL(inet_putpeer);
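
The inetpeer conversion is the one hunk that changes values rather than just names: because refcount_t reserves 0 as the dead state, the whole counting scheme shifts up by one. "In tree, unused" is now count 1 instead of 0, a fresh entry starts at 2 (tree plus caller), the old deleted marker of -1 becomes 0, and the GC's atomic_cmpxchg(&refcnt, 0, -1) becomes refcount_dec_if_one(). A sketch of the shifted scheme with hypothetical names:

	#include <linux/refcount.h>
	#include <linux/types.h>

	struct example_peer {
		refcount_t refcnt;	/* 1: in tree, unused; 0: being deleted */
	};

	static void example_peer_create(struct example_peer *p)
	{
		refcount_set(&p->refcnt, 2);	/* tree ref + caller's ref */
	}

	/* GC: claim the last (tree) reference atomically.  Fails if any
	 * user still holds one; this replaces the old
	 * atomic_cmpxchg(&refcnt, 0, -1) == 0 test.
	 */
	static bool example_peer_try_gc(struct example_peer *p)
	{
		return refcount_dec_if_one(&p->refcnt);
	}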

View file

@ -312,7 +312,7 @@ static int ip_frag_reinit(struct ipq *qp)
unsigned int sum_truesize = 0;
if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
atomic_inc(&qp->q.refcnt);
refcount_inc(&qp->q.refcnt);
return -ETIMEDOUT;
}

View file

@ -1037,7 +1037,7 @@ alloc_new_skb:
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
if (refcount_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
@ -1145,7 +1145,7 @@ alloc_new_skb:
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
refcount_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
@ -1369,7 +1369,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
skb->len += len;
skb->data_len += len;
skb->truesize += len;
atomic_add(len, &sk->sk_wmem_alloc);
refcount_add(len, &sk->sk_wmem_alloc);
offset += len;
size -= len;
}

View file

@ -290,7 +290,7 @@ void ping_close(struct sock *sk, long timeout)
{
pr_debug("ping_close(sk=%p,sk->num=%u)\n",
inet_sk(sk), inet_sk(sk)->inet_num);
pr_debug("isk->refcnt = %d\n", sk->sk_refcnt.counter);
pr_debug("isk->refcnt = %d\n", refcount_read(&sk->sk_refcnt));
sk_common_release(sk);
}
@ -1127,7 +1127,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}

View file

@ -1063,7 +1063,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
0, 0L, 0,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
refcount_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
static int raw_seq_show(struct seq_file *seq, void *v)

View file

@ -213,7 +213,7 @@ struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst,
NULL, &own_req);
if (child) {
atomic_set(&req->rsk_refcnt, 1);
refcount_set(&req->rsk_refcnt, 1);
tcp_sk(child)->tsoffset = tsoff;
sock_rps_save_rxhash(child, skb);
inet_csk_reqsk_queue_add(sk, req, child);

View file

@ -664,7 +664,7 @@ static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
return skb->len < size_goal &&
sysctl_tcp_autocorking &&
skb != tcp_write_queue_head(sk) &&
atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
refcount_read(&sk->sk_wmem_alloc) > skb->truesize;
}
static void tcp_push(struct sock *sk, int flags, int mss_now,
@ -692,7 +692,7 @@ static void tcp_push(struct sock *sk, int flags, int mss_now,
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED.
*/
if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
if (refcount_read(&sk->sk_wmem_alloc) > skb->truesize)
return;
}

View file

@ -214,7 +214,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
TCP_TIMEOUT_INIT, TCP_RTO_MAX);
atomic_set(&req->rsk_refcnt, 2);
refcount_set(&req->rsk_refcnt, 2);
/* Now finish processing the fastopen child socket. */
inet_csk(child)->icsk_af_ops->rebuild_header(child);

View file

@ -2323,7 +2323,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
icsk->icsk_probes_out,
sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
refcount_read(&sk->sk_refcnt), sk,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
@ -2349,7 +2349,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
atomic_read(&tw->tw_refcnt), tw);
refcount_read(&tw->tw_refcnt), tw);
}
#define TMPSZ 150

View file

@ -152,7 +152,7 @@ struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
swap(gso_skb->sk, skb->sk);
swap(gso_skb->destructor, skb->destructor);
sum_truesize += skb->truesize;
atomic_add(sum_truesize - gso_skb->truesize,
refcount_add(sum_truesize - gso_skb->truesize,
&skb->sk->sk_wmem_alloc);
}

View file

@ -861,12 +861,11 @@ void tcp_wfree(struct sk_buff *skb)
struct sock *sk = skb->sk;
struct tcp_sock *tp = tcp_sk(sk);
unsigned long flags, nval, oval;
int wmem;
/* Keep one reference on sk_wmem_alloc.
* Will be released by sk_free() from here or tcp_tasklet_func()
*/
wmem = atomic_sub_return(skb->truesize - 1, &sk->sk_wmem_alloc);
WARN_ON(refcount_sub_and_test(skb->truesize - 1, &sk->sk_wmem_alloc));
/* If this softirq is serviced by ksoftirqd, we are likely under stress.
* Wait until our queues (qdisc + devices) are drained.
@ -875,7 +874,7 @@ void tcp_wfree(struct sk_buff *skb)
* - chance for incoming ACK (processed by another cpu maybe)
* to migrate this flow (skb->ooo_okay will be eventually set)
*/
if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
if (refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
goto out;
for (oval = READ_ONCE(sk->sk_tsq_flags);; oval = nval) {
@ -925,7 +924,7 @@ enum hrtimer_restart tcp_pace_kick(struct hrtimer *timer)
if (nval != oval)
continue;
if (!atomic_inc_not_zero(&sk->sk_wmem_alloc))
if (!refcount_inc_not_zero(&sk->sk_wmem_alloc))
break;
/* queue this socket to tasklet queue */
tsq = this_cpu_ptr(&tsq_tasklet);
@ -1045,7 +1044,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
skb->sk = sk;
skb->destructor = skb_is_tcp_pure_ack(skb) ? __sock_wfree : tcp_wfree;
skb_set_hash_from_sk(skb, sk);
atomic_add(skb->truesize, &sk->sk_wmem_alloc);
refcount_add(skb->truesize, &sk->sk_wmem_alloc);
skb_set_dst_pending_confirm(skb, sk->sk_dst_pending_confirm);
@ -2176,7 +2175,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
limit = min_t(u32, limit, sysctl_tcp_limit_output_bytes);
limit <<= factor;
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
if (refcount_read(&sk->sk_wmem_alloc) > limit) {
/* Always send the 1st or 2nd skb in write queue.
* No need to wait for TX completion to call us back,
* after softirq/tasklet schedule.
@ -2192,7 +2191,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
* test again the condition.
*/
smp_mb__after_atomic();
if (atomic_read(&sk->sk_wmem_alloc) > limit)
if (refcount_read(&sk->sk_wmem_alloc) > limit)
return true;
}
return false;
@ -2812,7 +2811,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
/* Do not send more than we queued. 1/4 is reserved for possible
* copying overhead: fragmentation, tunneling, mangling etc.
*/
if (atomic_read(&sk->sk_wmem_alloc) >
if (refcount_read(&sk->sk_wmem_alloc) >
min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
sk->sk_sndbuf))
return -EAGAIN;
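
tcp_wfree() keeps one unit of sk_wmem_alloc alive so the socket cannot be freed underneath it; the conversion makes that explicit by warning if refcount_sub_and_test() ever reports zero there. The old code also reused the subtraction's return value, which the refcount API does not provide, so the pressure check now re-reads the counter. A hedged sketch of the shape of that destructor, hypothetical name:

	#include <linux/refcount.h>
	#include <linux/skbuff.h>
	#include <linux/bug.h>
	#include <net/sock.h>

	/* Hypothetical destructor body: drop truesize - 1 so exactly one
	 * unit of sk_wmem_alloc survives until sk_free().  Reaching zero
	 * here would mean that last unit was already gone, hence WARN_ON.
	 */
	static bool example_wfree(struct sock *sk, unsigned int truesize)
	{
		WARN_ON(refcount_sub_and_test(truesize - 1, &sk->sk_wmem_alloc));

		/* refcount_sub_and_test() does not hand back the new value
		 * the way atomic_sub_return() did, so re-read the counter.
		 */
		return refcount_read(&sk->sk_wmem_alloc) >= SKB_TRUESIZE(1);
	}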

View file

@ -577,7 +577,7 @@ struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
sk = __udp4_lib_lookup(net, saddr, sport, daddr, dport,
dif, &udp_table, NULL);
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
return sk;
}
@ -2242,7 +2242,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
uh->source, iph->saddr, dif);
}
if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
return;
skb->sk = sk;
@ -2691,7 +2691,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
0, 0L, 0,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
0, sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}

View file

@ -55,7 +55,7 @@ static int udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb,
req->id.idiag_dport,
req->id.idiag_if, tbl, NULL);
#endif
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
rcu_read_unlock();
err = -ENOENT;
@ -206,7 +206,7 @@ static int __udp_diag_destroy(struct sk_buff *in_skb,
return -EINVAL;
}
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
rcu_read_unlock();

View file

@ -227,7 +227,7 @@ static int calipso_cache_check(const unsigned char *key,
entry->key_len == key_len &&
memcmp(entry->key, key, key_len) == 0) {
entry->activity += 1;
atomic_inc(&entry->lsm_data->refcount);
refcount_inc(&entry->lsm_data->refcount);
secattr->cache = entry->lsm_data;
secattr->flags |= NETLBL_SECATTR_CACHE;
secattr->type = NETLBL_NLTYPE_CALIPSO;
@ -296,7 +296,7 @@ static int calipso_cache_add(const unsigned char *calipso_ptr,
}
entry->key_len = calipso_ptr_len;
entry->hash = calipso_map_cache_hash(calipso_ptr, calipso_ptr_len);
atomic_inc(&secattr->cache->refcount);
refcount_inc(&secattr->cache->refcount);
entry->lsm_data = secattr->cache;
bkt = entry->hash & (CALIPSO_CACHE_BUCKETS - 1);

View file

@ -1041,6 +1041,6 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
0,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
refcount_read(&sp->sk_refcnt), sp,
atomic_read(&sp->sk_drops));
}

View file

@ -275,7 +275,7 @@ int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info
skb->data_len += tailen;
skb->truesize += tailen;
if (sk)
atomic_add(tailen, &sk->sk_wmem_alloc);
refcount_add(tailen, &sk->sk_wmem_alloc);
goto out;
}

View file

@ -75,7 +75,7 @@ begin:
continue;
if (!INET6_MATCH(sk, net, saddr, daddr, ports, dif))
continue;
if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt)))
if (unlikely(!refcount_inc_not_zero(&sk->sk_refcnt)))
goto out;
if (unlikely(!INET6_MATCH(sk, net, saddr, daddr, ports, dif))) {
@ -172,7 +172,7 @@ struct sock *inet6_lookup(struct net *net, struct inet_hashinfo *hashinfo,
sk = __inet6_lookup(net, hashinfo, skb, doff, saddr, sport, daddr,
ntohs(dport), dif, &refcounted);
if (sk && !refcounted && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcounted && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
return sk;
}

View file

@ -1472,7 +1472,7 @@ alloc_new_skb:
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
if (refcount_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len, 1,
@ -1581,7 +1581,7 @@ alloc_new_skb:
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
refcount_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;

View file

@ -194,7 +194,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
if (ipv6_opt_accepted(sk, skb, &TCP_SKB_CB(skb)->header.h6) ||
np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim) {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
ireq->pktopts = skb;
}

View file

@ -734,7 +734,7 @@ static void tcp_v6_init_req(struct request_sock *req,
np->rxopt.bits.rxinfo ||
np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
np->rxopt.bits.rxohlim || np->repflow)) {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
ireq->pktopts = skb;
}
}
@ -1809,7 +1809,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
icsk->icsk_probes_out,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
refcount_read(&sp->sk_refcnt), sp,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
@ -1842,7 +1842,7 @@ static void get_timewait6_sock(struct seq_file *seq,
dest->s6_addr32[2], dest->s6_addr32[3], destp,
tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
atomic_read(&tw->tw_refcnt), tw);
refcount_read(&tw->tw_refcnt), tw);
}
static int tcp6_seq_show(struct seq_file *seq, void *v)

View file

@ -325,7 +325,7 @@ struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be
sk = __udp6_lib_lookup(net, saddr, sport, daddr, dport,
dif, &udp_table, NULL);
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
return sk;
}
@ -916,7 +916,7 @@ static void udp_v6_early_demux(struct sk_buff *skb)
else
return;
if (!sk || !atomic_inc_not_zero_hint(&sk->sk_refcnt, 2))
if (!sk || !refcount_inc_not_zero(&sk->sk_refcnt))
return;
skb->sk = sk;

View file

@ -162,7 +162,7 @@ static void kcm_format_psock(struct kcm_psock *psock, struct seq_file *seq,
psock->sk->sk_receive_queue.qlen,
atomic_read(&psock->sk->sk_rmem_alloc),
psock->sk->sk_write_queue.qlen,
atomic_read(&psock->sk->sk_wmem_alloc));
refcount_read(&psock->sk->sk_wmem_alloc));
if (psock->done)
seq_puts(seq, "Done ");

View file

@ -109,7 +109,7 @@ static void pfkey_sock_destruct(struct sock *sk)
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
atomic_dec(&net_pfkey->socks_nr);
}
@ -203,11 +203,11 @@ static int pfkey_broadcast_one(struct sk_buff *skb, struct sk_buff **skb2,
sock_hold(sk);
if (*skb2 == NULL) {
if (atomic_read(&skb->users) != 1) {
if (refcount_read(&skb->users) != 1) {
*skb2 = skb_clone(skb, allocation);
} else {
*skb2 = skb;
atomic_inc(&skb->users);
refcount_inc(&skb->users);
}
}
if (*skb2 != NULL) {
@ -3739,7 +3739,7 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
else
seq_printf(f, "%pK %-6d %-6u %-6u %-6u %-6lu\n",
s,
atomic_read(&s->sk_refcnt),
refcount_read(&s->sk_refcnt),
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
from_kuid_munged(seq_user_ns(f), sock_i_uid(s)),
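
pfkey_broadcast_one() decides between sharing and cloning by inspecting skb->users: if the caller holds the only reference, the skb can be handed over with just an extra refcount_inc(); otherwise it must be cloned so other holders are unaffected. A sketch of that decision with a hypothetical helper:

	#include <linux/refcount.h>
	#include <linux/skbuff.h>

	static struct sk_buff *example_share_or_clone(struct sk_buff *skb,
						      gfp_t allocation)
	{
		/* Someone else may still touch the skb: clone it. */
		if (refcount_read(&skb->users) != 1)
			return skb_clone(skb, allocation);

		/* Sole user: just take another reference and reuse it. */
		refcount_inc(&skb->users);
		return skb;
	}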

View file

@ -144,9 +144,8 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)
tunnel->encap == L2TP_ENCAPTYPE_IP ? "IP" :
"");
seq_printf(m, " %d sessions, refcnt %d/%d\n", session_count,
tunnel->sock ? atomic_read(&tunnel->sock->sk_refcnt) : 0,
tunnel->sock ? refcount_read(&tunnel->sock->sk_refcnt) : 0,
atomic_read(&tunnel->ref_count));
seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",
tunnel->debug,
atomic_long_read(&tunnel->stats.tx_packets),

View file

@ -507,7 +507,7 @@ again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_estab_match(sap, daddr, laddr, rc)) {
/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_estab_match(sap, daddr, laddr, rc))) {
@ -566,7 +566,7 @@ again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_listener_match(sap, laddr, rc)) {
/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_listener_match(sap, laddr, rc))) {
@ -973,9 +973,9 @@ void llc_sk_free(struct sock *sk)
skb_queue_purge(&sk->sk_write_queue);
skb_queue_purge(&llc->pdu_unack_q);
#ifdef LLC_REFCNT_DEBUG
if (atomic_read(&sk->sk_refcnt) != 1) {
if (refcount_read(&sk->sk_refcnt) != 1) {
printk(KERN_DEBUG "Destruction of LLC sock %p delayed in %s, cnt=%d\n",
sk, __func__, atomic_read(&sk->sk_refcnt));
sk, __func__, refcount_read(&sk->sk_refcnt));
printk(KERN_DEBUG "%d LLC sockets are still alive\n",
atomic_read(&llc_sock_nr));
} else {

View file

@ -329,7 +329,7 @@ again:
sk_nulls_for_each_rcu(rc, node, laddr_hb) {
if (llc_dgram_match(sap, laddr, rc)) {
/* Extra checks required by SLAB_TYPESAFE_BY_RCU */
if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
if (unlikely(!refcount_inc_not_zero(&rc->sk_refcnt)))
goto again;
if (unlikely(llc_sk(rc)->sap != sap ||
!llc_dgram_match(sap, laddr, rc))) {

View file

@ -127,7 +127,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
daddr, dport,
in->ifindex);
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
/* NOTE: we return listeners even if bound to
* 0.0.0.0, those are filtered out in
@ -197,7 +197,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
daddr, ntohs(dport),
in->ifindex);
if (sk && !atomic_inc_not_zero(&sk->sk_refcnt))
if (sk && !refcount_inc_not_zero(&sk->sk_refcnt))
sk = NULL;
/* NOTE: we return listeners even if bound to
* 0.0.0.0, those are filtered out in

View file

@ -372,7 +372,7 @@ static void netlink_sock_destruct(struct sock *sk)
}
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
WARN_ON(nlk_sk(sk)->groups);
}
@ -575,7 +575,7 @@ static void netlink_remove(struct sock *sk)
table = &nl_table[sk->sk_protocol];
if (!rhashtable_remove_fast(&table->hash, &nlk_sk(sk)->node,
netlink_rhashtable_params)) {
WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
__sock_put(sk);
}
@ -691,7 +691,7 @@ static void deferred_put_nlk_sk(struct rcu_head *head)
struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
struct sock *sk = &nlk->sk;
if (!atomic_dec_and_test(&sk->sk_refcnt))
if (!refcount_dec_and_test(&sk->sk_refcnt))
return;
if (nlk->cb_running && nlk->cb.done) {
@ -1848,7 +1848,7 @@ static int netlink_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
}
if (dst_group) {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
}
err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
@ -2226,7 +2226,7 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
struct netlink_sock *nlk;
int ret;
atomic_inc(&skb->users);
refcount_inc(&skb->users);
sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
if (sk == NULL) {
@ -2431,7 +2431,7 @@ int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
int exclude_portid = 0;
if (report) {
atomic_inc(&skb->users);
refcount_inc(&skb->users);
exclude_portid = portid;
}
@ -2568,7 +2568,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
sk_rmem_alloc_get(s),
sk_wmem_alloc_get(s),
nlk->cb_running,
atomic_read(&s->sk_refcnt),
refcount_read(&s->sk_refcnt),
atomic_read(&s->sk_drops),
sock_i_ino(s)
);

View file

@ -1317,7 +1317,7 @@ static void packet_sock_destruct(struct sock *sk)
skb_queue_purge(&sk->sk_error_queue);
WARN_ON(atomic_read(&sk->sk_rmem_alloc));
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
if (!sock_flag(sk, SOCK_DEAD)) {
pr_err("Attempt to release alive packet socket: %p\n", sk);
@ -1739,7 +1739,7 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->flags = flags;
INIT_LIST_HEAD(&match->list);
spin_lock_init(&match->lock);
atomic_set(&match->sk_ref, 0);
refcount_set(&match->sk_ref, 0);
fanout_init_data(match);
match->prot_hook.type = po->prot_hook.type;
match->prot_hook.dev = po->prot_hook.dev;
@ -1753,10 +1753,10 @@ static int fanout_add(struct sock *sk, u16 id, u16 type_flags)
match->prot_hook.type == po->prot_hook.type &&
match->prot_hook.dev == po->prot_hook.dev) {
err = -ENOSPC;
if (atomic_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
if (refcount_read(&match->sk_ref) < PACKET_FANOUT_MAX) {
__dev_remove_pack(&po->prot_hook);
po->fanout = match;
atomic_inc(&match->sk_ref);
refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
__fanout_link(sk, po);
err = 0;
}
@ -1785,7 +1785,7 @@ static struct packet_fanout *fanout_release(struct sock *sk)
if (f) {
po->fanout = NULL;
if (atomic_dec_and_test(&f->sk_ref))
if (refcount_dec_and_test(&f->sk_ref))
list_del(&f->list);
else
f = NULL;
@ -2523,7 +2523,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
skb->data_len = to_write;
skb->len += to_write;
skb->truesize += to_write;
atomic_add(to_write, &po->sk.sk_wmem_alloc);
refcount_add(to_write, &po->sk.sk_wmem_alloc);
while (likely(to_write)) {
nr_frags = skb_shinfo(skb)->nr_frags;
@ -4495,7 +4495,7 @@ static int packet_seq_show(struct seq_file *seq, void *v)
seq_printf(seq,
"%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
s,
atomic_read(&s->sk_refcnt),
refcount_read(&s->sk_refcnt),
s->sk_type,
ntohs(po->num),
po->ifindex,
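
fanout's sk_ref is the other counter that legitimately grows from 0, since a fresh fanout group has no members yet, and refcount_inc() forbids a 0 -> 1 transition. The surrounding code holds fanout_mutex, so the count cannot change concurrently, and the patch uses a read-modify-set instead. A sketch of the locked increment-from-zero, hypothetical names:

	#include <linux/refcount.h>
	#include <linux/mutex.h>
	#include <linux/errno.h>

	static DEFINE_MUTEX(example_group_mutex);

	struct example_group {
		refcount_t sk_ref;	/* member count; legitimately 0 when new */
	};

	static int example_group_join(struct example_group *g, unsigned int max)
	{
		int err = -ENOSPC;

		mutex_lock(&example_group_mutex);
		if (refcount_read(&g->sk_ref) < max) {
			/* refcount_inc() would WARN on 0 -> 1; the mutex
			 * makes this read-modify-set race free.
			 */
			refcount_set(&g->sk_ref, refcount_read(&g->sk_ref) + 1);
			err = 0;
		}
		mutex_unlock(&example_group_mutex);
		return err;
	}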

View file

@ -1,6 +1,8 @@
#ifndef __PACKET_INTERNAL_H__
#define __PACKET_INTERNAL_H__
#include <linux/refcount.h>
struct packet_mclist {
struct packet_mclist *next;
int ifindex;
@ -86,7 +88,7 @@ struct packet_fanout {
struct list_head list;
struct sock *arr[PACKET_FANOUT_MAX];
spinlock_t lock;
atomic_t sk_ref;
refcount_t sk_ref;
struct packet_type prot_hook ____cacheline_aligned_in_smp;
};

View file

@ -360,7 +360,7 @@ static unsigned int pn_socket_poll(struct file *file, struct socket *sock,
return POLLHUP;
if (sk->sk_state == TCP_ESTABLISHED &&
atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
atomic_read(&pn->tx_credits))
mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
@ -614,7 +614,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
refcount_read(&sk->sk_refcnt), sk,
atomic_read(&sk->sk_drops));
}
seq_pad(seq, '\n');

View file

@ -202,7 +202,7 @@ void rds_tcp_write_space(struct sock *sk)
tc->t_last_seen_una = rds_tcp_snd_una(tc);
rds_send_path_drop_acked(cp, rds_tcp_snd_una(tc), rds_tcp_is_acked);
if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
if ((refcount_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf)
queue_delayed_work(rds_wq, &cp->cp_send_w, 0);
out:

View file

@ -53,7 +53,7 @@ static void rxrpc_sock_destructor(struct sock *);
*/
static inline int rxrpc_writable(struct sock *sk)
{
return atomic_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
return refcount_read(&sk->sk_wmem_alloc) < (size_t) sk->sk_sndbuf;
}
/*
@ -730,7 +730,7 @@ static void rxrpc_sock_destructor(struct sock *sk)
rxrpc_purge_queue(&sk->sk_receive_queue);
WARN_ON(atomic_read(&sk->sk_wmem_alloc));
WARN_ON(refcount_read(&sk->sk_wmem_alloc));
WARN_ON(!sk_unhashed(sk));
WARN_ON(sk->sk_socket);
@ -747,7 +747,7 @@ static int rxrpc_release_sock(struct sock *sk)
{
struct rxrpc_sock *rx = rxrpc_sk(sk);
_enter("%p{%d,%d}", sk, sk->sk_state, atomic_read(&sk->sk_refcnt));
_enter("%p{%d,%d}", sk, sk->sk_state, refcount_read(&sk->sk_refcnt));
/* declare the socket closed for business */
sock_orphan(sk);

View file

@ -27,7 +27,7 @@ void rxrpc_new_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(select_skb_count(op));
trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}
/*
@ -38,7 +38,7 @@ void rxrpc_see_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
const void *here = __builtin_return_address(0);
if (skb) {
int n = atomic_read(select_skb_count(op));
trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
}
}
@ -49,7 +49,7 @@ void rxrpc_get_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
{
const void *here = __builtin_return_address(0);
int n = atomic_inc_return(select_skb_count(op));
trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
skb_get(skb);
}
@ -63,7 +63,7 @@ void rxrpc_free_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
int n;
CHECK_SLAB_OKAY(&skb->users);
n = atomic_dec_return(select_skb_count(op));
trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
@ -78,7 +78,7 @@ void rxrpc_lose_skb(struct sk_buff *skb, enum rxrpc_skb_trace op)
int n;
CHECK_SLAB_OKAY(&skb->users);
n = atomic_dec_return(select_skb_count(op));
trace_rxrpc_skb(skb, op, atomic_read(&skb->users), n, here);
trace_rxrpc_skb(skb, op, refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}
@ -93,7 +93,7 @@ void rxrpc_purge_queue(struct sk_buff_head *list)
while ((skb = skb_dequeue((list))) != NULL) {
int n = atomic_dec_return(select_skb_count(rxrpc_skb_rx_purged));
trace_rxrpc_skb(skb, rxrpc_skb_rx_purged,
atomic_read(&skb->users), n, here);
refcount_read(&skb->users), n, here);
kfree_skb(skb);
}
}

View file

@ -340,7 +340,7 @@ META_COLLECTOR(int_sk_refcnt)
*err = -1;
return;
}
dst->value = atomic_read(&skb->sk->sk_refcnt);
dst->value = refcount_read(&skb->sk->sk_refcnt);
}
META_COLLECTOR(int_sk_rcvbuf)

View file

@ -498,7 +498,7 @@ static void sch_atm_dequeue(unsigned long data)
ATM_SKB(skb)->vcc = flow->vcc;
memcpy(skb_push(skb, flow->hdr_len), flow->hdr,
flow->hdr_len);
atomic_add(skb->truesize,
refcount_add(skb->truesize,
&sk_atm(flow->vcc)->sk_wmem_alloc);
/* atm.atm_options are already set by atm_tc_enqueue */
flow->vcc->send(flow->vcc, skb);

View file

@ -402,7 +402,7 @@ static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
* therefore only reserve a single byte to keep socket around until
* the packet has been transmitted.
*/
atomic_inc(&sk->sk_wmem_alloc);
refcount_inc(&sk->sk_wmem_alloc);
}
static int sctp_packet_pack(struct sctp_packet *packet,
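
The sctp hunk charges only a single unit per packet: as the comment above it notes, the payload is accounted elsewhere, and the one byte merely keeps the socket alive until transmission, which maps onto a plain refcount_inc(). A minimal sketch, hypothetical name:

	#include <linux/refcount.h>
	#include <linux/skbuff.h>
	#include <net/sock.h>

	/* Hypothetical: pin the socket with a one-unit wmem charge; the
	 * skb destructor releases it once the packet has been sent.
	 */
	static void example_set_owner_w(struct sk_buff *skb, struct sock *sk)
	{
		skb->sk = sk;
		refcount_inc(&sk->sk_wmem_alloc);
	}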

Some files were not shown because too many files have changed in this diff.