
nfsd: convert file cache to use over/underflow safe refcount

Use the 'refcount_t' type instead of 'atomic_t' for improved
refcounting safety.

Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Trond Myklebust 2020-01-14 12:02:44 -05:00 committed by J. Bruce Fields
parent c19285596d
commit 689827cd5b
3 changed files with 15 additions and 16 deletions
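For reference, here is a minimal sketch (not part of the patch) of the <linux/refcount.h> primitives this conversion substitutes for the old atomic_t calls. The demo_obj structure and demo_* helpers below are hypothetical stand-ins for nfsd_file; in the patch the equivalent fields are nf->nf_ref and nfm->nfm_ref. The safety gain referred to in the commit message is that refcount_t saturates and WARNs on overflow/underflow instead of silently wrapping.

/*
 * Minimal sketch, not part of the patch: refcount_t primitives that
 * replace the atomic_t calls in this conversion.  demo_obj and the
 * demo_* helpers are hypothetical.
 */
#include <linux/refcount.h>
#include <linux/slab.h>

struct demo_obj {
        refcount_t ref;                         /* was: atomic_t ref */
};

static struct demo_obj *demo_alloc(void)
{
        struct demo_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

        if (obj)
                refcount_set(&obj->ref, 1);     /* was: atomic_set(&obj->ref, 1) */
        return obj;
}

/* Take a reference only if the object is still live (count != 0). */
static struct demo_obj *demo_get(struct demo_obj *obj)
{
        if (!refcount_inc_not_zero(&obj->ref)) /* was: atomic_inc_not_zero() */
                return NULL;
        return obj;
}

/*
 * Drop a reference and free on the final put.  Unlike atomic_dec_and_test(),
 * refcount_dec_and_test() saturates and WARNs on underflow rather than
 * wrapping around.
 */
static void demo_put(struct demo_obj *obj)
{
        if (refcount_dec_and_test(&obj->ref))  /* was: atomic_dec_and_test() */
                kfree(obj);
}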

fs/nfsd/filecache.c

@@ -100,7 +100,7 @@ nfsd_file_mark_free(struct fsnotify_mark *mark)
 static struct nfsd_file_mark *
 nfsd_file_mark_get(struct nfsd_file_mark *nfm)
 {
-        if (!atomic_inc_not_zero(&nfm->nfm_ref))
+        if (!refcount_inc_not_zero(&nfm->nfm_ref))
                 return NULL;
         return nfm;
 }
@@ -108,8 +108,7 @@ nfsd_file_mark_get(struct nfsd_file_mark *nfm)
 static void
 nfsd_file_mark_put(struct nfsd_file_mark *nfm)
 {
-        if (atomic_dec_and_test(&nfm->nfm_ref)) {
+        if (refcount_dec_and_test(&nfm->nfm_ref)) {
                 fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
                 fsnotify_put_mark(&nfm->nfm_mark);
         }
@@ -148,7 +147,7 @@ nfsd_file_mark_find_or_create(struct nfsd_file *nf)
                         return NULL;
                 fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
                 new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
-                atomic_set(&new->nfm_ref, 1);
+                refcount_set(&new->nfm_ref, 1);
                 err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);
@@ -186,7 +185,7 @@ nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
                 nf->nf_flags = 0;
                 nf->nf_inode = inode;
                 nf->nf_hashval = hashval;
-                atomic_set(&nf->nf_ref, 1);
+                refcount_set(&nf->nf_ref, 1);
                 nf->nf_may = may & NFSD_FILE_MAY_MASK;
                 if (may & NFSD_MAY_NOT_BREAK_LEASE) {
                         if (may & NFSD_MAY_WRITE)
@@ -280,7 +279,7 @@ nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *disp
         if (!nfsd_file_unhash(nf))
                 return false;
         /* keep final reference for nfsd_file_lru_dispose */
-        if (atomic_add_unless(&nf->nf_ref, -1, 1))
+        if (refcount_dec_not_one(&nf->nf_ref))
                 return true;
         list_add(&nf->nf_lru, dispose);
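The only substitution in this file that is not a one-to-one rename is the one in the hunk above: atomic_add_unless(&nf->nf_ref, -1, 1) becomes refcount_dec_not_one(&nf->nf_ref). Both mean "decrement, but never drop the last reference", returning true when the decrement actually happened, so the final reference stays with nfsd_file_lru_dispose as the comment says. A hedged side-by-side sketch of the equivalence:

        /* old: add -1 unless the counter currently holds 1 */
        if (atomic_add_unless(&nf->nf_ref, -1, 1))
                return true;

        /* new: decrement unless the refcount is exactly 1 */
        if (refcount_dec_not_one(&nf->nf_ref))
                return true;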
@@ -292,7 +291,7 @@ nfsd_file_put_noref(struct nfsd_file *nf)
 {
         trace_nfsd_file_put(nf);
-        if (atomic_dec_and_test(&nf->nf_ref)) {
+        if (refcount_dec_and_test(&nf->nf_ref)) {
                 WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
                 nfsd_file_free(nf);
         }
@@ -304,7 +303,7 @@ nfsd_file_put(struct nfsd_file *nf)
         bool is_hashed;
         set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
-        if (atomic_read(&nf->nf_ref) > 2 || !nf->nf_file) {
+        if (refcount_read(&nf->nf_ref) > 2 || !nf->nf_file) {
                 nfsd_file_put_noref(nf);
                 return;
         }
@@ -321,7 +320,7 @@ nfsd_file_put(struct nfsd_file *nf)
 struct nfsd_file *
 nfsd_file_get(struct nfsd_file *nf)
 {
-        if (likely(atomic_inc_not_zero(&nf->nf_ref)))
+        if (likely(refcount_inc_not_zero(&nf->nf_ref)))
                 return nf;
         return NULL;
 }
@@ -347,7 +346,7 @@ nfsd_file_dispose_list_sync(struct list_head *dispose)
         while(!list_empty(dispose)) {
                 nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                 list_del(&nf->nf_lru);
-                if (!atomic_dec_and_test(&nf->nf_ref))
+                if (!refcount_dec_and_test(&nf->nf_ref))
                         continue;
                 if (nfsd_file_free(nf))
                         flush = true;
@@ -430,7 +429,7 @@ nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
          * counter. Here we check the counter and then test and clear the flag.
          * That order is deliberate to ensure that we can do this locklessly.
          */
-        if (atomic_read(&nf->nf_ref) > 1)
+        if (refcount_read(&nf->nf_ref) > 1)
                 goto out_skip;
         /*
@@ -1019,7 +1018,7 @@ out:
 open_file:
         nf = new;
         /* Take reference for the hashtable */
-        atomic_inc(&nf->nf_ref);
+        refcount_inc(&nf->nf_ref);
         __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
         __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
         list_lru_add(&nfsd_file_lru, &nf->nf_lru);

fs/nfsd/filecache.h

@@ -19,7 +19,7 @@
  */
 struct nfsd_file_mark {
         struct fsnotify_mark nfm_mark;
-        atomic_t nfm_ref;
+        refcount_t nfm_ref;
 };
 /*
@@ -43,7 +43,7 @@ struct nfsd_file {
         unsigned long nf_flags;
         struct inode *nf_inode;
         unsigned int nf_hashval;
-        atomic_t nf_ref;
+        refcount_t nf_ref;
         unsigned char nf_may;
         struct nfsd_file_mark *nf_mark;
         struct rw_semaphore nf_rwsem;

fs/nfsd/trace.h

@@ -201,7 +201,7 @@ DECLARE_EVENT_CLASS(nfsd_file_class,
         TP_fast_assign(
                 __entry->nf_hashval = nf->nf_hashval;
                 __entry->nf_inode = nf->nf_inode;
-                __entry->nf_ref = atomic_read(&nf->nf_ref);
+                __entry->nf_ref = refcount_read(&nf->nf_ref);
                 __entry->nf_flags = nf->nf_flags;
                 __entry->nf_may = nf->nf_may;
                 __entry->nf_file = nf->nf_file;
@@ -250,7 +250,7 @@ TRACE_EVENT(nfsd_file_acquire,
                 __entry->hash = hash;
                 __entry->inode = inode;
                 __entry->may_flags = may_flags;
-                __entry->nf_ref = nf ? atomic_read(&nf->nf_ref) : 0;
+                __entry->nf_ref = nf ? refcount_read(&nf->nf_ref) : 0;
                 __entry->nf_flags = nf ? nf->nf_flags : 0;
                 __entry->nf_may = nf ? nf->nf_may : 0;
                 __entry->nf_file = nf ? nf->nf_file : NULL;