[GFS2] Use slab properly with glocks

We can take advantage of the slab allocator to ensure that all the list
heads and the spinlock (plus one or two other fields) are initialised
by slab to speed up allocation of glocks.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Steven Whitehouse 2006-08-30 10:36:52 -04:00
parent 5e2b0613ed
commit ec45d9f583
2 changed files with 34 additions and 28 deletions
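
The commit message above describes the classic slab-constructor pattern: register an init-once callback with kmem_cache_create() so that fields which are identical for every glock (the list heads and the spinlock) are set up when the slab populates a page of objects, instead of being re-initialised on every allocation. Below is a minimal sketch of that pattern (not part of the commit) using the 2006-era kmem_cache API that appears in the diff; the struct, field and function names here are illustrative stand-ins, not the real gfs2_glock definitions.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_glock {                  /* stand-in for struct gfs2_glock */
        struct list_head gl_list;       /* constant-init fields ... */
        spinlock_t gl_spin;
        struct list_head gl_holders;
        u64 gl_name;                    /* ... vs. per-allocation state */
};

static kmem_cache_t *example_cachep;

/* Runs once per object when the slab populates a page, not on every alloc. */
static void example_init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
        struct example_glock *gl = foo;

        if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) ==
            SLAB_CTOR_CONSTRUCTOR) {
                INIT_LIST_HEAD(&gl->gl_list);
                spin_lock_init(&gl->gl_spin);
                INIT_LIST_HEAD(&gl->gl_holders);
        }
}

static int __init example_init(void)
{
        example_cachep = kmem_cache_create("example_glock",
                                           sizeof(struct example_glock),
                                           0, 0, example_init_once, NULL);
        if (!example_cachep)
                return -ENOMEM;
        return 0;
}
module_init(example_init);
MODULE_LICENSE("GPL");

The trade-off of this pattern is that code freeing an object back to the cache must leave the constructed fields in their initial state (lists empty, lock not held), because the constructor does not run again when kmem_cache_alloc() hands the same object out a second time. Per-allocation fields such as gl_name above still have to be set explicitly after each allocation, which is why gfs2_glock_get() keeps those assignments in the first hunk below.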

fs/gfs2/glock.c

@@ -278,31 +278,22 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
if (!gl)
return -ENOMEM;
memset(gl, 0, sizeof(struct gfs2_glock));
INIT_LIST_HEAD(&gl->gl_list);
gl->gl_flags = 0;
gl->gl_name = name;
kref_init(&gl->gl_ref);
spin_lock_init(&gl->gl_spin);
gl->gl_state = LM_ST_UNLOCKED;
gl->gl_owner = NULL;
gl->gl_ip = 0;
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_waiters1);
INIT_LIST_HEAD(&gl->gl_waiters2);
INIT_LIST_HEAD(&gl->gl_waiters3);
gl->gl_ops = glops;
gl->gl_req_gh = NULL;
gl->gl_req_bh = NULL;
gl->gl_vn = 0;
gl->gl_stamp = jiffies;
gl->gl_object = NULL;
gl->gl_bucket = bucket;
INIT_LIST_HEAD(&gl->gl_reclaim);
gl->gl_sbd = sdp;
gl->gl_aspace = NULL;
lops_init_le(&gl->gl_le, &gfs2_glock_lops);
INIT_LIST_HEAD(&gl->gl_ail_list);
/* If this glock protects actual on-disk data or metadata blocks,
create a VFS inode to manage the pages/buffers holding them. */
@@ -334,13 +325,11 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, uint64_t number,
return 0;
fail_aspace:
fail_aspace:
if (gl->gl_aspace)
gfs2_aspace_put(gl->gl_aspace);
fail:
fail:
kmem_cache_free(gfs2_glock_cachep, gl);
return error;
}
@@ -495,9 +484,7 @@ static int rq_promote(struct gfs2_holder *gh)
gfs2_reclaim_glock(sdp);
}
glops->go_xmote_th(gl, gh->gh_state,
gh->gh_flags);
glops->go_xmote_th(gl, gh->gh_state, gh->gh_flags);
spin_lock(&gl->gl_spin);
}
return 1;
@@ -935,8 +922,7 @@ void gfs2_glock_xmote_th(struct gfs2_glock *gl, unsigned int state, int flags)
gfs2_glock_hold(gl);
gl->gl_req_bh = xmote_bh;
lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state,
lck_flags);
lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
return;
@@ -1019,8 +1005,7 @@ void gfs2_glock_drop_th(struct gfs2_glock *gl)
if (gl->gl_state == LM_ST_EXCLUSIVE) {
if (glops->go_sync)
glops->go_sync(gl,
DIO_METADATA | DIO_DATA | DIO_RELEASE);
glops->go_sync(gl, DIO_METADATA | DIO_DATA | DIO_RELEASE);
}
gfs2_glock_hold(gl);

fs/gfs2/main.c

@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfs2_ondisk.h>
#include <asm/atomic.h>
#include "gfs2.h"
#include "lm_interface.h"
@@ -35,6 +36,25 @@ static void gfs2_init_inode_once(void *foo, kmem_cache_t *cachep, unsigned long
}
}
static void gfs2_init_glock_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
{
struct gfs2_glock *gl = foo;
if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
SLAB_CTOR_CONSTRUCTOR) {
INIT_LIST_HEAD(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
INIT_LIST_HEAD(&gl->gl_waiters1);
INIT_LIST_HEAD(&gl->gl_waiters2);
INIT_LIST_HEAD(&gl->gl_waiters3);
gl->gl_lvb = NULL;
atomic_set(&gl->gl_lvb_count, 0);
INIT_LIST_HEAD(&gl->gl_reclaim);
INIT_LIST_HEAD(&gl->gl_ail_list);
atomic_set(&gl->gl_ail_count, 0);
}
}
/**
* init_gfs2_fs - Register GFS2 as a filesystem
*
@@ -55,7 +75,8 @@ static int __init init_gfs2_fs(void)
gfs2_glock_cachep = kmem_cache_create("gfs2_glock",
sizeof(struct gfs2_glock),
0, 0, NULL, NULL);
0, 0,
gfs2_init_glock_once, NULL);
if (!gfs2_glock_cachep)
goto fail;