diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index c0f59d1b13dc..2cac34ac756a 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -384,7 +384,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
 			struct buffer_head *bh = jh2bh(jh);
 
 			jbd_lock_bh_state(bh);
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			jbd_unlock_bh_state(bh);
 		}
@@ -801,14 +801,14 @@ restart_loop:
 		 * Otherwise, we can just throw away the frozen data now.
 		 */
 		if (jh->b_committed_data) {
-			jbd2_slab_free(jh->b_committed_data, bh->b_size);
+			jbd2_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			if (jh->b_frozen_data) {
 				jh->b_committed_data = jh->b_frozen_data;
 				jh->b_frozen_data = NULL;
 			}
 		} else if (jh->b_frozen_data) {
-			jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd2_free(jh->b_frozen_data, bh->b_size);
 			jh->b_frozen_data = NULL;
 		}
 
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f37324aee817..2d9ecca74f19 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -84,7 +84,6 @@ EXPORT_SYMBOL(jbd2_journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int jbd2_journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -335,10 +334,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd2_slab_free(tmp, bh_in->b_size);
+			jbd2_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -1096,13 +1095,6 @@ int jbd2_journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (jbd2_journal_recover(journal))
@@ -1635,77 +1627,6 @@ void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 	return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
 
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size) (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
-};
-
-static void jbd2_journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int jbd2_journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd2_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd2_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
 /*
  * Journal_head storage management
  */
@@ -1893,13 +1814,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 				printk(KERN_WARNING "%s: freeing "
 						"b_frozen_data\n",
 						__FUNCTION__);
-				jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+				jbd2_free(jh->b_frozen_data, bh->b_size);
 			}
 			if (jh->b_committed_data) {
 				printk(KERN_WARNING "%s: freeing "
 						"b_committed_data\n",
 						__FUNCTION__);
-				jbd2_slab_free(jh->b_committed_data, bh->b_size);
+				jbd2_free(jh->b_committed_data, bh->b_size);
 			}
 			bh->b_private = NULL;
 			jh->b_bh = NULL;	/* debug, really */
@@ -2040,7 +1961,6 @@ static void jbd2_journal_destroy_caches(void)
 	jbd2_journal_destroy_revoke_caches();
 	jbd2_journal_destroy_jbd2_journal_head_cache();
 	jbd2_journal_destroy_handle_cache();
-	jbd2_journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7946ff43fc40..bd047f9af8e7 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -236,7 +236,7 @@ out:
 /* Allocate a new handle. This should probably be in a slab... */
 static handle_t *new_handle(int nblocks)
 {
-	handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+	handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
 	if (!handle)
 		return NULL;
 	memset(handle, 0, sizeof(*handle));
@@ -282,7 +282,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
 
 	err = start_this_handle(journal, handle);
 	if (err < 0) {
-		jbd_free_handle(handle);
+		jbd2_free_handle(handle);
 		current->journal_info = NULL;
 		handle = ERR_PTR(err);
 	}
@@ -668,7 +668,7 @@ repeat:
 				JBUFFER_TRACE(jh, "allocate memory for buffer");
 				jbd_unlock_bh_state(bh);
 				frozen_buffer =
-					jbd2_slab_alloc(jh2bh(jh)->b_size,
+					jbd2_alloc(jh2bh(jh)->b_size,
 							 GFP_NOFS);
 				if (!frozen_buffer) {
 					printk(KERN_EMERG
@@ -728,7 +728,7 @@ done:
 
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd2_slab_free(frozen_buffer, bh->b_size);
+		jbd2_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -881,7 +881,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 				__FUNCTION__);
@@ -908,7 +908,7 @@ repeat:
 
 out:
 	jbd2_journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd2_slab_free(committed_data, bh->b_size);
+		jbd2_free(committed_data, bh->b_size);
 	return err;
 }
@@ -1411,7 +1411,7 @@ int jbd2_journal_stop(handle_t *handle)
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	jbd_free_handle(handle);
+	jbd2_free_handle(handle);
 	return err;
 }
 
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 260d6d76c5f3..e3677929884a 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -72,14 +72,22 @@ extern u8 jbd2_journal_enable_debug;
 #endif
 
 extern void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd2_slab_alloc(size_t size, gfp_t flags);
-extern void jbd2_slab_free(void *ptr, size_t size);
-
 #define jbd_kmalloc(size, flags) \
 	__jbd2_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \
 	__jbd2_kmalloc(__FUNCTION__, (size), (flags), 1)
 
+
+static inline void *jbd2_alloc(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd2_free(void *ptr, size_t size)
+{
+	free_pages((unsigned long)ptr, get_order(size));
+};
+
 #define JBD2_MIN_JOURNAL_BLOCKS 1024
 
 #ifdef __KERNEL__
@@ -959,12 +967,12 @@ void jbd2_journal_put_journal_head(struct journal_head *jh);
  */
 extern struct kmem_cache *jbd2_handle_cache;
 
-static inline handle_t *jbd_alloc_handle(gfp_t gfp_flags)
+static inline handle_t *jbd2_alloc_handle(gfp_t gfp_flags)
 {
 	return kmem_cache_alloc(jbd2_handle_cache, gfp_flags);
 }
 
-static inline void jbd_free_handle(handle_t *handle)
+static inline void jbd2_free_handle(handle_t *handle)
 {
 	kmem_cache_free(jbd2_handle_cache, handle);
 }
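
Note: the new jbd2_alloc()/jbd2_free() helpers added to include/linux/jbd2.h above round each request up to whole pages with get_order() and take them straight from the page allocator, so a frozen-data or committed-data buffer can no longer straddle a page boundary; that is the property the removed per-blocksize jbd_slab caches existed to guarantee under slab debugging. The snippet below is a minimal, hypothetical user-space sketch of that order calculation, assuming a 4096-byte page; order_for() is only an illustrative stand-in for the kernel's get_order(), not the kernel function itself.

#include <stdio.h>

#define PAGE_SIZE 4096UL	/* assumed page size for this illustration */

/* Stand-in for the kernel's get_order(): the smallest order such that
 * (PAGE_SIZE << order) >= size.  Reimplemented here purely for illustration. */
static int order_for(unsigned long size)
{
	int order = 0;
	unsigned long span = PAGE_SIZE;

	while (span < size) {
		span <<= 1;
		order++;
	}
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1024, 2048, 4096, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int order = order_for(sizes[i]);

		/* A 2^order-page allocation starts on a page boundary, so a
		 * buffer of sizes[i] bytes cannot cross one within it. */
		printf("block size %5lu -> order %d (%lu bytes allocated)\n",
		       sizes[i], order, PAGE_SIZE << order);
	}
	return 0;
}

For the journal block sizes in question (1k, 2k, 4k, 8k), each request therefore becomes one or two whole, page-aligned pages, which is what makes the dedicated jbd_slab caches and their alignment trick unnecessary.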