diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index a003d50edcdb..a263d82761df 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -375,7 +375,7 @@ void journal_commit_transaction(journal_t *journal)
 			struct buffer_head *bh = jh2bh(jh);
 
 			jbd_lock_bh_state(bh);
-			jbd_slab_free(jh->b_committed_data, bh->b_size);
+			jbd_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			jbd_unlock_bh_state(bh);
 		}
@@ -792,14 +792,14 @@ restart_loop:
 		 * Otherwise, we can just throw away the frozen data now.
 		 */
 		if (jh->b_committed_data) {
-			jbd_slab_free(jh->b_committed_data, bh->b_size);
+			jbd_free(jh->b_committed_data, bh->b_size);
 			jh->b_committed_data = NULL;
 			if (jh->b_frozen_data) {
 				jh->b_committed_data = jh->b_frozen_data;
 				jh->b_frozen_data = NULL;
 			}
 		} else if (jh->b_frozen_data) {
-			jbd_slab_free(jh->b_frozen_data, bh->b_size);
+			jbd_free(jh->b_frozen_data, bh->b_size);
 			jh->b_frozen_data = NULL;
 		}
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index a6be78c05dce..7edf3fdfdadd 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -83,7 +83,6 @@ EXPORT_SYMBOL(journal_force_commit);
 
 static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
 static void __journal_abort_soft (journal_t *journal, int errno);
-static int journal_create_jbd_slab(size_t slab_size);
 
 /*
  * Helper function used to manage commit timeouts
@@ -334,10 +333,10 @@ repeat:
 		char *tmp;
 
 		jbd_unlock_bh_state(bh_in);
-		tmp = jbd_slab_alloc(bh_in->b_size, GFP_NOFS);
+		tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
 		jbd_lock_bh_state(bh_in);
 		if (jh_in->b_frozen_data) {
-			jbd_slab_free(tmp, bh_in->b_size);
+			jbd_free(tmp, bh_in->b_size);
 			goto repeat;
 		}
 
@@ -1095,13 +1094,6 @@ int journal_load(journal_t *journal)
 		}
 	}
 
-	/*
-	 * Create a slab for this blocksize
-	 */
-	err = journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
-	if (err)
-		return err;
-
 	/* Let the recovery code check whether it needs to recover any
 	 * data from the journal. */
 	if (journal_recover(journal))
@@ -1623,77 +1615,6 @@ void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
 	return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
 }
 
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size)  (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
-	"jbd_1k", "jbd_2k", "jbd_4k", NULL, "jbd_8k"
-};
-
-static void journal_destroy_jbd_slabs(void)
-{
-	int i;
-
-	for (i = 0; i < JBD_MAX_SLABS; i++) {
-		if (jbd_slab[i])
-			kmem_cache_destroy(jbd_slab[i]);
-		jbd_slab[i] = NULL;
-	}
-}
-
-static int journal_create_jbd_slab(size_t slab_size)
-{
-	int i = JBD_SLAB_INDEX(slab_size);
-
-	BUG_ON(i >= JBD_MAX_SLABS);
-
-	/*
-	 * Check if we already have a slab created for this size
-	 */
-	if (jbd_slab[i])
-		return 0;
-
-	/*
-	 * Create a slab and force alignment to be same as slabsize -
-	 * this will make sure that allocations won't cross the page
-	 * boundary.
-	 */
-	jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
-				slab_size, slab_size, 0, NULL);
-	if (!jbd_slab[i]) {
-		printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
-		return -ENOMEM;
-	}
-	return 0;
-}
-
-void * jbd_slab_alloc(size_t size, gfp_t flags)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd_slab_free(void *ptr, size_t size)
-{
-	int idx;
-
-	idx = JBD_SLAB_INDEX(size);
-	BUG_ON(jbd_slab[idx] == NULL);
-	kmem_cache_free(jbd_slab[idx], ptr);
-}
-
 /*
  * Journal_head storage management
  */
@@ -1881,13 +1802,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
 				printk(KERN_WARNING "%s: freeing "
 						"b_frozen_data\n",
 						__FUNCTION__);
-				jbd_slab_free(jh->b_frozen_data, bh->b_size);
+				jbd_free(jh->b_frozen_data, bh->b_size);
 			}
 			if (jh->b_committed_data) {
 				printk(KERN_WARNING "%s: freeing "
 						"b_committed_data\n",
 						__FUNCTION__);
-				jbd_slab_free(jh->b_committed_data, bh->b_size);
+				jbd_free(jh->b_committed_data, bh->b_size);
 			}
 			bh->b_private = NULL;
 			jh->b_bh = NULL;	/* debug, really */
@@ -2042,7 +1963,6 @@ static void journal_destroy_caches(void)
 	journal_destroy_revoke_caches();
 	journal_destroy_journal_head_cache();
 	journal_destroy_handle_cache();
-	journal_destroy_jbd_slabs();
 }
 
 static int __init journal_init(void)
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 8df5bac0b7a5..db8404514c92 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -675,7 +675,7 @@ repeat:
 			JBUFFER_TRACE(jh, "allocate memory for buffer");
 			jbd_unlock_bh_state(bh);
 			frozen_buffer =
-				jbd_slab_alloc(jh2bh(jh)->b_size,
+				jbd_alloc(jh2bh(jh)->b_size,
 						GFP_NOFS);
 			if (!frozen_buffer) {
 				printk(KERN_EMERG
@@ -735,7 +735,7 @@ done:
 
 out:
 	if (unlikely(frozen_buffer))	/* It's usually NULL */
-		jbd_slab_free(frozen_buffer, bh->b_size);
+		jbd_free(frozen_buffer, bh->b_size);
 
 	JBUFFER_TRACE(jh, "exit");
 	return error;
@@ -888,7 +888,7 @@ int journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
 
 repeat:
 	if (!jh->b_committed_data) {
-		committed_data = jbd_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+		committed_data = jbd_alloc(jh2bh(jh)->b_size, GFP_NOFS);
 		if (!committed_data) {
 			printk(KERN_EMERG "%s: No memory for committed data\n",
 				__FUNCTION__);
@@ -915,7 +915,7 @@ repeat:
 out:
 	journal_put_journal_head(jh);
 	if (unlikely(committed_data))
-		jbd_slab_free(committed_data, bh->b_size);
+		jbd_free(committed_data, bh->b_size);
 	return err;
 }
 
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index 72f522372924..1db3b684f557 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -73,14 +73,21 @@ extern int journal_enable_debug;
 #endif
 
 extern void * __jbd_kmalloc (const char *where, size_t size, gfp_t flags, int retry);
-extern void * jbd_slab_alloc(size_t size, gfp_t flags);
-extern void jbd_slab_free(void *ptr, size_t size);
-
 #define jbd_kmalloc(size, flags) \
 	__jbd_kmalloc(__FUNCTION__, (size), (flags), journal_oom_retry)
 #define jbd_rep_kmalloc(size, flags) \
 	__jbd_kmalloc(__FUNCTION__, (size), (flags), 1)
 
+static inline void *jbd_alloc(size_t size, gfp_t flags)
+{
+	return (void *)__get_free_pages(flags, get_order(size));
+}
+
+static inline void jbd_free(void *ptr, size_t size)
+{
+	free_pages((unsigned long)ptr, get_order(size));
+};
+
 #define JFS_MIN_JOURNAL_BLOCKS 1024
 
 #ifdef __KERNEL__
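
Illustration, not part of the patch: the jbd.h hunk above replaces the per-blocksize slab caches with whole-page allocations. jbd_alloc() calls __get_free_pages() with get_order(size), so every frozen-data/committed-data buffer is page aligned and cannot cross a page boundary, which is what the removed slab code achieved by forcing the slab alignment to equal the slab size. The short userspace sketch below mimics that rounding, assuming a 4096-byte page and a simplified stand-in for the kernel's get_order():

/* Userspace sketch of the get_order() rounding used by jbd_alloc(). */
#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Simplified stand-in for the kernel's get_order(): smallest order
 * such that (PAGE_SIZE << order) >= size. */
static int get_order(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1024, 2048, 4096, 8192 };	/* journal blocksizes */
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int order = get_order(sizes[i]);

		printf("b_size %5lu -> order %d (%lu bytes allocated)\n",
		       sizes[i], order, PAGE_SIZE << order);
	}
	return 0;
}

On a 4k-page system this prints order 0 (one page) for 1k, 2k and 4k buffers and order 1 for 8k buffers, which makes the trade-off visible: sub-page journal buffers now cost a full page each, in exchange for dropping the jbd_1k/jbd_2k/jbd_4k/jbd_8k caches and the SLAB_DEBUG interaction noted in the removed comment.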