diff --git a/fs/super.c b/fs/super.c
index 181d42e2abff..269d96857caa 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -195,8 +195,12 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	INIT_HLIST_NODE(&s->s_instances);
 	INIT_HLIST_BL_HEAD(&s->s_anon);
 	INIT_LIST_HEAD(&s->s_inodes);
-	list_lru_init(&s->s_dentry_lru);
-	list_lru_init(&s->s_inode_lru);
+
+	if (list_lru_init(&s->s_dentry_lru))
+		goto err_out;
+	if (list_lru_init(&s->s_inode_lru))
+		goto err_out_dentry_lru;
+
 	INIT_LIST_HEAD(&s->s_mounts);
 	init_rwsem(&s->s_umount);
 	lockdep_set_class(&s->s_umount, &type->s_umount_key);
@@ -236,6 +240,9 @@ static struct super_block *alloc_super(struct file_system_type *type, int flags)
 	}
 out:
 	return s;
+
+err_out_dentry_lru:
+	list_lru_destroy(&s->s_dentry_lru);
 err_out:
 	security_sb_free(s);
 #ifdef CONFIG_SMP
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index d46f6a3dc1de..49fdb7bed481 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1592,6 +1592,7 @@ xfs_free_buftarg(
 	struct xfs_mount	*mp,
 	struct xfs_buftarg	*btp)
 {
+	list_lru_destroy(&btp->bt_lru);
 	unregister_shrinker(&btp->bt_shrinker);
 
 	if (mp->m_flags & XFS_MOUNT_BARRIER)
@@ -1666,9 +1667,12 @@ xfs_alloc_buftarg(
 	if (!btp->bt_bdi)
 		goto error;
 
-	list_lru_init(&btp->bt_lru);
 	if (xfs_setsize_buftarg_early(btp, bdev))
 		goto error;
+
+	if (list_lru_init(&btp->bt_lru))
+		goto error;
+
 	btp->bt_shrinker.count_objects = xfs_buftarg_shrink_count;
 	btp->bt_shrinker.scan_objects = xfs_buftarg_shrink_scan;
 	btp->bt_shrinker.seeks = DEFAULT_SEEKS;
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index a29169b062e3..7f4138629a80 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -831,11 +831,18 @@ xfs_qm_init_quotainfo(
 
 	qinf = mp->m_quotainfo = kmem_zalloc(sizeof(xfs_quotainfo_t), KM_SLEEP);
 
+	if ((error = list_lru_init(&qinf->qi_lru))) {
+		kmem_free(qinf);
+		mp->m_quotainfo = NULL;
+		return error;
+	}
+
 	/*
 	 * See if quotainodes are setup, and if not, allocate them,
 	 * and change the superblock accordingly.
 	 */
 	if ((error = xfs_qm_init_quotainos(mp))) {
+		list_lru_destroy(&qinf->qi_lru);
 		kmem_free(qinf);
 		mp->m_quotainfo = NULL;
 		return error;
@@ -846,8 +853,6 @@ xfs_qm_init_quotainfo(
 	INIT_RADIX_TREE(&qinf->qi_pquota_tree, GFP_NOFS);
 	mutex_init(&qinf->qi_tree_lock);
 
-	list_lru_init(&qinf->qi_lru);
-
 	/* mutex used to serialize quotaoffs */
 	mutex_init(&qinf->qi_quotaofflock);
 
@@ -935,6 +940,7 @@ xfs_qm_destroy_quotainfo(
 	qi = mp->m_quotainfo;
 	ASSERT(qi != NULL);
 
+	list_lru_destroy(&qi->qi_lru);
 	unregister_shrinker(&qi->qi_shrinker);
 
 	if (qi->qi_uquotaip) {
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 4d02ad3badab..3ce541753c88 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -27,20 +27,11 @@ struct list_lru_node {
 } ____cacheline_aligned_in_smp;
 
 struct list_lru {
-	/*
-	 * Because we use a fixed-size array, this struct can be very big if
-	 * MAX_NUMNODES is big. If this becomes a problem this is fixable by
-	 * turning this into a pointer and dynamically allocating this to
-	 * nr_node_ids. This quantity is firwmare-provided, and still would
-	 * provide room for all nodes at the cost of a pointer lookup and an
-	 * extra allocation. Because that allocation will most likely come from
-	 * a different slab cache than the main structure holding this
-	 * structure, we may very well fail.
-	 */
-	struct list_lru_node node[MAX_NUMNODES];
+	struct list_lru_node	*node;
 	nodemask_t		active_nodes;
 };
 
+void list_lru_destroy(struct list_lru *lru);
 int list_lru_init(struct list_lru *lru);
 
 /**
diff --git a/mm/list_lru.c b/mm/list_lru.c
index f91c24188573..72467914b856 100644
--- a/mm/list_lru.c
+++ b/mm/list_lru.c
@@ -8,6 +8,7 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/list_lru.h>
+#include <linux/slab.h>
 
 bool list_lru_add(struct list_lru *lru, struct list_head *item)
 {
@@ -115,9 +116,14 @@ EXPORT_SYMBOL_GPL(list_lru_walk_node);
 int list_lru_init(struct list_lru *lru)
 {
 	int i;
+	size_t size = sizeof(*lru->node) * nr_node_ids;
+
+	lru->node = kzalloc(size, GFP_KERNEL);
+	if (!lru->node)
+		return -ENOMEM;
 
 	nodes_clear(lru->active_nodes);
-	for (i = 0; i < MAX_NUMNODES; i++) {
+	for (i = 0; i < nr_node_ids; i++) {
 		spin_lock_init(&lru->node[i].lock);
 		INIT_LIST_HEAD(&lru->node[i].list);
 		lru->node[i].nr_items = 0;
@@ -125,3 +131,9 @@ int list_lru_init(struct list_lru *lru)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(list_lru_init);
+
+void list_lru_destroy(struct list_lru *lru)
+{
+	kfree(lru->node);
+}
+EXPORT_SYMBOL_GPL(list_lru_destroy);
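
Since list_lru_init() can now fail with -ENOMEM, every caller has to check its
return value and pair a successful init with list_lru_destroy() on both the
error and teardown paths, exactly as the fs/super.c and XFS hunks above do. A
minimal sketch of the calling convention, using a hypothetical my_cache
structure that is not part of this patch:

	#include <linux/list_lru.h>

	struct my_cache {			/* hypothetical container */
		struct list_lru	objects;
	};

	static int my_cache_create(struct my_cache *c)
	{
		int error;

		/* may fail: it kzalloc()s a nr_node_ids-sized node array */
		error = list_lru_init(&c->objects);
		if (error)
			return error;
		return 0;
	}

	static void my_cache_destroy(struct my_cache *c)
	{
		/* frees the per-node array; required after a successful init */
		list_lru_destroy(&c->objects);
	}

As in the alloc_super() hunk, structures embedding several LRUs should destroy
already-initialized LRUs in reverse order when a later init fails.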