[XFS] Add a greedy allocation interface, allocating within a min/max size range.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26803a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
This commit is contained in:
Nathan Scott 2006-09-28 11:03:27 +10:00 committed by Tim Shimmin
parent 572d95f49f
commit 77e4635ae1
5 changed files with 34 additions and 31 deletions

View file

@ -68,6 +68,22 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
return ptr; return ptr;
} }
/*
 * Greedy zeroed allocation: try for maxsize bytes, backing off by
 * halving the request on each failure.  Once the request would drop
 * to minsize or below, clamp it to minsize and switch to KM_SLEEP so
 * the final attempt is guaranteed to succeed.  The size actually
 * obtained is reported back through *size.
 */
void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
		   unsigned int __nocast flags)
{
	void	*ptr;

	for (;;) {
		ptr = kmem_zalloc(maxsize, flags);
		if (ptr)
			break;
		/* Back off toward the floor; the minsize attempt must sleep. */
		maxsize >>= 1;
		if (maxsize <= minsize) {
			maxsize = minsize;
			flags = KM_SLEEP;
		}
	}
	*size = maxsize;
	return ptr;
}
void void
kmem_free(void *ptr, size_t size) kmem_free(void *ptr, size_t size)
{ {

View file

@ -55,8 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
} }
extern void *kmem_alloc(size_t, unsigned int __nocast); extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast); extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void kmem_free(void *, size_t); extern void kmem_free(void *, size_t);
/* /*

View file

@ -112,17 +112,16 @@ xfs_Gqm_init(void)
{ {
xfs_dqhash_t *udqhash, *gdqhash; xfs_dqhash_t *udqhash, *gdqhash;
xfs_qm_t *xqm; xfs_qm_t *xqm;
uint i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE; uint i, hsize;
/* /*
* Initialize the dquot hash tables. * Initialize the dquot hash tables.
*/ */
hsize = XFS_QM_HASHSIZE_HIGH; udqhash = kmem_zalloc_greedy(&hsize,
while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) { XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW) KM_SLEEP | KM_MAYFAIL | KM_LARGE);
flags = KM_SLEEP; gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
} hsize /= sizeof(xfs_dqhash_t);
gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
ndquot = hsize << 8; ndquot = hsize << 8;
xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP); xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);

View file

@ -50,7 +50,7 @@ void
xfs_ihash_init(xfs_mount_t *mp) xfs_ihash_init(xfs_mount_t *mp)
{ {
__uint64_t icount; __uint64_t icount;
uint i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE; uint i;
if (!mp->m_ihsize) { if (!mp->m_ihsize) {
icount = mp->m_maxicount ? mp->m_maxicount : icount = mp->m_maxicount ? mp->m_maxicount :
@ -61,14 +61,13 @@ xfs_ihash_init(xfs_mount_t *mp)
(64 * NBPP) / sizeof(xfs_ihash_t)); (64 * NBPP) / sizeof(xfs_ihash_t));
} }
while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize * mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
sizeof(xfs_ihash_t), flags))) { NBPC * sizeof(xfs_ihash_t),
if ((mp->m_ihsize >>= 1) <= NBPP) mp->m_ihsize * sizeof(xfs_ihash_t),
flags = KM_SLEEP; KM_SLEEP | KM_MAYFAIL | KM_LARGE);
} mp->m_ihsize /= sizeof(xfs_ihash_t);
for (i = 0; i < mp->m_ihsize; i++) { for (i = 0; i < mp->m_ihsize; i++)
rwlock_init(&(mp->m_ihash[i].ih_lock)); rwlock_init(&(mp->m_ihash[i].ih_lock));
}
} }
/* /*
@ -77,7 +76,7 @@ xfs_ihash_init(xfs_mount_t *mp)
/*
 * Free the inode hash table for this mount and clear the stale
 * pointer so a later use-after-free is caught as a NULL dereference.
 * (Reconstructed from the garbled side-by-side diff text, which had
 * both diff columns fused onto each line.)
 */
void
xfs_ihash_free(xfs_mount_t *mp)
{
	kmem_free(mp->m_ihash, mp->m_ihsize * sizeof(xfs_ihash_t));
	mp->m_ihash = NULL;
}

View file

@ -326,7 +326,6 @@ xfs_bulkstat(
int i; /* loop index */ int i; /* loop index */
int icount; /* count of inodes good in irbuf */ int icount; /* count of inodes good in irbuf */
int irbsize; /* size of irec buffer in bytes */ int irbsize; /* size of irec buffer in bytes */
unsigned int kmflags; /* flags for allocating irec buffer */
xfs_ino_t ino; /* inode number (filesystem) */ xfs_ino_t ino; /* inode number (filesystem) */
xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */ xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
@ -371,19 +370,8 @@ xfs_bulkstat(
(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog); (XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
nimask = ~(nicluster - 1); nimask = ~(nicluster - 1);
nbcluster = nicluster >> mp->m_sb.sb_inopblog; nbcluster = nicluster >> mp->m_sb.sb_inopblog;
/* irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
* Allocate a local buffer for inode cluster btree records. KM_SLEEP | KM_MAYFAIL | KM_LARGE);
* This caps our maximum readahead window (so don't be stingy)
* but we must handle the case where we can't get a contiguous
* multi-page buffer, so we drop back toward pagesize; the end
* case we ensure succeeds, via appropriate allocation flags.
*/
irbsize = NBPP * 4;
kmflags = KM_SLEEP | KM_MAYFAIL;
while (!(irbuf = kmem_alloc(irbsize, kmflags))) {
if ((irbsize >>= 1) <= NBPP)
kmflags = KM_SLEEP;
}
nirbuf = irbsize / sizeof(*irbuf); nirbuf = irbsize / sizeof(*irbuf);
/* /*