[XFS] Minor code rearranging and cleanup to prevent some coverity false positives.

SGI-PV: 955502
SGI-Modid: xfs-linux-melb:xfs-kern:26805a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
Author:    Nathan Scott, 2006-09-28 11:03:44 +10:00 (committed by Tim Shimmin)
Parent:    b627259c60
Commit:    d432c80e68
3 changed files with 36 additions and 37 deletions


@@ -1477,8 +1477,10 @@ xfs_alloc_ag_vextent_small(
 	/*
 	 * Can't allocate from the freelist for some reason.
 	 */
-	else
+	else {
+		fbno = NULLAGBLOCK;
 		flen = 0;
+	}
 	/*
 	 * Can't do the allocation, give up.
 	 */
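
The change above gives the freelist-failure path an explicit block that sets both outputs instead of leaving fbno untouched. Below is a minimal standalone sketch of the same pattern, not the XFS code itself: small_alloc(), NULLBLOCK and the out-parameters are invented stand-ins for the kernel helpers. The point is that once every branch assigns both out-parameters, a static analyser such as Coverity has no path on which the caller could read an unset value.

#include <stdio.h>

#define NULLBLOCK ((long)-1)	/* stand-in for NULLAGBLOCK in this sketch */

/*
 * Illustrative only: mimics the shape of the fixed code path.  Every
 * branch now assigns both outputs, so an analyser cannot flag *bno as
 * possibly uninitialized when the freelist path is not taken.
 */
static void small_alloc(int freelist_ok, long *bno, long *len)
{
	if (freelist_ok) {
		*bno = 42;		/* pretend block taken from the freelist */
		*len = 1;
	} else {
		*bno = NULLBLOCK;	/* explicitly "no block" */
		*len = 0;
	}
}

int main(void)
{
	long bno, len;

	small_alloc(0, &bno, &len);
	printf("bno=%ld len=%ld\n", bno, len);
	return 0;
}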


@@ -1054,7 +1054,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 	xfs_da_node_entry_t *btree;
 	xfs_dablk_t blkno;
 	int probe, span, max, error, retval;
-	xfs_dahash_t hashval;
+	xfs_dahash_t hashval, btreehashval;
 	xfs_da_args_t *args;
 
 	args = state->args;
@@ -1079,30 +1079,32 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 			return(error);
 		}
 		curr = blk->bp->data;
-		ASSERT(be16_to_cpu(curr->magic) == XFS_DA_NODE_MAGIC ||
-		       be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC ||
-		       be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC);
+		blk->magic = be16_to_cpu(curr->magic);
+		ASSERT(blk->magic == XFS_DA_NODE_MAGIC ||
+		       blk->magic == XFS_DIR2_LEAFN_MAGIC ||
+		       blk->magic == XFS_ATTR_LEAF_MAGIC);
 
 		/*
 		 * Search an intermediate node for a match.
 		 */
-		blk->magic = be16_to_cpu(curr->magic);
 		if (blk->magic == XFS_DA_NODE_MAGIC) {
 			node = blk->bp->data;
-			blk->hashval = be32_to_cpu(node->btree[be16_to_cpu(node->hdr.count)-1].hashval);
+			max = be16_to_cpu(node->hdr.count);
+			btreehashval = node->btree[max-1].hashval;
+			blk->hashval = be32_to_cpu(btreehashval);
 
 			/*
 			 * Binary search.  (note: small blocks will skip loop)
 			 */
-			max = be16_to_cpu(node->hdr.count);
 			probe = span = max / 2;
 			hashval = args->hashval;
 			for (btree = &node->btree[probe]; span > 4;
				   btree = &node->btree[probe]) {
 				span /= 2;
-				if (be32_to_cpu(btree->hashval) < hashval)
+				btreehashval = be32_to_cpu(btree->hashval);
+				if (btreehashval < hashval)
 					probe += span;
-				else if (be32_to_cpu(btree->hashval) > hashval)
+				else if (btreehashval > hashval)
 					probe -= span;
 				else
 					break;
@@ -1133,10 +1135,10 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 				blk->index = probe;
 				blkno = be32_to_cpu(btree->before);
 			}
-		} else if (be16_to_cpu(curr->magic) == XFS_ATTR_LEAF_MAGIC) {
+		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
 			blk->hashval = xfs_attr_leaf_lasthash(blk->bp, NULL);
 			break;
-		} else if (be16_to_cpu(curr->magic) == XFS_DIR2_LEAFN_MAGIC) {
+		} else if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
 			blk->hashval = xfs_dir2_leafn_lasthash(blk->bp, NULL);
 			break;
 		}
@@ -1152,11 +1154,13 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 		if (blk->magic == XFS_DIR2_LEAFN_MAGIC) {
 			retval = xfs_dir2_leafn_lookup_int(blk->bp, args,
							&blk->index, state);
-		}
-		else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
+		} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
 			retval = xfs_attr_leaf_lookup_int(blk->bp, args);
 			blk->index = args->index;
 			args->blkno = blk->blkno;
+		} else {
+			ASSERT(0);
+			return XFS_ERROR(EFSCORRUPTED);
 		}
 		if (((retval == ENOENT) || (retval == ENOATTR)) &&
 		    (blk->hashval == args->hashval)) {
@@ -1166,8 +1170,7 @@ xfs_da_node_lookup_int(xfs_da_state_t *state, int *result)
 				return(error);
 			if (retval == 0) {
 				continue;
-			}
-			else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
+			} else if (blk->magic == XFS_ATTR_LEAF_MAGIC) {
 				/* path_shift() gives ENOENT */
 				retval = XFS_ERROR(ENOATTR);
 			}
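
Two patterns run through the hunks above: the on-disk magic is decoded once into blk->magic (and the in-loop hash into btreehashval) instead of being re-converted at every comparison, and the lookup dispatch gains an explicit else that asserts and returns EFSCORRUPTED, so retval is visibly assigned before it is ever read. A hedged, self-contained sketch of the same shape follows; the enum values, lookup() and its arguments are invented for illustration and are not the XFS API.

#include <assert.h>
#include <stdio.h>

enum { NODE_MAGIC = 0x1111, LEAF_MAGIC = 0x2222 };	/* arbitrary values for this sketch */

/* Decode the on-disk magic once (be16_to_cpu in the real code), then branch on the cached copy. */
static int lookup(unsigned short disk_magic, int *retval)
{
	unsigned short magic = disk_magic;	/* cached once, reused below */

	if (magic == NODE_MAGIC) {
		*retval = 1;		/* pretend: descend into the node */
	} else if (magic == LEAF_MAGIC) {
		*retval = 0;		/* pretend: found in the leaf */
	} else {
		/* explicit failure path: we never fall through with *retval unset */
		assert(0);
		return -1;		/* corrupted block */
	}
	return 0;
}

int main(void)
{
	int rv;

	if (lookup(LEAF_MAGIC, &rv) == 0)
		printf("lookup result %d\n", rv);
	return 0;
}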


@@ -1976,7 +1976,10 @@ xfs_growfs_rt(
 	if ((error = xfs_growfs_rt_alloc(mp, rsumblocks, nrsumblocks,
			mp->m_sb.sb_rsumino)))
 		return error;
-	nmp = NULL;
+	/*
+	 * Allocate a new (fake) mount/sb.
+	 */
+	nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
 	/*
 	 * Loop over the bitmap blocks.
 	 * We will do everything one bitmap block at a time.
@@ -1987,10 +1990,6 @@ xfs_growfs_rt(
		     ((sbp->sb_rextents & ((1 << mp->m_blkbit_log) - 1)) != 0);
 	     bmbno < nrbmblocks;
 	     bmbno++) {
-		/*
-		 * Allocate a new (fake) mount/sb.
-		 */
-		nmp = kmem_alloc(sizeof(*nmp), KM_SLEEP);
 		*nmp = *mp;
 		nsbp = &nmp->m_sb;
 		/*
@@ -2018,13 +2017,13 @@ xfs_growfs_rt(
 		cancelflags = 0;
 		if ((error = xfs_trans_reserve(tp, 0,
				XFS_GROWRTFREE_LOG_RES(nmp), 0, 0, 0)))
-			goto error_exit;
+			break;
 		/*
 		 * Lock out other callers by grabbing the bitmap inode lock.
 		 */
 		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rbmino, 0,
				XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			break;
 		ASSERT(ip == mp->m_rbmip);
 		/*
 		 * Update the bitmap inode's size.
@@ -2038,7 +2037,7 @@ xfs_growfs_rt(
 		 */
 		if ((error = xfs_trans_iget(mp, tp, mp->m_sb.sb_rsumino, 0,
				XFS_ILOCK_EXCL, &ip)))
-			goto error_exit;
+			break;
 		ASSERT(ip == mp->m_rsumip);
 		/*
 		 * Update the summary inode's size.
@@ -2053,7 +2052,7 @@ xfs_growfs_rt(
 		    mp->m_rsumlevels != nmp->m_rsumlevels) {
 			error = xfs_rtcopy_summary(mp, nmp, tp);
 			if (error)
-				goto error_exit;
+				break;
 		}
 		/*
 		 * Update superblock fields.
@@ -2080,17 +2079,12 @@ xfs_growfs_rt(
 		error = xfs_rtfree_range(nmp, tp, sbp->sb_rextents,
			nsbp->sb_rextents - sbp->sb_rextents, &bp, &sumbno);
 		if (error)
-			goto error_exit;
+			break;
 		/*
 		 * Mark more blocks free in the superblock.
 		 */
 		xfs_trans_mod_sb(tp, XFS_TRANS_SB_FREXTENTS,
			nsbp->sb_rextents - sbp->sb_rextents);
-		/*
-		 * Free the fake mp structure.
-		 */
-		kmem_free(nmp, sizeof(*nmp));
-		nmp = NULL;
 		/*
 		 * Update mp values into the real mp structure.
 		 */
@@ -2101,15 +2095,15 @@ xfs_growfs_rt(
 		 */
 		xfs_trans_commit(tp, 0, NULL);
 	}
-	return 0;
+
+	if (error)
+		xfs_trans_cancel(tp, cancelflags);
 
 	/*
-	 * Error paths come here.
+	 * Free the fake mp structure.
 	 */
-error_exit:
-	if (nmp)
-		kmem_free(nmp, sizeof(*nmp));
-	xfs_trans_cancel(tp, cancelflags);
+	kmem_free(nmp, sizeof(*nmp));
+
 	return error;
 }
 
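
The xfs_growfs_rt() rework above hoists the allocation of the fake mount structure out of the bitmap-block loop, turns each goto error_exit into a break, and funnels every exit through one unconditional kmem_free at the end, removing the conditional free that a checker could misread as a leak or double free. The sketch below mirrors that shape with plain malloc/free; struct scratch, do_step() and grow() are invented names for illustration, not the XFS code.

#include <stdio.h>
#include <stdlib.h>

/* Sketch only: scratch stands in for the fake xfs_mount copy. */
struct scratch { int step; };

static int do_step(struct scratch *s, int i)
{
	s->step = i;
	return (i == 3) ? -1 : 0;	/* pretend step 3 fails */
}

static int grow(int nsteps)
{
	struct scratch *s = malloc(sizeof(*s));	/* allocate once, before the loop */
	int error = 0;
	int i;

	if (!s)
		return -1;

	for (i = 0; i < nsteps; i++) {
		error = do_step(s, i);
		if (error)
			break;		/* was: goto error_exit */
	}
	if (error)
		fprintf(stderr, "failed at step %d\n", s->step);

	free(s);	/* single, unconditional release on every path */
	return error;
}

int main(void)
{
	return grow(5) ? 1 : 0;
}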