
xfs: simplify xfs_iomap_eof_align_last_fsb

By open coding xfs_bmap_last_extent instead of calling it through a
double indirection, we don't need to handle an error return that
can't happen, given that we are guaranteed to have the extent list in
memory already.  Also simplify the calling conventions a little and
move the extent list assert from the only caller into the function.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Christoph Hellwig 2019-10-30 12:24:57 -07:00 committed by Darrick J. Wong
parent 249bd9087a
commit ae7e403fa5
3 changed files with 22 additions and 50 deletions
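
In short: the old path went xfs_iomap_eof_align_last_fsb() -> xfs_bmap_eof() -> xfs_bmap_last_extent() and had to propagate an error return that cannot occur once the extent list is in core; the new code reads the last extent straight off the in-core extent tree. A minimal sketch of that open-coded check follows (the helper name is made up for illustration; the authoritative change is the diff to fs/xfs/xfs_iomap.c below):

	/*
	 * Hypothetical helper illustrating the open-coded lookup.  Assumes
	 * the caller holds the ilock and the data fork extent list is
	 * already loaded (XFS_IFEXTENTS), so there is no error to return.
	 */
	static bool
	xfs_off_beyond_last_extent(
		struct xfs_ifork	*ifp,
		xfs_fileoff_t		off)
	{
		struct xfs_iext_cursor	icur;
		struct xfs_bmbt_irec	irec;

		xfs_iext_last(ifp, &icur);	/* position cursor at the fork's last extent */
		if (!xfs_iext_get_extent(ifp, &icur, &irec))
			return true;		/* empty fork: every offset is past EOF */
		return off >= irec.br_startoff + irec.br_blockcount;
	}

With such a check in place, xfs_iomap_eof_align_last_fsb() can simply return the rounded-up offset when it lies beyond the last mapped extent, and the original end_fsb otherwise, with no error path at all.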

fs/xfs/xfs_bmap_util.c

@@ -179,29 +179,6 @@ xfs_bmap_rtalloc(
 }
 #endif /* CONFIG_XFS_RT */
 
-/*
- * Check if the endoff is outside the last extent. If so the caller will grow
- * the allocation to a stripe unit boundary. All offsets are considered outside
- * the end of file for an empty fork, so 1 is returned in *eof in that case.
- */
-int
-xfs_bmap_eof(
-	struct xfs_inode	*ip,
-	xfs_fileoff_t		endoff,
-	int			whichfork,
-	int			*eof)
-{
-	struct xfs_bmbt_irec	rec;
-	int			error;
-
-	error = xfs_bmap_last_extent(NULL, ip, whichfork, &rec, eof);
-	if (error || *eof)
-		return error;
-
-	*eof = endoff >= rec.br_startoff + rec.br_blockcount;
-	return 0;
-}
-
 /*
  * Extent tree block counting routines.
  */

fs/xfs/xfs_bmap_util.h

@@ -30,8 +30,6 @@ xfs_bmap_rtalloc(struct xfs_bmalloca *ap)
 }
 #endif /* CONFIG_XFS_RT */
 
-int	xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
-		int whichfork, int *eof);
 int	xfs_bmap_punch_delalloc_range(struct xfs_inode *ip,
 	xfs_fileoff_t start_fsb, xfs_fileoff_t length);

fs/xfs/xfs_iomap.c

@@ -156,25 +156,33 @@ xfs_eof_alignment(
 	return align;
 }
 
-STATIC int
+/*
+ * Check if last_fsb is outside the last extent, and if so grow it to the next
+ * stripe unit boundary.
+ */
+static xfs_fileoff_t
 xfs_iomap_eof_align_last_fsb(
 	struct xfs_inode	*ip,
-	xfs_extlen_t		extsize,
-	xfs_fileoff_t		*last_fsb)
+	xfs_fileoff_t		end_fsb)
 {
-	xfs_extlen_t		align = xfs_eof_alignment(ip, extsize);
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
+	xfs_extlen_t		align = xfs_eof_alignment(ip, extsz);
+	struct xfs_bmbt_irec	irec;
+	struct xfs_iext_cursor	icur;
+
+	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
 
 	if (align) {
-		xfs_fileoff_t	new_last_fsb = roundup_64(*last_fsb, align);
-		int		eof, error;
+		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);
 
-		error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
-		if (error)
-			return error;
-		if (eof)
-			*last_fsb = new_last_fsb;
+		xfs_iext_last(ifp, &icur);
+		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
+		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
+			return aligned_end_fsb;
 	}
-	return 0;
+	return end_fsb;
 }
 
 int
@@ -206,19 +214,8 @@ xfs_iomap_write_direct(
 	ASSERT(xfs_isilocked(ip, lockmode));
 
-	if ((offset + count) > XFS_ISIZE(ip)) {
-		/*
-		 * Assert that the in-core extent list is present since this can
-		 * call xfs_iread_extents() and we only have the ilock shared.
-		 * This should be safe because the lock was held around a bmapi
-		 * call in the caller and we only need it to access the in-core
-		 * list.
-		 */
-		ASSERT(XFS_IFORK_PTR(ip, XFS_DATA_FORK)->if_flags &
-								XFS_IFEXTENTS);
-		error = xfs_iomap_eof_align_last_fsb(ip, extsz, &last_fsb);
-		if (error)
-			goto out_unlock;
+	if (offset + count > XFS_ISIZE(ip)) {
+		last_fsb = xfs_iomap_eof_align_last_fsb(ip, last_fsb);
 	} else {
 		if (nmaps && (imap->br_startblock == HOLESTARTBLOCK))
 			last_fsb = min(last_fsb, (xfs_fileoff_t)