xfs: avoid CIL allocation during insert

Now that we have the size of the log vector that has been allocated,
we can determine if we need to allocate a new log vector for
formatting and insertion. We only need to allocate a new vector if
it won't fit into the existing buffer.

However, we need to hold the CIL context lock while we do this so
that we can't race with a push draining the currently queued log
vectors. It is safe to do this as long as we do GFP_NOFS allocation
to avoid memory allocation recursing into the filesystem.
Hence we can safely overwrite the existing log vector on the CIL if
it is large enough to hold all the dirty regions of the current
item.

Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Signed-off-by: Ben Myers <bpm@sgi.com>
This commit is contained in:
Dave Chinner 2013-08-12 20:50:06 +10:00 committed by Ben Myers
parent 7492c5b42d
commit f5baac354d
2 changed files with 33 additions and 20 deletions

View file

@ -185,6 +185,22 @@ xlog_cil_prepare_log_vecs(
buf_size = sizeof(struct xfs_log_vec) + nbytes + buf_size = sizeof(struct xfs_log_vec) + nbytes +
niovecs * sizeof(struct xfs_log_iovec); niovecs * sizeof(struct xfs_log_iovec);
/* compare to existing item size */
if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
/* same or smaller, optimise common overwrite case */
lv = lip->li_lv;
lv->lv_next = NULL;
if (ordered)
goto insert;
/* Ensure the lv is set up according to ->iop_size */
lv->lv_niovecs = niovecs;
lv->lv_buf = (char *)lv + buf_size - nbytes;
lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
goto insert;
}
/* allocate new data chunk */ /* allocate new data chunk */
lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS); lv = kmem_zalloc(buf_size, KM_SLEEP|KM_NOFS);
lv->lv_item = lip; lv->lv_item = lip;
@ -204,8 +220,8 @@ xlog_cil_prepare_log_vecs(
lv->lv_buf = (char *)lv + buf_size - nbytes; lv->lv_buf = (char *)lv + buf_size - nbytes;
lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv); lv->lv_buf_len = xlog_cil_lv_item_format(lip, lv);
ASSERT(lv->lv_buf_len <= nbytes);
insert: insert:
ASSERT(lv->lv_buf_len <= nbytes);
if (!ret_lv) if (!ret_lv)
ret_lv = lv; ret_lv = lv;
else else
@ -230,7 +246,17 @@ xfs_cil_prepare_item(
{ {
struct xfs_log_vec *old = lv->lv_item->li_lv; struct xfs_log_vec *old = lv->lv_item->li_lv;
if (old) { if (!old) {
/* new lv, must pin the log item */
ASSERT(!lv->lv_item->li_lv);
if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
*len += lv->lv_buf_len;
*diff_iovecs += lv->lv_niovecs;
}
lv->lv_item->li_ops->iop_pin(lv->lv_item);
} else if (old != lv) {
/* existing lv on log item, space used is a delta */ /* existing lv on log item, space used is a delta */
ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) || ASSERT((old->lv_buf && old->lv_buf_len && old->lv_niovecs) ||
old->lv_buf_len == XFS_LOG_VEC_ORDERED); old->lv_buf_len == XFS_LOG_VEC_ORDERED);
@ -249,15 +275,8 @@ xfs_cil_prepare_item(
*diff_iovecs += lv->lv_niovecs - old->lv_niovecs; *diff_iovecs += lv->lv_niovecs - old->lv_niovecs;
kmem_free(old); kmem_free(old);
} else { } else {
/* new lv, must pin the log item */ /* re-used lv */
ASSERT(!lv->lv_item->li_lv); /* XXX: can't account for len/diff_iovecs yet */
if (lv->lv_buf_len != XFS_LOG_VEC_ORDERED) {
*len += lv->lv_buf_len;
*diff_iovecs += lv->lv_niovecs;
}
IOP_PIN(lv->lv_item);
} }
/* attach new log vector to log item */ /* attach new log vector to log item */
@ -733,18 +752,13 @@ xfs_log_commit_cil(
if (flags & XFS_TRANS_RELEASE_LOG_RES) if (flags & XFS_TRANS_RELEASE_LOG_RES)
log_flags = XFS_LOG_REL_PERM_RESERV; log_flags = XFS_LOG_REL_PERM_RESERV;
/* /* lock out background commit */
* Do all the hard work of formatting items (including memory down_read(&log->l_cilp->xc_ctx_lock);
* allocation) outside the CIL context lock. This prevents stalling CIL
* pushes when we are low on memory and a transaction commit spends a
* lot of time in memory reclaim.
*/
log_vector = xlog_cil_prepare_log_vecs(tp); log_vector = xlog_cil_prepare_log_vecs(tp);
if (!log_vector) if (!log_vector)
return ENOMEM; return ENOMEM;
/* lock out background commit */
down_read(&log->l_cilp->xc_ctx_lock);
if (commit_lsn) if (commit_lsn)
*commit_lsn = log->l_cilp->xc_ctx->sequence; *commit_lsn = log->l_cilp->xc_ctx->sequence;

View file

@ -77,7 +77,6 @@ struct xfs_item_ops {
void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t);
}; };
#define IOP_PIN(ip) (*(ip)->li_ops->iop_pin)(ip)
#define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove) #define IOP_UNPIN(ip, remove) (*(ip)->li_ops->iop_unpin)(ip, remove)
#define IOP_PUSH(ip, list) (*(ip)->li_ops->iop_push)(ip, list) #define IOP_PUSH(ip, list) (*(ip)->li_ops->iop_push)(ip, list)
#define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip) #define IOP_UNLOCK(ip) (*(ip)->li_ops->iop_unlock)(ip)