
ceph: fix cap flush race reentrancy

In e9964c10 we changed cap flushing to do a delicate dance because some
inodes on the cap_dirty list could be in a migrating state (got EXPORT but
not IMPORT) in which we couldn't actually flush and move from
dirty->flushing, breaking the while (!empty) { process first } loop
structure.  It worked for a single sync thread, but was not reentrant and
triggered infinite loops when multiple syncers came along.
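To make the loop-structure point concrete, here is a schematic of the
reentrant shape being referred to (a simplified sketch, not a verbatim copy
of the kernel code; the real loop is in the ceph_flush_dirty_caps hunk
below):

	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		/* always take the *first* entry... */
		ci = list_first_entry(&mdsc->cap_dirty,
				      struct ceph_inode_info, i_dirty_item);
		/*
		 * ...and rely on processing it to move it off cap_dirty
		 * (dirty -> flushing).  If an entry can get stuck on
		 * cap_dirty (EXPORT seen, IMPORT not yet), every pass picks
		 * the same inode again and the loop never terminates; that
		 * is the hazard e9964c10 tried to dance around.
		 */
		spin_unlock(&mdsc->cap_dirty_lock);
		/* flush, which moves the inode to the flushing list */
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);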

Instead, move inodes with dirty caps to a separate cap_dirty_migrating list
when in the limbo export-but-no-import state, allowing us to go back to
the simple loop structure (which was reentrant).  This is cleaner and more
robust.

Audited the cap_dirty users and this looks fine:
list_empty(&ci->i_dirty_item) is still a reliable indicator of whether we
have dirty caps (which list we're on is irrelevant) and list_del_init()
calls still do the right thing.
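
For reference, a standalone illustration of why that audit holds (a
userspace sketch with a minimal reimplementation of the list helpers, not
kernel code): list_empty() on the item only inspects the item's own
pointers, and list_del_init() resets them, so neither depends on whether
the item is linked into cap_dirty or cap_dirty_migrating.

/* Minimal userspace demo of the list_empty()/list_del_init() audit. */
#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next; n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}
static void list_move(struct list_head *n, struct list_head *h)
{
	list_del_init(n); list_add_tail(n, h);
}

int main(void)
{
	struct list_head cap_dirty, cap_dirty_migrating, i_dirty_item;

	INIT_LIST_HEAD(&cap_dirty);
	INIT_LIST_HEAD(&cap_dirty_migrating);
	INIT_LIST_HEAD(&i_dirty_item);

	list_add_tail(&i_dirty_item, &cap_dirty);
	assert(!list_empty(&i_dirty_item));	/* "has dirty caps" */

	list_move(&i_dirty_item, &cap_dirty_migrating);
	assert(!list_empty(&i_dirty_item));	/* still dirty on either list */

	list_del_init(&i_dirty_item);
	assert(list_empty(&i_dirty_item));	/* clean, whichever list it was on */

	printf("list_empty/list_del_init behave the same on both lists\n");
	return 0;
}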

Signed-off-by: Sage Weil <sage@newdream.net>
Sage Weil 2011-05-24 11:46:31 -07:00
parent cd634fb6ee
commit db3540522e
3 changed files with 31 additions and 29 deletions

fs/ceph/caps.c

@@ -2635,6 +2635,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 			      struct ceph_mds_session *session,
 			      int *open_target_sessions)
 {
+	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
 	struct ceph_inode_info *ci = ceph_inode(inode);
 	int mds = session->s_mds;
 	unsigned mseq = le32_to_cpu(ex->migrate_seq);
@@ -2671,6 +2672,19 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
 			 * export targets, so that we get the matching IMPORT
 			 */
 			*open_target_sessions = 1;
+
+			/*
+			 * we can't flush dirty caps that we've seen the
+			 * EXPORT but no IMPORT for
+			 */
+			spin_lock(&mdsc->cap_dirty_lock);
+			if (!list_empty(&ci->i_dirty_item)) {
+				dout(" moving %p to cap_dirty_migrating\n",
+				     inode);
+				list_move(&ci->i_dirty_item,
+					  &mdsc->cap_dirty_migrating);
+			}
+			spin_unlock(&mdsc->cap_dirty_lock);
 		}
 		__ceph_remove_cap(cap);
 	}
@@ -2708,6 +2722,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
 		ci->i_cap_exporting_issued = 0;
 		ci->i_cap_exporting_mseq = 0;
 		ci->i_cap_exporting_mds = -1;
+
+		spin_lock(&mdsc->cap_dirty_lock);
+		if (!list_empty(&ci->i_dirty_item)) {
+			dout(" moving %p back to cap_dirty\n", inode);
+			list_move(&ci->i_dirty_item, &mdsc->cap_dirty);
+		}
+		spin_unlock(&mdsc->cap_dirty_lock);
 	} else {
 		dout("handle_cap_import inode %p ci %p mds%d mseq %d\n",
 		     inode, ci, mds, mseq);
@@ -2911,38 +2932,16 @@ void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
  */
 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
 {
-	struct ceph_inode_info *ci, *nci = NULL;
-	struct inode *inode, *ninode = NULL;
-	struct list_head *p, *n;
+	struct ceph_inode_info *ci;
+	struct inode *inode;

 	dout("flush_dirty_caps\n");
 	spin_lock(&mdsc->cap_dirty_lock);
-	list_for_each_safe(p, n, &mdsc->cap_dirty) {
-		if (nci) {
-			ci = nci;
-			inode = ninode;
-			ci->i_ceph_flags &= ~CEPH_I_NOFLUSH;
-			dout("flush_dirty_caps inode %p (was next inode)\n",
-			     inode);
-		} else {
-			ci = list_entry(p, struct ceph_inode_info,
-					i_dirty_item);
-			inode = igrab(&ci->vfs_inode);
-			BUG_ON(!inode);
-			dout("flush_dirty_caps inode %p\n", inode);
-		}
-		if (n != &mdsc->cap_dirty) {
-			nci = list_entry(n, struct ceph_inode_info,
-					 i_dirty_item);
-			ninode = igrab(&nci->vfs_inode);
-			BUG_ON(!ninode);
-			nci->i_ceph_flags |= CEPH_I_NOFLUSH;
-			dout("flush_dirty_caps next inode %p, noflush\n",
-			     ninode);
-		} else {
-			nci = NULL;
-			ninode = NULL;
-		}
+	while (!list_empty(&mdsc->cap_dirty)) {
+		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
+				      i_dirty_item);
+		inode = igrab(&ci->vfs_inode);
+		dout("flush_dirty_caps %p\n", inode);
 		spin_unlock(&mdsc->cap_dirty_lock);
 		if (inode) {
 			ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH,
@@ -2952,6 +2951,7 @@ void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
 		spin_lock(&mdsc->cap_dirty_lock);
 	}
 	spin_unlock(&mdsc->cap_dirty_lock);
+	dout("flush_dirty_caps done\n");
 }

 /*

fs/ceph/mds_client.c

@@ -3004,6 +3004,7 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	spin_lock_init(&mdsc->snap_flush_lock);
 	mdsc->cap_flush_seq = 0;
 	INIT_LIST_HEAD(&mdsc->cap_dirty);
+	INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
 	mdsc->num_cap_flushing = 0;
 	spin_lock_init(&mdsc->cap_dirty_lock);
 	init_waitqueue_head(&mdsc->cap_flushing_wq);

fs/ceph/mds_client.h

@@ -278,6 +278,7 @@ struct ceph_mds_client {
 	u64               cap_flush_seq;
 	struct list_head  cap_dirty;        /* inodes with dirty caps */
+	struct list_head  cap_dirty_migrating; /* ...that are migrating... */
 	int               num_cap_flushing; /* # caps we are flushing */
 	spinlock_t        cap_dirty_lock;   /* protects above items */
 	wait_queue_head_t cap_flushing_wq;