ceph: fix rdcache_gen usage and invalidate

We used to use rdcache_gen to indicate whether we "might" have cached
pages.  Now we just look at the mapping to determine that.  However, some
old behavior remains from that transition.

First, rdcache_gen == 0 no longer means we have no pages.  Cached pages
can appear at any time (presumably while we hold FILE_CACHE), regardless
of the counter's value.  We should not reset it to zero, and we should
not test whether it is zero.
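
For illustration (not from the original patch), a minimal sketch of what
"look at the mapping" means in practice.  The helper name is hypothetical;
inode->i_mapping->nrpages is the real field being consulted.

    #include <linux/fs.h>

    /* Hypothetical helper: the page cache itself, not i_rdcache_gen,
     * tells us whether this inode may still have cached pages. */
    static bool may_have_cached_pages(struct inode *inode)
    {
            return inode->i_mapping->nrpages > 0;
    }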

That means that the only purpose for rdcache_revoking is to resolve races
between new issues of FILE_CACHE and an async invalidate.  If they are
equal, we should invalidate.  On success, we decrement rdcache_revoking,
so that it is no longer equal to rdcache_gen.  Similarly, if we succeed
in doing a sync invalidate, we set revoking = gen - 1.  (This is a small
optimization to avoid doing unnecessary invalidate work and does not
affect correctness.)
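
For illustration (not from the original patch), a sketch of the gen/revoking
protocol described above.  The struct, the helper names, the simplified
locking, and the direct call to invalidate_mapping_pages() are stand-ins for
the real ceph code paths; only the counter bookkeeping follows the
description.

    #include <linux/fs.h>
    #include <linux/pagemap.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical, simplified state; the real fields live in ceph_inode_info. */
    struct rdcache_state {
            spinlock_t lock;
            u32 rdcache_gen;        /* bumped each time FILE_CACHE is issued */
            u32 rdcache_revoking;   /* gen an async invalidate was queued for */
    };

    /* FILE_CACHE newly issued: bump the generation so any already-queued
     * async invalidate (revoking == old gen) becomes stale and bails out. */
    static void sketch_issue_file_cache(struct rdcache_state *s)
    {
            spin_lock(&s->lock);
            s->rdcache_gen++;
            spin_unlock(&s->lock);
    }

    /* FILE_CACHE revoked but pages could not be dropped synchronously:
     * record which generation the queued async invalidate belongs to. */
    static void sketch_queue_async_invalidate(struct rdcache_state *s)
    {
            spin_lock(&s->lock);
            s->rdcache_revoking = s->rdcache_gen;
            spin_unlock(&s->lock);
    }

    /* Sync invalidate succeeded: mark revoking stale so a racing async
     * invalidate queued for this generation does no redundant work. */
    static void sketch_sync_invalidate_success(struct rdcache_state *s)
    {
            spin_lock(&s->lock);
            s->rdcache_revoking = s->rdcache_gen - 1;
            spin_unlock(&s->lock);
    }

    /* Async worker: invalidate only if no new FILE_CACHE grant raced in,
     * and re-check after the unlocked page invalidation. */
    static bool sketch_async_invalidate(struct rdcache_state *s,
                                        struct address_space *mapping)
    {
            u32 orig_gen;

            spin_lock(&s->lock);
            if (s->rdcache_revoking != s->rdcache_gen) {
                    spin_unlock(&s->lock);          /* stale; nevermind */
                    return false;
            }
            orig_gen = s->rdcache_gen;
            spin_unlock(&s->lock);

            invalidate_mapping_pages(mapping, 0, -1);  /* drop clean pages */

            spin_lock(&s->lock);
            if (orig_gen == s->rdcache_gen && orig_gen == s->rdcache_revoking) {
                    s->rdcache_revoking--;  /* success: revoking != gen again */
                    spin_unlock(&s->lock);
                    return true;
            }
            spin_unlock(&s->lock);
            return false;           /* raced with a new grant; give up */
    }

The invariant is that rdcache_revoking == rdcache_gen exactly while an
invalidate for the current generation is still wanted; a new FILE_CACHE
grant, a successful async invalidate, or a successful sync invalidate all
break the equality.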

Signed-off-by: Sage Weil <sage@newdream.net>
Sage Weil 2010-11-04 11:05:05 -07:00
parent feb4cc9bb4
commit cd045cb42a
3 changed files with 10 additions and 14 deletions

fs/ceph/caps.c

@@ -1430,8 +1430,8 @@ static int try_nonblocking_invalidate(struct inode *inode)
             invalidating_gen == ci->i_rdcache_gen) {
                 /* success. */
                 dout("try_nonblocking_invalidate %p success\n", inode);
-                ci->i_rdcache_gen = 0;
-                ci->i_rdcache_revoking = 0;
+                /* save any racing async invalidate some trouble */
+                ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
                 return 0;
         }
         dout("try_nonblocking_invalidate %p failed\n", inode);

fs/ceph/inode.c

@@ -1394,11 +1394,8 @@ static void ceph_invalidate_work(struct work_struct *work)
         spin_lock(&inode->i_lock);
         dout("invalidate_pages %p gen %d revoking %d\n", inode,
              ci->i_rdcache_gen, ci->i_rdcache_revoking);
-        if (ci->i_rdcache_gen == 0 ||
-            ci->i_rdcache_revoking != ci->i_rdcache_gen) {
-                BUG_ON(ci->i_rdcache_revoking > ci->i_rdcache_gen);
+        if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
                 /* nevermind! */
-                ci->i_rdcache_revoking = 0;
                 spin_unlock(&inode->i_lock);
                 goto out;
         }
@@ -1408,15 +1405,16 @@ static void ceph_invalidate_work(struct work_struct *work)
         ceph_invalidate_nondirty_pages(inode->i_mapping);
 
         spin_lock(&inode->i_lock);
-        if (orig_gen == ci->i_rdcache_gen) {
+        if (orig_gen == ci->i_rdcache_gen &&
+            orig_gen == ci->i_rdcache_revoking) {
                 dout("invalidate_pages %p gen %d successful\n", inode,
                      ci->i_rdcache_gen);
-                ci->i_rdcache_gen = 0;
-                ci->i_rdcache_revoking = 0;
+                ci->i_rdcache_revoking--;
                 check = 1;
         } else {
-                dout("invalidate_pages %p gen %d raced, gen now %d\n",
-                     inode, orig_gen, ci->i_rdcache_gen);
+                dout("invalidate_pages %p gen %d raced, now %d revoking %d\n",
+                     inode, orig_gen, ci->i_rdcache_gen,
+                     ci->i_rdcache_revoking);
         }
         spin_unlock(&inode->i_lock);

fs/ceph/super.h

@@ -293,9 +293,7 @@ struct ceph_inode_info {
         int i_rd_ref, i_rdcache_ref, i_wr_ref;
         int i_wrbuffer_ref, i_wrbuffer_ref_head;
         u32 i_shared_gen;       /* increment each time we get FILE_SHARED */
-        u32 i_rdcache_gen;      /* we increment this each time we get
-                                   FILE_CACHE. If it's non-zero, we
-                                   _may_ have cached pages. */
+        u32 i_rdcache_gen;      /* incremented each time we get FILE_CACHE. */
         u32 i_rdcache_revoking; /* RDCACHE gen to async invalidate, if any */
         struct list_head i_unsafe_writes; /* uncommitted sync writes */