gfs2: Revise glock reference counting model

In the current glock reference counting model, a bias of one is added to
a glock's refcount when it is locked (gl->gl_state != LM_ST_UNLOCKED).
A glock is removed from the lru_list when it is enqueued, and added back
when it is dequeued.  This isn't a very appropriate model because most
glocks are held for long periods of time (for example, the inode "owns"
references to its inode and iopen glocks as long as the inode is cached
even when the glock state changes to LM_ST_UNLOCKED), and they can only
be freed when they are no longer referenced, anyway.
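
As a toy userspace illustration of the old scheme (made-up names such
as toy_glock_old; the real code is in the state_change() and
__gfs2_glock_dq() hunks below):

	/* Old model: the refcount carries a bias while the glock is in a
	 * locked state, and lru_list membership follows enqueue/dequeue. */
	struct toy_glock_old {
		int count;	/* reference count, including the lock bias */
		int locked;	/* models gl_state != LM_ST_UNLOCKED */
		int on_lru;	/* models membership on the lru_list */
	};

	void toy_state_change(struct toy_glock_old *gl, int new_locked)
	{
		if (gl->locked != new_locked)	/* add or drop the bias */
			gl->count += new_locked ? 1 : -1;
		gl->locked = new_locked;
	}

	void toy_enqueue(struct toy_glock_old *gl) { gl->on_lru = 0; }
	void toy_dequeue(struct toy_glock_old *gl) { gl->on_lru = 1; }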

Fix this by getting rid of the refcount bias for locked glocks.  That
way, we can use lockref_put_or_lock() to efficiently drop all but the
last glock reference, and put the glock onto the lru_list when the last
reference is dropped.  When find_insert_glock() returns a reference to a
cached glock, it removes the glock from the lru_list.
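
A matching toy sketch of the new scheme (again with made-up names; the
real logic is in __gfs2_glock_put_or_lock(), glock_work_func() and
find_insert_glock() in the patch below):

	#include <stdbool.h>
	#include <stdlib.h>

	struct toy_glock {
		int count;	/* plain reference count, no lock bias */
		bool locked;	/* models gl_state != LM_ST_UNLOCKED */
		bool on_lru;	/* models membership on the lru_list */
	};

	/* Dropping the last reference parks a still-locked glock on the
	 * LRU with a refcount of zero; an unlocked glock is freed. */
	void toy_put(struct toy_glock *gl)
	{
		if (--gl->count > 0)	/* lockref_put_or_lock() fast path */
			return;
		if (gl->locked) {
			gl->on_lru = true;
			return;
		}
		free(gl);
	}

	/* A cache lookup that hands out a reference takes the glock
	 * back off the LRU. */
	struct toy_glock *toy_lookup(struct toy_glock *gl)
	{
		gl->count++;
		gl->on_lru = false;
		return gl;
	}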

Dumping the "glocks" and "glstats" debugfs files also takes glock
references, but instead of removing the glocks from the lru_list in that
case as well, we leave them on the list.  This ensures that dumping
those files won't perturb the order of the glocks on the lru_list.

In addition, when the last reference to an *unlocked* glock is dropped,
we immediately free it; this preserves the preexisting behavior.  If it
later turns out that caching unlocked glocks is useful in some
situations, we can change the caching strategy.

It is currently unclear whether a glock that has no active references
can have the GLF_LFLUSH flag set.  To make sure that such a glock won't
accidentally be evicted due to memory pressure, we add a GLF_LFLUSH
check to gfs2_dispose_glock_lru().
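
Expressed as a condition (a paraphrase of the can_free_glock() helper
in the patch below, with the glock flags spelled out as plain
booleans), the eviction check becomes:

	#include <stdbool.h>

	/* An LRU glock may only be disposed of if it is not locked out
	 * (GLF_LOCK), has no remaining references, and either GLF_LFLUSH
	 * is clear or the filesystem is being killed (SDF_KILL). */
	bool toy_can_free(bool glf_lock, int count, bool glf_lflush,
			  bool sdf_kill)
	{
		return !glf_lock && count == 0 && (!glf_lflush || sdf_kill);
	}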

Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
Author: Andreas Gruenbacher
Date:   2024-04-10 06:58:00 +02:00
Parent: 30e388d573
Commit: 767fd5a016
3 changed files with 30 additions and 28 deletions

--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -237,7 +237,7 @@ static int demote_ok(const struct gfs2_glock *gl)
 }
 
-void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
+static void gfs2_glock_add_to_lru(struct gfs2_glock *gl)
 {
 	if (!(gl->gl_ops->go_flags & GLOF_LRU))
 		return;
@@ -305,6 +305,20 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
 	sdp->sd_lockstruct.ls_ops->lm_put_lock(gl);
 }
 
+static bool __gfs2_glock_put_or_lock(struct gfs2_glock *gl)
+{
+	if (lockref_put_or_lock(&gl->gl_lockref))
+		return true;
+	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
+	if (gl->gl_state != LM_ST_UNLOCKED) {
+		gl->gl_lockref.count--;
+		gfs2_glock_add_to_lru(gl);
+		spin_unlock(&gl->gl_lockref.lock);
+		return true;
+	}
+	return false;
+}
+
 /**
  * gfs2_glock_put() - Decrement reference count on glock
  * @gl: The glock to put
@@ -313,7 +327,7 @@ static void __gfs2_glock_put(struct gfs2_glock *gl)
 void gfs2_glock_put(struct gfs2_glock *gl)
 {
-	if (lockref_put_or_lock(&gl->gl_lockref))
+	if (__gfs2_glock_put_or_lock(gl))
 		return;
 
 	__gfs2_glock_put(gl);
@@ -328,10 +342,9 @@ void gfs2_glock_put(struct gfs2_glock *gl)
  */
 void gfs2_glock_put_async(struct gfs2_glock *gl)
 {
-	if (lockref_put_or_lock(&gl->gl_lockref))
+	if (__gfs2_glock_put_or_lock(gl))
 		return;
 
-	GLOCK_BUG_ON(gl, gl->gl_lockref.count != 1);
 	gfs2_glock_queue_work(gl, 0);
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -570,18 +583,6 @@ static inline struct gfs2_holder *find_last_waiter(const struct gfs2_glock *gl)
 static void state_change(struct gfs2_glock *gl, unsigned int new_state)
 {
-	int held1, held2;
-
-	held1 = (gl->gl_state != LM_ST_UNLOCKED);
-	held2 = (new_state != LM_ST_UNLOCKED);
-
-	if (held1 != held2) {
-		GLOCK_BUG_ON(gl, __lockref_is_dead(&gl->gl_lockref));
-		if (held2)
-			gl->gl_lockref.count++;
-		else
-			gl->gl_lockref.count--;
-	}
 	if (new_state != gl->gl_target)
 		/* shorten our minimum hold time */
 		gl->gl_hold_time = max(gl->gl_hold_time - GL_GLOCK_HOLD_DECR,
@@ -1139,10 +1140,14 @@ static void glock_work_func(struct work_struct *work)
 	}
 	/* Drop the remaining glock references manually. */
 	GLOCK_BUG_ON(gl, gl->gl_lockref.count < drop_refs);
 	gl->gl_lockref.count -= drop_refs;
 	if (!gl->gl_lockref.count) {
-		__gfs2_glock_put(gl);
-		return;
+		if (gl->gl_state == LM_ST_UNLOCKED) {
+			__gfs2_glock_put(gl);
+			return;
+		}
+
+		gfs2_glock_add_to_lru(gl);
 	}
 	spin_unlock(&gl->gl_lockref.lock);
 }
@@ -1178,6 +1183,8 @@ again:
 out:
 	rcu_read_unlock();
 	finish_wait(wq, &wait.wait);
+	if (gl)
+		gfs2_glock_remove_from_lru(gl);
 	return gl;
 }
@@ -1626,9 +1633,6 @@ unlock:
 		return error;
 	}
 
-	if (test_bit(GLF_LRU, &gl->gl_flags))
-		gfs2_glock_remove_from_lru(gl);
-
 	gh->gh_error = 0;
 	spin_lock(&gl->gl_lockref.lock);
 	add_to_queue(gh);
@@ -1693,9 +1697,6 @@ static void __gfs2_glock_dq(struct gfs2_holder *gh)
			fast_path = 1;
 	}
 
-	if (!test_bit(GLF_LFLUSH, &gl->gl_flags) && demote_ok(gl))
-		gfs2_glock_add_to_lru(gl);
-
 	if (unlikely(!fast_path)) {
 		gl->gl_lockref.count++;
 		if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -2008,10 +2009,12 @@ static int glock_cmp(void *priv, const struct list_head *a,
 static bool can_free_glock(struct gfs2_glock *gl)
 {
-	bool held = gl->gl_state != LM_ST_UNLOCKED;
+	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 
 	return !test_bit(GLF_LOCK, &gl->gl_flags) &&
-	       gl->gl_lockref.count == held;
+	       !gl->gl_lockref.count &&
+	       (!test_bit(GLF_LFLUSH, &gl->gl_flags) ||
+		test_bit(SDF_KILL, &sdp->sd_flags));
 }
 
 /**
@@ -2177,6 +2180,7 @@ static void thaw_glock(struct gfs2_glock *gl)
 	if (!lockref_get_not_dead(&gl->gl_lockref))
 		return;
 
+	gfs2_glock_remove_from_lru(gl);
 	spin_lock(&gl->gl_lockref.lock);
 	set_bit(GLF_HAVE_REPLY, &gl->gl_flags);
 	gfs2_glock_queue_work(gl, 0);

--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -250,7 +250,6 @@ void gfs2_flush_delete_work(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
 void gfs2_gl_dq_holders(struct gfs2_sbd *sdp);
 void gfs2_glock_thaw(struct gfs2_sbd *sdp);
-void gfs2_glock_add_to_lru(struct gfs2_glock *gl);
 void gfs2_glock_free(struct gfs2_glock *gl);
 void gfs2_glock_free_later(struct gfs2_glock *gl);

--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1524,7 +1524,6 @@ out:
 	if (ip->i_gl) {
 		glock_clear_object(ip->i_gl, ip);
 		wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
-		gfs2_glock_add_to_lru(ip->i_gl);
 		gfs2_glock_put_eventually(ip->i_gl);
 		rcu_assign_pointer(ip->i_gl, NULL);
 	}