gfs2: Remove gl_spin define
Commit e66cf161 replaced the gl_spin spinlock in struct gfs2_glock with a
gl_lockref lockref and defined gl_spin as gl_lockref.lock (the spinlock in
gl_lockref). Remove that define to make the references to gl_lockref.lock
more obvious.
Signed-off-by: Andreas Gruenbacher <andreas.gruenbacher@gmail.com>
Signed-off-by: Bob Peterson <rpeterso@redhat.com>
commit f3dd164912 (parent 491e94f790)
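For context, struct lockref (include/linux/lockref.h) bundles a spinlock and a
reference count into one structure. A simplified sketch, with the cmpxchg
fast-path union member omitted:

    /*
     * Simplified sketch of struct lockref from include/linux/lockref.h;
     * the real definition wraps these two fields in a union with an
     * aligned u64 so the count can be updated by a single cmpxchg while
     * the spinlock is unlocked.
     */
    struct lockref {
            spinlock_t lock;        /* the spinlock that gl_spin aliased */
            int count;              /* the reference count */
    };

Since struct gfs2_glock embeds struct lockref gl_lockref, the old define made
gl->gl_spin and gl->gl_lockref.lock name the same spinlock; this patch simply
spells the name out at every call site.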
--- a/Documentation/filesystems/gfs2-glocks.txt
+++ b/Documentation/filesystems/gfs2-glocks.txt
@@ -5,7 +5,7 @@ This documents the basic principles of the glock state machine
 internals. Each glock (struct gfs2_glock in fs/gfs2/incore.h)
 has two main (internal) locks:

-1. A spinlock (gl_spin) which protects the internal state such
+1. A spinlock (gl_lockref.lock) which protects the internal state such
    as gl_state, gl_target and the list of holders (gl_holders)
 2. A non-blocking bit lock, GLF_LOCK, which is used to prevent other
    threads from making calls to the DLM, etc. at the same time. If a
@@ -82,8 +82,8 @@ rather than via the glock.

 Locking rules for glock operations:

-Operation     |  GLF_LOCK bit lock held |  gl_spin spinlock held
------------------------------------------------------------------
+Operation     |  GLF_LOCK bit lock held |  gl_lockref.lock spinlock held
+-------------------------------------------------------------------------
 go_xmote_th   |       Yes               |       No
 go_xmote_bh   |       Yes               |       No
 go_inval      |       Yes               |       No
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -246,8 +246,8 @@ static inline void do_error(struct gfs2_glock *gl, const int ret)
  */

 static int do_promote(struct gfs2_glock *gl)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         struct gfs2_holder *gh, *tmp;
@@ -260,10 +260,10 @@ restart:
                 if (may_grant(gl, gh)) {
                         if (gh->gh_list.prev == &gl->gl_holders &&
                             glops->go_lock) {
-                                spin_unlock(&gl->gl_spin);
+                                spin_unlock(&gl->gl_lockref.lock);
                                 /* FIXME: eliminate this eventually */
                                 ret = glops->go_lock(gh);
-                                spin_lock(&gl->gl_spin);
+                                spin_lock(&gl->gl_lockref.lock);
                                 if (ret) {
                                         if (ret == 1)
                                                 return 2;
@@ -361,7 +361,7 @@ static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
         unsigned state = ret & LM_OUT_ST_MASK;
         int rv;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         trace_gfs2_glock_state_change(gl, state);
         state_change(gl, state);
         gh = find_first_waiter(gl);
@@ -405,7 +405,7 @@ retry:
                         pr_err("wanted %u got %u\n", gl->gl_target, state);
                         GLOCK_BUG_ON(gl, 1);
                 }
-                spin_unlock(&gl->gl_spin);
+                spin_unlock(&gl->gl_lockref.lock);
                 return;
         }

@@ -414,9 +414,9 @@ retry:
                 gfs2_demote_wake(gl);
         if (state != LM_ST_UNLOCKED) {
                 if (glops->go_xmote_bh) {
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         rv = glops->go_xmote_bh(gl, gh);
-                        spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_lockref.lock);
                         if (rv) {
                                 do_error(gl, rv);
                                 goto out;
@@ -429,7 +429,7 @@ retry:
 out:
         clear_bit(GLF_LOCK, &gl->gl_flags);
 out_locked:
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 /**
@@ -441,8 +441,8 @@ out_locked:
  */

 static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         const struct gfs2_glock_operations *glops = gl->gl_ops;
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -464,7 +464,7 @@ __acquires(&gl->gl_spin)
             (gl->gl_state == LM_ST_EXCLUSIVE) ||
             (lck_flags & (LM_FLAG_TRY|LM_FLAG_TRY_1CB)))
                 clear_bit(GLF_BLOCKING, &gl->gl_flags);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (glops->go_sync)
                 glops->go_sync(gl);
         if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
@@ -485,7 +485,7 @@ __acquires(&gl->gl_spin)
                         gfs2_glock_put(gl);
         }

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
 }

 /**
@@ -513,8 +513,8 @@ static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
  */

 static void run_queue(struct gfs2_glock *gl, const int nonblock)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         struct gfs2_holder *gh = NULL;
         int ret;
@@ -596,7 +596,7 @@ static void glock_work_func(struct work_struct *work)
                 finish_xmote(gl, gl->gl_reply);
                 drop_ref = 1;
         }
-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
             gl->gl_state != LM_ST_UNLOCKED &&
             gl->gl_demote_state != LM_ST_EXCLUSIVE) {
@@ -612,7 +612,7 @@ static void glock_work_func(struct work_struct *work)
                 }
         }
         run_queue(gl, 0);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (!delay)
                 gfs2_glock_put(gl);
         else {
@@ -876,8 +876,8 @@ void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
  */

 static inline void add_to_queue(struct gfs2_holder *gh)
-__releases(&gl->gl_spin)
-__acquires(&gl->gl_spin)
+__releases(&gl->gl_lockref.lock)
+__acquires(&gl->gl_lockref.lock)
 {
         struct gfs2_glock *gl = gh->gh_gl;
         struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
@@ -926,10 +926,10 @@ fail:
 do_cancel:
         gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
         if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
-                spin_unlock(&gl->gl_spin);
+                spin_unlock(&gl->gl_lockref.lock);
                 if (sdp->sd_lockstruct.ls_ops->lm_cancel)
                         sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
-                spin_lock(&gl->gl_spin);
+                spin_lock(&gl->gl_lockref.lock);
         }
         return;

@@ -967,7 +967,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
         if (test_bit(GLF_LRU, &gl->gl_flags))
                 gfs2_glock_remove_from_lru(gl);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         add_to_queue(gh);
         if (unlikely((LM_FLAG_NOEXP & gh->gh_flags) &&
                      test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))) {
@@ -977,7 +977,7 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
                 gl->gl_lockref.count--;
         }
         run_queue(gl, 1);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         if (!(gh->gh_flags & GL_ASYNC))
                 error = gfs2_glock_wait(gh);
@@ -1010,7 +1010,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         unsigned delay = 0;
         int fast_path = 0;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         if (gh->gh_flags & GL_NOCACHE)
                 handle_callback(gl, LM_ST_UNLOCKED, 0, false);

@@ -1018,9 +1018,9 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
         if (find_first_holder(gl) == NULL) {
                 if (glops->go_unlock) {
                         GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         glops->go_unlock(gh);
-                        spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_lockref.lock);
                         clear_bit(GLF_LOCK, &gl->gl_flags);
                 }
                 if (list_empty(&gl->gl_holders) &&
@@ -1033,7 +1033,7 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
                 gfs2_glock_add_to_lru(gl);

         trace_gfs2_glock_queue(gh, 0);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (likely(fast_path))
                 return;

@@ -1217,9 +1217,9 @@ void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
                         delay = gl->gl_hold_time;
         }

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         handle_callback(gl, state, delay, true);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
                 gfs2_glock_put(gl);
 }
@@ -1259,7 +1259,7 @@ static int gfs2_should_freeze(const struct gfs2_glock *gl)
  * @gl: Pointer to the glock
  * @ret: The return value from the dlm
  *
- * The gl_reply field is under the gl_spin lock so that it is ok
+ * The gl_reply field is under the gl_lockref.lock lock so that it is ok
  * to use a bitfield shared with other glock state fields.
  */

@@ -1267,20 +1267,20 @@ void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
 {
         struct lm_lockstruct *ls = &gl->gl_name.ln_sbd->sd_lockstruct;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         gl->gl_reply = ret;

         if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_recover_flags))) {
                 if (gfs2_should_freeze(gl)) {
                         set_bit(GLF_FROZEN, &gl->gl_flags);
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         return;
                 }
         }

         gl->gl_lockref.count++;
         set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
@@ -1326,14 +1326,14 @@ __acquires(&lru_lock)
         while(!list_empty(list)) {
                 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                 list_del_init(&gl->gl_lru);
-                if (!spin_trylock(&gl->gl_spin)) {
+                if (!spin_trylock(&gl->gl_lockref.lock)) {
 add_back_to_lru:
                         list_add(&gl->gl_lru, &lru_list);
                         atomic_inc(&lru_count);
                         continue;
                 }
                 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         goto add_back_to_lru;
                 }
                 clear_bit(GLF_LRU, &gl->gl_flags);
@@ -1343,7 +1343,7 @@ add_back_to_lru:
                 WARN_ON(!test_and_clear_bit(GLF_LOCK, &gl->gl_flags));
                 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                         gl->gl_lockref.count--;
-                spin_unlock(&gl->gl_spin);
+                spin_unlock(&gl->gl_lockref.lock);
                 cond_resched_lock(&lru_lock);
         }
 }
@@ -1461,10 +1461,10 @@ static void clear_glock(struct gfs2_glock *gl)
 {
         gfs2_glock_remove_from_lru(gl);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         if (gl->gl_state != LM_ST_UNLOCKED)
                 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
         if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                 gfs2_glock_put(gl);
 }
@@ -1482,9 +1482,9 @@ void gfs2_glock_thaw(struct gfs2_sbd *sdp)

 static void dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
 {
-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         gfs2_dump_glock(seq, gl);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 static void dump_glock_func(struct gfs2_glock *gl)
@@ -1518,10 +1518,10 @@ void gfs2_glock_finish_truncate(struct gfs2_inode *ip)
         ret = gfs2_truncatei_resume(ip);
         gfs2_assert_withdraw(gl->gl_name.ln_sbd, ret == 0);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         clear_bit(GLF_LOCK, &gl->gl_flags);
         run_queue(gl, 1);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 static const char *state2str(unsigned state)
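A note on the __releases()/__acquires() markers renamed above: they are sparse
lock-context annotations, not executable code. Roughly as defined in
include/linux/compiler.h, they expand to nothing in a normal build:

    #ifdef __CHECKER__                      /* set when running sparse */
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

Functions like do_promote() and run_queue() use the pair to document that they
are entered and exited with gl_lockref.lock held but may drop and retake it
internally, which sparse's context checking can then verify.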
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -141,7 +141,7 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
         struct pid *pid;

         /* Look in glock's list of holders for one with current task as owner */
-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         pid = task_pid(current);
         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
                 if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
@@ -151,7 +151,7 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
         }
         gh = NULL;
 out:
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         return gh;
 }
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -146,11 +146,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
         struct gfs2_rgrpd *rgd;
         int error;

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         rgd = gl->gl_object;
         if (rgd)
                 gfs2_rgrp_brelse(rgd);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);

         if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
                 return;
@@ -162,11 +162,11 @@ static void rgrp_go_sync(struct gfs2_glock *gl)
         mapping_set_error(mapping, error);
         gfs2_ail_empty_gl(gl);

-        spin_lock(&gl->gl_spin);
+        spin_lock(&gl->gl_lockref.lock);
         rgd = gl->gl_object;
         if (rgd)
                 gfs2_free_clones(rgd);
-        spin_unlock(&gl->gl_spin);
+        spin_unlock(&gl->gl_lockref.lock);
 }

 /**
@@ -542,7 +542,7 @@ static int freeze_go_demote_ok(const struct gfs2_glock *gl)
  * iopen_go_callback - schedule the dcache entry for the inode to be deleted
  * @gl: the glock
  *
- * gl_spin lock is held while calling this
+ * gl_lockref.lock lock is held while calling this
  */
 static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
 {
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -334,9 +334,8 @@ struct gfs2_glock {
         struct lm_lockname gl_name;

         struct lockref gl_lockref;
-#define gl_spin gl_lockref.lock

-        /* State fields protected by gl_spin */
+        /* State fields protected by gl_lockref.lock */
         unsigned int gl_state:2,        /* Current state */
                      gl_target:2,       /* Target state */
                      gl_demote_state:2, /* State requested by remote node */
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -50,7 +50,7 @@ static void gfs2_init_glock_once(void *foo)
         struct gfs2_glock *gl = foo;

         INIT_HLIST_BL_NODE(&gl->gl_list);
-        spin_lock_init(&gl->gl_spin);
+        spin_lock_init(&gl->gl_lockref.lock);
         INIT_LIST_HEAD(&gl->gl_holders);
         INIT_LIST_HEAD(&gl->gl_lru);
         INIT_LIST_HEAD(&gl->gl_ail_list);
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -729,9 +729,9 @@ void gfs2_clear_rgrpd(struct gfs2_sbd *sdp)
                 rb_erase(n, &sdp->sd_rindex_tree);

                 if (gl) {
-                        spin_lock(&gl->gl_spin);
+                        spin_lock(&gl->gl_lockref.lock);
                         gl->gl_object = NULL;
-                        spin_unlock(&gl->gl_spin);
+                        spin_unlock(&gl->gl_lockref.lock);
                         gfs2_glock_add_to_lru(gl);
                         gfs2_glock_put(gl);
                 }
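Why the glock code can still do a plain gl->gl_lockref.count++ under the
spinlock, as seen in gfs2_glock_complete() above: lockref's lockless fast path
only attempts its cmpxchg while the embedded lock is unlocked, so a holder of
gl_lockref.lock may modify the count directly. A minimal sketch of the two
access patterns; struct glock_like is a hypothetical stand-in, not a GFS2
type:

    #include <linux/lockref.h>
    #include <linux/spinlock.h>

    /* Hypothetical stand-in for the relevant part of struct gfs2_glock. */
    struct glock_like {
            struct lockref gl_lockref;
    };

    static void take_ref_fast(struct glock_like *gl)
    {
            /* Lockless when possible: cmpxchg on the combined
             * lock+count word, no spinning. */
            lockref_get(&gl->gl_lockref);
    }

    static void take_ref_locked(struct glock_like *gl)
    {
            spin_lock(&gl->gl_lockref.lock);
            /* Holding the lock defeats the cmpxchg fast path, so a
             * plain increment is safe -- the pattern this patch
             * renames throughout. */
            gl->gl_lockref.count++;
            spin_unlock(&gl->gl_lockref.lock);
    }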