[GFS2] Clean up the glock core
This patch implements a number of cleanups to the core of the GFS2 glock
code. As a result a lot of code is removed. It looks like a really big
change, but actually a large part of this patch is either removing or
moving existing code.

There are some new bits too though, such as the new run_queue() function
which is considerably streamlined. Highlights of this patch include:

 o Fixes a cluster coherency bug during SH -> EX lock conversions
 o Removes the "glmutex" code in favour of a single bit lock
 o Removes the ->go_xmote_bh() for inodes since it was duplicating ->go_lock()
 o We now only use the ->lm_lock() function for both locks and unlocks
   (i.e. unlock is a lock with target mode LM_ST_UNLOCKED)
 o The fast path is considerably shorter, giving performance gains
   especially with lock_nolock
 o The glock_workqueue is now used for all the callbacks from the DLM
   which allows us to simplify the lock_dlm module (see following patch)
 o The way is now open to make further changes such as eliminating the two
   threads (gfs2_glockd and gfs2_scand) in favour of a more efficient scheme.

This patch has undergone extensive testing with various test suites so it
should be pretty stable by now.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
Cc: Bob Peterson <rpeterso@redhat.com>
This commit is contained in:
parent 543cf4cb3f
commit 6802e3400f

fs/gfs2/glock.c: 1611 lines changed (diff suppressed because it is too large)
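One of the bullets in the message above notes that unlock is now just a lock request with target mode LM_ST_UNLOCKED, handled through the single ->lm_lock() entry point; the gdlm_lock() and nolock_lock() hunks below show the actual change. As a rough illustration of that dispatch only, here is a minimal userspace C sketch: the LM_ST_* values and the mock_unlock()/mock_lock() helpers are stand-ins for this example, not the kernel's lm_interface API.

#include <stdio.h>

#define LM_ST_UNLOCKED  0
#define LM_ST_EXCLUSIVE 1
#define LM_ST_SHARED    3

static unsigned int mock_unlock(unsigned int cur_state)
{
        /* Stand-in for the lock module's real unlock path (e.g. gdlm_unlock()). */
        printf("unlock path: dropping state %u\n", cur_state);
        return LM_ST_UNLOCKED;
}

static unsigned int mock_lock(unsigned int cur_state, unsigned int req_state)
{
        /* A request for LM_ST_UNLOCKED is routed to the unlock path,
         * so callers only ever need the single lock entry point. */
        if (req_state == LM_ST_UNLOCKED)
                return mock_unlock(cur_state);
        printf("lock path: %u -> %u\n", cur_state, req_state);
        return req_state;
}

int main(void)
{
        unsigned int st;

        st = mock_lock(LM_ST_UNLOCKED, LM_ST_SHARED);   /* acquire SH */
        st = mock_lock(st, LM_ST_EXCLUSIVE);            /* SH -> EX conversion */
        mock_lock(st, LM_ST_UNLOCKED);                  /* unlock via the same call */
        return 0;
}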
@@ -26,11 +26,8 @@
 #define GL_SKIP 0x00000100
 #define GL_ATIME 0x00000200
 #define GL_NOCACHE 0x00000400
-#define GL_FLOCK 0x00000800
-#define GL_NOCANCEL 0x00001000
 
 #define GLR_TRYFAILED 13
-#define GLR_CANCELED 14
 
 static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
 {
@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
         spin_lock(&gl->gl_spin);
         pid = task_pid(current);
         list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+                if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+                        break;
                 if (gh->gh_owner_pid == pid)
                         goto out;
         }
@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
 {
         int ret;
         spin_lock(&gl->gl_spin);
-        ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3);
+        ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
         spin_unlock(&gl->gl_spin);
         return ret;
 }
@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
 int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
 void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
 
 /**
  * gfs2_glock_nq_init - intialize a holder and enqueue it on a glock
@@ -130,7 +130,6 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
 void gfs2_lvb_unhold(struct gfs2_glock *gl);
-
 void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
 
 void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
 void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
 void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
@@ -13,6 +13,7 @@
 #include <linux/buffer_head.h>
 #include <linux/gfs2_ondisk.h>
 #include <linux/lm_interface.h>
+#include <linux/bio.h>
 
 #include "gfs2.h"
 #include "incore.h"
@@ -171,26 +172,6 @@ static void inode_go_sync(struct gfs2_glock *gl)
         }
 }
 
-/**
- * inode_go_xmote_bh - After promoting/demoting a glock
- * @gl: the glock
- *
- */
-
-static void inode_go_xmote_bh(struct gfs2_glock *gl)
-{
-        struct gfs2_holder *gh = gl->gl_req_gh;
-        struct buffer_head *bh;
-        int error;
-
-        if (gl->gl_state != LM_ST_UNLOCKED &&
-            (!gh || !(gh->gh_flags & GL_SKIP))) {
-                error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
-                if (!error)
-                        brelse(bh);
-        }
-}
-
 /**
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
@@ -266,6 +247,26 @@ static int inode_go_lock(struct gfs2_holder *gh)
         return error;
 }
 
+/**
+ * inode_go_dump - print information about an inode
+ * @seq: The iterator
+ * @ip: the inode
+ *
+ * Returns: 0 on success, -ENOBUFS when we run out of space
+ */
+
+static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+        const struct gfs2_inode *ip = gl->gl_object;
+        if (ip == NULL)
+                return 0;
+        gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
+                       (unsigned long long)ip->i_no_formal_ino,
+                       (unsigned long long)ip->i_no_addr,
+                       IF2DT(ip->i_inode.i_mode), ip->i_flags);
+        return 0;
+}
+
 /**
  * rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
  * @gl: the glock
@@ -305,6 +306,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
         gfs2_rgrp_bh_put(gh->gh_gl->gl_object);
 }
 
+/**
+ * rgrp_go_dump - print out an rgrp
+ * @seq: The iterator
+ * @gl: The glock in question
+ *
+ */
+
+static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+        const struct gfs2_rgrpd *rgd = gl->gl_object;
+        if (rgd == NULL)
+                return 0;
+        gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
+        return 0;
+}
+
 /**
  * trans_go_sync - promote/demote the transaction glock
  * @gl: the glock
@@ -330,7 +347,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
  *
  */
 
-static void trans_go_xmote_bh(struct gfs2_glock *gl)
+static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
 {
         struct gfs2_sbd *sdp = gl->gl_sbd;
         struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -338,8 +355,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
         struct gfs2_log_header_host head;
         int error;
 
-        if (gl->gl_state != LM_ST_UNLOCKED &&
-            test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+        if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
                 j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
 
                 error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -354,6 +370,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
                         gfs2_log_pointers_init(sdp, head.lh_blkno);
                 }
         }
+        return 0;
 }
 
 /**
@@ -375,12 +392,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
 
 const struct gfs2_glock_operations gfs2_inode_glops = {
         .go_xmote_th = inode_go_sync,
-        .go_xmote_bh = inode_go_xmote_bh,
         .go_inval = inode_go_inval,
         .go_demote_ok = inode_go_demote_ok,
         .go_lock = inode_go_lock,
+        .go_dump = inode_go_dump,
         .go_type = LM_TYPE_INODE,
-        .go_min_hold_time = HZ / 10,
+        .go_min_hold_time = HZ / 5,
 };
 
 const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -389,8 +406,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
         .go_demote_ok = rgrp_go_demote_ok,
         .go_lock = rgrp_go_lock,
         .go_unlock = rgrp_go_unlock,
+        .go_dump = rgrp_go_dump,
         .go_type = LM_TYPE_RGRP,
-        .go_min_hold_time = HZ / 10,
+        .go_min_hold_time = HZ / 5,
 };
 
 const struct gfs2_glock_operations gfs2_trans_glops = {
@@ -128,20 +128,20 @@ struct gfs2_bufdata {
 
 struct gfs2_glock_operations {
         void (*go_xmote_th) (struct gfs2_glock *gl);
-        void (*go_xmote_bh) (struct gfs2_glock *gl);
+        int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
         void (*go_inval) (struct gfs2_glock *gl, int flags);
         int (*go_demote_ok) (struct gfs2_glock *gl);
         int (*go_lock) (struct gfs2_holder *gh);
         void (*go_unlock) (struct gfs2_holder *gh);
+        int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
         const int go_type;
         const unsigned long go_min_hold_time;
 };
 
 enum {
         /* States */
-        HIF_HOLDER = 6,
+        HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
         HIF_FIRST = 7,
-        HIF_ABORTED = 9,
         HIF_WAIT = 10,
 };
 
@@ -154,20 +154,20 @@ struct gfs2_holder {
         unsigned gh_flags;
 
         int gh_error;
-        unsigned long gh_iflags;
+        unsigned long gh_iflags; /* HIF_... */
         unsigned long gh_ip;
 };
 
 enum {
         GLF_LOCK = 1,
         GLF_STICKY = 2,
         GLF_DEMOTE = 3,
         GLF_PENDING_DEMOTE = 4,
-        GLF_DIRTY = 5,
-        GLF_DEMOTE_IN_PROGRESS = 6,
+        GLF_DEMOTE_IN_PROGRESS = 5,
+        GLF_DIRTY = 6,
         GLF_LFLUSH = 7,
-        GLF_WAITERS2 = 8,
-        GLF_CONV_DEADLK = 9,
+        GLF_INVALIDATE_IN_PROGRESS = 8,
+        GLF_REPLY_PENDING = 9,
 };
 
 struct gfs2_glock {
@@ -179,19 +179,14 @@ struct gfs2_glock {
         spinlock_t gl_spin;
 
         unsigned int gl_state;
+        unsigned int gl_target;
+        unsigned int gl_reply;
         unsigned int gl_hash;
         unsigned int gl_demote_state; /* state requested by remote node */
         unsigned long gl_demote_time; /* time of first demote request */
-        struct pid *gl_owner_pid;
-        unsigned long gl_ip;
         struct list_head gl_holders;
-        struct list_head gl_waiters1; /* HIF_MUTEX */
-        struct list_head gl_waiters3; /* HIF_PROMOTE */
 
         const struct gfs2_glock_operations *gl_ops;
-
-        struct gfs2_holder *gl_req_gh;
-
         void *gl_lock;
         char *gl_lvb;
         atomic_t gl_lvb_count;
@@ -308,6 +308,9 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state,
 {
         struct gdlm_lock *lp = lock;
 
+        if (req_state == LM_ST_UNLOCKED)
+                return gdlm_unlock(lock, cur_state);
+
         clear_bit(LFL_DLM_CANCEL, &lp->flags);
         if (flags & LM_FLAG_NOEXP)
                 set_bit(LFL_NOBLOCK, &lp->flags);
@@ -107,6 +107,8 @@ static void nolock_put_lock(void *lock)
 static unsigned int nolock_lock(void *lock, unsigned int cur_state,
                                 unsigned int req_state, unsigned int flags)
 {
+        if (req_state == LM_ST_UNLOCKED)
+                return 0;
         return req_state | LM_OUT_CACHEABLE;
 }
 
@@ -40,8 +40,6 @@ static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo)
         INIT_HLIST_NODE(&gl->gl_list);
         spin_lock_init(&gl->gl_spin);
         INIT_LIST_HEAD(&gl->gl_holders);
-        INIT_LIST_HEAD(&gl->gl_waiters1);
-        INIT_LIST_HEAD(&gl->gl_waiters3);
         gl->gl_lvb = NULL;
         atomic_set(&gl->gl_lvb_count, 0);
         INIT_LIST_HEAD(&gl->gl_reclaim);
@@ -129,7 +129,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
 }
 
 /**
- * getbuf - Get a buffer with a given address space
+ * gfs2_getbuf - Get a buffer with a given address space
  * @gl: the glock
  * @blkno: the block number (filesystem scope)
  * @create: 1 if the buffer should be created
@@ -137,7 +137,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
  * Returns: the buffer
  */
 
-static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 {
         struct address_space *mapping = gl->gl_aspace->i_mapping;
         struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -205,7 +205,7 @@ static void meta_prep_new(struct buffer_head *bh)
 struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 {
         struct buffer_head *bh;
-        bh = getbuf(gl, blkno, CREATE);
+        bh = gfs2_getbuf(gl, blkno, CREATE);
         meta_prep_new(bh);
         return bh;
 }
@@ -223,7 +223,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
                    struct buffer_head **bhp)
 {
-        *bhp = getbuf(gl, blkno, CREATE);
+        *bhp = gfs2_getbuf(gl, blkno, CREATE);
         if (!buffer_uptodate(*bhp)) {
                 ll_rw_block(READ_META, 1, bhp);
                 if (flags & DIO_WAIT) {
@@ -346,7 +346,7 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
         struct buffer_head *bh;
 
         while (blen) {
-                bh = getbuf(ip->i_gl, bstart, NO_CREATE);
+                bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
                 if (bh) {
                         lock_buffer(bh);
                         gfs2_log_lock(sdp);
@@ -421,7 +421,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
         if (extlen > max_ra)
                 extlen = max_ra;
 
-        first_bh = getbuf(gl, dblock, CREATE);
+        first_bh = gfs2_getbuf(gl, dblock, CREATE);
 
         if (buffer_uptodate(first_bh))
                 goto out;
@@ -432,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
         extlen--;
 
         while (extlen) {
-                bh = getbuf(gl, dblock, CREATE);
+                bh = gfs2_getbuf(gl, dblock, CREATE);
 
                 if (!buffer_uptodate(bh) && !buffer_locked(bh))
                         ll_rw_block(READA, 1, &bh);
@@ -47,6 +47,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
 int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
                    int flags, struct buffer_head **bhp);
 int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
 
 void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
                          int meta);
@@ -507,26 +507,23 @@ static int __gfs2_readpage(void *file, struct page *page)
 static int gfs2_readpage(struct file *file, struct page *page)
 {
         struct gfs2_inode *ip = GFS2_I(page->mapping->host);
-        struct gfs2_holder *gh;
+        struct gfs2_holder gh;
         int error;
 
-        gh = gfs2_glock_is_locked_by_me(ip->i_gl);
-        if (!gh) {
-                gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
-                if (!gh)
-                        return -ENOBUFS;
-                gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
+        gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME|LM_FLAG_TRY_1CB, &gh);
+        error = gfs2_glock_nq_atime(&gh);
+        if (unlikely(error)) {
                 unlock_page(page);
-                error = gfs2_glock_nq_atime(gh);
-                if (likely(error != 0))
-                        goto out;
-                return AOP_TRUNCATED_PAGE;
+                goto out;
         }
         error = __gfs2_readpage(file, page);
-        gfs2_glock_dq(gh);
+        gfs2_glock_dq(&gh);
 out:
-        gfs2_holder_uninit(gh);
-        kfree(gh);
+        gfs2_holder_uninit(&gh);
+        if (error == GLR_TRYFAILED) {
+                yield();
+                return AOP_TRUNCATED_PAGE;
+        }
         return error;
 }
 
@@ -669,8 +669,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
         int error = 0;
 
         state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
-                | GL_FLOCK;
+        flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
 
         mutex_lock(&fp->f_fl_mutex);
 
@@ -683,9 +682,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                 gfs2_glock_dq_wait(fl_gh);
                 gfs2_holder_reinit(state, flags, fl_gh);
         } else {
-                error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
-                                       ip->i_no_addr, &gfs2_flock_glops,
-                                       CREATE, &gl);
+                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
+                                       &gfs2_flock_glops, CREATE, &gl);
                 if (error)
                         goto out;
                 gfs2_holder_init(gl, state, flags, fl_gh);
@@ -505,7 +505,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
 
         error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
                                    LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
-                                   GL_NOCANCEL | GL_NOCACHE, &t_gh);
+                                   GL_NOCACHE, &t_gh);
         if (error)
                 goto fail_gunlock_ji;
 
@@ -941,8 +941,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
         }
 
         error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
-                                   LM_FLAG_PRIORITY | GL_NOCACHE,
-                                   t_gh);
+                                   GL_NOCACHE, t_gh);
 
         list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
                 error = gfs2_jdesc_check(jd);