719ee34467
Until now, we've used the same scheme as GFS1 for atime. This has never worked correctly, since atime is a per-vfsmnt flag, not a per-fs flag, and as such the "noatime" flag was not getting passed down to the filesystem. This patch removes all the special casing around atime updates and simply uses the VFS's atime code. The net result is that GFS2 now supports the same atime-related mount options as any other filesystem, on a per-vfsmnt basis.

We do lose the "lazy atime" updates, but we gain "relatime". We could add lazy atime to the VFS at a later date if there is still a requirement for that variant - I suspect relatime will be enough.

We also lose about 100 lines of code once this patch has been applied, and I suspect it will speed things up a bit, even when atime is "on". So it seems like a nice clean-up as well.

From a user perspective, everything stays the same except the loss of the per-fs atime quantum tweakable (it ought to be per-vfsmnt at the very least, and to be honest I don't think anybody ever used it), and a number of options which were previously ignored now work correctly.

Please let me know if you've got any comments. I'm pushing this out early so that you can all see what my plans are.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
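The "relatime" behaviour mentioned above comes from the VFS rather than from GFS2 itself, which is why dropping the special casing is enough to pick it up. Below is a minimal userspace-style sketch of the policy, assuming the behaviour of later mainline kernels (cf. relatime_need_update() in fs/inode.c); it is not GFS2 or VFS code. The idea is that atime is only written back when it lags behind mtime/ctime, or at most once a day, so most reads no longer trigger a disk write.

#include <stdbool.h>
#include <time.h>

#define RELATIME_MAX_AGE (24 * 60 * 60)		/* one day, in seconds */

static bool relatime_needs_update(time_t atime, time_t mtime,
				  time_t ctime_val, time_t now)
{
	if (mtime >= atime)			/* data modified since last atime update */
		return true;
	if (ctime_val >= atime)			/* metadata changed since last atime update */
		return true;
	if (now - atime >= RELATIME_MAX_AGE)	/* force an update if atime is over a day old */
		return true;
	return false;				/* otherwise skip the on-disk write */
}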
145 lines
4.0 KiB
C
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#ifndef __GLOCK_DOT_H__
#define __GLOCK_DOT_H__

#include <linux/sched.h>
#include "incore.h"
/* Flags for lock requests; used in gfs2_holder gh_flags field.
   From lm_interface.h:
#define LM_FLAG_TRY		0x00000001
#define LM_FLAG_TRY_1CB		0x00000002
#define LM_FLAG_NOEXP		0x00000004
#define LM_FLAG_ANY		0x00000008
#define LM_FLAG_PRIORITY	0x00000010 */

#define GL_ASYNC		0x00000040
#define GL_EXACT		0x00000080
#define GL_SKIP			0x00000100
#define GL_NOCACHE		0x00000400

#define GLR_TRYFAILED		13
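/*
 * Illustrative sketch (not part of the original header): how the modifier
 * flags above combine with a lock request.  LM_FLAG_TRY asks for a
 * non-blocking attempt, which is reported back as GLR_TRYFAILED when the
 * lock is contended, and GL_NOCACHE asks that the glock not be cached once
 * it is released.  gfs2_glock_nq_init() is declared further down in this
 * header; the surrounding function and its error convention are assumptions,
 * not taken from the GFS2 sources.
 *
 *	int error;
 *
 *	error = gfs2_glock_nq_init(gl, LM_ST_SHARED,
 *				   LM_FLAG_TRY | GL_NOCACHE, gh);
 *	if (error == GLR_TRYFAILED)
 *		return -EAGAIN;
 *	return error;
 */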
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
	struct gfs2_holder *gh;
	struct pid *pid;

	/* Look in glock's list of holders for one with current task as owner */
	spin_lock(&gl->gl_spin);
	pid = task_pid(current);
	list_for_each_entry(gh, &gl->gl_holders, gh_list) {
		if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
			break;
		if (gh->gh_owner_pid == pid)
			goto out;
	}
	gh = NULL;
out:
	spin_unlock(&gl->gl_spin);

	return gh;
}
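/*
 * Illustrative use (not part of the original header): since the function
 * returns the matching holder or NULL, it works well as a sanity check
 * that the current task already owns the glock before touching data the
 * lock protects.  "ip" is a hypothetical struct gfs2_inode pointer.
 *
 *	BUG_ON(gfs2_glock_is_locked_by_me(ip->i_gl) == NULL);
 */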
static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_EXCLUSIVE;
}

static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_DEFERRED;
}

static inline int gfs2_glock_is_held_shrd(struct gfs2_glock *gl)
{
	return gl->gl_state == LM_ST_SHARED;
}

static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
	int ret;
	spin_lock(&gl->gl_spin);
	ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
	spin_unlock(&gl->gl_spin);
	return ret;
}
int gfs2_glock_get(struct gfs2_sbd *sdp,
		   u64 number, const struct gfs2_glock_operations *glops,
		   int create, struct gfs2_glock **glp);
int gfs2_glock_put(struct gfs2_glock *gl);
void gfs2_holder_init(struct gfs2_glock *gl, unsigned int state, unsigned flags,
		      struct gfs2_holder *gh);
void gfs2_holder_reinit(unsigned int state, unsigned flags,
			struct gfs2_holder *gh);
void gfs2_holder_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq(struct gfs2_holder *gh);
int gfs2_glock_poll(struct gfs2_holder *gh);
int gfs2_glock_wait(struct gfs2_holder *gh);
void gfs2_glock_dq(struct gfs2_holder *gh);
void gfs2_glock_dq_wait(struct gfs2_holder *gh);

void gfs2_glock_dq_uninit(struct gfs2_holder *gh);
int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
		      u64 number, const struct gfs2_glock_operations *glops,
		      unsigned int state, int flags, struct gfs2_holder *gh);

int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
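/*
 * Illustrative sketch (not part of the original header) of the asynchronous
 * request pattern suggested by the declarations above: GL_ASYNC makes
 * gfs2_glock_nq() queue the request and return without sleeping,
 * gfs2_glock_poll() tests whether it has been granted yet, and
 * gfs2_glock_wait() blocks for and returns the final status.  The error
 * handling and surrounding code here are assumptions, not taken from the
 * GFS2 sources.
 *
 *	gfs2_holder_init(gl, LM_ST_EXCLUSIVE, GL_ASYNC, &gh);
 *	error = gfs2_glock_nq(&gh);
 *	... overlap other work with the lock grant ...
 *	if (!gfs2_glock_poll(&gh))
 *		... not granted yet; keep working, or block via: ...
 *	error = gfs2_glock_wait(&gh);
 *	...
 *	gfs2_glock_dq_uninit(&gh);
 */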
/**
 * gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
 * @gl: the glock
 * @state: the state we're requesting
 * @flags: the modifier flags
 * @gh: the holder structure
 *
 * Returns: 0, GLR_*, or errno
 */

static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
				     unsigned int state, int flags,
				     struct gfs2_holder *gh)
{
	int error;

	gfs2_holder_init(gl, state, flags, gh);

	error = gfs2_glock_nq(gh);
	if (error)
		gfs2_holder_uninit(gh);

	return error;
}
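/*
 * Illustrative sketch (not part of the original header): the most common
 * calling pattern pairs gfs2_glock_nq_init() with gfs2_glock_dq_uninit(),
 * which drops the lock and tears the holder down again.  "ip" and
 * "do_protected_work" are hypothetical.
 *
 *	struct gfs2_holder gh;
 *	int error;
 *
 *	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
 *	if (error)
 *		return error;
 *	do_protected_work(ip);
 *	gfs2_glock_dq_uninit(&gh);
 */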
/*  Lock Value Block functions  */

int gfs2_lvb_hold(struct gfs2_glock *gl);
void gfs2_lvb_unhold(struct gfs2_glock *gl);

void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);

int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);

int gfs2_create_debugfs_file(struct gfs2_sbd *sdp);
void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
int gfs2_register_debugfs(void);
void gfs2_unregister_debugfs(void);

#endif /* __GLOCK_DOT_H__ */