mirror of https://github.com/torvalds/linux.git
dlm for 6.11
- New flag DLM_LSFL_SOFTIRQ_SAFE can be set by code using dlm to indicate
  callbacks can be run from softirq.
- Change md-cluster to set DLM_LSFL_SOFTIRQ_SAFE.
- Clean up for previous changes, e.g. unused code and parameters.
- Remove custom pre-allocation of rsb structs which is unnecessary with
  kmem caches.
- Change idr to xarray for lkb structs in use.
- Change idr to xarray for rsb structs being recovered.
- Change outdated naming related to internal rsb states.
- Fix some incorrect add/remove of rsb on scan list.
- Use rcu to free rsb structs.
-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEcGkeEvkvjdvlR90nOBtzx/yAaaoFAmaVUlgACgkQOBtzx/yA
aapCfQ//eqs19no6+TUagkzboIGxGbrPEqmJNj4Vu1sCSH3tVC4IrkI2IqqPJL9N
tYHUQvp3BYOdenBZzw6tmbs6cvoA7Fps7YMqqkEKYfBCHcV9KtejqvwBdJfqiN6A
RniImAph0qvvI6GK4Y+6nDyxU2n8enOhgnZMRDUS/KYV8frc70SxreqzPSkPMWLh
ZnDgTIF4zahUBFEkILlXYArbbRk5FKL+SMkSDZyDd78bVnjX24KgtOt7HpDX9X70
/9DrDz3uI+XShXzpIint4Ee4ghZr1lM9g9LXDazuY62SBDknhGTzY0BYVxZ2U3NG
ocUh2KbJoP29sncNxLf9Nev5JPc+Wx3iCTEgLKkOEc4Yf0jAZg+1xbopWDT+qjRV
djsgTCQ1gjpHgQxrlUUo7N5ilo5ocgSXSHGJ8b886tG5eZaxiN1y3TB4T4JtO+FH
Q4IkFJiaYDL44xYR85wpfOcct/5mR7kPvhuxouexKobO+lKXaUONP9Wj7pRgG/M5
qhrWY4EU8jcO/nPunPxvhJdL68T3WoHDN42tWt/7kYQqY2svvfmr6NEImde6GxqX
PB3hW20cvD4wULumLM+h0rQacIWuuMQ5ahIX9og6jM7Yx/ucks1pgnRo0M0R1aUc
OopoTAekSdRtgbRXr5IQPRxpKB6BFUp3Va/Yo+2g0fi5QywcVZc=
=dDCi
-----END PGP SIGNATURE-----

Merge tag 'dlm-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm

Pull dlm updates from David Teigland:

 - New flag DLM_LSFL_SOFTIRQ_SAFE can be set by code using dlm to
   indicate callbacks can be run from softirq

 - Change md-cluster to set DLM_LSFL_SOFTIRQ_SAFE

 - Clean up for previous changes, e.g. unused code and parameters

 - Remove custom pre-allocation of rsb structs which is unnecessary
   with kmem caches

 - Change idr to xarray for lkb structs in use

 - Change idr to xarray for rsb structs being recovered

 - Change outdated naming related to internal rsb states

 - Fix some incorrect add/remove of rsb on scan list

 - Use rcu to free rsb structs

* tag 'dlm-6.11' of git://git.kernel.org/pub/scm/linux/kernel/git/teigland/linux-dlm:
  dlm: add rcu_barrier before destroy kmem cache
  dlm: remove DLM_LSFL_SOFTIRQ from exflags
  fs: dlm: remove unused struct 'dlm_processed_nodes'
  md-cluster: use DLM_LSFL_SOFTIRQ for dlm_new_lockspace()
  dlm: implement LSFL_SOFTIRQ_SAFE
  dlm: introduce DLM_LSFL_SOFTIRQ_SAFE
  dlm: use LSFL_FS to check for kernel lockspace
  dlm: use rcu to avoid an extra rsb struct lookup
  dlm: fix add_scan and del_scan usage
  dlm: change list and timer names
  dlm: move recover idr to xarray datastructure
  dlm: move lkb idr to xarray datastructure
  dlm: drop own rsb pre allocation mechanism
  dlm: remove ls_local_handle from struct dlm_ls
  dlm: remove unused parameter in dlm_midcomms_addr
  dlm: don't kref_init rsbs created for toss list
  dlm: remove scand leftovers
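For context, a minimal sketch of how a kernel user opts in to the new softirq-safe callback mode, modeled on the md-cluster change below. The lockspace name, cluster name, and LVB length here are placeholder values, not taken from this commit:

	#include <linux/dlm.h>

	static dlm_lockspace_t *example_ls;

	static int example_create_lockspace(void)
	{
		/* DLM_LSFL_SOFTIRQ declares that the ast/bast callbacks
		 * passed later to dlm_lock() are softirq safe, so dlm may
		 * invoke them directly from softirq context instead of
		 * bouncing through a workqueue. */
		return dlm_new_lockspace("example", "example-cluster",
					 DLM_LSFL_SOFTIRQ, 64,
					 NULL, NULL, NULL, &example_ls);
	}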
commit f097ef0e76
drivers/md/md-cluster.c
@@ -887,7 +887,7 @@ static int join(struct mddev *mddev, int nodes)
 	memset(str, 0, 64);
 	sprintf(str, "%pU", mddev->uuid);
 	ret = dlm_new_lockspace(str, mddev->bitmap_info.cluster_name,
-				0, LVB_SIZE, &md_ls_ops, mddev,
+				DLM_LSFL_SOFTIRQ, LVB_SIZE, &md_ls_ops, mddev,
 				&ops_rv, &cinfo->lockspace);
 	if (ret)
 		goto err;
fs/dlm/ast.c (170 lines changed)
@@ -18,35 +18,52 @@
 #include "user.h"
 #include "ast.h"
 
+static void dlm_run_callback(uint32_t ls_id, uint32_t lkb_id, int8_t mode,
+			     uint32_t flags, uint8_t sb_flags, int sb_status,
+			     struct dlm_lksb *lksb,
+			     void (*astfn)(void *astparam),
+			     void (*bastfn)(void *astparam, int mode),
+			     void *astparam, const char *res_name,
+			     size_t res_length)
+{
+	if (flags & DLM_CB_BAST) {
+		trace_dlm_bast(ls_id, lkb_id, mode, res_name, res_length);
+		bastfn(astparam, mode);
+	} else if (flags & DLM_CB_CAST) {
+		trace_dlm_ast(ls_id, lkb_id, sb_status, sb_flags, res_name,
+			      res_length);
+		lksb->sb_status = sb_status;
+		lksb->sb_flags = sb_flags;
+		astfn(astparam);
+	}
+}
+
+static void dlm_do_callback(struct dlm_callback *cb)
+{
+	dlm_run_callback(cb->ls_id, cb->lkb_id, cb->mode, cb->flags,
+			 cb->sb_flags, cb->sb_status, cb->lkb_lksb,
+			 cb->astfn, cb->bastfn, cb->astparam,
+			 cb->res_name, cb->res_length);
+	dlm_free_cb(cb);
+}
+
 static void dlm_callback_work(struct work_struct *work)
 {
 	struct dlm_callback *cb = container_of(work, struct dlm_callback, work);
 
-	if (cb->flags & DLM_CB_BAST) {
-		trace_dlm_bast(cb->ls_id, cb->lkb_id, cb->mode, cb->res_name,
-			       cb->res_length);
-		cb->bastfn(cb->astparam, cb->mode);
-	} else if (cb->flags & DLM_CB_CAST) {
-		trace_dlm_ast(cb->ls_id, cb->lkb_id, cb->sb_status,
-			      cb->sb_flags, cb->res_name, cb->res_length);
-		cb->lkb_lksb->sb_status = cb->sb_status;
-		cb->lkb_lksb->sb_flags = cb->sb_flags;
-		cb->astfn(cb->astparam);
-	}
-
-	dlm_free_cb(cb);
+	dlm_do_callback(cb);
 }
 
-int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
-			   int status, uint32_t sbflags,
-			   struct dlm_callback **cb)
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			   int status, uint32_t sbflags, int *copy_lvb)
 {
 	struct dlm_rsb *rsb = lkb->lkb_resource;
-	int rv = DLM_ENQUEUE_CALLBACK_SUCCESS;
 	struct dlm_ls *ls = rsb->res_ls;
-	int copy_lvb = 0;
 	int prev_mode;
 
+	if (copy_lvb)
+		*copy_lvb = 0;
+
 	if (flags & DLM_CB_BAST) {
 		/* if cb is a bast, it should be skipped if the blocking mode is
 		 * compatible with the last granted mode
@@ -56,7 +73,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 			log_debug(ls, "skip %x bast mode %d for cast mode %d",
 				  lkb->lkb_id, mode,
 				  lkb->lkb_last_cast_cb_mode);
-			goto out;
+			return true;
 		}
 	}
 
@@ -74,7 +91,7 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		    (prev_mode > mode && prev_mode > DLM_LOCK_PR)) {
 			log_debug(ls, "skip %x add bast mode %d for bast mode %d",
 				  lkb->lkb_id, mode, prev_mode);
-			goto out;
+			return true;
 		}
 	}
 
@@ -85,8 +102,10 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 		prev_mode = lkb->lkb_last_cast_cb_mode;
 
 		if (!status && lkb->lkb_lksb->sb_lvbptr &&
-		    dlm_lvb_operations[prev_mode + 1][mode + 1])
-			copy_lvb = 1;
+		    dlm_lvb_operations[prev_mode + 1][mode + 1]) {
+			if (copy_lvb)
+				*copy_lvb = 1;
+		}
 	}
 
 	lkb->lkb_last_cast_cb_mode = mode;
@@ -96,11 +115,19 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	lkb->lkb_last_cb_mode = mode;
 	lkb->lkb_last_cb_flags = flags;
 
+	return false;
+}
+
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+	       int status, uint32_t sbflags,
+	       struct dlm_callback **cb)
+{
+	struct dlm_rsb *rsb = lkb->lkb_resource;
+	struct dlm_ls *ls = rsb->res_ls;
+
 	*cb = dlm_allocate_cb();
-	if (!*cb) {
-		rv = DLM_ENQUEUE_CALLBACK_FAILURE;
-		goto out;
-	}
+	if (WARN_ON_ONCE(!*cb))
+		return -ENOMEM;
 
 	/* for tracing */
 	(*cb)->lkb_id = lkb->lkb_id;
@@ -112,19 +139,34 @@ int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	(*cb)->mode = mode;
 	(*cb)->sb_status = status;
 	(*cb)->sb_flags = (sbflags & 0x000000FF);
-	(*cb)->copy_lvb = copy_lvb;
 	(*cb)->lkb_lksb = lkb->lkb_lksb;
 
-	rv = DLM_ENQUEUE_CALLBACK_NEED_SCHED;
+	return 0;
+}
 
-out:
-	return rv;
+static int dlm_get_queue_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			    int status, uint32_t sbflags,
+			    struct dlm_callback **cb)
+{
+	int rv;
+
+	rv = dlm_get_cb(lkb, flags, mode, status, sbflags, cb);
+	if (rv)
+		return rv;
+
+	(*cb)->astfn = lkb->lkb_astfn;
+	(*cb)->bastfn = lkb->lkb_bastfn;
+	(*cb)->astparam = lkb->lkb_astparam;
+	INIT_WORK(&(*cb)->work, dlm_callback_work);
+
+	return 0;
 }
 
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
-	        uint32_t sbflags)
+		uint32_t sbflags)
 {
-	struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+	struct dlm_rsb *rsb = lkb->lkb_resource;
+	struct dlm_ls *ls = rsb->res_ls;
 	struct dlm_callback *cb;
 	int rv;
 
@@ -133,34 +175,36 @@ void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		return;
 	}
 
-	rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags,
-				    &cb);
-	switch (rv) {
-	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
-		cb->astfn = lkb->lkb_astfn;
-		cb->bastfn = lkb->lkb_bastfn;
-		cb->astparam = lkb->lkb_astparam;
-		INIT_WORK(&cb->work, dlm_callback_work);
+	if (dlm_may_skip_callback(lkb, flags, mode, status, sbflags, NULL))
+		return;
 
-		spin_lock_bh(&ls->ls_cb_lock);
-		if (test_bit(LSFL_CB_DELAY, &ls->ls_flags))
+	spin_lock_bh(&ls->ls_cb_lock);
+	if (test_bit(LSFL_CB_DELAY, &ls->ls_flags)) {
+		rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+		if (!rv)
 			list_add(&cb->list, &ls->ls_cb_delay);
-		else
-			queue_work(ls->ls_callback_wq, &cb->work);
-		spin_unlock_bh(&ls->ls_cb_lock);
-		break;
-	case DLM_ENQUEUE_CALLBACK_SUCCESS:
-		break;
-	case DLM_ENQUEUE_CALLBACK_FAILURE:
-		fallthrough;
-	default:
-		WARN_ON_ONCE(1);
-		break;
+	} else {
+		if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags)) {
+			dlm_run_callback(ls->ls_global_id, lkb->lkb_id, mode, flags,
+					 sbflags, status, lkb->lkb_lksb,
+					 lkb->lkb_astfn, lkb->lkb_bastfn,
+					 lkb->lkb_astparam, rsb->res_name,
+					 rsb->res_length);
+		} else {
+			rv = dlm_get_queue_cb(lkb, flags, mode, status, sbflags, &cb);
+			if (!rv)
+				queue_work(ls->ls_callback_wq, &cb->work);
+		}
 	}
+	spin_unlock_bh(&ls->ls_cb_lock);
 }
 
 int dlm_callback_start(struct dlm_ls *ls)
 {
+	if (!test_bit(LSFL_FS, &ls->ls_flags) ||
+	    test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+		return 0;
+
 	ls->ls_callback_wq = alloc_ordered_workqueue("dlm_callback",
 						     WQ_HIGHPRI | WQ_MEM_RECLAIM);
 	if (!ls->ls_callback_wq) {
@@ -178,13 +222,15 @@ void dlm_callback_stop(struct dlm_ls *ls)
 
 void dlm_callback_suspend(struct dlm_ls *ls)
 {
-	if (ls->ls_callback_wq) {
-		spin_lock_bh(&ls->ls_cb_lock);
-		set_bit(LSFL_CB_DELAY, &ls->ls_flags);
-		spin_unlock_bh(&ls->ls_cb_lock);
+	if (!test_bit(LSFL_FS, &ls->ls_flags))
+		return;
 
+	spin_lock_bh(&ls->ls_cb_lock);
+	set_bit(LSFL_CB_DELAY, &ls->ls_flags);
+	spin_unlock_bh(&ls->ls_cb_lock);
+
+	if (ls->ls_callback_wq)
 		flush_workqueue(ls->ls_callback_wq);
-	}
 }
 
 #define MAX_CB_QUEUE 25
@@ -195,14 +241,18 @@ void dlm_callback_resume(struct dlm_ls *ls)
 	int count = 0, sum = 0;
 	bool empty;
 
-	if (!ls->ls_callback_wq)
+	if (!test_bit(LSFL_FS, &ls->ls_flags))
 		return;
 
more:
 	spin_lock_bh(&ls->ls_cb_lock);
 	list_for_each_entry_safe(cb, safe, &ls->ls_cb_delay, list) {
 		list_del(&cb->list);
-		queue_work(ls->ls_callback_wq, &cb->work);
+		if (test_bit(LSFL_SOFTIRQ, &ls->ls_flags))
+			dlm_do_callback(cb);
+		else
+			queue_work(ls->ls_callback_wq, &cb->work);
+
 		count++;
 		if (count == MAX_CB_QUEUE)
 			break;
fs/dlm/ast.h (11 lines changed)
@@ -11,12 +11,11 @@
 #ifndef __ASTD_DOT_H__
 #define __ASTD_DOT_H__
 
-#define DLM_ENQUEUE_CALLBACK_NEED_SCHED	1
-#define DLM_ENQUEUE_CALLBACK_SUCCESS	0
-#define DLM_ENQUEUE_CALLBACK_FAILURE	-1
-int dlm_queue_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
-			   int status, uint32_t sbflags,
-			   struct dlm_callback **cb);
+bool dlm_may_skip_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,
+			   int status, uint32_t sbflags, int *copy_lvb);
+int dlm_get_cb(struct dlm_lkb *lkb, uint32_t flags, int mode,
+	       int status, uint32_t sbflags,
+	       struct dlm_callback **cb);
 void dlm_add_cb(struct dlm_lkb *lkb, uint32_t flags, int mode, int status,
 		uint32_t sbflags);
 
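The header change above splits the old dlm_queue_lkb_callback() into a query step and an allocation step. A sketch of the resulting caller pattern, condensed from the dlm_user_add_ast() hunk in fs/dlm/user.c further down:

	int copy_lvb;
	struct dlm_callback *cb;

	/* First ask whether the callback can be suppressed entirely
	 * (e.g. a bast compatible with the last granted mode); only if
	 * not, allocate and fill a dlm_callback for deferred delivery. */
	if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags, &copy_lvb)) {
		if (!dlm_get_cb(lkb, flags, mode, status, sbflags, &cb)) {
			cb->copy_lvb = copy_lvb;
			/* ... queue cb for delivery ... */
		}
	}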
fs/dlm/config.c
@@ -672,7 +672,7 @@ static ssize_t comm_addr_store(struct config_item *item, const char *buf,
 
 	memcpy(addr, buf, len);
 
-	rv = dlm_midcomms_addr(cm->nodeid, addr, len);
+	rv = dlm_midcomms_addr(cm->nodeid, addr);
 	if (rv) {
 		kfree(addr);
 		return rv;
fs/dlm/debug_fs.c
@@ -380,7 +380,7 @@ static const struct seq_operations format4_seq_ops;
 
 static int table_seq_show(struct seq_file *seq, void *iter_ptr)
 {
-	struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_rsbs_list);
+	struct dlm_rsb *rsb = list_entry(iter_ptr, struct dlm_rsb, res_slow_list);
 
 	if (seq->op == &format1_seq_ops)
 		print_format1(rsb, seq);
@@ -409,9 +409,9 @@ static void *table_seq_start(struct seq_file *seq, loff_t *pos)
 	}
 
 	if (seq->op == &format4_seq_ops)
-		list = &ls->ls_toss;
+		list = &ls->ls_slow_inactive;
 	else
-		list = &ls->ls_keep;
+		list = &ls->ls_slow_active;
 
 	read_lock_bh(&ls->ls_rsbtbl_lock);
 	return seq_list_start(list, *pos);
@@ -423,9 +423,9 @@ static void *table_seq_next(struct seq_file *seq, void *iter_ptr, loff_t *pos)
 	struct list_head *list;
 
 	if (seq->op == &format4_seq_ops)
-		list = &ls->ls_toss;
+		list = &ls->ls_slow_inactive;
 	else
-		list = &ls->ls_keep;
+		list = &ls->ls_slow_active;
 
 	return seq_list_next(iter_ptr, list, pos);
 }
fs/dlm/dlm_internal.h
@@ -36,7 +36,7 @@
 #include <linux/miscdevice.h>
 #include <linux/rhashtable.h>
 #include <linux/mutex.h>
-#include <linux/idr.h>
+#include <linux/xarray.h>
 #include <linux/ratelimit.h>
 #include <linux/uaccess.h>
 
@@ -316,26 +316,24 @@ struct dlm_rsb {
 	int			res_nodeid;
 	int			res_master_nodeid;
 	int			res_dir_nodeid;
-	int			res_id;		/* for ls_recover_idr */
+	unsigned long		res_id;		/* for ls_recover_xa */
 	uint32_t		res_lvbseq;
 	uint32_t		res_hash;
 	unsigned long		res_toss_time;
 	uint32_t		res_first_lkid;
 	struct list_head	res_lookup;	/* lkbs waiting on first */
-	union {
-		struct list_head	res_hashchain;
-		struct rhash_head	res_node; /* rsbtbl */
-	};
+	struct rhash_head	res_node;	/* rsbtbl */
 	struct list_head	res_grantqueue;
 	struct list_head	res_convertqueue;
 	struct list_head	res_waitqueue;
 
-	struct list_head	res_rsbs_list;
+	struct list_head	res_slow_list;	    /* ls_slow_* */
+	struct list_head	res_scan_list;
 	struct list_head	res_root_list;	    /* used for recovery */
 	struct list_head	res_masters_list;   /* used for recovery */
 	struct list_head	res_recover_list;   /* used for recovery */
-	struct list_head	res_toss_q_list;
 	int			res_recover_locks_count;
+	struct rcu_head		rcu;
 
 	char *res_lvbptr;
 	char res_name[DLM_RESNAME_MAXLEN+1];
@@ -368,7 +366,8 @@ enum rsb_flags {
 	RSB_RECOVER_CONVERT,
 	RSB_RECOVER_GRANT,
 	RSB_RECOVER_LVB_INVAL,
-	RSB_TOSS,
+	RSB_INACTIVE,
+	RSB_HASHED, /* set while rsb is on ls_rsbtbl */
 };
 
 static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
@@ -559,16 +558,8 @@ struct rcom_lock {
 	char rl_lvb[];
 };
 
-/*
- * The max number of resources per rsbtbl bucket that shrink will attempt
- * to remove in each iteration.
- */
-
-#define DLM_REMOVE_NAMES_MAX 8
-
 struct dlm_ls {
 	struct list_head	ls_list;	/* list of lockspaces */
-	dlm_lockspace_t		*ls_local_handle;
 	uint32_t		ls_global_id;	/* global unique lockspace ID */
 	uint32_t		ls_generation;
 	uint32_t		ls_exflags;
@@ -578,26 +569,21 @@ struct dlm_ls {
 	wait_queue_head_t	ls_count_wait;
 	int			ls_create_count; /* create/release refcount */
 	unsigned long		ls_flags;	/* LSFL_ */
-	unsigned long		ls_scan_time;
 	struct kobject		ls_kobj;
 
-	struct idr		ls_lkbidr;
-	rwlock_t		ls_lkbidr_lock;
+	struct xarray		ls_lkbxa;
+	rwlock_t		ls_lkbxa_lock;
 
+	/* an rsb is on rsbtl for primary locking functions,
+	   and on a slow list for recovery/dump iteration */
 	struct rhashtable	ls_rsbtbl;
-	rwlock_t		ls_rsbtbl_lock;
+	rwlock_t		ls_rsbtbl_lock; /* for ls_rsbtbl and ls_slow */
+	struct list_head	ls_slow_inactive; /* to iterate rsbtbl */
+	struct list_head	ls_slow_active;	  /* to iterate rsbtbl */
 
-	struct list_head	ls_toss;
-	struct list_head	ls_keep;
-
-	struct timer_list	ls_timer;
-	/* this queue is ordered according the
-	 * absolute res_toss_time jiffies time
-	 * to mod_timer() with the first element
-	 * if necessary.
-	 */
-	struct list_head	ls_toss_q;
-	spinlock_t		ls_toss_q_lock;
+	struct timer_list	ls_scan_timer; /* based on first scan_list rsb toss_time */
+	struct list_head	ls_scan_list; /* rsbs ordered by res_toss_time */
+	spinlock_t		ls_scan_lock;
 
 	spinlock_t		ls_waiters_lock;
 	struct list_head	ls_waiters;	/* lkbs needing a reply */
@@ -605,10 +591,6 @@ struct dlm_ls {
 	spinlock_t		ls_orphans_lock;
 	struct list_head	ls_orphans;
 
-	spinlock_t		ls_new_rsb_spin;
-	int			ls_new_rsb_count;
-	struct list_head	ls_new_rsb;	/* new rsb structs */
-
 	struct list_head	ls_nodes;	/* current nodes in ls */
 	struct list_head	ls_nodes_gone;	/* dead node list, recovery */
 	int			ls_num_nodes;	/* number of nodes in ls */
@@ -664,8 +646,8 @@ struct dlm_ls {
 	struct list_head	ls_recover_list;
 	spinlock_t		ls_recover_list_lock;
 	int			ls_recover_list_count;
-	struct idr		ls_recover_idr;
-	spinlock_t		ls_recover_idr_lock;
+	struct xarray		ls_recover_xa;
+	spinlock_t		ls_recover_xa_lock;
 	wait_queue_head_t	ls_wait_general;
 	wait_queue_head_t	ls_recover_lock_wait;
 	spinlock_t		ls_clear_proc_locks;
@@ -716,6 +698,8 @@ struct dlm_ls {
 #define LSFL_CB_DELAY		9
 #define LSFL_NODIR		10
 #define LSFL_RECV_MSG_BLOCKED	11
+#define LSFL_FS			12
+#define LSFL_SOFTIRQ		13
 
 #define DLM_PROC_FLAGS_CLOSING 1
 #define DLM_PROC_FLAGS_COMPAT  2
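The ls_scan_timer/ls_scan_list comments above describe a single deferrable timer armed for the earliest res_toss_time on the list. A rough sketch of that pattern, illustrative only and not the actual fs/dlm/lock.c code:

	static void example_rearm_scan_timer(struct dlm_ls *ls)
	{
		struct dlm_rsb *first;

		spin_lock_bh(&ls->ls_scan_lock);
		/* ls_scan_list is kept ordered by res_toss_time, so the
		 * first entry is always the next rsb due for inactivity
		 * handling. */
		first = list_first_entry_or_null(&ls->ls_scan_list,
						 struct dlm_rsb, res_scan_list);
		if (first)
			mod_timer(&ls->ls_scan_timer, first->res_toss_time);
		spin_unlock_bh(&ls->ls_scan_lock);
	}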
fs/dlm/lock.c (568 lines changed; diff suppressed because it is too large)
fs/dlm/lock.h
@@ -11,7 +11,6 @@
 #ifndef __LOCK_DOT_H__
 #define __LOCK_DOT_H__
 
-void dlm_rsb_toss_timer(struct timer_list *timer);
 void dlm_dump_rsb(struct dlm_rsb *r);
 void dlm_dump_rsb_name(struct dlm_ls *ls, const char *name, int len);
 void dlm_print_lkb(struct dlm_lkb *lkb);
@@ -19,15 +18,15 @@ void dlm_receive_message_saved(struct dlm_ls *ls, const struct dlm_message *ms,
 			       uint32_t saved_seq);
 void dlm_receive_buffer(const union dlm_packet *p, int nodeid);
 int dlm_modes_compat(int mode1, int mode2);
-void free_toss_rsb(struct dlm_rsb *r);
+void free_inactive_rsb(struct dlm_rsb *r);
 void dlm_put_rsb(struct dlm_rsb *r);
 void dlm_hold_rsb(struct dlm_rsb *r);
 int dlm_put_lkb(struct dlm_lkb *lkb);
-void dlm_scan_rsbs(struct dlm_ls *ls);
 int dlm_lock_recovery_try(struct dlm_ls *ls);
 void dlm_lock_recovery(struct dlm_ls *ls);
 void dlm_unlock_recovery(struct dlm_ls *ls);
-void dlm_timer_resume(struct dlm_ls *ls);
+void dlm_rsb_scan(struct timer_list *timer);
+void resume_scan_timer(struct dlm_ls *ls);
 
 int dlm_master_lookup(struct dlm_ls *ls, int from_nodeid, const char *name,
 		      int len, unsigned int flags, int *r_nodeid, int *result);
fs/dlm/lockspace.c
@@ -38,7 +38,7 @@ static ssize_t dlm_control_store(struct dlm_ls *ls, const char *buf, size_t len)
 
 	if (rc)
 		return rc;
-	ls = dlm_find_lockspace_local(ls->ls_local_handle);
+	ls = dlm_find_lockspace_local(ls);
 	if (!ls)
 		return -EINVAL;
 
@@ -265,18 +265,9 @@ struct dlm_ls *dlm_find_lockspace_global(uint32_t id)
 
 struct dlm_ls *dlm_find_lockspace_local(dlm_lockspace_t *lockspace)
 {
-	struct dlm_ls *ls;
+	struct dlm_ls *ls = lockspace;
 
-	spin_lock_bh(&lslist_lock);
-	list_for_each_entry(ls, &lslist, ls_list) {
-		if (ls->ls_local_handle == lockspace) {
-			atomic_inc(&ls->ls_count);
-			goto out;
-		}
-	}
-	ls = NULL;
- out:
-	spin_unlock_bh(&lslist_lock);
+	atomic_inc(&ls->ls_count);
 	return ls;
 }
 
@@ -410,37 +401,37 @@ static int new_lockspace(const char *name, const char *cluster,
 	atomic_set(&ls->ls_count, 0);
 	init_waitqueue_head(&ls->ls_count_wait);
 	ls->ls_flags = 0;
-	ls->ls_scan_time = jiffies;
 
 	if (ops && dlm_config.ci_recover_callbacks) {
 		ls->ls_ops = ops;
 		ls->ls_ops_arg = ops_arg;
 	}
 
+	if (flags & DLM_LSFL_SOFTIRQ)
+		set_bit(LSFL_SOFTIRQ, &ls->ls_flags);
+
 	/* ls_exflags are forced to match among nodes, and we don't
 	 * need to require all nodes to have some flags set
 	 */
-	ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL));
+	ls->ls_exflags = (flags & ~(DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
+				    DLM_LSFL_SOFTIRQ));
 
-	INIT_LIST_HEAD(&ls->ls_toss);
-	INIT_LIST_HEAD(&ls->ls_keep);
+	INIT_LIST_HEAD(&ls->ls_slow_inactive);
+	INIT_LIST_HEAD(&ls->ls_slow_active);
 	rwlock_init(&ls->ls_rsbtbl_lock);
 
 	error = rhashtable_init(&ls->ls_rsbtbl, &dlm_rhash_rsb_params);
 	if (error)
 		goto out_lsfree;
 
-	idr_init(&ls->ls_lkbidr);
-	rwlock_init(&ls->ls_lkbidr_lock);
+	xa_init_flags(&ls->ls_lkbxa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+	rwlock_init(&ls->ls_lkbxa_lock);
 
 	INIT_LIST_HEAD(&ls->ls_waiters);
 	spin_lock_init(&ls->ls_waiters_lock);
 	INIT_LIST_HEAD(&ls->ls_orphans);
 	spin_lock_init(&ls->ls_orphans_lock);
 
-	INIT_LIST_HEAD(&ls->ls_new_rsb);
-	spin_lock_init(&ls->ls_new_rsb_spin);
-
 	INIT_LIST_HEAD(&ls->ls_nodes);
 	INIT_LIST_HEAD(&ls->ls_nodes_gone);
 	ls->ls_num_nodes = 0;
@@ -484,7 +475,7 @@ static int new_lockspace(const char *name, const char *cluster,
 	ls->ls_recover_buf = kmalloc(DLM_MAX_SOCKET_BUFSIZE, GFP_NOFS);
 	if (!ls->ls_recover_buf) {
 		error = -ENOMEM;
-		goto out_lkbidr;
+		goto out_lkbxa;
 	}
 
 	ls->ls_slot = 0;
@@ -494,32 +485,31 @@ static int new_lockspace(const char *name, const char *cluster,
 
 	INIT_LIST_HEAD(&ls->ls_recover_list);
 	spin_lock_init(&ls->ls_recover_list_lock);
-	idr_init(&ls->ls_recover_idr);
-	spin_lock_init(&ls->ls_recover_idr_lock);
+	xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+	spin_lock_init(&ls->ls_recover_xa_lock);
 	ls->ls_recover_list_count = 0;
-	ls->ls_local_handle = ls;
 	init_waitqueue_head(&ls->ls_wait_general);
 	INIT_LIST_HEAD(&ls->ls_masters_list);
 	rwlock_init(&ls->ls_masters_lock);
 	INIT_LIST_HEAD(&ls->ls_dir_dump_list);
 	rwlock_init(&ls->ls_dir_dump_lock);
 
-	INIT_LIST_HEAD(&ls->ls_toss_q);
-	spin_lock_init(&ls->ls_toss_q_lock);
-	timer_setup(&ls->ls_timer, dlm_rsb_toss_timer,
-		    TIMER_DEFERRABLE);
+	INIT_LIST_HEAD(&ls->ls_scan_list);
+	spin_lock_init(&ls->ls_scan_lock);
+	timer_setup(&ls->ls_scan_timer, dlm_rsb_scan, TIMER_DEFERRABLE);
 
 	spin_lock_bh(&lslist_lock);
 	ls->ls_create_count = 1;
 	list_add(&ls->ls_list, &lslist);
 	spin_unlock_bh(&lslist_lock);
 
-	if (flags & DLM_LSFL_FS) {
-		error = dlm_callback_start(ls);
-		if (error) {
-			log_error(ls, "can't start dlm_callback %d", error);
-			goto out_delist;
-		}
+	if (flags & DLM_LSFL_FS)
+		set_bit(LSFL_FS, &ls->ls_flags);
+
+	error = dlm_callback_start(ls);
+	if (error) {
+		log_error(ls, "can't start dlm_callback %d", error);
+		goto out_delist;
 	}
 
 	init_waitqueue_head(&ls->ls_recover_lock_wait);
@@ -584,10 +574,10 @@ static int new_lockspace(const char *name, const char *cluster,
 	spin_lock_bh(&lslist_lock);
 	list_del(&ls->ls_list);
 	spin_unlock_bh(&lslist_lock);
-	idr_destroy(&ls->ls_recover_idr);
+	xa_destroy(&ls->ls_recover_xa);
 	kfree(ls->ls_recover_buf);
- out_lkbidr:
-	idr_destroy(&ls->ls_lkbidr);
+ out_lkbxa:
+	xa_destroy(&ls->ls_lkbxa);
 	rhashtable_destroy(&ls->ls_rsbtbl);
  out_lsfree:
 	if (do_unreg)
@@ -643,26 +633,15 @@ int dlm_new_user_lockspace(const char *name, const char *cluster,
 			   void *ops_arg, int *ops_result,
 			   dlm_lockspace_t **lockspace)
 {
+	if (flags & DLM_LSFL_SOFTIRQ)
+		return -EINVAL;
+
 	return __dlm_new_lockspace(name, cluster, flags, lvblen, ops,
 				   ops_arg, ops_result, lockspace);
 }
 
-static int lkb_idr_is_local(int id, void *p, void *data)
+static int lkb_idr_free(struct dlm_lkb *lkb)
 {
-	struct dlm_lkb *lkb = p;
-
-	return lkb->lkb_nodeid == 0 && lkb->lkb_grmode != DLM_LOCK_IV;
-}
-
-static int lkb_idr_is_any(int id, void *p, void *data)
-{
-	return 1;
-}
-
-static int lkb_idr_free(int id, void *p, void *data)
-{
-	struct dlm_lkb *lkb = p;
-
 	if (lkb->lkb_lvbptr && test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
 		dlm_free_lvb(lkb->lkb_lvbptr);
 
@@ -670,23 +649,34 @@ static int lkb_idr_free(int id, void *p, void *data)
 	return 0;
 }
 
-/* NOTE: We check the lkbidr here rather than the resource table.
+/* NOTE: We check the lkbxa here rather than the resource table.
    This is because there may be LKBs queued as ASTs that have been unlinked
   from their RSBs and are pending deletion once the AST has been delivered */
 
static int lockspace_busy(struct dlm_ls *ls, int force)
 {
-	int rv;
+	struct dlm_lkb *lkb;
+	unsigned long id;
+	int rv = 0;
 
-	read_lock_bh(&ls->ls_lkbidr_lock);
+	read_lock_bh(&ls->ls_lkbxa_lock);
 	if (force == 0) {
-		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_any, ls);
+		xa_for_each(&ls->ls_lkbxa, id, lkb) {
+			rv = 1;
+			break;
+		}
 	} else if (force == 1) {
-		rv = idr_for_each(&ls->ls_lkbidr, lkb_idr_is_local, ls);
+		xa_for_each(&ls->ls_lkbxa, id, lkb) {
+			if (lkb->lkb_nodeid == 0 &&
+			    lkb->lkb_grmode != DLM_LOCK_IV) {
+				rv = 1;
+				break;
+			}
+		}
 	} else {
 		rv = 0;
 	}
-	read_unlock_bh(&ls->ls_lkbidr_lock);
+	read_unlock_bh(&ls->ls_lkbxa_lock);
 	return rv;
 }
 
@@ -699,7 +689,8 @@ static void rhash_free_rsb(void *ptr, void *arg)
 
 static int release_lockspace(struct dlm_ls *ls, int force)
 {
-	struct dlm_rsb *rsb;
+	struct dlm_lkb *lkb;
+	unsigned long id;
 	int busy, rv;
 
 	busy = lockspace_busy(ls, force);
@@ -739,7 +730,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 	 * time_shutdown_sync(), we don't care anymore
 	 */
 	clear_bit(LSFL_RUNNING, &ls->ls_flags);
-	timer_shutdown_sync(&ls->ls_timer);
+	timer_shutdown_sync(&ls->ls_scan_timer);
 
 	if (ls_count == 1) {
 		dlm_clear_members(ls);
@@ -752,28 +743,22 @@ static int release_lockspace(struct dlm_ls *ls, int force)
 
 	dlm_delete_debug_file(ls);
 
-	idr_destroy(&ls->ls_recover_idr);
+	xa_destroy(&ls->ls_recover_xa);
 	kfree(ls->ls_recover_buf);
 
 	/*
-	 * Free all lkb's in idr
+	 * Free all lkb's in xa
 	 */
 
-	idr_for_each(&ls->ls_lkbidr, lkb_idr_free, ls);
-	idr_destroy(&ls->ls_lkbidr);
+	xa_for_each(&ls->ls_lkbxa, id, lkb) {
+		lkb_idr_free(lkb);
+	}
+	xa_destroy(&ls->ls_lkbxa);
 
 	/*
 	 * Free all rsb's on rsbtbl
 	 */
 	rhashtable_free_and_destroy(&ls->ls_rsbtbl, rhash_free_rsb, NULL);
 
-	while (!list_empty(&ls->ls_new_rsb)) {
-		rsb = list_first_entry(&ls->ls_new_rsb, struct dlm_rsb,
-				       res_hashchain);
-		list_del(&rsb->res_hashchain);
-		dlm_free_rsb(rsb);
-	}
-
 	/*
 	 * Free structures on any other lists
 	 */
fs/dlm/lowcomms.c
@@ -461,7 +461,7 @@ static bool dlm_lowcomms_con_has_addr(const struct connection *con,
 	return false;
 }
 
-int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr)
 {
 	struct connection *con;
 	bool ret, idx;
@@ -858,12 +858,6 @@ static void free_processqueue_entry(struct processqueue_entry *pentry)
 	kfree(pentry);
 }
 
-struct dlm_processed_nodes {
-	int nodeid;
-
-	struct list_head list;
-};
-
 static void process_dlm_messages(struct work_struct *work)
 {
 	struct processqueue_entry *pentry;
fs/dlm/lowcomms.h
@@ -46,7 +46,7 @@ void dlm_lowcomms_put_msg(struct dlm_msg *msg);
 int dlm_lowcomms_resend_msg(struct dlm_msg *msg);
 int dlm_lowcomms_connect_node(int nodeid);
 int dlm_lowcomms_nodes_set_mark(int nodeid, unsigned int mark);
-int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr);
 void dlm_midcomms_receive_done(int nodeid);
 struct kmem_cache *dlm_lowcomms_writequeue_cache_create(void);
 struct kmem_cache *dlm_lowcomms_msg_cache_create(void);
fs/dlm/member.c
@@ -642,7 +642,7 @@ int dlm_ls_stop(struct dlm_ls *ls)
 	set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
 	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
 	if (new)
-		timer_delete_sync(&ls->ls_timer);
+		timer_delete_sync(&ls->ls_scan_timer);
 	ls->ls_recover_seq++;
 
 	/* activate requestqueue and stop processing */
fs/dlm/memory.c
@@ -72,6 +72,8 @@ out:
 
 void dlm_memory_exit(void)
 {
+	rcu_barrier();
+
 	kmem_cache_destroy(writequeue_cache);
 	kmem_cache_destroy(mhandle_cache);
 	kmem_cache_destroy(msg_cache);
@@ -101,13 +103,19 @@ struct dlm_rsb *dlm_allocate_rsb(struct dlm_ls *ls)
 	return r;
 }
 
-void dlm_free_rsb(struct dlm_rsb *r)
+static void __free_rsb_rcu(struct rcu_head *rcu)
 {
+	struct dlm_rsb *r = container_of(rcu, struct dlm_rsb, rcu);
 	if (r->res_lvbptr)
 		dlm_free_lvb(r->res_lvbptr);
 	kmem_cache_free(rsb_cache, r);
 }
 
+void dlm_free_rsb(struct dlm_rsb *r)
+{
+	call_rcu(&r->rcu, __free_rsb_rcu);
+}
+
 struct dlm_lkb *dlm_allocate_lkb(struct dlm_ls *ls)
 {
 	struct dlm_lkb *lkb;
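The rcu_barrier() added to dlm_memory_exit() pairs with the call_rcu() based freeing of rsbs above: without it, kmem_cache_destroy() could run while __free_rsb_rcu() callbacks are still pending. The generic shape of this pattern, as a standalone sketch:

	struct obj {
		struct rcu_head rcu;
		/* ... payload ... */
	};

	static struct kmem_cache *obj_cache;

	static void obj_free_rcu(struct rcu_head *rcu)
	{
		kmem_cache_free(obj_cache, container_of(rcu, struct obj, rcu));
	}

	static void obj_free(struct obj *o)
	{
		call_rcu(&o->rcu, obj_free_rcu); /* readers may still hold o */
	}

	static void obj_exit(void)
	{
		rcu_barrier();		/* wait for pending obj_free_rcu() */
		kmem_cache_destroy(obj_cache);
	}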
fs/dlm/midcomms.c
@@ -334,12 +334,12 @@ static struct midcomms_node *nodeid2node(int nodeid)
 	return __find_node(nodeid, nodeid_hash(nodeid));
 }
 
-int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
+int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr)
 {
 	int ret, idx, r = nodeid_hash(nodeid);
 	struct midcomms_node *node;
 
-	ret = dlm_lowcomms_addr(nodeid, addr, len);
+	ret = dlm_lowcomms_addr(nodeid, addr);
 	if (ret)
 		return ret;
 
fs/dlm/midcomms.h
@@ -19,7 +19,7 @@ int dlm_process_incoming_buffer(int nodeid, unsigned char *buf, int buflen);
 struct dlm_mhandle *dlm_midcomms_get_mhandle(int nodeid, int len, char **ppc);
 void dlm_midcomms_commit_mhandle(struct dlm_mhandle *mh, const void *name,
 				 int namelen);
-int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr, int len);
+int dlm_midcomms_addr(int nodeid, struct sockaddr_storage *addr);
 void dlm_midcomms_version_wait(void);
 int dlm_midcomms_close(int nodeid);
 int dlm_midcomms_start(void);
fs/dlm/recover.c
@@ -293,73 +293,78 @@ static void recover_list_clear(struct dlm_ls *ls)
 	spin_unlock_bh(&ls->ls_recover_list_lock);
 }
 
-static int recover_idr_empty(struct dlm_ls *ls)
+static int recover_xa_empty(struct dlm_ls *ls)
 {
 	int empty = 1;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
 	if (ls->ls_recover_list_count)
 		empty = 0;
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 
 	return empty;
 }
 
-static int recover_idr_add(struct dlm_rsb *r)
+static int recover_xa_add(struct dlm_rsb *r)
 {
 	struct dlm_ls *ls = r->res_ls;
+	struct xa_limit limit = {
+		.min = 1,
+		.max = UINT_MAX,
+	};
+	uint32_t id;
 	int rv;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
 	if (r->res_id) {
 		rv = -1;
 		goto out_unlock;
 	}
-	rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+	rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC);
 	if (rv < 0)
 		goto out_unlock;
 
-	r->res_id = rv;
+	r->res_id = id;
 	ls->ls_recover_list_count++;
 	dlm_hold_rsb(r);
 	rv = 0;
out_unlock:
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 	return rv;
 }
 
-static void recover_idr_del(struct dlm_rsb *r)
+static void recover_xa_del(struct dlm_rsb *r)
 {
 	struct dlm_ls *ls = r->res_ls;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
-	idr_remove(&ls->ls_recover_idr, r->res_id);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
+	xa_erase_bh(&ls->ls_recover_xa, r->res_id);
 	r->res_id = 0;
 	ls->ls_recover_list_count--;
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 
 	dlm_put_rsb(r);
 }
 
-static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
+static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id)
 {
 	struct dlm_rsb *r;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
-	r = idr_find(&ls->ls_recover_idr, (int)id);
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
+	r = xa_load(&ls->ls_recover_xa, (int)id);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 	return r;
 }
 
-static void recover_idr_clear(struct dlm_ls *ls)
+static void recover_xa_clear(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r;
-	int id;
+	unsigned long id;
 
-	spin_lock_bh(&ls->ls_recover_idr_lock);
+	spin_lock_bh(&ls->ls_recover_xa_lock);
 
-	idr_for_each_entry(&ls->ls_recover_idr, r, id) {
-		idr_remove(&ls->ls_recover_idr, id);
+	xa_for_each(&ls->ls_recover_xa, id, r) {
+		xa_erase_bh(&ls->ls_recover_xa, id);
 		r->res_id = 0;
 		r->res_recover_locks_count = 0;
 		ls->ls_recover_list_count--;
@@ -372,7 +377,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
 			  ls->ls_recover_list_count);
 		ls->ls_recover_list_count = 0;
 	}
-	spin_unlock_bh(&ls->ls_recover_idr_lock);
+	spin_unlock_bh(&ls->ls_recover_xa_lock);
 }
 
 
@@ -470,7 +475,7 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
 		set_new_master(r);
 		error = 0;
 	} else {
-		recover_idr_add(r);
+		recover_xa_add(r);
 		error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
 	}
 
@@ -551,10 +556,10 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
 
 	log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
 
-	error = dlm_wait_function(ls, &recover_idr_empty);
+	error = dlm_wait_function(ls, &recover_xa_empty);
out:
 	if (error)
-		recover_idr_clear(ls);
+		recover_xa_clear(ls);
 	return error;
 }
 
@@ -563,7 +568,7 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
 	struct dlm_rsb *r;
 	int ret_nodeid, new_master;
 
-	r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
+	r = recover_xa_find(ls, le64_to_cpu(rc->rc_id));
 	if (!r) {
 		log_error(ls, "dlm_recover_master_reply no id %llx",
 			  (unsigned long long)le64_to_cpu(rc->rc_id));
@@ -582,9 +587,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
 	r->res_nodeid = new_master;
 	set_new_master(r);
 	unlock_rsb(r);
-	recover_idr_del(r);
+	recover_xa_del(r);
 
-	if (recover_idr_empty(ls))
+	if (recover_xa_empty(ls))
 		wake_up(&ls->ls_wait_general);
out:
 	return 0;
@@ -877,29 +882,26 @@ void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list)
 	log_rinfo(ls, "dlm_recover_rsbs %d done", count);
 }
 
-/* Create a single list of all root rsb's to be used during recovery */
-
-void dlm_clear_toss(struct dlm_ls *ls)
+void dlm_clear_inactive(struct dlm_ls *ls)
 {
 	struct dlm_rsb *r, *safe;
 	unsigned int count = 0;
 
 	write_lock_bh(&ls->ls_rsbtbl_lock);
-	list_for_each_entry_safe(r, safe, &ls->ls_toss, res_rsbs_list) {
-		list_del(&r->res_rsbs_list);
+	list_for_each_entry_safe(r, safe, &ls->ls_slow_inactive, res_slow_list) {
+		list_del(&r->res_slow_list);
 		rhashtable_remove_fast(&ls->ls_rsbtbl, &r->res_node,
 				       dlm_rhash_rsb_params);
 
-		/* remove it from the toss queue if its part of it */
-		if (!list_empty(&r->res_toss_q_list))
-			list_del_init(&r->res_toss_q_list);
+		if (!list_empty(&r->res_scan_list))
+			list_del_init(&r->res_scan_list);
 
-		free_toss_rsb(r);
+		free_inactive_rsb(r);
 		count++;
 	}
 	write_unlock_bh(&ls->ls_rsbtbl_lock);
 
 	if (count)
-		log_rinfo(ls, "dlm_clear_toss %u done", count);
+		log_rinfo(ls, "dlm_clear_inactive %u done", count);
 }
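Note the API difference driving the conversion above: idr_alloc() returns the new id (or a negative error), while xa_alloc() returns 0 and hands the id back through a pointer, with the allowed range given as a struct xa_limit. A minimal standalone illustration, where "entry" is a placeholder object pointer:

	struct xarray xa;
	u32 id;
	int ret;

	xa_init_flags(&xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);

	/* ids are allocated in [1, UINT_MAX], matching the xa_limit
	 * used by recover_xa_add() above */
	ret = xa_alloc(&xa, &id, entry, XA_LIMIT(1, UINT_MAX), GFP_ATOMIC);
	if (ret)
		return ret;	/* -EBUSY or -ENOMEM */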
fs/dlm/recover.h
@@ -25,7 +25,7 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc);
 int dlm_recover_locks(struct dlm_ls *ls, uint64_t seq,
 		      const struct list_head *root_list);
 void dlm_recovered_lock(struct dlm_rsb *r);
-void dlm_clear_toss(struct dlm_ls *ls);
+void dlm_clear_inactive(struct dlm_ls *ls);
 void dlm_recover_rsbs(struct dlm_ls *ls, const struct list_head *root_list);
 
 #endif				/* __RECOVER_DOT_H__ */
fs/dlm/recoverd.c
@@ -33,7 +33,7 @@ static int dlm_create_masters_list(struct dlm_ls *ls)
 	}
 
 	read_lock_bh(&ls->ls_rsbtbl_lock);
-	list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+	list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
 		if (r->res_nodeid)
 			continue;
 
@@ -63,12 +63,12 @@ static void dlm_create_root_list(struct dlm_ls *ls, struct list_head *root_list)
 	struct dlm_rsb *r;
 
 	read_lock_bh(&ls->ls_rsbtbl_lock);
-	list_for_each_entry(r, &ls->ls_keep, res_rsbs_list) {
+	list_for_each_entry(r, &ls->ls_slow_active, res_slow_list) {
 		list_add(&r->res_root_list, root_list);
 		dlm_hold_rsb(r);
 	}
 
-	WARN_ON_ONCE(!list_empty(&ls->ls_toss));
+	WARN_ON_ONCE(!list_empty(&ls->ls_slow_inactive));
 	read_unlock_bh(&ls->ls_rsbtbl_lock);
 }
 
@@ -98,16 +98,16 @@ static int enable_locking(struct dlm_ls *ls, uint64_t seq)
 	spin_lock_bh(&ls->ls_recover_lock);
 	if (ls->ls_recover_seq == seq) {
 		set_bit(LSFL_RUNNING, &ls->ls_flags);
-		/* Schedule next timer if recovery put something on toss.
+		/* Schedule next timer if recovery put something on inactive.
 		 *
 		 * The rsbs that was queued while recovery on toss hasn't
 		 * started yet because LSFL_RUNNING was set everything
 		 * else recovery hasn't started as well because ls_in_recovery
 		 * is still hold. So we should not run into the case that
-		 * dlm_timer_resume() queues a timer that can occur in
+		 * resume_scan_timer() queues a timer that can occur in
 		 * a no op.
 		 */
-		dlm_timer_resume(ls);
+		resume_scan_timer(ls);
 		/* unblocks processes waiting to enter the dlm */
 		up_write(&ls->ls_in_recovery);
 		clear_bit(LSFL_RECOVER_LOCK, &ls->ls_flags);
@@ -131,7 +131,7 @@ static int ls_recover(struct dlm_ls *ls, struct dlm_recover *rv)
 
 	dlm_callback_suspend(ls);
 
-	dlm_clear_toss(ls);
+	dlm_clear_inactive(ls);
 
 	/*
 	 * This list of root rsb's will be the basis of most of the recovery
fs/dlm/user.c
@@ -182,7 +182,7 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 	struct dlm_user_args *ua;
 	struct dlm_user_proc *proc;
 	struct dlm_callback *cb;
-	int rv;
+	int rv, copy_lvb;
 
 	if (test_bit(DLM_DFL_ORPHAN_BIT, &lkb->lkb_dflags) ||
 	    test_bit(DLM_IFL_DEAD_BIT, &lkb->lkb_iflags))
@@ -213,28 +213,22 @@ void dlm_user_add_ast(struct dlm_lkb *lkb, uint32_t flags, int mode,
 
 	spin_lock_bh(&proc->asts_spin);
 
-	rv = dlm_queue_lkb_callback(lkb, flags, mode, status, sbflags, &cb);
-	switch (rv) {
-	case DLM_ENQUEUE_CALLBACK_NEED_SCHED:
-		cb->ua = *ua;
-		cb->lkb_lksb = &cb->ua.lksb;
-		if (cb->copy_lvb) {
-			memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
-			       DLM_USER_LVB_LEN);
-			cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
-		}
+	if (!dlm_may_skip_callback(lkb, flags, mode, status, sbflags,
+				   &copy_lvb)) {
+		rv = dlm_get_cb(lkb, flags, mode, status, sbflags, &cb);
+		if (!rv) {
+			cb->copy_lvb = copy_lvb;
+			cb->ua = *ua;
+			cb->lkb_lksb = &cb->ua.lksb;
+			if (copy_lvb) {
+				memcpy(cb->lvbptr, ua->lksb.sb_lvbptr,
+				       DLM_USER_LVB_LEN);
+				cb->lkb_lksb->sb_lvbptr = cb->lvbptr;
+			}
 
-		list_add_tail(&cb->list, &proc->asts);
-		wake_up_interruptible(&proc->wait);
-		break;
-	case DLM_ENQUEUE_CALLBACK_SUCCESS:
-		break;
-	case DLM_ENQUEUE_CALLBACK_FAILURE:
-		fallthrough;
-	default:
-		spin_unlock_bh(&proc->asts_spin);
-		WARN_ON_ONCE(1);
-		goto out;
+			list_add_tail(&cb->list, &proc->asts);
+			wake_up_interruptible(&proc->wait);
+		}
 	}
 	spin_unlock_bh(&proc->asts_spin);
 
@@ -454,7 +448,7 @@ static int device_remove_lockspace(struct dlm_lspace_params *params)
 	if (params->flags & DLM_USER_LSFLG_FORCEFREE)
 		force = 2;
 
-	lockspace = ls->ls_local_handle;
+	lockspace = ls;
 	dlm_put_lockspace(ls);
 
 	/* The final dlm_release_lockspace waits for references to go to
@@ -657,7 +651,7 @@ static int device_open(struct inode *inode, struct file *file)
 		return -ENOMEM;
 	}
 
-	proc->lockspace = ls->ls_local_handle;
+	proc->lockspace = ls;
 	INIT_LIST_HEAD(&proc->asts);
 	INIT_LIST_HEAD(&proc->locks);
 	INIT_LIST_HEAD(&proc->unlocking);
include/linux/dlm.h
@@ -35,6 +35,9 @@ struct dlm_lockspace_ops {
 			   int num_slots, int our_slot, uint32_t generation);
 };
 
+/* only relevant for kernel lockspaces, will be removed in future */
+#define DLM_LSFL_SOFTIRQ __DLM_LSFL_RESERVED0
+
 /*
  * dlm_new_lockspace
  *
@@ -55,6 +58,11 @@ struct dlm_lockspace_ops {
  *   used to select the directory node.  Must be the same on all nodes.
  * DLM_LSFL_NEWEXCL
  *   dlm_new_lockspace() should return -EEXIST if the lockspace exists.
+ * DLM_LSFL_SOFTIRQ
+ *   dlm request callbacks (ast, bast) are softirq safe. Flag should be
+ *   preferred by users. Will be default in some future. If set the
+ *   strongest context for ast, bast callback is softirq as it avoids
+ *   an additional context switch.
 *
 * lvblen: length of lvb in bytes.  Must be multiple of 8.
 *   dlm_new_lockspace() returns an error if this does not match
@@ -121,7 +129,14 @@ int dlm_release_lockspace(dlm_lockspace_t *lockspace, int force);
 * call.
 *
 * AST routines should not block (at least not for long), but may make
- * any locking calls they please.
+ * any locking calls they please. If DLM_LSFL_SOFTIRQ for kernel
+ * users of dlm_new_lockspace() is passed the ast and bast callbacks
+ * can be processed in softirq context. Also some of the callback
+ * contexts are in the same context as the DLM lock request API, users
+ * must not hold locks while calling dlm lock request API and trying
+ * to acquire this lock in the callback again, this will end in a
+ * lock recursion. For newer implementation the DLM_LSFL_SOFTIRQ
+ * should be used.
 */

 int dlm_lock(dlm_lockspace_t *lockspace,
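Given the documentation above, callbacks for a DLM_LSFL_SOFTIRQ lockspace must themselves be softirq safe. A hypothetical pair of callbacks sketching the constraints (the names and the completion-based design are illustrative, not from this commit):

	static void example_ast(void *astparam)
	{
		struct example_lock *lk = astparam;	/* hypothetical type */

		/* May run in softirq context: no sleeping, no mutexes, and
		 * no recursive dlm_lock()/dlm_unlock() on the same lock. */
		complete(&lk->granted);
	}

	static void example_bast(void *astparam, int mode)
	{
		struct example_lock *lk = astparam;

		/* Another node wants "mode"; defer the actual release. */
		schedule_work(&lk->release_work);
	}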
include/uapi/linux/dlm.h
@@ -71,6 +71,8 @@ struct dlm_lksb {
 /* DLM_LSFL_TIMEWARN is deprecated and reserved. DO NOT USE! */
 #define DLM_LSFL_TIMEWARN	0x00000002
 #define DLM_LSFL_NEWEXCL	0x00000008
+/* currently reserved due in-kernel use */
+#define __DLM_LSFL_RESERVED0	0x00000010
 
 
 #endif /* _UAPI__DLM_DOT_H__ */