dlm: move recover idr to xarray datastructure

According to the kernel documentation, idr is deprecated and xarray should
be used instead. This patch moves the recover idr implementation to the
xarray data structure.

Signed-off-by: Alexander Aring <aahringo@redhat.com>
Signed-off-by: David Teigland <teigland@redhat.com>
Alexander Aring, 2024-05-28 17:12:39 -04:00 (committed by David Teigland)
parent f455eb8490
commit fa0b54f17a
3 changed files with 40 additions and 36 deletions
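
For reference, the conversion follows a mostly mechanical pattern: idr_alloc() with a start/end range becomes xa_alloc() with a struct xa_limit, and the new ID comes back through an output parameter instead of the return value. Below is a minimal, self-contained sketch of that mapping (an editorial illustration, not code from this patch; ex_xa and ex_store are made-up names):

        #include <linux/xarray.h>

        /* an allocating xarray; the idr version would have been
         * "static struct idr ex_idr;" plus idr_init(&ex_idr) */
        static DEFINE_XARRAY_FLAGS(ex_xa, XA_FLAGS_ALLOC);

        static int ex_store(void *ptr)
        {
                u32 id;
                int rv;

                /* old: rv = idr_alloc(&ex_idr, ptr, 1, 0, GFP_KERNEL);
                 * idr_alloc() returned the new id or -errno; xa_alloc()
                 * returns 0 or -errno and stores the id through &id */
                rv = xa_alloc(&ex_xa, &id, ptr, XA_LIMIT(1, UINT_MAX),
                              GFP_KERNEL);
                if (rv < 0)
                        return rv;

                /* old: idr_find(&ex_idr, id) / idr_remove(&ex_idr, id) */
                ptr = xa_load(&ex_xa, id);
                xa_erase(&ex_xa, id);
                return 0;
        }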

fs/dlm/dlm_internal.h

@@ -36,7 +36,6 @@
#include <linux/miscdevice.h>
#include <linux/rhashtable.h>
#include <linux/mutex.h>
-#include <linux/idr.h>
#include <linux/xarray.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
@@ -317,7 +316,7 @@ struct dlm_rsb {
int res_nodeid;
int res_master_nodeid;
int res_dir_nodeid;
-int res_id; /* for ls_recover_idr */
+unsigned long res_id; /* for ls_recover_xa */
uint32_t res_lvbseq;
uint32_t res_hash;
unsigned long res_toss_time;
@@ -649,8 +648,8 @@ struct dlm_ls {
struct list_head ls_recover_list;
spinlock_t ls_recover_list_lock;
int ls_recover_list_count;
-struct idr ls_recover_idr;
-spinlock_t ls_recover_idr_lock;
+struct xarray ls_recover_xa;
+spinlock_t ls_recover_xa_lock;
wait_queue_head_t ls_wait_general;
wait_queue_head_t ls_recover_lock_wait;
spinlock_t ls_clear_proc_locks;

fs/dlm/lockspace.c

@@ -481,8 +481,8 @@ static int new_lockspace(const char *name, const char *cluster,
INIT_LIST_HEAD(&ls->ls_recover_list);
spin_lock_init(&ls->ls_recover_list_lock);
-idr_init(&ls->ls_recover_idr);
-spin_lock_init(&ls->ls_recover_idr_lock);
+xa_init_flags(&ls->ls_recover_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);
+spin_lock_init(&ls->ls_recover_xa_lock);
ls->ls_recover_list_count = 0;
init_waitqueue_head(&ls->ls_wait_general);
INIT_LIST_HEAD(&ls->ls_masters_list);
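
An editorial note on the flags chosen here, not part of the commit message: XA_FLAGS_ALLOC enables ID allocation so xa_alloc() can be used on this xarray, and XA_FLAGS_LOCK_BH makes the xarray take its internal xa_lock with spin_lock_bh(), matching the bottom-half locking the recovery paths use. A minimal sketch with a hypothetical ex_xa:

        struct xarray ex_xa;

        /* XA_FLAGS_ALLOC: track free indices so xa_alloc() can hand out
         * IDs; XA_FLAGS_LOCK_BH: acquire the internal xa_lock with
         * spin_lock_bh(), so the array is safe to use from softirq context */
        xa_init_flags(&ex_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_BH);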
@@ -570,7 +570,7 @@ static int new_lockspace(const char *name, const char *cluster,
spin_lock_bh(&lslist_lock);
list_del(&ls->ls_list);
spin_unlock_bh(&lslist_lock);
-idr_destroy(&ls->ls_recover_idr);
+xa_destroy(&ls->ls_recover_xa);
kfree(ls->ls_recover_buf);
out_lkbxa:
xa_destroy(&ls->ls_lkbxa);
@@ -736,7 +736,7 @@ static int release_lockspace(struct dlm_ls *ls, int force)
dlm_delete_debug_file(ls);
-idr_destroy(&ls->ls_recover_idr);
+xa_destroy(&ls->ls_recover_xa);
kfree(ls->ls_recover_buf);
/*

fs/dlm/recover.c

@@ -293,73 +293,78 @@ static void recover_list_clear(struct dlm_ls *ls)
spin_unlock_bh(&ls->ls_recover_list_lock);
}
-static int recover_idr_empty(struct dlm_ls *ls)
+static int recover_xa_empty(struct dlm_ls *ls)
{
int empty = 1;
-spin_lock_bh(&ls->ls_recover_idr_lock);
+spin_lock_bh(&ls->ls_recover_xa_lock);
if (ls->ls_recover_list_count)
empty = 0;
-spin_unlock_bh(&ls->ls_recover_idr_lock);
+spin_unlock_bh(&ls->ls_recover_xa_lock);
return empty;
}
-static int recover_idr_add(struct dlm_rsb *r)
+static int recover_xa_add(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
+struct xa_limit limit = {
+.min = 1,
+.max = UINT_MAX,
+};
+uint32_t id;
int rv;
-spin_lock_bh(&ls->ls_recover_idr_lock);
+spin_lock_bh(&ls->ls_recover_xa_lock);
if (r->res_id) {
rv = -1;
goto out_unlock;
}
-rv = idr_alloc(&ls->ls_recover_idr, r, 1, 0, GFP_NOWAIT);
+rv = xa_alloc(&ls->ls_recover_xa, &id, r, limit, GFP_ATOMIC);
if (rv < 0)
goto out_unlock;
-r->res_id = rv;
+r->res_id = id;
ls->ls_recover_list_count++;
dlm_hold_rsb(r);
rv = 0;
out_unlock:
-spin_unlock_bh(&ls->ls_recover_idr_lock);
+spin_unlock_bh(&ls->ls_recover_xa_lock);
return rv;
}
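
Two details in recover_xa_add() deserve an editorial note: the limit starting at min = 1 keeps index 0 unused, so res_id == 0 retains its meaning of "not tracked for recovery", and GFP_ATOMIC is needed because the allocation runs under the bh spinlock, where sleeping is not allowed. Roughly, the pattern is (a sketch with hypothetical names, not the patch itself):

        static int ex_track(struct xarray *xa, spinlock_t *lock, void *ptr,
                            u32 *id)
        {
                int rv;

                spin_lock_bh(lock);
                /* GFP_ATOMIC: must not sleep while holding the spinlock;
                 * min = 1 reserves index 0 for "no id assigned" */
                rv = xa_alloc(xa, id, ptr, XA_LIMIT(1, UINT_MAX), GFP_ATOMIC);
                spin_unlock_bh(lock);
                return rv;
        }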
-static void recover_idr_del(struct dlm_rsb *r)
+static void recover_xa_del(struct dlm_rsb *r)
{
struct dlm_ls *ls = r->res_ls;
-spin_lock_bh(&ls->ls_recover_idr_lock);
-idr_remove(&ls->ls_recover_idr, r->res_id);
+spin_lock_bh(&ls->ls_recover_xa_lock);
+xa_erase_bh(&ls->ls_recover_xa, r->res_id);
r->res_id = 0;
ls->ls_recover_list_count--;
-spin_unlock_bh(&ls->ls_recover_idr_lock);
+spin_unlock_bh(&ls->ls_recover_xa_lock);
dlm_put_rsb(r);
}
-static struct dlm_rsb *recover_idr_find(struct dlm_ls *ls, uint64_t id)
+static struct dlm_rsb *recover_xa_find(struct dlm_ls *ls, uint64_t id)
{
struct dlm_rsb *r;
-spin_lock_bh(&ls->ls_recover_idr_lock);
-r = idr_find(&ls->ls_recover_idr, (int)id);
-spin_unlock_bh(&ls->ls_recover_idr_lock);
+spin_lock_bh(&ls->ls_recover_xa_lock);
+r = xa_load(&ls->ls_recover_xa, (int)id);
+spin_unlock_bh(&ls->ls_recover_xa_lock);
return r;
}
-static void recover_idr_clear(struct dlm_ls *ls)
+static void recover_xa_clear(struct dlm_ls *ls)
{
struct dlm_rsb *r;
-int id;
+unsigned long id;
-spin_lock_bh(&ls->ls_recover_idr_lock);
+spin_lock_bh(&ls->ls_recover_xa_lock);
-idr_for_each_entry(&ls->ls_recover_idr, r, id) {
-idr_remove(&ls->ls_recover_idr, id);
+xa_for_each(&ls->ls_recover_xa, id, r) {
+xa_erase_bh(&ls->ls_recover_xa, id);
r->res_id = 0;
r->res_recover_locks_count = 0;
ls->ls_recover_list_count--;
@@ -372,7 +377,7 @@ static void recover_idr_clear(struct dlm_ls *ls)
ls->ls_recover_list_count);
ls->ls_recover_list_count = 0;
}
-spin_unlock_bh(&ls->ls_recover_idr_lock);
+spin_unlock_bh(&ls->ls_recover_xa_lock);
}
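
The clear path is the one place where the conversion is not line-for-line (editorial note): xa_for_each() takes an unsigned long index where idr_for_each_entry() used an int, which is why the local id above and res_id in dlm_internal.h change type. xa_for_each() is documented to be safe against concurrent modification, so erasing the current entry inside the loop is fine. A minimal sketch with hypothetical names:

        unsigned long index;
        void *entry;

        /* xa_for_each() re-looks up the next present entry on every
         * iteration, so erasing the current index is safe */
        xa_for_each(&ex_xa, index, entry)
                xa_erase_bh(&ex_xa, index);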
@@ -470,7 +475,7 @@ static int recover_master(struct dlm_rsb *r, unsigned int *count, uint64_t seq)
set_new_master(r);
error = 0;
} else {
-recover_idr_add(r);
+recover_xa_add(r);
error = dlm_send_rcom_lookup(r, dir_nodeid, seq);
}
@@ -551,10 +556,10 @@ int dlm_recover_masters(struct dlm_ls *ls, uint64_t seq,
log_rinfo(ls, "dlm_recover_masters %u of %u", count, total);
-error = dlm_wait_function(ls, &recover_idr_empty);
+error = dlm_wait_function(ls, &recover_xa_empty);
out:
if (error)
-recover_idr_clear(ls);
+recover_xa_clear(ls);
return error;
}
@@ -563,7 +568,7 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
struct dlm_rsb *r;
int ret_nodeid, new_master;
-r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
+r = recover_xa_find(ls, le64_to_cpu(rc->rc_id));
if (!r) {
log_error(ls, "dlm_recover_master_reply no id %llx",
(unsigned long long)le64_to_cpu(rc->rc_id));
@@ -582,9 +587,9 @@ int dlm_recover_master_reply(struct dlm_ls *ls, const struct dlm_rcom *rc)
r->res_nodeid = new_master;
set_new_master(r);
unlock_rsb(r);
-recover_idr_del(r);
+recover_xa_del(r);
-if (recover_idr_empty(ls))
+if (recover_xa_empty(ls))
wake_up(&ls->ls_wait_general);
out:
return 0;