// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2011 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "lockspace.h"
#include "member.h"
#include "recoverd.h"
#include "recover.h"
#include "rcom.h"
#include "config.h"
#include "midcomms.h"
#include "lowcomms.h"

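/* Returns 1 if the sender's header version includes slot support
   (minor version at least DLM_HEADER_SLOTS), otherwise 0. */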
int dlm_slots_version(const struct dlm_header *h)
{
	if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS)
		return 0;
	return 1;
}

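/* Save the slot number and generation carried in the received rcom_config
   into this member's entry; skipped if the sender predates slot support. */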
void dlm_slot_save(struct dlm_ls *ls, struct dlm_rcom *rc,
		   struct dlm_member *memb)
{
	struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;

	if (!dlm_slots_version(&rc->rc_header))
		return;

	memb->slot = le16_to_cpu(rf->rf_our_slot);
	memb->generation = le32_to_cpu(rf->rf_generation);
}

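/* Pack the sparse ls_slots array into the dense rcom_slot array that
   follows rcom_config in an outgoing recovery message. */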
void dlm_slots_copy_out(struct dlm_ls *ls, struct dlm_rcom *rc)
{
	struct dlm_slot *slot;
	struct rcom_slot *ro;
	int i;

	ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

	/* ls_slots array is sparse, but not rcom_slots */

	for (i = 0; i < ls->ls_slots_size; i++) {
		slot = &ls->ls_slots[i];
		if (!slot->nodeid)
			continue;
		ro->ro_nodeid = cpu_to_le32(slot->nodeid);
		ro->ro_slot = cpu_to_le16(slot->slot);
		ro++;
	}
}

#define SLOT_DEBUG_LINE 128

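/* Log the slot table as "slot:nodeid" pairs on one debug line, taken
   either from a dlm_slot array or from an rcom_slot array (ro0). */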
static void log_slots(struct dlm_ls *ls, uint32_t gen, int num_slots,
		      struct rcom_slot *ro0, struct dlm_slot *array,
		      int array_size)
{
	char line[SLOT_DEBUG_LINE];
	int len = SLOT_DEBUG_LINE - 1;
	int pos = 0;
	int ret, i;

	memset(line, 0, sizeof(line));

	if (array) {
		for (i = 0; i < array_size; i++) {
			if (!array[i].nodeid)
				continue;

			ret = snprintf(line + pos, len - pos, " %d:%d",
				       array[i].slot, array[i].nodeid);
			if (ret >= len - pos)
				break;
			pos += ret;
		}
	} else if (ro0) {
		for (i = 0; i < num_slots; i++) {
			ret = snprintf(line + pos, len - pos, " %d:%d",
				       ro0[i].ro_slot, ro0[i].ro_nodeid);
			if (ret >= len - pos)
				break;
			pos += ret;
		}
	}

	log_rinfo(ls, "generation %u slots %d%s", gen, num_slots, line);
}

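/* Apply the slot assignments received in ls_recover_buf: update each
   member's slot, check that our own slot has not changed, and advance
   ls_generation.  Returns -1 if the sender does not support slots or
   the received data is inconsistent. */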
int dlm_slots_copy_in(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_rcom *rc = ls->ls_recover_buf;
	struct rcom_config *rf = (struct rcom_config *)rc->rc_buf;
	struct rcom_slot *ro0, *ro;
	int our_nodeid = dlm_our_nodeid();
	int i, num_slots;
	uint32_t gen;

	if (!dlm_slots_version(&rc->rc_header))
		return -1;

	gen = le32_to_cpu(rf->rf_generation);
	if (gen <= ls->ls_generation) {
		log_error(ls, "dlm_slots_copy_in gen %u old %u",
			  gen, ls->ls_generation);
	}
	ls->ls_generation = gen;

	num_slots = le16_to_cpu(rf->rf_num_slots);
	if (!num_slots)
		return -1;

	ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));

	log_slots(ls, gen, num_slots, ro0, NULL, 0);

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
			if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)
				continue;
			memb->slot = le16_to_cpu(ro->ro_slot);
			memb->slot_prev = memb->slot;
			break;
		}

		if (memb->nodeid == our_nodeid) {
			if (ls->ls_slot && ls->ls_slot != memb->slot) {
				log_error(ls, "dlm_slots_copy_in our slot "
					  "changed %d %d", ls->ls_slot,
					  memb->slot);
				return -1;
			}

			if (!ls->ls_slot)
				ls->ls_slot = memb->slot;
		}

		if (!memb->slot) {
			log_error(ls, "dlm_slots_copy_in nodeid %d no slot",
				  memb->nodeid);
			return -1;
		}
	}

	return 0;
}

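/* Build a new slot array for the current membership: members keep the
   slots they already hold, members without a slot get an unused offset,
   and the generation is incremented.  The results are returned through
   the out parameters. */
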
/* for any nodes that do not support slots, we will not have set memb->slot
   in wait_status_all(), so memb->slot will remain -1, and we will not
   assign slots or set ls_num_slots here */

int dlm_slots_assign(struct dlm_ls *ls, int *num_slots, int *slots_size,
		     struct dlm_slot **slots_out, uint32_t *gen_out)
{
	struct dlm_member *memb;
	struct dlm_slot *array;
	int our_nodeid = dlm_our_nodeid();
	int array_size, max_slots, i;
	int need = 0;
	int max = 0;
	int num = 0;
	uint32_t gen = 0;

	/* our own memb struct will have slot -1 gen 0 */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->nodeid == our_nodeid) {
			memb->slot = ls->ls_slot;
			memb->generation = ls->ls_generation;
			break;
		}
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->generation > gen)
			gen = memb->generation;

		/* node doesn't support slots */

		if (memb->slot == -1)
			return -1;

		/* node needs a slot assigned */

		if (!memb->slot)
			need++;

		/* node has a slot assigned */

		num++;

		if (!max || max < memb->slot)
			max = memb->slot;

		/* sanity check, once slot is assigned it shouldn't change */

		if (memb->slot_prev && memb->slot && memb->slot_prev != memb->slot) {
			log_error(ls, "nodeid %d slot changed %d %d",
				  memb->nodeid, memb->slot_prev, memb->slot);
			return -1;
		}
		memb->slot_prev = memb->slot;
	}

	array_size = max + need;
	array = kcalloc(array_size, sizeof(*array), GFP_NOFS);
	if (!array)
		return -ENOMEM;

	num = 0;

	/* fill in slots (offsets) that are used */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!memb->slot)
			continue;

		if (memb->slot > array_size) {
			log_error(ls, "invalid slot number %d", memb->slot);
			kfree(array);
			return -1;
		}

		array[memb->slot - 1].nodeid = memb->nodeid;
		array[memb->slot - 1].slot = memb->slot;
		num++;
	}

	/* assign new slots from unused offsets */

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->slot)
			continue;

		for (i = 0; i < array_size; i++) {
			if (array[i].nodeid)
				continue;

			memb->slot = i + 1;
			memb->slot_prev = memb->slot;
			array[i].nodeid = memb->nodeid;
			array[i].slot = memb->slot;
			num++;

			if (!ls->ls_slot && memb->nodeid == our_nodeid)
				ls->ls_slot = memb->slot;
			break;
		}

		if (!memb->slot) {
			log_error(ls, "no free slot found");
			kfree(array);
			return -1;
		}
	}

	gen++;

	log_slots(ls, gen, num, NULL, array, array_size);

	max_slots = (DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom) -
		     sizeof(struct rcom_config)) / sizeof(struct rcom_slot);

	if (num > max_slots) {
		log_error(ls, "num_slots %d exceeds max_slots %d",
			  num, max_slots);
		kfree(array);
		return -1;
	}

	*gen_out = gen;
	*slots_out = array;
	*slots_size = array_size;
	*num_slots = num;
	return 0;
}

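/* Insert the new member into ls_nodes, keeping the list sorted by nodeid. */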
static void add_ordered_member(struct dlm_ls *ls, struct dlm_member *new)
{
	struct dlm_member *memb = NULL;
	struct list_head *tmp;
	struct list_head *newlist = &new->list;
	struct list_head *head = &ls->ls_nodes;

	list_for_each(tmp, head) {
		memb = list_entry(tmp, struct dlm_member, list);
		if (new->nodeid < memb->nodeid)
			break;
	}

	if (!memb)
		list_add_tail(newlist, head);
	else {
		/* FIXME: can use list macro here */
		newlist->prev = tmp->prev;
		newlist->next = tmp;
		tmp->prev->next = newlist;
		tmp->prev = newlist;
	}
}

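/* Open the lowcomms connection to a remote node and register it with
   midcomms; nothing to do for our own nodeid. */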
static int add_remote_member(int nodeid)
{
	int error;

	if (nodeid == dlm_our_nodeid())
		return 0;

	error = dlm_lowcomms_connect_node(nodeid);
	if (error < 0)
		return error;

	dlm_midcomms_add_member(nodeid);
	return 0;
}

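/* Allocate a dlm_member for a configured node, connect to it, and add
   it to ls_nodes. */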
static int dlm_add_member(struct dlm_ls *ls, struct dlm_config_node *node)
{
	struct dlm_member *memb;
	int error;

	memb = kzalloc(sizeof(*memb), GFP_NOFS);
	if (!memb)
		return -ENOMEM;

	memb->nodeid = node->nodeid;
	memb->weight = node->weight;
	memb->comm_seq = node->comm_seq;

	error = add_remote_member(node->nodeid);
	if (error < 0) {
		kfree(memb);
		return error;
	}

	add_ordered_member(ls, memb);
	ls->ls_num_nodes++;
	return 0;
}

static struct dlm_member *find_memb(struct list_head *head, int nodeid)
{
	struct dlm_member *memb;

	list_for_each_entry(memb, head, list) {
		if (memb->nodeid == nodeid)
			return memb;
	}
	return NULL;
}

int dlm_is_member(struct dlm_ls *ls, int nodeid)
{
	if (find_memb(&ls->ls_nodes, nodeid))
		return 1;
	return 0;
}

int dlm_is_removed(struct dlm_ls *ls, int nodeid)
{
	WARN_ON_ONCE(!nodeid || nodeid == -1);

	if (find_memb(&ls->ls_nodes_gone, nodeid))
		return 1;
	return 0;
}

static void clear_memb_list(struct list_head *head,
			    void (*after_del)(int nodeid))
{
	struct dlm_member *memb;

	while (!list_empty(head)) {
		memb = list_entry(head->next, struct dlm_member, list);
		list_del(&memb->list);
		if (after_del)
			after_del(memb->nodeid);
		kfree(memb);
	}
}

static void remove_remote_member(int nodeid)
{
	if (nodeid == dlm_our_nodeid())
		return;

	dlm_midcomms_remove_member(nodeid);
}

void dlm_clear_members(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes, remove_remote_member);
	ls->ls_num_nodes = 0;
}

void dlm_clear_members_gone(struct dlm_ls *ls)
{
	clear_memb_list(&ls->ls_nodes_gone, NULL);
}

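/* Rebuild ls_node_array, repeating each nodeid according to its weight
   (every node counts as weight 1 if all weights are zero), so lookups
   into the array pick nodes in proportion to their weight. */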
static void make_member_array(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	int i, w, x = 0, total = 0, all_zero = 0, *array;

	kfree(ls->ls_node_array);
	ls->ls_node_array = NULL;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (memb->weight)
			total += memb->weight;
	}

	/* all nodes revert to weight of 1 if all have weight 0 */

	if (!total) {
		total = ls->ls_num_nodes;
		all_zero = 1;
	}

	ls->ls_total_weight = total;
	array = kmalloc_array(total, sizeof(*array), GFP_NOFS);
	if (!array)
		return;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (!all_zero && !memb->weight)
			continue;

		if (all_zero)
			w = 1;
		else
			w = memb->weight;

		DLM_ASSERT(x < total, printk("total %d x %d\n", total, x););

		for (i = 0; i < w; i++)
			array[x++] = memb->nodeid;
	}

	ls->ls_node_array = array;
}

/* send a status request to all members just to establish comms connections */

static int ping_members(struct dlm_ls *ls, uint64_t seq)
{
	struct dlm_member *memb;
	int error = 0;

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (dlm_recovery_stopped(ls)) {
			error = -EINTR;
			break;
		}
		error = dlm_rcom_status(ls, memb->nodeid, 0, seq);
		if (error)
			break;
	}
	if (error)
		log_rinfo(ls, "ping_members aborted %d last nodeid %d",
			  error, ls->ls_recover_nodeid);
	return error;
}

static void dlm_lsop_recover_prep(struct dlm_ls *ls)
{
	if (!ls->ls_ops || !ls->ls_ops->recover_prep)
		return;
	ls->ls_ops->recover_prep(ls->ls_ops_arg);
}

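/* Tell the lockspace user (recover_slot op) that a member has gone away,
   unless its comms connection is unchanged, which means it was removed
   cleanly rather than having failed. */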
static void dlm_lsop_recover_slot(struct dlm_ls *ls, struct dlm_member *memb)
{
	struct dlm_slot slot;
	uint32_t seq;
	int error;

	if (!ls->ls_ops || !ls->ls_ops->recover_slot)
		return;

	/* if there is no comms connection with this node
	   or the present comms connection is newer
	   than the one when this member was added, then
	   we consider the node to have failed (versus
	   being removed due to dlm_release_lockspace) */

	error = dlm_comm_seq(memb->nodeid, &seq);

	if (!error && seq == memb->comm_seq)
		return;

	slot.nodeid = memb->nodeid;
	slot.slot = memb->slot;

	ls->ls_ops->recover_slot(ls->ls_ops_arg, &slot);
}

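/* Report the final membership, slot table and generation to the
   lockspace user through the recover_done op. */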
void dlm_lsop_recover_done(struct dlm_ls *ls)
{
	struct dlm_member *memb;
	struct dlm_slot *slots;
	int i, num;

	if (!ls->ls_ops || !ls->ls_ops->recover_done)
		return;

	num = ls->ls_num_nodes;
	slots = kcalloc(num, sizeof(*slots), GFP_KERNEL);
	if (!slots)
		return;

	i = 0;
	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (i == num) {
			log_error(ls, "dlm_lsop_recover_done bad num %d", num);
			goto out;
		}
		slots[i].nodeid = memb->nodeid;
		slots[i].slot = memb->slot;
		i++;
	}

	ls->ls_ops->recover_done(ls->ls_ops_arg, slots, num,
				 ls->ls_slot, ls->ls_generation);
 out:
	kfree(slots);
}

static struct dlm_config_node *find_config_node(struct dlm_recover *rv,
						int nodeid)
{
	int i;

	for (i = 0; i < rv->nodes_count; i++) {
		if (rv->nodes[i].nodeid == nodeid)
			return &rv->nodes[i];
	}
	return NULL;
}

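/* Reconcile ls_nodes with the new configuration from userspace: move
   departed members to ls_nodes_gone, add new members, recompute the low
   nodeid and the weighted node array, then ping all members. */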
int dlm_recover_members(struct dlm_ls *ls, struct dlm_recover *rv, int *neg_out)
{
	struct dlm_member *memb, *safe;
	struct dlm_config_node *node;
	int i, error, neg = 0, low = -1;

	/* Previously removed members that we've not finished removing need to
	 * count as a negative change so the "neg" recovery steps will happen.
	 *
	 * This function must report all member changes to the lsops and
	 * midcomms layers and must never return before doing so.
	 */

	list_for_each_entry(memb, &ls->ls_nodes_gone, list) {
		log_rinfo(ls, "prev removed member %d", memb->nodeid);
		neg++;
	}

	/* move departed members from ls_nodes to ls_nodes_gone */

	list_for_each_entry_safe(memb, safe, &ls->ls_nodes, list) {
		node = find_config_node(rv, memb->nodeid);
		if (node && !node->new)
			continue;

		if (!node) {
			log_rinfo(ls, "remove member %d", memb->nodeid);
		} else {
			/* removed and re-added */
			log_rinfo(ls, "remove member %d comm_seq %u %u",
				  memb->nodeid, memb->comm_seq, node->comm_seq);
		}

		neg++;
		list_move(&memb->list, &ls->ls_nodes_gone);
		remove_remote_member(memb->nodeid);
		ls->ls_num_nodes--;
		dlm_lsop_recover_slot(ls, memb);
	}

	/* add new members to ls_nodes */

	for (i = 0; i < rv->nodes_count; i++) {
		node = &rv->nodes[i];
		if (dlm_is_member(ls, node->nodeid))
			continue;
		error = dlm_add_member(ls, node);
		if (error)
			return error;

		log_rinfo(ls, "add member %d", node->nodeid);
	}

	list_for_each_entry(memb, &ls->ls_nodes, list) {
		if (low == -1 || memb->nodeid < low)
			low = memb->nodeid;
	}
	ls->ls_low_nodeid = low;

	make_member_array(ls);
	*neg_out = neg;

	error = ping_members(ls, rv->seq);
	log_rinfo(ls, "dlm_recover_members %d nodes", ls->ls_num_nodes);
	return error;
}

/* Userspace guarantees that dlm_ls_stop() has completed on all nodes before
   dlm_ls_start() is called on any of them to start the new recovery. */

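/* Stop locking and message processing for this lockspace and kick
   dlm_recoverd so that any in-progress recovery is aborted; incoming
   normal messages are saved on the requestqueue until recovery
   completes. */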
int dlm_ls_stop(struct dlm_ls *ls)
{
	int new;

	/*
	 * Prevent dlm_recv from being in the middle of something when we do
	 * the stop.  This includes ensuring dlm_recv isn't processing a
	 * recovery message (rcom), while dlm_recoverd is aborting and
	 * resetting things from an in-progress recovery.  i.e. we want
	 * dlm_recoverd to abort its recovery without worrying about dlm_recv
	 * processing an rcom at the same time.  Stopping dlm_recv also makes
	 * it easy for dlm_receive_message() to check locking stopped and add a
	 * message to the requestqueue without races.
	 */

	write_lock_bh(&ls->ls_recv_active);

	/*
	 * Abort any recovery that's in progress (see RECOVER_STOP,
	 * dlm_recovery_stopped()) and tell any other threads running in the
	 * dlm to quit any processing (see RUNNING, dlm_locking_stopped()).
	 */

	spin_lock_bh(&ls->ls_recover_lock);
	set_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
	new = test_and_clear_bit(LSFL_RUNNING, &ls->ls_flags);
	if (new)
		timer_delete_sync(&ls->ls_scan_timer);
	ls->ls_recover_seq++;

	/* activate requestqueue and stop processing */
	write_lock_bh(&ls->ls_requestqueue_lock);
	set_bit(LSFL_RECV_MSG_BLOCKED, &ls->ls_flags);
	write_unlock_bh(&ls->ls_requestqueue_lock);
	spin_unlock_bh(&ls->ls_recover_lock);

	/*
	 * Let dlm_recv run again, now any normal messages will be saved on the
	 * requestqueue for later.
	 */

	write_unlock_bh(&ls->ls_recv_active);

	/*
	 * This in_recovery lock does two things:
	 * 1) Keeps this function from returning until all threads are out
	 *    of locking routines and locking is truly stopped.
	 * 2) Keeps any new requests from being processed until it's unlocked
	 *    when recovery is complete.
	 */

	if (new) {
		set_bit(LSFL_RECOVER_DOWN, &ls->ls_flags);
		wake_up_process(ls->ls_recoverd_task);
		wait_event(ls->ls_recover_lock_wait,
			   test_bit(LSFL_RECOVER_LOCK, &ls->ls_flags));
	}

	/*
	 * The recoverd suspend/resume makes sure that dlm_recoverd (if
	 * running) has noticed RECOVER_STOP above and quit processing the
	 * previous recovery.
	 */

	dlm_recoverd_suspend(ls);

	spin_lock_bh(&ls->ls_recover_lock);
	kfree(ls->ls_slots);
	ls->ls_slots = NULL;
	ls->ls_num_slots = 0;
	ls->ls_slots_size = 0;
	ls->ls_recover_status = 0;
	spin_unlock_bh(&ls->ls_recover_lock);

	dlm_recoverd_resume(ls);

	if (!ls->ls_recover_begin)
		ls->ls_recover_begin = jiffies;

	/* Call the recover_prep op only once, not again for every additional
	 * dlm_ls_stop() issued while recovery is already stopped.
	 *
	 * If we were able to clear the LSFL_RUNNING bit (i.e. it was set),
	 * we know this is the first dlm_ls_stop() call.
	 */
	if (new)
		dlm_lsop_recover_prep(ls);

	return 0;
}

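/* Hand the new node configuration to dlm_recoverd and wake it to run
   the next recovery; the lockspace must already be stopped. */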
int dlm_ls_start(struct dlm_ls *ls)
{
	struct dlm_recover *rv, *rv_old;
	struct dlm_config_node *nodes = NULL;
	int error, count;

	rv = kzalloc(sizeof(*rv), GFP_NOFS);
	if (!rv)
		return -ENOMEM;

	error = dlm_config_nodes(ls->ls_name, &nodes, &count);
	if (error < 0)
		goto fail_rv;

	spin_lock_bh(&ls->ls_recover_lock);

	/* the lockspace needs to be stopped before it can be started */

	if (!dlm_locking_stopped(ls)) {
		spin_unlock_bh(&ls->ls_recover_lock);
		log_error(ls, "start ignored: lockspace running");
		error = -EINVAL;
		goto fail;
	}

	rv->nodes = nodes;
	rv->nodes_count = count;
	rv->seq = ++ls->ls_recover_seq;
	rv_old = ls->ls_recover_args;
	ls->ls_recover_args = rv;
	spin_unlock_bh(&ls->ls_recover_lock);

	if (rv_old) {
		log_error(ls, "unused recovery %llx %d",
			  (unsigned long long)rv_old->seq, rv_old->nodes_count);
		kfree(rv_old->nodes);
		kfree(rv_old);
	}

	set_bit(LSFL_RECOVER_WORK, &ls->ls_flags);
	wake_up_process(ls->ls_recoverd_task);
	return 0;

 fail:
	kfree(nodes);
 fail_rv:
	kfree(rv);
	return error;
}