// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"

struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;

static const struct xfs_item_ops xfs_cui_item_ops;

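/* Pull the CUI log item out of its embedded struct xfs_log_item. */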
static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	kmem_free(cuip->cui_item.li_lv_shadow);
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}

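/*
 * Report the number of log iovecs and the byte count needed to log the CUI:
 * a single region covering the cui_log_format header and its extent array.
 */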
STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);

	cuip->cui_format.cui_type = XFS_LI_CUI;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It
 * is either inserted in the AIL or aborted in the event of a log I/O error.
 * In either case, the CUI transaction has been successfully committed to make
 * it this far. Therefore, we expect whoever committed the CUI to either
 * construct and commit the CUD or drop the CUD's reference in the event of
 * error. Simply drop the log's CUI reference now that the log is done with
 * it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a cui item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kmem_zalloc(xfs_cui_log_item_sizeof(nextents), 0);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
					 GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, XFS_LI_CUI, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
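	/*
	 * Two references: one dropped when the work is finished or
	 * cancelled, and one dropped once the log is done with the
	 * item (see xfs_cui_item_unpin).
	 */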
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

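/* Pull the CUD log item out of its embedded struct xfs_log_item. */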
static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

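/*
 * Report the number of log iovecs and the byte count needed to log the CUD:
 * a single region holding the fixed-size cud_log_format structure.
 */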
STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	cudp->cud_format.cud_type = XFS_LI_CUD;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kmem_free(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}

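/* Return the CUI intent item that this CUD marks done. */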
static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};

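/* Allocate a CUD log item, attach it to the transaction, and point it at @cuip. */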
static struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item, XFS_LI_CUD,
			  &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	xfs_trans_add_item(tp, &cudp->cud_item);
	return cudp;
}

/*
 * Finish a refcount update and log it to the CUD. Note that the
 * transaction is marked dirty regardless of whether the refcount
 * update succeeds or fails to support the CUI/CUD lifecycle rules.
 */
static int
xfs_trans_log_finish_refcount_update(
	struct xfs_trans		*tp,
	struct xfs_cud_log_item		*cudp,
	struct xfs_refcount_intent	*ri,
	struct xfs_btree_cur		**pcur)
{
	int				error;

	error = xfs_refcount_finish_one(tp, ri, pcur);

	/*
	 * Mark the transaction dirty, even on error. This ensures the
	 * transaction is aborted, which:
	 *
	 * 1.) releases the CUI and frees the CUD
	 * 2.) shuts down the filesystem
	 */
	tp->t_flags |= XFS_TRANS_DIRTY | XFS_TRANS_HAS_INTENT_DONE;
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	return error;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_refcount_intent	*ra;
	struct xfs_refcount_intent	*rb;

	ra = container_of(a, struct xfs_refcount_intent, ri_list);
	rb = container_of(b, struct xfs_refcount_intent, ri_list);

	return ra->ri_pag->pag_agno - rb->ri_pag->pag_agno;
}

/* Set the phys extent flags for this refcount update. */
static void
xfs_trans_set_refcount_flags(
	struct xfs_phys_extent		*pmap,
	enum xfs_refcount_intent_type	type)
{
	pmap->pe_flags = 0;
	switch (type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		pmap->pe_flags |= type;
		break;
	default:
		ASSERT(0);
	}
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	tp->t_flags |= XFS_TRANS_DIRTY;
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;
	xfs_trans_set_refcount_flags(pmap, ri->ri_type);
}

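/* Gather all the refcount updates on @items into a single CUI intent item. */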
static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip = xfs_cui_init(mp, count);
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	xfs_trans_add_item(tp, &cuip->cui_item);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	return &xfs_trans_get_cud(tp, CUI_ITEM(intent))->cud_item;
}

/* Take a passive ref to the AG containing the space we're refcounting. */
void
xfs_refcount_update_get_group(
	struct xfs_mount		*mp,
	struct xfs_refcount_intent	*ri)
{
	xfs_agnumber_t			agno;

	agno = XFS_FSB_TO_AGNO(mp, ri->ri_startblock);
	ri->ri_pag = xfs_perag_intent_get(mp, agno);
}

/* Release a passive AG ref after finishing refcounting work. */
static inline void
xfs_refcount_update_put_group(
	struct xfs_refcount_intent	*ri)
{
	xfs_perag_intent_put(ri->ri_pag);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri;
	int				error;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);
	error = xfs_trans_log_finish_refcount_update(tp, CUD_ITEM(done), ri,
			state);

	/* Did we run out of reservation? Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_put_group(ri);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
	return error;
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*ri;

	ri = container_of(item, struct xfs_refcount_intent, ri_list);

	xfs_refcount_update_put_group(ri);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
}

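/* Dispatch table used by the defer ops machinery to run refcount updates. */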
const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
};

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	struct xfs_phys_extent		*pmap)
{
	if (!xfs_has_reflink(mp))
		return false;

	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_cui_item_recover(
	struct xfs_log_item		*lip,
	struct list_head		*capture_list)
{
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_cud_log_item		*cudp;
	struct xfs_trans		*tp;
	struct xfs_btree_cur		*rcur = NULL;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	unsigned int			refc_type;
	bool				requeue_only = false;
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI.  If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction.  All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction.  Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update.  However, we're in log recovery here, so we
	 * use the passed-in defer_ops to finish up any work that
	 * doesn't fit.  We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			mp->m_refc_maxlevels * 2, 0, XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	cudp = xfs_trans_get_cud(tp, cuip);

	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		struct xfs_refcount_intent	fake = { };
		struct xfs_phys_extent		*pmap;

		pmap = &cuip->cui_format.cui_extents[i];
		refc_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
		switch (refc_type) {
		case XFS_REFCOUNT_INCREASE:
		case XFS_REFCOUNT_DECREASE:
		case XFS_REFCOUNT_ALLOC_COW:
		case XFS_REFCOUNT_FREE_COW:
			fake.ri_type = refc_type;
			break;
		default:
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			error = -EFSCORRUPTED;
			goto abort_error;
		}

		fake.ri_startblock = pmap->pe_startblock;
		fake.ri_blockcount = pmap->pe_len;

		if (!requeue_only) {
			xfs_refcount_update_get_group(mp, &fake);
			error = xfs_trans_log_finish_refcount_update(tp, cudp,
					&fake, &rcur);
			xfs_refcount_update_put_group(&fake);
		}
		if (error == -EFSCORRUPTED)
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
		if (error)
			goto abort_error;

		/* Requeue what we didn't finish. */
		if (fake.ri_blockcount > 0) {
			struct xfs_bmbt_irec	irec = {
				.br_startblock	= fake.ri_startblock,
				.br_blockcount	= fake.ri_blockcount,
			};

			switch (fake.ri_type) {
			case XFS_REFCOUNT_INCREASE:
				xfs_refcount_increase_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_DECREASE:
				xfs_refcount_decrease_extent(tp, &irec);
				break;
			case XFS_REFCOUNT_ALLOC_COW:
				xfs_refcount_alloc_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			case XFS_REFCOUNT_FREE_COW:
				xfs_refcount_free_cow_extent(tp,
						irec.br_startblock,
						irec.br_blockcount);
				break;
			default:
				ASSERT(0);
			}
			requeue_only = true;
		}
	}

	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_refcount_finish_one_cleanup(tp, rcur, error);
	xfs_trans_cancel(tp);
	return error;
}

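/* Match CUIs to a given intent id so log recovery can find them in the AIL. */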
STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_cui_item_relog(
	struct xfs_log_item		*intent,
	struct xfs_trans		*tp)
{
	struct xfs_cud_log_item		*cudp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*pmap;
	unsigned int			count;

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	tp->t_flags |= XFS_TRANS_DIRTY;
	cudp = xfs_trans_get_cud(tp, CUI_ITEM(intent));
	set_bit(XFS_LI_DIRTY, &cudp->cud_item.li_flags);

	cuip = xfs_cui_init(tp->t_mountp, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
	atomic_set(&cuip->cui_next_extent, count);
	xfs_trans_add_item(tp, &cuip->cui_item);
	set_bit(XFS_LI_DIRTY, &cuip->cui_item.li_flags);
	return &cuip->cui_item;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_recover	= xfs_cui_item_recover,
	.iop_match	= xfs_cui_item_match,
	.iop_relog	= xfs_cui_item_relog,
};

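/* Copy one CUI format structure, extent array and all, into another. */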
static inline void
xfs_cui_copy_format(
	struct xfs_cui_log_format	*dst,
	const struct xfs_cui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

	for (i = 0; i < src->cui_nextents; i++)
		memcpy(&dst->cui_extents[i], &src->cui_extents[i],
				sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);
	/*
	 * Insert the intent into the AIL directly and drop one reference so
	 * that finishing or canceling the work will drop the other.
	 */
	xfs_trans_ail_insert(log->l_ailp, &cuip->cui_item, lsn);
	xfs_cui_release(cuip);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if
 * it was still in the log. To do this it searches the AIL for the CUI with an
 * id equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};