xfs: constrain dirty buffers while formatting a staged btree

Constrain the number of dirty buffers that are locked by the btree
staging code at any given time by establishing a threshold at which we
put them all on the delwri queue and push them to disk.  This limits
memory consumption while writing out new btrees.

Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Darrick J. Wong 2023-12-15 10:03:29 -08:00
parent 6dfeb0c2ec
commit e069d54970
3 changed files with 50 additions and 11 deletions
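
For context, a minimal caller-side sketch of how the new threshold is meant to be used (illustration only, not part of this patch; cur, nr_records and priv stand in for the caller's usual bulk-load state, and the bbl callback setup is elided):

	struct xfs_btree_bload	bbl = { };
	int			error;

	/* Flush the new-btree buffer list after ~256K of blocks are dirtied. */
	bbl.max_dirty = XFS_B_TO_FSBT(cur->bc_mp, 256U << 10);

	/* ...fill in the bbl callbacks and record count as usual... */

	error = xfs_btree_bload_compute_geometry(cur, &bbl, nr_records);
	if (error)
		return error;

	/*
	 * With max_dirty set, xfs_btree_bload() pushes its delwri list to
	 * disk each time that many blocks have been formatted, instead of
	 * keeping every new buffer dirty in memory until the load finishes.
	 */
	error = xfs_btree_bload(cur, &bbl, priv);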


@@ -333,24 +333,41 @@ xfs_btree_commit_ifakeroot(
 /*
  * Put a btree block that we're loading onto the ordered list and release it.
  * The btree blocks will be written to disk when bulk loading is finished.
+ * If we reach the dirty buffer threshold, flush them to disk before
+ * continuing.
  */
-static void
+static int
 xfs_btree_bload_drop_buf(
-	struct list_head	*buffers_list,
-	struct xfs_buf		**bpp)
+	struct xfs_btree_bload		*bbl,
+	struct list_head		*buffers_list,
+	struct xfs_buf			**bpp)
 {
-	if (*bpp == NULL)
-		return;
+	struct xfs_buf			*bp = *bpp;
+	int				error;
+
+	if (!bp)
+		return 0;
 
 	/*
 	 * Mark this buffer XBF_DONE (i.e. uptodate) so that a subsequent
 	 * xfs_buf_read will not pointlessly reread the contents from the disk.
 	 */
-	(*bpp)->b_flags |= XBF_DONE;
+	bp->b_flags |= XBF_DONE;
 
-	xfs_buf_delwri_queue_here(*bpp, buffers_list);
-	xfs_buf_relse(*bpp);
+	xfs_buf_delwri_queue_here(bp, buffers_list);
+	xfs_buf_relse(bp);
 	*bpp = NULL;
+	bbl->nr_dirty++;
+
+	if (!bbl->max_dirty || bbl->nr_dirty < bbl->max_dirty)
+		return 0;
+
+	error = xfs_buf_delwri_submit(buffers_list);
+	if (error)
+		return error;
+
+	bbl->nr_dirty = 0;
+	return 0;
 }
 
 /*
@@ -422,7 +439,10 @@ xfs_btree_bload_prep_block(
 	 */
 	if (*blockp)
 		xfs_btree_set_sibling(cur, *blockp, &new_ptr, XFS_BB_RIGHTSIB);
-	xfs_btree_bload_drop_buf(buffers_list, bpp);
+
+	ret = xfs_btree_bload_drop_buf(bbl, buffers_list, bpp);
+	if (ret)
+		return ret;
 
 	/* Initialize the new btree block. */
 	xfs_btree_init_block_cur(cur, new_bp, level, nr_this_block);
@@ -770,6 +790,7 @@ xfs_btree_bload(
 	cur->bc_nlevels = bbl->btree_height;
 	xfs_btree_set_ptr_null(cur, &child_ptr);
 	xfs_btree_set_ptr_null(cur, &ptr);
+	bbl->nr_dirty = 0;
 
 	xfs_btree_bload_level_geometry(cur, bbl, level, nr_this_level,
 			&avg_per_block, &blocks, &blocks_with_extra);
@@ -808,7 +829,10 @@ xfs_btree_bload(
 			xfs_btree_copy_ptrs(cur, &child_ptr, &ptr, 1);
 	}
 	total_blocks += blocks;
-	xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+	ret = xfs_btree_bload_drop_buf(bbl, &buffers_list, &bp);
+	if (ret)
+		goto out;
 
 	/* Populate the internal btree nodes. */
 	for (level = 1; level < cur->bc_nlevels; level++) {
@@ -850,7 +874,11 @@ xfs_btree_bload(
 				xfs_btree_copy_ptrs(cur, &first_ptr, &ptr, 1);
 		}
 		total_blocks += blocks;
-		xfs_btree_bload_drop_buf(&buffers_list, &bp);
+
+		ret = xfs_btree_bload_drop_buf(bbl, &buffers_list, &bp);
+		if (ret)
+			goto out;
+
 		xfs_btree_copy_ptrs(cur, &child_ptr, &first_ptr, 1);
 	}
 


@@ -112,6 +112,16 @@ struct xfs_btree_bload {
 	 * height of the new btree.
 	 */
 	unsigned int			btree_height;
+
+	/*
+	 * Flush the new btree block buffer list to disk after this many blocks
+	 * have been formatted. Zero prohibits writing any buffers until all
+	 * blocks have been formatted.
+	 */
+	uint16_t			max_dirty;
+
+	/* Number of dirty buffers. */
+	uint16_t			nr_dirty;
 };
 
 int xfs_btree_bload_compute_geometry(struct xfs_btree_cur *cur,


@@ -94,6 +94,7 @@ xrep_newbt_init_ag(
 	xnr->alloc_hint = alloc_hint;
 	xnr->resv = resv;
 	INIT_LIST_HEAD(&xnr->resv_list);
+	xnr->bload.max_dirty = XFS_B_TO_FSBT(sc->mp, 256U << 10); /* 256K */
 	xrep_newbt_estimate_slack(xnr);
 }
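
A note on the default chosen above (explanatory, not part of the patch text): XFS_B_TO_FSBT() converts bytes to filesystem blocks, rounding down, so the threshold works out to 256K >> sb_blocklog blocks. On a filesystem with the common 4096-byte block size that means at most 64 newly formatted buffers are queued before the delwri list is pushed to disk.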