mirror of
https://github.com/torvalds/linux.git
synced 2024-12-29 14:21:47 +00:00
5b593ee172
Some errors may need to be fixed in order for GC to successfully run - walk and mark all metadata. But we can't start the allocators and do normal btree updates until after GC has completed, and allocation information is known to be consistent, so we need a different method of doing btree updates. Fortunately, we already have code for walking the btree while overlaying keys from the journal to be replayed. This patch adds an update path that adds keys to the list of keys to be replayed by journal replay, and also fixes up iterators. Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com> Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
121 lines
3.4 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _BCACHEFS_BTREE_GC_H
|
|
#define _BCACHEFS_BTREE_GC_H
|
|
|
|
#include "btree_types.h"
|
|
|
|
void bch2_coalesce(struct bch_fs *);
|
|
|
|
int bch2_gc(struct bch_fs *, bool);
|
|
int bch2_gc_gens(struct bch_fs *);
|
|
void bch2_gc_thread_stop(struct bch_fs *);
|
|
int bch2_gc_thread_start(struct bch_fs *);
|
|
void bch2_mark_dev_superblock(struct bch_fs *, struct bch_dev *, unsigned);
|
|
|
|
/*
 * For concurrent mark and sweep (with other index updates), we define a total
 * ordering of _all_ references GC walks:
 *
 * Note that some references will have the same GC position as others - e.g.
 * everything within the same btree node; in those cases we're relying on
 * whatever locking exists for where those references live, i.e. the write lock
 * on a btree node.
 *
 * That locking is also required to ensure GC doesn't pass the updater in
 * between the updater adding/removing the reference and updating the GC marks;
 * without that, we would at best double count sometimes.
 *
 * That part is important - whenever calling bch2_mark_pointers(), a lock _must_
 * be held that prevents GC from passing the position the updater is at.
 *
 * (What about the start of gc, when we're clearing all the marks? GC clears the
 * mark with the gc pos seqlock held, and bch_mark_bucket checks against the gc
 * position inside its cmpxchg loop, so crap magically works).
 */
|
|
|
|
/* Position of (the start of) a gc phase: */
|
static inline struct gc_pos gc_phase(enum gc_phase phase)
|
|
{
|
|
return (struct gc_pos) {
|
|
.phase = phase,
|
|
.pos = POS_MIN,
|
|
.level = 0,
|
|
};
|
|
}
|
|
|
|
/*
 * Total ordering on GC positions: compares by phase, then key position, then
 * btree level. Returns <0, 0 or >0, like memcmp().
 */
static inline int gc_pos_cmp(struct gc_pos l, struct gc_pos r)
{
	int cmp;

	if (l.phase != r.phase)
		return l.phase < r.phase ? -1 : 1;

	/* Evaluate bkey_cmp() once instead of twice: */
	cmp = bkey_cmp(l.pos, r.pos);
	if (cmp)
		return cmp;

	if (l.level != r.level)
		return l.level < r.level ? -1 : 1;

	return 0;
}
|
|
|
|
/*
 * Map a btree id to its dedicated GC phase: the BCH_BTREE_IDS() x-macro
 * expands to one case per btree (BTREE_ID_foo -> GC_PHASE_BTREE_foo).
 * Any id outside that list is a programmer error; BUG() on it.
 */
static inline enum gc_phase btree_id_to_gc_phase(enum btree_id id)
{
	switch (id) {
#define x(n, v, s) case BTREE_ID_##n: return GC_PHASE_BTREE_##n;
	BCH_BTREE_IDS()
#undef x
	default:
		BUG();
	}
}
|
|
|
|
static inline struct gc_pos gc_pos_btree(enum btree_id id,
|
|
struct bpos pos, unsigned level)
|
|
{
|
|
return (struct gc_pos) {
|
|
.phase = btree_id_to_gc_phase(id),
|
|
.pos = pos,
|
|
.level = level,
|
|
};
|
|
}
|
|
|
|
/*
 * GC position of the pointers within a btree node: note, _not_ for &b->key
 * itself, that lives in the parent node:
 */
|
|
static inline struct gc_pos gc_pos_btree_node(struct btree *b)
|
|
{
|
|
return gc_pos_btree(b->c.btree_id, b->key.k.p, b->c.level);
|
|
}
|
|
|
|
/*
 * GC position of the pointer to a btree root: we don't use
 * gc_pos_pointer_to_btree_node() here to avoid a potential race with
 * btree_split() increasing the tree depth - the new root will have level > the
 * old root and thus have a greater gc position than the old root, but that
 * would be incorrect since once gc has marked the root it's not coming back.
 */
|
|
static inline struct gc_pos gc_pos_btree_root(enum btree_id id)
|
|
{
|
|
return gc_pos_btree(id, POS_MAX, BTREE_MAX_DEPTH);
|
|
}
|
|
|
|
static inline struct gc_pos gc_pos_alloc(struct bch_fs *c, struct open_bucket *ob)
|
|
{
|
|
return (struct gc_pos) {
|
|
.phase = GC_PHASE_ALLOC,
|
|
.pos = POS(ob ? ob - c->open_buckets : 0, 0),
|
|
};
|
|
}
|
|
|
|
static inline bool gc_visited(struct bch_fs *c, struct gc_pos pos)
|
|
{
|
|
unsigned seq;
|
|
bool ret;
|
|
|
|
do {
|
|
seq = read_seqcount_begin(&c->gc_pos_lock);
|
|
ret = gc_pos_cmp(pos, c->gc_pos) <= 0;
|
|
} while (read_seqcount_retry(&c->gc_pos_lock, seq));
|
|
|
|
return ret;
|
|
}
|
|
|
|
#endif /* _BCACHEFS_BTREE_GC_H */
|