bcachefs: Fixes for building in userspace

- Marking a non-static function as inline doesn't actually work and is
  now causing problems - drop that (a short sketch of the failure mode
  follows the diffstat below)

- Introduce BCACHEFS_LOG_PREFIX for when we want to prefix log messages
  with the filesystem name, i.e. "bcachefs (%s): "

- Userspace doesn't have real percpu variables (maybe we can get this
  fixed someday), so put an #ifdef around the
  bch2_disk_reservation_add() fastpath

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Kent Overstreet 2022-11-13 20:01:42 -05:00
parent a7ecd30c83
commit b2d1d56b1d
11 changed files with 25 additions and 18 deletions
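The first item in the commit message deserves a concrete illustration. In the kernel, `inline` is effectively given GNU (`gnu_inline`) semantics, so a non-static `inline` definition still emits an external symbol; bcachefs-tools compiles this code under standard C99/C11 inline rules, where it does not have to. A minimal sketch of the failure mode, using hypothetical files and names (not bcachefs code):

```c
/* foo.h */
void foo(int x);		/* ordinary external declaration */

/* foo.c */
inline void foo(int x)		/* C99/C11: this is only an inline definition;
				 * the compiler need not emit an external
				 * definition of foo() for other translation
				 * units to link against */
{
	(void) x;
}

/* bar.c */
#include "foo.h"

int main(void)
{
	foo(1);			/* can fail at link time with
				 * "undefined reference to `foo'" */
	return 0;
}
```

Dropping `inline` (as the btree_iter and move hunks below do) turns these into ordinary external functions, which link the same way in both environments.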

@@ -226,6 +226,10 @@ do { \
 	dynamic_fault("bcachefs:meta:write:" name)
+#ifdef __KERNEL__
+#define BCACHEFS_LOG_PREFIX
+#endif
+#ifdef BCACHEFS_LOG_PREFIX
 #define bch2_log_msg(_c, fmt)		"bcachefs (%s): " fmt, ((_c)->name)
 #define bch2_fmt(_c, fmt)		bch2_log_msg(_c, fmt "\n")
 #define bch2_fmt_inum(_c, _inum, fmt)	"bcachefs (%s inum %llu): " fmt "\n", ((_c)->name), (_inum)
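As a rough, self-contained sketch of what the macros above buy us: with `BCACHEFS_LOG_PREFIX` defined, a printk/printf-style caller gets `"bcachefs (%s): "` plus the filesystem name prepended to its arguments; without it, the message goes out bare. The `struct bch_fs` stub, the `main()` harness, and the `#else` branch here are illustrative assumptions, not the real definitions:

```c
#include <stdio.h>

#define BCACHEFS_LOG_PREFIX	/* comment out to mimic a build without the prefix */

struct bch_fs {
	const char *name;
};

#ifdef BCACHEFS_LOG_PREFIX
#define bch2_fmt(_c, fmt)	"bcachefs (%s): " fmt "\n", ((_c)->name)
#else
#define bch2_fmt(_c, fmt)	fmt "\n"
#endif

int main(void)
{
	struct bch_fs c = { .name = "nvme0n1" };

	/* With the prefix this expands to
	 *   printf("bcachefs (%s): %s\n", c.name, "journal replay done");
	 * without it, to
	 *   printf("%s\n", "journal replay done");
	 */
	printf(bch2_fmt(&c, "%s"), "journal replay done");
	return 0;
}
```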

@@ -636,9 +636,9 @@ static inline void __btree_path_level_init(struct btree_path *path,
 		bch2_btree_node_iter_peek(&l->iter, l->b);
 }
-inline void bch2_btree_path_level_init(struct btree_trans *trans,
-				       struct btree_path *path,
-				       struct btree *b)
+void bch2_btree_path_level_init(struct btree_trans *trans,
+				struct btree_path *path,
+				struct btree *b)
 {
 	BUG_ON(path->cached);
@@ -1554,7 +1554,7 @@ struct btree_path *bch2_path_get(struct btree_trans *trans,
 	return path;
 }
-inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *path, struct bkey *u)
 {
 	struct btree_path_level *l = path_l(path);

@@ -177,13 +177,12 @@ int __must_check bch2_btree_path_traverse(struct btree_trans *,
 					  struct btree_path *, unsigned);
 struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
 				 unsigned, unsigned, unsigned);
-inline struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
+struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
 struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
 					struct btree_iter *, struct bpos);
-inline void bch2_btree_path_level_init(struct btree_trans *,
-				       struct btree_path *, struct btree *);
+void bch2_btree_path_level_init(struct btree_trans *, struct btree_path *, struct btree *);
 #ifdef CONFIG_BCACHEFS_DEBUG
 void bch2_trans_verify_paths(struct btree_trans *);

@@ -179,10 +179,9 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
 	}
 	if (unlikely(!best)) {
-		struct bch_fs *c = g->g->trans->c;
 		struct printbuf buf = PRINTBUF;
-		bch_err(c, "cycle of nofail locks");
+		prt_printf(&buf, bch2_fmt(g->g->trans->c, "cycle of nofail locks"));
 		for (i = g->g; i < g->g + g->nr; i++) {
 			struct btree_trans *trans = i->trans;

@@ -255,6 +255,7 @@ int __bch2_disk_reservation_add(struct bch_fs *,
 static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reservation *res,
 					    u64 sectors, int flags)
 {
+#ifdef __KERNEL__
 	u64 old, new;
 	do {
@@ -268,6 +269,9 @@ static inline int bch2_disk_reservation_add(struct bch_fs *c, struct disk_reserv
 	this_cpu_add(*c->online_reserved, sectors);
 	res->sectors += sectors;
 	return 0;
+#else
+	return __bch2_disk_reservation_add(c, res, sectors, flags);
+#endif
 }
 static inline struct disk_reservation
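The `#ifdef __KERNEL__` split above is the userspace workaround from the commit message: the fastpath leans on `this_cpu_*()` percpu operations, which bcachefs-tools cannot provide, so non-kernel builds call straight into `__bch2_disk_reservation_add()`. A rough standalone sketch of the same shape, with an atomic counter standing in for the shared pool (all names and types here are illustrative, not the bcachefs ones):

```c
#include <errno.h>
#include <stdatomic.h>
#include <stdint.h>

/* Stand-in for the filesystem-wide pool of free sectors. */
struct fs_counters {
	_Atomic uint64_t sectors_available;
};

struct reservation {
	uint64_t sectors;
};

/* Slowpath: always available, works directly on the shared counter. */
static int reservation_add_slow(struct fs_counters *c, struct reservation *res,
				uint64_t sectors)
{
	uint64_t old = atomic_load(&c->sectors_available);

	do {
		if (old < sectors)
			return -ENOSPC;
	} while (!atomic_compare_exchange_weak(&c->sectors_available,
					       &old, old - sectors));

	res->sectors += sectors;
	return 0;
}

static inline int reservation_add(struct fs_counters *c, struct reservation *res,
				  uint64_t sectors)
{
#ifdef __KERNEL__
	/* A kernel build would try a per-CPU cache of sectors here
	 * (this_cpu_read()/this_cpu_sub()) and only fall back to the
	 * shared pool when the local cache runs dry. */
#endif
	return reservation_add_slow(c, res, sectors);
}
```

The design point is that the slowpath must be correct on its own; the percpu fastpath is purely an optimization, so userspace can take the slowpath unconditionally.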

@@ -125,8 +125,10 @@ int bch2_fsck_err(struct bch_fs *c, unsigned flags, const char *fmt, ...)
 		s->nr++;
 	}
+#ifdef BCACHEFS_LOG_PREFIX
 	if (!strncmp(fmt, "bcachefs:", 9))
 		prt_printf(out, bch2_log_msg(c, ""));
+#endif
 	va_start(args, fmt);
 	prt_vprintf(out, fmt, args);

@@ -177,7 +177,7 @@ void bch2_moving_ctxt_init(struct moving_context *ctxt,
 	}
 }
-void bch_move_stats_init(struct bch_move_stats *stats, char *name)
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name)
 {
 	memset(stats, 0, sizeof(*stats));
 	scnprintf(stats->name, sizeof(stats->name), "%s", name);
@@ -755,7 +755,7 @@ int bch2_data_job(struct bch_fs *c,
 	switch (op.op) {
 	case BCH_DATA_OP_REREPLICATE:
-		bch_move_stats_init(stats, "rereplicate");
+		bch2_move_stats_init(stats, "rereplicate");
 		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, -1);
@@ -779,7 +779,7 @@ int bch2_data_job(struct bch_fs *c,
 		if (op.migrate.dev >= c->sb.nr_devices)
 			return -EINVAL;
-		bch_move_stats_init(stats, "migrate");
+		bch2_move_stats_init(stats, "migrate");
 		stats->data_type = BCH_DATA_journal;
 		ret = bch2_journal_flush_device_pins(&c->journal, op.migrate.dev);
@@ -800,7 +800,7 @@ int bch2_data_job(struct bch_fs *c,
 		ret = bch2_replicas_gc2(c) ?: ret;
 		break;
 	case BCH_DATA_OP_REWRITE_OLD_NODES:
-		bch_move_stats_init(stats, "rewrite_old_nodes");
+		bch2_move_stats_init(stats, "rewrite_old_nodes");
 		ret = bch2_scan_old_btree_nodes(c, stats);
 		break;
 	default:

@@ -50,8 +50,7 @@ int bch2_data_job(struct bch_fs *,
 		  struct bch_move_stats *,
 		  struct bch_ioctl_data);
-inline void bch_move_stats_init(struct bch_move_stats *stats,
-				char *name);
+void bch2_move_stats_init(struct bch_move_stats *stats, char *name);
 #endif /* _BCACHEFS_MOVE_H */

@@ -213,7 +213,7 @@ static int bch2_copygc(struct bch_fs *c)
 	size_t heap_size = 0;
 	int ret;
-	bch_move_stats_init(&move_stats, "copygc");
+	bch2_move_stats_init(&move_stats, "copygc");
 	/*
 	 * Find buckets with lowest sector counts, skipping completely

@@ -189,7 +189,7 @@ static int bch2_rebalance_thread(void *arg)
 	prev_start = jiffies;
 	prev_cputime = curr_cputime();
-	bch_move_stats_init(&move_stats, "rebalance");
+	bch2_move_stats_init(&move_stats, "rebalance");
 	while (!kthread_wait_freezable(r->enabled)) {
 		cond_resched();

@@ -1376,7 +1376,7 @@ use_clean:
 	    le16_to_cpu(c->sb.version_min) < bcachefs_metadata_version_btree_ptr_sectors_written) {
 		struct bch_move_stats stats;
-		bch_move_stats_init(&stats, "recovery");
+		bch2_move_stats_init(&stats, "recovery");
 		bch_info(c, "scanning for old btree nodes");
 		ret = bch2_fs_read_write(c);