bcachefs: Improved errcodes

Instead of overloading standard error codes (EINTR/EAGAIN), and defining
short lists of error codes in multiple places that potentially end up
overlapping & conflicting, we're now going to have one master list of
error codes.

Error codes are defined with an x-macro: thus we also have
bch2_err_str() now.

Also, error codes have a class field. Now, instead of checking for
errors with ==, code should use bch2_err_matches(), which returns true
if the error is equal to or a sub-error of the error class.

This means we can define unique errors for every source location where
an error is generated, which will help improve our error messages.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
Commit metadata: authored by Kent Overstreet on 2022-07-17 22:31:21 -04:00; committed by Kent Overstreet.
parent 3ab25c1b4e
commit 615f867c14
8 changed files with 120 additions and 44 deletions

View File

@ -21,6 +21,7 @@ config BCACHEFS_FS
select XOR_BLOCKS
select XXHASH
select SRCU
select SYMBOLIC_ERRNAME
help
The bcachefs filesystem - a modern, copy on write filesystem, with
support for multiple devices, compression, checksumming, etc.

View File

@ -27,6 +27,7 @@ bcachefs-y := \
disk_groups.o \
data_update.o \
ec.o \
errcode.o \
error.o \
extents.o \
extent_update.o \

View File

@ -1051,7 +1051,8 @@ static void bch2_do_discards_work(struct work_struct *work)
percpu_ref_put(&c->writes);
trace_discard_buckets(c, seen, open, need_journal_commit, discarded, ret);
trace_discard_buckets(c, seen, open, need_journal_commit, discarded,
bch2_err_str(ret));
}
void bch2_do_discards(struct bch_fs *c)

View File

@ -238,7 +238,7 @@ static struct open_bucket *__try_alloc_bucket(struct bch_fs *c, struct bch_dev *
c->blocked_allocate_open_bucket = local_clock();
spin_unlock(&c->freelist_lock);
return ERR_PTR(-OPEN_BUCKETS_EMPTY);
return ERR_PTR(-BCH_ERR_open_buckets_empty);
}
/* Recheck under lock: */
@ -440,7 +440,7 @@ again:
goto again;
}
return ob ?: ERR_PTR(ret ?: -FREELIST_EMPTY);
return ob ?: ERR_PTR(ret ?: -BCH_ERR_no_buckets_found);
}
static struct open_bucket *bch2_bucket_alloc_freelist(struct btree_trans *trans,
@ -548,7 +548,7 @@ again:
if (!c->blocked_allocate)
c->blocked_allocate = local_clock();
ob = ERR_PTR(-FREELIST_EMPTY);
ob = ERR_PTR(-BCH_ERR_freelist_empty);
goto err;
}
@ -579,7 +579,7 @@ again:
bch2_journal_flush_async(&c->journal, NULL);
err:
if (!ob)
ob = ERR_PTR(-FREELIST_EMPTY);
ob = ERR_PTR(-BCH_ERR_no_buckets_found);
if (!IS_ERR(ob)) {
trace_bucket_alloc(ca, bch2_alloc_reserves[reserve],
@ -591,7 +591,8 @@ err:
skipped_open,
skipped_need_journal_commit,
skipped_nouse,
cl == NULL, PTR_ERR_OR_ZERO(ob));
cl == NULL,
"");
} else {
trace_bucket_alloc_fail(ca, bch2_alloc_reserves[reserve],
usage.d[BCH_DATA_free].buckets,
@ -602,7 +603,8 @@ err:
skipped_open,
skipped_need_journal_commit,
skipped_nouse,
cl == NULL, PTR_ERR_OR_ZERO(ob));
cl == NULL,
bch2_err_str(PTR_ERR(ob)));
atomic_long_inc(&c->bucket_alloc_fail);
}
@ -750,7 +752,7 @@ static int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
if (*nr_effective >= nr_replicas)
ret = 0;
else if (!ret)
ret = -INSUFFICIENT_DEVICES;
ret = -BCH_ERR_insufficient_devices;
return ret;
}
@ -923,8 +925,8 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
nr_replicas, nr_effective,
have_cache, flags, _cl);
if (ret == -EINTR ||
ret == -FREELIST_EMPTY ||
ret == -OPEN_BUCKETS_EMPTY)
bch2_err_matches(ret, BCH_ERR_freelist_empty) ||
bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
return ret;
if (*nr_effective >= nr_replicas)
return 0;
@ -947,7 +949,7 @@ retry_blocking:
reserve, flags, cl);
if (ret &&
ret != -EINTR &&
ret != -INSUFFICIENT_DEVICES &&
!bch2_err_matches(ret, BCH_ERR_insufficient_devices) &&
!cl && _cl) {
cl = _cl;
goto retry_blocking;
@ -1203,7 +1205,7 @@ alloc_done:
if (erasure_code && !ec_open_bucket(c, &ptrs))
pr_debug("failed to get ec bucket: ret %u", ret);
if (ret == -INSUFFICIENT_DEVICES &&
if (ret == -BCH_ERR_insufficient_devices &&
nr_effective >= nr_replicas_required)
ret = 0;
@ -1234,19 +1236,18 @@ err:
mutex_unlock(&wp->lock);
if (ret == -FREELIST_EMPTY &&
if (bch2_err_matches(ret, BCH_ERR_freelist_empty) &&
try_decrease_writepoints(c, write_points_nr))
goto retry;
switch (ret) {
case -OPEN_BUCKETS_EMPTY:
case -FREELIST_EMPTY:
if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
bch2_err_matches(ret, BCH_ERR_freelist_empty))
return cl ? -EAGAIN : -ENOSPC;
case -INSUFFICIENT_DEVICES:
if (bch2_err_matches(ret, BCH_ERR_insufficient_devices))
return -EROFS;
default:
return ret;
}
return ret;
}
int bch2_alloc_sectors_start(struct bch_fs *c,

51
fs/bcachefs/errcode.c Normal file
View File

@ -0,0 +1,51 @@
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "errcode.h"
#include <linux/errname.h>
/*
 * Human-readable names for bcachefs-private error codes, indexed by
 * (err - BCH_ERR_START).  Each BCH_ERRCODES() entry expands to a
 * designated initializer whose value is the stringified error name.
 */
static const char * const bch2_errcode_strs[] = {
#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = #err,
BCH_ERRCODES()
#undef x
NULL
};
/*
 * BCH_ERRCODES() entries whose class column is the literal 0 expand to
 * BCH_ERR_0 in the table below; define it to 0 so those codes get a
 * parent of 0, i.e. "no parent class" — the walk in __bch2_err_matches()
 * stops there.
 */
#define BCH_ERR_0 0
/* Parent class of each error code, indexed by (err - BCH_ERR_START). */
static unsigned bch2_errcode_parents[] = {
#define x(class, err) [BCH_ERR_##err - BCH_ERR_START] = BCH_ERR_##class,
BCH_ERRCODES()
#undef x
};
/*
 * bch2_err_str - return a symbolic name for an error code.
 *
 * Accepts either sign convention (the code is folded to its absolute
 * value).  bcachefs-private codes (>= BCH_ERR_START) are looked up in
 * bch2_errcode_strs[]; standard errnos go through errname(); 0 reports
 * "(No error)".  Never returns NULL: a code with no name comes back as
 * "(Invalid error)".
 */
const char *bch2_err_str(int err)
{
	const char *name;

	err = abs(err);

	BUG_ON(err >= BCH_ERR_MAX);

	if (!err)
		return "(No error)";

	name = err >= BCH_ERR_START
		? bch2_errcode_strs[err - BCH_ERR_START]
		: errname(err);

	return name ?: "(Invalid error)";
}
bool __bch2_err_matches(int err, int class)
{
err = abs(err);
class = abs(class);
BUG_ON(err >= BCH_ERR_MAX);
BUG_ON(class >= BCH_ERR_MAX);
while (err >= BCH_ERR_START && err != class)
err = bch2_errcode_parents[err - BCH_ERR_START];
return err == class;
}

View File

@ -2,12 +2,33 @@
#ifndef _BCACHEFS_ERRCODE_H
#define _BCACHEFS_ERRCODE_H

/*
 * Master list of bcachefs-private error codes.
 *
 * Each x(class, err) entry defines BCH_ERR_<err>; the first column is
 * the parent error class (0 for a top-level error), which lets
 * bch2_err_matches() treat e.g. no_buckets_found as a sub-error of
 * freelist_empty.
 */
#define BCH_ERRCODES()						\
	x(0,			open_buckets_empty)		\
	x(0,			freelist_empty)			\
	x(freelist_empty,	no_buckets_found)		\
	x(0,			insufficient_devices)		\
	x(0,			need_snapshot_cleanup)

/* Private codes start above the errno range so the two never collide. */
enum bch_errcode {
	BCH_ERR_START = 2048,
#define x(class, err) BCH_ERR_##err,
	BCH_ERRCODES()
#undef x
	BCH_ERR_MAX
};

/* Symbolic name for @err (private code or errno); never returns NULL. */
const char *bch2_err_str(int);

bool __bch2_err_matches(int, int);

static inline bool _bch2_err_matches(int err, int class)
{
	/* 0 is "no error" and matches nothing. */
	return err && __bch2_err_matches(err, class);
}

/*
 * Preferred spelling for callers: insists @_class is a compile-time
 * constant so class checks can't be accidentally data-driven.
 */
#define bch2_err_matches(_err, _class)			\
({							\
	BUILD_BUG_ON(!__builtin_constant_p(_class));	\
	_bch2_err_matches(_err, _class);		\
})

#endif /* _BCACHEFS_ERRCODE_H */

View File

@ -534,7 +534,7 @@ static int snapshots_seen_update(struct bch_fs *c, struct snapshots_seen *s,
bch2_btree_ids[btree_id],
pos.inode, pos.offset,
i->id, n.id, n.equiv);
return -NEED_SNAPSHOT_CLEANUP;
return -BCH_ERR_need_snapshot_cleanup;
}
return 0;
@ -2371,7 +2371,7 @@ again:
check_nlinks(c) ?:
fix_reflink_p(c);
if (ret == -NEED_SNAPSHOT_CLEANUP) {
if (bch2_err_matches(ret, BCH_ERR_need_snapshot_cleanup)) {
set_bit(BCH_FS_HAVE_DELETED_SNAPSHOTS, &c->flags);
goto again;
}

View File

@ -449,9 +449,9 @@ DECLARE_EVENT_CLASS(bucket_alloc,
u64 need_journal_commit,
u64 nouse,
bool nonblocking,
int ret),
const char *err),
TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
seen, open, need_journal_commit, nouse, nonblocking, ret),
seen, open, need_journal_commit, nouse, nonblocking, err),
TP_STRUCT__entry(
__field(dev_t, dev )
@ -465,7 +465,7 @@ DECLARE_EVENT_CLASS(bucket_alloc,
__field(u64, need_journal_commit )
__field(u64, nouse )
__field(bool, nonblocking )
__field(int, ret )
__array(char, err, 16 )
),
TP_fast_assign(
@ -480,10 +480,10 @@ DECLARE_EVENT_CLASS(bucket_alloc,
__entry->need_journal_commit = need_journal_commit;
__entry->nouse = nouse;
__entry->nonblocking = nonblocking;
__entry->ret = ret;
strlcpy(__entry->err, err, sizeof(__entry->err));
),
TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u ret %i",
TP_printk("%d,%d reserve %s free %llu avail %llu copygc_wait %llu/%lli seen %llu open %llu need_journal_commit %llu nouse %llu nonblocking %u err %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->reserve,
__entry->free,
@ -495,7 +495,7 @@ DECLARE_EVENT_CLASS(bucket_alloc,
__entry->need_journal_commit,
__entry->nouse,
__entry->nonblocking,
__entry->ret)
__entry->err)
);
DEFINE_EVENT(bucket_alloc, bucket_alloc,
@ -509,9 +509,9 @@ DEFINE_EVENT(bucket_alloc, bucket_alloc,
u64 need_journal_commit,
u64 nouse,
bool nonblocking,
int ret),
const char *err),
TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
seen, open, need_journal_commit, nouse, nonblocking, ret)
seen, open, need_journal_commit, nouse, nonblocking, err)
);
DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
@ -525,15 +525,15 @@ DEFINE_EVENT(bucket_alloc, bucket_alloc_fail,
u64 need_journal_commit,
u64 nouse,
bool nonblocking,
int ret),
const char *err),
TP_ARGS(ca, alloc_reserve, free, avail, copygc_wait_amount, copygc_waiting_for,
seen, open, need_journal_commit, nouse, nonblocking, ret)
seen, open, need_journal_commit, nouse, nonblocking, err)
);
TRACE_EVENT(discard_buckets,
TP_PROTO(struct bch_fs *c, u64 seen, u64 open,
u64 need_journal_commit, u64 discarded, int ret),
TP_ARGS(c, seen, open, need_journal_commit, discarded, ret),
u64 need_journal_commit, u64 discarded, const char *err),
TP_ARGS(c, seen, open, need_journal_commit, discarded, err),
TP_STRUCT__entry(
__field(dev_t, dev )
@ -541,7 +541,7 @@ TRACE_EVENT(discard_buckets,
__field(u64, open )
__field(u64, need_journal_commit )
__field(u64, discarded )
__field(int, ret )
__array(char, err, 16 )
),
TP_fast_assign(
@ -550,16 +550,16 @@ TRACE_EVENT(discard_buckets,
__entry->open = open;
__entry->need_journal_commit = need_journal_commit;
__entry->discarded = discarded;
__entry->ret = ret;
strlcpy(__entry->err, err, sizeof(__entry->err));
),
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu ret %i",
TP_printk("%d%d seen %llu open %llu need_journal_commit %llu discarded %llu err %s",
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->seen,
__entry->open,
__entry->need_journal_commit,
__entry->discarded,
__entry->ret)
__entry->err)
);
TRACE_EVENT(invalidate_bucket,