mirror of https://github.com/torvalds/linux.git
bcache: Add bch_keylist_init_single()
This will potentially save us an allocation when we've got inode/dirent bkeys that don't fit in the keylist's inline keys.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
parent 1575402052
commit c13f3af924
@@ -478,6 +478,12 @@ static inline void bch_keylist_init(struct keylist *l)
 	l->top_p = l->keys_p = l->inline_keys;
 }
 
+static inline void bch_keylist_init_single(struct keylist *l, struct bkey *k)
+{
+	l->keys = k;
+	l->top = bkey_next(k);
+}
+
 static inline void bch_keylist_push(struct keylist *l)
 {
 	l->top = bkey_next(l->top);
@@ -313,8 +313,6 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 	uint64_t start = i->j.last_seq, end = i->j.seq, n = start;
 	struct keylist keylist;
 
-	bch_keylist_init(&keylist);
-
 	list_for_each_entry(i, list, list) {
 		BUG_ON(i->pin && atomic_read(i->pin) != 1);
 
@@ -327,8 +325,7 @@ int bch_journal_replay(struct cache_set *s, struct list_head *list)
 		     k = bkey_next(k)) {
 			trace_bcache_journal_replay_key(k);
 
-			bkey_copy(keylist.top, k);
-			bch_keylist_push(&keylist);
+			bch_keylist_init_single(&keylist, k);
 
 			ret = bch_btree_insert(s, &keylist, i->pin, NULL);
 			if (ret)
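Read together, the two bch_journal_replay() hunks replace the copy-then-push pattern with the new wrapper. Restated as a condensed before/after fragment (variable names as in the diff above; this is a summary of the hunks, not additional code from the commit):

/* before: every journal key was copied into the keylist's storage */
bch_keylist_init(&keylist);
/* ... for each key k in each journal entry ... */
bkey_copy(keylist.top, k);
bch_keylist_push(&keylist);
ret = bch_btree_insert(s, &keylist, i->pin, NULL);

/* after: the keylist is aimed at the key already in the journal entry */
/* ... for each key k in each journal entry ... */
bch_keylist_init_single(&keylist, k);
ret = bch_btree_insert(s, &keylist, i->pin, NULL);

The allocation saving named in the commit message applies when a copied key would not fit in the keylist's inline buffer and the list would otherwise have to be grown before the copy.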