Merge tag 'bcachefs-2024-09-21' of git://evilpiepirate.org/bcachefs
Pull bcachefs updates from Kent Overstreet:

 - rcu_pending, btree key cache rework: this solves lock contention in
   the key cache, eliminating the biggest source of the srcu lock hold
   time warnings, and drastically improving performance on some
   metadata heavy workloads - on multithreaded creates we're now 3-4x
   faster than xfs.

 - We're now using an rhashtable instead of the system inode hash
   table; this is another significant performance improvement on
   multithreaded metadata workloads, eliminating more lock contention
   (see the rhashtable sketch after this list).

 - for_each_btree_key_in_subvolume_upto(): new helper for iterating
   over keys within a specific subvolume, eliminating a lot of open
   coded "subvolume_get_snapshot()" and also fixing another source of
   srcu lock time warnings, by running each loop iteration in its own
   transaction (as the existing for_each_btree_key() does).

 - More work on btree_trans locking asserts; we now assert that we
   don't hold btree node locks when trans->locked is false, which is
   important because we don't use lockdep for tracking individual btree
   node locks.

 - Some cleanups and improvements in the bset.c btree node lookup code,
   from Alan.

 - Rework of btree node pinning, which we use in backpointers fsck. The
   old hacky implementation, where the shrinker just skipped over nodes
   in the pinned range, was causing OOMs; instead we now use another
   shrinker with a much higher seeks number for pinned nodes.

 - Rebalance now uses BCH_WRITE_ONLY_SPECIFIED_DEVS; this fixes an
   issue where rebalance would sometimes fall back to allocating from
   the full filesystem, which is not what we want when it's trying to
   move data to a specific target.

 - Use __GFP_ACCOUNT, GFP_RECLAIMABLE for btree node, key cache
   allocations.

 - Idmap mounts are now supported (Hongbo Li)

 - Rename whiteouts are now supported (Hongbo Li)

 - Erasure coding can now handle devices being marked as failed, or
   forcibly removed. We still need the evacuate path for erasure
   coding, but it's getting very close to ready for people to start
   using.

* tag 'bcachefs-2024-09-21' of git://evilpiepirate.org/bcachefs: (99 commits)
  bcachefs: return err ptr instead of null in read sb clean
  bcachefs: Remove duplicated include in backpointers.c
  bcachefs: Don't drop devices with stripe pointers
  bcachefs: bch2_ec_stripe_head_get() now checks for change in rw devices
  bcachefs: bch_fs.rw_devs_change_count
  bcachefs: bch2_dev_remove_stripes()
  bcachefs: bch2_trigger_ptr() calculates sectors even when no device
  bcachefs: improve error messages in bch2_ec_read_extent()
  bcachefs: improve error message on too few devices for ec
  bcachefs: improve bch2_new_stripe_to_text()
  bcachefs: ec_stripe_head.nr_created
  bcachefs: bch_stripe.disk_label
  bcachefs: stripe_to_mem()
  bcachefs: EIO errcode cleanup
  bcachefs: Rework btree node pinning
  bcachefs: split up btree cache counters for live, freeable
  bcachefs: btree cache counters should be size_t
  bcachefs: Don't count "skipped access bit" as touched in btree cache scan
  bcachefs: Failed devices no longer require mounting in degraded mode
  bcachefs: bch2_dev_rcu_noerror()
  ...
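The inode hash change above swaps the VFS's global, lock-contended inode hash for a per-filesystem rhashtable (the vfs_inodes_table member added to struct bch_fs in the diff below). A minimal hedged sketch of the kernel rhashtable API pattern involved - the struct, params, and helper names here are illustrative, not bcachefs's actual ones:

	#include <linux/rhashtable.h>

	struct my_inode {
		u64			inum;	/* hash key */
		struct rhash_head	hash;	/* table linkage */
	};

	static const struct rhashtable_params my_inode_params = {
		.head_offset		= offsetof(struct my_inode, hash),
		.key_offset		= offsetof(struct my_inode, inum),
		.key_len		= sizeof(u64),
		.automatic_shrinking	= true,
	};

	static int my_table_init(struct rhashtable *table)
	{
		return rhashtable_init(table, &my_inode_params);
	}

	static struct my_inode *my_inode_find(struct rhashtable *table, u64 inum)
	{
		/* RCU-safe lookup: no global hash lock taken */
		return rhashtable_lookup_fast(table, &inum, my_inode_params);
	}

	static int my_inode_add(struct rhashtable *table, struct my_inode *inode)
	{
		return rhashtable_lookup_insert_fast(table, &inode->hash,
						     my_inode_params);
	}

Lookups are lock-free under RCU and inserts only contend on a per-bucket lock, which is what removes the global hash-lock contention on multithreaded metadata workloads.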
@@ -175,7 +175,7 @@ errors in our thinking by running our code and seeing what happens. If your
 time is being wasted because your tools are bad or too slow - don't accept it,
 fix it.

-Put effort into your documentation, commmit messages, and code comments - but
+Put effort into your documentation, commit messages, and code comments - but
 don't go overboard. A good commit message is wonderful - but if the information
 was important enough to go in a commit message, ask yourself if it would be
 even better as a code comment.

@@ -87,6 +87,13 @@ config BCACHEFS_SIX_OPTIMISTIC_SPIN
	  is held by another thread, spin for a short while, as long as the
	  thread owning the lock is running.

+config BCACHEFS_PATH_TRACEPOINTS
+	bool "Extra btree_path tracepoints"
+	depends on BCACHEFS_FS
+	help
+	  Enable extra tracepoints for debugging btree_path operations; we don't
+	  normally want these enabled because they happen at very high rates.
+
 config MEAN_AND_VARIANCE_UNIT_TEST
	tristate "mean_and_variance unit tests" if !KUNIT_ALL_TESTS
	depends on KUNIT

@@ -69,6 +69,7 @@ bcachefs-y := \
	printbuf.o \
	quota.o \
	rebalance.o \
+	rcu_pending.o \
	recovery.o \
	recovery_passes.o \
	reflink.o \

@@ -361,7 +361,7 @@ retry:
	bch2_trans_begin(trans);
	acl = _acl;

-	ret = bch2_subvol_is_ro_trans(trans, inode->ei_subvol) ?:
+	ret = bch2_subvol_is_ro_trans(trans, inode->ei_inum.subvol) ?:
		bch2_inode_peek(trans, &inode_iter, &inode_u, inode_inum(inode),
				BTREE_ITER_intent);
	if (ret)

@@ -30,6 +30,7 @@
 #include <linux/rcupdate.h>
 #include <linux/sched/task.h>
 #include <linux/sort.h>
+#include <linux/jiffies.h>

 static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);

@@ -2183,7 +2184,7 @@ int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
-		if (last_updated + HZ * 10 < jiffies) {
+		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
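The freespace-init hunk above replaces an open-coded jiffies comparison with time_after(), which stays correct when the jiffies counter wraps around. A minimal hedged illustration of the idiom (standard <linux/jiffies.h> usage; the function and variable here are made up for the example):

	#include <linux/jiffies.h>

	/*
	 * Open-coded comparisons like "last + HZ * 10 < jiffies" break when
	 * jiffies overflows; time_after() does the comparison via a signed
	 * subtraction, so it stays correct across the wrap.
	 */
	static unsigned long last_report;

	static void maybe_report_progress(void)
	{
		if (time_after(jiffies, last_report + 10 * HZ)) {
			/* ... emit a rate-limited progress message ... */
			last_report = jiffies;
		}
	}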
@@ -2297,6 +2298,36 @@ int bch2_fs_freespace_init(struct bch_fs *c)
	return 0;
 }

+/* device removal */
+
+int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
+{
+	struct bpos start = POS(ca->dev_idx, 0);
+	struct bpos end = POS(ca->dev_idx, U64_MAX);
+	int ret;
+
+	/*
+	 * We clear the LRU and need_discard btrees first so that we don't race
+	 * with bch2_do_invalidates() and bch2_do_discards()
+	 */
+	ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?:
+		bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
+					BTREE_TRIGGER_norun, NULL) ?:
+		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
+					BTREE_TRIGGER_norun, NULL) ?:
+		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
+					BTREE_TRIGGER_norun, NULL) ?:
+		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
+					BTREE_TRIGGER_norun, NULL) ?:
+		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
+					BTREE_TRIGGER_norun, NULL) ?:
+		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
+					BTREE_TRIGGER_norun, NULL) ?:
+		bch2_dev_usage_remove(c, ca->dev_idx);
+	bch_err_msg(ca, ret, "removing dev alloc info");
+	return ret;
+}
+
 /* Bucket IO clocks: */

 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
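bch2_dev_remove_alloc() above strings its steps together with the GNU C "?:" operator: "a ?: b" evaluates to a when a is non-zero, otherwise b, so a chain of int-returning calls runs until the first one fails and yields that first error. A hedged standalone sketch of the idiom (hypothetical step functions, not bcachefs code; requires gcc/clang, as the kernel does):

	/* each step returns 0 on success or a negative errno-style code */
	static int step_one(void)   { return 0;  }
	static int step_two(void)   { return -5; } /* fails: chain stops here */
	static int step_three(void) { return 0;  } /* never evaluated */

	static int do_all_steps(void)
	{
		/* short-circuits at the first non-zero (error) result */
		return step_one() ?:
		       step_two() ?:
		       step_three();
	}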
@@ -2432,13 +2463,15 @@ static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
 /* device goes ro: */
 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
 {
-	unsigned i;
	lockdep_assert_held(&c->state_lock);

	/* First, remove device from allocation groups: */

-	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

+	c->rw_devs_change_count++;
+
	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */

@@ -2467,11 +2500,13 @@ void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
 /* device goes rw: */
 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
 {
-	unsigned i;
	lockdep_assert_held(&c->state_lock);

-	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
+	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
+
+	c->rw_devs_change_count++;
 }

 void bch2_dev_allocator_background_exit(struct bch_dev *ca)

@@ -16,7 +16,7 @@ enum bch_validate_flags;
 static inline bool bch2_dev_bucket_exists(struct bch_fs *c, struct bpos pos)
 {
	rcu_read_lock();
-	struct bch_dev *ca = bch2_dev_rcu(c, pos.inode);
+	struct bch_dev *ca = bch2_dev_rcu_noerror(c, pos.inode);
	bool ret = ca && bucket_valid(ca, pos.offset);
	rcu_read_unlock();
	return ret;

@@ -338,6 +338,7 @@ static inline const struct bch_backpointer *alloc_v4_backpointers_c(const struct

 int bch2_dev_freespace_init(struct bch_fs *, struct bch_dev *, u64, u64);
 int bch2_fs_freespace_init(struct bch_fs *);
+int bch2_dev_remove_alloc(struct bch_fs *, struct bch_dev *);

 void bch2_recalc_capacity(struct bch_fs *);
 u64 bch2_min_rw_member_capacity(struct bch_fs *);

@@ -600,6 +600,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
				enum bch_watermark watermark,
				enum bch_data_type data_type,
				struct closure *cl,
+				bool nowait,
				struct bch_dev_usage *usage)
 {
	struct bch_fs *c = trans->c;

@@ -609,7 +610,7 @@ static struct open_bucket *bch2_bucket_alloc_trans(struct btree_trans *trans,
	struct bucket_alloc_state s = {
		.btree_bitmap = data_type == BCH_DATA_btree,
	};
-	bool waiting = false;
+	bool waiting = nowait;
 again:
	bch2_dev_usage_read_fast(ca, usage);
	avail = dev_buckets_free(ca, *usage, watermark);

@@ -685,7 +686,7 @@ struct open_bucket *bch2_bucket_alloc(struct bch_fs *c, struct bch_dev *ca,

	bch2_trans_do(c, NULL, NULL, 0,
		      PTR_ERR_OR_ZERO(ob = bch2_bucket_alloc_trans(trans, ca, watermark,
-							data_type, cl, &usage)));
+							data_type, cl, false, &usage)));
	return ob;
 }

@@ -748,7 +749,6 @@ static int add_new_bucket(struct bch_fs *c,
			  unsigned nr_replicas,
			  unsigned *nr_effective,
			  bool *have_cache,
-			  unsigned flags,
			  struct open_bucket *ob)
 {
	unsigned durability = ob_dev(c, ob)->mi.durability;

@@ -775,7 +775,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
			unsigned nr_replicas,
			unsigned *nr_effective,
			bool *have_cache,
-			unsigned flags,
+			enum bch_write_flags flags,
			enum bch_data_type data_type,
			enum bch_watermark watermark,
			struct closure *cl)

@@ -801,7 +801,8 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,
			continue;
		}

-		ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type, cl, &usage);
+		ob = bch2_bucket_alloc_trans(trans, ca, watermark, data_type,
+					     cl, flags & BCH_WRITE_ALLOC_NOWAIT, &usage);
		if (!IS_ERR(ob))
			bch2_dev_stripe_increment_inlined(ca, stripe, &usage);
		bch2_dev_put(ca);

@@ -815,7 +816,7 @@ int bch2_bucket_alloc_set_trans(struct btree_trans *trans,

		if (add_new_bucket(c, ptrs, devs_may_alloc,
				   nr_replicas, nr_effective,
-				   have_cache, flags, ob)) {
+				   have_cache, ob)) {
			ret = 0;
			break;
		}

@@ -841,7 +842,7 @@ static int bucket_alloc_from_stripe(struct btree_trans *trans,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
-			unsigned flags,
+			enum bch_write_flags flags,
			struct closure *cl)
 {
	struct bch_fs *c = trans->c;

@@ -883,7 +884,7 @@ got_bucket:

	ret = add_new_bucket(c, ptrs, devs_may_alloc,
			     nr_replicas, nr_effective,
-			     have_cache, flags, ob);
+			     have_cache, ob);
 out_put_head:
	bch2_ec_stripe_head_put(c, h);
	return ret;

@@ -922,7 +923,7 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
				unsigned nr_replicas,
				unsigned *nr_effective,
				bool *have_cache,
-				bool ec, unsigned flags)
+				bool ec)
 {
	struct open_buckets ptrs_skip = { .nr = 0 };
	struct open_bucket *ob;

@@ -934,7 +935,7 @@ static int bucket_alloc_set_writepoint(struct bch_fs *c,
			     have_cache, ec, ob))
			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
-					     have_cache, flags, ob);
+					     have_cache, ob);
		else
			ob_push(c, &ptrs_skip, ob);
	}

@@ -950,8 +951,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,
				unsigned nr_replicas,
				unsigned *nr_effective,
				bool *have_cache, bool ec,
-				enum bch_watermark watermark,
-				unsigned flags)
+				enum bch_watermark watermark)
 {
	int i, ret = 0;

@@ -983,7 +983,7 @@ static int bucket_alloc_set_partial(struct bch_fs *c,

			ret = add_new_bucket(c, ptrs, devs_may_alloc,
					     nr_replicas, nr_effective,
-					     have_cache, flags, ob);
+					     have_cache, ob);
			if (ret)
				break;
		}

@@ -1003,7 +1003,7 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
-			unsigned flags,
+			enum bch_write_flags flags,
			struct closure *_cl)
 {
	struct bch_fs *c = trans->c;

@@ -1022,18 +1022,15 @@ static int __open_bucket_add_buckets(struct btree_trans *trans,
	open_bucket_for_each(c, ptrs, ob, i)
		__clear_bit(ob->dev, devs.d);

-	if (erasure_code && ec_open_bucket(c, ptrs))
-		return 0;
-
	ret = bucket_alloc_set_writepoint(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
-				 have_cache, erasure_code, flags);
+				 have_cache, erasure_code);
	if (ret)
		return ret;

	ret = bucket_alloc_set_partial(c, ptrs, wp, &devs,
				 nr_replicas, nr_effective,
-				 have_cache, erasure_code, watermark, flags);
+				 have_cache, erasure_code, watermark);
	if (ret)
		return ret;

@@ -1074,12 +1071,12 @@ static int open_bucket_add_buckets(struct btree_trans *trans,
			unsigned *nr_effective,
			bool *have_cache,
			enum bch_watermark watermark,
-			unsigned flags,
+			enum bch_write_flags flags,
			struct closure *cl)
 {
	int ret;

-	if (erasure_code) {
+	if (erasure_code && !ec_open_bucket(trans->c, ptrs)) {
		ret = __open_bucket_add_buckets(trans, ptrs, wp,
				devs_have, target, erasure_code,
				nr_replicas, nr_effective, have_cache,

@@ -1376,7 +1373,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
			unsigned nr_replicas,
			unsigned nr_replicas_required,
			enum bch_watermark watermark,
-			unsigned flags,
+			enum bch_write_flags flags,
			struct closure *cl,
			struct write_point **wp_ret)
 {

@@ -1392,8 +1389,6 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *trans,
	if (!IS_ENABLED(CONFIG_BCACHEFS_ERASURE_CODING))
		erasure_code = false;

-	BUG_ON(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS);
-
	BUG_ON(!nr_replicas || !nr_replicas_required);
 retry:
	ptrs.nr = 0;

@@ -1498,11 +1493,12 @@ err:
	    try_decrease_writepoints(trans, write_points_nr))
		goto retry;

-	if (bch2_err_matches(ret, BCH_ERR_open_buckets_empty) ||
+	if (cl && bch2_err_matches(ret, BCH_ERR_open_buckets_empty))
+		ret = -BCH_ERR_bucket_alloc_blocked;
+
+	if (cl && !(flags & BCH_WRITE_ALLOC_NOWAIT) &&
	    bch2_err_matches(ret, BCH_ERR_freelist_empty))
-		return cl
-			? -BCH_ERR_bucket_alloc_blocked
-			: -BCH_ERR_ENOSPC_bucket_alloc;
+		ret = -BCH_ERR_bucket_alloc_blocked;

	return ret;
 }

@@ -1733,13 +1729,6 @@ void bch2_dev_alloc_debug_to_text(struct printbuf *out, struct bch_dev *ca)
	for (unsigned i = 0; i < ARRAY_SIZE(c->open_buckets); i++)
		nr[c->open_buckets[i].data_type]++;

-	printbuf_tabstops_reset(out);
-	printbuf_tabstop_push(out, 12);
-	printbuf_tabstop_push(out, 16);
-	printbuf_tabstop_push(out, 16);
-	printbuf_tabstop_push(out, 16);
-	printbuf_tabstop_push(out, 16);
-
	bch2_dev_usage_to_text(out, ca, &stats);

	prt_newline(out);

@@ -155,9 +155,10 @@ static inline bool bch2_bucket_is_open_safe(struct bch_fs *c, unsigned dev, u64
	return ret;
 }

+enum bch_write_flags;
 int bch2_bucket_alloc_set_trans(struct btree_trans *, struct open_buckets *,
		      struct dev_stripe_state *, struct bch_devs_mask *,
-		      unsigned, unsigned *, bool *, unsigned,
+		      unsigned, unsigned *, bool *, enum bch_write_flags,
		      enum bch_data_type, enum bch_watermark,
		      struct closure *);

@@ -167,7 +168,7 @@ int bch2_alloc_sectors_start_trans(struct btree_trans *,
			struct bch_devs_list *,
			unsigned, unsigned,
			enum bch_watermark,
-			unsigned,
+			enum bch_write_flags,
			struct closure *,
			struct write_point **);

@@ -9,6 +9,7 @@
 #include "btree_update_interior.h"
 #include "btree_write_buffer.h"
 #include "checksum.h"
+#include "disk_accounting.h"
 #include "error.h"

 #include <linux/mm.h>

@@ -53,7 +54,7 @@ int bch2_backpointer_validate(struct bch_fs *c, struct bkey_s_c k,
	struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);

	rcu_read_lock();
-	struct bch_dev *ca = bch2_dev_rcu(c, bp.k->p.inode);
+	struct bch_dev *ca = bch2_dev_rcu_noerror(c, bp.k->p.inode);
	if (!ca) {
		/* these will be caught by fsck */
		rcu_read_unlock();

@@ -87,7 +88,7 @@ void bch2_backpointer_to_text(struct printbuf *out, const struct bch_backpointer
 void bch2_backpointer_k_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
 {
	rcu_read_lock();
-	struct bch_dev *ca = bch2_dev_rcu(c, k.k->p.inode);
+	struct bch_dev *ca = bch2_dev_rcu_noerror(c, k.k->p.inode);
	if (ca) {
		struct bpos bucket = bp_pos_to_bucket(ca, k.k->p);
		rcu_read_unlock();

@@ -671,7 +672,7 @@ static int check_extent_to_backpointers(struct btree_trans *trans,
			continue;

		rcu_read_lock();
-		struct bch_dev *ca = bch2_dev_rcu(c, p.ptr.dev);
+		struct bch_dev *ca = bch2_dev_rcu_noerror(c, p.ptr.dev);
		if (ca)
			bch2_extent_ptr_to_bp(c, ca, btree, level, k, p, entry, &bucket_pos, &bp);
		rcu_read_unlock();

@@ -750,10 +751,12 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
	s64 mem_may_pin = mem_may_pin_bytes(c);
	int ret = 0;

+	bch2_btree_cache_unpin(c);
+
	btree_interior_mask |= btree_leaf_mask;

-	c->btree_cache.pinned_nodes_leaf_mask = btree_leaf_mask;
-	c->btree_cache.pinned_nodes_interior_mask = btree_interior_mask;
+	c->btree_cache.pinned_nodes_mask[0] = btree_leaf_mask;
+	c->btree_cache.pinned_nodes_mask[1] = btree_interior_mask;
	c->btree_cache.pinned_nodes_start = start;
	c->btree_cache.pinned_nodes_end = *end = BBPOS_MAX;

@@ -775,6 +778,7 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
						BBPOS(btree, b->key.k.p);
					break;
				}
+				bch2_node_pin(c, b);
				0;
			}));
	}

@@ -782,12 +786,80 @@ static int bch2_get_btree_in_memory_pos(struct btree_trans *trans,
	return ret;
 }

+struct progress_indicator_state {
+	unsigned long		next_print;
+	u64			nodes_seen;
+	u64			nodes_total;
+	struct btree		*last_node;
+};
+
+static inline void progress_init(struct progress_indicator_state *s,
+				 struct bch_fs *c,
+				 u64 btree_id_mask)
+{
+	memset(s, 0, sizeof(*s));
+
+	s->next_print = jiffies + HZ * 10;
+
+	for (unsigned i = 0; i < BTREE_ID_NR; i++) {
+		if (!(btree_id_mask & BIT_ULL(i)))
+			continue;
+
+		struct disk_accounting_pos acc = {
+			.type		= BCH_DISK_ACCOUNTING_btree,
+			.btree.id	= i,
+		};
+
+		u64 v;
+		bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), &v, 1);
+		s->nodes_total += div64_ul(v, btree_sectors(c));
+	}
+}
+
+static inline bool progress_update_p(struct progress_indicator_state *s)
+{
+	bool ret = time_after_eq(jiffies, s->next_print);
+
+	if (ret)
+		s->next_print = jiffies + HZ * 10;
+	return ret;
+}
+
+static void progress_update_iter(struct btree_trans *trans,
+				 struct progress_indicator_state *s,
+				 struct btree_iter *iter,
+				 const char *msg)
+{
+	struct bch_fs *c = trans->c;
+	struct btree *b = path_l(btree_iter_path(trans, iter))->b;
+
+	s->nodes_seen += b != s->last_node;
+	s->last_node = b;
+
+	if (progress_update_p(s)) {
+		struct printbuf buf = PRINTBUF;
+		unsigned percent = s->nodes_total
+			? div64_u64(s->nodes_seen * 100, s->nodes_total)
+			: 0;
+
+		prt_printf(&buf, "%s: %d%%, done %llu/%llu nodes, at ",
+			   msg, percent, s->nodes_seen, s->nodes_total);
+		bch2_bbpos_to_text(&buf, BBPOS(iter->btree_id, iter->pos));
+
+		bch_info(c, "%s", buf.buf);
+		printbuf_exit(&buf);
+	}
+}
+
 static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
						   struct extents_to_bp_state *s)
 {
	struct bch_fs *c = trans->c;
+	struct progress_indicator_state progress;
	int ret = 0;

+	progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_extents)|BIT_ULL(BTREE_ID_reflink));
+
	for (enum btree_id btree_id = 0;
	     btree_id < btree_id_nr_alive(c);
	     btree_id++) {

@@ -805,6 +877,7 @@ static int bch2_check_extents_to_backpointers_pass(struct btree_trans *trans,
					  BTREE_ITER_prefetch);

		ret = for_each_btree_key_continue(trans, iter, 0, k, ({
+			progress_update_iter(trans, &progress, &iter, "extents_to_backpointers");
			check_extent_to_backpointers(trans, s, btree_id, level, k) ?:
			bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
		}));

@@ -865,8 +938,7 @@ int bch2_check_extents_to_backpointers(struct bch_fs *c)
	bch2_trans_put(trans);
	bch2_bkey_buf_exit(&s.last_flushed, c);

-	c->btree_cache.pinned_nodes_leaf_mask = 0;
-	c->btree_cache.pinned_nodes_interior_mask = 0;
+	bch2_btree_cache_unpin(c);

	bch_err_fn(c, ret);
	return ret;

@@ -920,19 +992,24 @@ static int bch2_check_backpointers_to_extents_pass(struct btree_trans *trans,
						   struct bbpos start,
						   struct bbpos end)
 {
+	struct bch_fs *c = trans->c;
	struct bkey_buf last_flushed;
+	struct progress_indicator_state progress;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);
+	progress_init(&progress, trans->c, BIT_ULL(BTREE_ID_backpointers));

	int ret = for_each_btree_key_commit(trans, iter, BTREE_ID_backpointers,
				  POS_MIN, BTREE_ITER_prefetch, k,
-				  NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
-		check_one_backpointer(trans, start, end,
-				      bkey_s_c_to_backpointer(k),
-				      &last_flushed));
+				  NULL, NULL, BCH_TRANS_COMMIT_no_enospc, ({
+			progress_update_iter(trans, &progress, &iter, "backpointers_to_extents");
+			check_one_backpointer(trans, start, end,
+					      bkey_s_c_to_backpointer(k),
+					      &last_flushed);
+		}));

-	bch2_bkey_buf_exit(&last_flushed, trans->c);
+	bch2_bkey_buf_exit(&last_flushed, c);
	return ret;
 }

@@ -977,8 +1054,7 @@ int bch2_check_backpointers_to_extents(struct bch_fs *c)
	}
	bch2_trans_put(trans);

-	c->btree_cache.pinned_nodes_leaf_mask = 0;
-	c->btree_cache.pinned_nodes_interior_mask = 0;
+	bch2_btree_cache_unpin(c);

	bch_err_fn(c, ret);
	return ret;

@@ -134,26 +134,35 @@ static inline enum bch_data_type bch2_bkey_ptr_data_type(struct bkey_s_c k,
	}
 }

+static inline void __bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
+			   enum btree_id btree_id, unsigned level,
+			   struct bkey_s_c k, struct extent_ptr_decoded p,
+			   const union bch_extent_entry *entry,
+			   struct bpos *bucket_pos, struct bch_backpointer *bp,
+			   u64 sectors)
+{
+	u32 bucket_offset;
+	*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
+	*bp = (struct bch_backpointer) {
+		.btree_id	= btree_id,
+		.level		= level,
+		.data_type	= bch2_bkey_ptr_data_type(k, p, entry),
+		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
+			p.crc.offset,
+		.bucket_len	= sectors,
+		.pos		= k.k->p,
+	};
+}
+
 static inline void bch2_extent_ptr_to_bp(struct bch_fs *c, struct bch_dev *ca,
			   enum btree_id btree_id, unsigned level,
			   struct bkey_s_c k, struct extent_ptr_decoded p,
			   const union bch_extent_entry *entry,
			   struct bpos *bucket_pos, struct bch_backpointer *bp)
 {
-	enum bch_data_type data_type = bch2_bkey_ptr_data_type(k, p, entry);
-	s64 sectors = level ? btree_sectors(c) : k.k->size;
-	u32 bucket_offset;
+	u64 sectors = ptr_disk_sectors(level ? btree_sectors(c) : k.k->size, p);

-	*bucket_pos = PTR_BUCKET_POS_OFFSET(ca, &p.ptr, &bucket_offset);
-	*bp = (struct bch_backpointer) {
-		.btree_id	= btree_id,
-		.level		= level,
-		.data_type	= data_type,
-		.bucket_offset	= ((u64) bucket_offset << MAX_EXTENT_COMPRESS_RATIO_SHIFT) +
-			p.crc.offset,
-		.bucket_len	= ptr_disk_sectors(sectors, p),
-		.pos		= k.k->p,
-	};
+	__bch2_extent_ptr_to_bp(c, ca, btree_id, level, k, p, entry, bucket_pos, bp, sectors);
 }

 int bch2_get_next_backpointer(struct btree_trans *, struct bch_dev *ca, struct bpos, int,

@@ -542,7 +542,7 @@ struct bch_dev {
	 * gc_gens_lock, for device resize - holding any is sufficient for
	 * access: Or rcu_read_lock(), but only for dev_ptr_stale():
	 */
-	struct bucket_array __rcu *buckets_gc;
+	GENRADIX(struct bucket)	buckets_gc;
	struct bucket_gens __rcu *bucket_gens;
	u8			*oldest_gen;
	unsigned long		*buckets_nouse;

@@ -871,6 +871,7 @@ struct bch_fs {

	/* ALLOCATION */
	struct bch_devs_mask	rw_devs[BCH_DATA_NR];
+	unsigned long		rw_devs_change_count;

	u64			capacity; /* sectors */
	u64			reserved; /* sectors */

@@ -1023,6 +1024,7 @@ struct bch_fs {
	/* fs.c */
	struct list_head	vfs_inodes_list;
	struct mutex		vfs_inodes_lock;
+	struct rhashtable	vfs_inodes_table;

	/* VFS IO PATH - fs-io.c */
	struct bio_set		writepage_bioset;

@@ -1044,8 +1046,6 @@ struct bch_fs {
	 * for signaling to the toplevel code which pass we want to run now.
	 */
	enum bch_recovery_pass	curr_recovery_pass;
-	/* bitmap of explicitly enabled recovery passes: */
-	u64			recovery_passes_explicit;
	/* bitmask of recovery passes that we actually ran */
	u64			recovery_passes_complete;
	/* never rewinds version of curr_recovery_pass */

@@ -1085,7 +1085,6 @@ struct bch_fs {
	u64 __percpu		*counters;

	unsigned		copy_gc_enabled:1;
-	bool			promote_whole_extents;

	struct bch2_time_stats	times[BCH_TIME_STAT_NR];

@@ -1195,12 +1194,15 @@ static inline bool btree_id_cached(const struct bch_fs *c, enum btree_id btree)
 static inline struct timespec64 bch2_time_to_timespec(const struct bch_fs *c, s64 time)
 {
	struct timespec64 t;
+	s64 sec;
	s32 rem;

	time += c->sb.time_base_lo;

-	t.tv_sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
-	t.tv_nsec = rem * c->sb.nsec_per_time_unit;
+	sec = div_s64_rem(time, c->sb.time_units_per_sec, &rem);
+
+	set_normalized_timespec64(&t, sec, rem * (s64)c->sb.nsec_per_time_unit);
+
	return t;
 }
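The bch2_time_to_timespec() hunk above stops assigning a possibly negative division remainder straight into tv_nsec and routes it through set_normalized_timespec64() instead. A hedged standalone illustration with simplified units (ns_to_ts() is a made-up name for the example; the kernel already provides ns_to_timespec64() for exactly this case):

	#include <linux/time64.h>
	#include <linux/math64.h>

	/*
	 * div_s64_rem() returns a remainder with the sign of the dividend,
	 * so for times before the epoch the remainder is negative; storing
	 * it directly in tv_nsec would produce an invalid timespec. The
	 * helper renormalizes so that 0 <= tv_nsec < NSEC_PER_SEC.
	 */
	static struct timespec64 ns_to_ts(s64 ns)
	{
		struct timespec64 ts;
		s32 rem;
		s64 sec = div_s64_rem(ns, NSEC_PER_SEC, &rem);

		set_normalized_timespec64(&ts, sec, rem);
		return ts;
	}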
@@ -795,6 +795,8 @@ LE64_BITMASK(BCH_SB_HAS_ERRORS, struct bch_sb, flags[0], 60, 61);
 LE64_BITMASK(BCH_SB_HAS_TOPOLOGY_ERRORS,struct bch_sb, flags[0], 61, 62);

 LE64_BITMASK(BCH_SB_BIG_ENDIAN, struct bch_sb, flags[0], 62, 63);
+LE64_BITMASK(BCH_SB_PROMOTE_WHOLE_EXTENTS,
+					struct bch_sb, flags[0], 63, 64);

 LE64_BITMASK(BCH_SB_STR_HASH_TYPE, struct bch_sb, flags[1], 0, 4);
 LE64_BITMASK(BCH_SB_COMPRESSION_TYPE_LO,struct bch_sb, flags[1], 4, 8);

@@ -304,11 +304,6 @@ struct bkey_float {
 };
 #define BKEY_MANTISSA_BITS 16

-static unsigned bkey_float_byte_offset(unsigned idx)
-{
-	return idx * sizeof(struct bkey_float);
-}
-
 struct ro_aux_tree {
	u8			nothing[0];
	struct bkey_float	f[];

@@ -328,8 +323,7 @@ static unsigned bset_aux_tree_buf_end(const struct bset_tree *t)
		return t->aux_data_offset;
	case BSET_RO_AUX_TREE:
		return t->aux_data_offset +
-			DIV_ROUND_UP(t->size * sizeof(struct bkey_float) +
-				     t->size * sizeof(u8), 8);
+			DIV_ROUND_UP(t->size * sizeof(struct bkey_float), 8);
	case BSET_RW_AUX_TREE:
		return t->aux_data_offset +
			DIV_ROUND_UP(sizeof(struct rw_aux_tree) * t->size, 8);

@@ -360,14 +354,6 @@ static struct ro_aux_tree *ro_aux_tree_base(const struct btree *b,
	return __aux_tree_base(b, t);
 }

-static u8 *ro_aux_tree_prev(const struct btree *b,
-			    const struct bset_tree *t)
-{
-	EBUG_ON(bset_aux_tree_type(t) != BSET_RO_AUX_TREE);
-
-	return __aux_tree_base(b, t) + bkey_float_byte_offset(t->size);
-}
-
 static struct bkey_float *bkey_float(const struct btree *b,
				     const struct bset_tree *t,
				     unsigned idx)

@@ -479,15 +465,6 @@ static inline struct bkey_packed *tree_to_bkey(const struct btree *b,
			bkey_float(b, t, j)->key_offset);
 }

-static struct bkey_packed *tree_to_prev_bkey(const struct btree *b,
-					     const struct bset_tree *t,
-					     unsigned j)
-{
-	unsigned prev_u64s = ro_aux_tree_prev(b, t)[j];
-
-	return (void *) ((u64 *) tree_to_bkey(b, t, j)->_data - prev_u64s);
-}
-
 static struct rw_aux_tree *rw_aux_tree(const struct btree *b,
				       const struct bset_tree *t)
 {

@@ -585,8 +562,7 @@ static unsigned rw_aux_tree_bsearch(struct btree *b,
 }

 static inline unsigned bkey_mantissa(const struct bkey_packed *k,
-				     const struct bkey_float *f,
-				     unsigned idx)
+				     const struct bkey_float *f)
 {
	u64 v;

@@ -617,7 +593,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
	struct bkey_packed *m = tree_to_bkey(b, t, j);
	struct bkey_packed *l = is_power_of_2(j)
		? min_key
-		: tree_to_prev_bkey(b, t, j >> ffs(j));
+		: tree_to_bkey(b, t, j >> ffs(j));
	struct bkey_packed *r = is_power_of_2(j + 1)
		? max_key
		: tree_to_bkey(b, t, j >> (ffz(j) + 1));

@@ -668,7 +644,7 @@ static __always_inline void make_bfloat(struct btree *b, struct bset_tree *t,
	EBUG_ON(shift < 0 || shift >= BFLOAT_FAILED);

	f->exponent = shift;
-	mantissa = bkey_mantissa(m, f, j);
+	mantissa = bkey_mantissa(m, f);

	/*
	 * If we've got garbage bits, set them to all 1s - it's legal for the

@@ -690,8 +666,7 @@ static unsigned __bset_tree_capacity(struct btree *b, const struct bset_tree *t)

 static unsigned bset_ro_tree_capacity(struct btree *b, const struct bset_tree *t)
 {
-	return __bset_tree_capacity(b, t) /
-		(sizeof(struct bkey_float) + sizeof(u8));
+	return __bset_tree_capacity(b, t) / sizeof(struct bkey_float);
 }

 static unsigned bset_rw_tree_capacity(struct btree *b, const struct bset_tree *t)

@@ -720,7 +695,7 @@ static noinline void __build_rw_aux_tree(struct btree *b, struct bset_tree *t)

 static noinline void __build_ro_aux_tree(struct btree *b, struct bset_tree *t)
 {
-	struct bkey_packed *prev = NULL, *k = btree_bkey_first(b, t);
+	struct bkey_packed *k = btree_bkey_first(b, t);
	struct bkey_i min_key, max_key;
	unsigned cacheline = 1;

@@ -733,12 +708,12 @@ retry:
		return;
	}

-	t->extra = (t->size - rounddown_pow_of_two(t->size - 1)) << 1;
+	t->extra = eytzinger1_extra(t->size - 1);

	/* First we figure out where the first key in each cacheline is */
	eytzinger1_for_each(j, t->size - 1) {
		while (bkey_to_cacheline(b, t, k) < cacheline)
-			prev = k, k = bkey_p_next(k);
+			k = bkey_p_next(k);

		if (k >= btree_bkey_last(b, t)) {
			/* XXX: this path sucks */

@@ -746,17 +721,12 @@ retry:
			goto retry;
		}

-		ro_aux_tree_prev(b, t)[j] = prev->u64s;
		bkey_float(b, t, j)->key_offset =
			bkey_to_cacheline_offset(b, t, cacheline++, k);

-		EBUG_ON(tree_to_prev_bkey(b, t, j) != prev);
		EBUG_ON(tree_to_bkey(b, t, j) != k);
	}

-	while (k != btree_bkey_last(b, t))
-		prev = k, k = bkey_p_next(k);
-
	if (!bkey_pack_pos(bkey_to_packed(&min_key), b->data->min_key, b)) {
		bkey_init(&min_key.k);
		min_key.k.p = b->data->min_key;

@@ -915,66 +885,18 @@ struct bkey_packed *bch2_bkey_prev_filter(struct btree *b,

 /* Insert */

-static void bch2_bset_fix_lookup_table(struct btree *b,
-				       struct bset_tree *t,
-				       struct bkey_packed *_where,
-				       unsigned clobber_u64s,
-				       unsigned new_u64s)
+static void rw_aux_tree_insert_entry(struct btree *b,
+				     struct bset_tree *t,
+				     unsigned idx)
 {
-	int shift = new_u64s - clobber_u64s;
-	unsigned l, j, where = __btree_node_key_to_offset(b, _where);
-
-	EBUG_ON(bset_has_ro_aux_tree(t));
-
-	if (!bset_has_rw_aux_tree(t))
-		return;
-
-	/* returns first entry >= where */
-	l = rw_aux_tree_bsearch(b, t, where);
-
-	if (!l) /* never delete first entry */
-		l++;
-	else if (l < t->size &&
-		 where < t->end_offset &&
-		 rw_aux_tree(b, t)[l].offset == where)
-		rw_aux_tree_set(b, t, l++, _where);
-
-	/* l now > where */
-
-	for (j = l;
-	     j < t->size &&
-	     rw_aux_tree(b, t)[j].offset < where + clobber_u64s;
-	     j++)
-		;
-
-	if (j < t->size &&
-	    rw_aux_tree(b, t)[j].offset + shift ==
-	    rw_aux_tree(b, t)[l - 1].offset)
-		j++;
-
-	memmove(&rw_aux_tree(b, t)[l],
-		&rw_aux_tree(b, t)[j],
-		(void *) &rw_aux_tree(b, t)[t->size] -
-		(void *) &rw_aux_tree(b, t)[j]);
-	t->size -= j - l;
-
-	for (j = l; j < t->size; j++)
-		rw_aux_tree(b, t)[j].offset += shift;
-
-	EBUG_ON(l < t->size &&
-		rw_aux_tree(b, t)[l].offset ==
-		rw_aux_tree(b, t)[l - 1].offset);
+	EBUG_ON(!idx || idx > t->size);
+	struct bkey_packed *start = rw_aux_to_bkey(b, t, idx - 1);
+	struct bkey_packed *end = idx < t->size
+		? rw_aux_to_bkey(b, t, idx)
+		: btree_bkey_last(b, t);

	if (t->size < bset_rw_tree_capacity(b, t) &&
-	    (l < t->size
-	     ? rw_aux_tree(b, t)[l].offset
-	     : t->end_offset) -
-	    rw_aux_tree(b, t)[l - 1].offset >
-	    L1_CACHE_BYTES / sizeof(u64)) {
-		struct bkey_packed *start = rw_aux_to_bkey(b, t, l - 1);
-		struct bkey_packed *end = l < t->size
-			? rw_aux_to_bkey(b, t, l)
-			: btree_bkey_last(b, t);
+	    (void *) end - (void *) start > L1_CACHE_BYTES) {
		struct bkey_packed *k = start;

		while (1) {

@@ -983,23 +905,78 @@ static void bch2_bset_fix_lookup_table(struct btree *b,
				break;

			if ((void *) k - (void *) start >= L1_CACHE_BYTES) {
-				memmove(&rw_aux_tree(b, t)[l + 1],
-					&rw_aux_tree(b, t)[l],
+				memmove(&rw_aux_tree(b, t)[idx + 1],
+					&rw_aux_tree(b, t)[idx],
					(void *) &rw_aux_tree(b, t)[t->size] -
-					(void *) &rw_aux_tree(b, t)[l]);
+					(void *) &rw_aux_tree(b, t)[idx]);
				t->size++;
-				rw_aux_tree_set(b, t, l, k);
+				rw_aux_tree_set(b, t, idx, k);
				break;
			}
		}
	}
 }

+static void bch2_bset_fix_lookup_table(struct btree *b,
+				       struct bset_tree *t,
+				       struct bkey_packed *_where,
+				       unsigned clobber_u64s,
+				       unsigned new_u64s)
+{
+	int shift = new_u64s - clobber_u64s;
+	unsigned idx, j, where = __btree_node_key_to_offset(b, _where);
+
+	EBUG_ON(bset_has_ro_aux_tree(t));
+
+	if (!bset_has_rw_aux_tree(t))
+		return;
+
+	if (where > rw_aux_tree(b, t)[t->size - 1].offset) {
+		rw_aux_tree_insert_entry(b, t, t->size);
+		goto verify;
+	}
+
+	/* returns first entry >= where */
+	idx = rw_aux_tree_bsearch(b, t, where);
+
+	if (rw_aux_tree(b, t)[idx].offset == where) {
+		if (!idx) { /* never delete first entry */
+			idx++;
+		} else if (where < t->end_offset) {
+			rw_aux_tree_set(b, t, idx++, _where);
+		} else {
+			EBUG_ON(where != t->end_offset);
+			rw_aux_tree_insert_entry(b, t, --t->size);
+			goto verify;
+		}
+	}
+
+	EBUG_ON(idx < t->size && rw_aux_tree(b, t)[idx].offset <= where);
+	if (idx < t->size &&
+	    rw_aux_tree(b, t)[idx].offset + shift ==
+	    rw_aux_tree(b, t)[idx - 1].offset) {
+		memmove(&rw_aux_tree(b, t)[idx],
+			&rw_aux_tree(b, t)[idx + 1],
+			(void *) &rw_aux_tree(b, t)[t->size] -
+			(void *) &rw_aux_tree(b, t)[idx + 1]);
+		t->size -= 1;
+	}
+
+	for (j = idx; j < t->size; j++)
+		rw_aux_tree(b, t)[j].offset += shift;
+
+	EBUG_ON(idx < t->size &&
+		rw_aux_tree(b, t)[idx].offset ==
+		rw_aux_tree(b, t)[idx - 1].offset);
+
+	rw_aux_tree_insert_entry(b, t, idx);
+
+verify:
+	bch2_bset_verify_rw_aux_tree(b, t);
+	bset_aux_tree_verify(b);
+}
+
 void bch2_bset_insert(struct btree *b,
		      struct btree_node_iter *iter,
		      struct bkey_packed *where,
		      struct bkey_i *insert,
		      unsigned clobber_u64s)

@@ -1098,8 +1075,7 @@ static inline void prefetch_four_cachelines(void *p)
 }

 static inline bool bkey_mantissa_bits_dropped(const struct btree *b,
-					      const struct bkey_float *f,
-					      unsigned idx)
+					      const struct bkey_float *f)
 {
 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	unsigned key_bits_start = b->format.key_u64s * 64 - b->nr_key_bits;

@@ -1133,9 +1109,9 @@ static struct bkey_packed *bset_search_tree(const struct btree *b,
			goto slowpath;

		l = f->mantissa;
-		r = bkey_mantissa(packed_search, f, n);
+		r = bkey_mantissa(packed_search, f);

-		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f, n))
+		if (unlikely(l == r) && bkey_mantissa_bits_dropped(b, f))
			goto slowpath;

		n = n * 2 + (l < r);

@@ -270,8 +270,8 @@ void bch2_bset_init_first(struct btree *, struct bset *);
 void bch2_bset_init_next(struct btree *, struct btree_node_entry *);
 void bch2_bset_build_aux_tree(struct btree *, struct bset_tree *, bool);

-void bch2_bset_insert(struct btree *, struct btree_node_iter *,
-		      struct bkey_packed *, struct bkey_i *, unsigned);
+void bch2_bset_insert(struct btree *, struct bkey_packed *, struct bkey_i *,
+		      unsigned);
 void bch2_bset_delete(struct btree *, struct bkey_packed *, unsigned);

 /* Bkey utility code */
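Several of the bset.c hunks above touch the Eytzinger-layout (breadth-first) lookup tree: node j's children live at 2j and 2j+1, which is exactly the "n = n * 2 + (l < r)" step in bset_search_tree(). A generic hedged sketch of that search shape over a plain int array (1-indexed, assumptions: sorted input laid out in BFS order; this is not bcachefs's packed-key version):

	#include <stddef.h>

	/*
	 * Eytzinger search: tree[1] is the root and the children of
	 * tree[j] are tree[2*j] and tree[2*j + 1], so each level of the
	 * descent is one multiply plus a comparison, with far better
	 * cache behavior than a classic sorted-array binary search.
	 *
	 * Returns the index of the first element >= key, or 0 if none;
	 * tree[0] is unused and elements occupy indices 1..nr.
	 */
	static size_t eytzinger_lower_bound(const int *tree, size_t nr, int key)
	{
		size_t n = 1;

		while (n <= nr)
			n = n * 2 + (tree[n] < key);

		/* cancel the trailing right-descents to recover the answer */
		return n >> __builtin_ffsl(~(long)n);
	}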
File diff suppressed because it is too large
@@ -19,6 +19,9 @@ int __bch2_btree_node_hash_insert(struct btree_cache *, struct btree *);
 int bch2_btree_node_hash_insert(struct btree_cache *, struct btree *,
				unsigned, enum btree_id);

+void bch2_node_pin(struct bch_fs *, struct btree *);
+void bch2_btree_cache_unpin(struct bch_fs *);
+
 void bch2_btree_node_update_key_early(struct btree_trans *, enum btree_id, unsigned,
				      struct bkey_s_c, struct bkey_i *);

@@ -549,9 +549,8 @@ reconstruct_root:
			six_unlock_read(&b->c.lock);

			if (ret == DROP_THIS_NODE) {
-				bch2_btree_node_hash_remove(&c->btree_cache, b);
				mutex_lock(&c->btree_cache.lock);
-				list_move(&b->list, &c->btree_cache.freeable);
+				bch2_btree_node_hash_remove(&c->btree_cache, b);
				mutex_unlock(&c->btree_cache.lock);

				r->b = NULL;

@@ -753,10 +752,8 @@ static void bch2_gc_free(struct bch_fs *c)
	genradix_free(&c->reflink_gc_table);
	genradix_free(&c->gc_stripes);

-	for_each_member_device(c, ca) {
-		kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
-		ca->buckets_gc = NULL;
-	}
+	for_each_member_device(c, ca)
+		genradix_free(&ca->buckets_gc);
 }

 static int bch2_gc_start(struct bch_fs *c)

@@ -910,20 +907,12 @@ static int bch2_gc_alloc_start(struct bch_fs *c)
	int ret = 0;

	for_each_member_device(c, ca) {
-		struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
-					ca->mi.nbuckets * sizeof(struct bucket),
-					GFP_KERNEL|__GFP_ZERO);
-		if (!buckets) {
+		ret = genradix_prealloc(&ca->buckets_gc, ca->mi.nbuckets, GFP_KERNEL);
+		if (ret) {
			bch2_dev_put(ca);
			ret = -BCH_ERR_ENOMEM_gc_alloc_start;
			break;
		}
-
-		buckets->first_bucket	= ca->mi.first_bucket;
-		buckets->nbuckets	= ca->mi.nbuckets;
-		buckets->nbuckets_minus_first =
-			buckets->nbuckets - buckets->first_bucket;
-		rcu_assign_pointer(ca->buckets_gc, buckets);
	}

	bch_err_fn(c, ret);
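The bch2_gc_alloc_start() hunk above replaces one huge kvmalloc'd bucket array with a GENRADIX, which backs the array with page-sized chunks and so avoids large contiguous allocations. A hedged sketch of the kernel's generic-radix-tree API (the bucket_stats type and helper names are illustrative):

	#include <linux/generic-radix-tree.h>
	#include <linux/gfp.h>

	struct bucket_stats {
		u64 sectors_used;
	};

	/* a densely-indexed array of bucket_stats, allocated page by page */
	static GENRADIX(struct bucket_stats) stats;

	static int stats_init(size_t nr_buckets)
	{
		genradix_init(&stats);
		/* preallocate every entry up front, as the GC start path does */
		return genradix_prealloc(&stats, nr_buckets, GFP_KERNEL);
	}

	static u64 stats_read(size_t bucket)
	{
		struct bucket_stats *s = genradix_ptr(&stats, bucket);

		return s ? s->sectors_used : 0;
	}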
@@ -1666,7 +1666,7 @@ void bch2_btree_node_read(struct btree_trans *trans, struct btree *b,
		bch2_btree_pos_to_text(&buf, c, b);
		bch_err_ratelimited(c, "%s", buf.buf);

-		if (c->recovery_passes_explicit & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
+		if (c->opts.recovery_passes & BIT_ULL(BCH_RECOVERY_PASS_check_topology) &&
		    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_topology)
			bch2_fatal_error(c);

@@ -1749,10 +1749,8 @@ static int __bch2_btree_root_read(struct btree_trans *trans, enum btree_id id,
	bch2_btree_node_read(trans, b, true);

	if (btree_node_read_error(b)) {
-		bch2_btree_node_hash_remove(&c->btree_cache, b);
-
		mutex_lock(&c->btree_cache.lock);
-		list_move(&b->list, &c->btree_cache.freeable);
+		bch2_btree_node_hash_remove(&c->btree_cache, b);
		mutex_unlock(&c->btree_cache.lock);

		ret = -BCH_ERR_btree_node_read_error;

@@ -2031,7 +2029,7 @@ void __bch2_btree_node_write(struct bch_fs *c, struct btree *b, unsigned flags)
 do_write:
	BUG_ON((type == BTREE_WRITE_initial) != (b->written == 0));

-	atomic_dec(&c->btree_cache.dirty);
+	atomic_long_dec(&c->btree_cache.nr_dirty);

	BUG_ON(btree_node_fake(b));
	BUG_ON((b->will_make_reachable != 0) != !b->written);

@@ -18,13 +18,13 @@ struct btree_node_read_all;
 static inline void set_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
	if (!test_and_set_bit(BTREE_NODE_dirty, &b->flags))
-		atomic_inc(&c->btree_cache.dirty);
+		atomic_long_inc(&c->btree_cache.nr_dirty);
 }

 static inline void clear_btree_node_dirty_acct(struct bch_fs *c, struct btree *b)
 {
	if (test_and_clear_bit(BTREE_NODE_dirty, &b->flags))
-		atomic_dec(&c->btree_cache.dirty);
+		atomic_long_dec(&c->btree_cache.nr_dirty);
 }

 static inline unsigned btree_ptr_sectors_written(struct bkey_s_c k)

@@ -1010,9 +1010,9 @@ retry_all:
		 * the same position:
		 */
		if (trans->paths[idx].uptodate) {
-			__btree_path_get(&trans->paths[idx], false);
+			__btree_path_get(trans, &trans->paths[idx], false);
			ret = bch2_btree_path_traverse_one(trans, idx, 0, _THIS_IP_);
-			__btree_path_put(&trans->paths[idx], false);
+			__btree_path_put(trans, &trans->paths[idx], false);

			if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
			    bch2_err_matches(ret, ENOMEM))

@@ -1131,6 +1131,8 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,
	if (unlikely(!trans->srcu_held))
		bch2_trans_srcu_lock(trans);

+	trace_btree_path_traverse_start(trans, path);
+
	/*
	 * Ensure we obey path->should_be_locked: if it's set, we can't unlock
	 * and re-traverse the path without a transaction restart:

@@ -1194,6 +1196,7 @@ int bch2_btree_path_traverse_one(struct btree_trans *trans,

 out_uptodate:
	path->uptodate = BTREE_ITER_UPTODATE;
+	trace_btree_path_traverse_end(trans, path);
 out:
	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) != !!trans->restarted)
		panic("ret %s (%i) trans->restarted %s (%i)\n",

@@ -1225,7 +1228,7 @@ static btree_path_idx_t btree_path_clone(struct btree_trans *trans, btree_path_i
 {
	btree_path_idx_t new = btree_path_alloc(trans, src);
	btree_path_copy(trans, trans->paths + new, trans->paths + src);
-	__btree_path_get(trans->paths + new, intent);
+	__btree_path_get(trans, trans->paths + new, intent);
 #ifdef TRACK_PATH_ALLOCATED
	trans->paths[new].ip_allocated = ip;
 #endif

@@ -1236,8 +1239,10 @@ __flatten
 btree_path_idx_t __bch2_btree_path_make_mut(struct btree_trans *trans,
			 btree_path_idx_t path, bool intent, unsigned long ip)
 {
-	__btree_path_put(trans->paths + path, intent);
+	struct btree_path *old = trans->paths + path;
+	__btree_path_put(trans, trans->paths + path, intent);
	path = btree_path_clone(trans, path, intent, ip);
+	trace_btree_path_clone(trans, old, trans->paths + path);
	trans->paths[path].preserve = false;
	return path;
 }

@@ -1252,6 +1257,8 @@ __bch2_btree_path_set_pos(struct btree_trans *trans,
	bch2_trans_verify_not_in_restart(trans);
	EBUG_ON(!trans->paths[path_idx].ref);

+	trace_btree_path_set_pos(trans, trans->paths + path_idx, &new_pos);
+
	path_idx = bch2_btree_path_make_mut(trans, path_idx, intent, ip);

	struct btree_path *path = trans->paths + path_idx;

@@ -1361,13 +1368,15 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 {
	struct btree_path *path = trans->paths + path_idx, *dup;

-	if (!__btree_path_put(path, intent))
+	if (!__btree_path_put(trans, path, intent))
		return;

	dup = path->preserve
		? have_path_at_pos(trans, path)
		: have_node_at_pos(trans, path);

+	trace_btree_path_free(trans, path_idx, dup);
+
	if (!dup && !(!path->preserve && !is_btree_node(path, path->level)))
		return;

@@ -1392,7 +1401,7 @@ void bch2_path_put(struct btree_trans *trans, btree_path_idx_t path_idx, bool in
 static void bch2_path_put_nokeep(struct btree_trans *trans, btree_path_idx_t path,
				 bool intent)
 {
-	if (!__btree_path_put(trans->paths + path, intent))
+	if (!__btree_path_put(trans, trans->paths + path, intent))
		return;

	__bch2_path_free(trans, path);

@@ -1421,8 +1430,8 @@ void __noreturn bch2_trans_unlocked_error(struct btree_trans *trans)
 noinline __cold
 void bch2_trans_updates_to_text(struct printbuf *buf, struct btree_trans *trans)
 {
-	prt_printf(buf, "transaction updates for %s journal seq %llu\n",
-		   trans->fn, trans->journal_res.seq);
+	prt_printf(buf, "%u transaction updates for %s journal seq %llu\n",
+		   trans->nr_updates, trans->fn, trans->journal_res.seq);
	printbuf_indent_add(buf, 2);

	trans_for_each_update(trans, i) {

@@ -1464,7 +1473,7 @@ static void bch2_btree_path_to_text_short(struct printbuf *out, struct btree_tra
 {
	struct btree_path *path = trans->paths + path_idx;

-	prt_printf(out, "path: idx %2u ref %u:%u %c %c %c btree=%s l=%u pos ",
+	prt_printf(out, "path: idx %3u ref %u:%u %c %c %c btree=%s l=%u pos ",
		   path_idx, path->ref, path->intent_ref,
		   path->preserve ? 'P' : ' ',
		   path->should_be_locked ? 'S' : ' ',

@@ -1716,14 +1725,16 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
	    trans->paths[path_pos].cached == cached &&
	    trans->paths[path_pos].btree_id == btree_id &&
	    trans->paths[path_pos].level == level) {
-		__btree_path_get(trans->paths + path_pos, intent);
+		trace_btree_path_get(trans, trans->paths + path_pos, &pos);
+
+		__btree_path_get(trans, trans->paths + path_pos, intent);
		path_idx = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
		path = trans->paths + path_idx;
	} else {
		path_idx = btree_path_alloc(trans, path_pos);
		path = trans->paths + path_idx;

-		__btree_path_get(path, intent);
+		__btree_path_get(trans, path, intent);
		path->pos = pos;
		path->btree_id = btree_id;
		path->cached = cached;

@@ -1738,6 +1749,8 @@ btree_path_idx_t bch2_path_get(struct btree_trans *trans,
		path->ip_allocated = ip;
 #endif
		trans->paths_sorted = false;
+
+		trace_btree_path_alloc(trans, path);
	}

	if (!(flags & BTREE_ITER_nopreserve))

@@ -1857,7 +1870,7 @@ bch2_btree_iter_traverse(struct btree_iter *iter)

	struct btree_path *path = btree_iter_path(trans, iter);
	if (btree_path_node(path, path->level))
-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);
	return 0;
 }

@@ -1889,7 +1902,7 @@ struct btree *bch2_btree_iter_peek_node(struct btree_iter *iter)
	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

@@ -1983,7 +1996,7 @@ struct btree *bch2_btree_iter_next_node(struct btree_iter *iter)
	iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
	EBUG_ON(btree_iter_path(trans, iter)->uptodate);
 out:
	bch2_btree_iter_verify_entry_exit(iter);

@@ -2155,7 +2168,7 @@ struct bkey_s_c btree_trans_peek_key_cache(struct btree_iter *iter, struct bpos
	if (unlikely(ret))
		return bkey_s_c_err(ret);

-	btree_path_set_should_be_locked(trans->paths + iter->key_cache_path);
+	btree_path_set_should_be_locked(trans, trans->paths + iter->key_cache_path);

	k = bch2_btree_path_peek_slot(trans->paths + iter->key_cache_path, &u);
	if (k.k && !bkey_err(k)) {

@@ -2199,7 +2212,7 @@ static struct bkey_s_c __bch2_btree_iter_peek(struct btree_iter *iter, struct bp
			goto out;
		}

-		btree_path_set_should_be_locked(path);
+		btree_path_set_should_be_locked(trans, path);

		k = btree_path_level_peek_all(trans->c, l, &iter->k);

@@ -2326,7 +2339,7 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
			 * advance, same as on exit for iter->path, but only up
			 * to snapshot
			 */
-			__btree_path_get(trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
+			__btree_path_get(trans, trans->paths + iter->path, iter->flags & BTREE_ITER_intent);
			iter->update_path = iter->path;

			iter->update_path = bch2_btree_path_set_pos(trans,

@@ -2382,14 +2395,14 @@ struct bkey_s_c bch2_btree_iter_peek_upto(struct btree_iter *iter, struct bpos e
					iter->flags & BTREE_ITER_intent,
					btree_iter_ip_allocated(iter));

-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out_no_locked:
	if (iter->update_path) {
		ret = bch2_btree_path_relock(trans, trans->paths + iter->update_path, _THIS_IP_);
		if (unlikely(ret))
			k = bkey_s_c_err(ret);
		else
-			btree_path_set_should_be_locked(trans->paths + iter->update_path);
+			btree_path_set_should_be_locked(trans, trans->paths + iter->update_path);
	}

	if (!(iter->flags & BTREE_ITER_all_snapshots))

@@ -2511,6 +2524,7 @@ struct bkey_s_c bch2_btree_iter_peek_prev(struct btree_iter *iter)
					iter->flags & BTREE_ITER_intent,
					_THIS_IP_);
			path = btree_iter_path(trans, iter);
+			trace_btree_path_save_pos(trans, path, trans->paths + saved_path);
			saved_k = *k.k;
			saved_v = k.v;
		}

@@ -2527,7 +2541,7 @@ got_key:
				continue;
			}

-			btree_path_set_should_be_locked(path);
+			btree_path_set_should_be_locked(trans, path);
			break;
		} else if (likely(!bpos_eq(path->l[0].b->data->min_key, POS_MIN))) {
			/* Advance to previous leaf node: */

@@ -2685,7 +2699,7 @@ struct bkey_s_c bch2_btree_iter_peek_slot(struct btree_iter *iter)
		}
	}
 out:
-	btree_path_set_should_be_locked(btree_iter_path(trans, iter));
+	btree_path_set_should_be_locked(trans, btree_iter_path(trans, iter));
 out_no_locked:
	bch2_btree_iter_verify_entry_exit(iter);
	bch2_btree_iter_verify(iter);

@@ -2712,6 +2726,7 @@ struct bkey_s_c bch2_btree_iter_prev_slot(struct btree_iter *iter)
	return bch2_btree_iter_peek_slot(iter);
 }

+/* Obsolete, but still used by rust wrapper in -tools */
 struct bkey_s_c bch2_btree_iter_peek_and_restart_outlined(struct btree_iter *iter)
 {
	struct bkey_s_c k;

@@ -2911,9 +2926,9 @@ void bch2_trans_copy_iter(struct btree_iter *dst, struct btree_iter *src)
	dst->ip_allocated = _RET_IP_;
 #endif
	if (src->path)
-		__btree_path_get(trans->paths + src->path, src->flags & BTREE_ITER_intent);
+		__btree_path_get(trans, trans->paths + src->path, src->flags & BTREE_ITER_intent);
	if (src->update_path)
-		__btree_path_get(trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
+		__btree_path_get(trans, trans->paths + src->update_path, src->flags & BTREE_ITER_intent);
	dst->key_cache_path = 0;
 }

@@ -3237,7 +3252,7 @@ void bch2_trans_put(struct btree_trans *trans)
		bch2_trans_unlock(trans);

	trans_for_each_update(trans, i)
-		__btree_path_put(trans->paths + i->path, true);
+		__btree_path_put(trans, trans->paths + i->path, true);
	trans->nr_updates = 0;

	check_btree_paths_leaked(trans);
Some files were not shown because too many files have changed in this diff.