Mirror of https://github.com/Dasharo/linux.git (synced 2026-03-06 15:25:10 -08:00)
Merge tag 'mm-nonmm-stable-2024-07-21-15-07' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm
Pull non-MM updates from Andrew Morton:

 - In the series "treewide: Refactor heap related implementation", Kuan-Wei Chiu has significantly reworked the min_heap library code and has taught bcachefs to use the new, more generic implementation.

 - Yury Norov's series "Cleanup cpumask.h inclusion in core headers" reworks the cpumask and nodemask headers to make things generally more rational.

 - Kuan-Wei Chiu has sent along some maintenance work against our sorting library code in the series "lib/sort: Optimizations and cleanups".

 - More library maintenance work from Christophe Jaillet in the series "Remove usage of the deprecated ida_simple_xx() API".

 - Ryusuke Konishi continues with the nilfs2 fixes and cleanups in the series "nilfs2: eliminate the call to inode_attach_wb()".

 - Kuan-Ying Lee has some fixes to the gdb scripts in the series "Fix GDB command error".

 - Plus the usual shower of singleton patches all over the place. Please see the relevant changelogs for details.

* tag 'mm-nonmm-stable-2024-07-21-15-07' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (98 commits)
  ia64: scrub ia64 from poison.h
  watchdog/perf: properly initialize the turbo mode timestamp and rearm counter
  tsacct: replace strncpy() with strscpy()
  lib/bch.c: use swap() to improve code
  test_bpf: convert comma to semicolon
  init/modpost: conditionally check section mismatch to __meminit*
  init: remove unused __MEMINIT* macros
  nilfs2: Constify struct kobj_type
  nilfs2: avoid undefined behavior in nilfs_cnt32_ge macro
  math: rational: add missing MODULE_DESCRIPTION() macro
  lib/zlib: add missing MODULE_DESCRIPTION() macro
  fs: ufs: add MODULE_DESCRIPTION()
  lib/rbtree.c: fix the example typo
  ocfs2: add bounds checking to ocfs2_check_dir_entry()
  fs: add kernel-doc comments to ocfs2_prepare_orphan_dir()
  coredump: simplify zap_process()
  selftests/fpu: add missing MODULE_DESCRIPTION() macro
  compiler.h: simplify data_race() macro
  build-id: require program headers to be right after ELF header
  resource: add missing MODULE_DESCRIPTION()
  ...
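Several hunks in the diff below come from the "Remove usage of the deprecated ida_simple_xx() API" series. The conversion is mostly mechanical, but the bounds convention changes: ida_simple_get() takes an exclusive upper bound, while ida_alloc_range() takes an inclusive maximum. A minimal sketch of the mapping follows; the IDA and the function are hypothetical and only illustrate the pattern, they are not taken from any one driver in this merge.

#include <linux/idr.h>

static DEFINE_IDA(example_ida);	/* hypothetical IDA, for illustration only */

static int example_ida_conversion(void)
{
	int any, fixed, at_least_one;

	/* ida_simple_get(&ida, 0, 0, GFP_KERNEL): "end = 0" meant "no upper bound". */
	any = ida_alloc(&example_ida, GFP_KERNEL);

	/*
	 * ida_simple_get(&ida, reg, reg + 1, GFP_KERNEL) allocated from the
	 * half-open range [reg, reg + 1), i.e. exactly reg.  The replacement
	 * takes an inclusive maximum, so it becomes ida_alloc_range(.., reg, reg, ..).
	 */
	fixed = ida_alloc_range(&example_ida, 7, 7, GFP_KERNEL);

	/* ida_simple_get(&ida, 1, INT_MAX, GFP_KERNEL): any id >= 1. */
	at_least_one = ida_alloc_min(&example_ida, 1, GFP_KERNEL);

	/* ida_simple_remove() is a 1:1 rename to ida_free(). */
	if (any >= 0)
		ida_free(&example_ida, any);
	if (fixed >= 0)
		ida_free(&example_ida, fixed);
	if (at_least_one >= 0)
		ida_free(&example_ida, at_least_one);
	return 0;
}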
@@ -3801,8 +3801,10 @@ F: include/linux/bitmap-str.h
F: include/linux/bitmap.h
F: include/linux/bits.h
F: include/linux/cpumask.h
F: include/linux/cpumask_types.h
F: include/linux/find.h
F: include/linux/nodemask.h
F: include/linux/nodemask_types.h
F: include/vdso/bits.h
F: lib/bitmap-str.c
F: lib/bitmap.c

@@ -123,8 +123,6 @@ SECTIONS
*/
*(.sfpr);
*(.text.asan.* .text.tsan.*)
MEM_KEEP(init.text)
MEM_KEEP(exit.text)
} :text

. = ALIGN(PAGE_SIZE);

@@ -656,17 +656,16 @@ static int occ_probe(struct platform_device *pdev)
rc = of_property_read_u32(dev->of_node, "reg", &reg);
if (!rc) {
/* make sure we don't have a duplicate from dts */
occ->idx = ida_simple_get(&occ_ida, reg, reg + 1,
GFP_KERNEL);
occ->idx = ida_alloc_range(&occ_ida, reg, reg,
GFP_KERNEL);
if (occ->idx < 0)
occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
GFP_KERNEL);
occ->idx = ida_alloc_min(&occ_ida, 1,
GFP_KERNEL);
} else {
occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX,
GFP_KERNEL);
occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL);
}
} else {
occ->idx = ida_simple_get(&occ_ida, 1, INT_MAX, GFP_KERNEL);
occ->idx = ida_alloc_min(&occ_ida, 1, GFP_KERNEL);
}

platform_set_drvdata(pdev, occ);

@@ -680,7 +679,7 @@ static int occ_probe(struct platform_device *pdev)
rc = misc_register(&occ->mdev);
if (rc) {
dev_err(dev, "failed to register miscdevice: %d\n", rc);
ida_simple_remove(&occ_ida, occ->idx);
ida_free(&occ_ida, occ->idx);
kvfree(occ->buffer);
return rc;
}

@@ -719,7 +718,7 @@ static int occ_remove(struct platform_device *pdev)
else
device_for_each_child(&pdev->dev, NULL, occ_unregister_of_child);

ida_simple_remove(&occ_ida, occ->idx);
ida_free(&occ_ida, occ->idx);

return 0;
}

@@ -340,7 +340,7 @@ static int eb_create(struct i915_execbuffer *eb)
* Without a 1:1 association between relocation handles and
* the execobject[] index, we instead create a hashtable.
* We size it dynamically based on available memory, starting
* first with 1:1 assocative hash and scaling back until
* first with 1:1 associative hash and scaling back until
* the allocation succeeds.
*
* Later on we use a positive lut_size to indicate we are

@@ -164,40 +164,68 @@ static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
* prio is worth 1/8th of what INITIAL_PRIO is worth.
*/

#define bucket_prio(b) \
({ \
unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8; \
\
(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b); \
})
static inline unsigned int new_bucket_prio(struct cache *ca, struct bucket *b)
{
unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;

#define bucket_max_cmp(l, r) (bucket_prio(l) < bucket_prio(r))
#define bucket_min_cmp(l, r) (bucket_prio(l) > bucket_prio(r))
return (b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);
}

static inline bool new_bucket_max_cmp(const void *l, const void *r, void *args)
{
struct bucket **lhs = (struct bucket **)l;
struct bucket **rhs = (struct bucket **)r;
struct cache *ca = args;

return new_bucket_prio(ca, *lhs) > new_bucket_prio(ca, *rhs);
}

static inline bool new_bucket_min_cmp(const void *l, const void *r, void *args)
{
struct bucket **lhs = (struct bucket **)l;
struct bucket **rhs = (struct bucket **)r;
struct cache *ca = args;

return new_bucket_prio(ca, *lhs) < new_bucket_prio(ca, *rhs);
}

static inline void new_bucket_swap(void *l, void *r, void __always_unused *args)
{
struct bucket **lhs = l, **rhs = r;

swap(*lhs, *rhs);
}

static void invalidate_buckets_lru(struct cache *ca)
{
struct bucket *b;
ssize_t i;
const struct min_heap_callbacks bucket_max_cmp_callback = {
.less = new_bucket_max_cmp,
.swp = new_bucket_swap,
};
const struct min_heap_callbacks bucket_min_cmp_callback = {
.less = new_bucket_min_cmp,
.swp = new_bucket_swap,
};

ca->heap.used = 0;
ca->heap.nr = 0;

for_each_bucket(b, ca) {
if (!bch_can_invalidate_bucket(ca, b))
continue;

if (!heap_full(&ca->heap))
heap_add(&ca->heap, b, bucket_max_cmp);
else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
if (!min_heap_full(&ca->heap))
min_heap_push(&ca->heap, &b, &bucket_max_cmp_callback, ca);
else if (!new_bucket_max_cmp(&b, min_heap_peek(&ca->heap), ca)) {
ca->heap.data[0] = b;
heap_sift(&ca->heap, 0, bucket_max_cmp);
min_heap_sift_down(&ca->heap, 0, &bucket_max_cmp_callback, ca);
}
}

for (i = ca->heap.used / 2 - 1; i >= 0; --i)
heap_sift(&ca->heap, i, bucket_min_cmp);
min_heapify_all(&ca->heap, &bucket_min_cmp_callback, ca);

while (!fifo_full(&ca->free_inc)) {
if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
if (!ca->heap.nr) {
/*
* We don't want to be calling invalidate_buckets()
* multiple times when it can't do anything

@@ -206,6 +234,8 @@ static void invalidate_buckets_lru(struct cache *ca)
wake_up_gc(ca->set);
return;
}
b = min_heap_peek(&ca->heap)[0];
min_heap_pop(&ca->heap, &bucket_min_cmp_callback, ca);

bch_invalidate_one_bucket(ca, b);
}

@@ -458,7 +458,7 @@ struct cache {
/* Allocation stuff: */
struct bucket *buckets;

DECLARE_HEAP(struct bucket *, heap);
DEFINE_MIN_HEAP(struct bucket *, cache_heap) heap;

/*
* If nonzero, we know we aren't going to find any buckets to invalidate

@@ -54,9 +54,11 @@ void bch_dump_bucket(struct btree_keys *b)
int __bch_count_data(struct btree_keys *b)
{
unsigned int ret = 0;
struct btree_iter_stack iter;
struct btree_iter iter;
struct bkey *k;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

if (b->ops->is_extents)
for_each_key(b, k, &iter)
ret += KEY_SIZE(k);

@@ -67,9 +69,11 @@ void __bch_check_keys(struct btree_keys *b, const char *fmt, ...)
{
va_list args;
struct bkey *k, *p = NULL;
struct btree_iter_stack iter;
struct btree_iter iter;
const char *err;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

for_each_key(b, k, &iter) {
if (b->ops->is_extents) {
err = "Keys out of order";

@@ -110,9 +114,9 @@ bug:

static void bch_btree_iter_next_check(struct btree_iter *iter)
{
struct bkey *k = iter->data->k, *next = bkey_next(k);
struct bkey *k = iter->heap.data->k, *next = bkey_next(k);

if (next < iter->data->end &&
if (next < iter->heap.data->end &&
bkey_cmp(k, iter->b->ops->is_extents ?
&START_KEY(next) : next) > 0) {
bch_dump_bucket(iter->b);

@@ -879,12 +883,14 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
unsigned int status = BTREE_INSERT_STATUS_NO_INSERT;
struct bset *i = bset_tree_last(b)->data;
struct bkey *m, *prev = NULL;
struct btree_iter_stack iter;
struct btree_iter iter;
struct bkey preceding_key_on_stack = ZERO_KEY;
struct bkey *preceding_key_p = &preceding_key_on_stack;

BUG_ON(b->ops->is_extents && !KEY_SIZE(k));

min_heap_init(&iter.heap, NULL, MAX_BSETS);

/*
* If k has preceding key, preceding_key_p will be set to address
* of k's preceding key; otherwise preceding_key_p will be set

@@ -895,9 +901,9 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
else
preceding_key(k, &preceding_key_p);

m = bch_btree_iter_stack_init(b, &iter, preceding_key_p);
m = bch_btree_iter_init(b, &iter, preceding_key_p);

if (b->ops->insert_fixup(b, k, &iter.iter, replace_key))
if (b->ops->insert_fixup(b, k, &iter, replace_key))
return status;

status = BTREE_INSERT_STATUS_INSERT;

@@ -1077,79 +1083,102 @@ struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,

/* Btree iterator */

typedef bool (btree_iter_cmp_fn)(struct btree_iter_set,
struct btree_iter_set);
typedef bool (new_btree_iter_cmp_fn)(const void *, const void *, void *);

static inline bool btree_iter_cmp(struct btree_iter_set l,
struct btree_iter_set r)
static inline bool new_btree_iter_cmp(const void *l, const void *r, void __always_unused *args)
{
return bkey_cmp(l.k, r.k) > 0;
const struct btree_iter_set *_l = l;
const struct btree_iter_set *_r = r;

return bkey_cmp(_l->k, _r->k) <= 0;
}

static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
struct btree_iter_set *_iter1 = iter1;
struct btree_iter_set *_iter2 = iter2;

swap(*_iter1, *_iter2);
}

static inline bool btree_iter_end(struct btree_iter *iter)
{
return !iter->used;
return !iter->heap.nr;
}

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end)
{
const struct min_heap_callbacks callbacks = {
.less = new_btree_iter_cmp,
.swp = new_btree_iter_swap,
};

if (k != end)
BUG_ON(!heap_add(iter,
((struct btree_iter_set) { k, end }),
btree_iter_cmp));
BUG_ON(!min_heap_push(&iter->heap,
&((struct btree_iter_set) { k, end }),
&callbacks,
NULL));
}

static struct bkey *__bch_btree_iter_stack_init(struct btree_keys *b,
struct btree_iter_stack *iter,
struct bkey *search,
struct bset_tree *start)
static struct bkey *__bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search,
struct bset_tree *start)
{
struct bkey *ret = NULL;

iter->iter.size = ARRAY_SIZE(iter->stack_data);
iter->iter.used = 0;
iter->heap.size = ARRAY_SIZE(iter->heap.preallocated);
iter->heap.nr = 0;

#ifdef CONFIG_BCACHE_DEBUG
iter->iter.b = b;
iter->b = b;
#endif

for (; start <= bset_tree_last(b); start++) {
ret = bch_bset_search(b, start, search);
bch_btree_iter_push(&iter->iter, ret, bset_bkey_last(start->data));
bch_btree_iter_push(iter, ret, bset_bkey_last(start->data));
}

return ret;
}

struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
struct btree_iter_stack *iter,
struct bkey *bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search)
{
return __bch_btree_iter_stack_init(b, iter, search, b->set);
return __bch_btree_iter_init(b, iter, search, b->set);
}

static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,
btree_iter_cmp_fn *cmp)
new_btree_iter_cmp_fn *cmp)
{
struct btree_iter_set b __maybe_unused;
struct bkey *ret = NULL;
const struct min_heap_callbacks callbacks = {
.less = cmp,
.swp = new_btree_iter_swap,
};

if (!btree_iter_end(iter)) {
bch_btree_iter_next_check(iter);

ret = iter->data->k;
iter->data->k = bkey_next(iter->data->k);
ret = iter->heap.data->k;
iter->heap.data->k = bkey_next(iter->heap.data->k);

if (iter->data->k > iter->data->end) {
if (iter->heap.data->k > iter->heap.data->end) {
WARN_ONCE(1, "bset was corrupt!\n");
iter->data->k = iter->data->end;
iter->heap.data->k = iter->heap.data->end;
}

if (iter->data->k == iter->data->end)
heap_pop(iter, b, cmp);
if (iter->heap.data->k == iter->heap.data->end) {
if (iter->heap.nr) {
b = min_heap_peek(&iter->heap)[0];
min_heap_pop(&iter->heap, &callbacks, NULL);
}
}
else
heap_sift(iter, 0, cmp);
min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);
}

return ret;

@@ -1157,7 +1186,7 @@ static inline struct bkey *__bch_btree_iter_next(struct btree_iter *iter,

struct bkey *bch_btree_iter_next(struct btree_iter *iter)
{
return __bch_btree_iter_next(iter, btree_iter_cmp);
return __bch_btree_iter_next(iter, new_btree_iter_cmp);

}

@@ -1195,16 +1224,18 @@ static void btree_mergesort(struct btree_keys *b, struct bset *out,
struct btree_iter *iter,
bool fixup, bool remove_stale)
{
int i;
struct bkey *k, *last = NULL;
BKEY_PADDED(k) tmp;
bool (*bad)(struct btree_keys *, const struct bkey *) = remove_stale
? bch_ptr_bad
: bch_ptr_invalid;
const struct min_heap_callbacks callbacks = {
.less = b->ops->sort_cmp,
.swp = new_btree_iter_swap,
};

/* Heapify the iterator, using our comparison function */
for (i = iter->used / 2 - 1; i >= 0; --i)
heap_sift(iter, i, b->ops->sort_cmp);
min_heapify_all(&iter->heap, &callbacks, NULL);

while (!btree_iter_end(iter)) {
if (b->ops->sort_fixup && fixup)

@@ -1293,10 +1324,11 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
struct bset_sort_state *state)
{
size_t order = b->page_order, keys = 0;
struct btree_iter_stack iter;
struct btree_iter iter;
int oldsize = bch_count_data(b);

__bch_btree_iter_stack_init(b, &iter, NULL, &b->set[start]);
min_heap_init(&iter.heap, NULL, MAX_BSETS);
__bch_btree_iter_init(b, &iter, NULL, &b->set[start]);

if (start) {
unsigned int i;

@@ -1307,7 +1339,7 @@ void bch_btree_sort_partial(struct btree_keys *b, unsigned int start,
order = get_order(__set_bytes(b->set->data, keys));
}

__btree_sort(b, &iter.iter, start, order, false, state);
__btree_sort(b, &iter, start, order, false, state);

EBUG_ON(oldsize >= 0 && bch_count_data(b) != oldsize);
}

@@ -1323,11 +1355,13 @@ void bch_btree_sort_into(struct btree_keys *b, struct btree_keys *new,
struct bset_sort_state *state)
{
uint64_t start_time = local_clock();
struct btree_iter_stack iter;
struct btree_iter iter;

bch_btree_iter_stack_init(b, &iter, NULL);
min_heap_init(&iter.heap, NULL, MAX_BSETS);

btree_mergesort(b, new->set->data, &iter.iter, false, true);
bch_btree_iter_init(b, &iter, NULL);

btree_mergesort(b, new->set->data, &iter, false, true);

bch_time_stats_update(&state->time, start_time);

@@ -187,8 +187,9 @@ struct bset_tree {
};

struct btree_keys_ops {
bool (*sort_cmp)(struct btree_iter_set l,
struct btree_iter_set r);
bool (*sort_cmp)(const void *l,
const void *r,
void *args);
struct bkey *(*sort_fixup)(struct btree_iter *iter,
struct bkey *tmp);
bool (*insert_fixup)(struct btree_keys *b,

@@ -312,23 +313,17 @@ enum {
BTREE_INSERT_STATUS_FRONT_MERGE,
};

struct btree_iter_set {
struct bkey *k, *end;
};

/* Btree key iteration */

struct btree_iter {
size_t size, used;
#ifdef CONFIG_BCACHE_DEBUG
struct btree_keys *b;
#endif
struct btree_iter_set {
struct bkey *k, *end;
} data[];
};

/* Fixed-size btree_iter that can be allocated on the stack */

struct btree_iter_stack {
struct btree_iter iter;
struct btree_iter_set stack_data[MAX_BSETS];
MIN_HEAP_PREALLOCATED(struct btree_iter_set, btree_iter_heap, MAX_BSETS) heap;
};

typedef bool (*ptr_filter_fn)(struct btree_keys *b, const struct bkey *k);

@@ -340,9 +335,9 @@ struct bkey *bch_btree_iter_next_filter(struct btree_iter *iter,

void bch_btree_iter_push(struct btree_iter *iter, struct bkey *k,
struct bkey *end);
struct bkey *bch_btree_iter_stack_init(struct btree_keys *b,
struct btree_iter_stack *iter,
struct bkey *search);
struct bkey *bch_btree_iter_init(struct btree_keys *b,
struct btree_iter *iter,
struct bkey *search);

struct bkey *__bch_bset_search(struct btree_keys *b, struct bset_tree *t,
const struct bkey *search);

@@ -357,14 +352,13 @@ static inline struct bkey *bch_bset_search(struct btree_keys *b,
return search ? __bch_bset_search(b, t, search) : t->data->start;
}

#define for_each_key_filter(b, k, stack_iter, filter) \
for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
((k) = bch_btree_iter_next_filter(&((stack_iter)->iter), (b), \
filter));)
#define for_each_key_filter(b, k, iter, filter) \
for (bch_btree_iter_init((b), (iter), NULL); \
((k) = bch_btree_iter_next_filter((iter), (b), filter));)

#define for_each_key(b, k, stack_iter) \
for (bch_btree_iter_stack_init((b), (stack_iter), NULL); \
((k) = bch_btree_iter_next(&((stack_iter)->iter)));)
#define for_each_key(b, k, iter) \
for (bch_btree_iter_init((b), (iter), NULL); \
((k) = bch_btree_iter_next(iter));)

/* Sorting */

@@ -149,19 +149,19 @@ void bch_btree_node_read_done(struct btree *b)
{
const char *err = "bad btree header";
struct bset *i = btree_bset_first(b);
struct btree_iter *iter;
struct btree_iter iter;

/*
* c->fill_iter can allocate an iterator with more memory space
* than static MAX_BSETS.
* See the comment arount cache_set->fill_iter.
*/
iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
iter->size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
iter->used = 0;
iter.heap.data = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
iter.heap.size = b->c->cache->sb.bucket_size / b->c->cache->sb.block_size;
iter.heap.nr = 0;

#ifdef CONFIG_BCACHE_DEBUG
iter->b = &b->keys;
iter.b = &b->keys;
#endif

if (!i->seq)

@@ -199,7 +199,7 @@ void bch_btree_node_read_done(struct btree *b)
if (i != b->keys.set[0].data && !i->keys)
goto err;

bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
bch_btree_iter_push(&iter, i->start, bset_bkey_last(i));

b->written += set_blocks(i, block_bytes(b->c->cache));
}

@@ -211,7 +211,7 @@ void bch_btree_node_read_done(struct btree *b)
if (i->seq == b->keys.set[0].data->seq)
goto err;

bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
bch_btree_sort_and_fix_extents(&b->keys, &iter, &b->c->sort);

i = b->keys.set[0].data;
err = "short btree key";

@@ -223,7 +223,7 @@ void bch_btree_node_read_done(struct btree *b)
bch_bset_init_next(&b->keys, write_block(b),
bset_magic(&b->c->cache->sb));
out:
mempool_free(iter, &b->c->fill_iter);
mempool_free(iter.heap.data, &b->c->fill_iter);
return;
err:
set_btree_node_io_error(b);

@@ -1309,9 +1309,11 @@ static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
uint8_t stale = 0;
unsigned int keys = 0, good_keys = 0;
struct bkey *k;
struct btree_iter_stack iter;
struct btree_iter iter;
struct bset_tree *t;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

gc->nodes++;

for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {

@@ -1570,9 +1572,11 @@ static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
static unsigned int btree_gc_count_keys(struct btree *b)
{
struct bkey *k;
struct btree_iter_stack iter;
struct btree_iter iter;
unsigned int ret = 0;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
ret += bkey_u64s(k);

@@ -1611,18 +1615,18 @@ static int btree_gc_recurse(struct btree *b, struct btree_op *op,
int ret = 0;
bool should_rewrite;
struct bkey *k;
struct btree_iter_stack iter;
struct btree_iter iter;
struct gc_merge_info r[GC_MERGE_NODES];
struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;

bch_btree_iter_stack_init(&b->keys, &iter, &b->c->gc_done);
min_heap_init(&iter.heap, NULL, MAX_BSETS);
bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);

for (i = r; i < r + ARRAY_SIZE(r); i++)
i->b = ERR_PTR(-EINTR);

while (1) {
k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad);
k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
if (k) {
r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
true, b);
@@ -1917,7 +1921,9 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
{
int ret = 0;
struct bkey *k, *p = NULL;
struct btree_iter_stack iter;
struct btree_iter iter;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(b->c, b->level, k);

@@ -1925,10 +1931,10 @@ static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
bch_initial_mark_key(b->c, b->level + 1, &b->key);

if (b->level) {
bch_btree_iter_stack_init(&b->keys, &iter, NULL);
bch_btree_iter_init(&b->keys, &iter, NULL);

do {
k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad);
if (k) {
btree_node_prefetch(b, k);

@@ -1956,7 +1962,7 @@ static int bch_btree_check_thread(void *arg)
struct btree_check_info *info = arg;
struct btree_check_state *check_state = info->state;
struct cache_set *c = check_state->c;
struct btree_iter_stack iter;
struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;

@@ -1964,9 +1970,11 @@ static int bch_btree_check_thread(void *arg)
cur_idx = prev_idx = 0;
ret = 0;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

/* root node keys are checked before thread created */
bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);

p = k;

@@ -1984,7 +1992,7 @@ static int bch_btree_check_thread(void *arg)
skip_nr = cur_idx - prev_idx;

while (skip_nr) {
k = bch_btree_iter_next_filter(&iter.iter,
k = bch_btree_iter_next_filter(&iter,
&c->root->keys,
bch_ptr_bad);
if (k)

@@ -2057,9 +2065,11 @@ int bch_btree_check(struct cache_set *c)
int ret = 0;
int i;
struct bkey *k = NULL;
struct btree_iter_stack iter;
struct btree_iter iter;
struct btree_check_state check_state;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

/* check and mark root node keys */
for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
bch_initial_mark_key(c, c->root->level, k);

@@ -2553,11 +2563,12 @@ static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,

if (b->level) {
struct bkey *k;
struct btree_iter_stack iter;
struct btree_iter iter;

bch_btree_iter_stack_init(&b->keys, &iter, from);
min_heap_init(&iter.heap, NULL, MAX_BSETS);
bch_btree_iter_init(&b->keys, &iter, from);

while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
bch_ptr_bad))) {
ret = bcache_btree(map_nodes_recurse, k, b,
op, from, fn, flags);

@@ -2586,12 +2597,12 @@ int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
{
int ret = MAP_CONTINUE;
struct bkey *k;
struct btree_iter_stack iter;
struct btree_iter iter;

bch_btree_iter_stack_init(&b->keys, &iter, from);
min_heap_init(&iter.heap, NULL, MAX_BSETS);
bch_btree_iter_init(&b->keys, &iter, from);

while ((k = bch_btree_iter_next_filter(&iter.iter, &b->keys,
bch_ptr_bad))) {
while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
ret = !b->level
? fn(op, b, k)
: bcache_btree(map_keys_recurse, k,

@@ -33,15 +33,16 @@ static void sort_key_next(struct btree_iter *iter,
i->k = bkey_next(i->k);

if (i->k == i->end)
*i = iter->data[--iter->used];
*i = iter->heap.data[--iter->heap.nr];
}

static bool bch_key_sort_cmp(struct btree_iter_set l,
struct btree_iter_set r)
static bool new_bch_key_sort_cmp(const void *l, const void *r, void *args)
{
int64_t c = bkey_cmp(l.k, r.k);
struct btree_iter_set *_l = (struct btree_iter_set *)l;
struct btree_iter_set *_r = (struct btree_iter_set *)r;
int64_t c = bkey_cmp(_l->k, _r->k);

return c ? c > 0 : l.k < r.k;
return !(c ? c > 0 : _l->k < _r->k);
}

static bool __ptr_invalid(struct cache_set *c, const struct bkey *k)

@@ -238,7 +239,7 @@ static bool bch_btree_ptr_insert_fixup(struct btree_keys *bk,
}

const struct btree_keys_ops bch_btree_keys_ops = {
.sort_cmp = bch_key_sort_cmp,
.sort_cmp = new_bch_key_sort_cmp,
.insert_fixup = bch_btree_ptr_insert_fixup,
.key_invalid = bch_btree_ptr_invalid,
.key_bad = bch_btree_ptr_bad,

@@ -255,22 +256,36 @@ const struct btree_keys_ops bch_btree_keys_ops = {
* Necessary for btree_sort_fixup() - if there are multiple keys that compare
* equal in different sets, we have to process them newest to oldest.
*/
static bool bch_extent_sort_cmp(struct btree_iter_set l,
struct btree_iter_set r)
{
int64_t c = bkey_cmp(&START_KEY(l.k), &START_KEY(r.k));

return c ? c > 0 : l.k < r.k;
static bool new_bch_extent_sort_cmp(const void *l, const void *r, void __always_unused *args)
{
struct btree_iter_set *_l = (struct btree_iter_set *)l;
struct btree_iter_set *_r = (struct btree_iter_set *)r;
int64_t c = bkey_cmp(&START_KEY(_l->k), &START_KEY(_r->k));

return !(c ? c > 0 : _l->k < _r->k);
}

static inline void new_btree_iter_swap(void *iter1, void *iter2, void __always_unused *args)
{
struct btree_iter_set *_iter1 = iter1;
struct btree_iter_set *_iter2 = iter2;

swap(*_iter1, *_iter2);
}

static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
struct bkey *tmp)
{
while (iter->used > 1) {
struct btree_iter_set *top = iter->data, *i = top + 1;
const struct min_heap_callbacks callbacks = {
.less = new_bch_extent_sort_cmp,
.swp = new_btree_iter_swap,
};
while (iter->heap.nr > 1) {
struct btree_iter_set *top = iter->heap.data, *i = top + 1;

if (iter->used > 2 &&
bch_extent_sort_cmp(i[0], i[1]))
if (iter->heap.nr > 2 &&
!new_bch_extent_sort_cmp(&i[0], &i[1], NULL))
i++;

if (bkey_cmp(top->k, &START_KEY(i->k)) <= 0)

@@ -278,7 +293,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,

if (!KEY_SIZE(i->k)) {
sort_key_next(iter, i);
heap_sift(iter, i - top, bch_extent_sort_cmp);
min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
continue;
}

@@ -288,7 +303,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,
else
bch_cut_front(top->k, i->k);

heap_sift(iter, i - top, bch_extent_sort_cmp);
min_heap_sift_down(&iter->heap, i - top, &callbacks, NULL);
} else {
/* can't happen because of comparison func */
BUG_ON(!bkey_cmp(&START_KEY(top->k), &START_KEY(i->k)));

@@ -298,7 +313,7 @@ static struct bkey *bch_extent_sort_fixup(struct btree_iter *iter,

bch_cut_back(&START_KEY(i->k), tmp);
bch_cut_front(i->k, top->k);
heap_sift(iter, 0, bch_extent_sort_cmp);
min_heap_sift_down(&iter->heap, 0, &callbacks, NULL);

return tmp;
} else {

@@ -618,7 +633,7 @@ static bool bch_extent_merge(struct btree_keys *bk,
}

const struct btree_keys_ops bch_extent_keys_ops = {
.sort_cmp = bch_extent_sort_cmp,
.sort_cmp = new_bch_extent_sort_cmp,
.sort_fixup = bch_extent_sort_fixup,
.insert_fixup = bch_extent_insert_fixup,
.key_invalid = bch_extent_invalid,

@@ -182,16 +182,27 @@ err: if (!IS_ERR_OR_NULL(w->private))
closure_sync(&cl);
}

static bool bucket_cmp(struct bucket *l, struct bucket *r)
static bool new_bucket_cmp(const void *l, const void *r, void __always_unused *args)
{
return GC_SECTORS_USED(l) < GC_SECTORS_USED(r);
struct bucket **_l = (struct bucket **)l;
struct bucket **_r = (struct bucket **)r;

return GC_SECTORS_USED(*_l) >= GC_SECTORS_USED(*_r);
}

static void new_bucket_swap(void *l, void *r, void __always_unused *args)
{
struct bucket **_l = l;
struct bucket **_r = r;

swap(*_l, *_r);
}

static unsigned int bucket_heap_top(struct cache *ca)
{
struct bucket *b;

return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
return (b = min_heap_peek(&ca->heap)[0]) ? GC_SECTORS_USED(b) : 0;
}

void bch_moving_gc(struct cache_set *c)

@@ -199,6 +210,10 @@ void bch_moving_gc(struct cache_set *c)
struct cache *ca = c->cache;
struct bucket *b;
unsigned long sectors_to_move, reserve_sectors;
const struct min_heap_callbacks callbacks = {
.less = new_bucket_cmp,
.swp = new_bucket_swap,
};

if (!c->copy_gc_enabled)
return;

@@ -209,7 +224,7 @@ void bch_moving_gc(struct cache_set *c)
reserve_sectors = ca->sb.bucket_size *
fifo_used(&ca->free[RESERVE_MOVINGGC]);

ca->heap.used = 0;
ca->heap.nr = 0;

for_each_bucket(b, ca) {
if (GC_MARK(b) == GC_MARK_METADATA ||

@@ -218,25 +233,31 @@ void bch_moving_gc(struct cache_set *c)
atomic_read(&b->pin))
continue;

if (!heap_full(&ca->heap)) {
if (!min_heap_full(&ca->heap)) {
sectors_to_move += GC_SECTORS_USED(b);
heap_add(&ca->heap, b, bucket_cmp);
} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
min_heap_push(&ca->heap, &b, &callbacks, NULL);
} else if (!new_bucket_cmp(&b, min_heap_peek(&ca->heap), ca)) {
sectors_to_move -= bucket_heap_top(ca);
sectors_to_move += GC_SECTORS_USED(b);

ca->heap.data[0] = b;
heap_sift(&ca->heap, 0, bucket_cmp);
min_heap_sift_down(&ca->heap, 0, &callbacks, NULL);
}
}

while (sectors_to_move > reserve_sectors) {
heap_pop(&ca->heap, b, bucket_cmp);
if (ca->heap.nr) {
b = min_heap_peek(&ca->heap)[0];
min_heap_pop(&ca->heap, &callbacks, NULL);
}
sectors_to_move -= GC_SECTORS_USED(b);
}

while (heap_pop(&ca->heap, b, bucket_cmp))
while (ca->heap.nr) {
b = min_heap_peek(&ca->heap)[0];
min_heap_pop(&ca->heap, &callbacks, NULL);
SET_GC_MOVE(b, 1);
}

mutex_unlock(&c->bucket_lock);

@@ -1907,8 +1907,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
INIT_LIST_HEAD(&c->btree_cache_freed);
INIT_LIST_HEAD(&c->data_buckets);

iter_size = sizeof(struct btree_iter) +
((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size) *
sizeof(struct btree_iter_set);

c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);

@@ -660,7 +660,9 @@ static unsigned int bch_root_usage(struct cache_set *c)
unsigned int bytes = 0;
struct bkey *k;
struct btree *b;
struct btree_iter_stack iter;
struct btree_iter iter;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

goto lock_root;

@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* random utiility code, for bcache but in theory not specific to bcache
* random utility code, for bcache but in theory not specific to bcache
*
* Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
* Copyright 2012 Google, Inc.

@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/sched/clock.h>
#include <linux/llist.h>
#include <linux/min_heap.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

@@ -30,16 +31,10 @@ struct closure;

#endif

#define DECLARE_HEAP(type, name) \
struct { \
size_t size, used; \
type *data; \
} name

#define init_heap(heap, _size, gfp) \
({ \
size_t _bytes; \
(heap)->used = 0; \
(heap)->nr = 0; \
(heap)->size = (_size); \
_bytes = (heap)->size * sizeof(*(heap)->data); \
(heap)->data = kvmalloc(_bytes, (gfp) & GFP_KERNEL); \

@@ -52,64 +47,6 @@ do { \
(heap)->data = NULL; \
} while (0)

#define heap_swap(h, i, j) swap((h)->data[i], (h)->data[j])

#define heap_sift(h, i, cmp) \
do { \
size_t _r, _j = i; \
\
for (; _j * 2 + 1 < (h)->used; _j = _r) { \
_r = _j * 2 + 1; \
if (_r + 1 < (h)->used && \
cmp((h)->data[_r], (h)->data[_r + 1])) \
_r++; \
\
if (cmp((h)->data[_r], (h)->data[_j])) \
break; \
heap_swap(h, _r, _j); \
} \
} while (0)

#define heap_sift_down(h, i, cmp) \
do { \
while (i) { \
size_t p = (i - 1) / 2; \
if (cmp((h)->data[i], (h)->data[p])) \
break; \
heap_swap(h, i, p); \
i = p; \
} \
} while (0)

#define heap_add(h, d, cmp) \
({ \
bool _r = !heap_full(h); \
if (_r) { \
size_t _i = (h)->used++; \
(h)->data[_i] = d; \
\
heap_sift_down(h, _i, cmp); \
heap_sift(h, _i, cmp); \
} \
_r; \
})

#define heap_pop(h, d, cmp) \
({ \
bool _r = (h)->used; \
if (_r) { \
(d) = (h)->data[0]; \
(h)->used--; \
heap_swap(h, 0, (h)->used); \
heap_sift(h, 0, cmp); \
} \
_r; \
})

#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)

#define heap_full(h) ((h)->used == (h)->size)

#define DECLARE_FIFO(type, name) \
struct { \
size_t front, back, size, mask; \

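The hunk above deletes bcache's private heap macros; the bcache code in this diff now uses the generic <linux/min_heap.h> API that this merge reworks. Two things are easy to miss when reading the conversion: every operation now takes a struct min_heap_callbacks plus an opaque args pointer, and the .less() callback must return true when its first argument should sort toward the top of the heap, which is the opposite sense from how the old heap_sift() macro consumed its cmp argument (hence comparators such as bkey_cmp(...) > 0 becoming <= 0). Below is a minimal, self-contained sketch of the new API; the example_* names are invented for illustration, only the min_heap calls themselves mirror the hunks in this diff.

#include <linux/kernel.h>
#include <linux/min_heap.h>

DEFINE_MIN_HEAP(int, example_int_heap);		/* generates struct example_int_heap */

static bool example_less(const void *l, const void *r, void __always_unused *args)
{
	return *(const int *)l < *(const int *)r;	/* min-heap: smallest element at the root */
}

static void example_swap(void *l, void *r, void __always_unused *args)
{
	swap(*(int *)l, *(int *)r);
}

static const struct min_heap_callbacks example_cb = {
	.less = example_less,
	.swp = example_swap,
};

static int example_smallest(int *values, int n)
{
	int backing[16];
	struct example_int_heap heap;
	int i, smallest = -1;

	/* Point the heap at external storage; passing NULL would use heap.preallocated. */
	min_heap_init(&heap, backing, ARRAY_SIZE(backing));

	for (i = 0; i < n && !min_heap_full(&heap); i++)
		min_heap_push(&heap, &values[i], &example_cb, NULL);

	if (heap.nr) {
		smallest = *min_heap_peek(&heap);	/* root of the min-heap */
		min_heap_pop(&heap, &example_cb, NULL);
	}
	return smallest;
}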
@@ -908,15 +908,16 @@ static int bch_dirty_init_thread(void *arg)
struct dirty_init_thrd_info *info = arg;
struct bch_dirty_init_state *state = info->state;
struct cache_set *c = state->c;
struct btree_iter_stack iter;
struct btree_iter iter;
struct bkey *k, *p;
int cur_idx, prev_idx, skip_nr;

k = p = NULL;
prev_idx = 0;

bch_btree_iter_stack_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter.iter, &c->root->keys, bch_ptr_bad);
min_heap_init(&iter.heap, NULL, MAX_BSETS);
bch_btree_iter_init(&c->root->keys, &iter, NULL);
k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
BUG_ON(!k);

p = k;

@@ -930,7 +931,7 @@ static int bch_dirty_init_thread(void *arg)
skip_nr = cur_idx - prev_idx;

while (skip_nr) {
k = bch_btree_iter_next_filter(&iter.iter,
k = bch_btree_iter_next_filter(&iter,
&c->root->keys,
bch_ptr_bad);
if (k)

@@ -979,11 +980,13 @@ void bch_sectors_dirty_init(struct bcache_device *d)
int i;
struct btree *b = NULL;
struct bkey *k = NULL;
struct btree_iter_stack iter;
struct btree_iter iter;
struct sectors_dirty_init op;
struct cache_set *c = d->c;
struct bch_dirty_init_state state;

min_heap_init(&iter.heap, NULL, MAX_BSETS);

retry_lock:
b = c->root;
rw_lock(0, b, b->level);

@@ -51,6 +51,8 @@ struct recovery_point {
bool increment_applied;
};

DEFINE_MIN_HEAP(struct numbered_block_mapping, replay_heap);

struct repair_completion {
/* The completion header */
struct vdo_completion completion;

@@ -97,7 +99,7 @@ struct repair_completion {
* order, then original journal order. This permits efficient iteration over the journal
* entries in order.
*/
struct min_heap replay_heap;
struct replay_heap replay_heap;
/* Fields tracking progress through the journal entries. */
struct numbered_block_mapping *current_entry;
struct numbered_block_mapping *current_unfetched_entry;

@@ -135,7 +137,7 @@ struct repair_completion {
* to sort by slot while still ensuring we replay all entries with the same slot in the exact order
* as they appeared in the journal.
*/
static bool mapping_is_less_than(const void *item1, const void *item2)
static bool mapping_is_less_than(const void *item1, const void *item2, void __always_unused *args)
{
const struct numbered_block_mapping *mapping1 =
(const struct numbered_block_mapping *) item1;

@@ -154,7 +156,7 @@ static bool mapping_is_less_than(const void *item1, const void *item2)
return 0;
}

static void swap_mappings(void *item1, void *item2)
static void swap_mappings(void *item1, void *item2, void __always_unused *args)
{
struct numbered_block_mapping *mapping1 = item1;
struct numbered_block_mapping *mapping2 = item2;

@@ -163,14 +165,13 @@ static void swap_mappings(void *item1, void *item2)
}

static const struct min_heap_callbacks repair_min_heap = {
.elem_size = sizeof(struct numbered_block_mapping),
.less = mapping_is_less_than,
.swp = swap_mappings,
};

static struct numbered_block_mapping *sort_next_heap_element(struct repair_completion *repair)
{
struct min_heap *heap = &repair->replay_heap;
struct replay_heap *heap = &repair->replay_heap;
struct numbered_block_mapping *last;

if (heap->nr == 0)

@@ -181,8 +182,8 @@ static struct numbered_block_mapping *sort_next_heap_element(struct repair_compl
* restore the heap invariant, and return a pointer to the popped element.
*/
last = &repair->entries[--heap->nr];
swap_mappings(heap->data, last);
min_heapify(heap, 0, &repair_min_heap);
swap_mappings(heap->data, last, NULL);
min_heap_sift_down(heap, 0, &repair_min_heap, NULL);
return last;
}

@@ -1116,12 +1117,12 @@ static void recover_block_map(struct vdo_completion *completion)
* Organize the journal entries into a binary heap so we can iterate over them in sorted
* order incrementally, avoiding an expensive sort call.
*/
repair->replay_heap = (struct min_heap) {
repair->replay_heap = (struct replay_heap) {
.data = repair->entries,
.nr = repair->block_map_entry_count,
.size = repair->block_map_entry_count,
};
min_heapify_all(&repair->replay_heap, &repair_min_heap);
min_heapify_all(&repair->replay_heap, &repair_min_heap, NULL);

vdo_log_info("Replaying %zu recovery entries into block map",
repair->block_map_entry_count);

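For the dm-vdo hunks above and below: the old code declared heaps as the untyped struct min_heap, which the rework replaces with per-element-type heap structures generated by DEFINE_MIN_HEAP(), and min_heap_callbacks no longer carries .elem_size since the element size is implied by the typed heap. A rough sketch of what the macro generates follows; the field layout is an assumption based on this merge's min_heap rework, and the authoritative definition lives in include/linux/min_heap.h.

/* DEFINE_MIN_HEAP(struct numbered_block_mapping, replay_heap) expands to roughly: */
struct replay_heap {
	size_t nr;					/* elements currently in the heap */
	size_t size;					/* capacity of data[] */
	struct numbered_block_mapping *data;		/* backing storage */
	struct numbered_block_mapping preallocated[0];	/* optional inline storage */
};

MIN_HEAP_PREALLOCATED(type, name, N), used by the bcache btree_iter conversion earlier in this diff, is the same structure with N inline elements so that small heaps can live on the stack.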
@@ -3288,7 +3288,8 @@ int vdo_release_block_reference(struct block_allocator *allocator,
* Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
* before larger ones.
*/
static bool slab_status_is_less_than(const void *item1, const void *item2)
static bool slab_status_is_less_than(const void *item1, const void *item2,
void __always_unused *args)
{
const struct slab_status *info1 = item1;
const struct slab_status *info2 = item2;

@@ -3300,7 +3301,7 @@ static bool slab_status_is_less_than(const void *item1, const void *item2)
return info1->slab_number < info2->slab_number;
}

static void swap_slab_statuses(void *item1, void *item2)
static void swap_slab_statuses(void *item1, void *item2, void __always_unused *args)
{
struct slab_status *info1 = item1;
struct slab_status *info2 = item2;

@@ -3309,7 +3310,6 @@ static void swap_slab_statuses(void *item1, void *item2)
}

static const struct min_heap_callbacks slab_status_min_heap = {
.elem_size = sizeof(struct slab_status),
.less = slab_status_is_less_than,
.swp = swap_slab_statuses,
};

@@ -3509,7 +3509,7 @@ static int get_slab_statuses(struct block_allocator *allocator,
static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator *allocator)
{
struct slab_status current_slab_status;
struct min_heap heap;
DEFINE_MIN_HEAP(struct slab_status, heap) heap;
int result;
struct slab_status *slab_statuses;
struct slab_depot *depot = allocator->depot;

@@ -3521,12 +3521,12 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator
return result;

/* Sort the slabs by cleanliness, then by emptiness hint. */
heap = (struct min_heap) {
heap = (struct heap) {
.data = slab_statuses,
.nr = allocator->slab_count,
.size = allocator->slab_count,
};
min_heapify_all(&heap, &slab_status_min_heap);
min_heapify_all(&heap, &slab_status_min_heap, NULL);

while (heap.nr > 0) {
bool high_priority;

@@ -3534,7 +3534,7 @@ static int __must_check vdo_prepare_slabs_for_allocation(struct block_allocator
struct slab_journal *journal;

current_slab_status = slab_statuses[0];
min_heap_pop(&heap, &slab_status_min_heap);
min_heap_pop(&heap, &slab_status_min_heap, NULL);
slab = depot->slabs[current_slab_status.slab_number];

if ((depot->load_type == VDO_SLAB_DEPOT_REBUILD_LOAD) ||

@@ -1286,7 +1286,7 @@ int most_register_interface(struct most_interface *iface)
!iface->poison_channel || (iface->num_channels > MAX_CHANNELS))
return -EINVAL;

id = ida_simple_get(&mdev_id, 0, 0, GFP_KERNEL);
id = ida_alloc(&mdev_id, GFP_KERNEL);
if (id < 0) {
dev_err(iface->dev, "Failed to allocate device ID\n");
return id;

@@ -1294,7 +1294,7 @@ int most_register_interface(struct most_interface *iface)

iface->p = kzalloc(sizeof(*iface->p), GFP_KERNEL);
if (!iface->p) {
ida_simple_remove(&mdev_id, id);
ida_free(&mdev_id, id);
return -ENOMEM;
}

@@ -1308,7 +1308,7 @@ int most_register_interface(struct most_interface *iface)
dev_err(iface->dev, "Failed to register interface device\n");
kfree(iface->p);
put_device(iface->dev);
ida_simple_remove(&mdev_id, id);
ida_free(&mdev_id, id);
return -ENOMEM;
}

@@ -1366,7 +1366,7 @@ err_free_resources:
}
kfree(iface->p);
device_unregister(iface->dev);
ida_simple_remove(&mdev_id, id);
ida_free(&mdev_id, id);
return -ENOMEM;
}
EXPORT_SYMBOL_GPL(most_register_interface);

@@ -1397,7 +1397,7 @@ void most_deregister_interface(struct most_interface *iface)
device_unregister(&c->dev);
}

ida_simple_remove(&mdev_id, iface->p->dev_id);
ida_free(&mdev_id, iface->p->dev_id);
kfree(iface->p);
device_unregister(iface->dev);
}

@@ -100,7 +100,7 @@ static void destroy_cdev(struct comp_channel *c)

static void destroy_channel(struct comp_channel *c)
{
ida_simple_remove(&comp.minor_id, MINOR(c->devno));
ida_free(&comp.minor_id, MINOR(c->devno));
kfifo_free(&c->fifo);
kfree(c);
}

@@ -425,7 +425,7 @@ static int comp_probe(struct most_interface *iface, int channel_id,
if (c)
return -EEXIST;

current_minor = ida_simple_get(&comp.minor_id, 0, 0, GFP_KERNEL);
current_minor = ida_alloc(&comp.minor_id, GFP_KERNEL);
if (current_minor < 0)
return current_minor;

@@ -472,7 +472,7 @@ err_del_cdev_and_free_channel:
err_free_c:
kfree(c);
err_remove_ida:
ida_simple_remove(&comp.minor_id, current_minor);
ida_free(&comp.minor_id, current_minor);
return retval;
}

Some files were not shown because too many files have changed in this diff.