Merge tag 'v6.1.115'

This is the 6.1.115 stable release

* tag 'v6.1.115': (2780 commits)
  Linux 6.1.115
  xfrm: validate new SA's prefixlen using SA family when sel.family is unset
  arm64/uprobes: change the uprobe_opcode_t typedef to fix the sparse warning
  ACPI: PRM: Clean up guid type in struct prm_handler_info
  platform/x86: dell-wmi: Ignore suspend notifications
  ASoC: qcom: Fix NULL Dereference in asoc_qcom_lpass_cpu_platform_probe()
  net: phy: dp83822: Fix reset pin definitions
  serial: protect uart_port_dtr_rts() in uart_shutdown() too
  selinux: improve error checking in sel_write_load()
  drm/amd/display: Disable PSR-SU on Parade 08-01 TCON too
  hv_netvsc: Fix VF namespace also in synthetic NIC NETDEV_REGISTER event
  xfrm: fix one more kernel-infoleak in algo dumping
  LoongArch: Get correct cores_per_package for SMT systems
  ALSA: hda/realtek: Add subwoofer quirk for Acer Predator G9-593
  KVM: arm64: Don't eagerly teardown the vgic on init error
  KVM: nSVM: Ignore nCR3[4:0] when loading PDPTEs from memory
  openat2: explicitly return -E2BIG for (usize > PAGE_SIZE)
  nilfs2: fix kernel bug due to missing clearing of buffer delay flag
  ACPI: button: Add DMI quirk for Samsung Galaxy Book2 to fix initial lid detection issue
  ACPI: PRM: Find EFI_MEMORY_RUNTIME block for PRM handler and context
  ...

Change-Id: Iee600c49a5c914b79141c62cda38e787e429a167

Conflicts:
	arch/arm64/boot/dts/rockchip/rk356x.dtsi
	drivers/gpio/gpio-rockchip.c
	drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
	drivers/gpu/drm/rockchip/rockchip_drm_vop.c
	drivers/gpu/drm/rockchip/rockchip_drm_vop.h
	drivers/gpu/drm/rockchip/rockchip_drm_vop2.c
	drivers/gpu/drm/rockchip/rockchip_vop_reg.c
	drivers/media/i2c/imx335.c
	drivers/pci/controller/dwc/pcie-dw-rockchip.c
	drivers/spi/spi-rockchip.c
	drivers/spi/spidev.c
	drivers/usb/dwc3/gadget.c
	drivers/usb/host/xhci.h
Committed by Tao Huang on 2025-01-10 17:48:30 +08:00
2200 changed files with 33639 additions and 21718 deletions

--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c

@@ -901,7 +901,8 @@ static int __init xbc_parse_tree(void)
 }
 
 /**
- * xbc_exit() - Clean up all parsed bootconfig
+ * _xbc_exit() - Clean up all parsed bootconfig
+ * @early: Set true if this is called before the buddy system is initialized.
  *
  * This clears all data structures of parsed bootconfig on memory.
  * If you need to reuse xbc_init() with new boot config, you can

--- a/lib/buildid.c
+++ b/lib/buildid.c

@@ -18,31 +18,37 @@ static int parse_build_id_buf(unsigned char *build_id,
 			      const void *note_start,
 			      Elf32_Word note_size)
 {
-	Elf32_Word note_offs = 0, new_offs;
+	const char note_name[] = "GNU";
+	const size_t note_name_sz = sizeof(note_name);
+	u64 note_off = 0, new_off, name_sz, desc_sz;
+	const char *data;
 
-	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
-		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
+	while (note_off + sizeof(Elf32_Nhdr) < note_size &&
+	       note_off + sizeof(Elf32_Nhdr) > note_off /* overflow */) {
+		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_off);
+
+		name_sz = READ_ONCE(nhdr->n_namesz);
+		desc_sz = READ_ONCE(nhdr->n_descsz);
+
+		new_off = note_off + sizeof(Elf32_Nhdr);
+		if (check_add_overflow(new_off, ALIGN(name_sz, 4), &new_off) ||
+		    check_add_overflow(new_off, ALIGN(desc_sz, 4), &new_off) ||
+		    new_off > note_size)
+			break;
 
 		if (nhdr->n_type == BUILD_ID &&
-		    nhdr->n_namesz == sizeof("GNU") &&
-		    !strcmp((char *)(nhdr + 1), "GNU") &&
-		    nhdr->n_descsz > 0 &&
-		    nhdr->n_descsz <= BUILD_ID_SIZE_MAX) {
-			memcpy(build_id,
-			       note_start + note_offs +
-			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr),
-			       nhdr->n_descsz);
-			memset(build_id + nhdr->n_descsz, 0,
-			       BUILD_ID_SIZE_MAX - nhdr->n_descsz);
+		    name_sz == note_name_sz &&
+		    memcmp(nhdr + 1, note_name, note_name_sz) == 0 &&
+		    desc_sz > 0 && desc_sz <= BUILD_ID_SIZE_MAX) {
+			data = note_start + note_off + sizeof(Elf32_Nhdr) + ALIGN(note_name_sz, 4);
+			memcpy(build_id, data, desc_sz);
+			memset(build_id + desc_sz, 0, BUILD_ID_SIZE_MAX - desc_sz);
 			if (size)
-				*size = nhdr->n_descsz;
+				*size = desc_sz;
 			return 0;
 		}
-		new_offs = note_offs + sizeof(Elf32_Nhdr) +
-			ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
-		if (new_offs <= note_offs)	/* overflow */
-			break;
-		note_offs = new_offs;
+
+		note_off = new_off;
 	}
 
 	return -EINVAL;
@@ -71,20 +77,28 @@ static int get_build_id_32(const void *page_addr, unsigned char *build_id,
 {
 	Elf32_Ehdr *ehdr = (Elf32_Ehdr *)page_addr;
 	Elf32_Phdr *phdr;
-	int i;
+	__u32 i, phnum;
+
+	/*
+	 * FIXME
+	 * Neither ELF spec nor ELF loader require that program headers
+	 * start immediately after ELF header.
+	 */
+	if (ehdr->e_phoff != sizeof(Elf32_Ehdr))
+		return -EINVAL;
 
+	phnum = READ_ONCE(ehdr->e_phnum);
 	/* only supports phdr that fits in one page */
-	if (ehdr->e_phnum >
-	    (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
+	if (phnum > (PAGE_SIZE - sizeof(Elf32_Ehdr)) / sizeof(Elf32_Phdr))
 		return -EINVAL;
 
 	phdr = (Elf32_Phdr *)(page_addr + sizeof(Elf32_Ehdr));
 
-	for (i = 0; i < ehdr->e_phnum; ++i) {
+	for (i = 0; i < phnum; ++i) {
 		if (phdr[i].p_type == PT_NOTE &&
 		    !parse_build_id(page_addr, build_id, size,
-				    page_addr + phdr[i].p_offset,
-				    phdr[i].p_filesz))
+				    page_addr + READ_ONCE(phdr[i].p_offset),
+				    READ_ONCE(phdr[i].p_filesz)))
 			return 0;
 	}
 	return -EINVAL;
@@ -96,20 +110,28 @@ static int get_build_id_64(const void *page_addr, unsigned char *build_id,
 {
 	Elf64_Ehdr *ehdr = (Elf64_Ehdr *)page_addr;
 	Elf64_Phdr *phdr;
-	int i;
+	__u32 i, phnum;
+
+	/*
+	 * FIXME
+	 * Neither ELF spec nor ELF loader require that program headers
+	 * start immediately after ELF header.
+	 */
+	if (ehdr->e_phoff != sizeof(Elf64_Ehdr))
+		return -EINVAL;
 
+	phnum = READ_ONCE(ehdr->e_phnum);
 	/* only supports phdr that fits in one page */
-	if (ehdr->e_phnum >
-	    (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
+	if (phnum > (PAGE_SIZE - sizeof(Elf64_Ehdr)) / sizeof(Elf64_Phdr))
 		return -EINVAL;
 
 	phdr = (Elf64_Phdr *)(page_addr + sizeof(Elf64_Ehdr));
 
-	for (i = 0; i < ehdr->e_phnum; ++i) {
+	for (i = 0; i < phnum; ++i) {
 		if (phdr[i].p_type == PT_NOTE &&
 		    !parse_build_id(page_addr, build_id, size,
-				    page_addr + phdr[i].p_offset,
-				    phdr[i].p_filesz))
+				    page_addr + READ_ONCE(phdr[i].p_offset),
+				    READ_ONCE(phdr[i].p_filesz)))
 			return 0;
 	}
 	return -EINVAL;
@@ -138,6 +160,10 @@ int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
 	page = find_get_page(vma->vm_file->f_mapping, 0);
 	if (!page)
 		return -EFAULT;	/* page not mapped */
+	if (!PageUptodate(page)) {
+		put_page(page);
+		return -EFAULT;
+	}
 
 	ret = -EINVAL;
 	page_addr = kmap_atomic(page);

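The hardened loop above advances only through checked additions, so a crafted n_namesz/n_descsz pair can no longer wrap the cursor back into already-visited memory. The same pattern reduces to a user-space sketch; find_note, the nhdr struct and ALIGN4 below are illustrative stand-ins, and __builtin_add_overflow replaces the kernel's check_add_overflow():

#include <stdint.h>
#include <string.h>

#define ALIGN4(x) (((x) + 3ULL) & ~3ULL)

struct nhdr { uint32_t n_namesz, n_descsz, n_type; };

/*
 * Walk an ELF-style note buffer and return the offset of the first note of
 * @type, or -1.  Every advance of the cursor is overflow-checked, so crafted
 * n_namesz/n_descsz values cannot wrap it around.
 */
static long long find_note(const unsigned char *buf, uint64_t size, uint32_t type)
{
	uint64_t off = 0, new_off;
	struct nhdr h;

	while (off + sizeof(h) < size && off + sizeof(h) > off /* overflow */) {
		memcpy(&h, buf + off, sizeof(h));	/* one stable copy of the header */

		new_off = off + sizeof(h);
		if (__builtin_add_overflow(new_off, ALIGN4((uint64_t)h.n_namesz), &new_off) ||
		    __builtin_add_overflow(new_off, ALIGN4((uint64_t)h.n_descsz), &new_off) ||
		    new_off > size)
			break;	/* truncated or malicious note */

		if (h.n_type == type)
			return (long long)off;
		off = new_off;
	}
	return -1;
}
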
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c

@@ -141,13 +141,14 @@ static void fill_pool(void)
 	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
 	 * sections.
 	 */
-	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
+	while (READ_ONCE(obj_nr_tofree) &&
+	       READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 		raw_spin_lock_irqsave(&pool_lock, flags);
 		/*
 		 * Recheck with the lock held as the worker thread might have
 		 * won the race and freed the global free list already.
 		 */
-		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
+		while (obj_nr_tofree && (obj_pool_free < debug_objects_pool_min_level)) {
 			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 			hlist_del(&obj->node);
 			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);

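The fix keeps the established shape of fill_pool(): read the counters locklessly with READ_ONCE() to decide whether taking pool_lock is worthwhile at all, then recheck under the lock because a concurrent worker may already have drained the free list. A minimal sketch of that check/lock/recheck idiom, with POSIX primitives standing in for the kernel's (refill_from_pending and nr_free_pending are hypothetical names):

#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int nr_free_pending;	/* objects waiting to be recycled */

/* Drain pending objects into the pool; cheap racy check first. */
static void refill_from_pending(void (*recycle_one)(void))
{
	/* Lock-free peek: skip the lock when there is clearly no work. */
	while (atomic_load_explicit(&nr_free_pending, memory_order_relaxed) > 0) {
		pthread_mutex_lock(&pool_lock);
		/* Recheck under the lock: another thread may have won the
		 * race and drained the pending list already. */
		while (atomic_load_explicit(&nr_free_pending, memory_order_relaxed) > 0) {
			recycle_one();
			atomic_fetch_sub(&nr_free_pending, 1);
		}
		pthread_mutex_unlock(&pool_lock);
	}
}
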
--- a/lib/decompress_bunzip2.c
+++ b/lib/decompress_bunzip2.c

@@ -232,7 +232,8 @@ static int INIT get_next_block(struct bunzip_data *bd)
 	   RUNB) */
 	symCount = symTotal+2;
 	for (j = 0; j < groupCount; j++) {
-		unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1];
+		unsigned char length[MAX_SYMBOLS];
+		unsigned short temp[MAX_HUFCODE_BITS+1];
 		int	minLen,	maxLen, pp;
 		/* Read Huffman code lengths for each symbol.  They're
 		   stored in a way similar to mtf; record a starting

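The widened temp[] holds the per-length symbol counts used while building the Huffman table; a block may define up to MAX_SYMBOLS (258) symbols, so when enough of them share one code length an 8-bit counter silently wraps and a valid archive fails to decompress. The wraparound itself is trivial to reproduce (standalone sketch, counts assumed as a worst case):

#include <stdio.h>

int main(void)
{
	unsigned char narrow = 0;
	unsigned short wide = 0;
	int i;

	/* 258 symbols all sharing one code length. */
	for (i = 0; i < 258; i++) {
		narrow++;
		wide++;
	}
	printf("8-bit count: %u, 16-bit count: %u\n", narrow, wide);	/* 2 vs 258 */
	return 0;
}
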
--- a/lib/generic-radix-tree.c
+++ b/lib/generic-radix-tree.c

@@ -131,6 +131,8 @@ void *__genradix_ptr_alloc(struct __genradix *radix, size_t offset,
 		if ((v = cmpxchg_release(&radix->root, r, new_root)) == r) {
 			v = new_root;
 			new_node = NULL;
+		} else {
+			new_node->children[0] = NULL;
 		}
 	}
 

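The new else branch is the rollback half of a publish-or-rollback: new_node->children[0] is pre-linked to the old root before the cmpxchg_release(), so the loser of the race must scrub that pointer or the preallocated node would later be reused or freed while still referencing a live subtree. The same shape in miniature (grow() and the four-way node are illustrative; GCC's __atomic builtins stand in for cmpxchg_release()):

#include <stddef.h>

struct node { struct node *children[4]; };

/* Grow the tree by one level: link the current root under a fresh node,
 * then try to publish it.  On failure, undo the link so the preallocated
 * node can be reused without carrying a stale child pointer. */
static void grow(struct node **rootp, struct node *new_node)
{
	struct node *r = __atomic_load_n(rootp, __ATOMIC_ACQUIRE);

	new_node->children[0] = r;	/* pre-link the old root */
	if (!__atomic_compare_exchange_n(rootp, &r, new_node, 0,
					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
		new_node->children[0] = NULL;	/* lost the race: scrub */
}
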
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c

@@ -432,8 +432,23 @@ static void zap_modalias_env(struct kobj_uevent_env *env)
 		len = strlen(env->envp[i]) + 1;
 
 		if (i != env->envp_idx - 1) {
+			/* @env->envp[] contains pointers to @env->buf[]
+			 * with @env->buflen chars, and we are removing
+			 * variable MODALIAS here pointed by @env->envp[i]
+			 * with length @len as shown below:
+			 *
+			 * 0               @env->buf[]      @env->buflen
+			 * ---------------------------------------------
+			 * ^             ^              ^              ^
+			 * |             |->   @len   <-| target block |
+			 * @env->envp[0] @env->envp[i]  @env->envp[i + 1]
+			 *
+			 * so the "target block" indicated above is moved
+			 * backward by @len, and its right size is
+			 * @env->buflen - (@env->envp[i + 1] - @env->envp[0]).
+			 */
 			memmove(env->envp[i], env->envp[i + 1],
-				env->buflen - len);
+				env->buflen - (env->envp[i + 1] - env->envp[0]));
 
 			for (j = i; j < env->envp_idx - 1; j++)
 				env->envp[j] = env->envp[j + 1] - len;

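The corrected size falls straight out of the diagram: the target block begins at env->envp[i + 1] and the used buffer ends at env->envp[0] + env->buflen, so exactly env->buflen - (env->envp[i + 1] - env->envp[0]) bytes must move, while the old env->buflen - len over-counts whenever variables precede MODALIAS. A worked check with assumed numbers:

#include <assert.h>

int main(void)
{
	/* Assumed layout: buf holds "A=1\0MODALIAS=x\0B=2\0", buflen = 19.
	 * envp[0] = buf + 0, envp[i] = buf + 4 (MODALIAS, len = 11),
	 * envp[i + 1] = buf + 15 (the "target block", 4 bytes). */
	unsigned int buflen = 19, len = 11;
	unsigned int envp_i1_off = 15;	/* envp[i + 1] - envp[0] */

	assert(buflen - envp_i1_off == 4);	/* exactly the target block */
	assert(buflen - len == 8);		/* old formula: 4 bytes too many */
	return 0;
}
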
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c

@@ -2298,6 +2298,8 @@ static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
 
 /*
  * mas_wr_node_walk() - Find the correct offset for the index in the @mas.
+ *                      If @mas->index cannot be found within the containing
+ *                      node, we traverse to the last entry in the node.
  * @wr_mas: The maple write state
  *
  * Uses mas_slot_locked() and does not need to worry about dead nodes.
@@ -3831,7 +3833,7 @@ static bool mas_wr_walk(struct ma_wr_state *wr_mas)
 	return true;
 }
 
-static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
+static void mas_wr_walk_index(struct ma_wr_state *wr_mas)
 {
 	struct ma_state *mas = wr_mas->mas;
 
@@ -3840,11 +3842,9 @@ static bool mas_wr_walk_index(struct ma_wr_state *wr_mas)
 		wr_mas->content = mas_slot_locked(mas, wr_mas->slots,
 						  mas->offset);
 		if (ma_is_leaf(wr_mas->type))
-			return true;
+			return;
 		mas_wr_walk_traverse(wr_mas);
 	}
-
-	return true;
 }
 
 /*
  * mas_extend_spanning_null() - Extend a store of a %NULL to include surrounding %NULLs.
@@ -4081,8 +4081,8 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
 	memset(&b_node, 0, sizeof(struct maple_big_node));
 	/* Copy l_mas and store the value in b_node. */
 	mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
-	/* Copy r_mas into b_node. */
-	if (r_mas.offset <= r_wr_mas.node_end)
+	/* Copy r_mas into b_node if there is anything to copy. */
+	if (r_mas.max > r_mas.last)
 		mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
 			   &b_node, b_node.b_end + 1);
 	else

--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c

@@ -6,8 +6,6 @@
 #include <linux/prime_numbers.h>
 #include <linux/slab.h>
 
-#define bitmap_size(nbits)	(BITS_TO_LONGS(nbits) * sizeof(unsigned long))
-
 struct primes {
 	struct rcu_head rcu;
 	unsigned long last, sz;

--- a/lib/objagg.c
+++ b/lib/objagg.c

@@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg,
 {
 	void *delta_priv;
 
+	if (WARN_ON(!objagg_obj_is_root(parent)))
+		return -EINVAL;
+
 	delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj,
 					       objagg_obj->obj);
 	if (IS_ERR(delta_priv))
@@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = {
 	[OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy,
 };
 
-static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg,
-				const void *obj)
-{
-	struct rhashtable *ht = arg->ht;
-	struct objagg_hints *objagg_hints =
-		container_of(ht, struct objagg_hints, node_ht);
-	const struct objagg_ops *ops = objagg_hints->ops;
-	const char *ptr = obj;
-
-	ptr += ht->p.key_offset;
-	return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) :
-	       memcmp(ptr, arg->key, ht->p.key_len);
-}
-
 /**
  * objagg_hints_get - obtains hints instance
  * @objagg: objagg instance
@@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg,
 					offsetof(struct objagg_hints_node, obj);
 	objagg_hints->ht_params.head_offset =
 					offsetof(struct objagg_hints_node, ht_node);
-	objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp;
 
 	err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params);
 	if (err)

--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c

@@ -60,12 +60,30 @@ static inline void update_alloc_hint_after_get(struct sbitmap *sb,
 /*
  * See if we have deferred clears that we can batch move
  */
-static inline bool sbitmap_deferred_clear(struct sbitmap_word *map)
+static inline bool sbitmap_deferred_clear(struct sbitmap_word *map,
+		unsigned int depth, unsigned int alloc_hint, bool wrap)
 {
-	unsigned long mask;
+	unsigned long mask, word_mask;
 
-	if (!READ_ONCE(map->cleared))
-		return false;
+	guard(raw_spinlock_irqsave)(&map->swap_lock);
+
+	if (!map->cleared) {
+		if (depth == 0)
+			return false;
+
+		word_mask = (~0UL) >> (BITS_PER_LONG - depth);
+		/*
+		 * The current behavior is to always retry after moving
+		 * ->cleared to word, and we change it to retry in case
+		 * of any free bits. To avoid an infinite loop, we need
+		 * to take wrap & alloc_hint into account, otherwise a
+		 * soft lockup may occur.
+		 */
+		if (!wrap && alloc_hint)
+			word_mask &= ~((1UL << alloc_hint) - 1);
+
+		return (READ_ONCE(map->word) & word_mask) != word_mask;
+	}
 
 	/*
 	 * First get a stable cleared mask, setting the old mask to 0.
@@ -85,6 +103,7 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		      bool alloc_hint)
 {
 	unsigned int bits_per_word;
+	int i;
 
 	if (shift < 0)
 		shift = sbitmap_calculate_shift(depth);
@@ -116,6 +135,9 @@ int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
 		return -ENOMEM;
 	}
 
+	for (i = 0; i < sb->map_nr; i++)
+		raw_spin_lock_init(&sb->map[i].swap_lock);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(sbitmap_init_node);
@@ -126,7 +148,7 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
 	unsigned int i;
 
 	for (i = 0; i < sb->map_nr; i++)
-		sbitmap_deferred_clear(&sb->map[i]);
+		sbitmap_deferred_clear(&sb->map[i], 0, 0, 0);
 
 	sb->depth = depth;
 	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
@@ -167,18 +189,19 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
 	return nr;
 }
 
-static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
-				     unsigned int alloc_hint)
+static int sbitmap_find_bit_in_word(struct sbitmap_word *map,
+				    unsigned int depth,
+				    unsigned int alloc_hint,
+				    bool wrap)
 {
-	struct sbitmap_word *map = &sb->map[index];
 	int nr;
 
 	do {
-		nr = __sbitmap_get_word(&map->word, __map_depth(sb, index),
-					alloc_hint, !sb->round_robin);
+		nr = __sbitmap_get_word(&map->word, depth,
+					alloc_hint, wrap);
 		if (nr != -1)
 			break;
-		if (!sbitmap_deferred_clear(map))
+		if (!sbitmap_deferred_clear(map, depth, alloc_hint, wrap))
 			break;
 	} while (1);
 
@@ -203,7 +226,9 @@ static int __sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint)
 		alloc_hint = 0;
 
 	for (i = 0; i < sb->map_nr; i++) {
-		nr = sbitmap_find_bit_in_index(sb, index, alloc_hint);
+		nr = sbitmap_find_bit_in_word(&sb->map[index],
+					      __map_depth(sb, index),
+					      alloc_hint, !sb->round_robin);
 		if (nr != -1) {
 			nr += index << sb->shift;
 			break;
@@ -243,30 +268,24 @@ static int __sbitmap_get_shallow(struct sbitmap *sb,
 	int nr = -1;
 
 	index = SB_NR_TO_INDEX(sb, alloc_hint);
+	alloc_hint = SB_NR_TO_BIT(sb, alloc_hint);
 
 	for (i = 0; i < sb->map_nr; i++) {
-again:
-		nr = __sbitmap_get_word(&sb->map[index].word,
-					min_t(unsigned int,
-					      __map_depth(sb, index),
-					      shallow_depth),
-					SB_NR_TO_BIT(sb, alloc_hint), true);
+		nr = sbitmap_find_bit_in_word(&sb->map[index],
+					      min_t(unsigned int,
+						    __map_depth(sb, index),
+						    shallow_depth),
+					      alloc_hint, true);
 		if (nr != -1) {
 			nr += index << sb->shift;
 			break;
 		}
 
-		if (sbitmap_deferred_clear(&sb->map[index]))
-			goto again;
-
 		/* Jump to next index. */
-		index++;
-		alloc_hint = index << sb->shift;
-
-		if (index >= sb->map_nr) {
+		alloc_hint = 0;
+		if (++index >= sb->map_nr)
 			index = 0;
-			alloc_hint = 0;
-		}
 	}
 
 	return nr;
@@ -506,18 +525,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
 		struct sbitmap_word *map = &sb->map[index];
 		unsigned long get_mask;
 		unsigned int map_depth = __map_depth(sb, index);
+		unsigned long val;
 
-		sbitmap_deferred_clear(map);
-		if (map->word == (1UL << (map_depth - 1)) - 1)
+		sbitmap_deferred_clear(map, 0, 0, 0);
+		val = READ_ONCE(map->word);
+		if (val == (1UL << (map_depth - 1)) - 1)
 			goto next;
 
-		nr = find_first_zero_bit(&map->word, map_depth);
+		nr = find_first_zero_bit(&val, map_depth);
 		if (nr + nr_tags <= map_depth) {
 			atomic_long_t *ptr = (atomic_long_t *) &map->word;
-			unsigned long val;
 
 			get_mask = ((1UL << nr_tags) - 1) << nr;
-			val = READ_ONCE(map->word);
 			while (!atomic_long_try_cmpxchg(ptr, &val,
 							get_mask | val))
 				;

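The word_mask computation decides whether a retry can make progress at all: only the low depth bits of the word count, and when the search will not wrap below alloc_hint the bits beneath it are excluded, so "retry" is reported only if a bit the caller could actually take is still clear. The predicate in isolation (can_retry is a hypothetical standalone rendering of that check):

#include <stdbool.h>

#define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

/* Is any usable bit still free in @word?  Consider only the low @depth
 * bits and, when not wrapping, only bits at or above @alloc_hint. */
static bool can_retry(unsigned long word, unsigned int depth,
		      unsigned int alloc_hint, bool wrap)
{
	unsigned long word_mask;

	if (depth == 0)
		return false;

	word_mask = (~0UL) >> (BITS_PER_LONG - depth);
	if (!wrap && alloc_hint)
		word_mask &= ~((1UL << alloc_hint) - 1);

	/* Retry only if some considered bit is still zero (free). */
	return (word & word_mask) != word_mask;
}

For word = 0b1011, depth = 4, alloc_hint = 2 and wrap = false, the mask becomes 0b1100; bit 2 is free, so a retry is worthwhile, whereas the pre-fix behavior could spin even when every reachable bit was taken.
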
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c

@@ -1756,6 +1756,97 @@ static noinline void check_get_order(struct xarray *xa)
 	}
 }
 
+static noinline void check_xas_get_order(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+
+	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+	unsigned int order;
+	unsigned long i, j;
+
+	for (order = 0; order < max_order; order++) {
+		for (i = 0; i < 10; i++) {
+			xas_set_order(&xas, i << order, order);
+			do {
+				xas_lock(&xas);
+				xas_store(&xas, xa_mk_value(i));
+				xas_unlock(&xas);
+			} while (xas_nomem(&xas, GFP_KERNEL));
+
+			for (j = i << order; j < (i + 1) << order; j++) {
+				xas_set_order(&xas, j, 0);
+				rcu_read_lock();
+				xas_load(&xas);
+				XA_BUG_ON(xa, xas_get_order(&xas) != order);
+				rcu_read_unlock();
+			}
+
+			xas_lock(&xas);
+			xas_set_order(&xas, i << order, order);
+			xas_store(&xas, NULL);
+			xas_unlock(&xas);
+		}
+	}
+}
+
+static noinline void check_xas_conflict_get_order(struct xarray *xa)
+{
+	XA_STATE(xas, xa, 0);
+
+	void *entry;
+	int only_once;
+	unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+	unsigned int order;
+	unsigned long i, j, k;
+
+	for (order = 0; order < max_order; order++) {
+		for (i = 0; i < 10; i++) {
+			xas_set_order(&xas, i << order, order);
+			do {
+				xas_lock(&xas);
+				xas_store(&xas, xa_mk_value(i));
+				xas_unlock(&xas);
+			} while (xas_nomem(&xas, GFP_KERNEL));
+
+			/*
+			 * Ensure xas_get_order works with xas_for_each_conflict.
+			 */
+			j = i << order;
+			for (k = 0; k < order; k++) {
+				only_once = 0;
+				xas_set_order(&xas, j + (1 << k), k);
+				xas_lock(&xas);
+				xas_for_each_conflict(&xas, entry) {
+					XA_BUG_ON(xa, entry != xa_mk_value(i));
+					XA_BUG_ON(xa, xas_get_order(&xas) != order);
+					only_once++;
+				}
+				XA_BUG_ON(xa, only_once != 1);
+				xas_unlock(&xas);
+			}
+
+			if (order < max_order - 1) {
+				only_once = 0;
+				xas_set_order(&xas, (i & ~1UL) << order, order + 1);
+				xas_lock(&xas);
+				xas_for_each_conflict(&xas, entry) {
+					XA_BUG_ON(xa, entry != xa_mk_value(i));
+					XA_BUG_ON(xa, xas_get_order(&xas) != order);
+					only_once++;
+				}
+				XA_BUG_ON(xa, only_once != 1);
+				xas_unlock(&xas);
+			}
+
+			xas_set_order(&xas, i << order, order);
+			xas_lock(&xas);
+			xas_store(&xas, NULL);
+			xas_unlock(&xas);
+		}
+	}
+}
+
 static noinline void check_destroy(struct xarray *xa)
 {
 	unsigned long index;
@@ -1805,6 +1896,8 @@ static int xarray_checks(void)
 	check_reserve(&xa0);
 	check_multi_store(&array);
 	check_get_order(&array);
+	check_xas_get_order(&array);
+	check_xas_conflict_get_order(&array);
 	check_xa_alloc();
 	check_find(&array);
 	check_find_entry(&array);

--- a/lib/xarray.c
+++ b/lib/xarray.c

@@ -1751,6 +1751,36 @@ unlock:
 }
 EXPORT_SYMBOL(xa_store_range);
 
+/**
+ * xas_get_order() - Get the order of an entry.
+ * @xas: XArray operation state.
+ *
+ * Called after xas_load, the xas should not be in an error state.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xas_get_order(struct xa_state *xas)
+{
+	int order = 0;
+
+	if (!xas->xa_node)
+		return 0;
+
+	for (;;) {
+		unsigned int slot = xas->xa_offset + (1 << order);
+
+		if (slot >= XA_CHUNK_SIZE)
+			break;
+		if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
+			break;
+		order++;
+	}
+
+	order += xas->xa_node->shift;
+	return order;
+}
+EXPORT_SYMBOL_GPL(xas_get_order);
+
 /**
  * xa_get_order() - Get the order of an entry.
  * @xa: XArray.
@@ -1761,30 +1791,13 @@ EXPORT_SYMBOL(xa_store_range);
 int xa_get_order(struct xarray *xa, unsigned long index)
 {
 	XA_STATE(xas, xa, index);
-	void *entry;
 	int order = 0;
+	void *entry;
 
 	rcu_read_lock();
 	entry = xas_load(&xas);
-
-	if (!entry)
-		goto unlock;
-
-	if (!xas.xa_node)
-		goto unlock;
-
-	for (;;) {
-		unsigned int slot = xas.xa_offset + (1 << order);
-
-		if (slot >= XA_CHUNK_SIZE)
-			break;
-		if (!xa_is_sibling(xas.xa_node->slots[slot]))
-			break;
-		order++;
-	}
-
-	order += xas.xa_node->shift;
-unlock:
+	if (entry)
+		order = xas_get_order(&xas);
 	rcu_read_unlock();
 
 	return order;
--- a/lib/xz/xz_crc32.c
+++ b/lib/xz/xz_crc32.c

@@ -29,7 +29,7 @@ STATIC_RW_DATA uint32_t xz_crc32_table[256];
 
 XZ_EXTERN void xz_crc32_init(void)
 {
-	const uint32_t poly = CRC32_POLY_LE;
+	const uint32_t poly = 0xEDB88320;
 
 	uint32_t i;
 	uint32_t j;

--- a/lib/xz/xz_private.h
+++ b/lib/xz/xz_private.h

@@ -105,10 +105,6 @@
 #	endif
 #endif
 
-#ifndef CRC32_POLY_LE
-#define CRC32_POLY_LE 0xedb88320
-#endif
-
 /*
  * Allocate memory for LZMA2 decoder. xz_dec_lzma2_reset() must be used
  * before calling xz_dec_lzma2_run().