Mirror of https://github.com/armbian/linux-rockchip.git
Merge 5.10.193 into android12-5.10-lts
Changes in 5.10.193
objtool/x86: Fix SRSO mess
NFSv4: fix out path in __nfs4_get_acl_uncached
xprtrdma: Remap Receive buffers after a reconnect
PCI: acpiphp: Reassign resources on bridge if necessary
dlm: improve plock logging if interrupted
dlm: replace usage of found with dedicated list iterator variable
fs: dlm: add pid to debug log
fs: dlm: change plock interrupted message to debug again
fs: dlm: use dlm_plock_info for do_unlock_close
fs: dlm: fix mismatch of plock results from userspace
MIPS: cpu-features: Enable octeon_cache by cpu_type
MIPS: cpu-features: Use boot_cpu_type for CPU type based features
fbdev: Improve performance of sys_imageblit()
fbdev: Fix sys_imageblit() for arbitrary image widths
fbdev: fix potential OOB read in fast_imageblit()
dm integrity: increase RECALC_SECTORS to improve recalculate speed
dm integrity: reduce vmalloc space footprint on 32-bit architectures
ALSA: pcm: Fix potential data race at PCM memory allocation helpers
drm/amd/display: do not wait for mpc idle if tg is disabled
drm/amd/display: check TG is non-null before checking if enabled
libceph, rbd: ignore addr->type while comparing in some cases
rbd: make get_lock_owner_info() return a single locker or NULL
rbd: retrieve and check lock owner twice before blocklisting
rbd: prevent busy loop when requesting exclusive lock
tracing: Fix cpu buffers unavailable due to 'record_disabled' missed
tracing: Fix memleak due to race between current_tracer and trace
octeontx2-af: SDP: fix receive link config
sock: annotate data-races around prot->memory_pressure
dccp: annotate data-races in dccp_poll()
ipvlan: Fix a reference count leak warning in ipvlan_ns_exit()
net: bgmac: Fix return value check for fixed_phy_register()
net: bcmgenet: Fix return value check for fixed_phy_register()
net: validate veth and vxcan peer ifindexes
ice: fix receive buffer size miscalculation
igb: Avoid starting unnecessary workqueues
net/sched: fix a qdisc modification with ambiguous command request
netfilter: nf_tables: fix out of memory error handling
rtnetlink: return ENODEV when ifname does not exist and group is given
rtnetlink: Reject negative ifindexes in RTM_NEWLINK
net: remove bond_slave_has_mac_rcu()
bonding: fix macvlan over alb bond support
ibmveth: Use dcbf rather than dcbfl
NFSv4: Fix dropped lock for racing OPEN and delegation return
clk: Fix slab-out-of-bounds error in devm_clk_release()
mm: add a call to flush_cache_vmap() in vmap_pfn()
NFS: Fix a use after free in nfs_direct_join_group()
nfsd: Fix race to FREE_STATEID and cl_revoked
selinux: set next pointer before attaching to list
batman-adv: Trigger events for auto adjusted MTU
batman-adv: Don't increase MTU when set by user
batman-adv: Do not get eth header before batadv_check_management_packet
batman-adv: Fix TT global entry leak when client roamed back
batman-adv: Fix batadv_v_ogm_aggr_send memory leak
batman-adv: Hold rtnl lock during MTU update via netlink
lib/clz_ctz.c: Fix __clzdi2() and __ctzdi2() for 32-bit kernels
radix tree: remove unused variable
of: dynamic: Refactor action prints to not use "%pOF" inside devtree_lock
media: vcodec: Fix potential array out-of-bounds in encoder queue_setup
PCI: acpiphp: Use pci_assign_unassigned_bridge_resources() only for non-root bus
drm/vmwgfx: Fix shader stage validation
drm/display/dp: Fix the DP DSC Receiver cap size
x86/fpu: Set X86_FEATURE_OSXSAVE feature after enabling OSXSAVE in CR4
torture: Fix hang during kthread shutdown phase
tick: Detect and fix jiffies update stall
timers/nohz: Switch to ONESHOT_STOPPED in the low-res handler when the tick is stopped
cgroup/cpuset: Rename functions dealing with DEADLINE accounting
sched/cpuset: Bring back cpuset_mutex
sched/cpuset: Keep track of SCHED_DEADLINE task in cpusets
cgroup/cpuset: Iterate only if DEADLINE tasks are present
sched/deadline: Create DL BW alloc, free & check overflow interface
cgroup/cpuset: Free DL BW in case can_attach() fails
drm/i915: Fix premature release of request's reusable memory
ASoC: rt711: add two jack detection modes
scsi: snic: Fix double free in snic_tgt_create()
scsi: core: raid_class: Remove raid_component_add()
clk: Fix undefined reference to `clk_rate_exclusive_{get,put}'
pinctrl: renesas: rza2: Add lock around pinctrl_generic{{add,remove}_group,{add,remove}_function}
dma-buf/sw_sync: Avoid recursive lock during fence signal
mm,hwpoison: refactor get_any_page
mm: fix page reference leak in soft_offline_page()
mm: memory-failure: kill soft_offline_free_page()
mm: memory-failure: fix unexpected return value in soft_offline_page()
ASoC: Intel: sof_sdw: include rt711.h for RT711 JD mode
mm,hwpoison: fix printing of page flags
Linux 5.10.193
Change-Id: I7c6ce55cbc73cef27a5cbe8954131a052b67dac2
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
Makefile: 2 changed lines (+1, -1)
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 192
+SUBLEVEL = 193
 EXTRAVERSION =
 NAME = Dare mighty things

@@ -126,7 +126,24 @@
 #define cpu_has_tx39_cache    __opt(MIPS_CPU_TX39_CACHE)
 #endif
 #ifndef cpu_has_octeon_cache
-#define cpu_has_octeon_cache    0
+#define cpu_has_octeon_cache \
+({ \
+    int __res; \
+    \
+    switch (boot_cpu_type()) { \
+    case CPU_CAVIUM_OCTEON: \
+    case CPU_CAVIUM_OCTEON_PLUS: \
+    case CPU_CAVIUM_OCTEON2: \
+    case CPU_CAVIUM_OCTEON3: \
+        __res = 1; \
+        break; \
+    \
+    default: \
+        __res = 0; \
+    } \
+    \
+    __res; \
+})
 #endif
 /* Don't override `cpu_has_fpu' to 1 or the "nofpu" option won't work. */
 #ifndef cpu_has_fpu
@@ -353,7 +370,7 @@
 ({ \
     int __res; \
     \
-    switch (current_cpu_type()) { \
+    switch (boot_cpu_type()) { \
     case CPU_M14KC: \
     case CPU_74K: \
     case CPU_1074K: \

@@ -892,6 +892,14 @@ void __init fpu__init_system_xstate(void)
     setup_init_fpu_buf();
     setup_xstate_comp_offsets();
     setup_supervisor_only_offsets();
+
+    /*
+     * CPU capabilities initialization runs before FPU init. So
+     * X86_FEATURE_OSXSAVE is not set. Now that XSAVE is completely
+     * functional, set the feature bit so depending code works.
+     */
+    setup_force_cpu_cap(X86_FEATURE_OSXSAVE);
+
     print_xstate_offset_size();

     pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is %d bytes, using '%s' format.\n",

@@ -3740,7 +3740,7 @@ static int rbd_lock(struct rbd_device *rbd_dev)
     ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
                 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
                 RBD_LOCK_TAG, "", 0);
-    if (ret)
+    if (ret && ret != -EEXIST)
         return ret;

     __rbd_lock(rbd_dev, cookie);
@@ -3914,10 +3914,26 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
     list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
 }

-static int get_lock_owner_info(struct rbd_device *rbd_dev,
-                   struct ceph_locker **lockers, u32 *num_lockers)
+static bool locker_equal(const struct ceph_locker *lhs,
+             const struct ceph_locker *rhs)
+{
+    return lhs->id.name.type == rhs->id.name.type &&
+           lhs->id.name.num == rhs->id.name.num &&
+           !strcmp(lhs->id.cookie, rhs->id.cookie) &&
+           ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
+}
+
+static void free_locker(struct ceph_locker *locker)
+{
+    if (locker)
+        ceph_free_lockers(locker, 1);
+}
+
+static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
 {
     struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
+    struct ceph_locker *lockers;
+    u32 num_lockers;
     u8 lock_type;
     char *lock_tag;
     int ret;
@@ -3926,39 +3942,45 @@ static int get_lock_owner_info(struct rbd_device *rbd_dev,

     ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
                  &rbd_dev->header_oloc, RBD_LOCK_NAME,
-                 &lock_type, &lock_tag, lockers, num_lockers);
-    if (ret)
-        return ret;
+                 &lock_type, &lock_tag, &lockers, &num_lockers);
+    if (ret) {
+        rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
+        return ERR_PTR(ret);
+    }

-    if (*num_lockers == 0) {
+    if (num_lockers == 0) {
         dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
+        lockers = NULL;
         goto out;
     }

     if (strcmp(lock_tag, RBD_LOCK_TAG)) {
         rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
              lock_tag);
-        ret = -EBUSY;
-        goto out;
+        goto err_busy;
     }

     if (lock_type == CEPH_CLS_LOCK_SHARED) {
         rbd_warn(rbd_dev, "shared lock type detected");
-        ret = -EBUSY;
-        goto out;
+        goto err_busy;
     }

-    if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
+    WARN_ON(num_lockers != 1);
+    if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
             strlen(RBD_LOCK_COOKIE_PREFIX))) {
         rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
-             (*lockers)[0].id.cookie);
-        ret = -EBUSY;
-        goto out;
+             lockers[0].id.cookie);
+        goto err_busy;
     }

 out:
     kfree(lock_tag);
-    return ret;
+    return lockers;
+
+err_busy:
+    kfree(lock_tag);
+    ceph_free_lockers(lockers, num_lockers);
+    return ERR_PTR(-EBUSY);
 }

 static int find_watcher(struct rbd_device *rbd_dev,
@@ -3974,13 +3996,19 @@ static int find_watcher(struct rbd_device *rbd_dev,
     ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
                       &rbd_dev->header_oloc, &watchers,
                       &num_watchers);
-    if (ret)
+    if (ret) {
+        rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
         return ret;
+    }

     sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
     for (i = 0; i < num_watchers; i++) {
-        if (!memcmp(&watchers[i].addr, &locker->info.addr,
-                sizeof(locker->info.addr)) &&
+        /*
+         * Ignore addr->type while comparing.  This mimics
+         * entity_addr_t::get_legacy_str() + strcmp().
+         */
+        if (ceph_addr_equal_no_type(&watchers[i].addr,
+                        &locker->info.addr) &&
             watchers[i].cookie == cookie) {
             struct rbd_client_id cid = {
                 .gid = le64_to_cpu(watchers[i].name.num),
@@ -4008,51 +4036,72 @@ out:
 static int rbd_try_lock(struct rbd_device *rbd_dev)
 {
     struct ceph_client *client = rbd_dev->rbd_client->client;
-    struct ceph_locker *lockers;
-    u32 num_lockers;
+    struct ceph_locker *locker, *refreshed_locker;
     int ret;

     for (;;) {
+        locker = refreshed_locker = NULL;
+
         ret = rbd_lock(rbd_dev);
-        if (ret != -EBUSY)
-            return ret;
+        if (!ret)
+            goto out;
+        if (ret != -EBUSY) {
+            rbd_warn(rbd_dev, "failed to lock header: %d", ret);
+            goto out;
+        }

         /* determine if the current lock holder is still alive */
-        ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
-        if (ret)
-            return ret;
-
-        if (num_lockers == 0)
+        locker = get_lock_owner_info(rbd_dev);
+        if (IS_ERR(locker)) {
+            ret = PTR_ERR(locker);
+            locker = NULL;
+            goto out;
+        }
+        if (!locker)
             goto again;

-        ret = find_watcher(rbd_dev, lockers);
+        ret = find_watcher(rbd_dev, locker);
         if (ret)
             goto out; /* request lock or error */

+        refreshed_locker = get_lock_owner_info(rbd_dev);
+        if (IS_ERR(refreshed_locker)) {
+            ret = PTR_ERR(refreshed_locker);
+            refreshed_locker = NULL;
+            goto out;
+        }
+        if (!refreshed_locker ||
+            !locker_equal(locker, refreshed_locker))
+            goto again;
+
         rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
-             ENTITY_NAME(lockers[0].id.name));
+             ENTITY_NAME(locker->id.name));

         ret = ceph_monc_blocklist_add(&client->monc,
-                          &lockers[0].info.addr);
+                          &locker->info.addr);
         if (ret) {
-            rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
-                 ENTITY_NAME(lockers[0].id.name), ret);
+            rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
+                 ENTITY_NAME(locker->id.name), ret);
             goto out;
         }

         ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
                       &rbd_dev->header_oloc, RBD_LOCK_NAME,
-                      lockers[0].id.cookie,
-                      &lockers[0].id.name);
-        if (ret && ret != -ENOENT)
-            return ret;
+                      locker->id.cookie, &locker->id.name);
+        if (ret && ret != -ENOENT) {
+            rbd_warn(rbd_dev, "failed to break header lock: %d",
+                 ret);
+            goto out;
+        }

 again:
-        ceph_free_lockers(lockers, num_lockers);
+        free_locker(refreshed_locker);
+        free_locker(locker);
     }

 out:
-    ceph_free_lockers(lockers, num_lockers);
+    free_locker(refreshed_locker);
+    free_locker(locker);
     return ret;
 }

@@ -4102,11 +4151,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)

     ret = rbd_try_lock(rbd_dev);
     if (ret < 0) {
-        rbd_warn(rbd_dev, "failed to lock header: %d", ret);
-        if (ret == -EBLOCKLISTED)
-            goto out;
-
-        ret = 1; /* request lock anyway */
+        rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
+        goto out;
     }
     if (ret > 0) {
         up_write(&rbd_dev->lock_rwsem);
@@ -6656,12 +6702,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
         cancel_delayed_work_sync(&rbd_dev->lock_dwork);
         if (!ret)
             ret = -ETIMEDOUT;
-    }

-    if (ret) {
-        rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
-        return ret;
+        rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
     }
+    if (ret)
+        return ret;

     /*
      * The lock may have been released by now, unless automatic lock

@@ -205,18 +205,19 @@ EXPORT_SYMBOL(devm_clk_put);
 struct clk *devm_get_clk_from_child(struct device *dev,
                     struct device_node *np, const char *con_id)
 {
-    struct clk **ptr, *clk;
+    struct devm_clk_state *state;
+    struct clk *clk;

-    ptr = devres_alloc(devm_clk_release, sizeof(*ptr), GFP_KERNEL);
-    if (!ptr)
+    state = devres_alloc(devm_clk_release, sizeof(*state), GFP_KERNEL);
+    if (!state)
         return ERR_PTR(-ENOMEM);

     clk = of_clk_get_by_name(np, con_id);
     if (!IS_ERR(clk)) {
-        *ptr = clk;
-        devres_add(dev, ptr);
+        state->clk = clk;
+        devres_add(dev, state);
     } else {
-        devres_free(ptr);
+        devres_free(state);
     }

     return clk;

@@ -191,6 +191,7 @@ static const struct dma_fence_ops timeline_fence_ops = {
  */
 static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
 {
+    LIST_HEAD(signalled);
     struct sync_pt *pt, *next;

     trace_sync_timeline(obj);
@@ -203,21 +204,20 @@ static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
         if (!timeline_fence_signaled(&pt->base))
             break;

-        list_del_init(&pt->link);
+        dma_fence_get(&pt->base);
+
+        list_move_tail(&pt->link, &signalled);
         rb_erase(&pt->node, &obj->pt_tree);

-        /*
-         * A signal callback may release the last reference to this
-         * fence, causing it to be freed. That operation has to be
-         * last to avoid a use after free inside this loop, and must
-         * be after we remove the fence from the timeline in order to
-         * prevent deadlocking on timeline->lock inside
-         * timeline_fence_release().
-         */
         dma_fence_signal_locked(&pt->base);
     }

     spin_unlock_irq(&obj->lock);
+
+    list_for_each_entry_safe(pt, next, &signalled, link) {
+        list_del_init(&pt->link);
+        dma_fence_put(&pt->base);
+    }
 }

 /**

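The sw_sync hunk above defers the final dma_fence_put() calls until after obj->lock is dropped by first moving signalled points onto a private "signalled" list. Below is a minimal standalone C sketch of that "collect under the lock, release after unlock" idea; all names are invented for illustration and a pthread mutex stands in for the timeline lock — this is not kernel code.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
    struct item *next;
    int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *pending;            /* guarded by 'lock' */

/* Hypothetical release hook that, like timeline_fence_release(), needs the
 * same lock -- calling it with 'lock' already held would deadlock. */
static void release_item(struct item *it)
{
    pthread_mutex_lock(&lock);
    /* ...bookkeeping that requires the lock... */
    pthread_mutex_unlock(&lock);
    printf("released %d\n", it->id);
    free(it);
}

static void signal_all(void)
{
    struct item *done = NULL, *it, *next;

    pthread_mutex_lock(&lock);
    /* Collect everything while holding the lock... */
    while ((it = pending)) {
        pending = it->next;
        it->next = done;
        done = it;
    }
    pthread_mutex_unlock(&lock);

    /* ...and only release once the lock has been dropped. */
    for (it = done; it; it = next) {
        next = it->next;
        release_item(it);
    }
}

int main(void)
{
    for (int i = 0; i < 3; i++) {
        struct item *it = malloc(sizeof(*it));
        it->id = i;
        it->next = pending;
        pending = it;
    }
    signal_all();
    return 0;
}
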
@@ -3282,7 +3282,9 @@ void dcn10_wait_for_mpcc_disconnect(
         if (pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst]) {
             struct hubp *hubp = get_hubp_by_inst(res_pool, mpcc_inst);

-            res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
+            if (pipe_ctx->stream_res.tg &&
+                pipe_ctx->stream_res.tg->funcs->is_tg_enabled(pipe_ctx->stream_res.tg))
+                res_pool->mpc->funcs->wait_for_idle(res_pool->mpc, mpcc_inst);
             pipe_ctx->stream_res.opp->mpcc_disconnect_pending[mpcc_inst] = false;
             hubp->funcs->set_blank(hubp, true);
         }

@@ -457,8 +457,11 @@ int i915_active_ref(struct i915_active *ref, u64 idx, struct dma_fence *fence)
         }
     } while (unlikely(is_barrier(active)));

-    if (!__i915_active_fence_set(active, fence))
+    fence = __i915_active_fence_set(active, fence);
+    if (!fence)
         __i915_active_acquire(ref);
+    else
+        dma_fence_put(fence);

 out:
     i915_active_release(ref);
@@ -477,13 +480,9 @@ __i915_active_set_fence(struct i915_active *ref,
         return NULL;
     }

-    rcu_read_lock();
     prev = __i915_active_fence_set(active, fence);
-    if (prev)
-        prev = dma_fence_get_rcu(prev);
-    else
+    if (!prev)
         __i915_active_acquire(ref);
-    rcu_read_unlock();

     return prev;
 }
@@ -1050,10 +1049,11 @@ void i915_request_add_active_barriers(struct i915_request *rq)
  *
  * Records the new @fence as the last active fence along its timeline in
  * this active tracker, moving the tracking callbacks from the previous
- * fence onto this one. Returns the previous fence (if not already completed),
- * which the caller must ensure is executed before the new fence. To ensure
- * that the order of fences within the timeline of the i915_active_fence is
- * understood, it should be locked by the caller.
+ * fence onto this one. Gets and returns a reference to the previous fence
+ * (if not already completed), which the caller must put after making sure
+ * that it is executed before the new fence. To ensure that the order of
+ * fences within the timeline of the i915_active_fence is understood, it
+ * should be locked by the caller.
  */
 struct dma_fence *
 __i915_active_fence_set(struct i915_active_fence *active,
@@ -1062,7 +1062,23 @@ __i915_active_fence_set(struct i915_active_fence *active,
     struct dma_fence *prev;
     unsigned long flags;

-    if (fence == rcu_access_pointer(active->fence))
+    /*
+     * In case of fences embedded in i915_requests, their memory is
+     * SLAB_FAILSAFE_BY_RCU, then it can be reused right after release
+     * by new requests. Then, there is a risk of passing back a pointer
+     * to a new, completely unrelated fence that reuses the same memory
+     * while tracked under a different active tracker. Combined with i915
+     * perf open/close operations that build await dependencies between
+     * engine kernel context requests and user requests from different
+     * timelines, this can lead to dependency loops and infinite waits.
+     *
+     * As a countermeasure, we try to get a reference to the active->fence
+     * first, so if we succeed and pass it back to our user then it is not
+     * released and potentially reused by an unrelated request before the
+     * user has a chance to set up an await dependency on it.
+     */
+    prev = i915_active_fence_get(active);
+    if (fence == prev)
         return fence;

     GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
@@ -1071,27 +1087,56 @@
      * Consider that we have two threads arriving (A and B), with
      * C already resident as the active->fence.
      *
-     * A does the xchg first, and so it sees C or NULL depending
-     * on the timing of the interrupt handler. If it is NULL, the
-     * previous fence must have been signaled and we know that
-     * we are first on the timeline. If it is still present,
-     * we acquire the lock on that fence and serialise with the interrupt
-     * handler, in the process removing it from any future interrupt
-     * callback. A will then wait on C before executing (if present).
-     *
-     * As B is second, it sees A as the previous fence and so waits for
-     * it to complete its transition and takes over the occupancy for
-     * itself -- remembering that it needs to wait on A before executing.
+     * Both A and B have got a reference to C or NULL, depending on the
+     * timing of the interrupt handler. Let's assume that if A has got C
+     * then it has locked C first (before B).
      *
      * Note the strong ordering of the timeline also provides consistent
      * nesting rules for the fence->lock; the inner lock is always the
      * older lock.
      */
     spin_lock_irqsave(fence->lock, flags);
-    prev = xchg(__active_fence_slot(active), fence);
-    if (prev) {
-        GEM_BUG_ON(prev == fence);
+    if (prev)
         spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+
+    /*
+     * A does the cmpxchg first, and so it sees C or NULL, as before, or
+     * something else, depending on the timing of other threads and/or
+     * interrupt handler. If not the same as before then A unlocks C if
+     * applicable and retries, starting from an attempt to get a new
+     * active->fence. Meanwhile, B follows the same path as A.
+     * Once A succeeds with cmpxch, B fails again, retires, gets A from
+     * active->fence, locks it as soon as A completes, and possibly
+     * succeeds with cmpxchg.
+     */
+    while (cmpxchg(__active_fence_slot(active), prev, fence) != prev) {
+        if (prev) {
+            spin_unlock(prev->lock);
+            dma_fence_put(prev);
+        }
+        spin_unlock_irqrestore(fence->lock, flags);
+
+        prev = i915_active_fence_get(active);
+        GEM_BUG_ON(prev == fence);
+
+        spin_lock_irqsave(fence->lock, flags);
+        if (prev)
+            spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
+    }
+
+    /*
+     * If prev is NULL then the previous fence must have been signaled
+     * and we know that we are first on the timeline. If it is still
+     * present then, having the lock on that fence already acquired, we
+     * serialise with the interrupt handler, in the process of removing it
+     * from any future interrupt callback. A will then wait on C before
+     * executing (if present).
+     *
+     * As B is second, it sees A as the previous fence and so waits for
+     * it to complete its transition and takes over the occupancy for
+     * itself -- remembering that it needs to wait on A before executing.
+     */
+    if (prev) {
         __list_del_entry(&active->cb.node);
         spin_unlock(prev->lock); /* serialise with prev->cb_list */
     }
@@ -1108,11 +1153,7 @@ int i915_active_fence_set(struct i915_active_fence *active,
     int err = 0;

     /* Must maintain timeline ordering wrt previous active requests */
-    rcu_read_lock();
     fence = __i915_active_fence_set(active, &rq->fence);
-    if (fence) /* but the previous fence may not belong to that timeline! */
-        fence = dma_fence_get_rcu(fence);
-    rcu_read_unlock();
     if (fence) {
         err = i915_request_await_dma_fence(rq, fence);
         dma_fence_put(fence);
@@ -1525,6 +1525,8 @@ __i915_request_add_to_timeline(struct i915_request *rq)
                          &rq->dep,
                          0);
     }
+    if (prev)
+        i915_request_put(prev);

     /*
      * Make sure that no request gazumped us - if it was allocated after

@@ -1606,4 +1606,17 @@ static inline void vmw_mmio_write(u32 value, u32 *addr)
 {
     WRITE_ONCE(*addr, value);
 }

+static inline bool vmw_shadertype_is_valid(enum vmw_sm_type shader_model,
+                       u32 shader_type)
+{
+    SVGA3dShaderType max_allowed = SVGA3D_SHADERTYPE_PREDX_MAX;
+
+    if (shader_model >= VMW_SM_5)
+        max_allowed = SVGA3D_SHADERTYPE_MAX;
+    else if (shader_model >= VMW_SM_4)
+        max_allowed = SVGA3D_SHADERTYPE_DX10_MAX;
+    return shader_type >= SVGA3D_SHADERTYPE_MIN && shader_type < max_allowed;
+}
+
 #endif

@@ -1998,7 +1998,7 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,

     cmd = container_of(header, typeof(*cmd), header);

-    if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
+    if (!vmw_shadertype_is_valid(VMW_SM_LEGACY, cmd->body.type)) {
         VMW_DEBUG_USER("Illegal shader type %u.\n",
                    (unsigned int) cmd->body.type);
         return -EINVAL;
@@ -2120,8 +2120,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
                       SVGA3dCmdHeader *header)
 {
     VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
-    SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
-        SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

     struct vmw_resource *res = NULL;
     struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
@@ -2138,6 +2136,14 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
     if (unlikely(ret != 0))
         return ret;

+    if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type) ||
+        cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
+        VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
+                   (unsigned int) cmd->body.type,
+                   (unsigned int) cmd->body.slot);
+        return -EINVAL;
+    }
+
     binding.bi.ctx = ctx_node->ctx;
     binding.bi.res = res;
     binding.bi.bt = vmw_ctx_binding_cb;
@@ -2146,14 +2152,6 @@ vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
     binding.size = cmd->body.sizeInBytes;
     binding.slot = cmd->body.slot;

-    if (binding.shader_slot >= max_shader_num ||
-        binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
-        VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
-                   (unsigned int) cmd->body.type,
-                   (unsigned int) binding.slot);
-        return -EINVAL;
-    }
-
     vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
             binding.slot);

@@ -2174,15 +2172,13 @@ static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
 {
     VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
         container_of(header, typeof(*cmd), header);
-    SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
-        SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;

     u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
         sizeof(SVGA3dShaderResourceViewId);

     if ((u64) cmd->body.startView + (u64) num_sr_view >
         (u64) SVGA3D_DX_MAX_SRVIEWS ||
-        cmd->body.type >= max_allowed) {
+        !vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
         VMW_DEBUG_USER("Invalid shader binding.\n");
         return -EINVAL;
     }
@@ -2206,8 +2202,6 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
                  SVGA3dCmdHeader *header)
 {
     VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
-    SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
-        SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
     struct vmw_resource *res = NULL;
     struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
     struct vmw_ctx_bindinfo_shader binding;
@@ -2218,8 +2212,7 @@ static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,

     cmd = container_of(header, typeof(*cmd), header);

-    if (cmd->body.type >= max_allowed ||
-        cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
+    if (!vmw_shadertype_is_valid(dev_priv->sm_type, cmd->body.type)) {
         VMW_DEBUG_USER("Illegal shader type %u.\n",
                    (unsigned int) cmd->body.type);
         return -EINVAL;

@@ -31,11 +31,11 @@
 #define DEFAULT_BUFFER_SECTORS        128
 #define DEFAULT_JOURNAL_WATERMARK    50
 #define DEFAULT_SYNC_MSEC        10000
-#define DEFAULT_MAX_JOURNAL_SECTORS    131072
+#define DEFAULT_MAX_JOURNAL_SECTORS    (IS_ENABLED(CONFIG_64BIT) ? 131072 : 8192)
 #define MIN_LOG2_INTERLEAVE_SECTORS    3
 #define MAX_LOG2_INTERLEAVE_SECTORS    31
 #define METADATA_WORKQUEUE_MAX_ACTIVE    16
-#define RECALC_SECTORS            8192
+#define RECALC_SECTORS            (IS_ENABLED(CONFIG_64BIT) ? 32768 : 2048)
 #define RECALC_WRITE_SUPER        16
 #define BITMAP_BLOCK_SIZE        4096    /* don't change it */
 #define BITMAP_FLUSH_INTERVAL        (10 * HZ)

@@ -729,6 +729,8 @@ static int vb2ops_venc_queue_setup(struct vb2_queue *vq,
         return -EINVAL;

     if (*nplanes) {
+        if (*nplanes != q_data->fmt->num_planes)
+            return -EINVAL;
         for (i = 0; i < *nplanes; i++)
             if (sizes[i] < q_data->sizeimage[i])
                 return -EINVAL;

@@ -656,10 +656,10 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
         return NULL;
     arp = (struct arp_pkt *)skb_network_header(skb);

-    /* Don't modify or load balance ARPs that do not originate locally
-     * (e.g.,arrive via a bridge).
+    /* Don't modify or load balance ARPs that do not originate
+     * from the bond itself or a VLAN directly above the bond.
      */
-    if (!bond_slave_has_mac_rx(bond, arp->mac_src))
+    if (!bond_slave_has_mac_rcu(bond, arp->mac_src))
         return NULL;

     if (arp->op_code == htons(ARPOP_REPLY)) {

@@ -179,12 +179,7 @@ static int vxcan_newlink(struct net *net, struct net_device *dev,

         nla_peer = data[VXCAN_INFO_PEER];
         ifmp = nla_data(nla_peer);
-        err = rtnl_nla_parse_ifla(peer_tb,
-                      nla_data(nla_peer) +
-                      sizeof(struct ifinfomsg),
-                      nla_len(nla_peer) -
-                      sizeof(struct ifinfomsg),
-                      NULL);
+        err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack);
         if (err < 0)
             return err;

@@ -1448,7 +1448,7 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
     int err;

     phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
-    if (!phy_dev || IS_ERR(phy_dev)) {
+    if (IS_ERR(phy_dev)) {
         dev_err(bgmac->dev, "Failed to register fixed PHY device\n");
         return -ENODEV;
     }

@@ -568,7 +568,7 @@ static int bcmgenet_mii_pd_init(struct bcmgenet_priv *priv)
     };

     phydev = fixed_phy_register(PHY_POLL, &fphy_status, NULL);
-    if (!phydev || IS_ERR(phydev)) {
+    if (IS_ERR(phydev)) {
         dev_err(kdev, "failed to register fixed PHY device\n");
         return -ENODEV;
     }

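Both fixed_phy_register() fixes above hinge on the kernel's error-pointer convention: on failure the function returns an ERR_PTR()-encoded value rather than NULL, so the extra NULL test could never catch an error. The sketch below is a simplified user-space re-implementation of that convention for illustration only; the real definitions live in the kernel's err.h and the PHY device here is just a stand-in.

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    /* Error codes are encoded in the top 4095 pointer values. */
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static void *fake_register(int fail)
{
    static int dev;                     /* stands in for a real phy_device */
    return fail ? ERR_PTR(-19 /* -ENODEV */) : &dev;
}

int main(void)
{
    void *phy = fake_register(1);

    if (IS_ERR(phy))                    /* a NULL check would never trigger */
        printf("register failed: %ld\n", PTR_ERR(phy));
    else
        printf("registered ok\n");
    return 0;
}
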
@@ -196,7 +196,7 @@ static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
     unsigned long offset;

     for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
-        asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
+        asm("dcbf %0,%1,1" :: "b" (addr), "r" (offset));
 }

 /* replenish the buffers for a pool. note that we don't need to

@@ -353,7 +353,8 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
     /* Receive Packet Data Buffer Size.
      * The Packet Data Buffer Size is defined in 128 byte units.
      */
-    rlan_ctx.dbuf = ring->rx_buf_len >> ICE_RLAN_CTX_DBUF_S;
+    rlan_ctx.dbuf = DIV_ROUND_UP(ring->rx_buf_len,
+                     BIT_ULL(ICE_RLAN_CTX_DBUF_S));

     /* use 32 byte descriptors */
     rlan_ctx.dsize = 1;

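The ice hunk above replaces a right shift with DIV_ROUND_UP() when converting the Rx buffer length into the 128-byte units the hardware expects; the two only differ when the length is not an exact multiple of 128. A standalone sketch of that arithmetic follows — the 3000-byte value is made up for illustration, and the upstream commit should be consulted for the driver-specific consequences.

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned int rx_buf_len = 3000;                       /* hypothetical length */
    unsigned int shifted = rx_buf_len >> 7;               /* truncates: 23 units  */
    unsigned int rounded = DIV_ROUND_UP(rx_buf_len, 128); /* rounds up: 24 units  */

    printf("shift: %u units = %u bytes\n", shifted, shifted * 128);
    printf("DIV_ROUND_UP: %u units = %u bytes\n", rounded, rounded * 128);
    return 0;
}
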
@@ -1262,18 +1262,6 @@ void igb_ptp_init(struct igb_adapter *adapter)
         return;
     }

-    spin_lock_init(&adapter->tmreg_lock);
-    INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
-
-    if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
-        INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
-                  igb_ptp_overflow_check);
-
-    adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-    adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
-
-    igb_ptp_reset(adapter);
-
     adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
                         &adapter->pdev->dev);
     if (IS_ERR(adapter->ptp_clock)) {
@@ -1283,6 +1271,18 @@
         dev_info(&adapter->pdev->dev, "added PHC on %s\n",
              adapter->netdev->name);
         adapter->ptp_flags |= IGB_PTP_ENABLED;
+
+        spin_lock_init(&adapter->tmreg_lock);
+        INIT_WORK(&adapter->ptp_tx_work, igb_ptp_tx_work);
+
+        if (adapter->ptp_flags & IGB_PTP_OVERFLOW_CHECK)
+            INIT_DELAYED_WORK(&adapter->ptp_overflow_work,
+                      igb_ptp_overflow_check);
+
+        adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+        adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+        igb_ptp_reset(adapter);
     }
 }

Some files were not shown because too many files have changed in this diff.