drm/i915: Coordinate i915_active with its own mutex
Forgo the struct_mutex serialisation for i915_active, and interpose its own mutex handling for active/retire.

This is a multi-layered sleight-of-hand. First, we had to ensure that no active/retire callbacks accidentally inverted the mutex ordering rules, nor assumed that they were themselves serialised by struct_mutex. More challenging though, is the rule over updating elements of the active rbtree. Instead of the whole i915_active now being serialised by struct_mutex, allocations/rotations of the tree are serialised by the i915_active.mutex and individual nodes are serialised by the caller using the i915_timeline.mutex (we need to use nested spinlocks to interact with the dma_fence callback lists).

The pain point here is that instead of a single mutex around execbuf, we now have to take a mutex for active tracker (one for each vma, context, etc) and a couple of spinlocks for each fence update. The improvement in fine grained locking allowing for multiple concurrent clients (eventually!) should be worth it in typical loads.

v2: Add some comments that barely elucidate anything :(

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20191004134015.13204-6-chris@chris-wilson.co.uk
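As the hunks below show, the tracker also loses its struct drm_i915_private argument and tracks work through the request's embedded dma_fence. A minimal, hedged sketch of the resulting call shape, assuming kernel context and using only names that appear in this diff (the example() wrapper and the my_* callbacks are hypothetical):

/* Hedged sketch only -- not part of the patch. */
#include "i915_active.h"

static int my_active(struct i915_active *ref) { return 0; }  /* hypothetical acquire hook */
static void my_retire(struct i915_active *ref) { }           /* hypothetical retire hook */

static void example(struct i915_active *ref, struct i915_request *rq)
{
	/* The dev_priv/i915 argument is gone from the init macro. */
	i915_active_init(ref, my_active, my_retire);

	/*
	 * Requests are tracked via their embedded fence:
	 * i915_active_add_request() now forwards &rq->fence to i915_active_ref().
	 */
	i915_active_add_request(ref, rq);
}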
@@ -257,7 +257,7 @@ intel_frontbuffer_get(struct drm_i915_gem_object *obj)
 	front->obj = obj;
 	kref_init(&front->ref);
 	atomic_set(&front->bits, 0);
-	i915_active_init(i915, &front->write,
+	i915_active_init(&front->write,
 			 frontbuffer_active,
 			 i915_active_may_sleep(frontbuffer_retire));
@@ -1360,8 +1360,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
 	overlay->contrast = 75;
 	overlay->saturation = 146;
 
-	i915_active_init(dev_priv,
-			 &overlay->last_flip,
+	i915_active_init(&overlay->last_flip,
 			 NULL, intel_overlay_last_flip_retire);
 
 	ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(dev_priv));
@@ -868,20 +868,18 @@ static int context_barrier_task(struct i915_gem_context *ctx,
 				void (*task)(void *data),
 				void *data)
 {
-	struct drm_i915_private *i915 = ctx->i915;
 	struct context_barrier_task *cb;
 	struct i915_gem_engines_iter it;
 	struct intel_context *ce;
 	int err = 0;
 
-	lockdep_assert_held(&i915->drm.struct_mutex);
 	GEM_BUG_ON(!task);
 
 	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
 	if (!cb)
 		return -ENOMEM;
 
-	i915_active_init(i915, &cb->base, NULL, cb_retire);
+	i915_active_init(&cb->base, NULL, cb_retire);
 	err = i915_active_acquire(&cb->base);
 	if (err) {
 		kfree(cb);
@@ -8,6 +8,7 @@
 #define __I915_GEM_OBJECT_TYPES_H__
 
 #include <drm/drm_gem.h>
 #include <uapi/drm/i915_drm.h>
 
+#include "i915_active.h"
 #include "i915_selftest.h"
@@ -16,14 +16,11 @@ static void call_idle_barriers(struct intel_engine_cs *engine)
 	struct llist_node *node, *next;
 
 	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
-		struct i915_active_request *active =
+		struct dma_fence_cb *cb =
 			container_of((struct list_head *)node,
-				     typeof(*active), link);
+				     typeof(*cb), node);
 
-		INIT_LIST_HEAD(&active->link);
-		RCU_INIT_POINTER(active->request, NULL);
-
-		active->retire(active, NULL);
+		cb->func(NULL, cb);
 	}
 }
@@ -240,7 +240,7 @@ intel_context_init(struct intel_context *ce,
 
 	mutex_init(&ce->pin_mutex);
 
-	i915_active_init(ctx->i915, &ce->active,
+	i915_active_init(&ce->active,
 			 __intel_context_active, __intel_context_retire);
 }
@@ -307,7 +307,7 @@ int intel_context_prepare_remote_request(struct intel_context *ce,
 		return err;
 
 	/* Queue this switch after current activity by this context. */
-	err = i915_active_request_set(&tl->last_request, rq);
+	err = i915_active_fence_set(&tl->last_request, rq);
 	mutex_unlock(&tl->mutex);
 	if (err)
 		return err;
@@ -95,7 +95,7 @@ node_create(struct intel_engine_pool *pool, size_t sz)
 		return ERR_PTR(-ENOMEM);
 
 	node->pool = pool;
-	i915_active_init(engine->i915, &node->active, pool_active, pool_retire);
+	i915_active_init(&node->active, pool_active, pool_retire);
 
 	obj = i915_gem_object_create_internal(engine->i915, sz);
 	if (IS_ERR(obj)) {
@@ -844,10 +844,10 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 	 */
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct i915_request *rq;
+		struct dma_fence *fence;
 
-		rq = i915_active_request_get_unlocked(&tl->last_request);
-		if (!rq)
+		fence = i915_active_fence_get(&tl->last_request);
+		if (!fence)
			continue;
 
 		spin_unlock_irqrestore(&timelines->lock, flags);
@@ -859,8 +859,8 @@ static bool __intel_gt_unset_wedged(struct intel_gt *gt)
 		 * (I915_FENCE_TIMEOUT) so this wait should not be unbounded
 		 * in the worst case.
 		 */
-		dma_fence_default_wait(&rq->fence, false, MAX_SCHEDULE_TIMEOUT);
-		i915_request_put(rq);
+		dma_fence_default_wait(fence, false, MAX_SCHEDULE_TIMEOUT);
+		dma_fence_put(fence);
 
 		/* Restart iteration after droping lock */
 		spin_lock_irqsave(&timelines->lock, flags);
@@ -178,8 +178,7 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline)
 	cl->hwsp = hwsp;
 	cl->vaddr = page_pack_bits(vaddr, cacheline);
 
-	i915_active_init(hwsp->gt->i915, &cl->active,
-			 __cacheline_active, __cacheline_retire);
+	i915_active_init(&cl->active, __cacheline_active, __cacheline_retire);
 
 	return cl;
 }
@@ -255,7 +254,7 @@ int intel_timeline_init(struct intel_timeline *timeline,
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
@@ -443,7 +442,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
 	 * free it after the current request is retired, which ensures that
 	 * all writes into the cacheline from previous requests are complete.
 	 */
-	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, rq);
+	err = i915_active_ref(&tl->hwsp_cacheline->active, tl, &rq->fence);
 	if (err)
 		goto err_cacheline;
@@ -58,12 +58,13 @@ struct intel_timeline {
 	 */
 	struct list_head requests;
 
-	/* Contains an RCU guarded pointer to the last request. No reference is
+	/*
+	 * Contains an RCU guarded pointer to the last request. No reference is
 	 * held to the request, users must carefully acquire a reference to
-	 * the request using i915_active_request_get_request_rcu(), or hold the
-	 * struct_mutex.
+	 * the request using i915_active_fence_get(), or manage the RCU
+	 * protection themselves (cf the i915_active_fence API).
 	 */
-	struct i915_active_request last_request;
+	struct i915_active_fence last_request;
 
 	/**
 	 * We track the most recent seqno that we wait on in every context so
@@ -47,24 +47,20 @@ static int context_sync(struct intel_context *ce)
 
 	mutex_lock(&tl->mutex);
 	do {
-		struct i915_request *rq;
+		struct dma_fence *fence;
 		long timeout;
 
-		rcu_read_lock();
-		rq = rcu_dereference(tl->last_request.request);
-		if (rq)
-			rq = i915_request_get_rcu(rq);
-		rcu_read_unlock();
-		if (!rq)
+		fence = i915_active_fence_get(&tl->last_request);
+		if (!fence)
 			break;
 
-		timeout = i915_request_wait(rq, 0, HZ / 10);
+		timeout = dma_fence_wait_timeout(fence, false, HZ / 10);
 		if (timeout < 0)
 			err = timeout;
 		else
-			i915_request_retire_upto(rq);
+			i915_request_retire_upto(to_request(fence));
 
-		i915_request_put(rq);
+		dma_fence_put(fence);
 	} while (!err);
 	mutex_unlock(&tl->mutex);
@@ -1172,9 +1172,13 @@ static struct i915_request *dummy_request(struct intel_engine_cs *engine)
 	if (!rq)
 		return NULL;
 
-	INIT_LIST_HEAD(&rq->active_list);
 	rq->engine = engine;
 
+	spin_lock_init(&rq->lock);
+	INIT_LIST_HEAD(&rq->fence.cb_list);
+	rq->fence.lock = &rq->lock;
+	rq->fence.ops = &i915_fence_ops;
+
 	i915_sched_node_init(&rq->sched);
 
 	/* mark this request as permanently incomplete */
@@ -1267,8 +1271,8 @@ static int live_suppress_wait_preempt(void *arg)
 		}
 
 		/* Disable NEWCLIENT promotion */
-		__i915_active_request_set(&i915_request_timeline(rq[i])->last_request,
-					  dummy);
+		__i915_active_fence_set(&i915_request_timeline(rq[i])->last_request,
+					&dummy->fence);
 		i915_request_add(rq[i]);
 	}
@@ -15,7 +15,7 @@ void mock_timeline_init(struct intel_timeline *timeline, u64 context)
 
 	mutex_init(&timeline->mutex);
 
-	INIT_ACTIVE_REQUEST(&timeline->last_request, &timeline->mutex);
+	INIT_ACTIVE_FENCE(&timeline->last_request, &timeline->mutex);
 	INIT_LIST_HEAD(&timeline->requests);
 
 	i915_syncmap_init(&timeline->sync);
@@ -385,11 +385,8 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
 	struct intel_vgpu_submission *s = &vgpu->submission;
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct i915_request *rq;
 
-	lockdep_assert_held(&dev_priv->drm.struct_mutex);
-
 	if (workload->req)
 		return 0;
File diff suppressed because it is too large
@@ -12,6 +12,10 @@
 #include "i915_active_types.h"
 #include "i915_request.h"
 
+struct i915_request;
+struct intel_engine_cs;
+struct intel_timeline;
+
 /*
  * We treat requests as fences. This is not be to confused with our
  * "fence registers" but pipeline synchronisation objects ala GL_ARB_sync.
@@ -28,308 +32,108 @@
  * write access so that we can perform concurrent read operations between
  * the CPU and GPU engines, as well as waiting for all rendering to
  * complete, or waiting for the last GPU user of a "fence register". The
- * object then embeds a #i915_active_request to track the most recent (in
+ * object then embeds a #i915_active_fence to track the most recent (in
  * retirement order) request relevant for the desired mode of access.
- * The #i915_active_request is updated with i915_active_request_set() to
+ * The #i915_active_fence is updated with i915_active_fence_set() to
 * track the most recent fence request, typically this is done as part of
 * i915_vma_move_to_active().
 *
- * When the #i915_active_request completes (is retired), it will
+ * When the #i915_active_fence completes (is retired), it will
 * signal its completion to the owner through a callback as well as mark
- * itself as idle (i915_active_request.request == NULL). The owner
+ * itself as idle (i915_active_fence.request == NULL). The owner
 * can then perform any action, such as delayed freeing of an active
 * resource including itself.
 */
 
-void i915_active_retire_noop(struct i915_active_request *active,
-			     struct i915_request *request);
+void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb);
 
 /**
- * i915_active_request_init - prepares the activity tracker for use
+ * __i915_active_fence_init - prepares the activity tracker for use
 * @active - the active tracker
- * @rq - initial request to track, can be NULL
+ * @fence - initial fence to track, can be NULL
 * @func - a callback when then the tracker is retired (becomes idle),
 *	   can be NULL
 *
- * i915_active_request_init() prepares the embedded @active struct for use as
- * an activity tracker, that is for tracking the last known active request
- * associated with it. When the last request becomes idle, when it is retired
+ * i915_active_fence_init() prepares the embedded @active struct for use as
+ * an activity tracker, that is for tracking the last known active fence
+ * associated with it. When the last fence becomes idle, when it is retired
 * after completion, the optional callback @func is invoked.
 */
 static inline void
-i915_active_request_init(struct i915_active_request *active,
+__i915_active_fence_init(struct i915_active_fence *active,
			 struct mutex *lock,
-			 struct i915_request *rq,
-			 i915_active_retire_fn retire)
+			 void *fence,
+			 dma_fence_func_t fn)
 {
-	RCU_INIT_POINTER(active->request, rq);
-	INIT_LIST_HEAD(&active->link);
-	active->retire = retire ?: i915_active_retire_noop;
+	RCU_INIT_POINTER(active->fence, fence);
+	active->cb.func = fn ?: i915_active_noop;
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	active->lock = lock;
 #endif
 }
 
-#define INIT_ACTIVE_REQUEST(name, lock) \
-	i915_active_request_init((name), (lock), NULL, NULL)
+#define INIT_ACTIVE_FENCE(A, LOCK) \
+	__i915_active_fence_init((A), (LOCK), NULL, NULL)
+struct dma_fence *
+__i915_active_fence_set(struct i915_active_fence *active,
+			struct dma_fence *fence);
+
 /**
- * i915_active_request_set - updates the tracker to watch the current request
+ * i915_active_fence_set - updates the tracker to watch the current fence
 * @active - the active tracker
- * @request - the request to watch
+ * @rq - the request to watch
 *
- * __i915_active_request_set() watches the given @request for completion. Whilst
- * that @request is busy, the @active reports busy. When that @request is
- * retired, the @active tracker is updated to report idle.
+ * i915_active_fence_set() watches the given @rq for completion. While
+ * that @rq is busy, the @active reports busy. When that @rq is signaled
+ * (or else retired) the @active tracker is updated to report idle.
 */
-static inline void
-__i915_active_request_set(struct i915_active_request *active,
-			  struct i915_request *request)
-{
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-	lockdep_assert_held(active->lock);
-#endif
-	list_move(&active->link, &request->active_list);
-	rcu_assign_pointer(active->request, request);
-}
-
 int __must_check
-i915_active_request_set(struct i915_active_request *active,
-			struct i915_request *rq);
-
+i915_active_fence_set(struct i915_active_fence *active,
+		      struct i915_request *rq);
 /**
- * i915_active_request_raw - return the active request
+ * i915_active_fence_get - return a reference to the active fence
 * @active - the active tracker
 *
- * i915_active_request_raw() returns the current request being tracked, or NULL.
- * It does not obtain a reference on the request for the caller, so the caller
- * must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_raw(const struct i915_active_request *active,
-			struct mutex *mutex)
-{
-	return rcu_dereference_protected(active->request,
-					 lockdep_is_held(mutex));
-}
-
-/**
- * i915_active_request_peek - report the active request being monitored
- * @active - the active tracker
- *
- * i915_active_request_peek() returns the current request being tracked if
- * still active, or NULL. It does not obtain a reference on the request
- * for the caller, so the caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_peek(const struct i915_active_request *active,
-			 struct mutex *mutex)
-{
-	struct i915_request *request;
-
-	request = i915_active_request_raw(active, mutex);
-	if (!request || i915_request_completed(request))
-		return NULL;
-
-	return request;
-}
-
-/**
- * i915_active_request_get - return a reference to the active request
- * @active - the active tracker
- *
- * i915_active_request_get() returns a reference to the active request, or NULL
- * if the active tracker is idle. The caller must hold struct_mutex.
- */
-static inline struct i915_request *
-i915_active_request_get(const struct i915_active_request *active,
-			struct mutex *mutex)
-{
-	return i915_request_get(i915_active_request_peek(active, mutex));
-}
-
-/**
- * __i915_active_request_get_rcu - return a reference to the active request
- * @active - the active tracker
- *
- * __i915_active_request_get() returns a reference to the active request,
- * or NULL if the active tracker is idle. The caller must hold the RCU read
- * lock, but the returned pointer is safe to use outside of RCU.
- */
-static inline struct i915_request *
-__i915_active_request_get_rcu(const struct i915_active_request *active)
-{
-	/*
-	 * Performing a lockless retrieval of the active request is super
-	 * tricky. SLAB_TYPESAFE_BY_RCU merely guarantees that the backing
-	 * slab of request objects will not be freed whilst we hold the
-	 * RCU read lock. It does not guarantee that the request itself
-	 * will not be freed and then *reused*. Viz,
-	 *
-	 *	Thread A		Thread B
-	 *
-	 *	rq = active.request
-	 *				retire(rq) -> free(rq);
-	 *				(rq is now first on the slab freelist)
-	 *				active.request = NULL
-	 *
-	 *				rq = new submission on a new object
-	 *	ref(rq)
-	 *
-	 * To prevent the request from being reused whilst the caller
-	 * uses it, we take a reference like normal. Whilst acquiring
-	 * the reference we check that it is not in a destroyed state
-	 * (refcnt == 0). That prevents the request being reallocated
-	 * whilst the caller holds on to it. To check that the request
-	 * was not reallocated as we acquired the reference we have to
-	 * check that our request remains the active request across
-	 * the lookup, in the same manner as a seqlock. The visibility
-	 * of the pointer versus the reference counting is controlled
-	 * by using RCU barriers (rcu_dereference and rcu_assign_pointer).
-	 *
-	 * In the middle of all that, we inspect whether the request is
-	 * complete. Retiring is lazy so the request may be completed long
-	 * before the active tracker is updated. Querying whether the
-	 * request is complete is far cheaper (as it involves no locked
-	 * instructions setting cachelines to exclusive) than acquiring
-	 * the reference, so we do it first. The RCU read lock ensures the
-	 * pointer dereference is valid, but does not ensure that the
-	 * seqno nor HWS is the right one! However, if the request was
-	 * reallocated, that means the active tracker's request was complete.
-	 * If the new request is also complete, then both are and we can
-	 * just report the active tracker is idle. If the new request is
-	 * incomplete, then we acquire a reference on it and check that
-	 * it remained the active request.
-	 *
-	 * It is then imperative that we do not zero the request on
-	 * reallocation, so that we can chase the dangling pointers!
-	 * See i915_request_alloc().
-	 */
-	do {
-		struct i915_request *request;
-
-		request = rcu_dereference(active->request);
-		if (!request || i915_request_completed(request))
-			return NULL;
-
-		/*
-		 * An especially silly compiler could decide to recompute the
-		 * result of i915_request_completed, more specifically
-		 * re-emit the load for request->fence.seqno. A race would catch
-		 * a later seqno value, which could flip the result from true to
-		 * false. Which means part of the instructions below might not
-		 * be executed, while later on instructions are executed. Due to
-		 * barriers within the refcounting the inconsistency can't reach
-		 * past the call to i915_request_get_rcu, but not executing
-		 * that while still executing i915_request_put() creates
-		 * havoc enough.  Prevent this with a compiler barrier.
-		 */
-		barrier();
-
-		request = i915_request_get_rcu(request);
-
-		/*
-		 * What stops the following rcu_access_pointer() from occurring
-		 * before the above i915_request_get_rcu()? If we were
-		 * to read the value before pausing to get the reference to
-		 * the request, we may not notice a change in the active
-		 * tracker.
-		 *
-		 * The rcu_access_pointer() is a mere compiler barrier, which
-		 * means both the CPU and compiler are free to perform the
-		 * memory read without constraint. The compiler only has to
-		 * ensure that any operations after the rcu_access_pointer()
-		 * occur afterwards in program order. This means the read may
-		 * be performed earlier by an out-of-order CPU, or adventurous
-		 * compiler.
-		 *
-		 * The atomic operation at the heart of
-		 * i915_request_get_rcu(), see dma_fence_get_rcu(), is
-		 * atomic_inc_not_zero() which is only a full memory barrier
-		 * when successful. That is, if i915_request_get_rcu()
-		 * returns the request (and so with the reference counted
-		 * incremented) then the following read for rcu_access_pointer()
-		 * must occur after the atomic operation and so confirm
-		 * that this request is the one currently being tracked.
-		 *
-		 * The corresponding write barrier is part of
-		 * rcu_assign_pointer().
-		 */
-		if (!request || request == rcu_access_pointer(active->request))
-			return rcu_pointer_handoff(request);
-
-		i915_request_put(request);
-	} while (1);
-}
-
 /**
- * i915_active_request_get_unlocked - return a reference to the active request
+ * i915_active_fence_get - return a reference to the active fence
 * @active - the active tracker
 *
- * i915_active_request_get_unlocked() returns a reference to the active request,
+ * i915_active_fence_get() returns a reference to the active fence,
 * or NULL if the active tracker is idle. The reference is obtained under RCU,
 * so no locking is required by the caller.
 *
- * The reference should be freed with i915_request_put().
+ * The reference should be freed with dma_fence_put().
 */
-static inline struct i915_request *
-i915_active_request_get_unlocked(const struct i915_active_request *active)
+static inline struct dma_fence *
+i915_active_fence_get(struct i915_active_fence *active)
 {
-	struct i915_request *request;
+	struct dma_fence *fence;
 
	rcu_read_lock();
-	request = __i915_active_request_get_rcu(active);
+	fence = dma_fence_get_rcu_safe(&active->fence);
	rcu_read_unlock();
 
-	return request;
+	return fence;
 }
 
 /**
- * i915_active_request_isset - report whether the active tracker is assigned
+ * i915_active_fence_isset - report whether the active tracker is assigned
 * @active - the active tracker
 *
- * i915_active_request_isset() returns true if the active tracker is currently
- * assigned to a request. Due to the lazy retiring, that request may be idle
+ * i915_active_fence_isset() returns true if the active tracker is currently
+ * assigned to a fence. Due to the lazy retiring, that fence may be idle
 * and this may report stale information.
 */
 static inline bool
-i915_active_request_isset(const struct i915_active_request *active)
+i915_active_fence_isset(const struct i915_active_fence *active)
 {
-	return rcu_access_pointer(active->request);
+	return rcu_access_pointer(active->fence);
 }
 
-/**
- * i915_active_request_retire - waits until the request is retired
- * @active - the active request on which to wait
- *
- * i915_active_request_retire() waits until the request is completed,
- * and then ensures that at least the retirement handler for this
- * @active tracker is called before returning. If the @active
- * tracker is idle, the function returns immediately.
- */
-static inline int __must_check
-i915_active_request_retire(struct i915_active_request *active,
-			   struct mutex *mutex)
+static inline void
+i915_active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 {
-	struct i915_request *request;
-	long ret;
+	struct i915_active_fence *active =
+		container_of(cb, typeof(*active), cb);
 
-	request = i915_active_request_raw(active, mutex);
-	if (!request)
-		return 0;
-
-	ret = i915_request_wait(request,
-				I915_WAIT_INTERRUPTIBLE,
-				MAX_SCHEDULE_TIMEOUT);
-	if (ret < 0)
-		return ret;
-
-	list_del_init(&active->link);
-	RCU_INIT_POINTER(active->request, NULL);
-
-	active->retire(active, request);
-
-	return 0;
+	RCU_INIT_POINTER(active->fence, NULL);
 }
 
 /*
@@ -358,47 +162,40 @@ i915_active_request_retire(struct i915_active_request *active,
 * synchronisation.
 */
 
-void __i915_active_init(struct drm_i915_private *i915,
-			struct i915_active *ref,
+void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *key);
-#define i915_active_init(i915, ref, active, retire) do {		\
+#define i915_active_init(ref, active, retire) do {			\
	static struct lock_class_key __key;				\
									\
-	__i915_active_init(i915, ref, active, retire, &__key);		\
+	__i915_active_init(ref, active, retire, &__key);		\
 } while (0)
 
 int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
-		    struct i915_request *rq);
+		    struct dma_fence *fence);
 
 static inline int
 i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
 {
-	return i915_active_ref(ref, i915_request_timeline(rq), rq);
+	return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
 }
 
 void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
 
 static inline bool i915_active_has_exclusive(struct i915_active *ref)
 {
-	return rcu_access_pointer(ref->excl);
+	return rcu_access_pointer(ref->excl.fence);
 }
 
 int i915_active_wait(struct i915_active *ref);
 
-int i915_request_await_active(struct i915_request *rq,
-			      struct i915_active *ref);
-int i915_request_await_active_request(struct i915_request *rq,
-				      struct i915_active_request *active);
+int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
 
 int i915_active_acquire(struct i915_active *ref);
+bool i915_active_acquire_if_busy(struct i915_active *ref);
 void i915_active_release(struct i915_active *ref);
-void __i915_active_release_nested(struct i915_active *ref, int subclass);
-
-bool i915_active_trygrab(struct i915_active *ref);
-void i915_active_ungrab(struct i915_active *ref);
 
 static inline bool
 i915_active_is_idle(const struct i915_active *ref)
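For orientation, a hedged sketch of the consumer pattern the reworked header above documents, mirroring the context_sync() and __intel_gt_unset_wedged() hunks elsewhere in this diff; the "tl" pointer (an intel_timeline, as in those hunks) is assumed:

/* Illustrative fragment only; assumes kernel context and a valid "tl". */
struct dma_fence *fence;

fence = i915_active_fence_get(&tl->last_request); /* RCU-safe reference, or NULL if idle */
if (fence) {
	dma_fence_wait_timeout(fence, false, MAX_SCHEDULE_TIMEOUT);
	dma_fence_put(fence); /* released with dma_fence_put(), not i915_request_put() */
}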
@@ -17,17 +17,9 @@
 
 #include "i915_utils.h"
 
-struct drm_i915_private;
-struct i915_active_request;
-struct i915_request;
-
-typedef void (*i915_active_retire_fn)(struct i915_active_request *,
-				      struct i915_request *);
-
-struct i915_active_request {
-	struct i915_request __rcu *request;
-	struct list_head link;
-	i915_active_retire_fn retire;
+struct i915_active_fence {
+	struct dma_fence __rcu *fence;
+	struct dma_fence_cb cb;
 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	/*
	 * Incorporeal!
@@ -53,20 +45,17 @@ struct active_node;
 #define i915_active_may_sleep(fn) ptr_pack_bits(&(fn), I915_ACTIVE_MAY_SLEEP, 2)
 
 struct i915_active {
-	struct drm_i915_private *i915;
+	atomic_t count;
+	struct mutex mutex;
 
	struct active_node *cache;
	struct rb_root tree;
-	struct mutex mutex;
-	atomic_t count;
 
	/* Preallocated "exclusive" node */
-	struct dma_fence __rcu *excl;
-	struct dma_fence_cb excl_cb;
+	struct i915_active_fence excl;
 
	unsigned long flags;
 #define I915_ACTIVE_RETIRE_SLEEPS BIT(0)
-#define I915_ACTIVE_GRAB_BIT 1
 
	int (*active)(struct i915_active *ref);
	void (*retire)(struct i915_active *ref);
@@ -892,28 +892,38 @@ wait_for_timelines(struct intel_gt *gt, unsigned int wait, long timeout)
 
 	spin_lock_irqsave(&timelines->lock, flags);
 	list_for_each_entry(tl, &timelines->active_list, link) {
-		struct i915_request *rq;
+		struct dma_fence *fence;
 
-		rq = i915_active_request_get_unlocked(&tl->last_request);
-		if (!rq)
+		fence = i915_active_fence_get(&tl->last_request);
+		if (!fence)
			continue;
 
		spin_unlock_irqrestore(&timelines->lock, flags);
 
-		/*
-		 * "Race-to-idle".
-		 *
-		 * Switching to the kernel context is often used a synchronous
-		 * step prior to idling, e.g. in suspend for flushing all
-		 * current operations to memory before sleeping. These we
-		 * want to complete as quickly as possible to avoid prolonged
-		 * stalls, so allow the gpu to boost to maximum clocks.
-		 */
-		if (wait & I915_WAIT_FOR_IDLE_BOOST)
-			gen6_rps_boost(rq);
+		if (!dma_fence_is_i915(fence)) {
+			timeout = dma_fence_wait_timeout(fence,
							 flags & I915_WAIT_INTERRUPTIBLE,
							 timeout);
+		} else {
+			struct i915_request *rq = to_request(fence);
 
-		timeout = i915_request_wait(rq, wait, timeout);
-		i915_request_put(rq);
+			/*
+			 * "Race-to-idle".
+			 *
+			 * Switching to the kernel context is often used as
+			 * a synchronous step prior to idling, e.g. in suspend
+			 * for flushing all current operations to memory before
+			 * sleeping. These we want to complete as quickly as
+			 * possible to avoid prolonged stalls, so allow the gpu
+			 * to boost to maximum clocks.
+			 */
+			if (flags & I915_WAIT_FOR_IDLE_BOOST)
+				gen6_rps_boost(rq);
+
+			timeout = i915_request_wait(rq, flags, timeout);
+		}
+
+		dma_fence_put(fence);
		if (timeout < 0)
			return timeout;
@@ -1861,7 +1861,6 @@ static const struct i915_vma_ops pd_vma_ops = {
 
 static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 {
-	struct drm_i915_private *i915 = ppgtt->base.vm.i915;
 	struct i915_ggtt *ggtt = ppgtt->base.vm.gt->ggtt;
 	struct i915_vma *vma;
@@ -1872,7 +1871,7 @@ static struct i915_vma *pd_vma_create(struct gen6_ppgtt *ppgtt, int size)
 	if (!vma)
 		return ERR_PTR(-ENOMEM);
 
-	i915_active_init(i915, &vma->active, NULL, NULL);
+	i915_active_init(&vma->active, NULL, NULL);
 
 	mutex_init(&vma->pages_mutex);
 	vma->vm = i915_vm_get(&ggtt->vm);
@@ -1299,7 +1299,7 @@ capture_vma(struct capture_vma *next,
 	if (!c)
 		return next;
 
-	if (!i915_active_trygrab(&vma->active)) {
+	if (!i915_active_acquire_if_busy(&vma->active)) {
 		kfree(c);
 		return next;
 	}
@@ -1439,7 +1439,7 @@ gem_record_rings(struct i915_gpu_state *error, struct compress *compress)
 			*this->slot =
 				i915_error_object_create(i915, vma, compress);
 
-			i915_active_ungrab(&vma->active);
+			i915_active_release(&vma->active);
 			i915_vma_put(vma);
 
 			capture = this->next;
Some files were not shown because too many files have changed in this diff.