dma-buf: add enum dma_resv_usage v4
This change adds the dma_resv_usage enum and allows us to specify why a
dma_resv object is queried for its containing fences.

In addition, a dma_resv_usage_rw() helper function is added to aid
retrieving the fences for a read or write userspace submission.

This is then deployed to the different query functions of the dma_resv
object and all of their users. Where the write parameter was previously
true we now use DMA_RESV_USAGE_READ, and DMA_RESV_USAGE_WRITE otherwise.
v2: add KERNEL/OTHER in separate patch
v3: some kerneldoc suggestions by Daniel
v4: some more kerneldoc suggestions by Daniel, fix missing cases lost in
the rebase pointed out by Bas.
Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/msgid/20220407085946.744568-2-christian.koenig@amd.com
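The header change that actually introduces the enum and the helper (in include/linux/dma-resv.h) is among the files not shown on this page. As a rough sketch only, based on the commit description above (this revision adds just WRITE and READ; the KERNEL/OTHER levels are split out into a separate patch per the v2 note), the addition looks approximately like this:

/* Sketch only -- not the verbatim header change from this commit. */
enum dma_resv_usage {
	/* Query only the fences protecting against a write, i.e. the exclusive fence. */
	DMA_RESV_USAGE_WRITE,
	/* Query all fences, including the shared/read fences. */
	DMA_RESV_USAGE_READ,
};

/*
 * Map the old "write" boolean of a userspace submission onto a usage:
 * a new write has to wait for all existing readers (query READ), while
 * a new read only has to wait for existing writers (query WRITE).
 */
static inline enum dma_resv_usage dma_resv_usage_rw(bool write)
{
	return write ? DMA_RESV_USAGE_READ : DMA_RESV_USAGE_WRITE;
}

This mapping is why, in the hunks below, query call sites that used to pass true now pass DMA_RESV_USAGE_READ, those that passed false now pass DMA_RESV_USAGE_WRITE, and userspace read/write paths go through dma_resv_usage_rw().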
@@ -216,7 +216,8 @@ static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
 	struct dma_fence *fence;
 	int r;
 
-	dma_resv_for_each_fence(&cursor, resv, write, fence) {
+	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+				fence) {
 		dma_fence_get(fence);
 		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
 		if (!r)
@@ -1124,7 +1125,8 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = dma_resv_wait_timeout(resv, write, true, MAX_SCHEDULE_TIMEOUT);
+	ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+				    true, MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
 
@@ -384,7 +384,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
 	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
 	cursor->index = -1;
 	cursor->shared_count = 0;
-	if (cursor->all_fences) {
+	if (cursor->usage >= DMA_RESV_USAGE_READ) {
 		cursor->fences = dma_resv_shared_list(cursor->obj);
 		if (cursor->fences)
 			cursor->shared_count = cursor->fences->shared_count;
@@ -496,7 +496,7 @@ struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
 	dma_resv_assert_held(cursor->obj);
 
 	cursor->index = 0;
-	if (cursor->all_fences)
+	if (cursor->usage >= DMA_RESV_USAGE_READ)
 		cursor->fences = dma_resv_shared_list(cursor->obj);
 	else
 		cursor->fences = NULL;
@@ -551,7 +551,7 @@ int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 	list = NULL;
 	excl = NULL;
 
-	dma_resv_iter_begin(&cursor, src, true);
+	dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, f) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
@@ -597,7 +597,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * dma_resv_get_fences - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  * @num_fences: the number of fences returned
  * @fences: the array of fence ptrs returned (array is krealloc'd to the
  * required size, and must be freed by caller)
@@ -605,7 +605,7 @@ EXPORT_SYMBOL(dma_resv_copy_fences);
  * Retrieve all fences from the reservation object.
  * Returns either zero or -ENOMEM.
  */
-int dma_resv_get_fences(struct dma_resv *obj, bool write,
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
 			unsigned int *num_fences, struct dma_fence ***fences)
 {
 	struct dma_resv_iter cursor;
@@ -614,7 +614,7 @@ int dma_resv_get_fences(struct dma_resv *obj, bool write,
 	*num_fences = 0;
 	*fences = NULL;
 
-	dma_resv_iter_begin(&cursor, obj, write);
+	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		if (dma_resv_iter_is_restarted(&cursor)) {
@@ -646,7 +646,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
 /**
  * dma_resv_get_singleton - Get a single fence for all the fences
  * @obj: the reservation object
- * @write: true if we should return all fences
+ * @usage: controls which fences to include, see enum dma_resv_usage.
  * @fence: the resulting fence
  *
  * Get a single fence representing all the fences inside the resv object.
@@ -658,7 +658,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_fences);
  *
  * Returns 0 on success and negative error values on failure.
  */
-int dma_resv_get_singleton(struct dma_resv *obj, bool write,
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
 			   struct dma_fence **fence)
 {
 	struct dma_fence_array *array;
@@ -666,7 +666,7 @@ int dma_resv_get_singleton(struct dma_resv *obj, bool write,
 	unsigned count;
 	int r;
 
-	r = dma_resv_get_fences(obj, write, &count, &fences);
+	r = dma_resv_get_fences(obj, usage, &count, &fences);
 	if (r)
 		return r;
 
@@ -700,7 +700,7 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
 * dma_resv_wait_timeout - Wait on reservation's objects
 * shared and/or exclusive fences.
 * @obj: the reservation object
- * @wait_all: if true, wait on all fences, else wait on just exclusive fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
@@ -710,14 +710,14 @@ EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zer on success.
 */
-long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
-			   unsigned long timeout)
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+			   bool intr, unsigned long timeout)
 {
 	long ret = timeout ? timeout : 1;
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, obj, wait_all);
+	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 
 		ret = dma_fence_wait_timeout(fence, intr, ret);
@@ -737,8 +737,7 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
- * @test_all: if true, test all fences, otherwise only test the exclusive
- * fence
+ * @usage: controls which fences to include, see enum dma_resv_usage.
 *
 * Callers are not required to hold specific locks, but maybe hold
 * dma_resv_lock() already.
@@ -747,12 +746,12 @@ EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
 *
 * True if all fences signaled, else false.
 */
-bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_iter_begin(&cursor, obj, test_all);
+	dma_resv_iter_begin(&cursor, obj, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		dma_resv_iter_end(&cursor);
 		return false;
@@ -775,7 +774,7 @@ void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_for_each_fence(&cursor, obj, true, fence) {
+	dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
 		seq_printf(seq, "\t%s fence:",
 			   dma_resv_iter_is_exclusive(&cursor) ?
 			   "Exclusive" : "Shared");
@@ -58,7 +58,7 @@ static int sanitycheck(void *arg)
 	return r;
 }
 
-static int test_signaling(void *arg, bool shared)
+static int test_signaling(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_resv resv;
 	struct dma_fence *f;
@@ -81,18 +81,18 @@ static int test_signaling(void *arg, bool shared)
 		goto err_unlock;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 
-	if (dma_resv_test_signaled(&resv, shared)) {
+	if (dma_resv_test_signaled(&resv, usage)) {
 		pr_err("Resv unexpectedly signaled\n");
 		r = -EINVAL;
 		goto err_unlock;
 	}
 	dma_fence_signal(f);
-	if (!dma_resv_test_signaled(&resv, shared)) {
+	if (!dma_resv_test_signaled(&resv, usage)) {
 		pr_err("Resv not reporting signaled\n");
 		r = -EINVAL;
 		goto err_unlock;
@@ -107,15 +107,15 @@ err_free:
 
 static int test_excl_signaling(void *arg)
 {
-	return test_signaling(arg, false);
+	return test_signaling(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_signaling(void *arg)
 {
-	return test_signaling(arg, true);
+	return test_signaling(arg, DMA_RESV_USAGE_READ);
 }
 
-static int test_for_each(void *arg, bool shared)
+static int test_for_each(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
@@ -139,13 +139,13 @@ static int test_for_each(void *arg, bool shared)
 		goto err_unlock;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 
 	r = -ENOENT;
-	dma_resv_for_each_fence(&cursor, &resv, shared, fence) {
+	dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
 		if (!r) {
 			pr_err("More than one fence found\n");
 			r = -EINVAL;
@@ -156,7 +156,8 @@ static int test_for_each(void *arg, bool shared)
 			r = -EINVAL;
 			goto err_unlock;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+		if (dma_resv_iter_is_exclusive(&cursor) !=
+		    (usage >= DMA_RESV_USAGE_READ)) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_unlock;
@@ -178,15 +179,15 @@ err_free:
 
 static int test_excl_for_each(void *arg)
 {
-	return test_for_each(arg, false);
+	return test_for_each(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_for_each(void *arg)
 {
-	return test_for_each(arg, true);
+	return test_for_each(arg, DMA_RESV_USAGE_READ);
 }
 
-static int test_for_each_unlocked(void *arg, bool shared)
+static int test_for_each_unlocked(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_resv_iter cursor;
 	struct dma_fence *f, *fence;
@@ -211,14 +212,14 @@ static int test_for_each_unlocked(void *arg, bool shared)
 		goto err_free;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 	dma_resv_unlock(&resv);
 
 	r = -ENOENT;
-	dma_resv_iter_begin(&cursor, &resv, shared);
+	dma_resv_iter_begin(&cursor, &resv, usage);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (!r) {
 			pr_err("More than one fence found\n");
@@ -234,7 +235,8 @@ static int test_for_each_unlocked(void *arg, bool shared)
 			r = -EINVAL;
 			goto err_iter_end;
 		}
-		if (dma_resv_iter_is_exclusive(&cursor) != !shared) {
+		if (dma_resv_iter_is_exclusive(&cursor) !=
+		    (usage >= DMA_RESV_USAGE_READ)) {
 			pr_err("Unexpected fence usage\n");
 			r = -EINVAL;
 			goto err_iter_end;
@@ -262,15 +264,15 @@ err_free:
 
 static int test_excl_for_each_unlocked(void *arg)
 {
-	return test_for_each_unlocked(arg, false);
+	return test_for_each_unlocked(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_for_each_unlocked(void *arg)
 {
-	return test_for_each_unlocked(arg, true);
+	return test_for_each_unlocked(arg, DMA_RESV_USAGE_READ);
 }
 
-static int test_get_fences(void *arg, bool shared)
+static int test_get_fences(void *arg, enum dma_resv_usage usage)
 {
 	struct dma_fence *f, **fences = NULL;
 	struct dma_resv resv;
@@ -294,13 +296,13 @@ static int test_get_fences(void *arg, bool shared)
 		goto err_resv;
 	}
 
-	if (shared)
+	if (usage >= DMA_RESV_USAGE_READ)
 		dma_resv_add_shared_fence(&resv, f);
 	else
 		dma_resv_add_excl_fence(&resv, f);
 	dma_resv_unlock(&resv);
 
-	r = dma_resv_get_fences(&resv, shared, &i, &fences);
+	r = dma_resv_get_fences(&resv, usage, &i, &fences);
 	if (r) {
 		pr_err("get_fences failed\n");
 		goto err_free;
@@ -324,12 +326,12 @@ err_resv:
 
 static int test_excl_get_fences(void *arg)
 {
-	return test_get_fences(arg, false);
+	return test_get_fences(arg, DMA_RESV_USAGE_WRITE);
 }
 
 static int test_shared_get_fences(void *arg)
 {
-	return test_get_fences(arg, true);
+	return test_get_fences(arg, DMA_RESV_USAGE_READ);
 }
 
 int dma_resv(void)
@@ -1288,7 +1288,9 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
 		 *
 		 * TODO: Remove together with dma_resv rework.
 		 */
-		dma_resv_for_each_fence(&cursor, resv, false, fence) {
+		dma_resv_for_each_fence(&cursor, resv,
+					DMA_RESV_USAGE_WRITE,
+					fence) {
 			break;
 		}
 		dma_fence_chain_init(chain, fence, dma_fence_get(p->fence), 1);
@@ -200,8 +200,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	/* TODO: Unify this with other drivers */
-	r = dma_resv_get_fences(new_abo->tbo.base.resv, true,
+	r = dma_resv_get_fences(new_abo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
 				&work->shared_count,
 				&work->shared);
 	if (unlikely(r != 0)) {
@@ -526,7 +526,8 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = dma_resv_wait_timeout(robj->tbo.base.resv, true, true, timeout);
+	ret = dma_resv_wait_timeout(robj->tbo.base.resv, DMA_RESV_USAGE_READ,
+				    true, timeout);
 
 	/* ret == 0 means not signaled,
 	 * ret > 0 means signaled
@@ -111,7 +111,7 @@ void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 	struct dma_fence *fence;
 	int r;
 
-	r = dma_resv_get_singleton(resv, true, &fence);
+	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_READ, &fence);
 	if (r)
 		goto fallback;
 
@@ -139,7 +139,8 @@ fallback:
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	dma_resv_wait_timeout(resv, true, false, MAX_SCHEDULE_TIMEOUT);
+	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_READ,
+			      false, MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
 
@@ -75,8 +75,8 @@ static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni,
 
 	mmu_interval_set_seq(mni, cur_seq);
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_READ,
+				  false, MAX_SCHEDULE_TIMEOUT);
 	mutex_unlock(&adev->notifier_lock);
 	if (r <= 0)
 		DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -768,8 +768,8 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = dma_resv_wait_timeout(bo->tbo.base.resv, false, false,
-				  MAX_SCHEDULE_TIMEOUT);
+	r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_WRITE,
+				  false, MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
 
@@ -259,7 +259,8 @@ int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 	if (resv == NULL)
 		return -EINVAL;
 
-	dma_resv_for_each_fence(&cursor, resv, true, f) {
+	/* TODO: Use DMA_RESV_USAGE_READ here */
+	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, f) {
 		dma_fence_chain_for_each(f, f) {
 			struct dma_fence *tmp = dma_fence_chain_contained(f);
 
@@ -1344,7 +1344,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	dma_resv_for_each_fence(&resv_cursor, bo->base.resv, true, f) {
+	dma_resv_for_each_fence(&resv_cursor, bo->base.resv,
+				DMA_RESV_USAGE_READ, f) {
 		if (amdkfd_fence_check_mm(f, current->mm))
 			return false;
 	}
@@ -1163,7 +1163,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = dma_resv_wait_timeout(bo->tbo.base.resv, true, false,
+		r = dma_resv_wait_timeout(bo->tbo.base.resv,
+					  DMA_RESV_USAGE_WRITE, false,
 					  msecs_to_jiffies(10));
 		if (r == 0)
 			r = -ETIMEDOUT;
@@ -2059,7 +2059,7 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 	struct dma_resv_iter cursor;
 	struct dma_fence *fence;
 
-	dma_resv_for_each_fence(&cursor, resv, true, fence) {
+	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_READ, fence) {
 		/* Add a callback for each fence in the reservation object */
 		amdgpu_vm_prt_get(adev);
 		amdgpu_vm_add_prt_cb(adev, fence);
@@ -2665,7 +2665,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
 		return true;
 
 	/* Don't evict VM page tables while they are busy */
-	if (!dma_resv_test_signaled(bo->tbo.base.resv, true))
+	if (!dma_resv_test_signaled(bo->tbo.base.resv, DMA_RESV_USAGE_READ))
 		return false;
 
 	/* Try to block ongoing updates */
@@ -2845,7 +2845,8 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
 */
 long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
 {
-	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv, true,
+	timeout = dma_resv_wait_timeout(vm->root.bo->tbo.base.resv,
+					DMA_RESV_USAGE_READ,
 					true, timeout);
 	if (timeout <= 0)
 		return timeout;
@@ -9236,7 +9236,8 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
 		 * deadlock during GPU reset when this fence will not signal
 		 * but we hold reservation lock for the BO.
 		 */
-		r = dma_resv_wait_timeout(abo->tbo.base.resv, true, false,
+		r = dma_resv_wait_timeout(abo->tbo.base.resv,
+					  DMA_RESV_USAGE_WRITE, false,
 					  msecs_to_jiffies(5000));
 		if (unlikely(r <= 0))
 			DRM_ERROR("Waiting for fences timed out!");
@@ -771,7 +771,8 @@ long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
 		return -EINVAL;
 	}
 
-	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
+	ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(wait_all),
+				    true, timeout);
 	if (ret == 0)
 		ret = -ETIME;
 	else if (ret > 0)
@@ -151,7 +151,7 @@ int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_st
 		return 0;
 
 	obj = drm_gem_fb_get_obj(state->fb, 0);
-	ret = dma_resv_get_singleton(obj->resv, false, &fence);
+	ret = dma_resv_get_singleton(obj->resv, DMA_RESV_USAGE_WRITE, &fence);
 	if (ret)
 		return ret;
 
@@ -380,12 +380,14 @@ int etnaviv_gem_cpu_prep(struct drm_gem_object *obj, u32 op,
 	}
 
 	if (op & ETNA_PREP_NOSYNC) {
-		if (!dma_resv_test_signaled(obj->resv, write))
+		if (!dma_resv_test_signaled(obj->resv,
+					    dma_resv_usage_rw(write)))
 			return -EBUSY;
 	} else {
 		unsigned long remain = etnaviv_timeout_to_jiffies(timeout);
 
-		ret = dma_resv_wait_timeout(obj->resv, write, true, remain);
+		ret = dma_resv_wait_timeout(obj->resv, dma_resv_usage_rw(write),
+					    true, remain);
 		if (ret <= 0)
 			return ret == 0 ? -ETIMEDOUT : ret;
 	}
@@ -997,7 +997,8 @@ intel_prepare_plane_fb(struct drm_plane *_plane,
 	if (ret < 0)
 		goto unpin_fb;
 
-	dma_resv_iter_begin(&cursor, obj->base.resv, false);
+	dma_resv_iter_begin(&cursor, obj->base.resv,
+			    DMA_RESV_USAGE_WRITE);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		add_rps_boost_after_vblank(new_plane_state->hw.crtc,
 					   fence);
@@ -138,12 +138,12 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * Alternatively, we can trade that extra information on read/write
 	 * activity with
 	 *	args->busy =
-	 *		!dma_resv_test_signaled(obj->resv, true);
+	 *		!dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ);
 	 * to report the overall busyness. This is what the wait-ioctl does.
 	 *
 	 */
 	args->busy = 0;
-	dma_resv_iter_begin(&cursor, obj->base.resv, true);
+	dma_resv_iter_begin(&cursor, obj->base.resv, DMA_RESV_USAGE_READ);
 	dma_resv_for_each_fence_unlocked(&cursor, fence) {
 		if (dma_resv_iter_is_restarted(&cursor))
 			args->busy = 0;
@@ -66,7 +66,7 @@ bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
 	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);
 
 #ifdef CONFIG_LOCKDEP
-	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
+	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, DMA_RESV_USAGE_READ) &&
 		    i915_gem_object_evictable(obj));
 #endif
 	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
Some files were not shown because too many files have changed in this diff.