dma-buf: rename reservation_object to dma_resv

Be more consistent with the naming of the other DMA-buf objects.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/323401/
@@ -1,6 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
-	 reservation.o seqno-fence.o
+	 dma-resv.o seqno-fence.o
 obj-$(CONFIG_SYNC_FILE)	+= sync_file.o
 obj-$(CONFIG_SW_SYNC)	+= sw_sync.o sync_debug.o
 obj-$(CONFIG_UDMABUF)	+= udmabuf.o
@@ -21,7 +21,7 @@
 #include <linux/module.h>
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/mm.h>
 #include <linux/mount.h>
 #include <linux/pseudo_fs.h>
@@ -104,8 +104,8 @@ static int dma_buf_release(struct inode *inode, struct file *file)
 	list_del(&dmabuf->list_node);
 	mutex_unlock(&db_list.lock);
 
-	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
-		reservation_object_fini(dmabuf->resv);
+	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+		dma_resv_fini(dmabuf->resv);
 
 	module_put(dmabuf->owner);
 	kfree(dmabuf);
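The `&dmabuf[1]` comparison above is the kernel idiom for a struct tail-allocated in the same block: when the exporter supplies no reservation object, dma_buf_export() places one directly after the struct dma_buf, so its address is exactly `&dmabuf[1]`. A minimal userspace-buildable sketch of the same idiom (not part of this patch; `parent`/`child` are hypothetical names):

#include <stdio.h>
#include <stdlib.h>

struct child { int dummy; };
struct parent {
	struct child *c;	/* may point elsewhere, or into our own block */
	int other_state;
};

/* Tail-allocate a child after the parent when the caller supplies none. */
static struct parent *parent_create(struct child *external)
{
	size_t sz = sizeof(struct parent);

	if (!external)
		sz += sizeof(struct child);	/* room for the embedded child */
	else
		sz += 1;	/* ensure &p[1] != external, as dma_buf_export() does */

	struct parent *p = malloc(sz);
	if (!p)
		return NULL;
	p->c = external ? external : (struct child *)&p[1];
	return p;
}

static void parent_destroy(struct parent *p)
{
	/* Only tear down the child if it lives in our own allocation. */
	if (p->c == (struct child *)&p[1])
		printf("finalizing embedded child\n");
	free(p);
}

int main(void)
{
	struct parent *p = parent_create(NULL);
	parent_destroy(p);	/* prints: finalizing embedded child */
	return 0;
}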
@@ -165,7 +165,7 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * To support cross-device and cross-driver synchronization of buffer access
  * implicit fences (represented internally in the kernel with &struct fence) can
  * be attached to a &dma_buf. The glue for that and a few related things are
- * provided in the &reservation_object structure.
+ * provided in the &dma_resv structure.
  *
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
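As the comment notes, these implicit fences are visible to userspace through poll() on the dma-buf file descriptor: POLLIN waits for the exclusive (write) fence, POLLOUT for all fences. A minimal sketch (not part of this patch), assuming `dmabuf_fd` was obtained elsewhere, e.g. from a driver's export ioctl:

#include <poll.h>
#include <stdio.h>

/* Block until it is safe to read the buffer, i.e. until the exclusive
 * (write) fence has signaled. POLLOUT instead would wait for all fences,
 * shared readers included. */
static int wait_for_implicit_write_fence(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = dmabuf_fd,
		.events = POLLIN,
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret < 0)
		perror("poll");
	else if (ret == 0)
		fprintf(stderr, "fence wait timed out\n");
	return ret;
}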
@@ -195,8 +195,8 @@ static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
 static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv;
-	struct reservation_object_list *fobj;
+	struct dma_resv *resv;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	__poll_t events;
 	unsigned shared_count;
@@ -214,7 +214,7 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 		return 0;
 
 	rcu_read_lock();
-	reservation_object_fences(resv, &fence_excl, &fobj, &shared_count);
+	dma_resv_fences(resv, &fence_excl, &fobj, &shared_count);
 	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
 		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
 		__poll_t pevents = EPOLLIN;
@@ -493,13 +493,13 @@ err_alloc_file:
 struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
 {
 	struct dma_buf *dmabuf;
-	struct reservation_object *resv = exp_info->resv;
+	struct dma_resv *resv = exp_info->resv;
 	struct file *file;
 	size_t alloc_size = sizeof(struct dma_buf);
 	int ret;
 
 	if (!exp_info->resv)
-		alloc_size += sizeof(struct reservation_object);
+		alloc_size += sizeof(struct dma_resv);
 	else
 		/* prevent &dma_buf[1] == dma_buf->resv */
 		alloc_size += 1;
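For context, an exporter reaches this path by filling in a struct dma_buf_export_info and calling dma_buf_export(); leaving `resv` NULL selects the embedded struct dma_resv sized above. A sketch of the exporter side (not part of this patch; `my_buffer` and `my_dmabuf_ops` are hypothetical driver names):

#include <linux/dma-buf.h>

struct my_buffer {		/* hypothetical driver bookkeeping */
	size_t size;
};

static const struct dma_buf_ops my_dmabuf_ops;	/* driver's real ops go here */

static struct dma_buf *my_export(struct my_buffer *buf)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &my_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buf;
	/* exp_info.resv left NULL: dma_buf_export() tail-allocates a
	 * struct dma_resv and initializes it with dma_resv_init(). */

	return dma_buf_export(&exp_info);
}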
@@ -531,8 +531,8 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
|
||||
dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;
|
||||
|
||||
if (!resv) {
|
||||
resv = (struct reservation_object *)&dmabuf[1];
|
||||
reservation_object_init(resv);
|
||||
resv = (struct dma_resv *)&dmabuf[1];
|
||||
dma_resv_init(resv);
|
||||
}
|
||||
dmabuf->resv = resv;
|
||||
|
||||
@@ -896,11 +896,11 @@ static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	bool write = (direction == DMA_BIDIRECTIONAL ||
 		      direction == DMA_TO_DEVICE);
-	struct reservation_object *resv = dmabuf->resv;
+	struct dma_resv *resv = dmabuf->resv;
 	long ret;
 
 	/* Wait on any implicit rendering fences */
-	ret = reservation_object_wait_timeout_rcu(resv, write, true,
+	ret = dma_resv_wait_timeout_rcu(resv, write, true,
 						  MAX_SCHEDULE_TIMEOUT);
 	if (ret < 0)
 		return ret;
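This helper backs dma_buf_begin_cpu_access(), which importers must bracket around any CPU reads or writes so the implicit fences above are respected. A minimal sketch of the importer side (not part of this patch), assuming `dmabuf` was obtained via dma_buf_get():

#include <linux/dma-buf.h>
#include <linux/dma-direction.h>

/* Read the buffer with the CPU, waiting for pending device writes first. */
static int my_cpu_read(struct dma_buf *dmabuf)
{
	int ret;

	/* Waits on the exclusive fence (device writes) via the code above. */
	ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
	if (ret)
		return ret;

	/* ... access the buffer through a prior kmap/vmap mapping ... */

	return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}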
@@ -1141,8 +1141,8 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 	int ret;
 	struct dma_buf *buf_obj;
 	struct dma_buf_attachment *attach_obj;
-	struct reservation_object *robj;
-	struct reservation_object_list *fobj;
+	struct dma_resv *robj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	int count = 0, attach_count, shared_count, i;
 	size_t size = 0;
@@ -1175,7 +1175,7 @@ static int dma_buf_debug_show(struct seq_file *s, void *unused)
 
 		robj = buf_obj->resv;
 		rcu_read_lock();
-		reservation_object_fences(robj, &fence, &fobj, &shared_count);
+		dma_resv_fences(robj, &fence, &fobj, &shared_count);
 		rcu_read_unlock();
 
 		if (fence)
@@ -60,7 +60,7 @@ static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
  *
  * - Then there's also implicit fencing, where the synchronization points are
  *   implicitly passed around as part of shared &dma_buf instances. Such
- *   implicit fences are stored in &struct reservation_object through the
+ *   implicit fences are stored in &struct dma_resv through the
  *   &dma_buf.resv pointer.
  */
@@ -32,7 +32,7 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include <linux/reservation.h>
+#include <linux/dma-resv.h>
 #include <linux/export.h>
 
 /**
@@ -50,16 +50,15 @@ DEFINE_WD_CLASS(reservation_ww_class);
 EXPORT_SYMBOL(reservation_ww_class);
 
 /**
- * reservation_object_list_alloc - allocate fence list
+ * dma_resv_list_alloc - allocate fence list
  * @shared_max: number of fences we need space for
  *
- * Allocate a new reservation_object_list and make sure to correctly initialize
+ * Allocate a new dma_resv_list and make sure to correctly initialize
  * shared_max.
  */
-static struct reservation_object_list *
-reservation_object_list_alloc(unsigned int shared_max)
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
 {
-	struct reservation_object_list *list;
+	struct dma_resv_list *list;
 
 	list = kmalloc(offsetof(typeof(*list), shared[shared_max]), GFP_KERNEL);
 	if (!list)
@@ -72,12 +71,12 @@ reservation_object_list_alloc(unsigned int shared_max)
 }
 
 /**
- * reservation_object_list_free - free fence list
+ * dma_resv_list_free - free fence list
  * @list: list to free
  *
- * Free a reservation_object_list and make sure to drop all references.
+ * Free a dma_resv_list and make sure to drop all references.
  */
-static void reservation_object_list_free(struct reservation_object_list *list)
+static void dma_resv_list_free(struct dma_resv_list *list)
 {
 	unsigned int i;
@@ -91,24 +90,24 @@ static void reservation_object_list_free(struct reservation_object_list *list)
 }
 
 /**
- * reservation_object_init - initialize a reservation object
+ * dma_resv_init - initialize a reservation object
  * @obj: the reservation object
  */
-void reservation_object_init(struct reservation_object *obj)
+void dma_resv_init(struct dma_resv *obj)
 {
 	ww_mutex_init(&obj->lock, &reservation_ww_class);
 	RCU_INIT_POINTER(obj->fence, NULL);
 	RCU_INIT_POINTER(obj->fence_excl, NULL);
 }
-EXPORT_SYMBOL(reservation_object_init);
+EXPORT_SYMBOL(dma_resv_init);
 
 /**
- * reservation_object_fini - destroys a reservation object
+ * dma_resv_fini - destroys a reservation object
  * @obj: the reservation object
  */
-void reservation_object_fini(struct reservation_object *obj)
+void dma_resv_fini(struct dma_resv *obj)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *excl;
 
 	/*
@@ -120,32 +119,31 @@ void reservation_object_fini(struct reservation_object *obj)
 	dma_fence_put(excl);
 
 	fobj = rcu_dereference_protected(obj->fence, 1);
-	reservation_object_list_free(fobj);
+	dma_resv_list_free(fobj);
 	ww_mutex_destroy(&obj->lock);
 }
-EXPORT_SYMBOL(reservation_object_fini);
+EXPORT_SYMBOL(dma_resv_fini);
 
 /**
- * reservation_object_reserve_shared - Reserve space to add shared fences to
- * a reservation_object.
+ * dma_resv_reserve_shared - Reserve space to add shared fences to
+ * a dma_resv.
  * @obj: reservation object
  * @num_fences: number of fences we want to add
  *
- * Should be called before reservation_object_add_shared_fence(). Must
+ * Should be called before dma_resv_add_shared_fence(). Must
  * be called with obj->lock held.
  *
  * RETURNS
 * Zero for success, or -errno
  */
-int reservation_object_reserve_shared(struct reservation_object *obj,
-				      unsigned int num_fences)
+int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
 {
-	struct reservation_object_list *old, *new;
+	struct dma_resv_list *old, *new;
 	unsigned int i, j, k, max;
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	old = reservation_object_get_list(obj);
+	old = dma_resv_get_list(obj);
 
 	if (old && old->shared_max) {
 		if ((old->shared_count + num_fences) <= old->shared_max)
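Taken together, dma_resv_reserve_shared() and dma_resv_add_shared_fence() form the standard two-step pattern for publishing a fence: the allocation that can fail happens in the reserve step, so the add step never fails. A sketch of a typical caller (not part of this patch), assuming `resv` and `fence` already exist:

#include <linux/dma-resv.h>

/* Publish a read (shared) fence on a reservation object. */
static int publish_shared_fence(struct dma_resv *resv,
				struct dma_fence *fence)
{
	int ret;

	ret = dma_resv_lock(resv, NULL);	/* NULL: no ww_acquire_ctx */
	if (ret)
		return ret;

	ret = dma_resv_reserve_shared(resv, 1);	/* make room for one slot */
	if (!ret)
		dma_resv_add_shared_fence(resv, fence);	/* cannot fail */

	dma_resv_unlock(resv);
	return ret;
}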
@@ -157,7 +155,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		max = 4;
 	}
 
-	new = reservation_object_list_alloc(max);
+	new = dma_resv_list_alloc(max);
 	if (!new)
 		return -ENOMEM;
@@ -171,7 +169,7 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		struct dma_fence *fence;
 
 		fence = rcu_dereference_protected(old->shared[i],
-						  reservation_object_held(obj));
+						  dma_resv_held(obj));
 		if (dma_fence_is_signaled(fence))
 			RCU_INIT_POINTER(new->shared[--k], fence);
 		else
@@ -197,41 +195,40 @@ int reservation_object_reserve_shared(struct reservation_object *obj,
 		struct dma_fence *fence;
 
 		fence = rcu_dereference_protected(new->shared[i],
-						  reservation_object_held(obj));
+						  dma_resv_held(obj));
 		dma_fence_put(fence);
 	}
 	kfree_rcu(old, rcu);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_reserve_shared);
+EXPORT_SYMBOL(dma_resv_reserve_shared);
 
 /**
- * reservation_object_add_shared_fence - Add a fence to a shared slot
+ * dma_resv_add_shared_fence - Add a fence to a shared slot
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
  * Add a fence to a shared slot, obj->lock must be held, and
- * reservation_object_reserve_shared() has been called.
+ * dma_resv_reserve_shared() has been called.
  */
-void reservation_object_add_shared_fence(struct reservation_object *obj,
-					 struct dma_fence *fence)
+void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *old;
 	unsigned int i, count;
 
 	dma_fence_get(fence);
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	fobj = reservation_object_get_list(obj);
+	fobj = dma_resv_get_list(obj);
 	count = fobj->shared_count;
 
 	for (i = 0; i < count; ++i) {
 
 		old = rcu_dereference_protected(fobj->shared[i],
-						reservation_object_held(obj));
+						dma_resv_held(obj));
 		if (old->context == fence->context ||
 		    dma_fence_is_signaled(old))
 			goto replace;
@@ -247,25 +244,24 @@ replace:
 	smp_store_mb(fobj->shared_count, count);
 	dma_fence_put(old);
 }
-EXPORT_SYMBOL(reservation_object_add_shared_fence);
+EXPORT_SYMBOL(dma_resv_add_shared_fence);
 
 /**
- * reservation_object_add_excl_fence - Add an exclusive fence.
+ * dma_resv_add_excl_fence - Add an exclusive fence.
  * @obj: the reservation object
  * @fence: the shared fence to add
  *
  * Add a fence to the exclusive slot. The obj->lock must be held.
  */
-void reservation_object_add_excl_fence(struct reservation_object *obj,
-				       struct dma_fence *fence)
+void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
 {
-	struct dma_fence *old_fence = reservation_object_get_excl(obj);
-	struct reservation_object_list *old;
+	struct dma_fence *old_fence = dma_resv_get_excl(obj);
+	struct dma_resv_list *old;
 	u32 i = 0;
 
-	reservation_object_assert_held(obj);
+	dma_resv_assert_held(obj);
 
-	old = reservation_object_get_list(obj);
+	old = dma_resv_get_list(obj);
 	if (old)
 		i = old->shared_count;
 
@@ -282,41 +278,40 @@ void reservation_object_add_excl_fence(struct reservation_object *obj,
 	/* inplace update, no shared fences */
 	while (i--)
 		dma_fence_put(rcu_dereference_protected(old->shared[i],
-						reservation_object_held(obj)));
+						dma_resv_held(obj)));
 
 	dma_fence_put(old_fence);
 }
-EXPORT_SYMBOL(reservation_object_add_excl_fence);
+EXPORT_SYMBOL(dma_resv_add_excl_fence);
 
 /**
- * reservation_object_copy_fences - Copy all fences from src to dst.
+ * dma_resv_copy_fences - Copy all fences from src to dst.
  * @dst: the destination reservation object
  * @src: the source reservation object
  *
  * Copy all fences from src to dst. dst-lock must be held.
  */
-int reservation_object_copy_fences(struct reservation_object *dst,
-				   struct reservation_object *src)
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
 {
-	struct reservation_object_list *src_list, *dst_list;
+	struct dma_resv_list *src_list, *dst_list;
 	struct dma_fence *old, *new;
 	unsigned int i, shared_count;
 
-	reservation_object_assert_held(dst);
+	dma_resv_assert_held(dst);
 
 	rcu_read_lock();
 
retry:
-	reservation_object_fences(src, &new, &src_list, &shared_count);
+	dma_resv_fences(src, &new, &src_list, &shared_count);
 	if (shared_count) {
 		rcu_read_unlock();
 
-		dst_list = reservation_object_list_alloc(shared_count);
+		dst_list = dma_resv_list_alloc(shared_count);
 		if (!dst_list)
 			return -ENOMEM;
 
 		rcu_read_lock();
-		reservation_object_fences(src, &new, &src_list, &shared_count);
+		dma_resv_fences(src, &new, &src_list, &shared_count);
 		if (!src_list || shared_count > dst_list->shared_max) {
 			kfree(dst_list);
 			goto retry;
@@ -332,7 +327,7 @@ retry:
 			continue;
 
 		if (!dma_fence_get_rcu(fence)) {
-			reservation_object_list_free(dst_list);
+			dma_resv_list_free(dst_list);
 			goto retry;
 		}
 
@@ -348,28 +343,28 @@ retry:
 	}
 
 	if (new && !dma_fence_get_rcu(new)) {
-		reservation_object_list_free(dst_list);
+		dma_resv_list_free(dst_list);
 		goto retry;
 	}
 	rcu_read_unlock();
 
-	src_list = reservation_object_get_list(dst);
-	old = reservation_object_get_excl(dst);
+	src_list = dma_resv_get_list(dst);
+	old = dma_resv_get_excl(dst);
 
 	preempt_disable();
 	rcu_assign_pointer(dst->fence_excl, new);
 	rcu_assign_pointer(dst->fence, dst_list);
 	preempt_enable();
 
-	reservation_object_list_free(src_list);
+	dma_resv_list_free(src_list);
 	dma_fence_put(old);
 
 	return 0;
 }
-EXPORT_SYMBOL(reservation_object_copy_fences);
+EXPORT_SYMBOL(dma_resv_copy_fences);
 
 /**
- * reservation_object_get_fences_rcu - Get an object's shared and exclusive
+ * dma_resv_get_fences_rcu - Get an object's shared and exclusive
  * fences without update side lock held
  * @obj: the reservation object
  * @pfence_excl: the returned exclusive fence (or NULL)
@@ -381,10 +376,10 @@ EXPORT_SYMBOL(reservation_object_copy_fences);
  * exclusive fence is not specified the fence is put into the array of the
  * shared fences as well. Returns either zero or -ENOMEM.
  */
-int reservation_object_get_fences_rcu(struct reservation_object *obj,
-				      struct dma_fence **pfence_excl,
-				      unsigned *pshared_count,
-				      struct dma_fence ***pshared)
+int dma_resv_get_fences_rcu(struct dma_resv *obj,
+			    struct dma_fence **pfence_excl,
+			    unsigned *pshared_count,
+			    struct dma_fence ***pshared)
 {
 	struct dma_fence **shared = NULL;
 	struct dma_fence *fence_excl;
@@ -392,14 +387,14 @@ int reservation_object_get_fences_rcu(struct reservation_object *obj,
 	int ret = 1;
 
 	do {
-		struct reservation_object_list *fobj;
+		struct dma_resv_list *fobj;
 		unsigned int i;
 		size_t sz = 0;
 
 		i = 0;
 
 		rcu_read_lock();
-		reservation_object_fences(obj, &fence_excl, &fobj,
+		dma_resv_fences(obj, &fence_excl, &fobj,
 					  &shared_count);
 
 		if (fence_excl && !dma_fence_get_rcu(fence_excl))
@@ -465,10 +460,10 @@ unlock:
 	*pshared = shared;
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_get_fences_rcu);
 
 /**
- * reservation_object_wait_timeout_rcu - Wait on reservation's objects
+ * dma_resv_wait_timeout_rcu - Wait on reservation's objects
  * shared and/or exclusive fences.
  * @obj: the reservation object
  * @wait_all: if true, wait on all fences, else wait on just exclusive fence
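dma_resv_get_fences_rcu() hands back reference-counted snapshots that the caller must release. A sketch of the lockless snapshot-and-release pattern (not part of this patch):

#include <linux/dma-resv.h>
#include <linux/slab.h>

/* Snapshot all fences on a reservation object without taking its lock,
 * then drop every reference we were handed. */
static int snapshot_fences(struct dma_resv *resv)
{
	struct dma_fence *excl, **shared;
	unsigned int count, i;
	int ret;

	ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
	if (ret)
		return ret;	/* -ENOMEM is the only failure mode */

	/* ... inspect or wait on excl and shared[0..count-1] ... */

	for (i = 0; i < count; i++)
		dma_fence_put(shared[i]);
	kfree(shared);
	dma_fence_put(excl);	/* dma_fence_put(NULL) is a no-op */
	return 0;
}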
@@ -479,11 +474,11 @@ EXPORT_SYMBOL_GPL(reservation_object_get_fences_rcu);
  * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
  * greater than zer on success.
  */
-long reservation_object_wait_timeout_rcu(struct reservation_object *obj,
-					 bool wait_all, bool intr,
-					 unsigned long timeout)
+long dma_resv_wait_timeout_rcu(struct dma_resv *obj,
+			       bool wait_all, bool intr,
+			       unsigned long timeout)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence;
 	unsigned shared_count;
 	long ret = timeout ? timeout : 1;
@@ -493,7 +488,7 @@ retry:
 	rcu_read_lock();
 	i = -1;
 
-	reservation_object_fences(obj, &fence, &fobj, &shared_count);
+	dma_resv_fences(obj, &fence, &fobj, &shared_count);
 	if (fence && !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
 		if (!dma_fence_get_rcu(fence))
 			goto unlock_retry;
@@ -541,11 +536,10 @@ unlock_retry:
 	rcu_read_unlock();
 	goto retry;
 }
-EXPORT_SYMBOL_GPL(reservation_object_wait_timeout_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout_rcu);
 
 
-static inline int
-reservation_object_test_signaled_single(struct dma_fence *passed_fence)
+static inline int dma_resv_test_signaled_single(struct dma_fence *passed_fence)
 {
 	struct dma_fence *fence, *lfence = passed_fence;
 	int ret = 1;
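A typical caller bounds this wait rather than passing MAX_SCHEDULE_TIMEOUT. A sketch converting milliseconds to jiffies and decoding the tri-state return (not part of this patch):

#include <linux/dma-resv.h>
#include <linux/jiffies.h>

/* Wait up to 100 ms for every fence (shared and exclusive), interruptibly.
 * Returns 0 on success, -ETIME on timeout, or a negative error. */
static int wait_all_fences_100ms(struct dma_resv *resv)
{
	long ret;

	ret = dma_resv_wait_timeout_rcu(resv, true /* wait_all */,
					true /* intr */,
					msecs_to_jiffies(100));
	if (ret < 0)
		return ret;	/* e.g. -ERESTARTSYS if interrupted */
	if (ret == 0)
		return -ETIME;	/* timed out */
	return 0;		/* > 0: remaining jiffies, all signaled */
}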
@@ -562,7 +556,7 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
 }
 
 /**
- * reservation_object_test_signaled_rcu - Test if a reservation object's
+ * dma_resv_test_signaled_rcu - Test if a reservation object's
  * fences have been signaled.
  * @obj: the reservation object
  * @test_all: if true, test all fences, otherwise only test the exclusive
@@ -571,10 +565,9 @@ reservation_object_test_signaled_single(struct dma_fence *passed_fence)
  * RETURNS
  * true if all fences signaled, else false
  */
-bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
-					  bool test_all)
+bool dma_resv_test_signaled_rcu(struct dma_resv *obj, bool test_all)
 {
-	struct reservation_object_list *fobj;
+	struct dma_resv_list *fobj;
 	struct dma_fence *fence_excl;
 	unsigned shared_count;
 	int ret;
@@ -583,14 +576,14 @@ bool reservation_object_test_signaled_rcu(struct reservation_object *obj,
retry:
 	ret = true;
 
-	reservation_object_fences(obj, &fence_excl, &fobj, &shared_count);
+	dma_resv_fences(obj, &fence_excl, &fobj, &shared_count);
 	if (test_all) {
 		unsigned i;
 
 		for (i = 0; i < shared_count; ++i) {
 			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);
 
-			ret = reservation_object_test_signaled_single(fence);
+			ret = dma_resv_test_signaled_single(fence);
 			if (ret < 0)
 				goto retry;
 			else if (!ret)
@@ -599,7 +592,7 @@ retry:
 	}
 
 	if (!shared_count && fence_excl) {
-		ret = reservation_object_test_signaled_single(fence_excl);
+		ret = dma_resv_test_signaled_single(fence_excl);
 		if (ret < 0)
 			goto retry;
 	}
@@ -607,4 +600,4 @@ retry:
 	rcu_read_unlock();
 	return ret;
 }
-EXPORT_SYMBOL_GPL(reservation_object_test_signaled_rcu);
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled_rcu);
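dma_resv_test_signaled_rcu() is the non-blocking counterpart, commonly used to implement buffer-busy checks. A sketch (not part of this patch):

#include <linux/dma-resv.h>

/* Report whether any work is still pending on the buffer. With
 * test_all = true both shared (read) and exclusive (write) fences are
 * checked; with false only the exclusive fence is. Never blocks. */
static bool buffer_is_busy(struct dma_resv *resv)
{
	return !dma_resv_test_signaled_rcu(resv, true);
}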
@@ -218,14 +218,14 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
 static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 					struct amdgpu_amdkfd_fence *ef)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
-	struct reservation_object_list *old, *new;
+	struct dma_resv *resv = bo->tbo.base.resv;
+	struct dma_resv_list *old, *new;
 	unsigned int i, j, k;
 
 	if (!ef)
 		return -EINVAL;
 
-	old = reservation_object_get_list(resv);
+	old = dma_resv_get_list(resv);
 	if (!old)
 		return 0;
 
@@ -241,7 +241,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(old->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 
 		if (f->context == ef->base.context)
 			RCU_INIT_POINTER(new->shared[--j], f);
@@ -258,7 +258,7 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
 		struct dma_fence *f;
 
 		f = rcu_dereference_protected(new->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		dma_fence_put(f);
 	}
 	kfree_rcu(old, rcu);
@@ -882,7 +882,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info,
 					  AMDGPU_FENCE_OWNER_KFD, false);
 	if (ret)
 		goto wait_pd_fail;
-	ret = reservation_object_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(vm->root.base.bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(vm->root.base.bo,
@@ -2127,7 +2127,7 @@ int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem
 	 * Add process eviction fence to bo so they can
 	 * evict each other.
 	 */
-	ret = reservation_object_reserve_shared(gws_bo->tbo.base.resv, 1);
+	ret = dma_resv_reserve_shared(gws_bo->tbo.base.resv, 1);
 	if (ret)
 		goto reserve_shared_fail;
 	amdgpu_bo_fence(gws_bo, &process_info->eviction_fence->base, true);
@@ -730,7 +730,7 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
 
 	list_for_each_entry(e, &p->validated, tv.head) {
 		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
-		struct reservation_object *resv = bo->tbo.base.resv;
+		struct dma_resv *resv = bo->tbo.base.resv;
 
 		r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
 				     amdgpu_bo_explicit_sync(bo));
@@ -1729,7 +1729,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
 	*map = mapping;
 
 	/* Double check that the BO is reserved by this CS */
-	if (reservation_object_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
+	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
 		return -EINVAL;
 
 	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
@@ -204,7 +204,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
 		goto unpin;
 	}
 
-	r = reservation_object_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
+	r = dma_resv_get_fences_rcu(new_abo->tbo.base.resv, &work->excl,
 					      &work->shared_count,
 					      &work->shared);
 	if (unlikely(r != 0)) {
@@ -137,23 +137,23 @@ int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
 }
 
 static int
-__reservation_object_make_exclusive(struct reservation_object *obj)
+__dma_resv_make_exclusive(struct dma_resv *obj)
 {
 	struct dma_fence **fences;
 	unsigned int count;
 	int r;
 
-	if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+	if (!dma_resv_get_list(obj)) /* no shared fences to convert */
 		return 0;
 
-	r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
 	if (r)
 		return r;
 
 	if (count == 0) {
 		/* Now that was unexpected. */
 	} else if (count == 1) {
-		reservation_object_add_excl_fence(obj, fences[0]);
+		dma_resv_add_excl_fence(obj, fences[0]);
 		dma_fence_put(fences[0]);
 		kfree(fences);
 	} else {
@@ -165,7 +165,7 @@ __reservation_object_make_exclusive(struct reservation_object *obj)
 		if (!array)
 			goto err_fences_put;
 
-		reservation_object_add_excl_fence(obj, &array->base);
+		dma_resv_add_excl_fence(obj, &array->base);
 		dma_fence_put(&array->base);
 	}
 
@@ -216,7 +216,7 @@ static int amdgpu_dma_buf_map_attach(struct dma_buf *dma_buf,
 		 * fences on the reservation object into a single exclusive
 		 * fence.
 		 */
-		r = __reservation_object_make_exclusive(bo->tbo.base.resv);
+		r = __dma_resv_make_exclusive(bo->tbo.base.resv);
 		if (r)
 			goto error_unreserve;
 	}
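The `array` in the count > 1 branch above is a dma_fence_array: a container fence that signals once all of its members have. A sketch of collapsing a snapshot of shared fences into one exclusive fence, mirroring what __dma_resv_make_exclusive() does (not part of this patch, and the error path is simplified):

#include <linux/dma-fence-array.h>
#include <linux/dma-resv.h>

/* Fold 'count' fences (ownership transferred) into a single array fence
 * and install it as the exclusive fence. On allocation failure the real
 * code must put each fence and free the array itself. */
static int collapse_to_exclusive(struct dma_resv *obj,
				 struct dma_fence **fences,
				 unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
				       dma_fence_context_alloc(1), 0,
				       false /* signal when ALL signal */);
	if (!array)
		return -ENOMEM;

	dma_resv_add_excl_fence(obj, &array->base);
	dma_fence_put(&array->base);	/* the resv now holds its reference */
	return 0;
}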
@@ -367,7 +367,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 				 struct dma_buf_attachment *attach,
 				 struct sg_table *sg)
 {
-	struct reservation_object *resv = attach->dmabuf->resv;
+	struct dma_resv *resv = attach->dmabuf->resv;
 	struct amdgpu_device *adev = dev->dev_private;
 	struct amdgpu_bo *bo;
 	struct amdgpu_bo_param bp;
@@ -380,7 +380,7 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	bp.flags = 0;
 	bp.type = ttm_bo_type_sg;
 	bp.resv = resv;
-	reservation_object_lock(resv, NULL);
+	dma_resv_lock(resv, NULL);
 	ret = amdgpu_bo_create(adev, &bp, &bo);
 	if (ret)
 		goto error;
@@ -392,11 +392,11 @@ amdgpu_gem_prime_import_sg_table(struct drm_device *dev,
 	if (attach->dmabuf->ops != &amdgpu_dmabuf_ops)
 		bo->prime_shared_count = 1;
 
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	return &bo->tbo.base;
 
error:
-	reservation_object_unlock(resv);
+	dma_resv_unlock(resv);
 	return ERR_PTR(ret);
 }
@@ -50,7 +50,7 @@ void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj)
 {
 	struct amdgpu_bo *bo;
@@ -215,7 +215,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	union drm_amdgpu_gem_create *args = data;
 	uint64_t flags = args->in.domain_flags;
 	uint64_t size = args->in.bo_size;
-	struct reservation_object *resv = NULL;
+	struct dma_resv *resv = NULL;
 	struct drm_gem_object *gobj;
 	uint32_t handle;
 	int r;
@@ -433,7 +433,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -ENOENT;
 	}
 	robj = gem_to_amdgpu_bo(gobj);
-	ret = reservation_object_wait_timeout_rcu(robj->tbo.base.resv, true, true,
+	ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true,
 						  timeout);
 
 	/* ret == 0 means not signaled,
@@ -47,7 +47,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev);
 int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 			     int alignment, u32 initial_domain,
 			     u64 flags, enum ttm_bo_type type,
-			     struct reservation_object *resv,
+			     struct dma_resv *resv,
 			     struct drm_gem_object **obj);
 
 int amdgpu_mode_dumb_create(struct drm_file *file_priv,
@@ -104,7 +104,7 @@ static void amdgpu_pasid_free_cb(struct dma_fence *fence,
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       unsigned int pasid)
 {
 	struct dma_fence *fence, **fences;
@@ -112,7 +112,7 @@ void amdgpu_pasid_free_delayed(struct reservation_object *resv,
 	unsigned count;
 	int r;
 
-	r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+	r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
 	if (r)
 		goto fallback;
 
@@ -156,7 +156,7 @@ fallback:
 	/* Not enough memory for the delayed delete, as last resort
 	 * block for all the fences to complete.
 	 */
-	reservation_object_wait_timeout_rcu(resv, true, false,
+	dma_resv_wait_timeout_rcu(resv, true, false,
 					    MAX_SCHEDULE_TIMEOUT);
 	amdgpu_pasid_free(pasid);
 }
@@ -72,7 +72,7 @@ struct amdgpu_vmid_mgr {
 
 int amdgpu_pasid_alloc(unsigned int bits);
 void amdgpu_pasid_free(unsigned int pasid);
-void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+void amdgpu_pasid_free_delayed(struct dma_resv *resv,
 			       unsigned int pasid);
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
@@ -179,7 +179,7 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
 		if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start, end))
 			continue;
 
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 			true, false, MAX_SCHEDULE_TIMEOUT);
 		if (r <= 0)
 			DRM_ERROR("(%ld) failed to wait for user bo\n", r);
@@ -544,7 +544,7 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
 
fail_unreserve:
 	if (!bp->resv)
-		reservation_object_unlock(bo->tbo.base.resv);
+		dma_resv_unlock(bo->tbo.base.resv);
 	amdgpu_bo_unref(&bo);
 	return r;
 }
@@ -606,13 +606,13 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 
 	if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
 		if (!bp->resv)
-			WARN_ON(reservation_object_lock((*bo_ptr)->tbo.base.resv,
+			WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
 							NULL));
 
 		r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
 
 		if (!bp->resv)
-			reservation_object_unlock((*bo_ptr)->tbo.base.resv);
+			dma_resv_unlock((*bo_ptr)->tbo.base.resv);
 
 		if (r)
 			amdgpu_bo_unref(bo_ptr);
@@ -709,7 +709,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 		return 0;
 	}
 
-	r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv, false, false,
+	r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
 						MAX_SCHEDULE_TIMEOUT);
 	if (r < 0)
 		return r;
@@ -1087,7 +1087,7 @@ int amdgpu_bo_set_tiling_flags(struct amdgpu_bo *bo, u64 tiling_flags)
 */
 void amdgpu_bo_get_tiling_flags(struct amdgpu_bo *bo, u64 *tiling_flags)
 {
-	reservation_object_assert_held(bo->tbo.base.resv);
+	dma_resv_assert_held(bo->tbo.base.resv);
 
 	if (tiling_flags)
 		*tiling_flags = bo->tiling_flags;
@@ -1283,12 +1283,12 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
 		     bool shared)
 {
-	struct reservation_object *resv = bo->tbo.base.resv;
+	struct dma_resv *resv = bo->tbo.base.resv;
 
 	if (shared)
-		reservation_object_add_shared_fence(resv, fence);
+		dma_resv_add_shared_fence(resv, fence);
 	else
-		reservation_object_add_excl_fence(resv, fence);
+		dma_resv_add_excl_fence(resv, fence);
 }
 
 /**
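amdgpu_bo_fence() makes the shared/exclusive split concrete: readers attach shared fences, writers the exclusive one. A hedged usage sketch (not part of this patch; `publish_job_fence` is a hypothetical helper):

/* Publish a job's fence on a BO: shared slot for reads, exclusive for
 * writes. The caller must hold the BO's reservation lock, since
 * amdgpu_bo_fence() adds fences with dma_resv_add_*_fence(). */
static void publish_job_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
			      bool is_write)
{
	amdgpu_bo_fence(bo, fence, !is_write);
}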
@@ -1328,7 +1328,7 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
-	WARN_ON_ONCE(!reservation_object_is_locked(bo->tbo.base.resv) &&
+	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->pin_count && bo->tbo.type != ttm_bo_type_kernel);
 	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
 	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
@@ -41,7 +41,7 @@ struct amdgpu_bo_param {
 	u32				preferred_domain;
 	u64				flags;
 	enum ttm_bo_type		type;
-	struct reservation_object	*resv;
+	struct dma_resv			*resv;
 };
 
 /* bo virtual addresses in a vm */
@@ -190,10 +190,10 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 */
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner, bool explicit_sync)
 {
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	void *fence_owner;
 	unsigned i;
@@ -203,16 +203,16 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
 		return -EINVAL;
 
 	/* always sync to the exclusive fence */
-	f = reservation_object_get_excl(resv);
+	f = dma_resv_get_excl(resv);
 	r = amdgpu_sync_fence(adev, sync, f, false);
 
-	flist = reservation_object_get_list(resv);
+	flist = dma_resv_get_list(resv);
 	if (!flist || r)
 		return r;
 
 	for (i = 0; i < flist->shared_count; ++i) {
 		f = rcu_dereference_protected(flist->shared[i],
-					      reservation_object_held(resv));
+					      dma_resv_held(resv));
 		/* We only want to trigger KFD eviction fences on
 		 * evict or move jobs. Skip KFD fences otherwise.
 		 */
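The loop above shows the locked iteration idiom: dma_resv_get_list() plus rcu_dereference_protected(..., dma_resv_held(resv)) is only valid while the reservation lock is held, unlike the RCU-snapshot helpers earlier in this patch. The same walk in isolation (a sketch, not part of this patch):

#include <linux/dma-resv.h>

/* Count shared fences that have not signaled yet. The caller must hold
 * resv's lock, which is what makes rcu_dereference_protected() legal. */
static unsigned int count_pending_shared_locked(struct dma_resv *resv)
{
	struct dma_resv_list *flist = dma_resv_get_list(resv);
	unsigned int i, n = 0;

	if (!flist)
		return 0;

	for (i = 0; i < flist->shared_count; ++i) {
		struct dma_fence *f =
			rcu_dereference_protected(flist->shared[i],
						  dma_resv_held(resv));
		if (!dma_fence_is_signaled(f))	/* still pending */
			n++;
	}
	return n;
}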
@@ -27,7 +27,7 @@
 #include <linux/hashtable.h>
 
 struct dma_fence;
-struct reservation_object;
+struct dma_resv;
 struct amdgpu_device;
 struct amdgpu_ring;
@@ -44,7 +44,7 @@ int amdgpu_sync_fence(struct amdgpu_device *adev, struct amdgpu_sync *sync,
 		      struct dma_fence *f, bool explicit);
 int amdgpu_sync_resv(struct amdgpu_device *adev,
 		     struct amdgpu_sync *sync,
-		     struct reservation_object *resv,
+		     struct dma_resv *resv,
 		     void *owner,
 		     bool explicit_sync);
 struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
@@ -303,7 +303,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f)
 {
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
@@ -1470,7 +1470,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 {
 	unsigned long num_pages = bo->mem.num_pages;
 	struct drm_mm_node *node = bo->mem.mm_node;
-	struct reservation_object_list *flist;
+	struct dma_resv_list *flist;
 	struct dma_fence *f;
 	int i;
 
@@ -1478,18 +1478,18 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	 * cleanly handle page faults.
 	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !reservation_object_test_signaled_rcu(bo->base.resv, true))
+	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
 	 * If true, then return false as any KFD process needs all its BOs to
 	 * be resident to run successfully
 	 */
-	flist = reservation_object_get_list(bo->base.resv);
+	flist = dma_resv_get_list(bo->base.resv);
 	if (flist) {
 		for (i = 0; i < flist->shared_count; ++i) {
 			f = rcu_dereference_protected(flist->shared[i],
-				reservation_object_held(bo->base.resv));
+				dma_resv_held(bo->base.resv));
 			if (amdkfd_fence_check_mm(f, current->mm))
 				return false;
 		}
@@ -1992,7 +1992,7 @@ error_free:
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush)
 {
@@ -2066,7 +2066,7 @@ error_free:
 
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -83,18 +83,18 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev,
 
 int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
 		       uint64_t dst_offset, uint32_t byte_count,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence, bool direct_submit,
 		       bool vm_needs_flush);
 int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
 			       struct amdgpu_copy_mem *src,
 			       struct amdgpu_copy_mem *dst,
 			       uint64_t size,
-			       struct reservation_object *resv,
+			       struct dma_resv *resv,
 			       struct dma_fence **f);
 int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		       uint32_t src_data,
-		       struct reservation_object *resv,
+		       struct dma_resv *resv,
 		       struct dma_fence **fence);
 
 int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1073,7 +1073,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 	ib->length_dw = 16;
 
 	if (direct) {
-		r = reservation_object_wait_timeout_rcu(bo->tbo.base.resv,
+		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv,
 							true, false,
 							msecs_to_jiffies(10));
 		if (r == 0)
Some files were not shown because too many files have changed in this diff.