drm/ttm: rename bo->mem and make it a pointer

When we want to decouple resource management from buffer management we need
to be able to handle resources separately.

Add a resource pointer and rename bo->mem so that all code has to
change to access the pointer instead.

No functional change.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210430092508.60710-4-christian.koenig@amd.com
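
Every access of the form bo->mem.X below becomes bo->resource->X, and every
&bo->mem argument becomes bo->resource. A minimal sketch of the access-pattern
change (illustrative only; the TTM header hunk that introduces the pointer is
among the files not shown at the end of this diff):

    /* before: the resource was an embedded member of the BO */
    if (bo->mem.mem_type == TTM_PL_VRAM)
            offset = bo->mem.start << PAGE_SHIFT;

    /* after: the BO carries a pointer to its resource */
    if (bo->resource->mem_type == TTM_PL_VRAM)
            offset = bo->resource->start << PAGE_SHIFT;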
commit d3116756a7 (parent 9450129ed9)
Christian König, 2021-04-12 15:11:47 +02:00
49 changed files with 274 additions and 265 deletions

@@ -1666,7 +1666,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 	 * the next restore worker
 	 */
 	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) &&
-	    bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	    bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		is_invalid_userptr = true;
 
 	ret = vm_validate_pt_pd_bos(avm);

@@ -4103,9 +4103,9 @@ static int amdgpu_device_recover_vram(struct amdgpu_device *adev)
 	list_for_each_entry(shadow, &adev->shadow_list, shadow_list) {
 
 		/* No need to recover an evicted BO */
-		if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
-		    shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
-		    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
+		if (shadow->tbo.resource->mem_type != TTM_PL_TT ||
+		    shadow->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET ||
+		    shadow->parent->tbo.resource->mem_type != TTM_PL_VRAM)
 			continue;
 
 		r = amdgpu_bo_restore_shadow(shadow, &next);

@@ -226,12 +226,12 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		if (r)
 			return ERR_PTR(r);
 
-	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+	} else if (!(amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type) &
 		     AMDGPU_GEM_DOMAIN_GTT)) {
 		return ERR_PTR(-EBUSY);
 	}
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		sgt = drm_prime_pages_to_sg(obj->dev,
 					    bo->tbo.ttm->pages,
@@ -245,8 +245,9 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
 		break;
 
 	case TTM_PL_VRAM:
-		r = amdgpu_vram_mgr_alloc_sgt(adev, &bo->tbo.mem, 0,
-					      bo->tbo.base.size, attach->dev, dir, &sgt);
+		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
+					      bo->tbo.base.size, attach->dev,
+					      dir, &sgt);
 		if (r)
 			return ERR_PTR(r);
 		break;
@@ -436,7 +437,7 @@ amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
 	struct amdgpu_vm_bo_base *bo_base;
 	int r;
 
-	if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+	if (bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
 		return;
 
 	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);

@@ -101,7 +101,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 
-	switch (bo->tbo.mem.mem_type) {
+	switch (bo->tbo.resource->mem_type) {
 	case TTM_PL_TT:
 		*addr = bo->tbo.ttm->dma_address[0];
 		break;
@@ -112,7 +112,7 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
 		*addr = 0;
 		break;
 	}
-	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
+	*flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, bo->tbo.resource);
 	amdgpu_gmc_get_vm_pde(adev, level, addr, flags);
 }

@@ -122,7 +122,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
 	int r;
 
 	spin_lock(&mgr->lock);
-	if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
+	if ((tbo->resource == mem || tbo->resource->mem_type != TTM_PL_TT) &&
 	    atomic64_read(&mgr->available) < mem->num_pages) {
 		spin_unlock(&mgr->lock);
 		return -ENOSPC;
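
Note the first comparison above: with the pointer, checking whether the
resource under allocation already belongs to the BO is a plain pointer-identity
test (tbo->resource == mem) rather than a comparison against the address of the
formerly embedded member (&tbo->mem == mem).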

@@ -362,14 +362,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
 	if (cpu_addr)
 		amdgpu_bo_kunmap(*bo_ptr);
 
-	ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+	ttm_resource_free(&(*bo_ptr)->tbo, (*bo_ptr)->tbo.resource);
 
 	for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
 		(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
 		(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
 	}
 	r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
-			     &(*bo_ptr)->tbo.mem, &ctx);
+			     (*bo_ptr)->tbo.resource, &ctx);
 	if (r)
 		goto error;
@@ -573,15 +573,15 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
 		return r;
 
 	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM &&
-	    bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+	    bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
 					     ctx.bytes_moved);
 	else
 		amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
 
 	if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
-	    bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	    bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		struct dma_fence *fence;
 
 		r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
@@ -761,7 +761,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
 	if (r < 0)
 		return r;
 
-	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
 	if (r)
 		return r;
@@ -884,8 +884,8 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 	domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
 
 	if (bo->tbo.pin_count) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
-		uint32_t mem_flags = bo->tbo.mem.placement;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
+		uint32_t mem_flags = bo->tbo.resource->placement;
 
 		if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
 			return -EINVAL;
@@ -935,7 +935,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
 
 	ttm_bo_pin(&bo->tbo);
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
 		atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -987,11 +987,11 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo)
 	if (bo->tbo.base.import_attach)
 		dma_buf_unpin(bo->tbo.base.import_attach);
 
-	if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+	if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
 		atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
 			     &adev->visible_pin_size);
-	} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	} else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
 	}
 }
@@ -1223,7 +1223,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 
 	if (!amdgpu_bo_is_amdgpu_bo(bo))
 		return;
@@ -1234,7 +1234,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
 	amdgpu_bo_kunmap(abo);
 
 	if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
-	    bo->mem.mem_type != TTM_PL_SYSTEM)
+	    bo->resource->mem_type != TTM_PL_SYSTEM)
 		dma_buf_move_notify(abo->tbo.base.dma_buf);
 
 	/* remember the eviction */
@@ -1254,7 +1254,7 @@ void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
 {
 	unsigned int domain;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		*vram_mem += amdgpu_bo_size(bo);
@@ -1296,7 +1296,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
 	if (bo->base.resv == &bo->base._resv)
 		amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
 
-	if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
+	if (bo->resource->mem_type != TTM_PL_VRAM || !bo->resource->mm_node ||
 	    !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
 		return;
@@ -1333,10 +1333,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* Remember that this BO was accessed by the CPU */
 	abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return 0;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
 		return 0;
@@ -1359,9 +1359,9 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 	else if (unlikely(r))
 		return VM_FAULT_SIGBUS;
 
-	offset = bo->mem.start << PAGE_SHIFT;
+	offset = bo->resource->start << PAGE_SHIFT;
 	/* this should never happen */
-	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	if (bo->resource->mem_type == TTM_PL_VRAM &&
 	    (offset + bo->base.size) > adev->gmc.visible_vram_size)
 		return VM_FAULT_SIGBUS;
@@ -1446,11 +1446,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
  */
 u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
 {
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
 	WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
 		     !bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
-	WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
-	WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+	WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+	WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
 		     !(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
 
 	return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1468,8 +1468,8 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	uint64_t offset;
 
-	offset = (bo->tbo.mem.start << PAGE_SHIFT) +
-		 amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+	offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+		 amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
 
 	return amdgpu_gmc_sign_extend(offset);
 }
@@ -1522,7 +1522,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
 	unsigned int pin_count;
 	u64 size;
 
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+	domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
 	switch (domain) {
 	case AMDGPU_GEM_DOMAIN_VRAM:
 		placement = "VRAM";

@@ -219,10 +219,10 @@ static inline bool amdgpu_bo_in_cpu_visible_vram(struct amdgpu_bo *bo)
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
 	struct amdgpu_res_cursor cursor;
 
-	if (bo->tbo.mem.mem_type != TTM_PL_VRAM)
+	if (bo->tbo.resource->mem_type != TTM_PL_VRAM)
 		return false;
 
-	amdgpu_res_first(&bo->tbo.mem, 0, amdgpu_bo_size(bo), &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
 	while (cursor.remaining) {
 		if (cursor.start < adev->gmc.visible_vram_size)
 			return true;
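
The amdgpu_res_cursor helpers used here and in amdgpu_ttm.c below now take the
resource pointer directly. The loop above continues past the end of the hunk;
in sketch form the full iteration pattern is (amdgpu_res_next advances the
cursor by the given number of bytes, as in the amdgpu_fill_buffer hunks below):

    amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &cursor);
    while (cursor.remaining) {
            /* cursor.start / cursor.size describe one contiguous chunk */
            if (cursor.start < adev->gmc.visible_vram_size)
                    return true;
            amdgpu_res_next(&cursor, cursor.size);
    }
    return false;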

@@ -127,8 +127,8 @@ TRACE_EVENT(amdgpu_bo_create,
 
 	    TP_fast_assign(
 			   __entry->bo = bo;
-			   __entry->pages = bo->tbo.mem.num_pages;
-			   __entry->type = bo->tbo.mem.mem_type;
+			   __entry->pages = bo->tbo.resource->num_pages;
+			   __entry->type = bo->tbo.resource->mem_type;
 			   __entry->prefer = bo->preferred_domains;
 			   __entry->allow = bo->allowed_domains;
 			   __entry->visible = bo->flags;

@@ -125,7 +125,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
 		rcu_read_unlock();
 		return;
 	}
-	switch (bo->mem.mem_type) {
+
+	switch (bo->resource->mem_type) {
 	case AMDGPU_PL_GDS:
 	case AMDGPU_PL_GWS:
 	case AMDGPU_PL_OA:
@@ -458,7 +459,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct amdgpu_device *adev;
 	struct amdgpu_bo *abo;
-	struct ttm_resource *old_mem = &bo->mem;
+	struct ttm_resource *old_mem = bo->resource;
 	int r;
 
 	if (new_mem->mem_type == TTM_PL_TT) {
@@ -490,7 +491,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
 			return r;
 
 		amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, bo->resource);
 		ttm_bo_assign_mem(bo, new_mem);
 		goto out;
 	}
@@ -599,7 +600,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
 	struct amdgpu_res_cursor cursor;
 
-	amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor);
+	amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+			 &cursor);
 	return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
 }
@@ -954,12 +956,12 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 	uint64_t addr, flags;
 	int r;
 
-	if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+	if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
 		return 0;
 
 	addr = amdgpu_gmc_agp_addr(bo);
 	if (addr != AMDGPU_BO_INVALID_OFFSET) {
-		bo->mem.start = addr >> PAGE_SHIFT;
+		bo->resource->start = addr >> PAGE_SHIFT;
 	} else {
 
 		/* allocate GART space */
@@ -970,7 +972,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 		placements.fpfn = 0;
 		placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
 		placements.mem_type = TTM_PL_TT;
-		placements.flags = bo->mem.placement;
+		placements.flags = bo->resource->placement;
 
 		r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
 		if (unlikely(r))
@@ -987,8 +989,8 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
 			return r;
 		}
 
-		ttm_resource_free(bo, &bo->mem);
-		bo->mem = tmp;
+		ttm_resource_free(bo, bo->resource);
+		ttm_bo_assign_mem(bo, &tmp);
 	}
 
 	return 0;
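
This is the one hunk that is more than a mechanical rename: with bo->resource
a pointer, the direct structure copy bo->mem = tmp no longer type-checks, so
the code switches to the pre-existing ttm_bo_assign_mem() helper, the same
pattern already visible as context in the move hunks:

    /* before: the resource was copied by value into the BO */
    ttm_resource_free(bo, &bo->mem);
    bo->mem = tmp;

    /* after: freed via the pointer, then assigned through the helper */
    ttm_resource_free(bo, bo->resource);
    ttm_bo_assign_mem(bo, &tmp);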
@@ -1009,7 +1011,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
 	if (!tbo->ttm)
 		return 0;
 
-	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+	flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
 	r = amdgpu_ttm_gart_bind(adev, tbo, flags);
 
 	return r;
@@ -1322,7 +1324,7 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
 static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 					    const struct ttm_place *place)
 {
-	unsigned long num_pages = bo->mem.num_pages;
+	unsigned long num_pages = bo->resource->num_pages;
 	struct amdgpu_res_cursor cursor;
 	struct dma_resv_list *flist;
 	struct dma_fence *f;
@@ -1346,7 +1348,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 		}
 	}
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_TT:
 		if (amdgpu_bo_is_amdgpu_bo(bo) &&
 		    amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@ -1355,7 +1357,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 
 	case TTM_PL_VRAM:
 		/* Check each drm MM node individually */
-		amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT,
+		amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
 				 &cursor);
 		while (cursor.remaining) {
 			if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
@@ -1397,10 +1399,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
 	uint32_t value = 0;
 	int ret = 0;
 
-	if (bo->mem.mem_type != TTM_PL_VRAM)
+	if (bo->resource->mem_type != TTM_PL_VRAM)
 		return -EIO;
 
-	amdgpu_res_first(&bo->mem, offset, len, &cursor);
+	amdgpu_res_first(bo->resource, offset, len, &cursor);
 	while (cursor.remaining) {
 		uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
 		uint64_t bytes = 4 - (cursor.start & 3);
@@ -1917,16 +1919,16 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		return -EINVAL;
 	}
 
-	if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+	if (bo->tbo.resource->mem_type == TTM_PL_TT) {
 		r = amdgpu_ttm_alloc_gart(&bo->tbo);
 		if (r)
 			return r;
 	}
 
-	num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT;
+	num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
 	num_loops = 0;
 
-	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 	while (cursor.remaining) {
 		num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
 		amdgpu_res_next(&cursor, cursor.size);
@@ -1951,12 +1953,13 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
 		}
 	}
 
-	amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor);
+	amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
 	while (cursor.remaining) {
 		uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
 		uint64_t dst_addr = cursor.start;
 
-		dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+		dst_addr += amdgpu_ttm_domain_start(adev,
+						    bo->tbo.resource->mem_type);
 		amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
 					cur_size);

@@ -342,7 +342,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
 		amdgpu_vm_bo_idle(base);
 
 	if (bo->preferred_domains &
-	    amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+	    amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type))
 		return;
 
 	/*
@@ -657,11 +657,11 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
 		if (!bo->parent)
 			continue;
 
-		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
+		ttm_bo_move_to_lru_tail(&bo->tbo, bo->tbo.resource,
 					&vm->lru_bulk_move);
 		if (bo->shadow)
 			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
-						&bo->shadow->tbo.mem,
+						bo->shadow->tbo.resource,
 						&vm->lru_bulk_move);
 	}
 	spin_unlock(&adev->mman.bdev.lru_lock);
@@ -1818,10 +1818,10 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 			struct drm_gem_object *gobj = dma_buf->priv;
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
 
-			if (abo->tbo.mem.mem_type == TTM_PL_VRAM)
+			if (abo->tbo.resource->mem_type == TTM_PL_VRAM)
 				bo = gem_to_amdgpu_bo(gobj);
 		}
-		mem = &bo->tbo.mem;
+		mem = bo->tbo.resource;
 		if (mem->mem_type == TTM_PL_TT)
 			pages_addr = bo->tbo.ttm->dma_address;
 	}
@@ -1881,7 +1881,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
 	 * next command submission.
 	 */
 	if (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv) {
-		uint32_t mem_type = bo->tbo.mem.mem_type;
+		uint32_t mem_type = bo->tbo.resource->mem_type;
 
 		if (!(bo->preferred_domains &
 		      amdgpu_mem_type_to_domain(mem_type)))

@@ -217,7 +217,7 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
 u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-	struct ttm_resource *mem = &bo->tbo.mem;
+	struct ttm_resource *mem = bo->tbo.resource;
 	struct drm_mm_node *nodes = mem->mm_node;
 	unsigned pages = mem->num_pages;
 	u64 usage;

@@ -409,7 +409,7 @@ svm_range_validate_svm_bo(struct amdgpu_device *adev, struct svm_range *prange)
 			pr_debug("reuse old bo svms 0x%p [0x%lx 0x%lx]\n",
 				 prange->svms, prange->start, prange->last);
 
-			prange->ttm_res = &prange->svm_bo->bo->tbo.mem;
+			prange->ttm_res = prange->svm_bo->bo->tbo.resource;
 			return true;
 		}
 
@@ -515,7 +515,7 @@ svm_range_vram_node_new(struct amdgpu_device *adev, struct svm_range *prange,
 
 	svm_bo->bo = bo;
 	prange->svm_bo = svm_bo;
-	prange->ttm_res = &bo->tbo.mem;
+	prange->ttm_res = bo->tbo.resource;
 	prange->offset = 0;
 
 	spin_lock(&svm_bo->list_lock);

@@ -40,12 +40,12 @@ void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
 	const struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
 
 	drm_printf_indent(p, indent, "placement=");
-	drm_print_bits(p, bo->mem.placement, plname, ARRAY_SIZE(plname));
+	drm_print_bits(p, bo->resource->placement, plname, ARRAY_SIZE(plname));
 	drm_printf(p, "\n");
 
-	if (bo->mem.bus.is_iomem)
+	if (bo->resource->bus.is_iomem)
 		drm_printf_indent(p, indent, "bus.offset=%lx\n",
-				  (unsigned long)bo->mem.bus.offset);
+				  (unsigned long)bo->resource->bus.offset);
 }
 EXPORT_SYMBOL(drm_gem_ttm_print_info);

@@ -248,10 +248,10 @@ EXPORT_SYMBOL(drm_gem_vram_put);
 static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
 {
 	/* Keep TTM behavior for now, remove when drivers are audited */
-	if (WARN_ON_ONCE(!gbo->bo.mem.mm_node))
+	if (WARN_ON_ONCE(!gbo->bo.resource->mm_node))
 		return 0;
 
-	return gbo->bo.mem.start;
+	return gbo->bo.resource->start;
 }
 
 /**

@@ -312,7 +312,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 					NOUVEAU_GEM_DOMAIN_GART;
 	else
-	if (chan->chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM)
+	if (chan->chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM)
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
 	else
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;

@@ -433,7 +433,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 	if (nvbo->bo.pin_count) {
 		bool error = evict;
 
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
 			error |= !(domain & NOUVEAU_GEM_DOMAIN_VRAM);
 			break;
@@ -446,7 +446,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 		if (error) {
 			NV_ERROR(drm, "bo %p pinned elsewhere: "
 				      "0x%08x vs 0x%08x\n", bo,
-				 bo->mem.mem_type, domain);
+				 bo->resource->mem_type, domain);
 			ret = -EBUSY;
 		}
 		ttm_bo_pin(&nvbo->bo);
@@ -467,7 +467,7 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t domain, bool contig)
 
 	ttm_bo_pin(&nvbo->bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		drm->gem.vram_available -= bo->base.size;
 		break;
@@ -498,7 +498,7 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo)
 
 	ttm_bo_unpin(&nvbo->bo);
 	if (!nvbo->bo.pin_count) {
-		switch (bo->mem.mem_type) {
+		switch (bo->resource->mem_type) {
 		case TTM_PL_VRAM:
 			drm->gem.vram_available += bo->base.size;
 			break;
@@ -523,7 +523,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
+	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap);
 
 	ttm_bo_unreserve(&nvbo->bo);
 	return ret;
@@ -737,7 +737,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	switch (bo->mem.mem_type) {
+	switch (bo->resource->mem_type) {
 	case TTM_PL_VRAM:
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART,
 					 NOUVEAU_GEM_DOMAIN_CPU);
@@ -754,7 +754,7 @@ static int
 nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
 		     struct ttm_resource *reg)
 {
-	struct nouveau_mem *old_mem = nouveau_mem(&bo->mem);
+	struct nouveau_mem *old_mem = nouveau_mem(bo->resource);
 	struct nouveau_mem *new_mem = nouveau_mem(reg);
 	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
 	int ret;
@@ -809,7 +809,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict,
 		mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
 	ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, ctx->interruptible);
 	if (ret == 0) {
-		ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
+		ret = drm->ttm.move(chan, bo, bo->resource, new_reg);
 		if (ret == 0) {
 			ret = nouveau_fence_new(chan, false, &fence);
 			if (ret == 0) {
@@ -969,7 +969,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
-	struct ttm_resource *old_reg = &bo->mem;
+	struct ttm_resource *old_reg = bo->resource;
 	struct nouveau_drm_tile *new_tile = NULL;
 	int ret = 0;
@@ -1009,7 +1009,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict,
 	if (old_reg->mem_type == TTM_PL_TT &&
 	    new_reg->mem_type == TTM_PL_SYSTEM) {
 		nouveau_ttm_tt_unbind(bo->bdev, bo->ttm);
-		ttm_resource_free(bo, &bo->mem);
+		ttm_resource_free(bo, bo->resource);
 		ttm_bo_assign_mem(bo, new_reg);
 		goto out;
 	}
@@ -1045,7 +1045,7 @@ out:
 	}
 
 out_ntfy:
 	if (ret) {
-		nouveau_bo_move_ntfy(bo, &bo->mem);
+		nouveau_bo_move_ntfy(bo, bo->resource);
 	}
 	return ret;
 }
@@ -1170,7 +1170,7 @@ out:
 			list_del_init(&nvbo->io_reserve_lru);
 			drm_vma_node_unmap(&nvbo->bo.base.vma_node,
 					   bdev->dev_mapping);
-			nouveau_ttm_io_mem_free_locked(drm, &nvbo->bo.mem);
+			nouveau_ttm_io_mem_free_locked(drm, nvbo->bo.resource);
 			goto retry;
 		}
@@ -1200,12 +1200,12 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	/* as long as the bo isn't in vram, and isn't tiled, we've got
 	 * nothing to do here.
 	 */
-	if (bo->mem.mem_type != TTM_PL_VRAM) {
+	if (bo->resource->mem_type != TTM_PL_VRAM) {
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
 		    !nvbo->kind)
 			return 0;
 
-		if (bo->mem.mem_type != TTM_PL_SYSTEM)
+		if (bo->resource->mem_type != TTM_PL_SYSTEM)
 			return 0;
 
 		nouveau_bo_placement_set(nvbo, NOUVEAU_GEM_DOMAIN_GART, 0);
@@ -1213,7 +1213,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 	} else {
 		/* make sure bo is in mappable vram */
 		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
-		    bo->mem.start + bo->mem.num_pages < mappable)
+		    bo->resource->start + bo->resource->num_pages < mappable)
 			return 0;
 
 		for (i = 0; i < nvbo->placement.num_placement; ++i) {

@@ -212,7 +212,7 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
 			args.start = 0;
 			args.limit = chan->vmm->vmm.limit - 1;
 		} else
-		if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
+		if (chan->push.buffer->bo.resource->mem_type == TTM_PL_VRAM) {
 			if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
 				/* nv04 vram pushbuf hack, retarget to its location in
 				 * the framebuffer bar rather than direct vram access..

@@ -378,7 +378,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 		      FBINFO_HWACCEL_FILLRECT |
 		      FBINFO_HWACCEL_IMAGEBLIT;
 	info->fbops = &nouveau_fbcon_sw_ops;
-	info->fix.smem_start = nvbo->bo.mem.bus.offset;
+	info->fix.smem_start = nvbo->bo.resource->bus.offset;
 	info->fix.smem_len = nvbo->bo.base.size;
 
 	info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);

@@ -276,7 +276,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 
 	if (is_power_of_2(nvbo->valid_domains))
 		rep->domain = nvbo->valid_domains;
-	else if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+	else if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -347,11 +347,11 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 	valid_domains &= ~(NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART);
 
 	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-	    bo->mem.mem_type == TTM_PL_VRAM)
+	    bo->resource->mem_type == TTM_PL_VRAM)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 
 	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		 bo->mem.mem_type == TTM_PL_TT)
+		 bo->resource->mem_type == TTM_PL_TT)
 		pref_domains |= NOUVEAU_GEM_DOMAIN_GART;
 
 	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
@@ -561,13 +561,13 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 			if (nvbo->offset == b->presumed.offset &&
-			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			    ((nvbo->bo.resource->mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			     (nvbo->bo.resource->mem_type == TTM_PL_TT &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
 				continue;
 
-			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+			if (nvbo->bo.resource->mem_type == TTM_PL_TT)
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
 			else
 				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
@@ -681,7 +681,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 		}
 
 		if (!nvbo->kmap.virtual) {
-			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages,
 					  &nvbo->kmap);
 			if (ret) {
 				NV_PRINTK(err, cli, "failed kmap for reloc\n");
@@ -870,7 +870,7 @@ revalidate:
 			if (unlikely(cmd != req->suffix0)) {
 				if (!nvbo->kmap.virtual) {
 					ret = ttm_bo_kmap(&nvbo->bo, 0,
-							  nvbo->bo.mem.
+							  nvbo->bo.resource->
 							  num_pages,
 							  &nvbo->kmap);
 					if (ret) {

@@ -77,7 +77,7 @@ int
 nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 		struct nouveau_vma **pvma)
 {
-	struct nouveau_mem *mem = nouveau_mem(&nvbo->bo.mem);
+	struct nouveau_mem *mem = nouveau_mem(nvbo->bo.resource);
 	struct nouveau_vma *vma;
 	struct nvif_vma tmp;
 	int ret;
@@ -96,7 +96,7 @@ nouveau_vma_new(struct nouveau_bo *nvbo, struct nouveau_vmm *vmm,
 	vma->fence = NULL;
 	list_add_tail(&vma->head, &nvbo->vma_list);
 
-	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	if (nvbo->bo.resource->mem_type != TTM_PL_SYSTEM &&
 	    mem->mem.page == nvbo->page) {
 		ret = nvif_vmm_get(&vmm->vmm, LAZY, false, mem->mem.page, 0,
 				   mem->mem.size, &tmp);

(Some files were not shown because too many files have changed in this diff.)