drm/ttm: device naming cleanup

Rename ttm_bo_device to ttm_device.
Rename ttm_bo_driver to ttm_device_funcs.
Rename ttm_bo_global to ttm_global.

Move global and device related functions to ttm_device.[ch].

No functional change.
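
For drivers the change is mechanical. Below is a minimal sketch of the new
naming, using a hypothetical my_mman wrapper; the ttm_device_init() signature
and the ttm_bo_eviction_valuable() helper are taken from the call sites
updated in the diff, while everything named my_* is illustrative only:

  #include <drm/drm_device.h>
  #include <drm/ttm/ttm_bo_api.h>
  #include <drm/ttm/ttm_device.h>	/* new home of the device/global code */

  /* Driver-private manager, mirroring amdgpu_mman/radeon_mman. */
  struct my_mman {
  	struct ttm_device bdev;		/* was: struct ttm_bo_device */
  };

  static struct ttm_device_funcs my_funcs = {	/* was: struct ttm_bo_driver */
  	.eviction_valuable = ttm_bo_eviction_valuable,
  	/* .ttm_tt_create, .ttm_tt_populate, ... keep their old entries */
  };

  static int my_ttm_init(struct my_mman *mman, struct drm_device *ddev,
  			 struct device *dev)
  {
  	/* was: ttm_bo_device_init(); trailing bools as in the qxl call */
  	return ttm_device_init(&mman->bdev, &my_funcs, dev,
  			       ddev->anon_inode->i_mapping,
  			       ddev->vma_offset_manager,
  			       false, false);
  }

  static void my_ttm_fini(struct my_mman *mman)
  {
  	/* was: ttm_bo_device_release() */
  	ttm_device_fini(&mman->bdev);
  }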

Signed-off-by: Christian König <christian.koenig@amd.com>
Acked-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: https://patchwork.freedesktop.org/patch/415222/
Author: Christian König
Date:   2020-10-01 14:51:40 +02:00
Commit: 8af8a109b3 (parent: b99c2c9541)

41 changed files with 759 additions and 715 deletions


@@ -1053,7 +1053,7 @@ static inline struct drm_device *adev_to_drm(struct amdgpu_device *adev)
return &adev->ddev;
}
-static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_bo_device *bdev)
+static inline struct amdgpu_device *amdgpu_ttm_adev(struct ttm_device *bdev)
{
return container_of(bdev, struct amdgpu_device, mman.bdev);
}


@@ -40,13 +40,13 @@ static atomic_t fence_seq = ATOMIC_INIT(0);
* All the BOs in a process share an eviction fence. When process X wants
* to map VRAM memory but TTM can't find enough space, TTM will attempt to
* evict BOs from its LRU list. TTM checks if the BO is valuable to evict
-* by calling ttm_bo_driver->eviction_valuable().
+* by calling ttm_device_funcs->eviction_valuable().
*
-* ttm_bo_driver->eviction_valuable() - will return false if the BO belongs
+* ttm_device_funcs->eviction_valuable() - will return false if the BO belongs
* to process X. Otherwise, it will return true to indicate BO can be
* evicted by TTM.
*
-* If ttm_bo_driver->eviction_valuable returns true, then TTM will continue
+* If ttm_device_funcs->eviction_valuable returns true, then TTM will continue
* the eviction process for that BO by calling ttm_bo_evict --> amdgpu_bo_move
* --> amdgpu_copy_buffer(). This sets up a job in the GPU scheduler.
*
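
For context, a driver-side eviction_valuable() hook under the new naming
might look like this minimal sketch; my_bo_is_owned_by_protected_process()
is hypothetical, standing in for a check along the lines the comment above
describes:

  #include <drm/ttm/ttm_bo_api.h>
  #include <drm/ttm/ttm_placement.h>

  static bool my_bo_is_owned_by_protected_process(struct ttm_buffer_object *bo);

  static bool my_eviction_valuable(struct ttm_buffer_object *bo,
  				   const struct ttm_place *place)
  {
  	/* Hypothetical: never let TTM evict the protected process's BOs. */
  	if (my_bo_is_owned_by_protected_process(bo))
  		return false;

  	/* Otherwise fall back to TTM's generic check. */
  	return ttm_bo_eviction_valuable(bo, place);
  }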


@@ -71,7 +71,7 @@
*/
static int amdgpu_gart_dummy_page_init(struct amdgpu_device *adev)
{
-struct page *dummy_page = ttm_bo_glob.dummy_read_page;
+struct page *dummy_page = ttm_glob.dummy_read_page;
if (adev->dummy_page_addr)
return 0;


@@ -61,10 +61,10 @@
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -646,7 +646,7 @@ out:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct drm_mm_node *mm_node = mem->mm_node;
@@ -893,7 +893,7 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -931,7 +931,7 @@ release_sg:
/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1015,7 +1015,7 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -1155,7 +1155,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
@@ -1180,7 +1180,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1234,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -1278,7 +1278,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1603,7 +1603,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
amdgpu_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver amdgpu_bo_driver = {
+static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1785,7 +1785,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
-r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
@@ -1926,7 +1926,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
-ttm_bo_device_release(&adev->mman.bdev);
+ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}


@@ -60,7 +60,7 @@ struct amdgpu_gtt_mgr {
};
struct amdgpu_mman {
-struct ttm_bo_device bdev;
+struct ttm_device bdev;
bool initialized;
void __iomem *aper_base_kaddr;


@@ -638,15 +638,15 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
-spin_lock(&ttm_bo_glob.lru_lock);
+spin_lock(&ttm_glob.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
-spin_unlock(&ttm_bo_glob.lru_lock);
+spin_unlock(&ttm_glob.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
-spin_lock(&ttm_bo_glob.lru_lock);
+spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
@@ -660,7 +660,7 @@ void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
&bo->shadow->tbo.mem,
&vm->lru_bulk_move);
}
-spin_unlock(&ttm_bo_glob.lru_lock);
+spin_unlock(&ttm_glob.lru_lock);
vm->bulk_moveable = true;
}


@@ -187,7 +187,7 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
struct drm_gem_vram_object *gbo;
struct drm_gem_object *gem;
struct drm_vram_mm *vmm = dev->vram_mm;
-struct ttm_bo_device *bdev;
+struct ttm_device *bdev;
int ret;
size_t acc_size;
@@ -551,7 +551,7 @@ err_drm_gem_object_put:
EXPORT_SYMBOL(drm_gem_vram_fill_create_dumb);
/*
-* Helpers for struct ttm_bo_driver
+* Helpers for struct ttm_device_funcs
*/
static bool drm_is_gem_vram(struct ttm_buffer_object *bo)
@@ -893,7 +893,7 @@ static const struct drm_gem_object_funcs drm_gem_vram_object_funcs = {
* TTM TT
*/
-static void bo_driver_ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *tt)
+static void bo_driver_ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *tt)
{
ttm_tt_destroy_common(bdev, tt);
ttm_tt_fini(tt);
@@ -965,7 +965,7 @@ static int bo_driver_move(struct ttm_buffer_object *bo,
return drm_gem_vram_bo_driver_move(gbo, evict, ctx, new_mem);
}
-static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
+static int bo_driver_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct drm_vram_mm *vmm = drm_vram_mm_of_bdev(bdev);
@@ -985,7 +985,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev,
return 0;
}
-static struct ttm_bo_driver bo_driver = {
+static struct ttm_device_funcs bo_driver = {
.ttm_tt_create = bo_driver_ttm_tt_create,
.ttm_tt_destroy = bo_driver_ttm_tt_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -1036,7 +1036,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
vmm->vram_base = vram_base;
vmm->vram_size = vram_size;
-ret = ttm_bo_device_init(&vmm->bdev, &bo_driver, dev->dev,
+ret = ttm_device_init(&vmm->bdev, &bo_driver, dev->dev,
dev->anon_inode->i_mapping,
dev->vma_offset_manager,
false, true);
@@ -1054,7 +1054,7 @@ static int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
static void drm_vram_mm_cleanup(struct drm_vram_mm *vmm)
{
ttm_range_man_fini(&vmm->bdev, TTM_PL_VRAM);
-ttm_bo_device_release(&vmm->bdev);
+ttm_device_fini(&vmm->bdev);
}
/*


@@ -43,9 +43,9 @@
#include <nvif/if500b.h>
#include <nvif/if900b.h>
-static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+static int nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg);
-static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+static void nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
/*
* NV10-NV40 tiling helpers
@@ -674,7 +674,7 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
+nouveau_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *reg)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -690,7 +690,7 @@ nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm,
}
static void
-nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -1055,7 +1055,7 @@ nouveau_ttm_io_mem_free_locked(struct nouveau_drm *drm,
}
static int
-nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+nouveau_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->client.device);
@@ -1163,7 +1163,7 @@ out:
}
static void
-nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_resource *reg)
+nouveau_ttm_io_mem_free(struct ttm_device *bdev, struct ttm_resource *reg)
{
struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -1223,7 +1223,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}
static int
-nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
{
struct ttm_tt *ttm_dma = (void *)ttm;
@@ -1247,7 +1247,7 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev,
}
static void
-nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct nouveau_drm *drm;
@@ -1264,7 +1264,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
}
static void
-nouveau_ttm_tt_destroy(struct ttm_bo_device *bdev,
+nouveau_ttm_tt_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -1296,7 +1296,7 @@ nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo)
nouveau_bo_move_ntfy(bo, false, NULL);
}
-struct ttm_bo_driver nouveau_bo_driver = {
+struct ttm_device_funcs nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,


@@ -68,7 +68,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
return 0;
}
-extern struct ttm_bo_driver nouveau_bo_driver;
+extern struct ttm_device_funcs nouveau_bo_driver;
void nouveau_bo_move_init(struct nouveau_drm *);
struct nouveau_bo *nouveau_bo_alloc(struct nouveau_cli *, u64 *size, int *align,


@@ -151,7 +151,7 @@ struct nouveau_drm {
/* TTM interface support */
struct {
-struct ttm_bo_device bdev;
+struct ttm_device bdev;
atomic_t validate_sequence;
int (*move)(struct nouveau_channel *,
struct ttm_buffer_object *,


@@ -16,7 +16,7 @@ struct nouveau_sgdma_be {
};
void
-nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
@@ -29,7 +29,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
}
int
-nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
+nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_drm *drm = nouveau_bdev(bdev);
@@ -56,7 +56,7 @@ nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_re
}
void
-nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
if (nvbe->mem) {


@@ -324,10 +324,10 @@ nouveau_ttm_init(struct nouveau_drm *drm)
need_swiotlb = !!swiotlb_nr_tbl();
#endif
-ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
-drm->dev->dev, dev->anon_inode->i_mapping,
-dev->vma_offset_manager, need_swiotlb,
-drm->client.mmu.dmabits <= 32);
+ret = ttm_device_init(&drm->ttm.bdev, &nouveau_bo_driver, drm->dev->dev,
+dev->anon_inode->i_mapping,
+dev->vma_offset_manager, need_swiotlb,
+drm->client.mmu.dmabits <= 32);
if (ret) {
NV_ERROR(drm, "error initialising bo driver, %d\n", ret);
return ret;
@@ -377,7 +377,7 @@ nouveau_ttm_fini(struct nouveau_drm *drm)
nouveau_ttm_fini_vram(drm);
nouveau_ttm_fini_gtt(drm);
-ttm_bo_device_release(&drm->ttm.bdev);
+ttm_device_fini(&drm->ttm.bdev);
arch_phys_wc_del(drm->ttm.mtrr);
drm->ttm.mtrr = 0;


@@ -3,7 +3,7 @@
#define __NOUVEAU_TTM_H__
static inline struct nouveau_drm *
-nouveau_bdev(struct ttm_bo_device *bd)
+nouveau_bdev(struct ttm_device *bd)
{
return container_of(bd, struct nouveau_drm, ttm.bdev);
}
@@ -22,7 +22,7 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
int nouveau_ttm_global_init(struct nouveau_drm *);
void nouveau_ttm_global_release(struct nouveau_drm *);
-int nouveau_sgdma_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
-void nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-void nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+int nouveau_sgdma_bind(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg);
+void nouveau_sgdma_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
+void nouveau_sgdma_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
#endif


@@ -125,7 +125,7 @@ struct qxl_output {
#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc)
struct qxl_mman {
-struct ttm_bo_device bdev;
+struct ttm_device bdev;
};
struct qxl_memslot {
@@ -335,7 +335,7 @@ int qxl_mode_dumb_mmap(struct drm_file *filp,
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
void qxl_ttm_fini(struct qxl_device *qdev);
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem);
/* qxl image */


@@ -429,7 +429,7 @@ void qxl_release_unmap(struct qxl_device *qdev,
void qxl_release_fence_buffer_objects(struct qxl_release *release)
{
struct ttm_buffer_object *bo;
-struct ttm_bo_device *bdev;
+struct ttm_device *bdev;
struct ttm_validate_buffer *entry;
struct qxl_device *qdev;
@@ -450,7 +450,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
-spin_lock(&ttm_bo_glob.lru_lock);
+spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
@@ -459,7 +459,7 @@ void qxl_release_fence_buffer_objects(struct qxl_release *release)
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
dma_resv_unlock(bo->base.resv);
}
-spin_unlock(&ttm_bo_glob.lru_lock);
+spin_unlock(&ttm_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}


@@ -36,7 +36,7 @@
#include "qxl_drv.h"
#include "qxl_object.h"
-static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
+static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
{
struct qxl_mman *mman;
struct qxl_device *qdev;
@@ -69,7 +69,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo,
*placement = qbo->placement;
}
-int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
struct ttm_resource *mem)
{
struct qxl_device *qdev = qxl_get_qdev(bdev);
@@ -98,8 +98,7 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
/*
* TTM backend functions.
*/
-static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev,
-struct ttm_tt *ttm)
+static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
ttm_tt_destroy_common(bdev, ttm);
ttm_tt_fini(ttm);
@@ -170,7 +169,7 @@ static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
qxl_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver qxl_bo_driver = {
+static struct ttm_device_funcs qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
.ttm_tt_destroy = &qxl_ttm_backend_destroy,
.eviction_valuable = ttm_bo_eviction_valuable,
@@ -193,10 +192,10 @@ int qxl_ttm_init(struct qxl_device *qdev)
int num_io_pages; /* != rom->num_io_pages, we include surface0 */
/* No others user of address space so set it to 0 */
-r = ttm_bo_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
-qdev->ddev.anon_inode->i_mapping,
-qdev->ddev.vma_offset_manager,
-false, false);
+r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
+qdev->ddev.anon_inode->i_mapping,
+qdev->ddev.vma_offset_manager,
+false, false);
if (r) {
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
return r;
@@ -227,7 +226,7 @@ void qxl_ttm_fini(struct qxl_device *qdev)
{
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
-ttm_bo_device_release(&qdev->mman.bdev);
+ttm_device_fini(&qdev->mman.bdev);
DRM_INFO("qxl: ttm finalized\n");
}


@@ -451,7 +451,7 @@ struct radeon_surface_reg {
* TTM.
*/
struct radeon_mman {
-struct ttm_bo_device bdev;
+struct ttm_device bdev;
bool initialized;
#if defined(CONFIG_DEBUG_FS)
@@ -2822,7 +2822,7 @@ extern int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
uint32_t flags);
extern bool radeon_ttm_tt_has_userptr(struct radeon_device *rdev, struct ttm_tt *ttm);
extern bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, struct ttm_tt *ttm);
-bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+bool radeon_ttm_tt_is_bound(struct ttm_device *bdev, struct ttm_tt *ttm);
extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base);
extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc);
extern int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
@@ -2832,7 +2832,7 @@ extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size
extern void radeon_program_register_sequence(struct radeon_device *rdev,
const u32 *registers,
const u32 array_size);
-struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev);
+struct radeon_device *radeon_get_rdev(struct ttm_device *bdev);
/* KMS */


@@ -372,7 +372,7 @@ void radeon_bo_unpin(struct radeon_bo *bo)
int radeon_bo_evict_vram(struct radeon_device *rdev)
{
-struct ttm_bo_device *bdev = &rdev->mman.bdev;
+struct ttm_device *bdev = &rdev->mman.bdev;
struct ttm_resource_manager *man;
/* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */


@@ -55,13 +55,11 @@
static int radeon_ttm_debugfs_init(struct radeon_device *rdev);
static void radeon_ttm_debugfs_fini(struct radeon_device *rdev);
-static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
-struct ttm_tt *ttm,
+static int radeon_ttm_tt_bind(struct ttm_device *bdev, struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
-struct ttm_tt *ttm);
+static void radeon_ttm_tt_unbind(struct ttm_device *bdev, struct ttm_tt *ttm);
-struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
+struct radeon_device *radeon_get_rdev(struct ttm_device *bdev)
{
struct radeon_mman *mman;
struct radeon_device *rdev;
@@ -280,7 +278,7 @@ out:
return 0;
}
-static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
@@ -347,7 +345,7 @@ struct radeon_ttm_tt {
};
/* prepare the sg table with the user pages */
-static int radeon_ttm_tt_pin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static int radeon_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -408,7 +406,7 @@ release_pages:
return r;
}
-static void radeon_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -444,7 +442,7 @@ static bool radeon_ttm_backend_is_bound(struct ttm_tt *ttm)
return (gtt->bound);
}
-static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int radeon_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -480,7 +478,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev,
return 0;
}
-static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
struct radeon_device *rdev = radeon_get_rdev(bdev);
@@ -495,7 +493,7 @@ static void radeon_ttm_backend_unbind(struct ttm_bo_device *bdev, struct ttm_tt
gtt->bound = false;
}
-static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_ttm_tt *gtt = (void *)ttm;
@@ -554,7 +552,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev,
return container_of(ttm, struct radeon_ttm_tt, ttm);
}
-static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int radeon_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -580,7 +578,7 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev,
return ttm_pool_alloc(&rdev->mman.bdev.pool, ttm, ctx);
}
-static void radeon_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
+static void radeon_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(bdev);
struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(rdev, ttm);
@@ -613,7 +611,7 @@ int radeon_ttm_tt_set_userptr(struct radeon_device *rdev,
return 0;
}
-bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
+bool radeon_ttm_tt_is_bound(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -624,7 +622,7 @@ bool radeon_ttm_tt_is_bound(struct ttm_bo_device *bdev,
return radeon_ttm_backend_is_bound(ttm);
}
-static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
+static int radeon_ttm_tt_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -642,7 +640,7 @@ static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev,
return radeon_ttm_backend_bind(bdev, ttm, bo_mem);
}
-static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
+static void radeon_ttm_tt_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -656,7 +654,7 @@ static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev,
radeon_ttm_backend_unbind(bdev, ttm);
}
-static void radeon_ttm_tt_destroy(struct ttm_bo_device *bdev,
+static void radeon_ttm_tt_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
#if IS_ENABLED(CONFIG_AGP)
@@ -700,7 +698,7 @@ radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo)
radeon_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver radeon_bo_driver = {
+static struct ttm_device_funcs radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
@@ -718,7 +716,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
int r;
/* No others user of address space so set it to 0 */
-r = ttm_bo_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
+r = ttm_device_init(&rdev->mman.bdev, &radeon_bo_driver, rdev->dev,
rdev->ddev->anon_inode->i_mapping,
rdev->ddev->vma_offset_manager,
rdev->need_swiotlb,
@@ -791,7 +789,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
}
ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_VRAM);
ttm_range_man_fini(&rdev->mman.bdev, TTM_PL_TT);
-ttm_bo_device_release(&rdev->mman.bdev);
+ttm_device_fini(&rdev->mman.bdev);
radeon_gart_fini(rdev);
rdev->mman.initialized = false;
DRM_INFO("radeon: ttm finalized\n");


@@ -5,7 +5,7 @@
ttm-y := ttm_memory.o ttm_tt.o ttm_bo.o \
ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_range_manager.o \
-ttm_resource.o ttm_pool.o
+ttm_resource.o ttm_pool.o ttm_device.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o

Some files were not shown because too many files changed in this diff.