Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (45 commits)
  drm/nv04: Fix set_operation software method.
  drm/nouveau: initialise DMA tracking parameters earlier
  drm/nouveau: use dma.max rather than pushbuf size for checking GET validity
  drm/nv04: differentiate between nv04/nv05
  drm/nouveau: Fix null deref in nouveau_fence_emit due to deleted fence
  drm/nv50: prevent a possible ctxprog hang
  drm/nouveau: have ttm's fault handler called directly
  drm/nv50: restore correct cache1 get/put address on fifoctx load
  drm/nouveau: create function for "dealing" with gpu lockup
  drm/nouveau: remove unused nouveau_channel_idle() function
  drm/nouveau: fix handling of fbcon colours in 8bpp
  drm/nv04: Context switching fixes.
  drm/nouveau: Use the software object for fencing.
  drm/nouveau: Allocate a per-channel instance of NV_SW.
  drm/nv50: make the blocksize depend on vram size
  drm/nouveau: better alignment of bo sizes and use roundup instead of ALIGN
  drm/nouveau: Don't skip card take down on nv0x.
  drm/nouveau: Implement nv42-nv43 TV load detection.
  drm/nouveau: Clean up the nv17-nv4x load detection code a bit.
  drm/nv50: fix fillrect color
  ...
@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
};

static struct drm_prop_enum_list drm_encoder_enum_list[] =

@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_helper_crtc_in_use);

/**
 * drm_disable_unused_functions - disable unused objects
 * drm_helper_disable_unused_functions - disable unused objects
 * @dev: DRM device
 *
 * LOCKING:

@@ -1032,7 +1032,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
	/*
	 * we shouldn't end up with no modes here.
	 */
	WARN(!count, "No connectors reported connected with modes\n");
	printk(KERN_INFO "No connectors reported conncted with modes\n");

	drm_setup_crtcs(dev);

@@ -1162,6 +1162,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
int drm_helper_resume_force_mode(struct drm_device *dev)
{
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_encoder_helper_funcs *encoder_funcs;
	struct drm_crtc_helper_funcs *crtc_funcs;
	int ret;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1177,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)

		if (ret == false)
			DRM_ERROR("failed to set mode on crtc %p\n", crtc);

		/* Turn off outputs that were already powered off */
		if (drm_helper_choose_crtc_dpms(crtc)) {
			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {

				if(encoder->crtc != crtc)
					continue;

				encoder_funcs = encoder->helper_private;
				if (encoder_funcs->dpms)
					(*encoder_funcs->dpms) (encoder,
								drm_helper_choose_encoder_dpms(encoder));

				crtc_funcs = crtc->helper_private;
				if (crtc_funcs->dpms)
					(*crtc_funcs->dpms) (crtc,
							     drm_helper_choose_crtc_dpms(crtc));
			}
		}
	}
	/* disable the unused connectors while restoring the modesetting */
	drm_helper_disable_unused_functions(dev);

@@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
		return -EINVAL;

	/* Need to resize the fb object !!! */
	if (var->xres > fb->width || var->yres > fb->height) {
		DRM_ERROR("Requested width/height is greater than current fb "
			  "object %dx%d > %dx%d\n", var->xres, var->yres,
			  fb->width, fb->height);
		DRM_ERROR("Need resizing code.\n");
	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
			  fb->width, fb->height, fb->bits_per_pixel);
		return -EINVAL;
	}


@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)

	dev->num_crtcs = 0;
}
EXPORT_SYMBOL(drm_vblank_cleanup);

int drm_vblank_init(struct drm_device *dev, int num_crtcs)
{
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
	}

	dev->vblank_disable_allowed = 0;

	return 0;

err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
 */
void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
{
	/* vblank is not initialized (IRQ not installed ?) */
	if (!dev->num_crtcs)
		return;
	/*
	 * To avoid all the problems that might happen if interrupts
	 * were enabled/disabled around or between these calls, we just
@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
	  via debugfs.

menu "I2C encoder or helper chips"
	depends on DRM && I2C
	depends on DRM && DRM_KMS_HELPER && I2C

config DRM_I2C_CH7006
	tristate "Chrontel ch7006 TV encoder"
	depends on DRM_NOUVEAU
	default m
	default m if DRM_NOUVEAU
	help
	  Support for Chrontel ch7006 and similar TV encoders, found
	  on some nVidia video cards.
@@ -33,10 +33,13 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to to that as well as the page size. Overallocate memory to
	 * avoid corruption of other buffer objects.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			*size = roundup(*size, block_size);
			if (is_power_of_2(block_size)) {
				*size += 3 * block_size;
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				*size += 6 * block_size;
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			break;
		default:
			break;
		}

	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret, n = 0;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to to that as well as the page size. Overallocate memory to
	 * avoid corruption of other buffer objects.
	 */
	switch (tile_flags) {
	case 0x1800:
	case 0x2800:
	case 0x4800:
	case 0x7a00:
		if (dev_priv->chipset >= 0xA0) {
			/* This is based on high end cards with 448 bits
			 * memory bus, could be different elsewhere.*/
			size += 6 * 28672;
			/* 8 * 28672 is the actual alignment requirement,
			 * but we must also align to page size. */
			align = 2 * 8 * 28672;
		} else if (dev_priv->chipset >= 0x90) {
			size += 3 * 16384;
			align = 12 * 16834;
		} else {
			size += 3 * 8192;
			/* 12 * 8192 is the actual alignment requirement,
			 * but we must also align to page size. */
			align = 2 * 12 * 8192;
		}
		break;
	default:
		break;
	}

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
	if (dev_priv->card_type == NV_50) {
		size = (size + 65535) & ~65535;
		if (align < (65536 / PAGE_SIZE))
			align = (65536 / PAGE_SIZE);
	}

	if (flags & TTM_PL_FLAG_VRAM)
		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
	if (flags & TTM_PL_FLAG_TT)
		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nvbo->placement.placement = nvbo->placements;
	nvbo->placement.busy_placement = nvbo->placements;
	nvbo->placement.num_placement = n;
	nvbo->placement.num_busy_placement = n;
	nouveau_bo_placement_set(nvbo, flags);

	nvbo->channel = chan;
	nouveau_bo_placement_set(nvbo, flags);
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
@@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -455,11 +489,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
		     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
@@ -547,7 +582,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = &placement_memtype;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
@@ -559,7 +594,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

@@ -585,7 +620,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = &placement_memtype;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
@@ -597,7 +632,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
	if (ret)
		goto out;
@@ -612,52 +647,106 @@ out:
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_device *dev = dev_priv->dev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
	    !nvbo->no_vm) {
		uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel)
		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		return 0;
		goto out;
	}

	if (new_mem->mem_type == TTM_PL_SYSTEM) {
		if (old_mem->mem_type == TTM_PL_SYSTEM)
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
		if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}
	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	return 0;
	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int

@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
		return ret;
	}

	nouveau_dma_pre_init(chan);

	/* Locate channel's user control regs */
	if (dev_priv->card_type < NV_40)
		user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
	return 0;
}

int
nouveau_channel_idle(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t caches;
	int idle;

	if (!chan) {
		NV_ERROR(dev, "no channel...\n");
		return 1;
	}

	caches = nv_rd32(dev, NV03_PFIFO_CACHES);
	nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);

	if (engine->fifo.channel_id(dev) != chan->id) {
		struct nouveau_gpuobj *ramfc =
			chan->ramfc ? chan->ramfc->gpuobj : NULL;

		if (!ramfc) {
			NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
			return 1;
		}

		engine->instmem.prepare_access(dev, false);
		if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
			idle = 0;
		else
			idle = 1;
		engine->instmem.finish_access(dev);
	} else {
		idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
			nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
	}

	nv_wr32(dev, NV03_PFIFO_CACHES, caches);
	return idle;
}

/* stops a fifo */
void
nouveau_channel_free(struct nouveau_channel *chan)
@@ -414,7 +375,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
		init->subchan[0].grclass = 0x0039;
	else
		init->subchan[0].grclass = 0x5039;
	init->nr_subchan = 1;
	init->subchan[1].handle = NvSw;
	init->subchan[1].grclass = NV_SW;
	init->nr_subchan = 2;

	/* Named memory object area */
	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,

@@ -29,12 +29,22 @@
#include "nouveau_drv.h"
#include "nouveau_dma.h"

void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}

int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *m2mf = NULL;
	struct nouveau_gpuobj *nvsw = NULL;
	int ret, i;

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
	if (ret)
		return ret;

	/* Create an NV_SW object for various sync purposes */
	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
	if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
		return ret;
	}

	/* Initialise DMA vars */
	chan->dma.max = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
	OUT_RING(chan, NvNotify0);

	/* Initialise NV_SW */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubSw, 0, 1);
	OUT_RING(chan, NvSw);

	/* Sit back and pray the channel works.. */
	FIRE_RING(chan);

@@ -113,7 +133,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *get)

	val = nvchan_rd32(chan, chan->user_get);
	if (val < chan->pushbuf_base ||
	    val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
	    val > chan->pushbuf_base + (chan->dma.max << 2)) {
		/* meaningless to dma_wait() except to know whether the
		 * GPU has stalled or not
		 */

@@ -46,10 +46,11 @@
/* Hardcoded object assignments to subchannels (subchannel id). */
enum {
	NvSubM2MF = 0,
	NvSub2D = 1,
	NvSubCtxSurf2D = 1,
	NvSubGdiRect = 2,
	NvSubImageBlit = 3
	NvSubSw = 1,
	NvSub2D = 2,
	NvSubCtxSurf2D = 2,
	NvSubGdiRect = 3,
	NvSubImageBlit = 4
};

/* Object handles. */
@@ -67,6 +68,7 @@ enum {
	NvClipRect = 0x8000000b,
	NvGdiRect = 0x8000000c,
	NvImageBlit = 0x8000000d,
	NvSw = 0x8000000e,

	/* G80+ display objects */
	NvEvoVRAM = 0x01000000,

@@ -59,11 +59,19 @@ struct nouveau_grctx;
#define MAX_NUM_DCB_ENTRIES 16

#define NOUVEAU_MAX_CHANNEL_NR 128
#define NOUVEAU_MAX_TILE_NR 15

#define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
#define NV50_VM_BLOCK (512*1024*1024ULL)
#define NV50_VM_VRAM_NR (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)

struct nouveau_tile_reg {
	struct nouveau_fence *fence;
	uint32_t addr;
	uint32_t size;
	bool used;
};

struct nouveau_bo {
	struct ttm_buffer_object bo;
	struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {

	uint32_t tile_mode;
	uint32_t tile_flags;
	struct nouveau_tile_reg *tile;

	struct drm_gem_object *gem;
	struct drm_file *cpu_filp;
@@ -277,8 +286,13 @@ struct nouveau_timer_engine {
};

struct nouveau_fb_engine {
	int num_tiles;

	int (*init)(struct drm_device *dev);
	void (*takedown)(struct drm_device *dev);

	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
				  uint32_t size, uint32_t pitch);
};

struct nouveau_fifo_engine {
@@ -292,6 +306,8 @@ struct nouveau_fifo_engine {
	void (*disable)(struct drm_device *);
	void (*enable)(struct drm_device *);
	bool (*reassign)(struct drm_device *, bool enable);
	bool (*cache_flush)(struct drm_device *dev);
	bool (*cache_pull)(struct drm_device *dev, bool enable);

	int (*channel_id)(struct drm_device *);

@@ -330,6 +346,9 @@ struct nouveau_pgraph_engine {
	void (*destroy_context)(struct nouveau_channel *);
	int (*load_context)(struct nouveau_channel *);
	int (*unload_context)(struct drm_device *);

	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
				  uint32_t size, uint32_t pitch);
};

struct nouveau_engine {
@@ -548,6 +567,12 @@ struct drm_nouveau_private {
		unsigned long sg_handle;
	} gart_info;

	/* nv10-nv40 tiling regions */
	struct {
		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
		spinlock_t lock;
	} tile;

	/* G8x/G9x virtual address space */
	uint64_t vm_gart_base;
	uint64_t vm_gart_size;
@@ -685,6 +710,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
extern int nouveau_mem_init(struct drm_device *);
extern int nouveau_mem_init_agp(struct drm_device *);
extern void nouveau_mem_close(struct drm_device *);
extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
						    uint32_t addr,
						    uint32_t size,
						    uint32_t pitch);
extern void nv10_mem_expire_tiling(struct drm_device *dev,
				   struct nouveau_tile_reg *tile,
				   struct nouveau_fence *fence);
extern int nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
				   uint32_t size, uint32_t flags,
				   uint64_t phys);
@@ -713,7 +745,6 @@ extern int nouveau_channel_alloc(struct drm_device *dev,
				 struct drm_file *file_priv,
				 uint32_t fb_ctxdma, uint32_t tt_ctxdma);
extern void nouveau_channel_free(struct nouveau_channel *);
extern int nouveau_channel_idle(struct nouveau_channel *chan);

/* nouveau_object.c */
extern int nouveau_gpuobj_early_init(struct drm_device *);
@@ -756,6 +787,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
				       uint32_t *o_ret);
extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
				 struct nouveau_gpuobj **);
extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
				 struct nouveau_gpuobj **);
extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
				     struct drm_file *);
extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -804,6 +837,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
#endif

/* nouveau_dma.c */
extern void nouveau_dma_pre_init(struct nouveau_channel *);
extern int nouveau_dma_init(struct nouveau_channel *);
extern int nouveau_dma_wait(struct nouveau_channel *, int size);

@@ -879,16 +913,22 @@ extern void nv04_fb_takedown(struct drm_device *);
/* nv10_fb.c */
extern int nv10_fb_init(struct drm_device *);
extern void nv10_fb_takedown(struct drm_device *);
extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
				      uint32_t, uint32_t);

/* nv40_fb.c */
extern int nv40_fb_init(struct drm_device *);
extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
				      uint32_t, uint32_t);

/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
extern void nv04_fifo_enable(struct drm_device *);
extern bool nv04_fifo_reassign(struct drm_device *, bool);
extern bool nv04_fifo_cache_flush(struct drm_device *);
extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
extern int nv04_fifo_channel_id(struct drm_device *);
extern int nv04_fifo_create_context(struct nouveau_channel *);
extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -941,6 +981,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
extern int nv10_graph_load_context(struct nouveau_channel *);
extern int nv10_graph_unload_context(struct drm_device *);
extern void nv10_graph_context_switch(struct drm_device *);
extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
					 uint32_t, uint32_t);

/* nv20_graph.c */
extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -952,6 +994,8 @@ extern int nv20_graph_unload_context(struct drm_device *);
extern int nv20_graph_init(struct drm_device *);
extern void nv20_graph_takedown(struct drm_device *);
extern int nv30_graph_init(struct drm_device *);
extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
					 uint32_t, uint32_t);

/* nv40_graph.c */
extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -963,6 +1007,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
extern int nv40_graph_load_context(struct nouveau_channel *);
extern int nv40_graph_unload_context(struct drm_device *);
extern void nv40_grctx_init(struct nouveau_grctx *);
extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
					 uint32_t, uint32_t);

/* nv50_graph.c */
extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -1030,8 +1076,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,

/* nv04_dac.c */
extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
						 struct drm_connector *connector);
extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
extern int nv04_dac_output_offset(struct drm_encoder *encoder);
extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);

@@ -1049,9 +1094,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);

/* nv17_tv.c */
extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
						struct drm_connector *connector,
						uint32_t pin_mask);

/* nv04_display.c */
extern int nv04_display_create(struct drm_device *);
@@ -1290,14 +1332,14 @@ nv_two_reg_pll(struct drm_device *dev)
	return false;
}

#define NV50_NVSW 0x0000506e
#define NV50_NVSW_DMA_SEMAPHORE 0x00000060
#define NV50_NVSW_SEMAPHORE_OFFSET 0x00000064
#define NV50_NVSW_SEMAPHORE_ACQUIRE 0x00000068
#define NV50_NVSW_SEMAPHORE_RELEASE 0x0000006c
#define NV50_NVSW_DMA_VBLSEM 0x0000018c
#define NV50_NVSW_VBLSEM_OFFSET 0x00000400
#define NV50_NVSW_VBLSEM_RELEASE_VALUE 0x00000404
#define NV50_NVSW_VBLSEM_RELEASE 0x00000408
#define NV_SW 0x0000506e
#define NV_SW_DMA_SEMAPHORE 0x00000060
#define NV_SW_SEMAPHORE_OFFSET 0x00000064
#define NV_SW_SEMAPHORE_ACQUIRE 0x00000068
#define NV_SW_SEMAPHORE_RELEASE 0x0000006c
#define NV_SW_DMA_VBLSEM 0x0000018c
#define NV_SW_VBLSEM_OFFSET 0x00000400
#define NV_SW_VBLSEM_RELEASE_VALUE 0x00000404
#define NV_SW_VBLSEM_RELEASE 0x00000408

#endif /* __NOUVEAU_DRV_H__ */

@@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info)
		return 0;

	if (RING_SPACE(chan, 4)) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
	}

	if (ret) {
		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
		info->flags |= FBINFO_HWACCEL_DISABLED;
		nouveau_fbcon_gpu_lockup(info);
		return 0;
	}

@@ -212,11 +210,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,

	mode_cmd.bpp = surface_bpp;
	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
	mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
	mode_cmd.depth = surface_depth;

	size = mode_cmd.pitch * mode_cmd.height;
	size = ALIGN(size, PAGE_SIZE);
	size = roundup(size, PAGE_SIZE);

	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
			      0, 0x0000, false, true, &nvbo);
@@ -380,3 +378,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)

	return 0;
}

void nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;
	struct drm_device *dev = par->dev;

	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}

@@ -43,5 +43,6 @@ void nouveau_fbcon_zfill(struct drm_device *dev);
int nv04_fbcon_accel_init(struct fb_info *info);
int nv50_fbcon_accel_init(struct fb_info *info);

void nouveau_fbcon_gpu_lockup(struct fb_info *info);
#endif /* __NV50_FBCON_H__ */

@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
	list_add_tail(&fence->entry, &chan->fence.pending);
	spin_unlock_irqrestore(&chan->fence.lock, flags);

	BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
	OUT_RING(chan, fence->sequence);
	FIRE_RING(chan);

@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
}

struct validate_op {
	struct nouveau_fence *fence;
	struct list_head vram_list;
	struct list_head gart_list;
	struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
}

static void
validate_fini(struct validate_op *op, bool success)
validate_fini(struct validate_op *op, struct nouveau_fence* fence)
{
	struct nouveau_fence *fence = op->fence;

	if (unlikely(!success))
		op->fence = NULL;

	validate_fini_list(&op->vram_list, op->fence);
	validate_fini_list(&op->gart_list, op->fence);
	validate_fini_list(&op->both_list, op->fence);
	nouveau_fence_unref((void *)&fence);
	validate_fini_list(&op->vram_list, fence);
	validate_fini_list(&op->gart_list, fence);
	validate_fini_list(&op->both_list, fence);
}

static int
@@ -420,10 +413,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
	INIT_LIST_HEAD(&op->gart_list);
	INIT_LIST_HEAD(&op->both_list);

	ret = nouveau_fence_new(chan, &op->fence, false);
	if (ret)
		return ret;

	if (nr_buffers == 0)
		return 0;

@@ -541,6 +530,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
	struct nouveau_channel *chan;
	struct validate_op op;
	struct nouveau_fence* fence = 0;
	uint32_t *pushbuf = NULL;
	int ret = 0, do_reloc = 0, i;

@@ -597,7 +587,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,

	OUT_RINGp(chan, pushbuf, req->nr_dwords);

	ret = nouveau_fence_emit(op.fence);
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
@@ -605,7 +595,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
	}

	if (nouveau_gem_pushbuf_sync(chan)) {
		ret = nouveau_fence_wait(op.fence, NULL, false, false);
		ret = nouveau_fence_wait(fence, NULL, false, false);
		if (ret) {
			for (i = 0; i < req->nr_dwords; i++)
				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -614,7 +604,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
	}

out:
	validate_fini(&op, ret == 0);
	validate_fini(&op, fence);
	nouveau_fence_unref((void**)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(pushbuf);
	kfree(bo);
@@ -634,6 +625,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
	struct drm_gem_object *gem;
	struct nouveau_bo *pbbo;
	struct validate_op op;
	struct nouveau_fence* fence = 0;
	int i, ret = 0, do_reloc = 0;

	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -772,7 +764,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
		OUT_RING(chan, 0);
	}

	ret = nouveau_fence_emit(op.fence);
	ret = nouveau_fence_new(chan, &fence, true);
	if (ret) {
		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
		WIND_RING(chan);
@@ -780,7 +772,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
	}

out:
	validate_fini(&op, ret == 0);
	validate_fini(&op, fence);
	nouveau_fence_unref((void**)&fence);
	mutex_unlock(&dev->struct_mutex);
	kfree(bo);

@@ -635,6 +635,7 @@ nv50_pgraph_irq_handler(struct drm_device *dev)

		if ((nv_rd32(dev, 0x400500) & isb) != isb)
			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);

@@ -191,6 +191,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
	}
}

/*
 * NV10-NV40 tiling helpers
 */

static void
nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
			   uint32_t size, uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	tile->addr = addr;
	tile->size = size;
	tile->used = !!pitch;
	nouveau_fence_unref((void **)&tile->fence);

	if (!pfifo->cache_flush(dev))
		return;

	pfifo->reassign(dev, false);
	pfifo->cache_flush(dev);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pgraph->set_region_tiling(dev, i, addr, size, pitch);
	pfb->set_region_tiling(dev, i, addr, size, pitch);

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
}

struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
	int i;

	spin_lock(&dev_priv->tile.lock);

	for (i = 0; i < pfb->num_tiles; i++) {
		if (tile[i].used)
			/* Tile region in use. */
			continue;

		if (tile[i].fence &&
		    !nouveau_fence_signalled(tile[i].fence, NULL))
			/* Pending tile region. */
			continue;

		if (max(tile[i].addr, addr) <
		    min(tile[i].addr + tile[i].size, addr + size))
			/* Kill an intersecting tile region. */
			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);

		if (pitch && !found) {
			/* Free tile region. */
			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
			found = &tile[i];
		}
	}

	spin_unlock(&dev_priv->tile.lock);

	return found;
}

void
nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
		       struct nouveau_fence *fence)
{
	if (fence) {
		/* Mark it as pending. */
		tile->fence = fence;
		nouveau_fence_ref(fence);
	}

	tile->used = false;
}

/*
 * NV50 VM helpers
 */
@@ -513,6 +599,7 @@ nouveau_mem_init(struct drm_device *dev)

	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
	spin_lock_init(&dev_priv->ttm.bo_list_lock);
	spin_lock_init(&dev_priv->tile.lock);

	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);

@@ -881,7 +881,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
	return 0;
}

static int
int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{

@@ -349,19 +349,19 @@
#define NV04_PGRAPH_BLEND 0x00400824
#define NV04_PGRAPH_STORED_FMT 0x00400830
#define NV04_PGRAPH_PATT_COLORRAM 0x00400900
#define NV40_PGRAPH_TILE0(i) (0x00400900 + (i*16))
#define NV40_PGRAPH_TLIMIT0(i) (0x00400904 + (i*16))
#define NV40_PGRAPH_TSIZE0(i) (0x00400908 + (i*16))
#define NV40_PGRAPH_TSTATUS0(i) (0x0040090C + (i*16))
#define NV20_PGRAPH_TILE(i) (0x00400900 + (i*16))
#define NV20_PGRAPH_TLIMIT(i) (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i) (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))
#define NV10_PGRAPH_TILE(i) (0x00400B00 + (i*16))
#define NV10_PGRAPH_TLIMIT(i) (0x00400B04 + (i*16))
#define NV10_PGRAPH_TSIZE(i) (0x00400B08 + (i*16))
#define NV10_PGRAPH_TSTATUS(i) (0x00400B0C + (i*16))
#define NV04_PGRAPH_U_RAM 0x00400D00
#define NV47_PGRAPH_TILE0(i) (0x00400D00 + (i*16))
#define NV47_PGRAPH_TLIMIT0(i) (0x00400D04 + (i*16))
#define NV47_PGRAPH_TSIZE0(i) (0x00400D08 + (i*16))
#define NV47_PGRAPH_TSTATUS0(i) (0x00400D0C + (i*16))
#define NV47_PGRAPH_TILE(i) (0x00400D00 + (i*16))
#define NV47_PGRAPH_TLIMIT(i) (0x00400D04 + (i*16))
#define NV47_PGRAPH_TSIZE(i) (0x00400D08 + (i*16))
#define NV47_PGRAPH_TSTATUS(i) (0x00400D0C + (i*16))
#define NV04_PGRAPH_V_RAM 0x00400D40
#define NV04_PGRAPH_W_RAM 0x00400D80
#define NV10_PGRAPH_COMBINER0_IN_ALPHA 0x00400E40

@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv04_fifo_channel_id;
		engine->fifo.create_context = nv04_fifo_create_context;
		engine->fifo.destroy_context = nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv10_graph_grclass;
		engine->graph.init = nv10_graph_init;
		engine->graph.takedown = nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv10_graph_load_context;
		engine->graph.unload_context = nv10_graph_unload_context;
		engine->graph.set_region_tiling = nv10_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv20_graph_grclass;
		engine->graph.init = nv20_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.fifo_access = nv04_graph_fifo_access;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv10_fb_init;
		engine->fb.takedown = nv10_fb_takedown;
		engine->fb.set_region_tiling = nv10_fb_set_region_tiling;
		engine->graph.grclass = nv30_graph_grclass;
		engine->graph.init = nv30_graph_init;
		engine->graph.takedown = nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nv20_graph_destroy_context;
		engine->graph.load_context = nv20_graph_load_context;
		engine->graph.unload_context = nv20_graph_unload_context;
		engine->graph.set_region_tiling = nv20_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv10_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv10_fifo_create_context;
		engine->fifo.destroy_context = nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->timer.takedown = nv04_timer_takedown;
		engine->fb.init = nv40_fb_init;
		engine->fb.takedown = nv40_fb_takedown;
		engine->fb.set_region_tiling = nv40_fb_set_region_tiling;
		engine->graph.grclass = nv40_graph_grclass;
		engine->graph.init = nv40_graph_init;
		engine->graph.takedown = nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
		engine->graph.destroy_context = nv40_graph_destroy_context;
		engine->graph.load_context = nv40_graph_load_context;
		engine->graph.unload_context = nv40_graph_unload_context;
		engine->graph.set_region_tiling = nv40_graph_set_region_tiling;
		engine->fifo.channels = 32;
		engine->fifo.init = nv40_fifo_init;
		engine->fifo.takedown = nouveau_stub_takedown;
		engine->fifo.disable = nv04_fifo_disable;
		engine->fifo.enable = nv04_fifo_enable;
		engine->fifo.reassign = nv04_fifo_reassign;
		engine->fifo.cache_flush = nv04_fifo_cache_flush;
		engine->fifo.cache_pull = nv04_fifo_cache_pull;
		engine->fifo.channel_id = nv10_fifo_channel_id;
		engine->fifo.create_context = nv40_fifo_create_context;
		engine->fifo.destroy_context = nv40_fifo_destroy_context;
@@ -624,7 +642,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
	/* NV04 or NV05 */
	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
		dev_priv->chipset = 0x04;
		if (reg0 & 0x00f00000)
			dev_priv->chipset = 0x05;
		else
			dev_priv->chipset = 0x04;
	} else
		dev_priv->chipset = 0xff;

@@ -704,8 +725,8 @@ static void nouveau_close(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* In the case of an error dev_priv may not be be allocated yet */
	if (dev_priv && dev_priv->card_type)
	/* In the case of an error dev_priv may not be allocated yet */
	if (dev_priv)
		nouveau_card_takedown(dev);
}

@@ -28,45 +28,17 @@

#include "nouveau_drv.h"

static struct vm_operations_struct nouveau_ttm_vm_ops;
static const struct vm_operations_struct *ttm_vm_ops;

static int
nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vma->vm_private_data;
	int ret;

	if (unlikely(bo == NULL))
		return VM_FAULT_NOPAGE;

	ret = ttm_vm_ops->fault(vma, vmf);
	return ret;
}

int
nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *file_priv = filp->private_data;
	struct drm_nouveau_private *dev_priv =
		file_priv->minor->dev->dev_private;
	int ret;

	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
		return drm_mmap(filp, vma);

	ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(ttm_vm_ops == NULL)) {
		ttm_vm_ops = vma->vm_ops;
		nouveau_ttm_vm_ops = *ttm_vm_ops;
		nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
	}

	vma->vm_ops = &nouveau_ttm_vm_ops;
	return 0;
	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
}

static int

Some files were not shown because too many files have changed in this diff.