mirror of
https://github.com/Dasharo/linux.git
synced 2026-03-06 15:25:10 -08:00
drm/radeon: remove UMS support
It's been deprecated behind a kconfig option for almost two years and hasn't really been supported for years before that. DDX support was dropped more than three years ago. Acked-by: Christian König <christian.koenig@amd.com> Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
This commit is contained in:
@@ -5,12 +5,3 @@ config DRM_RADEON_USERPTR
|
||||
help
|
||||
This option selects CONFIG_MMU_NOTIFIER if it isn't already
|
||||
selected to enabled full userptr support.
|
||||
|
||||
config DRM_RADEON_UMS
|
||||
bool "Enable userspace modesetting on radeon (DEPRECATED)"
|
||||
depends on DRM_RADEON
|
||||
help
|
||||
Choose this option if you still need userspace modesetting.
|
||||
|
||||
Userspace modesetting is deprecated for quite some time now, so
|
||||
enable this only if you have ancient versions of the DDX drivers.
|
||||
|
||||
@@ -58,10 +58,6 @@ $(obj)/evergreen_cs.o: $(obj)/evergreen_reg_safe.h $(obj)/cayman_reg_safe.h
|
||||
|
||||
radeon-y := radeon_drv.o
|
||||
|
||||
# add UMS driver
|
||||
radeon-$(CONFIG_DRM_RADEON_UMS)+= radeon_cp.o radeon_state.o radeon_mem.o \
|
||||
radeon_irq.o r300_cmdbuf.o r600_cp.o r600_blit.o drm_buffer.o
|
||||
|
||||
# add KMS driver
|
||||
radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
|
||||
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
|
||||
|
||||
@@ -1,177 +0,0 @@
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2010 Pauli Nieminen.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Multipart buffer for coping data which is larger than the page size.
|
||||
*
|
||||
* Authors:
|
||||
* Pauli Nieminen <suokkos-at-gmail-dot-com>
|
||||
*/
|
||||
|
||||
#include <linux/export.h>
|
||||
#include "drm_buffer.h"
|
||||
|
||||
/**
|
||||
* Allocate the drm buffer object.
|
||||
*
|
||||
* buf: Pointer to a pointer where the object is stored.
|
||||
* size: The number of bytes to allocate.
|
||||
*/
|
||||
int drm_buffer_alloc(struct drm_buffer **buf, int size)
|
||||
{
|
||||
int nr_pages = size / PAGE_SIZE + 1;
|
||||
int idx;
|
||||
|
||||
/* Allocating pointer table to end of structure makes drm_buffer
|
||||
* variable sized */
|
||||
*buf = kzalloc(sizeof(struct drm_buffer) + nr_pages*sizeof(char *),
|
||||
GFP_KERNEL);
|
||||
|
||||
if (*buf == NULL) {
|
||||
DRM_ERROR("Failed to allocate drm buffer object to hold"
|
||||
" %d bytes in %d pages.\n",
|
||||
size, nr_pages);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
(*buf)->size = size;
|
||||
|
||||
for (idx = 0; idx < nr_pages; ++idx) {
|
||||
|
||||
(*buf)->data[idx] =
|
||||
kmalloc(min(PAGE_SIZE, size - idx * PAGE_SIZE),
|
||||
GFP_KERNEL);
|
||||
|
||||
|
||||
if ((*buf)->data[idx] == NULL) {
|
||||
DRM_ERROR("Failed to allocate %dth page for drm"
|
||||
" buffer with %d bytes and %d pages.\n",
|
||||
idx + 1, size, nr_pages);
|
||||
goto error_out;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
error_out:
|
||||
|
||||
for (; idx >= 0; --idx)
|
||||
kfree((*buf)->data[idx]);
|
||||
|
||||
kfree(*buf);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
/**
|
||||
* Copy the user data to the begin of the buffer and reset the processing
|
||||
* iterator.
|
||||
*
|
||||
* user_data: A pointer the data that is copied to the buffer.
|
||||
* size: The Number of bytes to copy.
|
||||
*/
|
||||
int drm_buffer_copy_from_user(struct drm_buffer *buf,
|
||||
void __user *user_data, int size)
|
||||
{
|
||||
int nr_pages = size / PAGE_SIZE + 1;
|
||||
int idx;
|
||||
|
||||
if (size > buf->size) {
|
||||
DRM_ERROR("Requesting to copy %d bytes to a drm buffer with"
|
||||
" %d bytes space\n",
|
||||
size, buf->size);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
for (idx = 0; idx < nr_pages; ++idx) {
|
||||
|
||||
if (copy_from_user(buf->data[idx],
|
||||
user_data + idx * PAGE_SIZE,
|
||||
min(PAGE_SIZE, size - idx * PAGE_SIZE))) {
|
||||
DRM_ERROR("Failed to copy user data (%p) to drm buffer"
|
||||
" (%p) %dth page.\n",
|
||||
user_data, buf, idx);
|
||||
return -EFAULT;
|
||||
|
||||
}
|
||||
}
|
||||
buf->iterator = 0;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Free the drm buffer object
|
||||
*/
|
||||
void drm_buffer_free(struct drm_buffer *buf)
|
||||
{
|
||||
|
||||
if (buf != NULL) {
|
||||
|
||||
int nr_pages = buf->size / PAGE_SIZE + 1;
|
||||
int idx;
|
||||
for (idx = 0; idx < nr_pages; ++idx)
|
||||
kfree(buf->data[idx]);
|
||||
|
||||
kfree(buf);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Read an object from buffer that may be split to multiple parts. If object
|
||||
* is not split function just returns the pointer to object in buffer. But in
|
||||
* case of split object data is copied to given stack object that is suplied
|
||||
* by caller.
|
||||
*
|
||||
* The processing location of the buffer is also advanced to the next byte
|
||||
* after the object.
|
||||
*
|
||||
* objsize: The size of the objet in bytes.
|
||||
* stack_obj: A pointer to a memory location where object can be copied.
|
||||
*/
|
||||
void *drm_buffer_read_object(struct drm_buffer *buf,
|
||||
int objsize, void *stack_obj)
|
||||
{
|
||||
int idx = drm_buffer_index(buf);
|
||||
int page = drm_buffer_page(buf);
|
||||
void *obj = NULL;
|
||||
|
||||
if (idx + objsize <= PAGE_SIZE) {
|
||||
obj = &buf->data[page][idx];
|
||||
} else {
|
||||
/* The object is split which forces copy to temporary object.*/
|
||||
int beginsz = PAGE_SIZE - idx;
|
||||
memcpy(stack_obj, &buf->data[page][idx], beginsz);
|
||||
|
||||
memcpy(stack_obj + beginsz, &buf->data[page + 1][0],
|
||||
objsize - beginsz);
|
||||
|
||||
obj = stack_obj;
|
||||
}
|
||||
|
||||
drm_buffer_advance(buf, objsize);
|
||||
return obj;
|
||||
}
|
||||
@@ -1,148 +0,0 @@
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright 2010 Pauli Nieminen.
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Multipart buffer for coping data which is larger than the page size.
|
||||
*
|
||||
* Authors:
|
||||
* Pauli Nieminen <suokkos-at-gmail-dot-com>
|
||||
*/
|
||||
|
||||
#ifndef _DRM_BUFFER_H_
|
||||
#define _DRM_BUFFER_H_
|
||||
|
||||
#include <drm/drmP.h>
|
||||
|
||||
struct drm_buffer {
|
||||
int iterator;
|
||||
int size;
|
||||
char *data[];
|
||||
};
|
||||
|
||||
|
||||
/**
|
||||
* Return the index of page that buffer is currently pointing at.
|
||||
*/
|
||||
static inline int drm_buffer_page(struct drm_buffer *buf)
|
||||
{
|
||||
return buf->iterator / PAGE_SIZE;
|
||||
}
|
||||
/**
|
||||
* Return the index of the current byte in the page
|
||||
*/
|
||||
static inline int drm_buffer_index(struct drm_buffer *buf)
|
||||
{
|
||||
return buf->iterator & (PAGE_SIZE - 1);
|
||||
}
|
||||
/**
|
||||
* Return number of bytes that is left to process
|
||||
*/
|
||||
static inline int drm_buffer_unprocessed(struct drm_buffer *buf)
|
||||
{
|
||||
return buf->size - buf->iterator;
|
||||
}
|
||||
|
||||
/**
|
||||
* Advance the buffer iterator number of bytes that is given.
|
||||
*/
|
||||
static inline void drm_buffer_advance(struct drm_buffer *buf, int bytes)
|
||||
{
|
||||
buf->iterator += bytes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate the drm buffer object.
|
||||
*
|
||||
* buf: A pointer to a pointer where the object is stored.
|
||||
* size: The number of bytes to allocate.
|
||||
*/
|
||||
extern int drm_buffer_alloc(struct drm_buffer **buf, int size);
|
||||
|
||||
/**
|
||||
* Copy the user data to the begin of the buffer and reset the processing
|
||||
* iterator.
|
||||
*
|
||||
* user_data: A pointer the data that is copied to the buffer.
|
||||
* size: The Number of bytes to copy.
|
||||
*/
|
||||
extern int drm_buffer_copy_from_user(struct drm_buffer *buf,
|
||||
void __user *user_data, int size);
|
||||
|
||||
/**
|
||||
* Free the drm buffer object
|
||||
*/
|
||||
extern void drm_buffer_free(struct drm_buffer *buf);
|
||||
|
||||
/**
|
||||
* Read an object from buffer that may be split to multiple parts. If object
|
||||
* is not split function just returns the pointer to object in buffer. But in
|
||||
* case of split object data is copied to given stack object that is suplied
|
||||
* by caller.
|
||||
*
|
||||
* The processing location of the buffer is also advanced to the next byte
|
||||
* after the object.
|
||||
*
|
||||
* objsize: The size of the objet in bytes.
|
||||
* stack_obj: A pointer to a memory location where object can be copied.
|
||||
*/
|
||||
extern void *drm_buffer_read_object(struct drm_buffer *buf,
|
||||
int objsize, void *stack_obj);
|
||||
|
||||
/**
|
||||
* Returns the pointer to the dword which is offset number of elements from the
|
||||
* current processing location.
|
||||
*
|
||||
* Caller must make sure that dword is not split in the buffer. This
|
||||
* requirement is easily met if all the sizes of objects in buffer are
|
||||
* multiples of dword and PAGE_SIZE is multiple dword.
|
||||
*
|
||||
* Call to this function doesn't change the processing location.
|
||||
*
|
||||
* offset: The index of the dword relative to the internat iterator.
|
||||
*/
|
||||
static inline void *drm_buffer_pointer_to_dword(struct drm_buffer *buffer,
|
||||
int offset)
|
||||
{
|
||||
int iter = buffer->iterator + offset * 4;
|
||||
return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
|
||||
}
|
||||
/**
|
||||
* Returns the pointer to the dword which is offset number of elements from
|
||||
* the current processing location.
|
||||
*
|
||||
* Call to this function doesn't change the processing location.
|
||||
*
|
||||
* offset: The index of the byte relative to the internat iterator.
|
||||
*/
|
||||
static inline void *drm_buffer_pointer_to_byte(struct drm_buffer *buffer,
|
||||
int offset)
|
||||
{
|
||||
int iter = buffer->iterator + offset;
|
||||
return &buffer->data[iter / PAGE_SIZE][iter & (PAGE_SIZE - 1)];
|
||||
}
|
||||
|
||||
#endif
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@@ -2328,101 +2328,6 @@ int r600_cs_parse(struct radeon_cs_parser *p)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DRM_RADEON_UMS
|
||||
|
||||
/**
|
||||
* cs_parser_fini() - clean parser states
|
||||
* @parser: parser structure holding parsing context.
|
||||
* @error: error number
|
||||
*
|
||||
* If error is set than unvalidate buffer, otherwise just free memory
|
||||
* used by parsing context.
|
||||
**/
|
||||
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
|
||||
{
|
||||
unsigned i;
|
||||
|
||||
kfree(parser->relocs);
|
||||
for (i = 0; i < parser->nchunks; i++)
|
||||
drm_free_large(parser->chunks[i].kdata);
|
||||
kfree(parser->chunks);
|
||||
kfree(parser->chunks_array);
|
||||
}
|
||||
|
||||
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
|
||||
{
|
||||
if (p->chunk_relocs == NULL) {
|
||||
return 0;
|
||||
}
|
||||
p->relocs = kzalloc(sizeof(struct radeon_bo_list), GFP_KERNEL);
|
||||
if (p->relocs == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
|
||||
unsigned family, u32 *ib, int *l)
|
||||
{
|
||||
struct radeon_cs_parser parser;
|
||||
struct radeon_cs_chunk *ib_chunk;
|
||||
struct r600_cs_track *track;
|
||||
int r;
|
||||
|
||||
/* initialize tracker */
|
||||
track = kzalloc(sizeof(*track), GFP_KERNEL);
|
||||
if (track == NULL)
|
||||
return -ENOMEM;
|
||||
r600_cs_track_init(track);
|
||||
r600_cs_legacy_get_tiling_conf(dev, &track->npipes, &track->nbanks, &track->group_size);
|
||||
/* initialize parser */
|
||||
memset(&parser, 0, sizeof(struct radeon_cs_parser));
|
||||
parser.filp = filp;
|
||||
parser.dev = &dev->pdev->dev;
|
||||
parser.rdev = NULL;
|
||||
parser.family = family;
|
||||
parser.track = track;
|
||||
parser.ib.ptr = ib;
|
||||
r = radeon_cs_parser_init(&parser, data);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to initialize parser !\n");
|
||||
r600_cs_parser_fini(&parser, r);
|
||||
return r;
|
||||
}
|
||||
r = r600_cs_parser_relocs_legacy(&parser);
|
||||
if (r) {
|
||||
DRM_ERROR("Failed to parse relocation !\n");
|
||||
r600_cs_parser_fini(&parser, r);
|
||||
return r;
|
||||
}
|
||||
/* Copy the packet into the IB, the parser will read from the
|
||||
* input memory (cached) and write to the IB (which can be
|
||||
* uncached). */
|
||||
ib_chunk = parser.chunk_ib;
|
||||
parser.ib.length_dw = ib_chunk->length_dw;
|
||||
*l = parser.ib.length_dw;
|
||||
if (copy_from_user(ib, ib_chunk->user_ptr, ib_chunk->length_dw * 4)) {
|
||||
r = -EFAULT;
|
||||
r600_cs_parser_fini(&parser, r);
|
||||
return r;
|
||||
}
|
||||
r = r600_cs_parse(&parser);
|
||||
if (r) {
|
||||
DRM_ERROR("Invalid command stream !\n");
|
||||
r600_cs_parser_fini(&parser, r);
|
||||
return r;
|
||||
}
|
||||
r600_cs_parser_fini(&parser, r);
|
||||
return r;
|
||||
}
|
||||
|
||||
void r600_cs_legacy_init(void)
|
||||
{
|
||||
r600_nomm = 1;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* DMA
|
||||
*/
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -291,88 +291,6 @@ static struct pci_device_id pciidlist[] = {
|
||||
|
||||
MODULE_DEVICE_TABLE(pci, pciidlist);
|
||||
|
||||
#ifdef CONFIG_DRM_RADEON_UMS
|
||||
|
||||
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return 0;
|
||||
|
||||
/* Disable *all* interrupts */
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
|
||||
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
|
||||
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int radeon_resume(struct drm_device *dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return 0;
|
||||
|
||||
/* Restore interrupt registers */
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
|
||||
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
|
||||
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
static const struct file_operations radeon_driver_old_fops = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_legacy_mmap,
|
||||
.poll = drm_poll,
|
||||
.read = drm_read,
|
||||
#ifdef CONFIG_COMPAT
|
||||
.compat_ioctl = radeon_compat_ioctl,
|
||||
#endif
|
||||
.llseek = noop_llseek,
|
||||
};
|
||||
|
||||
static struct drm_driver driver_old = {
|
||||
.driver_features =
|
||||
DRIVER_USE_AGP | DRIVER_PCI_DMA | DRIVER_SG |
|
||||
DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
|
||||
.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
|
||||
.load = radeon_driver_load,
|
||||
.firstopen = radeon_driver_firstopen,
|
||||
.open = radeon_driver_open,
|
||||
.preclose = radeon_driver_preclose,
|
||||
.postclose = radeon_driver_postclose,
|
||||
.lastclose = radeon_driver_lastclose,
|
||||
.set_busid = drm_pci_set_busid,
|
||||
.unload = radeon_driver_unload,
|
||||
.suspend = radeon_suspend,
|
||||
.resume = radeon_resume,
|
||||
.get_vblank_counter = radeon_get_vblank_counter,
|
||||
.enable_vblank = radeon_enable_vblank,
|
||||
.disable_vblank = radeon_disable_vblank,
|
||||
.master_create = radeon_master_create,
|
||||
.master_destroy = radeon_master_destroy,
|
||||
.irq_preinstall = radeon_driver_irq_preinstall,
|
||||
.irq_postinstall = radeon_driver_irq_postinstall,
|
||||
.irq_uninstall = radeon_driver_irq_uninstall,
|
||||
.irq_handler = radeon_driver_irq_handler,
|
||||
.ioctls = radeon_ioctls,
|
||||
.dma_ioctl = radeon_cp_buffers,
|
||||
.fops = &radeon_driver_old_fops,
|
||||
.name = DRIVER_NAME,
|
||||
.desc = DRIVER_DESC,
|
||||
.date = DRIVER_DATE,
|
||||
.major = DRIVER_MAJOR,
|
||||
.minor = DRIVER_MINOR,
|
||||
.patchlevel = DRIVER_PATCHLEVEL,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
static struct drm_driver kms_driver;
|
||||
|
||||
static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
|
||||
@@ -619,13 +537,6 @@ static struct drm_driver kms_driver = {
|
||||
static struct drm_driver *driver;
|
||||
static struct pci_driver *pdriver;
|
||||
|
||||
#ifdef CONFIG_DRM_RADEON_UMS
|
||||
static struct pci_driver radeon_pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
};
|
||||
#endif
|
||||
|
||||
static struct pci_driver radeon_kms_pci_driver = {
|
||||
.name = DRIVER_NAME,
|
||||
.id_table = pciidlist,
|
||||
@@ -655,16 +566,8 @@ static int __init radeon_init(void)
|
||||
radeon_register_atpx_handler();
|
||||
|
||||
} else {
|
||||
#ifdef CONFIG_DRM_RADEON_UMS
|
||||
DRM_INFO("radeon userspace modesetting enabled.\n");
|
||||
driver = &driver_old;
|
||||
pdriver = &radeon_pci_driver;
|
||||
driver->driver_features &= ~DRIVER_MODESET;
|
||||
driver->num_ioctls = radeon_max_ioctl;
|
||||
#else
|
||||
DRM_ERROR("No UMS support in radeon module!\n");
|
||||
return -EINVAL;
|
||||
#endif
|
||||
}
|
||||
|
||||
radeon_kfd_init();
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,402 +0,0 @@
|
||||
/* radeon_irq.c -- IRQ handling for radeon -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
|
||||
*
|
||||
* The Weather Channel (TM) funded Tungsten Graphics to develop the
|
||||
* initial release of the Radeon 8500 driver under the XFree86 license.
|
||||
* This notice must be preserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Keith Whitwell <keith@tungstengraphics.com>
|
||||
* Michel D<>zer <michel@daenzer.net>
|
||||
*
|
||||
* ------------------------ This file is DEPRECATED! -------------------------
|
||||
*/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/radeon_drm.h>
|
||||
#include "radeon_drv.h"
|
||||
|
||||
void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if (state)
|
||||
dev_priv->irq_enable_reg |= mask;
|
||||
else
|
||||
dev_priv->irq_enable_reg &= ~mask;
|
||||
|
||||
if (dev->irq_enabled)
|
||||
RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
|
||||
}
|
||||
|
||||
static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if (state)
|
||||
dev_priv->r500_disp_irq_reg |= mask;
|
||||
else
|
||||
dev_priv->r500_disp_irq_reg &= ~mask;
|
||||
|
||||
if (dev->irq_enabled)
|
||||
RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
|
||||
}
|
||||
|
||||
int radeon_enable_vblank(struct drm_device *dev, unsigned int pipe)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
|
||||
break;
|
||||
case 1:
|
||||
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
|
||||
pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
} else {
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
|
||||
break;
|
||||
case 1:
|
||||
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
|
||||
pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_disable_vblank(struct drm_device *dev, unsigned int pipe)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
|
||||
break;
|
||||
case 1:
|
||||
r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
|
||||
pipe);
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
switch (pipe) {
|
||||
case 0:
|
||||
radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
|
||||
break;
|
||||
case 1:
|
||||
radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("tried to enable vblank on non-existent crtc %u\n",
|
||||
pipe);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
|
||||
{
|
||||
u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
|
||||
u32 irq_mask = RADEON_SW_INT_TEST;
|
||||
|
||||
*r500_disp_int = 0;
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
|
||||
/* vbl interrupts in a different place */
|
||||
|
||||
if (irqs & R500_DISPLAY_INT_STATUS) {
|
||||
/* if a display interrupt */
|
||||
u32 disp_irq;
|
||||
|
||||
disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
|
||||
|
||||
*r500_disp_int = disp_irq;
|
||||
if (disp_irq & R500_D1_VBLANK_INTERRUPT)
|
||||
RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
|
||||
if (disp_irq & R500_D2_VBLANK_INTERRUPT)
|
||||
RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
|
||||
}
|
||||
irq_mask |= R500_DISPLAY_INT_STATUS;
|
||||
} else
|
||||
irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
|
||||
|
||||
irqs &= irq_mask;
|
||||
|
||||
if (irqs)
|
||||
RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
|
||||
|
||||
return irqs;
|
||||
}
|
||||
|
||||
/* Interrupts - Used for device synchronization and flushing in the
|
||||
* following circumstances:
|
||||
*
|
||||
* - Exclusive FB access with hw idle:
|
||||
* - Wait for GUI Idle (?) interrupt, then do normal flush.
|
||||
*
|
||||
* - Frame throttling, NV_fence:
|
||||
* - Drop marker irq's into command stream ahead of time.
|
||||
* - Wait on irq's with lock *not held*
|
||||
* - Check each for termination condition
|
||||
*
|
||||
* - Internally in cp_getbuffer, etc:
|
||||
* - as above, but wait with lock held???
|
||||
*
|
||||
* NOTE: These functions are misleadingly named -- the irq's aren't
|
||||
* tied to dma at all, this is just a hangover from dri prehistory.
|
||||
*/
|
||||
|
||||
irqreturn_t radeon_driver_irq_handler(int irq, void *arg)
|
||||
{
|
||||
struct drm_device *dev = (struct drm_device *) arg;
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
u32 stat;
|
||||
u32 r500_disp_int;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return IRQ_NONE;
|
||||
|
||||
/* Only consider the bits we're interested in - others could be used
|
||||
* outside the DRM
|
||||
*/
|
||||
stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
|
||||
if (!stat)
|
||||
return IRQ_NONE;
|
||||
|
||||
stat &= dev_priv->irq_enable_reg;
|
||||
|
||||
/* SW interrupt */
|
||||
if (stat & RADEON_SW_INT_TEST)
|
||||
wake_up(&dev_priv->swi_queue);
|
||||
|
||||
/* VBLANK interrupt */
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
|
||||
if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
|
||||
drm_handle_vblank(dev, 0);
|
||||
if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
|
||||
drm_handle_vblank(dev, 1);
|
||||
} else {
|
||||
if (stat & RADEON_CRTC_VBLANK_STAT)
|
||||
drm_handle_vblank(dev, 0);
|
||||
if (stat & RADEON_CRTC2_VBLANK_STAT)
|
||||
drm_handle_vblank(dev, 1);
|
||||
}
|
||||
return IRQ_HANDLED;
|
||||
}
|
||||
|
||||
static int radeon_emit_irq(struct drm_device * dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
unsigned int ret;
|
||||
RING_LOCALS;
|
||||
|
||||
atomic_inc(&dev_priv->swi_emitted);
|
||||
ret = atomic_read(&dev_priv->swi_emitted);
|
||||
|
||||
BEGIN_RING(4);
|
||||
OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
|
||||
OUT_RING_REG(RADEON_GEN_INT_STATUS, RADEON_SW_INT_FIRE);
|
||||
ADVANCE_RING();
|
||||
COMMIT_RING();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
int ret = 0;
|
||||
|
||||
if (RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr)
|
||||
return 0;
|
||||
|
||||
dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
|
||||
|
||||
DRM_WAIT_ON(ret, dev_priv->swi_queue, 3 * HZ,
|
||||
RADEON_READ(RADEON_LAST_SWI_REG) >= swi_nr);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
u32 radeon_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (pipe > 1) {
|
||||
DRM_ERROR("Invalid crtc %u\n", pipe);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600) {
|
||||
if (pipe == 0)
|
||||
return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
|
||||
else
|
||||
return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
|
||||
} else {
|
||||
if (pipe == 0)
|
||||
return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
|
||||
else
|
||||
return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
|
||||
}
|
||||
}
|
||||
|
||||
/* Needs the lock as it touches the ring.
|
||||
*/
|
||||
int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_radeon_irq_emit_t *emit = data;
|
||||
int result;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return -EINVAL;
|
||||
|
||||
LOCK_TEST_WITH_RETURN(dev, file_priv);
|
||||
|
||||
result = radeon_emit_irq(dev);
|
||||
|
||||
if (copy_to_user(emit->irq_seq, &result, sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Doesn't need the hardware lock.
|
||||
*/
|
||||
int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_radeon_irq_wait_t *irqwait = data;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return -EINVAL;
|
||||
|
||||
return radeon_wait_irq(dev, irqwait->irq_seq);
|
||||
}
|
||||
|
||||
/* drm_dma.h hooks
|
||||
*/
|
||||
void radeon_driver_irq_preinstall(struct drm_device * dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
u32 dummy;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return;
|
||||
|
||||
/* Disable *all* interrupts */
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
|
||||
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
|
||||
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
|
||||
|
||||
/* Clear bits if they're already high */
|
||||
radeon_acknowledge_irqs(dev_priv, &dummy);
|
||||
}
|
||||
|
||||
int radeon_driver_irq_postinstall(struct drm_device *dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
|
||||
atomic_set(&dev_priv->swi_emitted, 0);
|
||||
init_waitqueue_head(&dev_priv->swi_queue);
|
||||
|
||||
dev->max_vblank_count = 0x001fffff;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return 0;
|
||||
|
||||
radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void radeon_driver_irq_uninstall(struct drm_device * dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv =
|
||||
(drm_radeon_private_t *) dev->dev_private;
|
||||
if (!dev_priv)
|
||||
return;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R600)
|
||||
return;
|
||||
|
||||
if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS600)
|
||||
RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
|
||||
/* Disable *all* interrupts */
|
||||
RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
|
||||
}
|
||||
|
||||
|
||||
int radeon_vblank_crtc_get(struct drm_device *dev)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
|
||||
|
||||
return dev_priv->vblank_crtc;
|
||||
}
|
||||
|
||||
/* Select which CRTC(s) vblank handling applies to.  Only the CRTC1 and
 * CRTC2 bits are valid; any other bit in @value is rejected.
 */
int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
{
	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;

	if (value & ~(DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
		DRM_ERROR("called with invalid crtc 0x%x\n", (unsigned int)value);
		return -EINVAL;
	}

	dev_priv->vblank_crtc = (unsigned int)value;
	return 0;
}
|
||||
@@ -1,302 +0,0 @@
|
||||
/* radeon_mem.c -- Simple GART/fb memory manager for radeon -*- linux-c -*- */
|
||||
/*
|
||||
* Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
|
||||
*
|
||||
* The Weather Channel (TM) funded Tungsten Graphics to develop the
|
||||
* initial release of the Radeon 8500 driver under the XFree86 license.
|
||||
* This notice must be preserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors:
|
||||
* Keith Whitwell <keith@tungstengraphics.com>
|
||||
*
|
||||
* ------------------------ This file is DEPRECATED! -------------------------
|
||||
*/
|
||||
|
||||
#include <drm/drmP.h>
|
||||
#include <drm/radeon_drm.h>
|
||||
#include "radeon_drv.h"
|
||||
|
||||
/* Very simple allocator for GART memory, working on a static range
|
||||
* already mapped into each client's address space.
|
||||
*/
|
||||
|
||||
static struct mem_block *split_block(struct mem_block *p, int start, int size,
|
||||
struct drm_file *file_priv)
|
||||
{
|
||||
/* Maybe cut off the start of an existing block */
|
||||
if (start > p->start) {
|
||||
struct mem_block *newblock = kmalloc(sizeof(*newblock),
|
||||
GFP_KERNEL);
|
||||
if (!newblock)
|
||||
goto out;
|
||||
newblock->start = start;
|
||||
newblock->size = p->size - (start - p->start);
|
||||
newblock->file_priv = NULL;
|
||||
newblock->next = p->next;
|
||||
newblock->prev = p;
|
||||
p->next->prev = newblock;
|
||||
p->next = newblock;
|
||||
p->size -= newblock->size;
|
||||
p = newblock;
|
||||
}
|
||||
|
||||
/* Maybe cut off the end of an existing block */
|
||||
if (size < p->size) {
|
||||
struct mem_block *newblock = kmalloc(sizeof(*newblock),
|
||||
GFP_KERNEL);
|
||||
if (!newblock)
|
||||
goto out;
|
||||
newblock->start = start + size;
|
||||
newblock->size = p->size - size;
|
||||
newblock->file_priv = NULL;
|
||||
newblock->next = p->next;
|
||||
newblock->prev = p;
|
||||
p->next->prev = newblock;
|
||||
p->next = newblock;
|
||||
p->size = size;
|
||||
}
|
||||
|
||||
out:
|
||||
/* Our block is in the middle */
|
||||
p->file_priv = file_priv;
|
||||
return p;
|
||||
}
|
||||
|
||||
static struct mem_block *alloc_block(struct mem_block *heap, int size,
|
||||
int align2, struct drm_file *file_priv)
|
||||
{
|
||||
struct mem_block *p;
|
||||
int mask = (1 << align2) - 1;
|
||||
|
||||
list_for_each(p, heap) {
|
||||
int start = (p->start + mask) & ~mask;
|
||||
if (p->file_priv == NULL && start + size <= p->start + p->size)
|
||||
return split_block(p, start, size, file_priv);
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static struct mem_block *find_block(struct mem_block *heap, int start)
|
||||
{
|
||||
struct mem_block *p;
|
||||
|
||||
list_for_each(p, heap)
|
||||
if (p->start == start)
|
||||
return p;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void free_block(struct mem_block *p)
|
||||
{
|
||||
p->file_priv = NULL;
|
||||
|
||||
/* Assumes a single contiguous range. Needs a special file_priv in
|
||||
* 'heap' to stop it being subsumed.
|
||||
*/
|
||||
if (p->next->file_priv == NULL) {
|
||||
struct mem_block *q = p->next;
|
||||
p->size += q->size;
|
||||
p->next = q->next;
|
||||
p->next->prev = p;
|
||||
kfree(q);
|
||||
}
|
||||
|
||||
if (p->prev->file_priv == NULL) {
|
||||
struct mem_block *q = p->prev;
|
||||
q->size += p->size;
|
||||
q->next = p->next;
|
||||
q->next->prev = q;
|
||||
kfree(p);
|
||||
}
|
||||
}
|
||||
|
||||
/* Initialize. How to check for an uninitialized heap?
|
||||
*/
|
||||
static int init_heap(struct mem_block **heap, int start, int size)
|
||||
{
|
||||
struct mem_block *blocks = kmalloc(sizeof(*blocks), GFP_KERNEL);
|
||||
|
||||
if (!blocks)
|
||||
return -ENOMEM;
|
||||
|
||||
*heap = kzalloc(sizeof(**heap), GFP_KERNEL);
|
||||
if (!*heap) {
|
||||
kfree(blocks);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
blocks->start = start;
|
||||
blocks->size = size;
|
||||
blocks->file_priv = NULL;
|
||||
blocks->next = blocks->prev = *heap;
|
||||
|
||||
(*heap)->file_priv = (struct drm_file *) - 1;
|
||||
(*heap)->next = (*heap)->prev = blocks;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Free all blocks associated with the releasing file.
|
||||
*/
|
||||
void radeon_mem_release(struct drm_file *file_priv, struct mem_block *heap)
|
||||
{
|
||||
struct mem_block *p;
|
||||
|
||||
if (!heap || !heap->next)
|
||||
return;
|
||||
|
||||
list_for_each(p, heap) {
|
||||
if (p->file_priv == file_priv)
|
||||
p->file_priv = NULL;
|
||||
}
|
||||
|
||||
/* Assumes a single contiguous range. Needs a special file_priv in
|
||||
* 'heap' to stop it being subsumed.
|
||||
*/
|
||||
list_for_each(p, heap) {
|
||||
while (p->file_priv == NULL && p->next->file_priv == NULL) {
|
||||
struct mem_block *q = p->next;
|
||||
p->size += q->size;
|
||||
p->next = q->next;
|
||||
p->next->prev = p;
|
||||
kfree(q);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Shutdown.
|
||||
*/
|
||||
void radeon_mem_takedown(struct mem_block **heap)
|
||||
{
|
||||
struct mem_block *p;
|
||||
|
||||
if (!*heap)
|
||||
return;
|
||||
|
||||
for (p = (*heap)->next; p != *heap;) {
|
||||
struct mem_block *q = p;
|
||||
p = p->next;
|
||||
kfree(q);
|
||||
}
|
||||
|
||||
kfree(*heap);
|
||||
*heap = NULL;
|
||||
}
|
||||
|
||||
/* IOCTL HANDLERS */
|
||||
|
||||
/* Map a userspace region id to the matching heap pointer in dev_priv,
 * or NULL for an unknown region.
 */
static struct mem_block **get_heap(drm_radeon_private_t * dev_priv, int region)
{
	if (region == RADEON_MEM_REGION_GART)
		return &dev_priv->gart_heap;
	if (region == RADEON_MEM_REGION_FB)
		return &dev_priv->fb_heap;
	return NULL;
}
|
||||
|
||||
int radeon_mem_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_radeon_mem_alloc_t *alloc = data;
|
||||
struct mem_block *block, **heap;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
heap = get_heap(dev_priv, alloc->region);
|
||||
if (!heap || !*heap)
|
||||
return -EFAULT;
|
||||
|
||||
/* Make things easier on ourselves: all allocations at least
|
||||
* 4k aligned.
|
||||
*/
|
||||
if (alloc->alignment < 12)
|
||||
alloc->alignment = 12;
|
||||
|
||||
block = alloc_block(*heap, alloc->size, alloc->alignment, file_priv);
|
||||
|
||||
if (!block)
|
||||
return -ENOMEM;
|
||||
|
||||
if (copy_to_user(alloc->region_offset, &block->start,
|
||||
sizeof(int))) {
|
||||
DRM_ERROR("copy_to_user\n");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int radeon_mem_free(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_radeon_mem_free_t *memfree = data;
|
||||
struct mem_block *block, **heap;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
heap = get_heap(dev_priv, memfree->region);
|
||||
if (!heap || !*heap)
|
||||
return -EFAULT;
|
||||
|
||||
block = find_block(*heap, memfree->region_offset);
|
||||
if (!block)
|
||||
return -EFAULT;
|
||||
|
||||
if (block->file_priv != file_priv)
|
||||
return -EPERM;
|
||||
|
||||
free_block(block);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int radeon_mem_init_heap(struct drm_device *dev, void *data, struct drm_file *file_priv)
|
||||
{
|
||||
drm_radeon_private_t *dev_priv = dev->dev_private;
|
||||
drm_radeon_mem_init_heap_t *initheap = data;
|
||||
struct mem_block **heap;
|
||||
|
||||
if (!dev_priv) {
|
||||
DRM_ERROR("called with no initialization\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
heap = get_heap(dev_priv, initheap->region);
|
||||
if (!heap)
|
||||
return -EFAULT;
|
||||
|
||||
if (*heap) {
|
||||
DRM_ERROR("heap already initialized?");
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return init_heap(heap, initheap->start, initheap->size);
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
Reference in New Issue
Block a user