drm: add new QXL driver. (v1.4)

QXL is a paravirtual graphics device used by the Spice virtual desktop
interface.

The driver uses GEM and TTM to manage memory; the qxl hw fencing, however,
is quite different from what TTM normally expects. We have to keep track of a
number of non-linear fence ids per bo that we need to have released by the hardware.
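
Roughly, each bo carries a small fence structure that only counts and indexes
the releases the hardware still owes us (a sketch of the layout implied by
qxl_fence.c below; the real definition lives in qxl_drv.h, whose diff is
suppressed here):

    struct qxl_fence {
            struct qxl_device *qdev;
            uint32_t num_active_releases;   /* how many releases the hw still holds */
            uint32_t *release_ids;
            struct radix_tree_root tree;    /* release id -> fence; ids need not be contiguous */
    };

From TTM's point of view the only question is whether a bo still has active
releases; the ids themselves are free to be non-linear.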

The releases are freed from a workqueue that wakes up and processes the
release ring.

Releases are suballocated from a BO; there are 3 release categories: drawables,
surfaces and cursor cmds. The hw also has 3 rings, for commands, cursor and release handling.

The hardware also has a surface id tracking mechanism, which the driver
encapsulates completely inside the kernel; userspace never sees the actual hw
surface ids.
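
Internally the driver hands out hw surface ids from an idr (qdev->surf_id_idr
in qxl_kms.c below) and stashes the id in the qxl_bo. A rough sketch of the
idea, with an illustrative helper name rather than the driver's actual one:

    /* illustrative only - not the driver's actual helper */
    static int qxl_surface_id_alloc_sketch(struct qxl_device *qdev,
                                           struct qxl_bo *qobj)
    {
            int id;

            spin_lock(&qdev->surf_id_idr_lock);
            id = idr_alloc(&qdev->surf_id_idr, qobj, 1,
                           qdev->rom->n_surfaces, GFP_ATOMIC);
            spin_unlock(&qdev->surf_id_idr_lock);
            if (id < 0)
                    return id;
            qobj->surface_id = id;  /* never exposed to userspace; it only sees GEM handles */
            return 0;
    }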

This requires a newer version of the QXL userspace driver, so it shouldn't be
enabled until that has made it into your distro of choice.

Authors: Dave Airlie, Alon Levy

v1.1: fixup some issues in the ioctl interface with padding
v1.2: add module device table
v1.3: fix nomodeset, fbcon leak, dumb bo create, release ring irq,
      don't try flush release ring (broken hw), fix -modesetting.
v1.4: fbcon cpu usage reduction + suitable accel flags.

Signed-off-by: Alon Levy <alevy@redhat.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
commit f64122c1f6 (parent afe6804c04)
Author: Dave Airlie
Date:   2013-02-25 14:47:55 +10:00
Committed by: Dave Airlie
25 changed files with 7260 additions and 0 deletions

drivers/gpu/drm/Kconfig

@@ -220,3 +220,5 @@ source "drivers/gpu/drm/tegra/Kconfig"
source "drivers/gpu/drm/omapdrm/Kconfig"
source "drivers/gpu/drm/tilcdc/Kconfig"
source "drivers/gpu/drm/qxl/Kconfig"

drivers/gpu/drm/Makefile

@@ -52,4 +52,5 @@ obj-$(CONFIG_DRM_SHMOBILE) +=shmobile/
obj-$(CONFIG_DRM_TEGRA) += tegra/
obj-$(CONFIG_DRM_OMAP) += omapdrm/
obj-$(CONFIG_DRM_TILCDC) += tilcdc/
obj-$(CONFIG_DRM_QXL) += qxl/
obj-y += i2c/

drivers/gpu/drm/qxl/Kconfig

@@ -0,0 +1,10 @@
config DRM_QXL
	tristate "QXL virtual GPU"
	depends on DRM && PCI
	select FB_SYS_FILLRECT
	select FB_SYS_COPYAREA
	select FB_SYS_IMAGEBLIT
	select DRM_KMS_HELPER
	select DRM_TTM
	help
	  QXL virtual GPU for Spice virtualization desktop integration.
	  Do not enable this driver unless your distro ships a corresponding
	  X.org QXL driver that can handle kernel modesetting.
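
For reference, enabling the driver only needs the one option (the select lines
above pull in the helpers), e.g. in the kernel config:

    CONFIG_DRM_QXL=m

The module can then be loaded with modprobe qxl; the modeset parameter declared
in qxl_drv.c below is available to force kernel modesetting on or off.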

drivers/gpu/drm/qxl/Makefile

@@ -0,0 +1,9 @@
#
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
ccflags-y := -Iinclude/drm
qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_fb.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_fence.o qxl_release.o
obj-$(CONFIG_DRM_QXL)+= qxl.o

File diff suppressed because it is too large.

drivers/gpu/drm/qxl/qxl_debugfs.c

@@ -0,0 +1,135 @@
/*
* Copyright (C) 2009 Red Hat <bskeggs@redhat.com>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
/*
* Authors:
* Alon Levy <alevy@redhat.com>
*/
#include <linux/debugfs.h>
#include "drmP.h"
#include "qxl_drv.h"
#include "qxl_object.h"
static int
qxl_debugfs_irq_received(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct qxl_device *qdev = node->minor->dev->dev_private;
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
seq_printf(m, "%d\n", qdev->irq_received_error);
return 0;
}
static int
qxl_debugfs_buffers_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct qxl_device *qdev = node->minor->dev->dev_private;
struct qxl_bo *bo;
list_for_each_entry(bo, &qdev->gem.objects, list) {
seq_printf(m, "size %ld, pc %d, sync obj %p, num releases %d\n",
(unsigned long)bo->gem_base.size, bo->pin_count,
bo->tbo.sync_obj, bo->fence.num_active_releases);
}
return 0;
}
static struct drm_info_list qxl_debugfs_list[] = {
{ "irq_received", qxl_debugfs_irq_received, 0, NULL },
{ "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL },
};
#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list)
int
qxl_debugfs_init(struct drm_minor *minor)
{
drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
return 0;
}
void
qxl_debugfs_takedown(struct drm_minor *minor)
{
drm_debugfs_remove_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES,
minor);
}
int qxl_debugfs_add_files(struct qxl_device *qdev,
struct drm_info_list *files,
unsigned nfiles)
{
unsigned i;
for (i = 0; i < qdev->debugfs_count; i++) {
if (qdev->debugfs[i].files == files) {
/* Already registered */
return 0;
}
}
i = qdev->debugfs_count + 1;
if (i > QXL_DEBUGFS_MAX_COMPONENTS) {
DRM_ERROR("Reached maximum number of debugfs components.\n");
DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n");
return -EINVAL;
}
qdev->debugfs[qdev->debugfs_count].files = files;
qdev->debugfs[qdev->debugfs_count].num_files = nfiles;
qdev->debugfs_count = i;
#if defined(CONFIG_DEBUG_FS)
drm_debugfs_create_files(files, nfiles,
qdev->ddev->control->debugfs_root,
qdev->ddev->control);
drm_debugfs_create_files(files, nfiles,
qdev->ddev->primary->debugfs_root,
qdev->ddev->primary);
#endif
return 0;
}
void qxl_debugfs_remove_files(struct qxl_device *qdev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned i;
for (i = 0; i < qdev->debugfs_count; i++) {
drm_debugfs_remove_files(qdev->debugfs[i].files,
qdev->debugfs[i].num_files,
qdev->ddev->control);
drm_debugfs_remove_files(qdev->debugfs[i].files,
qdev->debugfs[i].num_files,
qdev->ddev->primary);
}
#endif
}
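
For context, this is how another part of the driver would hook its own entries
in through qxl_debugfs_add_files(); the component, file name and value printed
here are made up for illustration, only the registration pattern mirrors the
code above:

static int qxl_example_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct qxl_device *qdev = node->minor->dev->dev_private;

        seq_printf(m, "vram size %lu\n", (unsigned long)qdev->vram_size);
        return 0;
}

static struct drm_info_list qxl_example_debugfs_list[] = {
        { "qxl_example_info", qxl_example_info, 0, NULL },
};

/* called from that component's init path */
static void qxl_example_debugfs_init(struct qxl_device *qdev)
{
        qxl_debugfs_add_files(qdev, qxl_example_debugfs_list,
                              ARRAY_SIZE(qxl_example_debugfs_list));
}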

File diff suppressed because it is too large.

File diff suppressed because it is too large.

drivers/gpu/drm/qxl/qxl_draw.c

@@ -0,0 +1,390 @@
/*
* Copyright 2011 Red Hat, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* on the rights to use, copy, modify, merge, publish, distribute, sub
* license, and/or sell copies of the Software, and to permit persons to whom
* the Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include "qxl_drv.h"
#include "qxl_object.h"
/* returns a pointer to the already allocated qxl_rect array inside
* the qxl_clip_rects. This is *not* the same as the memory allocated
* on the device, it is offset to qxl_clip_rects.chunk.data */
static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev,
struct qxl_drawable *drawable,
unsigned num_clips,
struct qxl_bo **clips_bo,
struct qxl_release *release)
{
struct qxl_clip_rects *dev_clips;
int ret;
int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips;
ret = qxl_alloc_bo_reserved(qdev, size, clips_bo);
if (ret)
return NULL;
ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips);
if (ret) {
qxl_bo_unref(clips_bo);
return NULL;
}
dev_clips->num_rects = num_clips;
dev_clips->chunk.next_chunk = 0;
dev_clips->chunk.prev_chunk = 0;
dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips;
return (struct qxl_rect *)dev_clips->chunk.data;
}
static int
make_drawable(struct qxl_device *qdev, int surface, uint8_t type,
const struct qxl_rect *rect,
struct qxl_release **release)
{
struct qxl_drawable *drawable;
int i, ret;
ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable),
QXL_RELEASE_DRAWABLE, release,
NULL);
if (ret)
return ret;
drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release);
drawable->type = type;
drawable->surface_id = surface; /* Only primary for now */
drawable->effect = QXL_EFFECT_OPAQUE;
drawable->self_bitmap = 0;
drawable->self_bitmap_area.top = 0;
drawable->self_bitmap_area.left = 0;
drawable->self_bitmap_area.bottom = 0;
drawable->self_bitmap_area.right = 0;
/* FIXME: add clipping */
drawable->clip.type = SPICE_CLIP_TYPE_NONE;
/*
* surfaces_dest[i] should apparently be filled out with the
* surfaces that we depend on, and surface_rects should be
* filled with the rectangles of those surfaces that we
* are going to use.
*/
for (i = 0; i < 3; ++i)
drawable->surfaces_dest[i] = -1;
if (rect)
drawable->bbox = *rect;
drawable->mm_time = qdev->rom->mm_clock;
qxl_release_unmap(qdev, *release, &drawable->release_info);
return 0;
}
static int qxl_palette_create_1bit(struct qxl_bo **palette_bo,
const struct qxl_fb_image *qxl_fb_image)
{
struct qxl_device *qdev = qxl_fb_image->qdev;
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
uint32_t visual = qxl_fb_image->visual;
const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette;
struct qxl_palette *pal;
int ret;
uint32_t fgcolor, bgcolor;
static uint64_t unique; /* we make no attempt to actually set this
* correctly globally, since that would require
* tracking all of our palettes. */
ret = qxl_alloc_bo_reserved(qdev,
sizeof(struct qxl_palette) + sizeof(uint32_t) * 2,
palette_bo);
ret = qxl_bo_kmap(*palette_bo, (void **)&pal);
pal->num_ents = 2;
pal->unique = unique++;
if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) {
/* NB: this is the only used branch currently. */
fgcolor = pseudo_palette[fb_image->fg_color];
bgcolor = pseudo_palette[fb_image->bg_color];
} else {
fgcolor = fb_image->fg_color;
bgcolor = fb_image->bg_color;
}
pal->ents[0] = bgcolor;
pal->ents[1] = fgcolor;
qxl_bo_kunmap(*palette_bo);
return 0;
}
void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image,
int stride /* filled in if 0 */)
{
struct qxl_device *qdev = qxl_fb_image->qdev;
struct qxl_drawable *drawable;
struct qxl_rect rect;
const struct fb_image *fb_image = &qxl_fb_image->fb_image;
int x = fb_image->dx;
int y = fb_image->dy;
int width = fb_image->width;
int height = fb_image->height;
const char *src = fb_image->data;
int depth = fb_image->depth;
struct qxl_release *release;
struct qxl_bo *image_bo;
struct qxl_image *image;
int ret;
if (stride == 0)
stride = depth * width / 8;
rect.left = x;
rect.right = x + width;
rect.top = y;
rect.bottom = y + height;
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release);
if (ret)
return;
ret = qxl_image_create(qdev, release, &image_bo,
(const uint8_t *)src, 0, 0,
width, height, depth, stride);
if (ret) {
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return;
}
if (depth == 1) {
struct qxl_bo *palette_bo;
void *ptr;
ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image);
qxl_release_add_res(qdev, release, palette_bo);
ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0);
image = ptr;
image->u.bitmap.palette =
qxl_bo_physical_address(qdev, palette_bo, 0);
qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
qxl_bo_unreserve(palette_bo);
qxl_bo_unref(&palette_bo);
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
drawable->u.copy.src_area.left = 0;
drawable->u.copy.src_area.right = width;
drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
drawable->u.copy.scale_mode = 0;
drawable->u.copy.mask.flags = 0;
drawable->u.copy.mask.pos.x = 0;
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap =
qxl_bo_physical_address(qdev, image_bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
qxl_release_add_res(qdev, release, image_bo);
qxl_bo_unreserve(image_bo);
qxl_bo_unref(&image_bo);
qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_unreserve(qdev, release);
}
/* push a draw command using the given clipping rectangles as
* the sources from the shadow framebuffer.
*
* Right now implementing with a single draw and a clip list. Clip
* lists are known to be a problem performance wise, this can be solved
* by treating them differently in the server.
*/
void qxl_draw_dirty_fb(struct qxl_device *qdev,
struct qxl_framebuffer *qxl_fb,
struct qxl_bo *bo,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips, int inc)
{
/*
* TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should
* send a fill command instead, much cheaper.
*
* See include/drm/drm_mode.h
*/
struct drm_clip_rect *clips_ptr;
int i;
int left, right, top, bottom;
int width, height;
struct qxl_drawable *drawable;
struct qxl_rect drawable_rect;
struct qxl_rect *rects;
int stride = qxl_fb->base.pitches[0];
/* depth is not actually interesting, we don't mask with it */
int depth = qxl_fb->base.bits_per_pixel;
uint8_t *surface_base;
struct qxl_release *release;
struct qxl_bo *image_bo;
struct qxl_bo *clips_bo;
int ret;
left = clips->x1;
right = clips->x2;
top = clips->y1;
bottom = clips->y2;
/* skip the first clip rect */
for (i = 1, clips_ptr = clips + inc;
i < num_clips; i++, clips_ptr += inc) {
left = min_t(int, left, (int)clips_ptr->x1);
right = max_t(int, right, (int)clips_ptr->x2);
top = min_t(int, top, (int)clips_ptr->y1);
bottom = max_t(int, bottom, (int)clips_ptr->y2);
}
width = right - left;
height = bottom - top;
drawable_rect.left = left;
drawable_rect.right = right;
drawable_rect.top = top;
drawable_rect.bottom = bottom;
ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect,
&release);
if (ret)
return;
ret = qxl_bo_kmap(bo, (void **)&surface_base);
if (ret)
goto out_unref;
ret = qxl_image_create(qdev, release, &image_bo, surface_base,
left, top, width, height, depth, stride);
qxl_bo_kunmap(bo);
if (ret)
goto out_unref;
rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release);
if (!rects) {
qxl_bo_unref(&image_bo);
goto out_unref;
}
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->clip.type = SPICE_CLIP_TYPE_RECTS;
drawable->clip.data = qxl_bo_physical_address(qdev,
clips_bo, 0);
qxl_release_add_res(qdev, release, clips_bo);
drawable->u.copy.src_area.top = 0;
drawable->u.copy.src_area.bottom = height;
drawable->u.copy.src_area.left = 0;
drawable->u.copy.src_area.right = width;
drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT;
drawable->u.copy.scale_mode = 0;
drawable->u.copy.mask.flags = 0;
drawable->u.copy.mask.pos.x = 0;
drawable->u.copy.mask.pos.y = 0;
drawable->u.copy.mask.bitmap = 0;
drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0);
qxl_release_unmap(qdev, release, &drawable->release_info);
qxl_release_add_res(qdev, release, image_bo);
qxl_bo_unreserve(image_bo);
qxl_bo_unref(&image_bo);
clips_ptr = clips;
for (i = 0; i < num_clips; i++, clips_ptr += inc) {
rects[i].left = clips_ptr->x1;
rects[i].right = clips_ptr->x2;
rects[i].top = clips_ptr->y1;
rects[i].bottom = clips_ptr->y2;
}
qxl_bo_kunmap(clips_bo);
qxl_bo_unreserve(clips_bo);
qxl_bo_unref(&clips_bo);
qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_unreserve(qdev, release);
return;
out_unref:
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
}
void qxl_draw_copyarea(struct qxl_device *qdev,
u32 width, u32 height,
u32 sx, u32 sy,
u32 dx, u32 dy)
{
struct qxl_drawable *drawable;
struct qxl_rect rect;
struct qxl_release *release;
int ret;
rect.left = dx;
rect.top = dy;
rect.right = dx + width;
rect.bottom = dy + height;
ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release);
if (ret)
return;
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.copy_bits.src_pos.x = sx;
drawable->u.copy_bits.src_pos.y = sy;
qxl_release_unmap(qdev, release, &drawable->release_info);
qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_unreserve(qdev, release);
}
void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec)
{
struct qxl_device *qdev = qxl_draw_fill_rec->qdev;
struct qxl_rect rect = qxl_draw_fill_rec->rect;
uint32_t color = qxl_draw_fill_rec->color;
uint16_t rop = qxl_draw_fill_rec->rop;
struct qxl_drawable *drawable;
struct qxl_release *release;
int ret;
ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release);
if (ret)
return;
drawable = (struct qxl_drawable *)qxl_release_map(qdev, release);
drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID;
drawable->u.fill.brush.u.color = color;
drawable->u.fill.rop_descriptor = rop;
drawable->u.fill.mask.flags = 0;
drawable->u.fill.mask.pos.x = 0;
drawable->u.fill.mask.pos.y = 0;
drawable->u.fill.mask.bitmap = 0;
qxl_release_unmap(qdev, release, &drawable->release_info);
qxl_fence_releaseable(qdev, release);
qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false);
qxl_release_unreserve(qdev, release);
}

drivers/gpu/drm/qxl/qxl_drv.c

@@ -0,0 +1,145 @@
/* vim: set ts=8 sw=8 tw=78 ai noexpandtab */
/* qxl_drv.c -- QXL driver -*- linux-c -*-
*
* Copyright 2011 Red Hat, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Dave Airlie <airlie@redhat.com>
* Alon Levy <alevy@redhat.com>
*/
#include <linux/module.h>
#include <linux/console.h>
#include "drmP.h"
#include "drm/drm.h"
#include "qxl_drv.h"
extern int qxl_max_ioctls;
static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8,
0xffff00, 0 },
{ 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8,
0xffff00, 0 },
{ 0, 0, 0 },
};
MODULE_DEVICE_TABLE(pci, pciidlist);
int qxl_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, qxl_modeset, int, 0400);
static struct drm_driver qxl_driver;
static struct pci_driver qxl_pci_driver;
static int
qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
if (pdev->revision < 4) {
DRM_ERROR("qxl too old, doesn't support client_monitors_config,"
" use xf86-video-qxl in user mode");
return -EINVAL; /* TODO: ENODEV ? */
}
return drm_get_pci_dev(pdev, ent, &qxl_driver);
}
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
drm_put_dev(dev);
}
static struct pci_driver qxl_pci_driver = {
.name = DRIVER_NAME,
.id_table = pciidlist,
.probe = qxl_pci_probe,
.remove = qxl_pci_remove,
};
static const struct file_operations qxl_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
.poll = drm_poll,
.fasync = drm_fasync,
.mmap = qxl_mmap,
};
static struct drm_driver qxl_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET |
DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
.dev_priv_size = 0,
.load = qxl_driver_load,
.unload = qxl_driver_unload,
.dumb_create = qxl_mode_dumb_create,
.dumb_map_offset = qxl_mode_dumb_mmap,
.dumb_destroy = qxl_mode_dumb_destroy,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
.debugfs_cleanup = qxl_debugfs_takedown,
#endif
.gem_init_object = qxl_gem_object_init,
.gem_free_object = qxl_gem_object_free,
.gem_open_object = qxl_gem_object_open,
.gem_close_object = qxl_gem_object_close,
.fops = &qxl_fops,
.ioctls = qxl_ioctls,
.irq_handler = qxl_irq_handler,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = 0,
.minor = 1,
.patchlevel = 0,
};
static int __init qxl_init(void)
{
#ifdef CONFIG_VGA_CONSOLE
if (vgacon_text_force() && qxl_modeset == -1)
return -EINVAL;
#endif
if (qxl_modeset == 0)
return -EINVAL;
qxl_driver.num_ioctls = qxl_max_ioctls;
return drm_pci_init(&qxl_driver, &qxl_pci_driver);
}
static void __exit qxl_exit(void)
{
drm_pci_exit(&qxl_driver, &qxl_pci_driver);
}
module_init(qxl_init);
module_exit(qxl_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");

File diff suppressed because it is too large.

drivers/gpu/drm/qxl/qxl_dumb.c

@@ -0,0 +1,93 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
#include "qxl_object.h"
/* dumb ioctls implementation */
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
struct qxl_device *qdev = dev->dev_private;
struct qxl_bo *qobj;
uint32_t handle;
int r;
struct qxl_surface surf;
uint32_t pitch, format;
pitch = args->width * ((args->bpp + 1) / 8);
args->size = pitch * args->height;
args->size = ALIGN(args->size, PAGE_SIZE);
switch (args->bpp) {
case 16:
format = SPICE_SURFACE_FMT_16_565;
break;
case 32:
format = SPICE_SURFACE_FMT_32_xRGB;
break;
default:
return -EINVAL;
}
surf.width = args->width;
surf.height = args->height;
surf.stride = pitch;
surf.format = format;
r = qxl_gem_object_create_with_handle(qdev, file_priv,
QXL_GEM_DOMAIN_VRAM,
args->size, &surf, &qobj,
&handle);
if (r)
return r;
args->pitch = pitch;
args->handle = handle;
return 0;
}
int qxl_mode_dumb_destroy(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle)
{
return drm_gem_handle_delete(file_priv, handle);
}
int qxl_mode_dumb_mmap(struct drm_file *file_priv,
struct drm_device *dev,
uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
BUG_ON(!offset_p);
gobj = drm_gem_object_lookup(dev, file_priv, handle);
if (gobj == NULL)
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
*offset_p = qxl_bo_mmap_offset(qobj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
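
For reference, these entry points are reached through the generic DRM
dumb-buffer ioctls; a minimal userspace sketch (error handling omitted, fd is
an open /dev/dri/cardN file descriptor, needs <sys/ioctl.h>, <sys/mman.h> and
the drm_mode.h UAPI header):

static void *qxl_dumb_map_example(int fd)
{
        struct drm_mode_create_dumb create = {
                .width = 1024, .height = 768, .bpp = 32,
        };
        struct drm_mode_map_dumb map = {0};

        ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);  /* -> qxl_mode_dumb_create() */
        map.handle = create.handle;
        ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);        /* -> qxl_mode_dumb_mmap() */

        return mmap(NULL, create.size, PROT_READ | PROT_WRITE,
                    MAP_SHARED, fd, map.offset);
}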

File diff suppressed because it is too large.

drivers/gpu/drm/qxl/qxl_fence.c

@@ -0,0 +1,97 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
/* QXL fencing-
When we submit operations to the GPU we pass a release reference to the GPU
with them, the release reference is then added to the release ring when
the GPU is finished with that particular operation and has removed it from
its tree.
So we can have multiple outstanding non-linear fences per object.
From a TTM POV we only care if the object has any outstanding releases on
it.
We wait until all outstanding releases are processed.
The sync object is just a list of release ids that represent that fence on
that buffer.
We just add new releases onto the sync object attached to the object.
This currently uses a radix tree to store the list of release ids.
For some reason every so often qxl hw fails to release, things go wrong.
*/
int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id)
{
struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
spin_lock(&bo->tbo.bdev->fence_lock);
radix_tree_insert(&qfence->tree, rel_id, qfence);
qfence->num_active_releases++;
spin_unlock(&bo->tbo.bdev->fence_lock);
return 0;
}
int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id)
{
void *ret;
int retval = 0;
struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence);
spin_lock(&bo->tbo.bdev->fence_lock);
ret = radix_tree_delete(&qfence->tree, rel_id);
if (ret == qfence)
qfence->num_active_releases--;
else {
DRM_DEBUG("didn't find fence in radix tree for %d\n", rel_id);
retval = -ENOENT;
}
spin_unlock(&bo->tbo.bdev->fence_lock);
return retval;
}
int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence)
{
qfence->qdev = qdev;
qfence->num_active_releases = 0;
INIT_RADIX_TREE(&qfence->tree, GFP_ATOMIC);
return 0;
}
void qxl_fence_fini(struct qxl_fence *qfence)
{
kfree(qfence->release_ids);
qfence->num_active_releases = 0;
}

drivers/gpu/drm/qxl/qxl_gem.c

@@ -0,0 +1,178 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "drmP.h"
#include "drm/drm.h"
#include "qxl_drv.h"
#include "qxl_object.h"
int qxl_gem_object_init(struct drm_gem_object *obj)
{
/* we do nothing here */
return 0;
}
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
if (qobj)
qxl_bo_unref(&qobj);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
int alignment, int initial_domain,
bool discardable, bool kernel,
struct qxl_surface *surf,
struct drm_gem_object **obj)
{
struct qxl_bo *qbo;
int r;
*obj = NULL;
/* At least align on page size */
if (alignment < PAGE_SIZE)
alignment = PAGE_SIZE;
r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo);
if (r) {
if (r != -ERESTARTSYS)
DRM_ERROR(
"Failed to allocate GEM object (%d, %d, %u, %d)\n",
size, initial_domain, alignment, r);
return r;
}
*obj = &qbo->gem_base;
mutex_lock(&qdev->gem.mutex);
list_add_tail(&qbo->list, &qdev->gem.objects);
mutex_unlock(&qdev->gem.mutex);
return 0;
}
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
struct drm_file *file_priv,
u32 domain,
size_t size,
struct qxl_surface *surf,
struct qxl_bo **qobj,
uint32_t *handle)
{
struct drm_gem_object *gobj;
int r;
BUG_ON(!qobj);
BUG_ON(!handle);
r = qxl_gem_object_create(qdev, size, 0,
domain,
false, false, surf,
&gobj);
if (r)
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, handle);
if (r)
return r;
/* drop reference from allocate - handle holds it now */
*qobj = gem_to_qxl_bo(gobj);
drm_gem_object_unreference_unlocked(gobj);
return 0;
}
int qxl_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
uint64_t *gpu_addr)
{
struct qxl_bo *qobj = obj->driver_private;
int r;
r = qxl_bo_reserve(qobj, false);
if (unlikely(r != 0))
return r;
r = qxl_bo_pin(qobj, pin_domain, gpu_addr);
qxl_bo_unreserve(qobj);
return r;
}
void qxl_gem_object_unpin(struct drm_gem_object *obj)
{
struct qxl_bo *qobj = obj->driver_private;
int r;
r = qxl_bo_reserve(qobj, false);
if (likely(r == 0)) {
qxl_bo_unpin(qobj);
qxl_bo_unreserve(qobj);
}
}
int qxl_gem_set_domain(struct drm_gem_object *gobj,
uint32_t rdomain, uint32_t wdomain)
{
struct qxl_bo *qobj;
uint32_t domain;
int r;
/* FIXME: reimplement */
qobj = gobj->driver_private;
/* work out where to validate the buffer to */
domain = wdomain;
if (!domain)
domain = rdomain;
if (!domain) {
/* Do nothing */
pr_warn("Set domain without domain!\n");
return 0;
}
if (domain == QXL_GEM_DOMAIN_CPU) {
/* Asking for cpu access wait for object idle */
r = qxl_bo_wait(qobj, NULL, false);
if (r) {
pr_err("Failed to wait for object !\n");
return r;
}
}
return 0;
}
int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
{
return 0;
}
void qxl_gem_object_close(struct drm_gem_object *obj,
struct drm_file *file_priv)
{
}
int qxl_gem_init(struct qxl_device *qdev)
{
INIT_LIST_HEAD(&qdev->gem.objects);
return 0;
}
void qxl_gem_fini(struct qxl_device *qdev)
{
qxl_bo_force_delete(qdev);
}

drivers/gpu/drm/qxl/qxl_image.c

@@ -0,0 +1,176 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include <linux/gfp.h>
#include <linux/slab.h>
#include "qxl_drv.h"
#include "qxl_object.h"
static int
qxl_image_create_helper(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_bo **image_bo,
const uint8_t *data,
int width, int height,
int depth, unsigned int hash,
int stride)
{
struct qxl_image *image;
struct qxl_data_chunk *chunk;
int i;
int chunk_stride;
int linesize = width * depth / 8;
struct qxl_bo *chunk_bo;
int ret;
void *ptr;
/* Chunk */
/* FIXME: Check integer overflow */
/* TODO: variable number of chunks */
chunk_stride = stride; /* TODO: should use linesize, but it renders
wrong (check the bitmaps are sent correctly
first) */
ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride,
&chunk_bo);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0);
chunk = ptr;
chunk->data_size = height * chunk_stride;
chunk->prev_chunk = 0;
chunk->next_chunk = 0;
qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
{
void *k_data, *i_data;
int remain;
int page;
int size;
if (stride == linesize && chunk_stride == stride) {
remain = linesize * height;
page = 0;
i_data = (void *)data;
while (remain > 0) {
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT);
if (page == 0) {
chunk = ptr;
k_data = chunk->data;
size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data);
} else {
k_data = ptr;
size = PAGE_SIZE;
}
size = min(size, remain);
memcpy(k_data, i_data, size);
qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
i_data += size;
remain -= size;
page++;
}
} else {
unsigned page_base, page_offset, out_offset;
for (i = 0 ; i < height ; ++i) {
i_data = (void *)data + i * stride;
remain = linesize;
out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride;
while (remain > 0) {
page_base = out_offset & PAGE_MASK;
page_offset = offset_in_page(out_offset);
size = min((int)(PAGE_SIZE - page_offset), remain);
ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base);
k_data = ptr + page_offset;
memcpy(k_data, i_data, size);
qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr);
remain -= size;
i_data += size;
out_offset += size;
}
}
}
}
qxl_bo_kunmap(chunk_bo);
/* Image */
ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo);
ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0);
image = ptr;
image->descriptor.id = 0;
image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP;
image->descriptor.flags = 0;
image->descriptor.width = width;
image->descriptor.height = height;
switch (depth) {
case 1:
/* TODO: BE? check by arch? */
image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE;
break;
case 24:
image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT;
break;
case 32:
image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT;
break;
default:
DRM_ERROR("unsupported image bit depth\n");
return -EINVAL; /* TODO: cleanup */
}
image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
image->u.bitmap.x = width;
image->u.bitmap.y = height;
image->u.bitmap.stride = chunk_stride;
image->u.bitmap.palette = 0;
image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0);
qxl_release_add_res(qdev, release, chunk_bo);
qxl_bo_unreserve(chunk_bo);
qxl_bo_unref(&chunk_bo);
qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr);
return 0;
}
int qxl_image_create(struct qxl_device *qdev,
struct qxl_release *release,
struct qxl_bo **image_bo,
const uint8_t *data,
int x, int y, int width, int height,
int depth, int stride)
{
data += y * stride + x * (depth / 8);
return qxl_image_create_helper(qdev, release, image_bo, data,
width, height, depth, 0, stride);
}

drivers/gpu/drm/qxl/qxl_ioctl.c

@@ -0,0 +1,411 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
#include "qxl_object.h"
/*
* TODO: allocating a new gem(in qxl_bo) for each request.
* This is wasteful since bo's are page aligned.
*/
int qxl_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_alloc *qxl_alloc = data;
int ret;
struct qxl_bo *qobj;
uint32_t handle;
u32 domain = QXL_GEM_DOMAIN_VRAM;
if (qxl_alloc->size == 0) {
DRM_ERROR("invalid size %d\n", qxl_alloc->size);
return -EINVAL;
}
ret = qxl_gem_object_create_with_handle(qdev, file_priv,
domain,
qxl_alloc->size,
NULL,
&qobj, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
return -ENOMEM;
}
qxl_alloc->handle = handle;
return 0;
}
int qxl_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_map *qxl_map = data;
return qxl_mode_dumb_mmap(file_priv, qdev->ddev, qxl_map->handle,
&qxl_map->offset);
}
/*
* dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's
* are on vram).
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off)
*/
static void
apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
struct qxl_bo *src, uint64_t src_off)
{
void *reloc_page;
reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
*(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev,
src, src_off);
qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
}
static void
apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off,
struct qxl_bo *src)
{
uint32_t id = 0;
void *reloc_page;
if (src && !src->is_primary)
id = src->surface_id;
reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK);
*(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id;
qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page);
}
/* return holding the reference to this object */
struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
struct drm_file *file_priv, uint64_t handle,
struct qxl_reloc_list *reloc_list)
{
struct drm_gem_object *gobj;
struct qxl_bo *qobj;
int ret;
gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
if (!gobj) {
DRM_ERROR("bad bo handle %lld\n", handle);
return NULL;
}
qobj = gem_to_qxl_bo(gobj);
ret = qxl_bo_list_add(reloc_list, qobj);
if (ret)
return NULL;
return qobj;
}
/*
* Usage of execbuffer:
* Relocations need to take into account the full QXLDrawable size.
* However, the command as passed from user space must *not* contain the initial
* QXLReleaseInfo struct (first XXX bytes)
*/
int qxl_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_execbuffer *execbuffer = data;
struct drm_qxl_command user_cmd;
int cmd_num;
struct qxl_bo *reloc_src_bo;
struct qxl_bo *reloc_dst_bo;
struct drm_qxl_reloc reloc;
void *fb_cmd;
int i, ret;
struct qxl_reloc_list reloc_list;
int unwritten;
uint32_t reloc_dst_offset;
INIT_LIST_HEAD(&reloc_list.bos);
for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) {
struct qxl_release *release;
struct qxl_bo *cmd_bo;
int release_type;
struct drm_qxl_command *commands =
(struct drm_qxl_command *)execbuffer->commands;
if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num],
sizeof(user_cmd)))
return -EFAULT;
switch (user_cmd.type) {
case QXL_CMD_DRAW:
release_type = QXL_RELEASE_DRAWABLE;
break;
case QXL_CMD_SURFACE:
case QXL_CMD_CURSOR:
default:
DRM_DEBUG("Only draw commands in execbuffers\n");
return -EINVAL;
break;
}
if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info))
return -EINVAL;
ret = qxl_alloc_release_reserved(qdev,
sizeof(union qxl_release_info) +
user_cmd.command_size,
release_type,
&release,
&cmd_bo);
if (ret)
return ret;
/* TODO copy slow path code from i915 */
fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE));
unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size);
qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd);
if (unwritten) {
DRM_ERROR("got unwritten %d\n", unwritten);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EFAULT;
}
for (i = 0 ; i < user_cmd.relocs_num; ++i) {
if (DRM_COPY_FROM_USER(&reloc,
&((struct drm_qxl_reloc *)user_cmd.relocs)[i],
sizeof(reloc))) {
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EFAULT;
}
/* add the bos to the list of bos to validate -
need to validate first then process relocs? */
if (reloc.dst_handle) {
reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv,
reloc.dst_handle, &reloc_list);
if (!reloc_dst_bo) {
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EINVAL;
}
reloc_dst_offset = 0;
} else {
reloc_dst_bo = cmd_bo;
reloc_dst_offset = release->release_offset;
}
/* reserve and validate the reloc dst bo */
if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) {
reloc_src_bo =
qxlhw_handle_to_bo(qdev, file_priv,
reloc.src_handle, &reloc_list);
if (!reloc_src_bo) {
if (reloc_dst_bo != cmd_bo)
drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
qxl_bo_list_unreserve(&reloc_list, true);
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
return -EINVAL;
}
} else
reloc_src_bo = NULL;
if (reloc.reloc_type == QXL_RELOC_TYPE_BO) {
apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset,
reloc_src_bo, reloc.src_offset);
} else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) {
apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo);
} else {
DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type);
return -EINVAL;
}
if (reloc_src_bo && reloc_src_bo != cmd_bo) {
qxl_release_add_res(qdev, release, reloc_src_bo);
drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base);
}
if (reloc_dst_bo != cmd_bo)
drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base);
}
qxl_fence_releaseable(qdev, release);
ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true);
if (ret == -ERESTARTSYS) {
qxl_release_unreserve(qdev, release);
qxl_release_free(qdev, release);
qxl_bo_list_unreserve(&reloc_list, true);
return ret;
}
qxl_release_unreserve(qdev, release);
}
qxl_bo_list_unreserve(&reloc_list, 0);
return 0;
}
int qxl_update_area_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_update_area *update_area = data;
struct qxl_rect area = {.left = update_area->left,
.top = update_area->top,
.right = update_area->right,
.bottom = update_area->bottom};
int ret;
struct drm_gem_object *gobj = NULL;
struct qxl_bo *qobj = NULL;
if (update_area->left >= update_area->right ||
update_area->top >= update_area->bottom)
return -EINVAL;
gobj = drm_gem_object_lookup(dev, file, update_area->handle);
if (gobj == NULL)
return -ENOENT;
qobj = gem_to_qxl_bo(gobj);
ret = qxl_bo_reserve(qobj, false);
if (ret)
goto out;
if (!qobj->pin_count) {
ret = ttm_bo_validate(&qobj->tbo, &qobj->placement,
true, false);
if (unlikely(ret))
goto out;
}
ret = qxl_bo_check_id(qdev, qobj);
if (ret)
goto out2;
if (!qobj->surface_id)
DRM_ERROR("got update area for surface with no id %d\n", update_area->handle);
ret = qxl_io_update_area(qdev, qobj, &area);
out2:
qxl_bo_unreserve(qobj);
out:
drm_gem_object_unreference_unlocked(gobj);
return ret;
}
static int qxl_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_getparam *param = data;
switch (param->param) {
case QXL_PARAM_NUM_SURFACES:
param->value = qdev->rom->n_surfaces;
break;
case QXL_PARAM_MAX_RELOCS:
param->value = QXL_MAX_RES;
break;
default:
return -EINVAL;
}
return 0;
}
static int qxl_clientcap_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_clientcap *param = data;
int byte, idx;
byte = param->index / 8;
idx = param->index % 8;
if (qdev->pdev->revision < 4)
return -ENOSYS;
if (byte > 58)
return -ENOSYS;
if (qdev->rom->client_capabilities[byte] & (1 << idx))
return 0;
return -ENOSYS;
}
static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct qxl_device *qdev = dev->dev_private;
struct drm_qxl_alloc_surf *param = data;
struct qxl_bo *qobj;
int handle;
int ret;
int size, actual_stride;
struct qxl_surface surf;
/* work out size allocate bo with handle */
actual_stride = param->stride < 0 ? -param->stride : param->stride;
size = actual_stride * param->height + actual_stride;
surf.format = param->format;
surf.width = param->width;
surf.height = param->height;
surf.stride = param->stride;
surf.data = 0;
ret = qxl_gem_object_create_with_handle(qdev, file,
QXL_GEM_DOMAIN_SURFACE,
size,
&surf,
&qobj, &handle);
if (ret) {
DRM_ERROR("%s: failed to create gem ret=%d\n",
__func__, ret);
return -ENOMEM;
} else
param->handle = handle;
return ret;
}
struct drm_ioctl_desc qxl_ioctls[] = {
DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl,
DRM_AUTH|DRM_UNLOCKED),
DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl,
DRM_AUTH|DRM_UNLOCKED),
};
int qxl_max_ioctls = DRM_ARRAY_SIZE(qxl_ioctls);
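
For reference, the userspace (X driver) side of the execbuffer path would look
roughly like this. The struct and field names match what the code above copies
in from userspace; the DRM_IOCTL_QXL_EXECBUFFER macro name is assumed from the
qxl_drm.h UAPI header added by this commit (suppressed above), and the
drawable/handle/offset parameters are placeholders supplied by the caller:

/* drawable: the QXLDrawable body built by userspace, *without* the leading
 * release info; image_bo_handle: GEM handle of the bo holding the image data;
 * bitmap_ptr_offset: offset of the src_bitmap pointer inside the drawable. */
static int qxl_submit_draw_example(int fd, void *drawable, uint32_t drawable_size,
                                   uint32_t image_bo_handle, uint32_t bitmap_ptr_offset)
{
        struct drm_qxl_reloc reloc = {
                .reloc_type = QXL_RELOC_TYPE_BO,
                .src_handle = image_bo_handle,
                .dst_handle = 0,                /* 0: patch the command bo itself */
                .dst_offset = bitmap_ptr_offset,
                .src_offset = 0,
        };
        struct drm_qxl_command cmd = {
                .type         = QXL_CMD_DRAW,
                .command      = (uintptr_t)drawable,
                .command_size = drawable_size,
                .relocs       = (uintptr_t)&reloc,
                .relocs_num   = 1,
        };
        struct drm_qxl_execbuffer eb = {
                .commands_num = 1,
                .commands     = (uintptr_t)&cmd,
        };

        /* macro name assumed from the (suppressed) qxl_drm.h header */
        return ioctl(fd, DRM_IOCTL_QXL_EXECBUFFER, &eb);   /* -> qxl_execbuffer_ioctl() */
}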

drivers/gpu/drm/qxl/qxl_irq.c

@@ -0,0 +1,97 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
struct qxl_device *qdev = (struct qxl_device *)dev->dev_private;
uint32_t pending;
pending = xchg(&qdev->ram_header->int_pending, 0);
atomic_inc(&qdev->irq_received);
if (pending & QXL_INTERRUPT_DISPLAY) {
atomic_inc(&qdev->irq_received_display);
wake_up_all(&qdev->display_event);
qxl_queue_garbage_collect(qdev, false);
}
if (pending & QXL_INTERRUPT_CURSOR) {
atomic_inc(&qdev->irq_received_cursor);
wake_up_all(&qdev->cursor_event);
}
if (pending & QXL_INTERRUPT_IO_CMD) {
atomic_inc(&qdev->irq_received_io_cmd);
wake_up_all(&qdev->io_cmd_event);
}
if (pending & QXL_INTERRUPT_ERROR) {
/* TODO: log it, reset device (only way to exit this condition)
* (do it a certain number of times, afterwards admit defeat,
* to avoid endless loops).
*/
qdev->irq_received_error++;
qxl_io_log(qdev, "%s: driver is in bug mode.\n", __func__);
}
if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) {
qxl_io_log(qdev, "QXL_INTERRUPT_CLIENT_MONITORS_CONFIG\n");
schedule_work(&qdev->client_monitors_config_work);
}
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ);
return IRQ_HANDLED;
}
static void qxl_client_monitors_config_work_func(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device,
client_monitors_config_work);
qxl_display_read_client_monitors_config(qdev);
}
int qxl_irq_init(struct qxl_device *qdev)
{
int ret;
init_waitqueue_head(&qdev->display_event);
init_waitqueue_head(&qdev->cursor_event);
init_waitqueue_head(&qdev->io_cmd_event);
INIT_WORK(&qdev->client_monitors_config_work,
qxl_client_monitors_config_work_func);
atomic_set(&qdev->irq_received, 0);
atomic_set(&qdev->irq_received_display, 0);
atomic_set(&qdev->irq_received_cursor, 0);
atomic_set(&qdev->irq_received_io_cmd, 0);
qdev->irq_received_error = 0;
ret = drm_irq_install(qdev->ddev);
qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
if (unlikely(ret != 0)) {
DRM_ERROR("Failed installing irq: %d\n", ret);
return 1;
}
return 0;
}

drivers/gpu/drm/qxl/qxl_kms.c

@@ -0,0 +1,302 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
#include "qxl_object.h"
#include <linux/io-mapping.h>
int qxl_log_level;
static void qxl_dump_mode(struct qxl_device *qdev, void *p)
{
struct qxl_mode *m = p;
DRM_DEBUG_KMS("%d: %dx%d %d bits, stride %d, %dmm x %dmm, orientation %d\n",
m->id, m->x_res, m->y_res, m->bits, m->stride, m->x_mili,
m->y_mili, m->orientation);
}
static bool qxl_check_device(struct qxl_device *qdev)
{
struct qxl_rom *rom = qdev->rom;
int mode_offset;
int i;
if (rom->magic != 0x4f525851) {
DRM_ERROR("bad rom signature %x\n", rom->magic);
return false;
}
DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id);
DRM_INFO("Compression level %d log level %d\n", rom->compression_level,
rom->log_level);
DRM_INFO("Currently using mode #%d, list at 0x%x\n",
rom->mode, rom->modes_offset);
DRM_INFO("%d io pages at offset 0x%x\n",
rom->num_io_pages, rom->pages_offset);
DRM_INFO("%d byte draw area at offset 0x%x\n",
rom->surface0_area_size, rom->draw_area_offset);
qdev->vram_size = rom->surface0_area_size;
DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset);
mode_offset = rom->modes_offset / 4;
qdev->mode_info.num_modes = ((u32 *)rom)[mode_offset];
DRM_INFO("rom modes offset 0x%x for %d modes\n", rom->modes_offset,
qdev->mode_info.num_modes);
qdev->mode_info.modes = (void *)((uint32_t *)rom + mode_offset + 1);
for (i = 0; i < qdev->mode_info.num_modes; i++)
qxl_dump_mode(qdev, qdev->mode_info.modes + i);
return true;
}
static uint8_t setup_slot(struct qxl_device *qdev, uint8_t slot_index_offset,
unsigned long start_phys_addr, unsigned long end_phys_addr)
{
uint64_t high_bits;
struct qxl_memslot *slot;
uint8_t slot_index;
struct qxl_ram_header *ram_header = qdev->ram_header;
slot_index = qdev->rom->slots_start + slot_index_offset;
slot = &qdev->mem_slots[slot_index];
slot->start_phys_addr = start_phys_addr;
slot->end_phys_addr = end_phys_addr;
ram_header->mem_slot.mem_start = slot->start_phys_addr;
ram_header->mem_slot.mem_end = slot->end_phys_addr;
qxl_io_memslot_add(qdev, slot_index);
slot->generation = qdev->rom->slot_generation;
high_bits = slot_index << qdev->slot_gen_bits;
high_bits |= slot->generation;
high_bits <<= (64 - (qdev->slot_gen_bits + qdev->slot_id_bits));
slot->high_bits = high_bits;
return slot_index;
}
static void qxl_gc_work(struct work_struct *work)
{
struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work);
qxl_garbage_collect(qdev);
}
int qxl_device_init(struct qxl_device *qdev,
struct drm_device *ddev,
struct pci_dev *pdev,
unsigned long flags)
{
int r;
qdev->dev = &pdev->dev;
qdev->ddev = ddev;
qdev->pdev = pdev;
qdev->flags = flags;
mutex_init(&qdev->gem.mutex);
mutex_init(&qdev->update_area_mutex);
mutex_init(&qdev->release_mutex);
mutex_init(&qdev->surf_evict_mutex);
INIT_LIST_HEAD(&qdev->gem.objects);
qdev->rom_base = pci_resource_start(pdev, 2);
qdev->rom_size = pci_resource_len(pdev, 2);
qdev->vram_base = pci_resource_start(pdev, 0);
qdev->surfaceram_base = pci_resource_start(pdev, 1);
qdev->surfaceram_size = pci_resource_len(pdev, 1);
qdev->io_base = pci_resource_start(pdev, 3);
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
qdev->surface_mapping = io_mapping_create_wc(qdev->surfaceram_base, qdev->surfaceram_size);
DRM_DEBUG_KMS("qxl: vram %p-%p(%dM %dk), surface %p-%p(%dM %dk)\n",
(void *)qdev->vram_base, (void *)pci_resource_end(pdev, 0),
(int)pci_resource_len(pdev, 0) / 1024 / 1024,
(int)pci_resource_len(pdev, 0) / 1024,
(void *)qdev->surfaceram_base,
(void *)pci_resource_end(pdev, 1),
(int)qdev->surfaceram_size / 1024 / 1024,
(int)qdev->surfaceram_size / 1024);
qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
if (!qdev->rom) {
pr_err("Unable to ioremap ROM\n");
return -ENOMEM;
}
qxl_check_device(qdev);
r = qxl_bo_init(qdev);
if (r) {
DRM_ERROR("bo init failed %d\n", r);
return r;
}
qdev->ram_header = ioremap(qdev->vram_base +
qdev->rom->ram_header_offset,
sizeof(*qdev->ram_header));
qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
sizeof(struct qxl_command),
QXL_COMMAND_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CMD,
false,
&qdev->display_event);
qdev->cursor_ring = qxl_ring_create(
&(qdev->ram_header->cursor_ring_hdr),
sizeof(struct qxl_command),
QXL_CURSOR_RING_SIZE,
qdev->io_base + QXL_IO_NOTIFY_CMD,
false,
&qdev->cursor_event);
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
QXL_RELEASE_RING_SIZE, 0, true,
NULL);
/* TODO - slot initialization should happen on reset. where is our
* reset handler? */
qdev->n_mem_slots = qdev->rom->slots_end;
qdev->slot_gen_bits = qdev->rom->slot_gen_bits;
qdev->slot_id_bits = qdev->rom->slot_id_bits;
qdev->va_slot_mask =
(~(uint64_t)0) >> (qdev->slot_id_bits + qdev->slot_gen_bits);
qdev->mem_slots =
kmalloc(qdev->n_mem_slots * sizeof(struct qxl_memslot),
GFP_KERNEL);
if (!qdev->mem_slots)
return -ENOMEM;
idr_init(&qdev->release_idr);
spin_lock_init(&qdev->release_idr_lock);
idr_init(&qdev->surf_id_idr);
spin_lock_init(&qdev->surf_id_idr_lock);
mutex_init(&qdev->async_io_mutex);
/* reset the device into a known state - no memslots, no primary
* created, no surfaces. */
qxl_io_reset(qdev);
/* must initialize irq before first async io - slot creation */
r = qxl_irq_init(qdev);
if (r)
return r;
/*
* Note that the main slot covers VRAM, which holds surface 0 (the
* primary surface). We rely on the single ioremap done before.
*/
qdev->main_mem_slot = setup_slot(qdev, 0,
(unsigned long)qdev->vram_base,
(unsigned long)qdev->vram_base + qdev->rom->ram_header_offset);
qdev->surfaces_mem_slot = setup_slot(qdev, 1,
(unsigned long)qdev->surfaceram_base,
(unsigned long)qdev->surfaceram_base + qdev->surfaceram_size);
DRM_INFO("main mem slot %d [%lx,%x)\n",
qdev->main_mem_slot,
(unsigned long)qdev->vram_base, qdev->rom->ram_header_offset);
qdev->gc_queue = create_singlethread_workqueue("qxl_gc");
INIT_WORK(&qdev->gc_work, qxl_gc_work);
r = qxl_fb_init(qdev);
if (r)
return r;
return 0;
}
void qxl_device_fini(struct qxl_device *qdev)
{
if (qdev->current_release_bo[0])
qxl_bo_unref(&qdev->current_release_bo[0]);
if (qdev->current_release_bo[1])
qxl_bo_unref(&qdev->current_release_bo[1]);
flush_workqueue(qdev->gc_queue);
destroy_workqueue(qdev->gc_queue);
qdev->gc_queue = NULL;
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
iounmap(qdev->ram_header);
iounmap(qdev->rom);
qdev->rom = NULL;
qdev->mode_info.modes = NULL;
qdev->mode_info.num_modes = 0;
qxl_debugfs_remove_files(qdev);
}
int qxl_driver_unload(struct drm_device *dev)
{
struct qxl_device *qdev = dev->dev_private;
if (qdev == NULL)
return 0;
qxl_modeset_fini(qdev);
qxl_device_fini(qdev);
kfree(qdev);
dev->dev_private = NULL;
return 0;
}
int qxl_driver_load(struct drm_device *dev, unsigned long flags)
{
struct qxl_device *qdev;
int r;
/* require kms */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -ENODEV;
qdev = kzalloc(sizeof(struct qxl_device), GFP_KERNEL);
if (qdev == NULL)
return -ENOMEM;
dev->dev_private = qdev;
r = qxl_device_init(qdev, dev, dev->pdev, flags);
if (r)
goto out;
r = qxl_modeset_init(qdev);
if (r) {
qxl_driver_unload(dev);
goto out;
}
return 0;
out:
kfree(qdev);
return r;
}

View File

@@ -0,0 +1,365 @@
/*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "qxl_drv.h"
#include "qxl_object.h"
#include <linux/io-mapping.h>
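/* TTM destroy callback: evict any hardware surface backed by this bo, tear
* down its fence bookkeeping and drop it from the per-device object list
* before releasing the GEM object. */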
static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
{
struct qxl_bo *bo;
struct qxl_device *qdev;
bo = container_of(tbo, struct qxl_bo, tbo);
qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
qxl_surface_evict(qdev, bo, false);
qxl_fence_fini(&bo->fence);
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
drm_gem_object_release(&bo->gem_base);
kfree(bo);
}
bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
{
if (bo->destroy == &qxl_ttm_bo_destroy)
return true;
return false;
}
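/* Translate a QXL_GEM_DOMAIN_* mask into TTM placements: VRAM maps to
* TTM_PL_VRAM, surface memory to the private TTM_PL_PRIV0 pool, and CPU
* (or an empty mask) to system memory. */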
void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain)
{
u32 c = 0;
qbo->placement.fpfn = 0;
qbo->placement.lpfn = 0;
qbo->placement.placement = qbo->placements;
qbo->placement.busy_placement = qbo->placements;
if (domain & QXL_GEM_DOMAIN_VRAM)
qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM;
if (domain & QXL_GEM_DOMAIN_SURFACE)
qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0;
if (domain & QXL_GEM_DOMAIN_CPU)
qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
if (!c)
qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
qbo->placement.num_placement = c;
qbo->placement.num_busy_placement = c;
}
int qxl_bo_create(struct qxl_device *qdev,
unsigned long size, bool kernel, u32 domain,
struct qxl_surface *surf,
struct qxl_bo **bo_ptr)
{
struct qxl_bo *bo;
enum ttm_bo_type type;
int r;
if (unlikely(qdev->mman.bdev.dev_mapping == NULL))
qdev->mman.bdev.dev_mapping = qdev->ddev->dev_mapping;
if (kernel)
type = ttm_bo_type_kernel;
else
type = ttm_bo_type_device;
*bo_ptr = NULL;
bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
size = roundup(size, PAGE_SIZE);
r = drm_gem_object_init(qdev->ddev, &bo->gem_base, size);
if (unlikely(r)) {
kfree(bo);
return r;
}
bo->gem_base.driver_private = NULL;
bo->type = domain;
bo->pin_count = 0;
bo->surface_id = 0;
qxl_fence_init(qdev, &bo->fence);
INIT_LIST_HEAD(&bo->list);
atomic_set(&bo->reserve_count, 0);
if (surf)
bo->surf = *surf;
qxl_ttm_placement_from_domain(bo, domain);
r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
&bo->placement, 0, !kernel, NULL, size,
NULL, &qxl_ttm_bo_destroy);
if (unlikely(r != 0)) {
if (r != -ERESTARTSYS)
dev_err(qdev->dev,
"object_init failed for (%lu, 0x%08X)\n",
size, domain);
return r;
}
*bo_ptr = bo;
return 0;
}
int qxl_bo_kmap(struct qxl_bo *bo, void **ptr)
{
bool is_iomem;
int r;
if (bo->kptr) {
if (ptr)
*ptr = bo->kptr;
return 0;
}
r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
if (r)
return r;
bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
if (ptr)
*ptr = bo->kptr;
return 0;
}
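/* Map a single page of a bo for short-lived CPU access. Pages in VRAM or
* surface RAM go through the write-combined io_mapping set up at init time;
* anything else falls back to a full ttm_bo_kmap of the object. */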
void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, int page_offset)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
void *rptr;
int ret;
struct io_mapping *map;
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
map = qdev->surface_mapping;
else
goto fallback;
(void) ttm_mem_io_lock(man, false);
ret = ttm_mem_io_reserve(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
return io_mapping_map_atomic_wc(map, bo->tbo.mem.bus.offset + page_offset);
fallback:
if (bo->kptr) {
rptr = bo->kptr + (page_offset * PAGE_SIZE);
return rptr;
}
ret = qxl_bo_kmap(bo, &rptr);
if (ret)
return NULL;
rptr += page_offset * PAGE_SIZE;
return rptr;
}
void qxl_bo_kunmap(struct qxl_bo *bo)
{
if (bo->kptr == NULL)
return;
bo->kptr = NULL;
ttm_bo_kunmap(&bo->kmap);
}
void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev,
struct qxl_bo *bo, void *pmap)
{
struct ttm_mem_type_manager *man = &bo->tbo.bdev->man[bo->tbo.mem.mem_type];
struct io_mapping *map;
if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
map = qdev->vram_mapping;
else if (bo->tbo.mem.mem_type == TTM_PL_PRIV0)
map = qdev->surface_mapping;
else
goto fallback;
io_mapping_unmap_atomic(pmap);
(void) ttm_mem_io_lock(man, false);
ttm_mem_io_free(bo->tbo.bdev, &bo->tbo.mem);
ttm_mem_io_unlock(man);
return;
fallback:
qxl_bo_kunmap(bo);
}
void qxl_bo_unref(struct qxl_bo **bo)
{
struct ttm_buffer_object *tbo;
if ((*bo) == NULL)
return;
tbo = &((*bo)->tbo);
ttm_bo_unref(&tbo);
if (tbo == NULL)
*bo = NULL;
}
struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo)
{
ttm_bo_reference(&bo->tbo);
return bo;
}
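/* Pin a bo into the given domain by validating it with TTM_PL_FLAG_NO_EVICT
* set on every placement; nested pins only bump the pin count. */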
int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr)
{
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
int r, i;
if (bo->pin_count) {
bo->pin_count++;
if (gpu_addr)
*gpu_addr = qxl_bo_gpu_offset(bo);
return 0;
}
qxl_ttm_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (likely(r == 0)) {
bo->pin_count = 1;
if (gpu_addr != NULL)
*gpu_addr = qxl_bo_gpu_offset(bo);
}
if (unlikely(r != 0))
dev_err(qdev->dev, "%p pin failed\n", bo);
return r;
}
int qxl_bo_unpin(struct qxl_bo *bo)
{
struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private;
int r, i;
if (!bo->pin_count) {
dev_warn(qdev->dev, "%p unpin not necessary\n", bo);
return 0;
}
bo->pin_count--;
if (bo->pin_count)
return 0;
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (unlikely(r != 0))
dev_err(qdev->dev, "%p validate failed for unpin\n", bo);
return r;
}
void qxl_bo_force_delete(struct qxl_device *qdev)
{
struct qxl_bo *bo, *n;
if (list_empty(&qdev->gem.objects))
return;
dev_err(qdev->dev, "Userspace still has active objects !\n");
list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) {
mutex_lock(&qdev->ddev->struct_mutex);
dev_err(qdev->dev, "%p %p %lu %lu force free\n",
&bo->gem_base, bo, (unsigned long)bo->gem_base.size,
*((unsigned long *)&bo->gem_base.refcount));
mutex_lock(&qdev->gem.mutex);
list_del_init(&bo->list);
mutex_unlock(&qdev->gem.mutex);
/* this should unref the ttm bo */
drm_gem_object_unreference(&bo->gem_base);
mutex_unlock(&qdev->ddev->struct_mutex);
}
}
int qxl_bo_init(struct qxl_device *qdev)
{
return qxl_ttm_init(qdev);
}
void qxl_bo_fini(struct qxl_device *qdev)
{
qxl_ttm_fini(qdev);
}
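/* Surface bos get their hardware surface id lazily: the first time one is
* used in a command we allocate an id and issue the surface create.
* Userspace never sees these ids. */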
int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo)
{
int ret;
if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) {
/* allocate a surface id for this surface now */
ret = qxl_surface_id_alloc(qdev, bo);
if (ret)
return ret;
ret = qxl_hw_surface_alloc(qdev, bo, NULL);
if (ret)
return ret;
}
return 0;
}
void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed)
{
struct qxl_bo_list *entry, *sf;
list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) {
qxl_bo_unreserve(entry->bo);
list_del(&entry->lhead);
kfree(entry);
}
}
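/* Track a bo for the duration of a command submission: reserve it, validate
* it into its preferred domain if it is not already pinned, and make sure a
* surface id has been allocated for it. Duplicate entries are ignored. */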
int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo)
{
struct qxl_bo_list *entry;
int ret;
list_for_each_entry(entry, &reloc_list->bos, lhead) {
if (entry->bo == bo)
return 0;
}
entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL);
if (!entry)
return -ENOMEM;
entry->bo = bo;
list_add(&entry->lhead, &reloc_list->bos);
ret = qxl_bo_reserve(bo, false);
if (ret)
return ret;
if (!bo->pin_count) {
qxl_ttm_placement_from_domain(bo, bo->type);
ret = ttm_bo_validate(&bo->tbo, &bo->placement,
true, false);
if (ret)
return ret;
}
/* allocate a surface for reserved + validated buffers */
ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo);
if (ret)
return ret;
return 0;
}

Some files were not shown because too many files have changed in this diff.