You've already forked linux-apfs
mirror of
https://github.com/linux-apfs/linux-apfs.git
synced 2026-05-01 15:00:59 -07:00
Merge tag 'drm-intel-next-2016-06-20' of git://anongit.freedesktop.org/drm-intel into drm-next
- Infrastructure for GVT-g (paravirtualized gpu on gen8+), from Zhi Wang - another attemp at nonblocking atomic plane updates - bugfixes and refactoring for GuC doorbell code (Dave Gordon) - GuC command submission enabled by default, if fw available (Dave Gordon) - more bxt w/a (Arun Siluvery) - bxt phy improvements (Imre Deak) - prep work for stolen objects support (Ankitprasa Sharma & Chris Wilson) - skl/bkl w/a update from Mika Kuoppala - bunch of small improvements and fixes all over, as usual * tag 'drm-intel-next-2016-06-20' of git://anongit.freedesktop.org/drm-intel: (81 commits) drm/i915: Update DRIVER_DATE to 20160620 drm/i915: Introduce GVT context creation API drm/i915: Support LRC context single submission drm/i915: Introduce execlist context status change notification drm/i915: Make addressing mode bits in context descriptor configurable drm/i915: Make ring buffer size of a LRC context configurable drm/i915: gvt: Introduce the basic architecture of GVT-g drm/i915: Fold vGPU active check into inner functions drm/i915: Use offsetof() to calculate the offset of members in PVINFO page drm/i915: Factor out i915_pvinfo.h drm/i915: Serialise presentation with imported dmabufs drm/i915: Use atomic commits for legacy page_flips drm/i915: Move fb_bits updating later in atomic_commit drm/i915: nonblocking commit Reapply "drm/i915: Pass atomic states to fbc update, functions." drm/i915: Roll out the helper nonblock tracking drm/i915: Signal drm events for atomic drm/i915/ilk: Don't disable SSC source if it's in use drm/i915/guc: (re)initialise doorbell h/w when enabling GuC submission drm/i915/guc: replace assign_doorbell() with select_doorbell_register() ...
This commit is contained in:
@@ -840,6 +840,14 @@ static bool i830_check_flags(unsigned int flags)
|
||||
return false;
|
||||
}
|
||||
|
||||
void intel_gtt_insert_page(dma_addr_t addr,
|
||||
unsigned int pg,
|
||||
unsigned int flags)
|
||||
{
|
||||
intel_private.driver->write_entry(addr, pg, flags);
|
||||
}
|
||||
EXPORT_SYMBOL(intel_gtt_insert_page);
|
||||
|
||||
void intel_gtt_insert_sg_entries(struct sg_table *st,
|
||||
unsigned int pg_start,
|
||||
unsigned int flags)
|
||||
|
||||
@@ -57,6 +57,28 @@ config DRM_I915_USERPTR
|
||||
|
||||
If in doubt, say "Y".
|
||||
|
||||
config DRM_I915_GVT
|
||||
bool "Enable Intel GVT-g graphics virtualization host support"
|
||||
depends on DRM_I915
|
||||
default n
|
||||
help
|
||||
Choose this option if you want to enable Intel GVT-g graphics
|
||||
virtualization technology host support with integrated graphics.
|
||||
With GVT-g, it's possible to have one integrated graphics
|
||||
device shared by multiple VMs under different hypervisors.
|
||||
|
||||
Note that at least one hypervisor like Xen or KVM is required for
|
||||
this driver to work, and it only supports newer device from
|
||||
Broadwell+. For further information and setup guide, you can
|
||||
visit: http://01.org/igvt-g.
|
||||
|
||||
Now it's just a stub to support the modifications of i915 for
|
||||
GVT device model. It requires at least one MPT modules for Xen/KVM
|
||||
and other components of GVT device model to work. Use it under
|
||||
you own risk.
|
||||
|
||||
If in doubt, say "N".
|
||||
|
||||
menu "drm/i915 Debugging"
|
||||
depends on DRM_I915
|
||||
depends on EXPERT
|
||||
|
||||
@@ -104,6 +104,11 @@ i915-y += i915_vgpu.o
|
||||
# legacy horrors
|
||||
i915-y += i915_dma.o
|
||||
|
||||
ifeq ($(CONFIG_DRM_I915_GVT),y)
|
||||
i915-y += intel_gvt.o
|
||||
include $(src)/gvt/Makefile
|
||||
endif
|
||||
|
||||
obj-$(CONFIG_DRM_I915) += i915.o
|
||||
|
||||
CFLAGS_i915_trace_points.o := -I$(src)
|
||||
|
||||
@@ -0,0 +1,5 @@
|
||||
GVT_DIR := gvt
|
||||
GVT_SOURCE := gvt.o
|
||||
|
||||
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
|
||||
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
|
||||
@@ -0,0 +1,34 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef __GVT_DEBUG_H__
|
||||
#define __GVT_DEBUG_H__
|
||||
|
||||
#define gvt_dbg_core(fmt, args...) \
|
||||
DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
|
||||
|
||||
/*
|
||||
* Other GVT debug stuff will be introduced in the GVT device model patches.
|
||||
*/
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,145 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <xen/xen.h>
|
||||
|
||||
#include "i915_drv.h"
|
||||
|
||||
struct intel_gvt_host intel_gvt_host;
|
||||
|
||||
static const char * const supported_hypervisors[] = {
|
||||
[INTEL_GVT_HYPERVISOR_XEN] = "XEN",
|
||||
[INTEL_GVT_HYPERVISOR_KVM] = "KVM",
|
||||
};
|
||||
|
||||
/**
|
||||
* intel_gvt_init_host - Load MPT modules and detect if we're running in host
|
||||
* @gvt: intel gvt device
|
||||
*
|
||||
* This function is called at the driver loading stage. If failed to find a
|
||||
* loadable MPT module or detect currently we're running in a VM, then GVT-g
|
||||
* will be disabled
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_gvt_init_host(void)
|
||||
{
|
||||
if (intel_gvt_host.initialized)
|
||||
return 0;
|
||||
|
||||
/* Xen DOM U */
|
||||
if (xen_domain() && !xen_initial_domain())
|
||||
return -ENODEV;
|
||||
|
||||
/* Try to load MPT modules for hypervisors */
|
||||
if (xen_initial_domain()) {
|
||||
/* In Xen dom0 */
|
||||
intel_gvt_host.mpt = try_then_request_module(
|
||||
symbol_get(xengt_mpt), "xengt");
|
||||
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
|
||||
} else {
|
||||
/* not in Xen. Try KVMGT */
|
||||
intel_gvt_host.mpt = try_then_request_module(
|
||||
symbol_get(kvmgt_mpt), "kvm");
|
||||
intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
|
||||
}
|
||||
|
||||
/* Fail to load MPT modules - bail out */
|
||||
if (!intel_gvt_host.mpt)
|
||||
return -EINVAL;
|
||||
|
||||
/* Try to detect if we're running in host instead of VM. */
|
||||
if (!intel_gvt_hypervisor_detect_host())
|
||||
return -ENODEV;
|
||||
|
||||
gvt_dbg_core("Running with hypervisor %s in host mode\n",
|
||||
supported_hypervisors[intel_gvt_host.hypervisor_type]);
|
||||
|
||||
intel_gvt_host.initialized = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void init_device_info(struct intel_gvt *gvt)
|
||||
{
|
||||
if (IS_BROADWELL(gvt->dev_priv))
|
||||
gvt->device_info.max_support_vgpus = 8;
|
||||
/* This function will grow large in GVT device model patches. */
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_clean_device - clean a GVT device
|
||||
* @gvt: intel gvt device
|
||||
*
|
||||
* This function is called at the driver unloading stage, to free the
|
||||
* resources owned by a GVT device.
|
||||
*
|
||||
*/
|
||||
void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_gvt *gvt = &dev_priv->gvt;
|
||||
|
||||
if (WARN_ON(!gvt->initialized))
|
||||
return;
|
||||
|
||||
/* Other de-initialization of GVT components will be introduced. */
|
||||
|
||||
gvt->initialized = false;
|
||||
}
|
||||
|
||||
/**
|
||||
* intel_gvt_init_device - initialize a GVT device
|
||||
* @dev_priv: drm i915 private data
|
||||
*
|
||||
* This function is called at the initialization stage, to initialize
|
||||
* necessary GVT components.
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, negative error code if failed.
|
||||
*
|
||||
*/
|
||||
int intel_gvt_init_device(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
struct intel_gvt *gvt = &dev_priv->gvt;
|
||||
/*
|
||||
* Cannot initialize GVT device without intel_gvt_host gets
|
||||
* initialized first.
|
||||
*/
|
||||
if (WARN_ON(!intel_gvt_host.initialized))
|
||||
return -EINVAL;
|
||||
|
||||
if (WARN_ON(gvt->initialized))
|
||||
return -EEXIST;
|
||||
|
||||
gvt_dbg_core("init gvt device\n");
|
||||
|
||||
init_device_info(gvt);
|
||||
/*
|
||||
* Other initialization of GVT components will be introduce here.
|
||||
*/
|
||||
gvt_dbg_core("gvt device creation is done\n");
|
||||
gvt->initialized = true;
|
||||
return 0;
|
||||
}
|
||||
@@ -0,0 +1,69 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _GVT_H_
|
||||
#define _GVT_H_
|
||||
|
||||
#include "debug.h"
|
||||
#include "hypercall.h"
|
||||
|
||||
#define GVT_MAX_VGPU 8
|
||||
|
||||
enum {
|
||||
INTEL_GVT_HYPERVISOR_XEN = 0,
|
||||
INTEL_GVT_HYPERVISOR_KVM,
|
||||
};
|
||||
|
||||
struct intel_gvt_host {
|
||||
bool initialized;
|
||||
int hypervisor_type;
|
||||
struct intel_gvt_mpt *mpt;
|
||||
};
|
||||
|
||||
extern struct intel_gvt_host intel_gvt_host;
|
||||
|
||||
/* Describe per-platform limitations. */
|
||||
struct intel_gvt_device_info {
|
||||
u32 max_support_vgpus;
|
||||
/* This data structure will grow bigger in GVT device model patches */
|
||||
};
|
||||
|
||||
struct intel_vgpu {
|
||||
struct intel_gvt *gvt;
|
||||
int id;
|
||||
unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
|
||||
};
|
||||
|
||||
struct intel_gvt {
|
||||
struct mutex lock;
|
||||
bool initialized;
|
||||
|
||||
struct drm_i915_private *dev_priv;
|
||||
struct idr vgpu_idr; /* vGPU IDR pool */
|
||||
|
||||
struct intel_gvt_device_info device_info;
|
||||
};
|
||||
|
||||
#include "mpt.h"
|
||||
|
||||
#endif
|
||||
@@ -0,0 +1,38 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _GVT_HYPERCALL_H_
|
||||
#define _GVT_HYPERCALL_H_
|
||||
|
||||
/*
|
||||
* Specific GVT-g MPT modules function collections. Currently GVT-g supports
|
||||
* both Xen and KVM by providing dedicated hypervisor-related MPT modules.
|
||||
*/
|
||||
struct intel_gvt_mpt {
|
||||
int (*detect_host)(void);
|
||||
};
|
||||
|
||||
extern struct intel_gvt_mpt xengt_mpt;
|
||||
extern struct intel_gvt_mpt kvmgt_mpt;
|
||||
|
||||
#endif /* _GVT_HYPERCALL_H_ */
|
||||
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
* SOFTWARE.
|
||||
*/
|
||||
|
||||
#ifndef _GVT_MPT_H_
|
||||
#define _GVT_MPT_H_
|
||||
|
||||
/**
|
||||
* DOC: Hypervisor Service APIs for GVT-g Core Logic
|
||||
*
|
||||
* This is the glue layer between specific hypervisor MPT modules and GVT-g core
|
||||
* logic. Each kind of hypervisor MPT module provides a collection of function
|
||||
* callbacks and will be attached to GVT host when the driver is loading.
|
||||
* GVT-g core logic will call these APIs to request specific services from
|
||||
* hypervisor.
|
||||
*/
|
||||
|
||||
/**
|
||||
* intel_gvt_hypervisor_detect_host - check if GVT-g is running within
|
||||
* hypervisor host/privilged domain
|
||||
*
|
||||
* Returns:
|
||||
* Zero on success, -ENODEV if current kernel is running inside a VM
|
||||
*/
|
||||
static inline int intel_gvt_hypervisor_detect_host(void)
|
||||
{
|
||||
return intel_gvt_host.mpt->detect_host();
|
||||
}
|
||||
|
||||
#endif /* _GVT_MPT_H_ */
|
||||
@@ -737,7 +737,7 @@ static void fini_hash_table(struct intel_engine_cs *engine)
|
||||
|
||||
/**
|
||||
* i915_cmd_parser_init_ring() - set cmd parser related fields for a ringbuffer
|
||||
* @ring: the ringbuffer to initialize
|
||||
* @engine: the engine to initialize
|
||||
*
|
||||
* Optionally initializes fields related to batch buffer command parsing in the
|
||||
* struct intel_engine_cs based on whether the platform requires software
|
||||
@@ -830,7 +830,7 @@ int i915_cmd_parser_init_ring(struct intel_engine_cs *engine)
|
||||
|
||||
/**
|
||||
* i915_cmd_parser_fini_ring() - clean up cmd parser related fields
|
||||
* @ring: the ringbuffer to clean up
|
||||
* @engine: the engine to clean up
|
||||
*
|
||||
* Releases any resources related to command parsing that may have been
|
||||
* initialized for the specified ring.
|
||||
@@ -1024,7 +1024,7 @@ unpin_src:
|
||||
|
||||
/**
|
||||
* i915_needs_cmd_parser() - should a given ring use software command parsing?
|
||||
* @ring: the ring in question
|
||||
* @engine: the engine in question
|
||||
*
|
||||
* Only certain platforms require software batch buffer command parsing, and
|
||||
* only when enabled via module parameter.
|
||||
@@ -1176,7 +1176,7 @@ static bool check_cmd(const struct intel_engine_cs *engine,
|
||||
|
||||
/**
|
||||
* i915_parse_cmds() - parse a submitted batch buffer for privilege violations
|
||||
* @ring: the ring on which the batch is to execute
|
||||
* @engine: the engine on which the batch is to execute
|
||||
* @batch_obj: the batch buffer in question
|
||||
* @shadow_batch_obj: copy of the batch buffer in question
|
||||
* @batch_start_offset: byte offset in the batch at which execution starts
|
||||
@@ -1281,6 +1281,7 @@ int i915_parse_cmds(struct intel_engine_cs *engine,
|
||||
|
||||
/**
|
||||
* i915_cmd_parser_get_version() - get the cmd parser version number
|
||||
* @dev_priv: i915 device private
|
||||
*
|
||||
* The cmd parser maintains a simple increasing integer version number suitable
|
||||
* for passing to userspace clients to determine what operations are permitted.
|
||||
|
||||
@@ -2574,6 +2574,10 @@ static int i915_guc_info(struct seq_file *m, void *data)
|
||||
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
|
||||
seq_printf(m, "Doorbell map:\n");
|
||||
seq_printf(m, "\t%*pb\n", GUC_MAX_DOORBELLS, guc.doorbell_bitmap);
|
||||
seq_printf(m, "Doorbell next cacheline: 0x%x\n\n", guc.db_cacheline);
|
||||
|
||||
seq_printf(m, "GuC total action count: %llu\n", guc.action_count);
|
||||
seq_printf(m, "GuC action failure count: %u\n", guc.action_fail);
|
||||
seq_printf(m, "GuC last action command: 0x%x\n", guc.action_cmd);
|
||||
@@ -5306,6 +5310,10 @@ static int i915_sseu_status(struct seq_file *m, void *unused)
|
||||
INTEL_INFO(dev)->eu_total);
|
||||
seq_printf(m, " Available EU Per Subslice: %u\n",
|
||||
INTEL_INFO(dev)->eu_per_subslice);
|
||||
seq_printf(m, " Has Pooled EU: %s\n", yesno(HAS_POOLED_EU(dev)));
|
||||
if (HAS_POOLED_EU(dev))
|
||||
seq_printf(m, " Min EU in pool: %u\n",
|
||||
INTEL_INFO(dev)->min_eu_in_pool);
|
||||
seq_printf(m, " Has Slice Power Gating: %s\n",
|
||||
yesno(INTEL_INFO(dev)->has_slice_pg));
|
||||
seq_printf(m, " Has Subslice Power Gating: %s\n",
|
||||
|
||||
@@ -764,6 +764,32 @@ static void gen9_sseu_info_init(struct drm_device *dev)
|
||||
(info->slice_total > 1));
|
||||
info->has_subslice_pg = (IS_BROXTON(dev) && (info->subslice_total > 1));
|
||||
info->has_eu_pg = (info->eu_per_subslice > 2);
|
||||
|
||||
if (IS_BROXTON(dev)) {
|
||||
#define IS_SS_DISABLED(_ss_disable, ss) (_ss_disable & (0x1 << ss))
|
||||
/*
|
||||
* There is a HW issue in 2x6 fused down parts that requires
|
||||
* Pooled EU to be enabled as a WA. The pool configuration
|
||||
* changes depending upon which subslice is fused down. This
|
||||
* doesn't affect if the device has all 3 subslices enabled.
|
||||
*/
|
||||
/* WaEnablePooledEuFor2x6:bxt */
|
||||
info->has_pooled_eu = ((info->subslice_per_slice == 3) ||
|
||||
(info->subslice_per_slice == 2 &&
|
||||
INTEL_REVID(dev) < BXT_REVID_C0));
|
||||
|
||||
info->min_eu_in_pool = 0;
|
||||
if (info->has_pooled_eu) {
|
||||
if (IS_SS_DISABLED(ss_disable, 0) ||
|
||||
IS_SS_DISABLED(ss_disable, 2))
|
||||
info->min_eu_in_pool = 3;
|
||||
else if (IS_SS_DISABLED(ss_disable, 1))
|
||||
info->min_eu_in_pool = 6;
|
||||
else
|
||||
info->min_eu_in_pool = 9;
|
||||
}
|
||||
#undef IS_SS_DISABLED
|
||||
}
|
||||
}
|
||||
|
||||
static void broadwell_sseu_info_init(struct drm_device *dev)
|
||||
@@ -962,6 +988,9 @@ static void intel_device_info_runtime_init(struct drm_device *dev)
|
||||
DRM_DEBUG_DRIVER("subslice per slice: %u\n", info->subslice_per_slice);
|
||||
DRM_DEBUG_DRIVER("EU total: %u\n", info->eu_total);
|
||||
DRM_DEBUG_DRIVER("EU per subslice: %u\n", info->eu_per_subslice);
|
||||
DRM_DEBUG_DRIVER("Has Pooled EU: %s\n", HAS_POOLED_EU(dev) ? "y" : "n");
|
||||
if (HAS_POOLED_EU(dev))
|
||||
DRM_DEBUG_DRIVER("Min EU in pool: %u\n", info->min_eu_in_pool);
|
||||
DRM_DEBUG_DRIVER("has slice power gating: %s\n",
|
||||
info->has_slice_pg ? "y" : "n");
|
||||
DRM_DEBUG_DRIVER("has subslice power gating: %s\n",
|
||||
@@ -1091,6 +1120,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
ret = intel_gvt_init(dev_priv);
|
||||
if (ret < 0)
|
||||
goto err_workqueues;
|
||||
|
||||
/* This must be called before any calls to HAS_PCH_* */
|
||||
intel_detect_pch(dev);
|
||||
|
||||
@@ -1116,6 +1149,10 @@ static int i915_driver_init_early(struct drm_i915_private *dev_priv,
|
||||
"It may not be fully functional.\n");
|
||||
|
||||
return 0;
|
||||
|
||||
err_workqueues:
|
||||
i915_workqueues_cleanup(dev_priv);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/**
|
||||
@@ -1487,6 +1524,8 @@ int i915_driver_unload(struct drm_device *dev)
|
||||
|
||||
intel_fbdev_fini(dev);
|
||||
|
||||
intel_gvt_cleanup(dev_priv);
|
||||
|
||||
ret = i915_gem_suspend(dev);
|
||||
if (ret) {
|
||||
DRM_ERROR("failed to idle hardware: %d\n", ret);
|
||||
|
||||
@@ -355,6 +355,7 @@ static const struct intel_device_info intel_broxton_info = {
|
||||
.has_ddi = 1,
|
||||
.has_fpga_dbg = 1,
|
||||
.has_fbc = 1,
|
||||
.has_pooled_eu = 0,
|
||||
GEN_DEFAULT_PIPEOFFSETS,
|
||||
IVB_CURSOR_OFFSETS,
|
||||
BDW_COLORS,
|
||||
@@ -517,8 +518,10 @@ void intel_detect_pch(struct drm_device *dev)
|
||||
} else if ((id == INTEL_PCH_P2X_DEVICE_ID_TYPE) ||
|
||||
(id == INTEL_PCH_P3X_DEVICE_ID_TYPE) ||
|
||||
((id == INTEL_PCH_QEMU_DEVICE_ID_TYPE) &&
|
||||
pch->subsystem_vendor == 0x1af4 &&
|
||||
pch->subsystem_device == 0x1100)) {
|
||||
pch->subsystem_vendor ==
|
||||
PCI_SUBVENDOR_ID_REDHAT_QUMRANET &&
|
||||
pch->subsystem_device ==
|
||||
PCI_SUBDEVICE_ID_QEMU)) {
|
||||
dev_priv->pch_type = intel_virt_detect_pch(dev);
|
||||
} else
|
||||
continue;
|
||||
|
||||
@@ -62,12 +62,14 @@
|
||||
#include "i915_gem_gtt.h"
|
||||
#include "i915_gem_render_state.h"
|
||||
|
||||
#include "intel_gvt.h"
|
||||
|
||||
/* General customization:
|
||||
*/
|
||||
|
||||
#define DRIVER_NAME "i915"
|
||||
#define DRIVER_DESC "Intel Graphics"
|
||||
#define DRIVER_DATE "20160606"
|
||||
#define DRIVER_DATE "20160620"
|
||||
|
||||
#undef WARN_ON
|
||||
/* Many gcc seem to no see through this and fall over :( */
|
||||
@@ -762,7 +764,8 @@ struct intel_csr {
|
||||
func(has_llc) sep \
|
||||
func(has_snoop) sep \
|
||||
func(has_ddi) sep \
|
||||
func(has_fpga_dbg)
|
||||
func(has_fpga_dbg) sep \
|
||||
func(has_pooled_eu)
|
||||
|
||||
#define DEFINE_FLAG(name) u8 name:1
|
||||
#define SEP_SEMICOLON ;
|
||||
@@ -788,6 +791,7 @@ struct intel_device_info {
|
||||
u8 subslice_per_slice;
|
||||
u8 eu_total;
|
||||
u8 eu_per_subslice;
|
||||
u8 min_eu_in_pool;
|
||||
/* For each slice, which subslice(s) has(have) 7 EUs (bitfield)? */
|
||||
u8 subslice_7eu[3];
|
||||
u8 has_slice_pg:1;
|
||||
@@ -877,6 +881,10 @@ struct i915_gem_context {
|
||||
int pin_count;
|
||||
bool initialised;
|
||||
} engine[I915_NUM_ENGINES];
|
||||
u32 ring_size;
|
||||
u32 desc_template;
|
||||
struct atomic_notifier_head status_notifier;
|
||||
bool execlists_force_single_submission;
|
||||
|
||||
struct list_head link;
|
||||
|
||||
@@ -1740,6 +1748,8 @@ struct drm_i915_private {
|
||||
|
||||
struct i915_virtual_gpu vgpu;
|
||||
|
||||
struct intel_gvt gvt;
|
||||
|
||||
struct intel_guc guc;
|
||||
|
||||
struct intel_csr csr;
|
||||
@@ -2718,6 +2728,15 @@ struct drm_i915_cmd_table {
|
||||
|
||||
#define IS_BXT_REVID(p, since, until) (IS_BROXTON(p) && IS_REVID(p, since, until))
|
||||
|
||||
#define KBL_REVID_A0 0x0
|
||||
#define KBL_REVID_B0 0x1
|
||||
#define KBL_REVID_C0 0x2
|
||||
#define KBL_REVID_D0 0x3
|
||||
#define KBL_REVID_E0 0x4
|
||||
|
||||
#define IS_KBL_REVID(p, since, until) \
|
||||
(IS_KABYLAKE(p) && IS_REVID(p, since, until))
|
||||
|
||||
/*
|
||||
* The genX designation typically refers to the render engine, so render
|
||||
* capability related checks should use IS_GEN, while display and other checks
|
||||
@@ -2824,6 +2843,8 @@ struct drm_i915_cmd_table {
|
||||
!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && \
|
||||
!IS_BROXTON(dev))
|
||||
|
||||
#define HAS_POOLED_EU(dev) (INTEL_INFO(dev)->has_pooled_eu)
|
||||
|
||||
#define INTEL_PCH_DEVICE_ID_MASK 0xff00
|
||||
#define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00
|
||||
#define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00
|
||||
@@ -2941,6 +2962,12 @@ void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
|
||||
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);
|
||||
|
||||
void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
|
||||
|
||||
static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->gvt.initialized;
|
||||
}
|
||||
|
||||
static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
|
||||
{
|
||||
return dev_priv->vgpu.active;
|
||||
@@ -3110,6 +3137,23 @@ static inline int __sg_page_count(struct scatterlist *sg)
|
||||
struct page *
|
||||
i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n);
|
||||
|
||||
static inline dma_addr_t
|
||||
i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj, int n)
|
||||
{
|
||||
if (n < obj->get_page.last) {
|
||||
obj->get_page.sg = obj->pages->sgl;
|
||||
obj->get_page.last = 0;
|
||||
}
|
||||
|
||||
while (obj->get_page.last + __sg_page_count(obj->get_page.sg) <= n) {
|
||||
obj->get_page.last += __sg_page_count(obj->get_page.sg++);
|
||||
if (unlikely(sg_is_chain(obj->get_page.sg)))
|
||||
obj->get_page.sg = sg_chain_ptr(obj->get_page.sg);
|
||||
}
|
||||
|
||||
return sg_dma_address(obj->get_page.sg) + ((n - obj->get_page.last) << PAGE_SHIFT);
|
||||
}
|
||||
|
||||
static inline struct page *
|
||||
i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
|
||||
{
|
||||
@@ -3432,6 +3476,8 @@ int i915_switch_context(struct drm_i915_gem_request *req);
|
||||
void i915_gem_context_free(struct kref *ctx_ref);
|
||||
struct drm_i915_gem_object *
|
||||
i915_gem_alloc_context_obj(struct drm_device *dev, size_t size);
|
||||
struct i915_gem_context *
|
||||
i915_gem_context_create_gvt(struct drm_device *dev);
|
||||
|
||||
static inline struct i915_gem_context *
|
||||
i915_gem_context_lookup(struct drm_i915_file_private *file_priv, u32 id)
|
||||
@@ -3620,6 +3666,7 @@ int intel_bios_init(struct drm_i915_private *dev_priv);
|
||||
bool intel_bios_is_valid_vbt(const void *buf, size_t size);
|
||||
bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
|
||||
bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
|
||||
bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port port);
|
||||
bool intel_bios_is_port_edp(struct drm_i915_private *dev_priv, enum port port);
|
||||
bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *dev_priv, enum port port);
|
||||
bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv, enum port *port);
|
||||
|
||||
+306
-60
File diff suppressed because it is too large
Load Diff
@@ -295,6 +295,10 @@ __create_hw_context(struct drm_device *dev,
|
||||
ctx->remap_slice = ALL_L3_SLICES(dev_priv);
|
||||
|
||||
ctx->hang_stats.ban_period_seconds = DRM_I915_CTX_BAN_PERIOD;
|
||||
ctx->ring_size = 4 * PAGE_SIZE;
|
||||
ctx->desc_template = GEN8_CTX_ADDRESSING_MODE(dev_priv) <<
|
||||
GEN8_CTX_ADDRESSING_MODE_SHIFT;
|
||||
ATOMIC_INIT_NOTIFIER_HEAD(&ctx->status_notifier);
|
||||
|
||||
return ctx;
|
||||
|
||||
@@ -339,6 +343,40 @@ i915_gem_create_context(struct drm_device *dev,
|
||||
return ctx;
|
||||
}
|
||||
|
||||
/**
|
||||
* i915_gem_context_create_gvt - create a GVT GEM context
|
||||
* @dev: drm device *
|
||||
*
|
||||
* This function is used to create a GVT specific GEM context.
|
||||
*
|
||||
* Returns:
|
||||
* pointer to i915_gem_context on success, error pointer if failed
|
||||
*
|
||||
*/
|
||||
struct i915_gem_context *
|
||||
i915_gem_context_create_gvt(struct drm_device *dev)
|
||||
{
|
||||
struct i915_gem_context *ctx;
|
||||
int ret;
|
||||
|
||||
if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
ret = i915_mutex_lock_interruptible(dev);
|
||||
if (ret)
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ctx = i915_gem_create_context(dev, NULL);
|
||||
if (IS_ERR(ctx))
|
||||
goto out;
|
||||
|
||||
ctx->execlists_force_single_submission = true;
|
||||
ctx->ring_size = 512 * PAGE_SIZE; /* Max ring buffer size */
|
||||
out:
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return ctx;
|
||||
}
|
||||
|
||||
static void i915_gem_context_unpin(struct i915_gem_context *ctx,
|
||||
struct intel_engine_cs *engine)
|
||||
{
|
||||
|
||||
@@ -0,0 +1,45 @@
|
||||
/*
|
||||
* Copyright 2016 Intel Corporation
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the next
|
||||
* paragraph) shall be included in all copies or substantial portions of the
|
||||
* Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
|
||||
* DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef _I915_GEM_DMABUF_H_
|
||||
#define _I915_GEM_DMABUF_H_
|
||||
|
||||
#include <linux/dma-buf.h>
|
||||
|
||||
static inline struct reservation_object *
|
||||
i915_gem_object_get_dmabuf_resv(struct drm_i915_gem_object *obj)
|
||||
{
|
||||
struct dma_buf *dma_buf;
|
||||
|
||||
if (obj->base.dma_buf)
|
||||
dma_buf = obj->base.dma_buf;
|
||||
else if (obj->base.import_attach)
|
||||
dma_buf = obj->base.import_attach->dmabuf;
|
||||
else
|
||||
return NULL;
|
||||
|
||||
return dma_buf->resv;
|
||||
}
|
||||
|
||||
#endif
|
||||
@@ -2355,6 +2355,28 @@ static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
|
||||
#endif
|
||||
}
|
||||
|
||||
/*
 * gen8_ggtt_insert_page - bind a single page into the global GTT (gen8+).
 * @vm: the GGTT address space
 * @addr: DMA address of the page to map
 * @offset: byte offset within the GGTT at which to map the page
 * @level: cache level used when encoding the PTE
 * @unused: unused; present only to match the insert_page vfunc signature
 *
 * Writes one PTE directly through the GTT aperture (gsm) and then flushes
 * the chipset write buffers. The statement order below is significant:
 * the flush write and its posting read must follow the PTE write.
 */
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	/* Locate the PTE slot for this GGTT offset inside the aperture. */
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(offset >> PAGE_SHIFT);
	int rpm_atomic_seq;

	/* Assert the device stays runtime-resumed across the MMIO access. */
	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	gen8_set_pte(pte, gen8_pte_encode(addr, level, true));

	/* Flush GTT writes; the posting read forces the flush to complete. */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
|
||||
|
||||
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
uint64_t start,
|
||||
@@ -2424,6 +2446,28 @@ static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
|
||||
stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
|
||||
}
|
||||
|
||||
/*
 * gen6_ggtt_insert_page - bind a single page into the global GTT (gen6/7).
 * @vm: the GGTT address space
 * @addr: DMA address of the page to map
 * @offset: byte offset within the GGTT at which to map the page
 * @level: cache level passed to the per-platform PTE encoder
 * @flags: extra PTE encode flags (forwarded to vm->pte_encode)
 *
 * Unlike the gen8 variant this goes through the vm's pte_encode vfunc,
 * since gen6/7 PTE layouts differ per platform. Statement order matters:
 * the flush write and posting read must follow the PTE write.
 */
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	/* Locate the PTE slot for this GGTT offset inside the aperture. */
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
		(offset >> PAGE_SHIFT);
	int rpm_atomic_seq;

	/* Assert the device stays runtime-resumed across the MMIO access. */
	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	iowrite32(vm->pte_encode(addr, level, true, flags), pte);

	/* Flush GTT writes; the posting read forces the flush to complete. */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
|
||||
|
||||
/*
|
||||
* Binds an object into the global gtt with the specified cache level. The object
|
||||
* will be accessible to the GPU via commands whose operands reference offsets
|
||||
@@ -2543,6 +2587,24 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
|
||||
assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
|
||||
}
|
||||
|
||||
/*
 * i915_ggtt_insert_page - bind a single page into the global GTT on old
 * (pre-gen6, intel-gtt managed) hardware.
 * @vm: the GGTT address space
 * @addr: DMA address of the page to map
 * @offset: byte offset within the GGTT at which to map the page
 * @cache_level: I915_CACHE_NONE selects uncached AGP memory, anything
 *               else maps as cached
 * @unused: unused; present only to match the insert_page vfunc signature
 *
 * Delegates the actual PTE write to the intel-gtt layer; this wrapper
 * only translates the cache level to AGP flags and brackets the call
 * with the runtime-pm atomic assertions.
 */
static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	struct drm_i915_private *dev_priv = to_i915(vm->dev);
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
	int rpm_atomic_seq;

	/* Assert the device stays runtime-resumed across the GTT update. */
	rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);

	assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
}
|
||||
|
||||
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
|
||||
struct sg_table *pages,
|
||||
uint64_t start,
|
||||
@@ -2732,11 +2794,9 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
|
||||
i915_address_space_init(&ggtt->base, dev_priv);
|
||||
ggtt->base.total += PAGE_SIZE;
|
||||
|
||||
if (intel_vgpu_active(dev_priv)) {
|
||||
ret = intel_vgt_balloon(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
ret = intel_vgt_balloon(dev_priv);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (!HAS_LLC(dev))
|
||||
ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
|
||||
@@ -2836,8 +2896,7 @@ void i915_ggtt_cleanup_hw(struct drm_device *dev)
|
||||
i915_gem_cleanup_stolen(dev);
|
||||
|
||||
if (drm_mm_initialized(&ggtt->base.mm)) {
|
||||
if (intel_vgpu_active(dev_priv))
|
||||
intel_vgt_deballoon();
|
||||
intel_vgt_deballoon(dev_priv);
|
||||
|
||||
drm_mm_takedown(&ggtt->base.mm);
|
||||
list_del(&ggtt->base.global_link);
|
||||
@@ -3076,7 +3135,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
|
||||
|
||||
ggtt->base.bind_vma = ggtt_bind_vma;
|
||||
ggtt->base.unbind_vma = ggtt_unbind_vma;
|
||||
|
||||
ggtt->base.insert_page = gen8_ggtt_insert_page;
|
||||
ggtt->base.clear_range = nop_clear_range;
|
||||
if (!USES_FULL_PPGTT(dev_priv))
|
||||
ggtt->base.clear_range = gen8_ggtt_clear_range;
|
||||
@@ -3116,6 +3175,7 @@ static int gen6_gmch_probe(struct i915_ggtt *ggtt)
|
||||
ret = ggtt_probe_common(dev, ggtt->size);
|
||||
|
||||
ggtt->base.clear_range = gen6_ggtt_clear_range;
|
||||
ggtt->base.insert_page = gen6_ggtt_insert_page;
|
||||
ggtt->base.insert_entries = gen6_ggtt_insert_entries;
|
||||
ggtt->base.bind_vma = ggtt_bind_vma;
|
||||
ggtt->base.unbind_vma = ggtt_unbind_vma;
|
||||
@@ -3147,6 +3207,7 @@ static int i915_gmch_probe(struct i915_ggtt *ggtt)
|
||||
&ggtt->mappable_base, &ggtt->mappable_end);
|
||||
|
||||
ggtt->do_idle_maps = needs_idle_maps(dev_priv->dev);
|
||||
ggtt->base.insert_page = i915_ggtt_insert_page;
|
||||
ggtt->base.insert_entries = i915_ggtt_insert_entries;
|
||||
ggtt->base.clear_range = i915_ggtt_clear_range;
|
||||
ggtt->base.bind_vma = ggtt_bind_vma;
|
||||
|
||||
@@ -319,6 +319,11 @@ struct i915_address_space {
|
||||
uint64_t start,
|
||||
uint64_t length,
|
||||
bool use_scratch);
|
||||
void (*insert_page)(struct i915_address_space *vm,
|
||||
dma_addr_t addr,
|
||||
uint64_t offset,
|
||||
enum i915_cache_level cache_level,
|
||||
u32 flags);
|
||||
void (*insert_entries)(struct i915_address_space *vm,
|
||||
struct sg_table *st,
|
||||
uint64_t start,
|
||||
|
||||
@@ -94,6 +94,7 @@ free_gem:
|
||||
|
||||
static int render_state_setup(struct render_state *so)
|
||||
{
|
||||
struct drm_device *dev = so->obj->base.dev;
|
||||
const struct intel_renderstate_rodata *rodata = so->rodata;
|
||||
unsigned int i = 0, reloc_index = 0;
|
||||
struct page *page;
|
||||
@@ -135,6 +136,33 @@ static int render_state_setup(struct render_state *so)
|
||||
|
||||
so->aux_batch_offset = i * sizeof(u32);
|
||||
|
||||
if (HAS_POOLED_EU(dev)) {
|
||||
/*
|
||||
* We always program 3x6 pool config but depending upon which
|
||||
* subslice is disabled HW drops down to appropriate config
|
||||
* shown below.
|
||||
*
|
||||
* In the below table 2x6 config always refers to
|
||||
* fused-down version, native 2x6 is not available and can
|
||||
* be ignored
|
||||
*
|
||||
* SNo subslices config eu pool configuration
|
||||
* -----------------------------------------------------------
|
||||
* 1 3 subslices enabled (3x6) - 0x00777000 (9+9)
|
||||
* 2 ss0 disabled (2x6) - 0x00777000 (3+9)
|
||||
* 3 ss1 disabled (2x6) - 0x00770000 (6+6)
|
||||
* 4 ss2 disabled (2x6) - 0x00007000 (9+3)
|
||||
*/
|
||||
u32 eu_pool_config = 0x00777000;
|
||||
|
||||
OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE);
|
||||
OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE);
|
||||
OUT_BATCH(d, i, eu_pool_config);
|
||||
OUT_BATCH(d, i, 0);
|
||||
OUT_BATCH(d, i, 0);
|
||||
OUT_BATCH(d, i, 0);
|
||||
}
|
||||
|
||||
OUT_BATCH(d, i, MI_BATCH_BUFFER_END);
|
||||
so->aux_batch_size = (i * sizeof(u32)) - so->aux_batch_offset;
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user