Merge tag 'drm-intel-gt-next-2022-09-16' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Cross-subsystem Changes:

- MEI subsystem pieces for XeHP SDV GSC support
  These are Acked-by Greg.

Driver Changes:

- Release mmaps on RPM suspend on discrete GPUs (Anshuman)
- Update GuC version to 7.5 on DG1, DG2 and ADL
- Revert "drm/i915/dg2: extend Wa_1409120013 to DG2" (Lucas)
- MTL enabling incl. standalone media (Matt R, Lucas)
- Explicitly clear BB_OFFSET for new contexts on Gen8+ (Chris)
- Fix throttling / perf limit reason decoding (Ashutosh)
- XeHP SDV GSC support (Vitaly, Alexander, Tomas)

- Fix issues with overriding firmware file paths (John)
- Invert if-else ladders to check latest version first (Lucas)
- Cancel GuC engine busyness worker synchronously (Umesh)

- Skip applying copy engine fuses outside PVC (Lucas)
- Eliminate Gen10 frequency read function (Lucas)
- Static code checker fixes (Gaosheng)
- Selftest improvements (Chris)

Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/YyQ4Jgl3cpGL1/As@jlahtine-mobl.ger.corp.intel.com
Committed by Dave Airlie on 2022-09-21 07:35:00 +10:00
64 changed files with 1426 additions and 537 deletions
+1
@@ -123,6 +123,7 @@ gt-y += \
gt/intel_ring.o \
gt/intel_ring_submission.o \
gt/intel_rps.o \
gt/intel_sa_media.o \
gt/intel_sseu.o \
gt/intel_sseu_debugfs.o \
gt/intel_timeline.o \
-2
@@ -12,8 +12,6 @@ struct drm_i915_private;
struct drm_i915_gem_object;
struct intel_memory_region;
extern const struct drm_i915_gem_object_ops i915_gem_lmem_obj_ops;
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
unsigned long n,
+22 -1
@@ -413,7 +413,7 @@ retry:
vma->mmo = mmo;
if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
-intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref,
+intel_wakeref_auto(&to_gt(i915)->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
if (write) {
@@ -550,6 +550,20 @@ out:
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct ttm_buffer_object *bo = i915_gem_to_ttm(obj);
struct ttm_device *bdev = bo->bdev;
drm_vma_node_unmap(&bo->base.vma_node, bdev->dev_mapping);
if (obj->userfault_count) {
/* rpm wakeref provides exclusive access */
list_del(&obj->userfault_link);
obj->userfault_count = 0;
}
}
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
struct i915_mmap_offset *mmo, *mn;
@@ -573,6 +587,13 @@ void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
spin_lock(&obj->mmo.lock);
}
spin_unlock(&obj->mmo.lock);
if (obj->userfault_count) {
mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
list_del(&obj->userfault_link);
mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
obj->userfault_count = 0;
}
}
static struct i915_mmap_offset *
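Worth noting: the new runtime-PM release path above skips the list lock because the caller holds the rpm wakeref, which already guarantees exclusive access, while the regular release path must take lmem_userfault_lock. A minimal user-space sketch of that two-path pattern (illustrative names and pthread stand-ins, not driver code):

/* Sketch: one shared list, two removal paths with different locking
 * guarantees. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>

struct obj {
	struct obj *prev, *next;
	int userfault_count;
};

static pthread_mutex_t userfault_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *userfault_list;

static void list_del(struct obj *o)
{
	if (o->prev)
		o->prev->next = o->next;
	else
		userfault_list = o->next;
	if (o->next)
		o->next->prev = o->prev;
	o->prev = o->next = NULL;
}

/* Suspend path: caller already has exclusive access (the rpm wakeref
 * in the driver), so no lock is taken. */
static void release_on_suspend(struct obj *o)
{
	if (o->userfault_count) {
		list_del(o);
		o->userfault_count = 0;
	}
}

/* Normal path: may race with the fault handler, so take the lock. */
static void release_normal(struct obj *o)
{
	if (o->userfault_count) {
		pthread_mutex_lock(&userfault_lock);
		list_del(o);
		pthread_mutex_unlock(&userfault_lock);
		o->userfault_count = 0;
	}
}

int main(void)
{
	struct obj o = { .userfault_count = 1 };

	userfault_list = &o;
	release_normal(&o);
	release_on_suspend(&o);	/* second removal is a no-op */
	printf("count=%d list=%p\n", o.userfault_count, (void *)userfault_list);
	return 0;
}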
+1
@@ -27,6 +27,7 @@ int i915_gem_dumb_mmap_offset(struct drm_file *file_priv,
void __i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_gtt(struct drm_i915_gem_object *obj);
void i915_gem_object_runtime_pm_release_mmap_offset(struct drm_i915_gem_object *obj);
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj);
#endif
+1 -1
@@ -238,7 +238,7 @@ static void __i915_gem_object_free_mmaps(struct drm_i915_gem_object *obj)
{
/* Skip serialisation and waking the device if known to be not used. */
-if (obj->userfault_count)
+if (obj->userfault_count && !IS_DGFX(to_i915(obj->base.dev)))
i915_gem_object_release_mmap_gtt(obj);
if (!RB_EMPTY_ROOT(&obj->mmo.offsets)) {
@@ -298,7 +298,8 @@ struct drm_i915_gem_object {
};
/**
-* Whether the object is currently in the GGTT mmap.
+* Whether the object is currently in the GGTT or any other supported
+* fake offset mmap backed by lmem.
*/
unsigned int userfault_count;
struct list_head userfault_link;
+1 -1
@@ -24,7 +24,7 @@ void i915_gem_suspend(struct drm_i915_private *i915)
{
GEM_TRACE("%s\n", dev_name(i915->drm.dev));
-intel_wakeref_auto(&to_gt(i915)->ggtt->userfault_wakeref, 0);
+intel_wakeref_auto(&to_gt(i915)->userfault_wakeref, 0);
flush_workqueue(i915->wq);
/*
+16 -35
@@ -430,48 +430,29 @@ static int i915_gem_init_stolen(struct intel_memory_region *mem)
reserved_base = stolen_top;
reserved_size = 0;
-switch (GRAPHICS_VER(i915)) {
-case 2:
-case 3:
-break;
-case 4:
-if (!IS_G4X(i915))
-break;
-fallthrough;
-case 5:
-g4x_get_stolen_reserved(i915, uncore,
+if (GRAPHICS_VER(i915) >= 11) {
+icl_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
-break;
-case 6:
-gen6_get_stolen_reserved(i915, uncore,
-&reserved_base, &reserved_size);
-break;
-case 7:
-if (IS_VALLEYVIEW(i915))
-vlv_get_stolen_reserved(i915, uncore,
-&reserved_base, &reserved_size);
-else
-gen7_get_stolen_reserved(i915, uncore,
-&reserved_base, &reserved_size);
-break;
-case 8:
-case 9:
+} else if (GRAPHICS_VER(i915) >= 8) {
if (IS_LP(i915))
chv_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
else
bdw_get_stolen_reserved(i915, uncore,
&reserved_base, &reserved_size);
-break;
-default:
-MISSING_CASE(GRAPHICS_VER(i915));
-fallthrough;
-case 11:
-case 12:
-icl_get_stolen_reserved(i915, uncore,
-&reserved_base,
-&reserved_size);
-break;
+} else if (GRAPHICS_VER(i915) >= 7) {
+if (IS_VALLEYVIEW(i915))
+vlv_get_stolen_reserved(i915, uncore,
+&reserved_base, &reserved_size);
+else
+gen7_get_stolen_reserved(i915, uncore,
+&reserved_base, &reserved_size);
+} else if (GRAPHICS_VER(i915) >= 6) {
+gen6_get_stolen_reserved(i915, uncore,
+&reserved_base, &reserved_size);
+} else if (GRAPHICS_VER(i915) >= 5 || IS_G4X(i915)) {
+g4x_get_stolen_reserved(i915, uncore,
+&reserved_base, &reserved_size);
}
/*
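The rewrite above replaces a switch over exact graphics versions with an if-else ladder ordered newest-first, so an unknown future version lands in the newest branch instead of tripping MISSING_CASE(). A small stand-alone sketch of the pattern (handler names are illustrative):

/* Sketch of the newest-first ladder: an unknown future version (e.g. 13)
 * picks the newest handler instead of falling through to an error. */
#include <stdio.h>

static const char *stolen_handler(int ver, int is_g4x)
{
	if (ver >= 11)
		return "icl";
	else if (ver >= 8)
		return "bdw/chv";
	else if (ver >= 7)
		return "gen7/vlv";
	else if (ver >= 6)
		return "gen6";
	else if (ver >= 5 || is_g4x)
		return "g4x";
	return "none";	/* gen2/3: no reserved-portion register */
}

int main(void)
{
	for (int v = 2; v <= 13; v++)
		printf("ver %2d -> %s\n", v, stolen_handler(v, 0));
	return 0;
}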
+34 -2
@@ -509,9 +509,18 @@ static int i915_ttm_shrink(struct drm_i915_gem_object *obj, unsigned int flags)
static void i915_ttm_delete_mem_notify(struct ttm_buffer_object *bo)
{
struct drm_i915_gem_object *obj = i915_ttm_to_gem(bo);
intel_wakeref_t wakeref = 0;
if (likely(obj)) {
/* ttm_bo_release() already has dma_resv_lock */
if (i915_ttm_cpu_maps_iomem(bo->resource))
wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
__i915_gem_object_pages_fini(obj);
if (wakeref)
intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
i915_ttm_free_cached_io_rsgt(obj);
}
}
@@ -981,6 +990,7 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
struct ttm_buffer_object *bo = area->vm_private_data;
struct drm_device *dev = bo->base.dev;
struct drm_i915_gem_object *obj;
intel_wakeref_t wakeref = 0;
vm_fault_t ret;
int idx;
@@ -1002,6 +1012,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
return VM_FAULT_SIGBUS;
}
if (i915_ttm_cpu_maps_iomem(bo->resource))
wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm);
if (!i915_ttm_resource_mappable(bo->resource)) {
int err = -ENODEV;
int i;
@@ -1023,7 +1036,8 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
if (err) {
drm_dbg(dev, "Unable to make resource CPU accessible\n");
dma_resv_unlock(bo->base.resv);
-return VM_FAULT_SIGBUS;
+ret = VM_FAULT_SIGBUS;
+goto out_rpm;
}
}
@@ -1034,12 +1048,30 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf)
} else {
ret = ttm_bo_vm_dummy_page(vmf, vmf->vma->vm_page_prot);
}
if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
-return ret;
+goto out_rpm;
/* ttm_bo_vm_reserve() already has dma_resv_lock */
if (ret == VM_FAULT_NOPAGE && wakeref && !obj->userfault_count) {
obj->userfault_count = 1;
mutex_lock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
list_add(&obj->userfault_link, &to_gt(to_i915(obj->base.dev))->lmem_userfault_list);
mutex_unlock(&to_gt(to_i915(obj->base.dev))->lmem_userfault_lock);
}
if (wakeref & CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND)
intel_wakeref_auto(&to_gt(to_i915(obj->base.dev))->userfault_wakeref,
msecs_to_jiffies_timeout(CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND));
i915_ttm_adjust_lru(obj);
dma_resv_unlock(bo->base.resv);
out_rpm:
if (wakeref)
intel_runtime_pm_put(&to_i915(obj->base.dev)->runtime_pm, wakeref);
return ret;
}
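The fault-handler change above converts the early returns into a single out_rpm exit so the wakeref taken for iomem-backed objects is released on every path. A compact user-space sketch of that goto-cleanup shape (stand-in functions, not the driver API):

/* Sketch of the single-exit pattern: every early "return" becomes
 * "goto out_rpm" so a wakeref taken at the top is always released. */
#include <stdio.h>

static int wakeref_get(void) { puts("rpm get"); return 1; }
static void wakeref_put(int w) { if (w) puts("rpm put"); }

static int fault_handler(int iomem, int mappable)
{
	int ret = 0;
	int wakeref = 0;

	if (iomem)
		wakeref = wakeref_get();

	if (!mappable) {
		ret = -1;		/* VM_FAULT_SIGBUS in the driver */
		goto out_rpm;
	}

	puts("fault serviced");
out_rpm:
	wakeref_put(wakeref);		/* released on every path */
	return ret;
}

int main(void)
{
	fault_handler(1, 0);
	fault_handler(1, 1);
	return 0;
}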
+10 -5
@@ -165,10 +165,12 @@ static u32 preparser_disable(bool state)
return MI_ARB_CHECK | 1 << 8 | state;
}
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg)
{
u32 gsi_offset = gt->uncore->gsi_offset;
*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
-*cs++ = i915_mmio_reg_offset(inv_reg);
+*cs++ = i915_mmio_reg_offset(inv_reg) + gsi_offset;
*cs++ = AUX_INV;
*cs++ = MI_NOOP;
@@ -254,7 +256,8 @@ int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
if (!HAS_FLAT_CCS(rq->engine->i915)) {
/* hsdes: 1809175790 */
-cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
+cs = gen12_emit_aux_table_inv(rq->engine->gt,
+cs, GEN12_GFX_CCS_AUX_NV);
}
*cs++ = preparser_disable(false);
@@ -313,9 +316,11 @@ int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
if (aux_inv) { /* hsdes: 1809175790 */
if (rq->engine->class == VIDEO_DECODE_CLASS)
-cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
+cs = gen12_emit_aux_table_inv(rq->engine->gt,
+cs, GEN12_VD0_AUX_NV);
else
-cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
+cs = gen12_emit_aux_table_inv(rq->engine->gt,
+cs, GEN12_VE0_AUX_NV);
}
if (mode & EMIT_INVALIDATE)
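gen12_emit_aux_table_inv() now takes the GT so it can bias the register offset by the uncore's gsi_offset; on a standalone media GT the same MMIO registers sit behind that offset. A toy emitter showing the biasing (command and register values here are made up for illustration, not the real opcodes):

/* Sketch: the offset written into the load-register-immediate packet
 * must be biased by the GT's GSI offset. */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MI_LRI   0x11000000u	/* stand-in, not the real opcode */
#define FAKE_AUX_INV  0x1u

static uint32_t *emit_aux_inv(uint32_t gsi_offset, uint32_t *cs, uint32_t reg)
{
	*cs++ = FAKE_MI_LRI;
	*cs++ = reg + gsi_offset;	/* the actual fix: bias by GSI offset */
	*cs++ = FAKE_AUX_INV;
	*cs++ = 0;			/* NOOP padding */
	return cs;
}

int main(void)
{
	uint32_t ring[4];

	emit_aux_inv(0x380000, ring, 0x4208);	/* illustrative offsets */
	for (int i = 0; i < 4; i++)
		printf("dw%d = 0x%08x\n", i, ring[i]);
	return 0;
}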
+2 -1
@@ -13,6 +13,7 @@
#include "intel_gt_regs.h"
#include "intel_gpu_commands.h"
struct intel_gt;
struct i915_request;
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode);
@@ -45,7 +46,7 @@ u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs);
-u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg);
+u32 *gen12_emit_aux_table_inv(struct intel_gt *gt, u32 *cs, const i915_reg_t inv_reg);
static inline u32 *
__gen8_emit_pipe_control(u32 *batch, u32 flags0, u32 flags1, u32 offset)
+83 -73
@@ -654,16 +654,83 @@ bool gen11_vdbox_has_sfc(struct intel_gt *gt,
*/
if ((gt->info.sfc_mask & BIT(physical_vdbox / 2)) == 0)
return false;
-else if (GRAPHICS_VER(i915) == 12)
+else if (MEDIA_VER(i915) >= 12)
return (physical_vdbox % 2 == 0) ||
!(BIT(physical_vdbox - 1) & vdbox_mask);
-else if (GRAPHICS_VER(i915) == 11)
+else if (MEDIA_VER(i915) == 11)
return logical_vdbox % 2 == 0;
MISSING_CASE(GRAPHICS_VER(i915));
return false;
}
static void engine_mask_apply_media_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
unsigned int logical_vdbox = 0;
unsigned int i;
u32 media_fuse, fuse1;
u16 vdbox_mask;
u16 vebox_mask;
if (MEDIA_VER(gt->i915) < 11)
return;
/*
* On newer platforms the fusing register is called 'enable' and has
* enable semantics, while on older platforms it is called 'disable'
* and bits have disable semantics.
*/
media_fuse = intel_uncore_read(gt->uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
if (MEDIA_VER_FULL(i915) < IP_VER(12, 50))
media_fuse = ~media_fuse;
vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
GEN11_GT_VEBOX_DISABLE_SHIFT;
if (MEDIA_VER_FULL(i915) >= IP_VER(12, 50)) {
fuse1 = intel_uncore_read(gt->uncore, HSW_PAVP_FUSE1);
gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
} else {
gt->info.sfc_mask = ~0;
}
for (i = 0; i < I915_MAX_VCS; i++) {
if (!HAS_ENGINE(gt, _VCS(i))) {
vdbox_mask &= ~BIT(i);
continue;
}
if (!(BIT(i) & vdbox_mask)) {
gt->info.engine_mask &= ~BIT(_VCS(i));
drm_dbg(&i915->drm, "vcs%u fused off\n", i);
continue;
}
if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
gt->info.vdbox_sfc_access |= BIT(i);
logical_vdbox++;
}
drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
vdbox_mask, VDBOX_MASK(gt));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
for (i = 0; i < I915_MAX_VECS; i++) {
if (!HAS_ENGINE(gt, _VECS(i))) {
vebox_mask &= ~BIT(i);
continue;
}
if (!(BIT(i) & vebox_mask)) {
gt->info.engine_mask &= ~BIT(_VECS(i));
drm_dbg(&i915->drm, "vecs%u fused off\n", i);
}
}
drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
vebox_mask, VEBOX_MASK(gt));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
}
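The helper above normalises the fuse value once: pre-Xe_HP parts expose a 'disable' register, newer ones an 'enable' register, so the raw value is inverted on older parts and everything downstream deals only in enable bits. A self-contained sketch with an illustrative field layout (not the real register bits):

/* Sketch of the fuse normalisation: invert disable-semantics registers
 * once, then decode enable bits uniformly. */
#include <stdint.h>
#include <stdio.h>

#define VDBOX_MASK  0x00ffu	/* illustrative layout */
#define VEBOX_MASK  0x0f00u

static void decode_media_fuse(uint32_t fuse, int has_enable_semantics,
			      uint16_t *vdbox, uint16_t *vebox)
{
	if (!has_enable_semantics)	/* disable bits -> enable bits */
		fuse = ~fuse;
	*vdbox = fuse & VDBOX_MASK;
	*vebox = (fuse & VEBOX_MASK) >> 8;
}

int main(void)
{
	uint16_t vd, ve;

	/* same hardware state, read from the two register flavours */
	decode_media_fuse(0x0303, 1, &vd, &ve);
	printf("enable-reg:  vdbox=%#x vebox=%#x\n", vd, ve);
	decode_media_fuse((uint32_t)~0x0303, 0, &vd, &ve);
	printf("disable-reg: vdbox=%#x vebox=%#x\n", vd, ve);
	return 0;
}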
static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
@@ -672,6 +739,9 @@ static void engine_mask_apply_compute_fuses(struct intel_gt *gt)
unsigned long ccs_mask;
unsigned int i;
if (GRAPHICS_VER(i915) < 11)
return;
if (hweight32(CCS_MASK(gt)) <= 1)
return;
@@ -694,6 +764,10 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
unsigned long meml3_mask;
unsigned long quad;
if (!(GRAPHICS_VER_FULL(i915) >= IP_VER(12, 60) &&
GRAPHICS_VER_FULL(i915) < IP_VER(12, 70)))
return;
meml3_mask = intel_uncore_read(gt->uncore, GEN10_MIRROR_FUSE3);
meml3_mask = REG_FIELD_GET(GEN12_MEML3_EN_MASK, meml3_mask);
@@ -727,75 +801,11 @@ static void engine_mask_apply_copy_fuses(struct intel_gt *gt)
*/
static intel_engine_mask_t init_engine_mask(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_gt_info *info = &gt->info;
-struct intel_uncore *uncore = gt->uncore;
-unsigned int logical_vdbox = 0;
-unsigned int i;
-u32 media_fuse, fuse1;
-u16 vdbox_mask;
-u16 vebox_mask;
info->engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
-if (GRAPHICS_VER(i915) < 11)
-return info->engine_mask;
-/*
-* On newer platforms the fusing register is called 'enable' and has
-* enable semantics, while on older platforms it is called 'disable'
-* and bits have disable semantics.
-*/
-media_fuse = intel_uncore_read(uncore, GEN11_GT_VEBOX_VDBOX_DISABLE);
-if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
-media_fuse = ~media_fuse;
-vdbox_mask = media_fuse & GEN11_GT_VDBOX_DISABLE_MASK;
-vebox_mask = (media_fuse & GEN11_GT_VEBOX_DISABLE_MASK) >>
-GEN11_GT_VEBOX_DISABLE_SHIFT;
-if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50)) {
-fuse1 = intel_uncore_read(uncore, HSW_PAVP_FUSE1);
-gt->info.sfc_mask = REG_FIELD_GET(XEHP_SFC_ENABLE_MASK, fuse1);
-} else {
-gt->info.sfc_mask = ~0;
-}
-for (i = 0; i < I915_MAX_VCS; i++) {
-if (!HAS_ENGINE(gt, _VCS(i))) {
-vdbox_mask &= ~BIT(i);
-continue;
-}
-if (!(BIT(i) & vdbox_mask)) {
-info->engine_mask &= ~BIT(_VCS(i));
-drm_dbg(&i915->drm, "vcs%u fused off\n", i);
-continue;
-}
-if (gen11_vdbox_has_sfc(gt, i, logical_vdbox, vdbox_mask))
-gt->info.vdbox_sfc_access |= BIT(i);
-logical_vdbox++;
-}
-drm_dbg(&i915->drm, "vdbox enable: %04x, instances: %04lx\n",
-vdbox_mask, VDBOX_MASK(gt));
-GEM_BUG_ON(vdbox_mask != VDBOX_MASK(gt));
-for (i = 0; i < I915_MAX_VECS; i++) {
-if (!HAS_ENGINE(gt, _VECS(i))) {
-vebox_mask &= ~BIT(i);
-continue;
-}
-if (!(BIT(i) & vebox_mask)) {
-info->engine_mask &= ~BIT(_VECS(i));
-drm_dbg(&i915->drm, "vecs%u fused off\n", i);
-}
-}
-drm_dbg(&i915->drm, "vebox enable: %04x, instances: %04lx\n",
-vebox_mask, VEBOX_MASK(gt));
-GEM_BUG_ON(vebox_mask != VEBOX_MASK(gt));
+GEM_BUG_ON(!info->engine_mask);
+engine_mask_apply_media_fuses(gt);
+engine_mask_apply_compute_fuses(gt);
+engine_mask_apply_copy_fuses(gt);
@@ -1688,9 +1698,9 @@ bool intel_engine_irq_enable(struct intel_engine_cs *engine)
return false;
/* Caller disables interrupts */
-spin_lock(&engine->gt->irq_lock);
+spin_lock(engine->gt->irq_lock);
engine->irq_enable(engine);
-spin_unlock(&engine->gt->irq_lock);
+spin_unlock(engine->gt->irq_lock);
return true;
}
@@ -1701,9 +1711,9 @@ void intel_engine_irq_disable(struct intel_engine_cs *engine)
return;
/* Caller disables interrupts */
-spin_lock(&engine->gt->irq_lock);
+spin_lock(engine->gt->irq_lock);
engine->irq_disable(engine);
-spin_unlock(&engine->gt->irq_lock);
+spin_unlock(engine->gt->irq_lock);
}
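The lock is now reached through a pointer because gt->irq_lock changed from an embedded spinlock to a shared one: a standalone media GT points at the primary GT's lock so interrupt handling for both is serialised together. A user-space analogue of that ownership change (pthread mutex standing in for spinlock_t):

/* Sketch: move the lock from "embedded per GT" to "pointer, possibly
 * shared between GTs". */
#include <pthread.h>
#include <stdio.h>

struct gt {
	const char *name;
	pthread_mutex_t *irq_lock;	/* was: pthread_mutex_t irq_lock; */
};

int main(void)
{
	pthread_mutex_t shared = PTHREAD_MUTEX_INITIALIZER;
	struct gt primary = { "primary", &shared };
	struct gt media   = { "media",   primary.irq_lock }; /* share it */

	pthread_mutex_lock(media.irq_lock);	/* also excludes primary */
	printf("%s and %s serialise on the same lock: %d\n",
	       primary.name, media.name,
	       primary.irq_lock == media.irq_lock);
	pthread_mutex_unlock(media.irq_lock);
	return 0;
}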
void intel_engines_reset_default_submission(struct intel_gt *gt)
@@ -110,6 +110,7 @@
#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */
#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */
#define RING_BBADDR(base) _MMIO((base) + 0x140)
#define RING_BB_OFFSET(base) _MMIO((base) + 0x158)
#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */
#define CCID(base) _MMIO((base) + 0x180)
#define CCID_EN BIT(0)
@@ -842,7 +842,6 @@ void intel_ggtt_init_fences(struct i915_ggtt *ggtt)
INIT_LIST_HEAD(&ggtt->fence_list);
INIT_LIST_HEAD(&ggtt->userfault_list);
intel_wakeref_auto_init(&ggtt->userfault_wakeref, uncore->rpm);
detect_bit_6_swizzle(ggtt);
+96 -10
@@ -7,6 +7,7 @@
#include <linux/mei_aux.h>
#include "i915_drv.h"
#include "i915_reg.h"
#include "gem/i915_gem_region.h"
#include "gt/intel_gsc.h"
#include "gt/intel_gt.h"
@@ -36,10 +37,56 @@ static int gsc_irq_init(int irq)
return irq_set_chip_data(irq, NULL);
}
static int
gsc_ext_om_alloc(struct intel_gsc *gsc, struct intel_gsc_intf *intf, size_t size)
{
struct intel_gt *gt = gsc_to_gt(gsc);
struct drm_i915_gem_object *obj;
int err;
obj = i915_gem_object_create_lmem(gt->i915, size,
I915_BO_ALLOC_CONTIGUOUS |
I915_BO_ALLOC_CPU_CLEAR);
if (IS_ERR(obj)) {
drm_err(&gt->i915->drm, "Failed to allocate gsc memory\n");
return PTR_ERR(obj);
}
err = i915_gem_object_pin_pages_unlocked(obj);
if (err) {
drm_err(&gt->i915->drm, "Failed to pin pages for gsc memory\n");
goto out_put;
}
intf->gem_obj = obj;
return 0;
out_put:
i915_gem_object_put(obj);
return err;
}
static void gsc_ext_om_destroy(struct intel_gsc_intf *intf)
{
struct drm_i915_gem_object *obj = fetch_and_zero(&intf->gem_obj);
if (!obj)
return;
if (i915_gem_object_has_pinned_pages(obj))
i915_gem_object_unpin_pages(obj);
i915_gem_object_put(obj);
}
struct gsc_def {
const char *name;
unsigned long bar;
size_t bar_size;
bool use_polling;
bool slow_firmware;
size_t lmem_size;
};
/* gsc resources and definitions (HECI1 and HECI2) */
@@ -54,11 +101,25 @@ static const struct gsc_def gsc_def_dg1[] = {
}
};
static const struct gsc_def gsc_def_xehpsdv[] = {
{
/* HECI1 not enabled on the device. */
},
{
.name = "mei-gscfi",
.bar = DG1_GSC_HECI2_BASE,
.bar_size = GSC_BAR_LENGTH,
.use_polling = true,
.slow_firmware = true,
}
};
static const struct gsc_def gsc_def_dg2[] = {
{
.name = "mei-gsc",
.bar = DG2_GSC_HECI1_BASE,
.bar_size = GSC_BAR_LENGTH,
.lmem_size = SZ_4M,
},
{
.name = "mei-gscfi",
@@ -75,26 +136,32 @@ static void gsc_release_dev(struct device *dev)
kfree(adev);
}
-static void gsc_destroy_one(struct intel_gsc_intf *intf)
+static void gsc_destroy_one(struct drm_i915_private *i915,
+struct intel_gsc *gsc, unsigned int intf_id)
{
struct intel_gsc_intf *intf = &gsc->intf[intf_id];
if (intf->adev) {
auxiliary_device_delete(&intf->adev->aux_dev);
auxiliary_device_uninit(&intf->adev->aux_dev);
intf->adev = NULL;
}
if (intf->irq >= 0)
irq_free_desc(intf->irq);
intf->irq = -1;
gsc_ext_om_destroy(intf);
}
-static void gsc_init_one(struct drm_i915_private *i915,
-struct intel_gsc_intf *intf,
+static void gsc_init_one(struct drm_i915_private *i915, struct intel_gsc *gsc,
unsigned int intf_id)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct mei_aux_device *adev;
struct auxiliary_device *aux_dev;
const struct gsc_def *def;
struct intel_gsc_intf *intf = &gsc->intf[intf_id];
int ret;
intf->irq = -1;
@@ -105,6 +172,8 @@ static void gsc_init_one(struct drm_i915_private *i915,
if (IS_DG1(i915)) {
def = &gsc_def_dg1[intf_id];
} else if (IS_XEHPSDV(i915)) {
def = &gsc_def_xehpsdv[intf_id];
} else if (IS_DG2(i915)) {
def = &gsc_def_dg2[intf_id];
} else {
@@ -117,10 +186,14 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
}
/* skip irq initialization */
if (def->use_polling)
goto add_device;
intf->irq = irq_alloc_desc(0);
if (intf->irq < 0) {
drm_err(&i915->drm, "gsc irq error %d\n", intf->irq);
-return;
+goto fail;
}
ret = gsc_irq_init(intf->irq);
@@ -129,16 +202,31 @@ static void gsc_init_one(struct drm_i915_private *i915,
goto fail;
}
add_device:
adev = kzalloc(sizeof(*adev), GFP_KERNEL);
if (!adev)
goto fail;
if (def->lmem_size) {
drm_dbg(&i915->drm, "setting up GSC lmem\n");
if (gsc_ext_om_alloc(gsc, intf, def->lmem_size)) {
drm_err(&i915->drm, "setting up gsc extended operational memory failed\n");
kfree(adev);
goto fail;
}
adev->ext_op_mem.start = i915_gem_object_get_dma_address(intf->gem_obj, 0);
adev->ext_op_mem.end = adev->ext_op_mem.start + def->lmem_size;
}
adev->irq = intf->irq;
adev->bar.parent = &pdev->resource[0];
adev->bar.start = def->bar + pdev->resource[0].start;
adev->bar.end = adev->bar.start + def->bar_size - 1;
adev->bar.flags = IORESOURCE_MEM;
adev->bar.desc = IORES_DESC_NONE;
adev->slow_firmware = def->slow_firmware;
aux_dev = &adev->aux_dev;
aux_dev->name = def->name;
@@ -165,7 +253,7 @@ static void gsc_init_one(struct drm_i915_private *i915,
return;
fail:
-gsc_destroy_one(intf);
+gsc_destroy_one(i915, gsc, intf->id);
}
static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
@@ -182,10 +270,8 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id)
return;
}
-if (gt->gsc.intf[intf_id].irq < 0) {
-drm_err_ratelimited(&gt->i915->drm, "GSC irq: irq not set");
+if (gt->gsc.intf[intf_id].irq < 0)
return;
-}
ret = generic_handle_irq(gt->gsc.intf[intf_id].irq);
if (ret)
@@ -208,7 +294,7 @@ void intel_gsc_init(struct intel_gsc *gsc, struct drm_i915_private *i915)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
-gsc_init_one(i915, &gsc->intf[i], i);
+gsc_init_one(i915, gsc, i);
}
void intel_gsc_fini(struct intel_gsc *gsc)
@@ -220,5 +306,5 @@ void intel_gsc_fini(struct intel_gsc *gsc)
return;
for (i = 0; i < INTEL_GSC_NUM_INTERFACES; i++)
-gsc_destroy_one(&gsc->intf[i]);
+gsc_destroy_one(gt->i915, gsc, i);
}
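gsc_ext_om_destroy() above relies on fetch_and_zero() to make teardown idempotent: the slot is read and cleared in one step, so a repeated call sees NULL and bails out. A simplified, single-threaded user-space version of the idiom (uses GCC/Clang statement expressions, like the kernel macro):

/* Sketch of the fetch_and_zero() idiom: grab the pointer and clear the
 * slot together, making double teardown safe. */
#include <stdio.h>
#include <stdlib.h>

#define fetch_and_zero(ptr) ({			\
	__typeof__(*(ptr)) _v = *(ptr);		\
	*(ptr) = NULL;				\
	_v;					\
})

struct intf { void *gem_obj; };

static void destroy(struct intf *intf)
{
	void *obj = fetch_and_zero(&intf->gem_obj);

	if (!obj)
		return;		/* already torn down */
	free(obj);
	puts("freed");
}

int main(void)
{
	struct intf i = { .gem_obj = malloc(16) };

	destroy(&i);
	destroy(&i);	/* second call is a no-op, no double free */
	return 0;
}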
+3
@@ -20,11 +20,14 @@ struct mei_aux_device;
/**
* struct intel_gsc - graphics security controller
*
* @gem_obj: scratch memory for GSC operations
* @intf : gsc interface
*/
struct intel_gsc {
struct intel_gsc_intf {
struct mei_aux_device *adev;
struct drm_i915_gem_object *gem_obj;
int irq;
unsigned int id;
} intf[INTEL_GSC_NUM_INTERFACES];
+83 -28
@@ -31,14 +31,17 @@
#include "intel_rc6.h"
#include "intel_renderstate.h"
#include "intel_rps.h"
#include "intel_sa_media.h"
#include "intel_gt_sysfs.h"
#include "intel_uncore.h"
#include "shmem_utils.h"
-static void __intel_gt_init_early(struct intel_gt *gt)
+void intel_gt_common_init_early(struct intel_gt *gt)
{
-spin_lock_init(&gt->irq_lock);
+spin_lock_init(gt->irq_lock);
+INIT_LIST_HEAD(&gt->lmem_userfault_list);
+mutex_init(&gt->lmem_userfault_lock);
INIT_LIST_HEAD(&gt->closed_vma);
spin_lock_init(&gt->closed_lock);
@@ -58,14 +61,19 @@ static void __intel_gt_init_early(struct intel_gt *gt)
}
/* Preliminary initialization of Tile 0 */
-void intel_root_gt_init_early(struct drm_i915_private *i915)
+int intel_root_gt_init_early(struct drm_i915_private *i915)
{
struct intel_gt *gt = to_gt(i915);
gt->i915 = i915;
gt->uncore = &i915->uncore;
gt->irq_lock = drmm_kzalloc(&i915->drm, sizeof(*gt->irq_lock), GFP_KERNEL);
if (!gt->irq_lock)
return -ENOMEM;
-__intel_gt_init_early(gt);
+intel_gt_common_init_early(gt);
+return 0;
}
static int intel_gt_probe_lmem(struct intel_gt *gt)
@@ -781,26 +789,25 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
int ret;
if (!gt_is_root(gt)) {
-struct intel_uncore_mmio_debug *mmio_debug;
struct intel_uncore *uncore;
+spinlock_t *irq_lock;
-uncore = kzalloc(sizeof(*uncore), GFP_KERNEL);
+uncore = drmm_kzalloc(&gt->i915->drm, sizeof(*uncore), GFP_KERNEL);
if (!uncore)
return -ENOMEM;
-mmio_debug = kzalloc(sizeof(*mmio_debug), GFP_KERNEL);
-if (!mmio_debug) {
-kfree(uncore);
+irq_lock = drmm_kzalloc(&gt->i915->drm, sizeof(*irq_lock), GFP_KERNEL);
+if (!irq_lock)
return -ENOMEM;
-}
gt->uncore = uncore;
-gt->uncore->debug = mmio_debug;
+gt->irq_lock = irq_lock;
-__intel_gt_init_early(gt);
+intel_gt_common_init_early(gt);
}
intel_uncore_init_early(gt->uncore, gt);
intel_wakeref_auto_init(&gt->userfault_wakeref, gt->uncore->rpm);
ret = intel_uncore_setup_mmio(gt->uncore, phys_addr);
if (ret)
@@ -811,24 +818,14 @@ static int intel_gt_tile_setup(struct intel_gt *gt, phys_addr_t phys_addr)
return 0;
}
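intel_gt_tile_setup() now uses drmm_kzalloc(), tying the uncore and irq_lock allocations to the DRM device's lifetime; that is what lets intel_gt_tile_cleanup() below be deleted outright. A tiny user-space analogue of such a managed allocator (illustrative fixed-size registry, not the drmm API):

/* Sketch: allocations remembered at alloc time, freed in one place at
 * release time, so callers need no individual free paths. */
#include <stdio.h>
#include <stdlib.h>

struct managed {
	void *ptrs[8];
	int n;
};

static void *managed_kzalloc(struct managed *m, size_t size)
{
	void *p = calloc(1, size);

	if (p && m->n < 8)
		m->ptrs[m->n++] = p;	/* remembered for release time */
	return p;
}

static void managed_release(struct managed *m)
{
	while (m->n)
		free(m->ptrs[--m->n]);	/* no per-caller free needed */
}

int main(void)
{
	struct managed dev = { .n = 0 };
	char *uncore = managed_kzalloc(&dev, 64);
	char *irq_lock = managed_kzalloc(&dev, 8);

	printf("alloc ok: %d\n", uncore != NULL && irq_lock != NULL);
	managed_release(&dev);	/* one call frees everything */
	return 0;
}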
-static void
-intel_gt_tile_cleanup(struct intel_gt *gt)
-{
-intel_uncore_cleanup_mmio(gt->uncore);
-if (!gt_is_root(gt)) {
-kfree(gt->uncore->debug);
-kfree(gt->uncore);
-kfree(gt);
-}
-}
int intel_gt_probe_all(struct drm_i915_private *i915)
{
struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_gt *gt = &i915->gt0;
const struct intel_gt_definition *gtdef;
phys_addr_t phys_addr;
unsigned int mmio_bar;
unsigned int i;
int ret;
mmio_bar = GRAPHICS_VER(i915) == 2 ? GEN2_GTTMMADR_BAR : GTTMMADR_BAR;
@@ -839,14 +836,74 @@ int intel_gt_probe_all(struct drm_i915_private *i915)
* and it has been already initialized early during probe
* in i915_driver_probe()
*/
gt->i915 = i915;
gt->name = "Primary GT";
gt->info.engine_mask = RUNTIME_INFO(i915)->platform_engine_mask;
drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
ret = intel_gt_tile_setup(gt, phys_addr);
if (ret)
return ret;
i915->gt[0] = gt;
-/* TODO: add more tiles */
+if (!HAS_EXTRA_GT_LIST(i915))
return 0;
for (i = 1, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1];
gtdef->name != NULL;
i++, gtdef = &INTEL_INFO(i915)->extra_gt_list[i - 1]) {
gt = drmm_kzalloc(&i915->drm, sizeof(*gt), GFP_KERNEL);
if (!gt) {
ret = -ENOMEM;
goto err;
}
gt->i915 = i915;
gt->name = gtdef->name;
gt->type = gtdef->type;
gt->info.engine_mask = gtdef->engine_mask;
gt->info.id = i;
drm_dbg(&i915->drm, "Setting up %s\n", gt->name);
if (GEM_WARN_ON(range_overflows_t(resource_size_t,
gtdef->mapping_base,
SZ_16M,
pci_resource_len(pdev, mmio_bar)))) {
ret = -ENODEV;
goto err;
}
switch (gtdef->type) {
case GT_TILE:
ret = intel_gt_tile_setup(gt, phys_addr + gtdef->mapping_base);
break;
case GT_MEDIA:
ret = intel_sa_mediagt_setup(gt, phys_addr + gtdef->mapping_base,
gtdef->gsi_offset);
break;
case GT_PRIMARY:
/* Primary GT should not appear in extra GT list */
default:
MISSING_CASE(gtdef->type);
ret = -ENODEV;
}
if (ret)
goto err;
i915->gt[i] = gt;
}
return 0;
err:
i915_probe_error(i915, "Failed to initialize %s! (%d)\n", gtdef->name, ret);
intel_gt_release_all(i915);
return ret;
}
int intel_gt_tiles_init(struct drm_i915_private *i915)
@@ -869,10 +926,8 @@ void intel_gt_release_all(struct drm_i915_private *i915)
struct intel_gt *gt;
unsigned int id;
-for_each_gt(gt, i915, id) {
-intel_gt_tile_cleanup(gt);
+for_each_gt(gt, i915, id)
i915->gt[id] = NULL;
-}
}
void intel_gt_info_print(const struct intel_gt_info *info,
+2 -2
@@ -44,7 +44,8 @@ static inline struct intel_gt *gsc_to_gt(struct intel_gsc *gsc)
return container_of(gsc, struct intel_gt, gsc);
}
-void intel_root_gt_init_early(struct drm_i915_private *i915);
+void intel_gt_common_init_early(struct intel_gt *gt);
+int intel_root_gt_init_early(struct drm_i915_private *i915);
int intel_gt_assign_ggtt(struct intel_gt *gt);
int intel_gt_init_mmio(struct intel_gt *gt);
int __must_check intel_gt_init_hw(struct intel_gt *gt);
@@ -54,7 +55,6 @@ void intel_gt_driver_register(struct intel_gt *gt);
void intel_gt_driver_unregister(struct intel_gt *gt);
void intel_gt_driver_remove(struct intel_gt *gt);
void intel_gt_driver_release(struct intel_gt *gt);
void intel_gt_driver_late_release_all(struct drm_i915_private *i915);
int intel_gt_wait_for_idle(struct intel_gt *gt, long timeout);
+83 -95
@@ -26,26 +26,6 @@ static u32 read_reference_ts_freq(struct intel_uncore *uncore)
return base_freq + frac_freq;
}
-static u32 gen9_get_crystal_clock_freq(struct intel_uncore *uncore,
-u32 rpm_config_reg)
-{
-u32 f19_2_mhz = 19200000;
-u32 f24_mhz = 24000000;
-u32 crystal_clock =
-(rpm_config_reg & GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_MASK) >>
-GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_SHIFT;
-switch (crystal_clock) {
-case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_19_2_MHZ:
-return f19_2_mhz;
-case GEN9_RPM_CONFIG0_CRYSTAL_CLOCK_FREQ_24_MHZ:
-return f24_mhz;
-default:
-MISSING_CASE(crystal_clock);
-return 0;
-}
-}
static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
u32 rpm_config_reg)
{
@@ -72,98 +52,106 @@ static u32 gen11_get_crystal_clock_freq(struct intel_uncore *uncore,
}
}
-static u32 read_clock_frequency(struct intel_uncore *uncore)
+static u32 gen11_read_clock_frequency(struct intel_uncore *uncore)
{
-u32 f12_5_mhz = 12500000;
-u32 f19_2_mhz = 19200000;
-u32 f24_mhz = 24000000;
+u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
+u32 freq = 0;
-if (GRAPHICS_VER(uncore->i915) <= 4) {
-/*
-* PRMs say:
-*
-* "The value in this register increments once every 16
-* hclks." (through the “Clocking Configuration”
-* (CLKCFG) MCHBAR register)
-*/
-return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
-} else if (GRAPHICS_VER(uncore->i915) <= 8) {
-/*
-* PRMs say:
-*
-* "The PCU TSC counts 10ns increments; this timestamp
-* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
-* rolling over every 1.5 hours).
-*/
-return f12_5_mhz;
-} else if (GRAPHICS_VER(uncore->i915) <= 9) {
-u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
-u32 freq = 0;
+/*
+* Note that on gen11+, the clock frequency may be reconfigured.
+* We do not, and we assume nobody else does.
+*
+* First figure out the reference frequency. There are 2 ways
+* we can compute the frequency, either through the
+* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
+* tells us which one we should use.
+*/
+if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
+freq = read_reference_ts_freq(uncore);
+} else {
+u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
-if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
-freq = read_reference_ts_freq(uncore);
-} else {
-freq = IS_GEN9_LP(uncore->i915) ? f19_2_mhz : f24_mhz;
-/*
-* Now figure out how the command stream's timestamp
-* register increments from this frequency (it might
-* increment only every few clock cycle).
-*/
-freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
-CTC_SHIFT_PARAMETER_SHIFT);
-}
-return freq;
-} else if (GRAPHICS_VER(uncore->i915) <= 12) {
-u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
-u32 freq = 0;
+freq = gen11_get_crystal_clock_freq(uncore, c0);
/*
-* First figure out the reference frequency. There are 2 ways
-* we can compute the frequency, either through the
-* TIMESTAMP_OVERRIDE register or through RPM_CONFIG. CTC_MODE
-* tells us which one we should use.
+* Now figure out how the command stream's timestamp
+* register increments from this frequency (it might
+* increment only every few clock cycle).
*/
-if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
-freq = read_reference_ts_freq(uncore);
-} else {
-u32 c0 = intel_uncore_read(uncore, RPM_CONFIG0);
-if (GRAPHICS_VER(uncore->i915) >= 11)
-freq = gen11_get_crystal_clock_freq(uncore, c0);
-else
-freq = gen9_get_crystal_clock_freq(uncore, c0);
-/*
-* Now figure out how the command stream's timestamp
-* register increments from this frequency (it might
-* increment only every few clock cycle).
-*/
-freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
-GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
-}
-return freq;
+freq >>= 3 - ((c0 & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >>
+GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT);
}
-MISSING_CASE("Unknown gen, unable to read command streamer timestamp frequency\n");
-return 0;
+return freq;
}
static u32 gen9_read_clock_frequency(struct intel_uncore *uncore)
{
u32 ctc_reg = intel_uncore_read(uncore, CTC_MODE);
u32 freq = 0;
if ((ctc_reg & CTC_SOURCE_PARAMETER_MASK) == CTC_SOURCE_DIVIDE_LOGIC) {
freq = read_reference_ts_freq(uncore);
} else {
freq = IS_GEN9_LP(uncore->i915) ? 19200000 : 24000000;
/*
* Now figure out how the command stream's timestamp
* register increments from this frequency (it might
* increment only every few clock cycle).
*/
freq >>= 3 - ((ctc_reg & CTC_SHIFT_PARAMETER_MASK) >>
CTC_SHIFT_PARAMETER_SHIFT);
}
return freq;
}
static u32 gen5_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* PRMs say:
*
* "The PCU TSC counts 10ns increments; this timestamp
* reflects bits 38:3 of the TSC (i.e. 80ns granularity,
* rolling over every 1.5 hours).
*/
return 12500000;
}
static u32 gen2_read_clock_frequency(struct intel_uncore *uncore)
{
/*
* PRMs say:
*
* "The value in this register increments once every 16
* hclks." (through the “Clocking Configuration”
* (CLKCFG) MCHBAR register)
*/
return RUNTIME_INFO(uncore->i915)->rawclk_freq * 1000 / 16;
}
static u32 read_clock_frequency(struct intel_uncore *uncore)
{
if (GRAPHICS_VER(uncore->i915) >= 11)
return gen11_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) >= 9)
return gen9_read_clock_frequency(uncore);
else if (GRAPHICS_VER(uncore->i915) >= 5)
return gen5_read_clock_frequency(uncore);
else
return gen2_read_clock_frequency(uncore);
}
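Each per-gen reader ends by shifting the crystal frequency down according to how often the command-streamer timestamp actually ticks, encoded as a small shift field. A worked stand-alone example of that arithmetic (values illustrative):

/* Sketch of freq >>= 3 - shift_field, as in the gen9/gen11 paths. */
#include <stdint.h>
#include <stdio.h>

static uint32_t cs_timestamp_freq(uint32_t crystal_hz, uint32_t shift_field)
{
	/* shift_field = 0..3, taken from CTC_MODE/RPM_CONFIG0 */
	return crystal_hz >> (3 - shift_field);
}

int main(void)
{
	/* 24 MHz crystal, shift field 3 -> timestamp ticks at 24 MHz */
	printf("%u Hz\n", cs_timestamp_freq(24000000, 3));
	/* 19.2 MHz crystal, shift field 1 -> 19.2 MHz / 4 = 4.8 MHz */
	printf("%u Hz\n", cs_timestamp_freq(19200000, 1));
	return 0;
}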
void intel_gt_init_clock_frequency(struct intel_gt *gt)
{
-/*
-* Note that on gen11+, the clock frequency may be reconfigured.
-* We do not, and we assume nobody else does.
-*/
gt->clock_frequency = read_clock_frequency(gt->uncore);
-if (gt->clock_frequency)
-gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
+/* Icelake appears to use another fixed frequency for CTX_TIMESTAMP */
+if (GRAPHICS_VER(gt->i915) == 11)
+gt->clock_period_ns = NSEC_PER_SEC / 13750000;
+else if (gt->clock_frequency)
+gt->clock_period_ns = intel_gt_clock_interval_to_ns(gt, 1);
GT_TRACE(gt,
"Using clock frequency: %dkHz, period: %dns, wrap: %lldms\n",
+27 -8
@@ -29,7 +29,7 @@ gen11_gt_engine_identity(struct intel_gt *gt,
u32 timeout_ts;
u32 ident;
-lockdep_assert_held(&gt->irq_lock);
+lockdep_assert_held(gt->irq_lock);
raw_reg_write(regs, GEN11_IIR_REG_SELECTOR(bank), BIT(bit));
@@ -59,11 +59,17 @@ static void
gen11_other_irq_handler(struct intel_gt *gt, const u8 instance,
const u16 iir)
{
struct intel_gt *media_gt = gt->i915->media_gt;
if (instance == OTHER_GUC_INSTANCE)
return guc_irq_handler(&gt->uc.guc, iir);
if (instance == OTHER_MEDIA_GUC_INSTANCE && media_gt)
return guc_irq_handler(&media_gt->uc.guc, iir);
if (instance == OTHER_GTPM_INSTANCE)
return gen11_rps_irq_handler(&gt->rps, iir);
if (instance == OTHER_MEDIA_GTPM_INSTANCE && media_gt)
return gen11_rps_irq_handler(&media_gt->rps, iir);
if (instance == OTHER_KCR_INSTANCE)
return intel_pxp_irq_handler(&gt->pxp, iir);
@@ -81,6 +87,18 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
{
struct intel_engine_cs *engine;
/*
* Platforms with standalone media have their media engines in another
* GT.
*/
if (MEDIA_VER(gt->i915) >= 13 &&
(class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS)) {
if (!gt->i915->media_gt)
goto err;
gt = gt->i915->media_gt;
}
if (instance <= MAX_ENGINE_INSTANCE)
engine = gt->engine_class[class][instance];
else
@@ -89,6 +107,7 @@ gen11_engine_irq_handler(struct intel_gt *gt, const u8 class,
if (likely(engine))
return intel_engine_cs_irq(engine, iir);
err:
WARN_ONCE(1, "unhandled engine interrupt class=0x%x, instance=0x%x\n",
class, instance);
}
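The engine handler above now re-routes video-class interrupts to the media GT on standalone-media (media version 13+) platforms before doing the engine lookup. A stripped-down sketch of that routing decision (class constants illustrative):

/* Sketch: pick which GT handles the interrupt based on engine class. */
#include <stdio.h>

enum { RENDER_CLASS, VIDEO_DECODE_CLASS, VIDEO_ENHANCEMENT_CLASS };

struct gt { const char *name; };

static struct gt primary = { "primary GT" };
static struct gt media = { "media GT" };

static struct gt *route_irq(int media_ver, int class, struct gt *media_gt)
{
	struct gt *gt = &primary;

	if (media_ver >= 13 &&
	    (class == VIDEO_DECODE_CLASS || class == VIDEO_ENHANCEMENT_CLASS) &&
	    media_gt)
		gt = media_gt;	/* video engines live on the media tile */
	return gt;
}

int main(void)
{
	printf("render -> %s\n", route_irq(13, RENDER_CLASS, &media)->name);
	printf("vcs    -> %s\n", route_irq(13, VIDEO_DECODE_CLASS, &media)->name);
	return 0;
}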
@@ -120,7 +139,7 @@ gen11_gt_bank_handler(struct intel_gt *gt, const unsigned int bank)
unsigned long intr_dw;
unsigned int bit;
-lockdep_assert_held(&gt->irq_lock);
+lockdep_assert_held(gt->irq_lock);
intr_dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
@@ -138,14 +157,14 @@ void gen11_gt_irq_handler(struct intel_gt *gt, const u32 master_ctl)
{
unsigned int bank;
-spin_lock(&gt->irq_lock);
+spin_lock(gt->irq_lock);
for (bank = 0; bank < 2; bank++) {
if (master_ctl & GEN11_GT_DW_IRQ(bank))
gen11_gt_bank_handler(gt, bank);
}
-spin_unlock(&gt->irq_lock);
+spin_unlock(gt->irq_lock);
}
bool gen11_gt_reset_one_iir(struct intel_gt *gt,
@@ -154,7 +173,7 @@ bool gen11_gt_reset_one_iir(struct intel_gt *gt,
void __iomem * const regs = gt->uncore->regs;
u32 dw;
-lockdep_assert_held(&gt->irq_lock);
+lockdep_assert_held(gt->irq_lock);
dw = raw_reg_read(regs, GEN11_GT_INTR_DW(bank));
if (dw & BIT(bit)) {
@@ -310,9 +329,9 @@ static void gen7_parity_error_irq_handler(struct intel_gt *gt, u32 iir)
if (!HAS_L3_DPF(gt->i915))
return;
-spin_lock(&gt->irq_lock);
+spin_lock(gt->irq_lock);
gen5_gt_disable_irq(gt, GT_PARITY_ERROR(gt->i915));
-spin_unlock(&gt->irq_lock);
+spin_unlock(gt->irq_lock);
if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
gt->i915->l3_parity.which_slice |= 1 << 1;
@@ -434,7 +453,7 @@ static void gen5_gt_update_irq(struct intel_gt *gt,
u32 interrupt_mask,
u32 enabled_irq_mask)
{
-lockdep_assert_held(&gt->irq_lock);
+lockdep_assert_held(gt->irq_lock);
GEM_BUG_ON(enabled_irq_mask & ~interrupt_mask);

Some files were not shown because too many files have changed in this diff.