Merge tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel into drm-next
- more userptr cornercase fixes from Chris
- clean up and tune forcewake handling (Tvrtko)
- more underrun fixes from Ville, mostly for ilk to appease CI
- fix unclaimed register warnings on vlv/chv and enable the debug code to catch them by default (Ville)
- skl gpu hang fixes for gt3/4 (Mika Kuoppala)
- edram improvements for gen9+ (Mika again)
- clean up gpu reset corner cases (Chris)
- fix ctx/ring machine deaths on snb/ilk (Chris)
- MOCS programming for all engines (Peter Antoine)
- robustify/clean up vlv/chv irq handler (Ville)
- split gen8+ irq handlers into ack/handle phase (Ville)
- tons of bxt rpm fixes (mostly around firmware interactions), from Imre
- hook up panel fitting for dsi panels (Ville)
- more runtime PM fixes all over from Imre
- shrinker polish (Chris)
- more guc fixes from Alex Dai and Dave Gordon
- tons of bugfixes and small polish all over (but with a big focus on bxt)

* tag 'drm-intel-next-2016-04-25' of git://anongit.freedesktop.org/drm-intel: (142 commits)
  drm/i915: Update DRIVER_DATE to 20160425
  drm/i915/bxt: Explicitly clear the Turbo control register
  drm/i915: Correct the i915_frequency_info debugfs output
  drm/i915: Macros to convert PM time interval values to microseconds
  drm/i915: Make RPS EI/thresholds multiple of 25 on SNB-BDW
  drm/i915: Fake HDMI live status
  drm/i915/bxt: Force reprogramming a PHY with invalid HW state
  drm/i915/bxt: Wait for PHY1 GRC done if PHY0 was already enabled
  drm/i915/bxt: Use PHY0 GRC value for HW state verification
  drm/i915: use dev_priv directly in gen8_ppgtt_notify_vgt
  drm/i915/bxt: Enable DC5 during runtime resume
  drm/i915/bxt: Sanitize DC state tracking during system resume
  drm/i915/bxt: Don't uninit/init display core twice during system suspend/resume
  drm/i915: Inline intel_suspend_complete
  drm/i915/kbl: Don't WARN for expected secondary MISC IO power well request
  drm/i915: Fix eDP low vswing for Broadwell
  drm/i915: check for ERR_PTR from i915_gem_object_pin_map()
  drm/i915/guc: local optimisations and updating comments
  drm/i915/guc: drop cached copy of 'wq_head'
  drm/i915/guc: keep GuC doorbell & process descriptor mapped in kernel
  ...
@@ -1,3 +1,20 @@
config DRM_I915_WERROR
bool "Force GCC to throw an error instead of a warning when compiling"
# As this may inadvertently break the build, only allow the user
# to shoot oneself in the foot iff they aim really hard
depends on EXPERT
# We use the dependency on !COMPILE_TEST to not be enabled in
# allmodconfig or allyesconfig configurations
depends on !COMPILE_TEST
default n
help
Add -Werror to the build flags for (and only for) i915.ko.
Do not enable this unless you are writing code for the i915.ko module.

Recommended for driver developers only.

If in doubt, say "N".

config DRM_I915_DEBUG
bool "Enable additional driver debugging"
depends on DRM_I915
@@ -10,3 +27,15 @@ config DRM_I915_DEBUG

If in doubt, say "N".

config DRM_I915_DEBUG_GEM
bool "Insert extra checks into the GEM internals"
default n
depends on DRM_I915_WERROR
help
Enable extra sanity checks (including BUGs) along the GEM driver
paths that may slow the system down and if hit hang the machine.

Recommended for driver developers only.

If in doubt, say "N".

@@ -2,6 +2,8 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.

subdir-ccflags-$(CONFIG_DRM_I915_WERROR) := -Werror

# Please keep these build lists sorted!

# core driver code
@@ -89,27 +89,34 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}

static const char *get_pin_flag(struct drm_i915_gem_object *obj)
static const char get_active_flag(struct drm_i915_gem_object *obj)
{
if (obj->pin_display)
return "p";
else
return " ";
return obj->active ? '*' : ' ';
}

static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
static const char get_pin_flag(struct drm_i915_gem_object *obj)
{
return obj->pin_display ? 'p' : ' ';
}

static const char get_tiling_flag(struct drm_i915_gem_object *obj)
{
switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
case I915_TILING_Y: return "Y";
case I915_TILING_NONE: return ' ';
case I915_TILING_X: return 'X';
case I915_TILING_Y: return 'Y';
}
}

static inline const char *get_global_flag(struct drm_i915_gem_object *obj)
static inline const char get_global_flag(struct drm_i915_gem_object *obj)
{
return i915_gem_obj_to_ggtt(obj) ? "g" : " ";
return i915_gem_obj_to_ggtt(obj) ? 'g' : ' ';
}

static inline const char get_pin_mapped_flag(struct drm_i915_gem_object *obj)
{
return obj->mapping ? 'M' : ' ';
}

static u64 i915_gem_obj_total_ggtt_size(struct drm_i915_gem_object *obj)
@@ -136,12 +143,13 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)

lockdep_assert_held(&obj->base.dev->struct_mutex);

seq_printf(m, "%pK: %s%s%s%s %8zdKiB %02x %02x [ ",
seq_printf(m, "%pK: %c%c%c%c%c %8zdKiB %02x %02x [ ",
&obj->base,
obj->active ? "*" : " ",
get_active_flag(obj),
get_pin_flag(obj),
get_tiling_flag(obj),
get_global_flag(obj),
get_pin_mapped_flag(obj),
obj->base.size / 1024,
obj->base.read_domains,
obj->base.write_domain);
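With all five helpers returning a plain char, describe_obj() now packs object status into one fixed-width flag field. As a purely hypothetical illustration (pointer hash and size invented, not from this diff), a debugfs line for an active, display-pinned, X-tiled object that is also bound in the GGTT and pin-mapped might begin:

    00000000deadbeef: *pXgM     4096KiB 02 02 [ ...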
@@ -435,6 +443,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
unsigned long pin_mapped_count = 0, pin_mapped_purgeable_count = 0;
u64 pin_mapped_size = 0, pin_mapped_purgeable_size = 0;
struct drm_i915_gem_object *obj;
struct drm_file *file;
struct i915_vma *vma;
@@ -468,6 +478,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
size += obj->base.size, ++count;
if (obj->madv == I915_MADV_DONTNEED)
purgeable_size += obj->base.size, ++purgeable_count;
if (obj->mapping) {
pin_mapped_count++;
pin_mapped_size += obj->base.size;
if (obj->pages_pin_count == 0) {
pin_mapped_purgeable_count++;
pin_mapped_purgeable_size += obj->base.size;
}
}
}
seq_printf(m, "%u unbound objects, %llu bytes\n", count, size);

@@ -485,6 +503,14 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
purgeable_size += obj->base.size;
++purgeable_count;
}
if (obj->mapping) {
pin_mapped_count++;
pin_mapped_size += obj->base.size;
if (obj->pages_pin_count == 0) {
pin_mapped_purgeable_count++;
pin_mapped_purgeable_size += obj->base.size;
}
}
}
seq_printf(m, "%u purgeable objects, %llu bytes\n",
purgeable_count, purgeable_size);
@@ -492,6 +518,10 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
mappable_count, mappable_size);
seq_printf(m, "%u fault mappable objects, %llu bytes\n",
count, size);
seq_printf(m,
"%lu [%lu] pin mapped objects, %llu [%llu] bytes [purgeable]\n",
pin_mapped_count, pin_mapped_purgeable_count,
pin_mapped_size, pin_mapped_purgeable_size);

seq_printf(m, "%llu [%llu] gtt total\n",
ggtt->base.total, ggtt->mappable_end - ggtt->base.start);
@@ -1216,12 +1246,12 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
rpdeclimit = I915_READ(GEN6_RP_DOWN_THRESHOLD);

rpstat = I915_READ(GEN6_RPSTAT1);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI);
rpcurup = I915_READ(GEN6_RP_CUR_UP);
rpprevup = I915_READ(GEN6_RP_PREV_UP);
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
rpupei = I915_READ(GEN6_RP_CUR_UP_EI) & GEN6_CURICONT_MASK;
rpcurup = I915_READ(GEN6_RP_CUR_UP) & GEN6_CURBSYTAVG_MASK;
rpprevup = I915_READ(GEN6_RP_PREV_UP) & GEN6_CURBSYTAVG_MASK;
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI) & GEN6_CURIAVG_MASK;
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN) & GEN6_CURBSYTAVG_MASK;
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN) & GEN6_CURBSYTAVG_MASK;
if (IS_GEN9(dev))
cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
@@ -1261,21 +1291,21 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
seq_printf(m, "RPDECLIMIT: 0x%08x\n", rpdeclimit);
seq_printf(m, "RPNSWREQ: %dMHz\n", reqf);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV UP: %dus\n", rpprevup &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR UP EI: %d (%dus)\n",
rpupei, GT_PM_INTERVAL_TO_US(dev_priv, rpupei));
seq_printf(m, "RP CUR UP: %d (%dus)\n",
rpcurup, GT_PM_INTERVAL_TO_US(dev_priv, rpcurup));
seq_printf(m, "RP PREV UP: %d (%dus)\n",
rpprevup, GT_PM_INTERVAL_TO_US(dev_priv, rpprevup));
seq_printf(m, "Up threshold: %d%%\n",
dev_priv->rps.up_threshold);

seq_printf(m, "RP CUR DOWN EI: %dus\n", rpdownei &
GEN6_CURIAVG_MASK);
seq_printf(m, "RP CUR DOWN: %dus\n", rpcurdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP PREV DOWN: %dus\n", rpprevdown &
GEN6_CURBSYTAVG_MASK);
seq_printf(m, "RP CUR DOWN EI: %d (%dus)\n",
rpdownei, GT_PM_INTERVAL_TO_US(dev_priv, rpdownei));
seq_printf(m, "RP CUR DOWN: %d (%dus)\n",
rpcurdown, GT_PM_INTERVAL_TO_US(dev_priv, rpcurdown));
seq_printf(m, "RP PREV DOWN: %d (%dus)\n",
rpprevdown, GT_PM_INTERVAL_TO_US(dev_priv, rpprevdown));
seq_printf(m, "Down threshold: %d%%\n",
dev_priv->rps.down_threshold);
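Note the output change here: the old code printed the masked register count with a "us" suffix, mislabelling a raw interval count as time; the new code prints the raw value and a separate microsecond conversion via GT_PM_INTERVAL_TO_US(). A hypothetical line under the new format (values invented) would read:

    RP CUR UP EI: 12500 (10000us)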
@@ -1469,12 +1499,11 @@ static int i915_forcewake_domains(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_uncore_forcewake_domain *fw_domain;
int i;

spin_lock_irq(&dev_priv->uncore.lock);
for_each_fw_domain(fw_domain, dev_priv, i) {
for_each_fw_domain(fw_domain, dev_priv) {
seq_printf(m, "%s.wake_count = %u\n",
intel_uncore_forcewake_domain_to_str(i),
intel_uncore_forcewake_domain_to_str(fw_domain->id),
fw_domain->wake_count);
}
spin_unlock_irq(&dev_priv->uncore.lock);
@@ -2405,10 +2434,11 @@ static int i915_llc(struct seq_file *m, void *data)
struct drm_info_node *node = m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const bool edram = INTEL_GEN(dev_priv) > 8;

/* Size calculation for LLC is a bit of a pain. Ignore for now. */
seq_printf(m, "LLC: %s\n", yesno(HAS_LLC(dev)));
seq_printf(m, "eLLC: %zuMB\n", dev_priv->ellc_size);
seq_printf(m, "%s: %lluMB\n", edram ? "eDRAM" : "eLLC",
intel_uncore_edram_size(dev_priv)/1024/1024);

return 0;
}
@@ -4723,7 +4753,7 @@ i915_wedged_get(void *data, u64 *val)
struct drm_device *dev = data;
struct drm_i915_private *dev_priv = dev->dev_private;

*val = atomic_read(&dev_priv->gpu_error.reset_counter);
*val = i915_terminally_wedged(&dev_priv->gpu_error);

return 0;
}
@@ -257,13 +257,6 @@ static int i915_get_bridge_dev(struct drm_device *dev)
return 0;
}

#define MCHBAR_I915 0x44
#define MCHBAR_I965 0x48
#define MCHBAR_SIZE (4*4096)

#define DEVEN_REG 0x54
#define DEVEN_MCHBAR_EN (1 << 28)

/* Allocate space for the MCH regs if needed, return nonzero on error */
static int
intel_alloc_mchbar_resource(struct drm_device *dev)
@@ -325,7 +318,7 @@ intel_setup_mchbar(struct drm_device *dev)
dev_priv->mchbar_need_disable = false;

if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
pci_read_config_dword(dev_priv->bridge_dev, DEVEN, &temp);
enabled = !!(temp & DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -343,7 +336,7 @@ intel_setup_mchbar(struct drm_device *dev)

/* Space is allocated or reserved, so enable it. */
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG,
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
temp | DEVEN_MCHBAR_EN);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
@@ -356,17 +349,24 @@ intel_teardown_mchbar(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
u32 temp;

if (dev_priv->mchbar_need_disable) {
if (IS_I915G(dev) || IS_I915GM(dev)) {
pci_read_config_dword(dev_priv->bridge_dev, DEVEN_REG, &temp);
temp &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN_REG, temp);
u32 deven_val;

pci_read_config_dword(dev_priv->bridge_dev, DEVEN,
&deven_val);
deven_val &= ~DEVEN_MCHBAR_EN;
pci_write_config_dword(dev_priv->bridge_dev, DEVEN,
deven_val);
} else {
pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg, &temp);
temp &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg, temp);
u32 mchbar_val;

pci_read_config_dword(dev_priv->bridge_dev, mchbar_reg,
&mchbar_val);
mchbar_val &= ~1;
pci_write_config_dword(dev_priv->bridge_dev, mchbar_reg,
mchbar_val);
}
}
@@ -567,10 +567,9 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}

static int intel_suspend_complete(struct drm_i915_private *dev_priv);
static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
bool rpm_resume);
static int bxt_resume_prepare(struct drm_i915_private *dev_priv);
static int vlv_suspend_complete(struct drm_i915_private *dev_priv);

static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
@@ -640,8 +639,7 @@ static int i915_drm_suspend(struct drm_device *dev)

intel_display_set_init_power(dev_priv, false);

if (HAS_CSR(dev_priv))
flush_work(&dev_priv->csr.work);
intel_csr_ucode_suspend(dev_priv);

out:
enable_rpm_wakeref_asserts(dev_priv);
@@ -657,7 +655,8 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)

disable_rpm_wakeref_asserts(dev_priv);

fw_csr = suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
fw_csr = !IS_BROXTON(dev_priv) &&
suspend_to_idle(dev_priv) && dev_priv->csr.dmc_payload;
/*
* In case of firmware assisted context save/restore don't manually
* deinit the power domains. This also means the CSR/DMC firmware will
@@ -668,7 +667,13 @@ static int i915_drm_suspend_late(struct drm_device *drm_dev, bool hibernation)
if (!fw_csr)
intel_power_domains_suspend(dev_priv);

ret = intel_suspend_complete(dev_priv);
ret = 0;
if (IS_BROXTON(dev_priv))
bxt_enable_dc9(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
hsw_enable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);

if (ret) {
DRM_ERROR("Suspend complete failed: %d\n", ret);
@@ -732,6 +737,8 @@ static int i915_drm_resume(struct drm_device *dev)

disable_rpm_wakeref_asserts(dev_priv);

intel_csr_ucode_resume(dev_priv);

mutex_lock(&dev->struct_mutex);
i915_gem_restore_gtt_mappings(dev);
mutex_unlock(&dev->struct_mutex);
@@ -802,7 +809,7 @@
static int i915_drm_resume_early(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret = 0;
int ret;

/*
* We have a resume ordering issue with the snd-hda driver also
@@ -813,6 +820,36 @@ static int i915_drm_resume_early(struct drm_device *dev)
* FIXME: This should be solved with a special hdmi sink device or
* similar so that power domains can be employed.
*/

/*
* Note that we need to set the power state explicitly, since we
* powered off the device during freeze and the PCI core won't power
* it back up for us during thaw. Powering off the device during
* freeze is not a hard requirement though, and during the
* suspend/resume phases the PCI core makes sure we get here with the
* device powered on. So in case we change our freeze logic and keep
* the device powered we can also remove the following set power state
* call.
*/
ret = pci_set_power_state(dev->pdev, PCI_D0);
if (ret) {
DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
goto out;
}

/*
* Note that pci_enable_device() first enables any parent bridge
* device and only then sets the power state for this device. The
* bridge enabling is a nop though, since bridge devices are resumed
* first. The order of enabling power and enabling the device is
* imposed by the PCI core as described above, so here we preserve the
* same order for the freeze/thaw phases.
*
* TODO: eventually we should remove pci_disable_device() /
* pci_enable_enable_device() from suspend/resume. Due to how they
* depend on the device enable refcount we can't anyway depend on them
* disabling/enabling the device.
*/
if (pci_enable_device(dev->pdev)) {
ret = -EIO;
goto out;
@@ -830,21 +867,25 @@ static int i915_drm_resume_early(struct drm_device *dev)

intel_uncore_early_sanitize(dev, true);

if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
if (IS_BROXTON(dev)) {
if (!dev_priv->suspended_to_idle)
gen9_sanitize_dc_state(dev_priv);
bxt_disable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
}

intel_uncore_sanitize(dev);

if (!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
if (IS_BROXTON(dev_priv) ||
!(dev_priv->suspended_to_idle && dev_priv->csr.dmc_payload))
intel_power_domains_init_hw(dev_priv, true);

enable_rpm_wakeref_asserts(dev_priv);

out:
dev_priv->suspended_to_idle = false;

enable_rpm_wakeref_asserts(dev_priv);

return ret;
}

@@ -880,23 +921,32 @@ int i915_resume_switcheroo(struct drm_device *dev)
int i915_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool simulated;
struct i915_gpu_error *error = &dev_priv->gpu_error;
unsigned reset_counter;
int ret;

intel_reset_gt_powersave(dev);

mutex_lock(&dev->struct_mutex);

i915_gem_reset(dev);
/* Clear any previous failed attempts at recovery. Time to try again. */
atomic_andnot(I915_WEDGED, &error->reset_counter);

simulated = dev_priv->gpu_error.stop_rings != 0;
/* Clear the reset-in-progress flag and increment the reset epoch. */
reset_counter = atomic_inc_return(&error->reset_counter);
if (WARN_ON(__i915_reset_in_progress(reset_counter))) {
ret = -EIO;
goto error;
}

i915_gem_reset(dev);

ret = intel_gpu_reset(dev, ALL_ENGINES);

/* Also reset the gpu hangman. */
if (simulated) {
if (error->stop_rings != 0) {
DRM_INFO("Simulated gpu hang, resetting stop_rings\n");
dev_priv->gpu_error.stop_rings = 0;
error->stop_rings = 0;
if (ret == -ENODEV) {
DRM_INFO("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@@ -908,9 +958,11 @@ int i915_reset(struct drm_device *dev)

pr_notice("drm/i915: Resetting chip after gpu hang\n");

if (ret) {
DRM_ERROR("Failed to reset chip: %i\n", ret);
mutex_unlock(&dev->struct_mutex);
return ret;
if (ret != -ENODEV)
DRM_ERROR("Failed to reset chip: %i\n", ret);
else
DRM_DEBUG_DRIVER("GPU reset disabled\n");
goto error;
}

intel_overlay_reset(dev_priv);
@@ -929,20 +981,14 @@ int i915_reset(struct drm_device *dev)
* was running at the time of the reset (i.e. we weren't VT
* switched away).
*/

/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
dev_priv->gpu_error.reload_in_reset = true;

ret = i915_gem_init_hw(dev);

dev_priv->gpu_error.reload_in_reset = false;

mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("Failed hw init on reset %d\n", ret);
return ret;
goto error;
}

mutex_unlock(&dev->struct_mutex);

/*
* rps/rc6 re-init is necessary to restore state lost after the
* reset and the re-install of gt irqs. Skip for ironlake per
@@ -953,6 +999,11 @@ int i915_reset(struct drm_device *dev)
intel_enable_gt_powersave(dev);

return 0;

error:
atomic_or(I915_WEDGED, &error->reset_counter);
mutex_unlock(&dev->struct_mutex);
return ret;
}

static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -1059,44 +1110,6 @@ static int i915_pm_resume(struct device *dev)
return i915_drm_resume(drm_dev);
}

static int hsw_suspend_complete(struct drm_i915_private *dev_priv)
{
hsw_enable_pc8(dev_priv);

return 0;
}

static int bxt_suspend_complete(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;

/* TODO: when DC5 support is added disable DC5 here. */

broxton_ddi_phy_uninit(dev);
broxton_uninit_cdclk(dev);
bxt_enable_dc9(dev_priv);

return 0;
}

static int bxt_resume_prepare(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = dev_priv->dev;

/* TODO: when CSR FW support is added make sure the FW is loaded */

bxt_disable_dc9(dev_priv);

/*
* TODO: when DC5 support is added enable DC5 here if the CSR FW
* is available.
*/
broxton_init_cdclk(dev);
broxton_ddi_phy_init(dev);

return 0;
}

/*
* Save all Gunit registers that may be lost after a D3 and a subsequent
* S0i[R123] transition. The list of registers needing a save/restore is
@@ -1502,7 +1515,16 @@ static int intel_runtime_suspend(struct device *device)
intel_suspend_gt_powersave(dev);
intel_runtime_pm_disable_interrupts(dev_priv);

ret = intel_suspend_complete(dev_priv);
ret = 0;
if (IS_BROXTON(dev_priv)) {
bxt_display_core_uninit(dev_priv);
bxt_enable_dc9(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_enable_pc8(dev_priv);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = vlv_suspend_complete(dev_priv);
}

if (ret) {
DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -1576,12 +1598,17 @@ static int intel_runtime_resume(struct device *device)
if (IS_GEN6(dev_priv))
intel_init_pch_refclk(dev);

if (IS_BROXTON(dev))
ret = bxt_resume_prepare(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
if (IS_BROXTON(dev)) {
bxt_disable_dc9(dev_priv);
bxt_display_core_init(dev_priv, true);
if (dev_priv->csr.dmc_payload &&
(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_UPTO_DC5))
gen9_enable_dc5(dev_priv);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
hsw_disable_pc8(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = vlv_resume_prepare(dev_priv, true);
}

/*
* No point of rolling back things in case of an error, as the best
@@ -1612,26 +1639,6 @@ static int intel_runtime_resume(struct device *device)
return ret;
}

/*
* This function implements common functionality of runtime and system
* suspend sequence.
*/
static int intel_suspend_complete(struct drm_i915_private *dev_priv)
{
int ret;

if (IS_BROXTON(dev_priv))
ret = bxt_suspend_complete(dev_priv);
else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
ret = hsw_suspend_complete(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
ret = vlv_suspend_complete(dev_priv);
else
ret = 0;

return ret;
}

static const struct dev_pm_ops i915_pm_ops = {
/*
* S0ix (via system suspend) and S3 event handlers [PMSG_SUSPEND,
@@ -33,34 +33,40 @@
#include <uapi/drm/i915_drm.h>
#include <uapi/drm/drm_fourcc.h>

#include <drm/drmP.h>
#include "i915_params.h"
#include "i915_reg.h"
#include "intel_bios.h"
#include "intel_ringbuffer.h"
#include "intel_lrc.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"
#include <linux/io-mapping.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>
#include <linux/backlight.h>
#include <linux/hashtable.h>
#include <linux/intel-iommu.h>
#include <linux/kref.h>
#include <linux/pm_qos.h>
#include "intel_guc.h"
#include <linux/shmem_fs.h>

#include <drm/drmP.h>
#include <drm/intel-gtt.h>
#include <drm/drm_legacy.h> /* for struct drm_dma_handle */
#include <drm/drm_gem.h>

#include "i915_params.h"
#include "i915_reg.h"

#include "intel_bios.h"
#include "intel_dpll_mgr.h"
#include "intel_guc.h"
#include "intel_lrc.h"
#include "intel_ringbuffer.h"

#include "i915_gem.h"
#include "i915_gem_gtt.h"
#include "i915_gem_render_state.h"

/* General customization:
*/

#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
#define DRIVER_DATE "20160411"
#define DRIVER_DATE "20160425"

#undef WARN_ON
/* Many gcc seem to no see through this and fall over :( */
@@ -634,6 +640,13 @@ enum forcewake_domains {
FORCEWAKE_MEDIA)
};

#define FW_REG_READ (1)
#define FW_REG_WRITE (2)

enum forcewake_domains
intel_uncore_forcewake_for_reg(struct drm_i915_private *dev_priv,
i915_reg_t reg, unsigned int op);

struct intel_uncore_funcs {
void (*force_wake_get)(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
@@ -666,8 +679,9 @@ struct intel_uncore {
struct intel_uncore_forcewake_domain {
struct drm_i915_private *i915;
enum forcewake_domain_id id;
enum forcewake_domains mask;
unsigned wake_count;
struct timer_list timer;
struct hrtimer timer;
i915_reg_t reg_set;
u32 val_set;
u32 val_clear;
@@ -680,14 +694,14 @@ struct intel_uncore {
};

/* Iterate over initialised fw domains */
#define for_each_fw_domain_mask(domain__, mask__, dev_priv__, i__) \
for ((i__) = 0, (domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(i__) < FW_DOMAIN_ID_COUNT; \
(i__)++, (domain__) = &(dev_priv__)->uncore.fw_domain[i__]) \
for_each_if (((mask__) & (dev_priv__)->uncore.fw_domains) & (1 << (i__)))
#define for_each_fw_domain_masked(domain__, mask__, dev_priv__) \
for ((domain__) = &(dev_priv__)->uncore.fw_domain[0]; \
(domain__) < &(dev_priv__)->uncore.fw_domain[FW_DOMAIN_ID_COUNT]; \
(domain__)++) \
for_each_if ((mask__) & (domain__)->mask)

#define for_each_fw_domain(domain__, dev_priv__) \
for_each_fw_domain_masked(domain__, FORCEWAKE_ALL, dev_priv__)
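The reworked iterators advance a domain pointer directly and test each domain's own mask field, so callers drop the id-indexed bookkeeping. A minimal usage sketch, mirroring the i915_debugfs.c hunk earlier in this diff (with m and dev_priv assumed in scope):

    struct intel_uncore_forcewake_domain *fw_domain;

    /* No index variable: the macro walks the fw_domain array itself and
     * for_each_if() skips domains not selected by FORCEWAKE_ALL.
     */
    for_each_fw_domain(fw_domain, dev_priv)
            seq_printf(m, "%s.wake_count = %u\n",
                       intel_uncore_forcewake_domain_to_str(fw_domain->id),
                       fw_domain->wake_count);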
#define CSR_VERSION(major, minor) ((major) << 16 | (minor))
#define CSR_VERSION_MAJOR(version) ((version) >> 16)
@@ -996,6 +1010,7 @@ struct intel_fbc_work;

struct intel_gmbus {
struct i2c_adapter adapter;
#define GMBUS_FORCE_BIT_RETRY (1U << 31)
u32 force_bit;
u32 reg0;
i915_reg_t gpio_reg;
@@ -1385,9 +1400,6 @@ struct i915_gpu_error {

/* For missed irq/seqno simulation. */
unsigned int test_irq_rings;

/* Used to prevent gem_check_wedged returning -EAGAIN during gpu reset */
bool reload_in_reset;
};

enum modeset_restore {
@@ -1444,6 +1456,7 @@ struct intel_vbt_data {
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
unsigned int fdi_rx_polarity_inverted:1;
unsigned int panel_type:4;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */

@@ -1863,7 +1876,7 @@ struct drm_i915_private {
struct intel_l3_parity l3_parity;

/* Cannot be determined by PCIID. You must always read a register. */
size_t ellc_size;
u32 edram_cap;

/* gen6+ rps state */
struct intel_gen6_power_mgmt rps;
@@ -1911,6 +1924,7 @@ struct drm_i915_private {
* crappiness (can't read out DPLL_MD for pipes B & C).
*/
u32 chv_dpll_md[I915_MAX_PIPES];
u32 bxt_phy_grc;

u32 suspend_count;
bool suspended_to_idle;
@@ -2237,6 +2251,7 @@ struct drm_i915_gem_request {
/** On Which ring this request was generated */
struct drm_i915_private *i915;
struct intel_engine_cs *engine;
unsigned reset_counter;

/** GEM sequence number associated with the previous request,
* when the HWS breadcrumb is equal to this the GPU is processing
@@ -2317,7 +2332,6 @@ struct drm_i915_gem_request {
struct drm_i915_gem_request * __must_check
i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx);
void i915_gem_request_cancel(struct drm_i915_gem_request *req);
void i915_gem_request_free(struct kref *req_ref);
int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
struct drm_file *file);
@@ -2487,6 +2501,7 @@ struct drm_i915_cmd_table {
__p; \
})
#define INTEL_INFO(p) (&__I915__(p)->info)
#define INTEL_GEN(p) (INTEL_INFO(p)->gen)
#define INTEL_DEVID(p) (INTEL_INFO(p)->device_id)
#define INTEL_REVID(p) (__I915__(p)->dev->pdev->revision)

@@ -2613,8 +2628,9 @@ struct drm_i915_cmd_table {
#define HAS_VEBOX(dev) (INTEL_INFO(dev)->ring_mask & VEBOX_RING)
#define HAS_LLC(dev) (INTEL_INFO(dev)->has_llc)
#define HAS_SNOOP(dev) (INTEL_INFO(dev)->has_snoop)
#define HAS_EDRAM(dev) (__I915__(dev)->edram_cap & EDRAM_ENABLED)
#define HAS_WT(dev) ((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \
__I915__(dev)->ellc_size)
HAS_EDRAM(dev))
#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)

#define HAS_HW_CONTEXTS(dev) (INTEL_INFO(dev)->gen >= 6)
@@ -2631,8 +2647,9 @@ struct drm_i915_cmd_table {

/* WaRsDisableCoarsePowerGating:skl,bxt */
#define NEEDS_WaRsDisableCoarsePowerGating(dev) (IS_BXT_REVID(dev, 0, BXT_REVID_A1) || \
((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && \
IS_SKL_REVID(dev, 0, SKL_REVID_F0)))
IS_SKL_GT3(dev) || \
IS_SKL_GT4(dev))

/*
* dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
* even when in MSI mode. This results in spurious interrupt warnings if the
@@ -2667,7 +2684,7 @@ struct drm_i915_cmd_table {
#define HAS_RUNTIME_PM(dev) (IS_GEN6(dev) || IS_HASWELL(dev) || \
IS_BROADWELL(dev) || IS_VALLEYVIEW(dev) || \
IS_CHERRYVIEW(dev) || IS_SKYLAKE(dev) || \
IS_KABYLAKE(dev))
IS_KABYLAKE(dev) || IS_BROXTON(dev))
#define HAS_RC6(dev) (INTEL_INFO(dev)->gen >= 6)
#define HAS_RC6p(dev) (INTEL_INFO(dev)->gen == 6 || IS_IVYBRIDGE(dev))

@@ -2791,6 +2808,8 @@ void intel_uncore_forcewake_get__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
void intel_uncore_forcewake_put__locked(struct drm_i915_private *dev_priv,
enum forcewake_domains domains);
u64 intel_uncore_edram_size(struct drm_i915_private *dev_priv);

void assert_forcewakes_inactive(struct drm_i915_private *dev_priv);
static inline bool intel_vgpu_active(struct drm_device *dev)
{
@@ -2869,7 +2888,6 @@ int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_execbuffer_move_to_active(struct list_head *vmas,
struct drm_i915_gem_request *req);
void i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params);
int i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
struct drm_i915_gem_execbuffer2 *args,
struct list_head *vmas);
@@ -3000,9 +3018,11 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
* pages and then returns a contiguous mapping of the backing storage into
* the kernel address space.
*
* The caller must hold the struct_mutex.
* The caller must hold the struct_mutex, and is responsible for calling
* i915_gem_object_unpin_map() when the mapping is no longer required.
*
* Returns the pointer through which to access the backing storage.
* Returns the pointer through which to access the mapped object, or an
* ERR_PTR() on error.
*/
void *__must_check i915_gem_object_pin_map(struct drm_i915_gem_object *obj);
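Since i915_gem_object_pin_map() can now return an ERR_PTR() rather than a bare pointer, callers must test with IS_ERR() before dereferencing. A minimal caller sketch under the documented rules above (the surrounding error-handling shape is assumed, not taken from this diff):

    void *vaddr;

    vaddr = i915_gem_object_pin_map(obj);   /* struct_mutex must be held */
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

    /* ... access the contiguous kernel mapping ... */

    i915_gem_object_unpin_map(obj);         /* release when no longer required */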
@@ -3069,23 +3089,45 @@ i915_gem_find_active_request(struct intel_engine_cs *engine);

bool i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_engine_cs *engine);
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);

static inline u32 i915_reset_counter(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter);
}

static inline bool __i915_reset_in_progress(u32 reset)
{
return unlikely(reset & I915_RESET_IN_PROGRESS_FLAG);
}

static inline bool __i915_reset_in_progress_or_wedged(u32 reset)
{
return unlikely(reset & (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
}

static inline bool __i915_terminally_wedged(u32 reset)
{
return unlikely(reset & I915_WEDGED);
}

static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
return __i915_reset_in_progress(i915_reset_counter(error));
}

static inline bool i915_reset_in_progress_or_wedged(struct i915_gpu_error *error)
{
return __i915_reset_in_progress_or_wedged(i915_reset_counter(error));
}

static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter) & I915_WEDGED;
return __i915_terminally_wedged(i915_reset_counter(error));
}

static inline u32 i915_reset_count(struct i915_gpu_error *error)
{
return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
return ((i915_reset_counter(error) & ~I915_WEDGED) + 1) / 2;
}
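The split between the __-prefixed helpers, which take a pre-sampled counter, and the atomic_read() wrappers lets a caller sample error->reset_counter once and test it consistently. A minimal sketch of that pattern, modelled on the reworked i915_gem_check_wedge() later in this diff:

    unsigned reset = i915_reset_counter(&dev_priv->gpu_error);

    if (__i915_terminally_wedged(reset))
            return -EIO;    /* a reset already failed, the GPU is gone */
    if (__i915_reset_in_progress(reset))
            return -EAGAIN; /* drop locks and retry after recovery */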
static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
@@ -3118,7 +3160,6 @@ void __i915_add_request(struct drm_i915_gem_request *req,

#define i915_add_request_no_flush(req) \
__i915_add_request(req, NULL, false)
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps);
@@ -3455,6 +3496,7 @@ extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
bool enable);
extern int intel_opregion_notify_adapter(struct drm_device *dev,
pci_power_t state);
extern int intel_opregion_get_panel_type(struct drm_device *dev);
#else
static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
static inline void intel_opregion_init(struct drm_device *dev) { return; }
@@ -3470,6 +3512,10 @@ intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
{
return 0;
}
static inline int intel_opregion_get_panel_type(struct drm_device *dev)
{
return -ENODEV;
}
#endif

/* intel_acpi.c */
@@ -32,14 +32,13 @@
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_mocs.h"
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>

#define RQ_BUG_ON(expr)

static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
static void
@@ -85,9 +84,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
{
int ret;

#define EXIT_COND (!i915_reset_in_progress(error) || \
i915_terminally_wedged(error))
if (EXIT_COND)
if (!i915_reset_in_progress(error))
return 0;

/*
@@ -96,17 +93,16 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_event_interruptible_timeout(error->reset_queue,
EXIT_COND,
!i915_reset_in_progress(error),
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
} else {
return 0;
}
#undef EXIT_COND

return 0;
}

int i915_mutex_lock_interruptible(struct drm_device *dev)
@@ -211,11 +207,10 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);

ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}

@@ -1110,27 +1105,19 @@ put_rpm:
return ret;
}

int
i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
static int
i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
{
if (i915_reset_in_progress(error)) {
if (__i915_terminally_wedged(reset_counter))
return -EIO;

if (__i915_reset_in_progress(reset_counter)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;

/* Recovery complete, but the reset failed ... */
if (i915_terminally_wedged(error))
return -EIO;

/*
* Check if GPU Reset is in progress - we need intel_ring_begin
* to work properly to reinit the hw state while the gpu is
* still marked as reset-in-progress. Handle this with a flag.
*/
if (!error->reload_in_reset)
return -EAGAIN;
return -EAGAIN;
}

return 0;
@@ -1224,7 +1211,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)

/**
* __i915_wait_request - wait until execution of request has finished
* @req: duh!
* @reset_counter: reset sequence associated with the given request
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
@@ -1239,7 +1225,6 @@ static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
* errno with remaining time filled in timeout argument.
*/
int __i915_wait_request(struct drm_i915_gem_request *req,
unsigned reset_counter,
bool interruptible,
s64 *timeout,
struct intel_rps_client *rps)
@@ -1300,13 +1285,14 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
prepare_to_wait(&engine->irq_queue, &wait, state);

/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
/* ... but upgrade the -EAGAIN to an -EIO if the gpu
* is truely gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret == 0)
ret = -EAGAIN;
* the request being submitted and now. If a reset has occurred,
* the request is effectively complete (we either are in the
* process of or have discarded the rendering and completely
* reset the GPU. The results of the request are lost and we
* are free to continue on with the original operation.
*/
if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
ret = 0;
break;
}

@@ -1458,26 +1444,15 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
int
i915_wait_request(struct drm_i915_gem_request *req)
{
struct drm_device *dev;
struct drm_i915_private *dev_priv;
struct drm_i915_private *dev_priv = req->i915;
bool interruptible;
int ret;

BUG_ON(req == NULL);

dev = req->engine->dev;
dev_priv = dev->dev_private;
interruptible = dev_priv->mm.interruptible;

BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));

ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;

ret = __i915_wait_request(req,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL, NULL);
ret = __i915_wait_request(req, interruptible, NULL, NULL);
if (ret)
return ret;

@@ -1521,7 +1496,7 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,

i915_gem_object_retire__read(obj, i);
}
RQ_BUG_ON(obj->active);
GEM_BUG_ON(obj->active);
}

return 0;
@@ -1552,7 +1527,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
unsigned reset_counter;
int ret, i, n = 0;

BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1561,12 +1535,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,

if (!obj->active)
return 0;

ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;

reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

if (readonly) {
struct drm_i915_gem_request *req;

@@ -1588,9 +1556,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
}

mutex_unlock(&dev->struct_mutex);
ret = 0;
for (i = 0; ret == 0 && i < n; i++)
ret = __i915_wait_request(requests[i], reset_counter, true,
NULL, rps);
ret = __i915_wait_request(requests[i], true, NULL, rps);
mutex_lock(&dev->struct_mutex);

for (i = 0; i < n; i++) {
@@ -1964,11 +1932,27 @@ out:
void
i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
/* Serialisation between user GTT access and our code depends upon
* revoking the CPU's PTE whilst the mutex is held. The next user
* pagefault then has to wait until we release the mutex.
*/
lockdep_assert_held(&obj->base.dev->struct_mutex);

if (!obj->fault_mappable)
return;

drm_vma_node_unmap(&obj->base.vma_node,
obj->base.dev->anon_inode->i_mapping);

/* Ensure that the CPU's PTE are revoked and there are not outstanding
* memory transactions from userspace before we return. The TLB
* flushing implied above by changing the PTE above *should* be
* sufficient, an extra barrier here just provides us with a bit
* of paranoid documentation about our requirement to serialise
* memory writes before touching registers / GSM.
*/
wmb();

obj->fault_mappable = false;
}

@@ -2177,11 +2161,10 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
BUG_ON(obj->madv == __I915_MADV_PURGED);

ret = i915_gem_object_set_to_cpu_domain(obj, true);
if (ret) {
if (WARN_ON(ret)) {
/* In the event of a disaster, abandon all caches and
* hope for the best.
*/
WARN_ON(ret != -EIO);
i915_gem_clflush_object(obj, true);
obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
@@ -2470,8 +2453,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
static void
i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
{
RQ_BUG_ON(obj->last_write_req == NULL);
RQ_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
GEM_BUG_ON(obj->last_write_req == NULL);
GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));

i915_gem_request_assign(&obj->last_write_req, NULL);
intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@ -2482,8 +2465,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
{
struct i915_vma *vma;

RQ_BUG_ON(obj->last_read_req[ring] == NULL);
RQ_BUG_ON(!(obj->active & (1 << ring)));
GEM_BUG_ON(obj->last_read_req[ring] == NULL);
GEM_BUG_ON(!(obj->active & (1 << ring)));

list_del_init(&obj->engine_list[ring]);
i915_gem_request_assign(&obj->last_read_req[ring], NULL);
@@ -2743,6 +2726,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
struct drm_i915_gem_request **req_out)
{
struct drm_i915_private *dev_priv = to_i915(engine->dev);
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;

@@ -2751,6 +2735,14 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,

*req_out = NULL;

/* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
* EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
* and restart.
*/
ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
if (ret)
return ret;

req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;
@@ -2762,6 +2754,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
kref_init(&req->ref);
req->i915 = dev_priv;
req->engine = engine;
req->reset_counter = reset_counter;
req->ctx = ctx;
i915_gem_context_reference(req->ctx);

@@ -2791,7 +2784,8 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* fully prepared. Thus it can be cleaned up using the proper
* free code.
*/
i915_gem_request_cancel(req);
intel_ring_reserved_space_cancel(req->ringbuf);
i915_gem_request_unreference(req);
return ret;
}

@@ -2828,13 +2822,6 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
return err ? ERR_PTR(err) : req;
}

void i915_gem_request_cancel(struct drm_i915_gem_request *req)
{
intel_ring_reserved_space_cancel(req->ringbuf);

i915_gem_request_unreference(req);
}

struct drm_i915_gem_request *
i915_gem_find_active_request(struct intel_engine_cs *engine)
{
@@ -3140,11 +3127,9 @@ retire:
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct drm_i915_gem_request *req[I915_NUM_ENGINES];
unsigned reset_counter;
int i, n = 0;
int ret;

@@ -3178,7 +3163,6 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}

drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);

for (i = 0; i < I915_NUM_ENGINES; i++) {
if (obj->last_read_req[i] == NULL)
@@ -3191,7 +3175,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)

for (i = 0; i < n; i++) {
if (ret == 0)
ret = __i915_wait_request(req[i], reset_counter, true,
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
i915_gem_request_unreference__unlocked(req[i]);
@@ -3223,7 +3207,6 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,

if (!i915_semaphore_is_enabled(obj->base.dev)) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
atomic_read(&i915->gpu_error.reset_counter),
i915->mm.interruptible,
NULL,
&i915->rps.semaphores);
@@ -3344,9 +3327,6 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;

/* Wait for any direct GTT access to complete */
mb();

old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;

@@ -3451,12 +3431,9 @@ int i915_gpu_idle(struct drm_device *dev)
return PTR_ERR(req);

ret = i915_switch_context(req);
if (ret) {
i915_gem_request_cancel(req);
return ret;
}

i915_add_request_no_flush(req);
if (ret)
return ret;
}

ret = intel_engine_idle(engine);
@@ -4179,16 +4156,15 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
struct drm_i915_file_private *file_priv = file->driver_priv;
unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
struct drm_i915_gem_request *request, *target = NULL;
unsigned reset_counter;
int ret;

ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;

ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
if (ret)
return ret;
/* ABI: return -EIO if already wedged */
if (i915_terminally_wedged(&dev_priv->gpu_error))
return -EIO;

spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@@ -4204,7 +4180,6 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)

target = request;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
if (target)
i915_gem_request_reference(target);
spin_unlock(&file_priv->mm.lock);

@@ -4212,7 +4187,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (target == NULL)
return 0;

ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
ret = __i915_wait_request(target, true, NULL, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);

@@ -4372,7 +4347,6 @@ i915_gem_object_ggtt_unpin_view(struct drm_i915_gem_object *obj,
{
struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);

BUG_ON(!vma);
WARN_ON(vma->pin_count == 0);
WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));

@@ -4889,7 +4863,7 @@ i915_gem_init_hw(struct drm_device *dev)
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

if (dev_priv->ellc_size)
if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));

if (IS_HASWELL(dev))
@@ -4933,6 +4907,8 @@ i915_gem_init_hw(struct drm_device *dev)
goto out;
}

intel_mocs_init_l3cc_table(dev);

/* We can't enable contexts until all firmware is loaded */
if (HAS_GUC_UCODE(dev)) {
ret = intel_guc_ucode_load(dev);
@@ -4958,34 +4934,33 @@ i915_gem_init_hw(struct drm_device *dev)
req = i915_gem_request_alloc(engine, NULL);
if (IS_ERR(req)) {
ret = PTR_ERR(req);
i915_gem_cleanup_engines(dev);
goto out;
break;
}

if (engine->id == RCS) {
for (j = 0; j < NUM_L3_SLICES(dev); j++)
i915_gem_l3_remap(req, j);
for (j = 0; j < NUM_L3_SLICES(dev); j++) {
ret = i915_gem_l3_remap(req, j);
if (ret)
goto err_request;
}
}

ret = i915_ppgtt_init_ring(req);
if (ret && ret != -EIO) {
DRM_ERROR("PPGTT enable %s failed %d\n",
engine->name, ret);
i915_gem_request_cancel(req);
i915_gem_cleanup_engines(dev);
goto out;
}
if (ret)
goto err_request;

ret = i915_gem_context_enable(req);
if (ret && ret != -EIO) {
DRM_ERROR("Context enable %s failed %d\n",
engine->name, ret);
i915_gem_request_cancel(req);
i915_gem_cleanup_engines(dev);
goto out;
}
if (ret)
goto err_request;

err_request:
i915_add_request_no_flush(req);
if (ret) {
DRM_ERROR("Failed to enable %s, error=%d\n",
engine->name, ret);
i915_gem_cleanup_engines(dev);
break;
}
}

out:
@@ -0,0 +1,34 @@
+/*
+ * Copyright © 2016 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __I915_GEM_H__
+#define __I915_GEM_H__
+
+#ifdef CONFIG_DRM_I915_DEBUG_GEM
+#define GEM_BUG_ON(expr) BUG_ON(expr)
+#else
+#define GEM_BUG_ON(expr)
+#endif
+
+#endif /* __I915_GEM_H__ */
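For illustration, a minimal use of the new macro — the caller below is hypothetical, not part of the patch. With CONFIG_DRM_I915_DEBUG_GEM unset, GEM_BUG_ON() expands to nothing at all (not even the expression is evaluated), so unlike WARN_ON() it must never carry side effects:

	/* sketch only: a hypothetical GEM-internal helper using GEM_BUG_ON() */
	static void example_track_vma(struct i915_vma *vma)
	{
		GEM_BUG_ON(vma == NULL);	/* BUG_ON() on debug builds... */
		GEM_BUG_ON(!vma->obj);		/* ...compiled away otherwise */
	}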
@@ -342,7 +342,7 @@ void i915_gem_context_reset(struct drm_device *dev)
 		struct intel_context *ctx;
 
 		list_for_each_entry(ctx, &dev_priv->context_list, link)
-			intel_lr_context_reset(dev, ctx);
+			intel_lr_context_reset(dev_priv, ctx);
 	}
 
 	for (i = 0; i < I915_NUM_ENGINES; i++) {
@@ -539,7 +539,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 
 	len = 4;
 	if (INTEL_INFO(engine->dev)->gen >= 7)
-		len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+		len += 2 + (num_rings ? 4*num_rings + 6 : 0);
 
 	ret = intel_ring_begin(req, len);
 	if (ret)
@@ -579,6 +579,7 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	if (INTEL_INFO(engine->dev)->gen >= 7) {
 		if (num_rings) {
 			struct intel_engine_cs *signaller;
+			i915_reg_t last_reg = {}; /* keep gcc quiet */
 
 			intel_ring_emit(engine,
 					MI_LOAD_REGISTER_IMM(num_rings));
@@ -586,11 +587,19 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 				if (signaller == engine)
 					continue;
 
-				intel_ring_emit_reg(engine,
-						    RING_PSMI_CTL(signaller->mmio_base));
+				last_reg = RING_PSMI_CTL(signaller->mmio_base);
+				intel_ring_emit_reg(engine, last_reg);
 				intel_ring_emit(engine,
 						_MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
 			}
+
+			/* Insert a delay before the next switch! */
+			intel_ring_emit(engine,
+					MI_STORE_REGISTER_MEM |
+					MI_SRM_LRM_GLOBAL_GTT);
+			intel_ring_emit_reg(engine, last_reg);
+			intel_ring_emit(engine, engine->scratch.gtt_offset);
+			intel_ring_emit(engine, MI_NOOP);
 		}
 		intel_ring_emit(engine, MI_ARB_ON_OFF | MI_ARB_ENABLE);
 	}
@@ -600,50 +609,48 @@ mi_set_context(struct drm_i915_gem_request *req, u32 hw_flags)
 	return ret;
 }
 
-static inline bool should_skip_switch(struct intel_engine_cs *engine,
-				      struct intel_context *from,
-				      struct intel_context *to)
+static inline bool skip_rcs_switch(struct intel_engine_cs *engine,
+				   struct intel_context *to)
 {
 	if (to->remap_slice)
 		return false;
 
-	if (to->ppgtt && from == to &&
-	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
-		return true;
+	if (!to->legacy_hw_ctx.initialized)
+		return false;
 
-	return false;
+	if (to->ppgtt &&
+	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+		return false;
+
+	return to == engine->last_context;
 }
 
 static bool
 needs_pd_load_pre(struct intel_engine_cs *engine, struct intel_context *to)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
-
 	if (!to->ppgtt)
 		return false;
 
-	if (INTEL_INFO(engine->dev)->gen < 8)
+	if (engine->last_context == to &&
+	    !(intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings))
+		return false;
+
+	if (engine->id != RCS)
 		return true;
 
-	if (engine != &dev_priv->engine[RCS])
+	if (INTEL_INFO(engine->dev)->gen < 8)
 		return true;
 
 	return false;
 }
 
 static bool
-needs_pd_load_post(struct intel_engine_cs *engine, struct intel_context *to,
-		   u32 hw_flags)
+needs_pd_load_post(struct intel_context *to, u32 hw_flags)
 {
-	struct drm_i915_private *dev_priv = engine->dev->dev_private;
-
 	if (!to->ppgtt)
 		return false;
 
-	if (!IS_GEN8(engine->dev))
-		return false;
-
-	if (engine != &dev_priv->engine[RCS])
+	if (!IS_GEN8(to->i915))
 		return false;
 
 	if (hw_flags & MI_RESTORE_INHIBIT)
@@ -652,60 +659,33 @@ needs_pd_load_post(struct intel_context *to, u32 hw_flags)
 	return false;
 }
 
-static int do_switch(struct drm_i915_gem_request *req)
+static int do_rcs_switch(struct drm_i915_gem_request *req)
 {
 	struct intel_context *to = req->ctx;
 	struct intel_engine_cs *engine = req->engine;
-	struct drm_i915_private *dev_priv = req->i915;
-	struct intel_context *from = engine->last_context;
-	u32 hw_flags = 0;
-	bool uninitialized = false;
+	struct intel_context *from;
+	u32 hw_flags;
 	int ret, i;
 
-	if (from != NULL && engine == &dev_priv->engine[RCS]) {
-		BUG_ON(from->legacy_hw_ctx.rcs_state == NULL);
-		BUG_ON(!i915_gem_obj_is_pinned(from->legacy_hw_ctx.rcs_state));
-	}
-
-	if (should_skip_switch(engine, from, to))
+	if (skip_rcs_switch(engine, to))
 		return 0;
 
 	/* Trying to pin first makes error handling easier. */
-	if (engine == &dev_priv->engine[RCS]) {
-		ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
-					    get_context_alignment(engine->dev),
-					    0);
-		if (ret)
-			return ret;
-	}
+	ret = i915_gem_obj_ggtt_pin(to->legacy_hw_ctx.rcs_state,
+				    get_context_alignment(engine->dev),
+				    0);
+	if (ret)
+		return ret;
 
 	/*
 	 * Pin can switch back to the default context if we end up calling into
 	 * evict_everything - as a last ditch gtt defrag effort that also
 	 * switches to the default context. Hence we need to reload from here.
+	 *
+	 * XXX: Doing so is painfully broken!
 	 */
 	from = engine->last_context;
 
-	if (needs_pd_load_pre(engine, to)) {
-		/* Older GENs and non render rings still want the load first,
-		 * "PP_DCLV followed by PP_DIR_BASE register through Load
-		 * Register Immediate commands in Ring Buffer before submitting
-		 * a context."*/
-		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
-		if (ret)
-			goto unpin_out;
-
-		/* Doing a PD load always reloads the page dirs */
-		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-	}
-
-	if (engine != &dev_priv->engine[RCS]) {
-		if (from)
-			i915_gem_context_unreference(from);
-		goto done;
-	}
-
 	/*
 	 * Clear this page out of any CPU caches for coherent swap-in/out. Note
 	 * that thanks to write = false in this call and us not setting any gpu
@@ -718,53 +698,37 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 	if (ret)
 		goto unpin_out;
 
-	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to)) {
-		hw_flags |= MI_RESTORE_INHIBIT;
+	if (needs_pd_load_pre(engine, to)) {
+		/* Older GENs and non render rings still want the load first,
+		 * "PP_DCLV followed by PP_DIR_BASE register through Load
+		 * Register Immediate commands in Ring Buffer before submitting
+		 * a context."*/
+		trace_switch_mm(engine, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		if (ret)
+			goto unpin_out;
+	}
+
+	if (!to->legacy_hw_ctx.initialized || i915_gem_context_is_default(to))
 		/* NB: If we inhibit the restore, the context is not allowed to
 		 * die because future work may end up depending on valid address
 		 * space. This means we must enforce that a page table load
 		 * occur when this occurs. */
-	} else if (to->ppgtt &&
-		   (intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)) {
-		hw_flags |= MI_FORCE_RESTORE;
-		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
-	}
+		hw_flags = MI_RESTORE_INHIBIT;
+	else if (to->ppgtt &&
+		 intel_engine_flag(engine) & to->ppgtt->pd_dirty_rings)
+		hw_flags = MI_FORCE_RESTORE;
+	else
+		hw_flags = 0;
 
 	/* We should never emit switch_mm more than once */
 	WARN_ON(needs_pd_load_pre(engine, to) &&
-		needs_pd_load_post(engine, to, hw_flags));
+		needs_pd_load_post(to, hw_flags));
 
-	ret = mi_set_context(req, hw_flags);
-	if (ret)
-		goto unpin_out;
-
-	/* GEN8 does *not* require an explicit reload if the PDPs have been
-	 * setup, and we do not wish to move them.
-	 */
-	if (needs_pd_load_post(engine, to, hw_flags)) {
-		trace_switch_mm(engine, to);
-		ret = to->ppgtt->switch_mm(to->ppgtt, req);
-		/* The hardware context switch is emitted, but we haven't
-		 * actually changed the state - so it's probably safe to bail
-		 * here. Still, let the user know something dangerous has
-		 * happened.
-		 */
-		if (ret) {
-			DRM_ERROR("Failed to change address space on context switch\n");
-			goto unpin_out;
-		}
-	}
-
-	for (i = 0; i < MAX_L3_SLICES; i++) {
-		if (!(to->remap_slice & (1<<i)))
-			continue;
-
-		ret = i915_gem_l3_remap(req, i);
-		/* If it failed, try again next round */
+	if (to != from || (hw_flags & MI_FORCE_RESTORE)) {
+		ret = mi_set_context(req, hw_flags);
 		if (ret)
-			DRM_DEBUG_DRIVER("L3 remapping failed\n");
-		else
-			to->remap_slice &= ~(1<<i);
+			goto unpin_out;
 	}
 
 	/* The backing object for the context is done after switching to the
@@ -789,27 +753,51 @@ static int do_rcs_switch(struct drm_i915_gem_request *req)
 		i915_gem_object_ggtt_unpin(from->legacy_hw_ctx.rcs_state);
 		i915_gem_context_unreference(from);
 	}
 
-	uninitialized = !to->legacy_hw_ctx.initialized;
-	to->legacy_hw_ctx.initialized = true;
-
-done:
 	i915_gem_context_reference(to);
 	engine->last_context = to;
 
-	if (uninitialized) {
+	/* GEN8 does *not* require an explicit reload if the PDPs have been
+	 * setup, and we do not wish to move them.
+	 */
+	if (needs_pd_load_post(to, hw_flags)) {
+		trace_switch_mm(engine, to);
+		ret = to->ppgtt->switch_mm(to->ppgtt, req);
+		/* The hardware context switch is emitted, but we haven't
+		 * actually changed the state - so it's probably safe to bail
+		 * here. Still, let the user know something dangerous has
+		 * happened.
+		 */
+		if (ret)
+			return ret;
+	}
+
+	if (to->ppgtt)
+		to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+
+	for (i = 0; i < MAX_L3_SLICES; i++) {
+		if (!(to->remap_slice & (1<<i)))
+			continue;
+
+		ret = i915_gem_l3_remap(req, i);
+		if (ret)
+			return ret;
+
+		to->remap_slice &= ~(1<<i);
+	}
+
+	if (!to->legacy_hw_ctx.initialized) {
 		if (engine->init_context) {
 			ret = engine->init_context(req);
 			if (ret)
-				DRM_ERROR("ring init context: %d\n", ret);
+				return ret;
 		}
+		to->legacy_hw_ctx.initialized = true;
 	}
 
 	return 0;
 
 unpin_out:
-	if (engine->id == RCS)
-		i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
+	i915_gem_object_ggtt_unpin(to->legacy_hw_ctx.rcs_state);
 	return ret;
 }
 
@@ -834,17 +822,33 @@ int i915_switch_context(struct drm_i915_gem_request *req)
 	WARN_ON(i915.enable_execlists);
 	WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
 
-	if (req->ctx->legacy_hw_ctx.rcs_state == NULL) { /* We have the fake context */
-		if (req->ctx != engine->last_context) {
-			i915_gem_context_reference(req->ctx);
+	if (engine->id != RCS ||
+	    req->ctx->legacy_hw_ctx.rcs_state == NULL) {
+		struct intel_context *to = req->ctx;
+
+		if (needs_pd_load_pre(engine, to)) {
+			int ret;
+
+			trace_switch_mm(engine, to);
+			ret = to->ppgtt->switch_mm(to->ppgtt, req);
+			if (ret)
+				return ret;
+
+			/* Doing a PD load always reloads the page dirs */
+			to->ppgtt->pd_dirty_rings &= ~intel_engine_flag(engine);
+		}
+
+		if (to != engine->last_context) {
+			i915_gem_context_reference(to);
 			if (engine->last_context)
 				i915_gem_context_unreference(engine->last_context);
-			engine->last_context = req->ctx;
+			engine->last_context = to;
 		}
 
 		return 0;
 	}
 
-	return do_switch(req);
+	return do_rcs_switch(req);
 }
 
 static bool contexts_enabled(struct drm_device *dev)
 
@@ -1137,7 +1137,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas,
 	}
 }
 
-void
+static void
 i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
 {
 	/* Unconditionally force add_request to emit a full flush. */
@@ -1322,7 +1322,6 @@ i915_gem_ringbuffer_submission(struct i915_execbuffer_params *params,
 	trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags);
 
 	i915_gem_execbuffer_move_to_active(vmas, params->request);
-	i915_gem_execbuffer_retire_commands(params);
 
 	return 0;
 }
@@ -1624,7 +1623,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	ret = i915_gem_request_add_to_client(req, file);
 	if (ret)
-		goto err_batch_unpin;
+		goto err_request;
 
 	/*
 	 * Save assorted stuff away to pass through to *_submission().
@@ -1641,6 +1640,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	params->request = req;
 
 	ret = dev_priv->gt.execbuf_submit(params, args, &eb->vmas);
+err_request:
+	i915_gem_execbuffer_retire_commands(params);
 
 err_batch_unpin:
 	/*
@@ -1657,14 +1658,6 @@ err:
 	i915_gem_context_unreference(ctx);
 	eb_destroy(eb);
 
-	/*
-	 * If the request was created but not successfully submitted then it
-	 * must be freed again. If it was submitted then it is being tracked
-	 * on the active request list and no clean up is required here.
-	 */
-	if (ret && !IS_ERR_OR_NULL(req))
-		i915_gem_request_cancel(req);
-
 	mutex_unlock(&dev->struct_mutex);
 
 pre_mutex_err:
 
@@ -745,7 +745,7 @@ static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
 			num_entries--;
 		}
 
-		kunmap_px(ppgtt, pt);
+		kunmap_px(ppgtt, pt_vaddr);
 
 		pte = 0;
 		if (++pde == I915_PDES) {
@@ -905,11 +905,10 @@ static int gen8_init_scratch(struct i915_address_space *vm)
 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
 {
 	enum vgt_g2v_type msg;
-	struct drm_device *dev = ppgtt->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
 	int i;
 
-	if (USES_FULL_48BIT_PPGTT(dev)) {
+	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
 		u64 daddr = px_dma(&ppgtt->pml4);
 
 		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
@@ -3172,7 +3171,8 @@ int i915_ggtt_init_hw(struct drm_device *dev)
 	} else if (INTEL_INFO(dev)->gen < 8) {
 		ggtt->probe = gen6_gmch_probe;
 		ggtt->base.cleanup = gen6_gmch_remove;
-		if (IS_HASWELL(dev) && dev_priv->ellc_size)
+
+		if (HAS_EDRAM(dev))
 			ggtt->base.pte_encode = iris_pte_encode;
 		else if (IS_HASWELL(dev))
 			ggtt->base.pte_encode = hsw_pte_encode;
 
@@ -70,6 +70,10 @@ static bool swap_available(void)
 
 static bool can_release_pages(struct drm_i915_gem_object *obj)
 {
+	/* Only shmemfs objects are backed by swap */
+	if (!obj->base.filp)
+		return false;
+
 	/* Only report true if by unbinding the object and putting its pages
 	 * we can actually make forward progress towards freeing physical
 	 * pages.
@@ -336,7 +340,7 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 		container_of(nb, struct drm_i915_private, mm.oom_notifier);
 	struct shrinker_lock_uninterruptible slu;
 	struct drm_i915_gem_object *obj;
-	unsigned long pinned, bound, unbound, freed_pages;
+	unsigned long unevictable, bound, unbound, freed_pages;
 
 	if (!i915_gem_shrinker_lock_uninterruptible(dev_priv, &slu, 5000))
 		return NOTIFY_DONE;
@@ -347,33 +351,28 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
 	 * assert that there are no objects with pinned pages that are not
 	 * being pointed to by hardware.
 	 */
-	unbound = bound = pinned = 0;
+	unbound = bound = unevictable = 0;
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
-		if (!obj->base.filp) /* not backed by a freeable object */
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
+		if (!can_release_pages(obj))
+			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
-			unbound += obj->base.size;
+			unbound += obj->base.size >> PAGE_SHIFT;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->base.filp)
-			continue;
-
-		if (obj->pages_pin_count)
-			pinned += obj->base.size;
+		if (!can_release_pages(obj))
+			unevictable += obj->base.size >> PAGE_SHIFT;
 		else
-			bound += obj->base.size;
+			bound += obj->base.size >> PAGE_SHIFT;
 	}
 
 	i915_gem_shrinker_unlock_uninterruptible(dev_priv, &slu);
 
 	if (freed_pages || unbound || bound)
-		pr_info("Purging GPU memory, %lu bytes freed, %lu bytes still pinned.\n",
-			freed_pages << PAGE_SHIFT, pinned);
+		pr_info("Purging GPU memory, %lu pages freed, "
+			"%lu pages still pinned.\n",
+			freed_pages, unevictable);
 	if (unbound || bound)
-		pr_err("%lu and %lu bytes still available in the "
+		pr_err("%lu and %lu pages still available in the "
 		       "bound and unbound GPU page lists.\n",
 		       bound, unbound);
 
@@ -95,9 +95,9 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	u32 base;
 
 	/* Almost universally we can find the Graphics Base of Stolen Memory
-	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
-	 * machines this is also mirrored in the bridge device at different
-	 * locations, or in the MCHBAR.
+	 * at register BSM (0x5c) in the igfx configuration space. On a few
+	 * (desktop) machines this is also mirrored in the bridge device at
+	 * different locations, or in the MCHBAR.
 	 *
 	 * On 865 we just check the TOUD register.
 	 *
@@ -107,9 +107,11 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
 	 */
 	base = 0;
 	if (INTEL_INFO(dev)->gen >= 3) {
-		/* Read Graphics Base of Stolen Memory directly */
-		pci_read_config_dword(dev->pdev, 0x5c, &base);
-		base &= ~((1<<20) - 1);
+		u32 bsm;
+
+		pci_read_config_dword(dev->pdev, BSM, &bsm);
+
+		base = bsm & BSM_MASK;
 	} else if (IS_I865G(dev)) {
 		u16 toud = 0;
 
@@ -34,7 +34,7 @@
 
 struct i915_mm_struct {
 	struct mm_struct *mm;
-	struct drm_device *dev;
+	struct drm_i915_private *i915;
 	struct i915_mmu_notifier *mn;
 	struct hlist_node node;
 	struct kref kref;
@@ -49,6 +49,7 @@ struct i915_mmu_notifier {
 	struct hlist_node node;
 	struct mmu_notifier mn;
 	struct rb_root objects;
+	struct workqueue_struct *wq;
 };
 
 struct i915_mmu_object {
@@ -60,6 +61,37 @@ struct i915_mmu_object {
 	bool attached;
 };
 
+static void wait_rendering(struct drm_i915_gem_object *obj)
+{
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
+	int i, n;
+
+	if (!obj->active)
+		return;
+
+	n = 0;
+	for (i = 0; i < I915_NUM_ENGINES; i++) {
+		struct drm_i915_gem_request *req;
+
+		req = obj->last_read_req[i];
+		if (req == NULL)
+			continue;
+
+		requests[n++] = i915_gem_request_reference(req);
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		__i915_wait_request(requests[i], false, NULL, NULL);
+
+	mutex_lock(&dev->struct_mutex);
+
+	for (i = 0; i < n; i++)
+		i915_gem_request_unreference(requests[i]);
+}
+
 static void cancel_userptr(struct work_struct *work)
 {
 	struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
@@ -75,13 +107,13 @@ static void cancel_userptr(struct work_struct *work)
 		struct i915_vma *vma, *tmp;
 		bool was_interruptible;
 
+		wait_rendering(obj);
+
 		was_interruptible = dev_priv->mm.interruptible;
 		dev_priv->mm.interruptible = false;
 
-		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
-			int ret = i915_vma_unbind(vma);
-			WARN_ON(ret && ret != -EIO);
-		}
+		list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link)
+			WARN_ON(i915_vma_unbind(vma));
 		WARN_ON(i915_gem_object_put_pages(obj));
 
 		dev_priv->mm.interruptible = was_interruptible;
@@ -140,7 +172,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 		 */
 		mo = container_of(it, struct i915_mmu_object, it);
 		if (kref_get_unless_zero(&mo->obj->base.refcount))
-			schedule_work(&mo->work);
+			queue_work(mn->wq, &mo->work);
 
 		list_add(&mo->link, &cancelled);
 		it = interval_tree_iter_next(it, start, end);
@@ -148,6 +180,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
 	list_for_each_entry(mo, &cancelled, link)
 		del_object(mo);
 	spin_unlock(&mn->lock);
+
+	flush_workqueue(mn->wq);
 }
 
 static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
@@ -167,10 +201,16 @@ i915_mmu_notifier_create(struct mm_struct *mm)
 	spin_lock_init(&mn->lock);
 	mn->mn.ops = &i915_gem_userptr_notifier;
 	mn->objects = RB_ROOT;
+	mn->wq = alloc_workqueue("i915-userptr-release", WQ_UNBOUND, 0);
+	if (mn->wq == NULL) {
+		kfree(mn);
+		return ERR_PTR(-ENOMEM);
+	}
 
 	/* Protected by mmap_sem (write-lock) */
 	ret = __mmu_notifier_register(&mn->mn, mm);
 	if (ret) {
+		destroy_workqueue(mn->wq);
 		kfree(mn);
 		return ERR_PTR(ret);
 	}
@@ -205,13 +245,13 @@ i915_mmu_notifier_find(struct i915_mm_struct *mm)
 		return mn;
 
 	down_write(&mm->mm->mmap_sem);
-	mutex_lock(&to_i915(mm->dev)->mm_lock);
+	mutex_lock(&mm->i915->mm_lock);
 	if ((mn = mm->mn) == NULL) {
 		mn = i915_mmu_notifier_create(mm->mm);
 		if (!IS_ERR(mn))
 			mm->mn = mn;
 	}
-	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	mutex_unlock(&mm->i915->mm_lock);
 	up_write(&mm->mm->mmap_sem);
 
 	return mn;
@@ -256,6 +296,7 @@ i915_mmu_notifier_free(struct i915_mmu_notifier *mn,
 		return;
 
 	mmu_notifier_unregister(&mn->mn, mm);
+	destroy_workqueue(mn->wq);
 	kfree(mn);
 }
 
@@ -327,7 +368,7 @@ i915_gem_userptr_init__mm_struct(struct drm_i915_gem_object *obj)
 	}
 
 	kref_init(&mm->kref);
-	mm->dev = obj->base.dev;
+	mm->i915 = to_i915(obj->base.dev);
 
 	mm->mm = current->mm;
 	atomic_inc(&current->mm->mm_count);
@@ -362,7 +403,7 @@ __i915_mm_struct_free(struct kref *kref)
 
 	/* Protected by dev_priv->mm_lock */
 	hash_del(&mm->node);
-	mutex_unlock(&to_i915(mm->dev)->mm_lock);
+	mutex_unlock(&mm->i915->mm_lock);
 
 	INIT_WORK(&mm->work, __i915_mm_struct_free__worker);
 	schedule_work(&mm->work);
@@ -498,19 +539,24 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	if (pvec != NULL) {
 		struct mm_struct *mm = obj->userptr.mm->mm;
 
-		down_read(&mm->mmap_sem);
-		while (pinned < npages) {
-			ret = get_user_pages_remote(work->task, mm,
-					obj->userptr.ptr + pinned * PAGE_SIZE,
-					npages - pinned,
-					!obj->userptr.read_only, 0,
-					pvec + pinned, NULL);
-			if (ret < 0)
-				break;
-
-			pinned += ret;
+		ret = -EFAULT;
+		if (atomic_inc_not_zero(&mm->mm_users)) {
+			down_read(&mm->mmap_sem);
+			while (pinned < npages) {
+				ret = get_user_pages_remote
+					(work->task, mm,
+					 obj->userptr.ptr + pinned * PAGE_SIZE,
+					 npages - pinned,
+					 !obj->userptr.read_only, 0,
+					 pvec + pinned, NULL);
+				if (ret < 0)
+					break;
+
+				pinned += ret;
+			}
+			up_read(&mm->mmap_sem);
+			mmput(mm);
 		}
-		up_read(&mm->mmap_sem);
 	}
 
 	mutex_lock(&dev->struct_mutex);
 
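The key change in the hunk above is the atomic_inc_not_zero(&mm->mm_users) guard: the worker can run after the owning process has already exited, so it only pins the mm if it is still live, and drops the pin with mmput() when done. A minimal sketch of the pattern, using a hypothetical helper name and only the core mm API:

	/* hypothetical helper illustrating the mm_users pinning pattern */
	static int with_live_mm(struct mm_struct *mm,
				int (*fn)(struct mm_struct *))
	{
		int ret = -EFAULT;		/* owner already exited */

		if (atomic_inc_not_zero(&mm->mm_users)) {
			ret = fn(mm);		/* mm cannot be freed here */
			mmput(mm);		/* may trigger real teardown */
		}
		return ret;
	}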
@@ -179,15 +179,11 @@ static void guc_init_doorbell(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
 	struct guc_doorbell_info *doorbell;
-	void *base;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	doorbell = base + client->doorbell_offset;
+	doorbell = client->client_base + client->doorbell_offset;
 
-	doorbell->db_status = 1;
+	doorbell->db_status = GUC_DOORBELL_ENABLED;
 	doorbell->cookie = 0;
-
-	kunmap_atomic(base);
 }
 
 static int guc_ring_doorbell(struct i915_guc_client *gc)
@@ -195,11 +191,9 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 	struct guc_process_desc *desc;
 	union guc_doorbell_qw db_cmp, db_exc, db_ret;
 	union guc_doorbell_qw *db;
-	void *base;
 	int attempt = 2, ret = -EAGAIN;
 
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-	desc = base + gc->proc_desc_offset;
+	desc = gc->client_base + gc->proc_desc_offset;
 
 	/* Update the tail so it is visible to GuC */
 	desc->tail = gc->wq_tail;
@@ -215,7 +209,7 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 	db_exc.cookie = 1;
 
 	/* pointer of current doorbell cacheline */
-	db = base + gc->doorbell_offset;
+	db = gc->client_base + gc->doorbell_offset;
 
 	while (attempt--) {
 		/* lets ring the doorbell */
@@ -244,10 +238,6 @@ static int guc_ring_doorbell(struct i915_guc_client *gc)
 		db_exc.cookie = 1;
 	}
 
-	/* Finally, update the cached copy of the GuC's WQ head */
-	gc->wq_head = desc->head;
-
-	kunmap_atomic(base);
 	return ret;
 }
 
@@ -256,16 +246,12 @@ static void guc_disable_doorbell(struct intel_guc *guc,
 {
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct guc_doorbell_info *doorbell;
-	void *base;
 	i915_reg_t drbreg = GEN8_DRBREGL(client->doorbell_id);
 	int value;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	doorbell = base + client->doorbell_offset;
+	doorbell = client->client_base + client->doorbell_offset;
 
-	doorbell->db_status = 0;
-
-	kunmap_atomic(base);
+	doorbell->db_status = GUC_DOORBELL_DISABLED;
 
 	I915_WRITE(drbreg, I915_READ(drbreg) & ~GEN8_DRB_VALID);
 
@@ -341,10 +327,8 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 			       struct i915_guc_client *client)
 {
 	struct guc_process_desc *desc;
-	void *base;
 
-	base = kmap_atomic(i915_gem_object_get_page(client->client_obj, 0));
-	desc = base + client->proc_desc_offset;
+	desc = client->client_base + client->proc_desc_offset;
 
 	memset(desc, 0, sizeof(*desc));
 
@@ -361,8 +345,6 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 	desc->wq_size_bytes = client->wq_size;
 	desc->wq_status = WQ_STATUS_ACTIVE;
 	desc->priority = client->priority;
-
-	kunmap_atomic(base);
 }
 
 /*
@@ -376,12 +358,14 @@ static void guc_init_proc_desc(struct intel_guc *guc,
 static void guc_init_ctx_desc(struct intel_guc *guc,
 			      struct i915_guc_client *client)
 {
+	struct drm_i915_gem_object *client_obj = client->client_obj;
 	struct drm_i915_private *dev_priv = guc_to_i915(guc);
 	struct intel_engine_cs *engine;
 	struct intel_context *ctx = client->owner;
 	struct guc_context_desc desc;
 	struct sg_table *sg;
 	enum intel_engine_id id;
+	u32 gfx_addr;
 
 	memset(&desc, 0, sizeof(desc));
 
@@ -410,16 +394,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 		lrc->context_desc = (u32)ctx_desc;
 
 		/* The state page is after PPHWSP */
-		lrc->ring_lcra = i915_gem_obj_ggtt_offset(obj) +
-				LRC_STATE_PN * PAGE_SIZE;
+		gfx_addr = i915_gem_obj_ggtt_offset(obj);
+		lrc->ring_lcra = gfx_addr + LRC_STATE_PN * PAGE_SIZE;
 		lrc->context_id = (client->ctx_index << GUC_ELC_CTXID_OFFSET) |
 				(engine->guc_id << GUC_ELC_ENGINE_OFFSET);
 
 		obj = ctx->engine[id].ringbuf->obj;
+		gfx_addr = i915_gem_obj_ggtt_offset(obj);
 
-		lrc->ring_begin = i915_gem_obj_ggtt_offset(obj);
-		lrc->ring_end = lrc->ring_begin + obj->base.size - 1;
-		lrc->ring_next_free_location = lrc->ring_begin;
+		lrc->ring_begin = gfx_addr;
+		lrc->ring_end = gfx_addr + obj->base.size - 1;
+		lrc->ring_next_free_location = gfx_addr;
 		lrc->ring_current_tail_pointer_value = 0;
 
 		desc.engines_used |= (1 << engine->guc_id);
@@ -428,22 +413,17 @@ static void guc_init_ctx_desc(struct intel_guc *guc,
 	WARN_ON(desc.engines_used == 0);
 
 	/*
-	 * The CPU address is only needed at certain points, so kmap_atomic on
-	 * demand instead of storing it in the ctx descriptor.
-	 * XXX: May make debug easier to have it mapped
+	 * The doorbell, process descriptor, and workqueue are all parts
+	 * of the client object, which the GuC will reference via the GGTT
 	 */
-	desc.db_trigger_cpu = 0;
-	desc.db_trigger_uk = client->doorbell_offset +
-		i915_gem_obj_ggtt_offset(client->client_obj);
-	desc.db_trigger_phy = client->doorbell_offset +
-		sg_dma_address(client->client_obj->pages->sgl);
-
-	desc.process_desc = client->proc_desc_offset +
-		i915_gem_obj_ggtt_offset(client->client_obj);
-
-	desc.wq_addr = client->wq_offset +
-		i915_gem_obj_ggtt_offset(client->client_obj);
-
+	gfx_addr = i915_gem_obj_ggtt_offset(client_obj);
+	desc.db_trigger_phy = sg_dma_address(client_obj->pages->sgl) +
+				client->doorbell_offset;
+	desc.db_trigger_cpu = (uintptr_t)client->client_base +
+				client->doorbell_offset;
+	desc.db_trigger_uk = gfx_addr + client->doorbell_offset;
+	desc.process_desc = gfx_addr + client->proc_desc_offset;
+	desc.wq_addr = gfx_addr + client->wq_offset;
 	desc.wq_size = client->wq_size;
 
 	/*
@@ -474,25 +454,16 @@ static void guc_fini_ctx_desc(struct intel_guc *guc,
 int i915_guc_wq_check_space(struct i915_guc_client *gc)
 {
 	struct guc_process_desc *desc;
-	void *base;
 	u32 size = sizeof(struct guc_wq_item);
 	int ret = -ETIMEDOUT, timeout_counter = 200;
 
 	if (!gc)
 		return 0;
 
-	/* Quickly return if wq space is available since last time we cache the
-	 * head position. */
-	if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size)
-		return 0;
-
-	base = kmap_atomic(i915_gem_object_get_page(gc->client_obj, 0));
-	desc = base + gc->proc_desc_offset;
+	desc = gc->client_base + gc->proc_desc_offset;
 
 	while (timeout_counter-- > 0) {
-		gc->wq_head = desc->head;
-
-		if (CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size) >= size) {
+		if (CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size) >= size) {
 			ret = 0;
 			break;
 		}
@@ -501,19 +472,19 @@ int i915_guc_wq_check_space(struct i915_guc_client *gc)
 		usleep_range(1000, 2000);
 	};
 
-	kunmap_atomic(base);
-
 	return ret;
 }
 
 static int guc_add_workqueue_item(struct i915_guc_client *gc,
 				  struct drm_i915_gem_request *rq)
 {
+	struct guc_process_desc *desc;
 	struct guc_wq_item *wqi;
-	void *base;
 	u32 tail, wq_len, wq_off, space;
 
-	space = CIRC_SPACE(gc->wq_tail, gc->wq_head, gc->wq_size);
+	desc = gc->client_base + gc->proc_desc_offset;
+	space = CIRC_SPACE(gc->wq_tail, desc->head, gc->wq_size);
 	if (WARN_ON(space < sizeof(struct guc_wq_item)))
 		return -ENOSPC; /* shouldn't happen */
 
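Worth noting for the two hunks above: CIRC_SPACE() comes from <linux/circ_buf.h> and reports the free space in a power-of-two ring buffer while always keeping one slot unused, so the GuC work queue can never be filled completely. A small worked example with assumed values:

	#include <linux/circ_buf.h>

	/* For a 16-byte ring with tail == head == 0:
	 *   CIRC_SPACE(0, 0, 16) == CIRC_CNT(0, 0 + 1, 16)
	 *                        == (0 - 1) & (16 - 1) == 15
	 * One byte stays free, so head == tail unambiguously
	 * means "empty" rather than "full".
	 */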
@@ -661,21 +632,28 @@ static void guc_client_free(struct drm_device *dev,
 	if (!client)
 		return;
 
-	if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
-		/*
-		 * First disable the doorbell, then tell the GuC we've
-		 * finished with it, finally deallocate it in our bitmap
-		 */
-		guc_disable_doorbell(guc, client);
-		host2guc_release_doorbell(guc, client);
-		release_doorbell(guc, client->doorbell_id);
-	}
-
 	/*
 	 * XXX: wait for any outstanding submissions before freeing memory.
 	 * Be sure to drop any locks
 	 */
 
+	if (client->client_base) {
+		/*
+		 * If we got as far as setting up a doorbell, make sure
+		 * we shut it down before unmapping & deallocating the
+		 * memory. So first disable the doorbell, then tell the
+		 * GuC that we've finished with it, finally deallocate
+		 * it in our bitmap
+		 */
+		if (client->doorbell_id != GUC_INVALID_DOORBELL_ID) {
+			guc_disable_doorbell(guc, client);
+			host2guc_release_doorbell(guc, client);
+			release_doorbell(guc, client->doorbell_id);
+		}
+
+		kunmap(kmap_to_page(client->client_base));
+	}
+
 	gem_release_guc_obj(client->client_obj);
 
 	if (client->ctx_index != GUC_INVALID_CTX_ID) {
@@ -696,7 +674,7 @@ static void guc_client_free(struct drm_device *dev,
  * @ctx:	the context that owns the client (we use the default render
  *		context)
  *
- * Return:	An i915_guc_client object if success.
+ * Return:	An i915_guc_client object if success, else NULL.
  */
 static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 						uint32_t priority,
@@ -728,7 +706,9 @@ static struct i915_guc_client *guc_client_alloc(struct drm_device *dev,
 	if (!obj)
 		goto err;
 
+	/* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
 	client->client_obj = obj;
+	client->client_base = kmap(i915_gem_object_get_page(obj, 0));
 	client->wq_offset = GUC_DB_SIZE;
 	client->wq_size = GUC_WQ_SIZE;
 
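The design choice running through the GuC hunks above: instead of a kmap_atomic()/kunmap_atomic() pair around every doorbell or descriptor access, the first page of the client object is mapped once with kmap() and the CPU address cached in client->client_base. kmap() may sleep but the mapping persists, which is what lets the earlier hunks drop their atomic sections; the matching kunmap(kmap_to_page(...)) sits in guc_client_free(). A compressed sketch of the lifetime, using only names from the patch itself:

	/* at allocation: map page 0 once and remember the CPU address */
	client->client_base = kmap(i915_gem_object_get_page(obj, 0));

	/* at any later access: plain pointer arithmetic, no atomic section */
	desc = client->client_base + client->proc_desc_offset;

	/* at teardown: recover the page from the mapping and unmap it */
	kunmap(kmap_to_page(client->client_base));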
(Diff for one file — +298/−289 lines — suppressed by the viewer because it is too large.)
@@ -79,6 +79,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 /* PCI config space */
 
+#define MCHBAR_I915 0x44
+#define MCHBAR_I965 0x48
+#define MCHBAR_SIZE (4 * 4096)
+
+#define DEVEN 0x54
+#define   DEVEN_MCHBAR_EN (1 << 28)
+
+#define BSM 0x5c
+#define   BSM_MASK (0xFFFF << 20)
+
 #define HPLLCC	0xc0 /* 85x only */
 #define   GC_CLOCK_CONTROL_MASK		(0x7 << 0)
 #define   GC_CLOCK_133_200		(0 << 0)
@@ -90,6 +100,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   GC_CLOCK_166_266		(6 << 0)
 #define   GC_CLOCK_166_250		(7 << 0)
 
+#define I915_GDRST 0xc0 /* PCI config register */
+#define   GRDOM_FULL		(0 << 2)
+#define   GRDOM_RENDER		(1 << 2)
+#define   GRDOM_MEDIA		(3 << 2)
+#define   GRDOM_MASK		(3 << 2)
+#define   GRDOM_RESET_STATUS	(1 << 1)
+#define   GRDOM_RESET_ENABLE	(1 << 0)
+
+#define GCDGMBUS 0xcc
+
 #define GCFGC2	0xda
 #define GCFGC	0xf0 /* 915+ only */
 #define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
@@ -121,18 +141,16 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 #define   I915_GC_RENDER_CLOCK_166_MHZ	(0 << 0)
 #define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
 #define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
-#define GCDGMBUS 0xcc
-#define PCI_LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
 
 #define ASLE	0xe4
 #define ASLS	0xfc
 
 #define SWSCI	0xe8
 #define   SWSCI_SCISEL	(1 << 15)
 #define   SWSCI_GSSCIE	(1 << 0)
 
+#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */
+
 /* Graphics reset regs */
-#define I915_GDRST 0xc0 /* PCI config register */
-#define  GRDOM_FULL	(0<<2)
-#define  GRDOM_RENDER	(1<<2)
-#define  GRDOM_MEDIA	(3<<2)
-#define  GRDOM_MASK	(3<<2)
-#define  GRDOM_RESET_STATUS (1<<1)
-#define  GRDOM_RESET_ENABLE (1<<0)
-
 #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4)
 #define  ILK_GRDOM_FULL		(0<<1)
@@ -1375,14 +1393,10 @@ enum skl_disp_power_wells {
 
 #define _PORT_REF_DW6_A			0x162198
 #define _PORT_REF_DW6_BC		0x6C198
-/*
- * FIXME: BSpec/CHV ConfigDB disagrees on the following two fields, fix them
- * after testing.
- */
-#define   GRC_CODE_SHIFT		23
-#define   GRC_CODE_MASK			(0x1FF << GRC_CODE_SHIFT)
+#define   GRC_CODE_SHIFT		24
+#define   GRC_CODE_MASK			(0xFF << GRC_CODE_SHIFT)
 #define   GRC_CODE_FAST_SHIFT		16
-#define   GRC_CODE_FAST_MASK		(0x7F << GRC_CODE_FAST_SHIFT)
+#define   GRC_CODE_FAST_MASK		(0xFF << GRC_CODE_FAST_SHIFT)
 #define   GRC_CODE_SLOW_SHIFT		8
 #define   GRC_CODE_SLOW_MASK		(0xFF << GRC_CODE_SLOW_SHIFT)
 #define   GRC_CODE_NOM_MASK		0xFF
@@ -2934,7 +2948,14 @@ enum skl_disp_power_wells {
 #define GEN6_RP_STATE_CAP	_MMIO(MCHBAR_MIRROR_BASE_SNB + 0x5998)
 #define BXT_RP_STATE_CAP	_MMIO(0x138170)
 
-#define INTERVAL_1_28_US(us)	(((us) * 100) >> 7)
+/*
+ * Make these a multiple of magic 25 to avoid SNB (eg. Dell XPS
+ * 8300) freezing up around GPU hangs. Looks as if even
+ * scheduling/timer interrupts start misbehaving if the RPS
+ * EI/thresholds are "bad", leading to a very sluggish or even
+ * frozen machine.
+ */
+#define INTERVAL_1_28_US(us)	roundup(((us) * 100) >> 7, 25)
 #define INTERVAL_1_33_US(us)	(((us) * 3) >> 2)
 #define INTERVAL_0_833_US(us)	(((us) * 6) / 5)
 #define GT_INTERVAL_FROM_US(dev_priv, us)	(IS_GEN9(dev_priv) ? \
@@ -2943,6 +2964,15 @@ enum skl_disp_power_wells {
 					INTERVAL_1_33_US(us)) : \
 				INTERVAL_1_28_US(us))
 
+#define INTERVAL_1_28_TO_US(interval)  (((interval) << 7) / 100)
+#define INTERVAL_1_33_TO_US(interval)  (((interval) << 2) / 3)
+#define INTERVAL_0_833_TO_US(interval) (((interval) * 5) / 6)
+#define GT_PM_INTERVAL_TO_US(dev_priv, interval) (IS_GEN9(dev_priv) ? \
+                           (IS_BROXTON(dev_priv) ? \
+                           INTERVAL_0_833_TO_US(interval) : \
+                           INTERVAL_1_33_TO_US(interval)) : \
+                           INTERVAL_1_28_TO_US(interval))
+
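A worked round-trip example for the two hunks above (values assumed, non-GEN9/SNB path):

	/* Forward: GT_INTERVAL_FROM_US(dev_priv, 10000)
	 *   -> INTERVAL_1_28_US(10000)
	 *   -> roundup((10000 * 100) >> 7, 25) = roundup(7812, 25) = 7825
	 * Back:    GT_PM_INTERVAL_TO_US(dev_priv, 7825)
	 *   -> INTERVAL_1_28_TO_US(7825)
	 *   -> (7825 << 7) / 100 = 10016 us
	 * The round trip is only approximate, which is why dedicated
	 * *_TO_US macros are added for reporting register values in
	 * microseconds instead of inverting the forward conversion.
	 */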
 /*
  * Logical Context regs
  */
@@ -6866,6 +6896,8 @@ enum skl_disp_power_wells {
 #define VLV_SPAREG2H			_MMIO(0xA194)
 
 #define GTFIFODBG			_MMIO(0x120000)
+#define   GT_FIFO_SBDEDICATE_FREE_ENTRY_CHV	(0x1f << 20)
+#define   GT_FIFO_FREE_ENTRIES_CHV		(0x7f << 13)
 #define   GT_FIFO_SBDROPERR		(1<<6)
 #define   GT_FIFO_BLOBDROPERR		(1<<5)
 #define   GT_FIFO_SB_READ_ABORTERR	(1<<4)
@@ -6882,8 +6914,11 @@ enum skl_disp_power_wells {
 
 #define HSW_IDICR			_MMIO(0x9008)
 #define   IDIHASHMSK(x)			(((x) & 0x3f) << 16)
-#define HSW_EDRAM_PRESENT		_MMIO(0x120010)
+#define HSW_EDRAM_CAP			_MMIO(0x120010)
+#define   EDRAM_ENABLED			0x1
+#define   EDRAM_NUM_BANKS(cap)		(((cap) >> 1) & 0xf)
+#define   EDRAM_WAYS_IDX(cap)		(((cap) >> 5) & 0x7)
+#define   EDRAM_SETS_IDX(cap)		(((cap) >> 8) & 0x3)
 
 #define GEN6_UCGCTL1			_MMIO(0x9400)
 # define GEN6_EU_TCUNIT_CLOCK_GATE_DISABLE	(1 << 16)
@@ -7161,6 +7196,7 @@ enum skl_disp_power_wells {
 
 #define GEN9_HALF_SLICE_CHICKEN7	_MMIO(0xe194)
 #define   GEN9_ENABLE_YV12_BUGFIX	(1<<4)
+#define   GEN9_ENABLE_GPGPU_PREEMPTION	(1<<2)
 
 /* Audio */
 #define G4X_AUD_VID_DID			_MMIO(dev_priv->info.display_mmio_offset + 0x62020)
@@ -58,8 +58,6 @@
 #define SLAVE_ADDR1	0x70
 #define SLAVE_ADDR2	0x72
 
-static int panel_type;
-
 /* Get BDB block size given a pointer to Block ID. */
 static u32 _get_blocksize(const u8 *block_base)
 {
@@ -205,17 +203,32 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	const struct lvds_dvo_timing *panel_dvo_timing;
 	const struct lvds_fp_timing *fp_timing;
 	struct drm_display_mode *panel_fixed_mode;
+	int panel_type;
 	int drrs_mode;
+	int ret;
 
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
 		return;
 
 	dev_priv->vbt.lvds_dither = lvds_options->pixel_dither;
-	if (lvds_options->panel_type == 0xff)
-		return;
 
-	panel_type = lvds_options->panel_type;
+	ret = intel_opregion_get_panel_type(dev_priv->dev);
+	if (ret >= 0) {
+		WARN_ON(ret > 0xf);
+		panel_type = ret;
+		DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+	} else {
+		if (lvds_options->panel_type > 0xf) {
+			DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
+				      lvds_options->panel_type);
+			return;
+		}
+		panel_type = lvds_options->panel_type;
+		DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+	}
+
+	dev_priv->vbt.panel_type = panel_type;
 
 	drrs_mode = (lvds_options->dps_panel_type_bits
 				>> (panel_type * 2)) & MODE_MASK;
@@ -251,7 +264,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
 	panel_dvo_timing = get_lvds_dvo_timing(lvds_lfp_data,
 					       lvds_lfp_data_ptrs,
-					       lvds_options->panel_type);
+					       panel_type);
 
 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
 	if (!panel_fixed_mode)
@@ -266,7 +279,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
 	fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
 				       lvds_lfp_data_ptrs,
-				       lvds_options->panel_type);
+				       panel_type);
 	if (fp_timing) {
 		/* check the resolution, just to be sure */
 		if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
@@ -284,6 +297,7 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
 {
 	const struct bdb_lfp_backlight_data *backlight_data;
 	const struct bdb_lfp_backlight_data_entry *entry;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
 	if (!backlight_data)
@@ -546,6 +560,7 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 	const struct bdb_edp *edp;
 	const struct edp_power_seq *edp_pps;
 	const struct edp_link_params *edp_link_params;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
@@ -657,6 +672,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
 {
 	const struct bdb_psr *psr;
 	const struct psr_table *psr_table;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	psr = find_section(bdb, BDB_PSR);
 	if (!psr) {
@@ -703,6 +719,7 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
 	const struct bdb_mipi_config *start;
 	const struct mipi_config *config;
 	const struct mipi_pps_data *pps;
+	int panel_type = dev_priv->vbt.panel_type;
 
 	/* parse MIPI blocks only if LFP type is MIPI */
 	if (!intel_bios_is_dsi_present(dev_priv, NULL))
@@ -910,6 +927,7 @@ static void
 parse_mipi_sequence(struct drm_i915_private *dev_priv,
 		    const struct bdb_header *bdb)
 {
+	int panel_type = dev_priv->vbt.panel_type;
 	const struct bdb_mipi_sequence *sequence;
 	const u8 *seq_data;
 	u32 seq_size;
 
@@ -50,6 +50,7 @@ MODULE_FIRMWARE(I915_CSR_SKL);
 MODULE_FIRMWARE(I915_CSR_BXT);
 
 #define SKL_CSR_VERSION_REQUIRED	CSR_VERSION(1, 23)
+#define BXT_CSR_VERSION_REQUIRED	CSR_VERSION(1, 7)
 
 #define CSR_MAX_FW_SIZE			0x2FFF
 #define CSR_DEFAULT_FW_OFFSET		0xFFFFFFFF
@@ -281,6 +282,7 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 	uint32_t dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	uint32_t i;
 	uint32_t *dmc_payload;
+	uint32_t required_min_version;
 
 	if (!fw)
 		return NULL;
@@ -296,15 +298,23 @@ static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	csr->version = css_header->version;
 
-	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
-	    csr->version < SKL_CSR_VERSION_REQUIRED) {
-		DRM_INFO("Refusing to load old Skylake DMC firmware v%u.%u,"
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+		required_min_version = SKL_CSR_VERSION_REQUIRED;
+	} else if (IS_BROXTON(dev_priv)) {
+		required_min_version = BXT_CSR_VERSION_REQUIRED;
+	} else {
+		MISSING_CASE(INTEL_REVID(dev_priv));
+		required_min_version = 0;
+	}
+
+	if (csr->version < required_min_version) {
+		DRM_INFO("Refusing to load old DMC firmware v%u.%u,"
 			 " please upgrade to v%u.%u or later"
 			   " [" FIRMWARE_URL "].\n",
 			 CSR_VERSION_MAJOR(csr->version),
 			 CSR_VERSION_MINOR(csr->version),
-			 CSR_VERSION_MAJOR(SKL_CSR_VERSION_REQUIRED),
-			 CSR_VERSION_MINOR(SKL_CSR_VERSION_REQUIRED));
+			 CSR_VERSION_MAJOR(required_min_version),
+			 CSR_VERSION_MINOR(required_min_version));
 		return NULL;
 	}
 
@@ -456,11 +466,51 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
 	schedule_work(&dev_priv->csr.work);
 }
 
+/**
+ * intel_csr_ucode_suspend() - prepare CSR firmware before system suspend
+ * @dev_priv: i915 drm device
+ *
+ * Prepare the DMC firmware before entering system suspend. This includes
+ * flushing pending work items and releasing any resources acquired during
+ * init.
+ */
+void intel_csr_ucode_suspend(struct drm_i915_private *dev_priv)
+{
+	if (!HAS_CSR(dev_priv))
+		return;
+
+	flush_work(&dev_priv->csr.work);
+
+	/* Drop the reference held in case DMC isn't loaded. */
+	if (!dev_priv->csr.dmc_payload)
+		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
+}
+
+/**
+ * intel_csr_ucode_resume() - init CSR firmware during system resume
+ * @dev_priv: i915 drm device
+ *
+ * Reinitialize the DMC firmware during system resume, reacquiring any
+ * resources released in intel_csr_ucode_suspend().
+ */
+void intel_csr_ucode_resume(struct drm_i915_private *dev_priv)
+{
+	if (!HAS_CSR(dev_priv))
+		return;
+
+	/*
+	 * Reacquire the reference to keep RPM disabled in case DMC isn't
+	 * loaded.
+	 */
+	if (!dev_priv->csr.dmc_payload)
+		intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+}
+
 /**
  * intel_csr_ucode_fini() - unload the CSR firmware.
  * @dev_priv: i915 drm device.
  *
- * Firmmware unloading includes freeing the internal momory and reset the
+ * Firmmware unloading includes freeing the internal memory and reset the
  * firmware loading status.
  */
 void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
@@ -468,7 +518,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
 	if (!HAS_CSR(dev_priv))
 		return;
 
-	flush_work(&dev_priv->csr.work);
+	intel_csr_ucode_suspend(dev_priv);
 
 	kfree(dev_priv->csr.dmc_payload);
 }
@@ -443,9 +443,17 @@ void intel_prepare_ddi_buffer(struct intel_encoder *encoder)
 	} else if (IS_BROADWELL(dev_priv)) {
 		ddi_translations_fdi = bdw_ddi_translations_fdi;
 		ddi_translations_dp = bdw_ddi_translations_dp;
-		ddi_translations_edp = bdw_ddi_translations_edp;
+
+		if (dev_priv->vbt.edp.low_vswing) {
+			ddi_translations_edp = bdw_ddi_translations_edp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+		} else {
+			ddi_translations_edp = bdw_ddi_translations_dp;
+			n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
+		}
+
 		ddi_translations_hdmi = bdw_ddi_translations_hdmi;
-		n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp);
+
 		n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp);
 		n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi);
 		hdmi_default_entry = 7;
@@ -1722,12 +1730,78 @@ static void intel_disable_ddi(struct intel_encoder *intel_encoder)
 	}
 }
 
+static bool broxton_phy_is_enabled(struct drm_i915_private *dev_priv,
+				   enum dpio_phy phy)
+{
+	if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & GT_DISPLAY_POWER_ON(phy)))
+		return false;
+
+	if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+	     (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
+				 phy);
+
+		return false;
+	}
+
+	if (phy == DPIO_PHY1 &&
+	    !(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE)) {
+		DRM_DEBUG_DRIVER("DDI PHY 1 powered, but GRC isn't done\n");
+
+		return false;
+	}
+
+	if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+		DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
+				 phy);
+
+		return false;
+	}
+
+	return true;
+}
+
+static u32 broxton_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
+{
+	u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+
+	return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+}
+
+static void broxton_phy_wait_grc_done(struct drm_i915_private *dev_priv,
+				      enum dpio_phy phy)
+{
+	if (wait_for(I915_READ(BXT_PORT_REF_DW3(phy)) & GRC_DONE, 10))
+		DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+}
+
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+				     enum dpio_phy phy);
+
 static void broxton_phy_init(struct drm_i915_private *dev_priv,
 			     enum dpio_phy phy)
 {
 	enum port port;
 	u32 ports, val;
 
+	if (broxton_phy_is_enabled(dev_priv, phy)) {
+		/* Still read out the GRC value for state verification */
+		if (phy == DPIO_PHY0)
+			dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv, phy);
+
+		if (broxton_phy_verify_state(dev_priv, phy)) {
+			DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
+					 "won't reprogram it\n", phy);
+
+			return;
+		}
+
+		DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
+				 "force reprogramming it\n", phy);
+	} else {
+		DRM_DEBUG_DRIVER("DDI PHY %d not enabled, enabling it\n", phy);
+	}
+
 	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
 	val |= GT_DISPLAY_POWER_ON(phy);
 	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
@@ -1798,6 +1872,9 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
 	 * enabled.
 	 * TODO: port C is only connected on BXT-P, so on BXT0/1 we should
 	 * power down the second channel on PHY0 as well.
+	 *
+	 * FIXME: Clarify programming of the following, the register is
+	 * read-only with bit 6 fixed at 0 at least in stepping A.
 	 */
 	if (phy == DPIO_PHY1)
 		val |= OCL2_LDOFUSE_PWR_DIS;
@@ -1810,12 +1887,10 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
 		 * the corresponding calibrated value from PHY1, and disable
 		 * the automatic calibration on PHY0.
 		 */
-		if (wait_for(I915_READ(BXT_PORT_REF_DW3(DPIO_PHY1)) & GRC_DONE,
-			     10))
-			DRM_ERROR("timeout waiting for PHY1 GRC\n");
+		broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
 
-		val = I915_READ(BXT_PORT_REF_DW6(DPIO_PHY1));
-		val = (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
+		val = dev_priv->bxt_phy_grc = broxton_get_grc(dev_priv,
+							      DPIO_PHY1);
 		grc_code = val << GRC_CODE_FAST_SHIFT |
 			   val << GRC_CODE_SLOW_SHIFT |
 			   val;
@@ -1825,17 +1900,27 @@ static void broxton_phy_init(struct drm_i915_private *dev_priv,
|
||||
val |= GRC_DIS | GRC_RDY_OVRD;
|
||||
I915_WRITE(BXT_PORT_REF_DW8(DPIO_PHY0), val);
|
||||
}
|
||||
/*
|
||||
* During PHY1 init delay waiting for GRC calibration to finish, since
|
||||
* it can happen in parallel with the subsequent PHY0 init.
|
||||
*/
|
||||
|
||||
val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
|
||||
val |= COMMON_RESET_DIS;
|
||||
I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
|
||||
}
|
||||
|
||||
-void broxton_ddi_phy_init(struct drm_device *dev)
+void broxton_ddi_phy_init(struct drm_i915_private *dev_priv)
{
	/* Enable PHY1 first since it provides Rcomp for PHY0 */
-	broxton_phy_init(dev->dev_private, DPIO_PHY1);
-	broxton_phy_init(dev->dev_private, DPIO_PHY0);
+	broxton_phy_init(dev_priv, DPIO_PHY1);
+	broxton_phy_init(dev_priv, DPIO_PHY0);
+
+	/*
+	 * If BIOS enabled only PHY0 and not PHY1, we skipped waiting for the
+	 * PHY1 GRC calibration to finish, so wait for it here.
+	 */
+	broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);
}

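The ordering here is deliberate: PHY1 must come up first because it provides the Rcomp reference that PHY0 copies, and the GRC wait is deferred so PHY0's init can overlap with PHY1's calibration. Annotated (the same calls as above, comments added for illustration):

	broxton_phy_init(dev_priv, DPIO_PHY1);	/* kicks off GRC calibration */
	broxton_phy_init(dev_priv, DPIO_PHY0);	/* overlaps with calibration */
	broxton_phy_wait_grc_done(dev_priv, DPIO_PHY1);	/* block only at the end */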
static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
@@ -1846,17 +1931,126 @@ static void broxton_phy_uninit(struct drm_i915_private *dev_priv,
	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
	val &= ~COMMON_RESET_DIS;
	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+
+	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+	val &= ~GT_DISPLAY_POWER_ON(phy);
+	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
}

-void broxton_ddi_phy_uninit(struct drm_device *dev)
+void broxton_ddi_phy_uninit(struct drm_i915_private *dev_priv)
{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
	broxton_phy_uninit(dev_priv, DPIO_PHY1);
	broxton_phy_uninit(dev_priv, DPIO_PHY0);
-
-	/* FIXME: do this in broxton_phy_uninit per phy */
-	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, 0);
}

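The uninit path now mirrors init per PHY: re-assert the common reset, then drop that PHY's display power-on request, replacing the old single write that cut power to both PHYs at once. Annotated excerpt (comments added for illustration):

	val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
	val &= ~COMMON_RESET_DIS;		/* put the PHY back into reset */
	I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);

	val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
	val &= ~GT_DISPLAY_POWER_ON(phy);	/* power down just this PHY */
	I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);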
+static bool __printf(6, 7)
+__phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
+		       i915_reg_t reg, u32 mask, u32 expected,
+		       const char *reg_fmt, ...)
+{
+	struct va_format vaf;
+	va_list args;
+	u32 val;
+
+	val = I915_READ(reg);
+	if ((val & mask) == expected)
+		return true;
+
+	va_start(args, reg_fmt);
+	vaf.fmt = reg_fmt;
+	vaf.va = &args;
+
+	DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+			 "current %08x, expected %08x (mask %08x)\n",
+			 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
+			 mask);
+
+	va_end(args);
+
+	return false;
+}

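__phy_reg_verify_state() leans on the kernel's %pV printf extension: the caller's format string and arguments are wrapped in a struct va_format and spliced into the debug message, so each call site can label the register it checks without building the string itself. A minimal self-contained sketch of the same pattern (my_check() is a hypothetical name):

	static bool __printf(2, 3)
	my_check(bool ok, const char *fmt, ...)
	{
		struct va_format vaf;
		va_list args;

		if (ok)
			return true;

		va_start(args, fmt);
		vaf.fmt = fmt;
		vaf.va = &args;
		pr_debug("check failed: %pV\n", &vaf);	/* %pV expands fmt/args */
		va_end(args);

		return false;
	}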
+static bool broxton_phy_verify_state(struct drm_i915_private *dev_priv,
+				     enum dpio_phy phy)
+{
+	enum port port;
+	u32 ports;
+	uint32_t mask;
+	bool ok;
+
+#define _CHK(reg, mask, exp, fmt, ...) \
+	__phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
+			       ## __VA_ARGS__)
+
+	/* We expect the PHY to be always enabled */
+	if (!broxton_phy_is_enabled(dev_priv, phy))
+		return false;
+
+	ok = true;
+
+	if (phy == DPIO_PHY0)
+		ports = BIT(PORT_B) | BIT(PORT_C);
+	else
+		ports = BIT(PORT_A);
+
+	for_each_port_masked(port, ports) {
+		int lane;
+
+		for (lane = 0; lane < 4; lane++)
+			ok &= _CHK(BXT_PORT_TX_DW14_LN(port, lane),
+				   LATENCY_OPTIM,
+				   lane != 1 ? LATENCY_OPTIM : 0,
+				   "BXT_PORT_TX_DW14_LN(%d, %d)", port, lane);
+	}
+
+	/* PLL Rcomp code offset */
+	ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
+		   IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
+		   "BXT_PORT_CL1CM_DW9(%d)", phy);
+	ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
+		   IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
+		   "BXT_PORT_CL1CM_DW10(%d)", phy);
+
+	/* Power gating */
+	mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
+	ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
+		   "BXT_PORT_CL1CM_DW28(%d)", phy);
+
+	if (phy == DPIO_PHY0)
+		ok &= _CHK(BXT_PORT_CL2CM_DW6_BC,
+			   DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
+			   "BXT_PORT_CL2CM_DW6_BC");
+
+	/*
+	 * TODO: Verify BXT_PORT_CL1CM_DW30 bit OCL2_LDOFUSE_PWR_DIS,
+	 * at least on stepping A this bit is read-only and fixed at 0.
+	 */
+
+	if (phy == DPIO_PHY0) {
+		u32 grc_code = dev_priv->bxt_phy_grc;
+
+		grc_code = grc_code << GRC_CODE_FAST_SHIFT |
+			   grc_code << GRC_CODE_SLOW_SHIFT |
+			   grc_code;
+		mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
+		       GRC_CODE_NOM_MASK;
+		ok &= _CHK(BXT_PORT_REF_DW6(DPIO_PHY0), mask, grc_code,
+			   "BXT_PORT_REF_DW6(%d)", DPIO_PHY0);
+
+		mask = GRC_DIS | GRC_RDY_OVRD;
+		ok &= _CHK(BXT_PORT_REF_DW8(DPIO_PHY0), mask, mask,
+			   "BXT_PORT_REF_DW8(%d)", DPIO_PHY0);
+	}
+
+	return ok;
+#undef _CHK
+}

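_CHK() is only shorthand over the helper above so each check reads as a single statement; the first Rcomp check, for instance, expands to roughly:

	ok &= __phy_reg_verify_state(dev_priv, phy,
				     BXT_PORT_CL1CM_DW9(phy),
				     IREF0RC_OFFSET_MASK,
				     0xe4 << IREF0RC_OFFSET_SHIFT,
				     "BXT_PORT_CL1CM_DW9(%d)", phy);

Accumulating into ok with &= instead of returning early means every mismatching register gets logged in one pass, rather than stopping at the first failure.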
+void broxton_ddi_phy_verify_state(struct drm_i915_private *dev_priv)
+{
+	if (!broxton_phy_verify_state(dev_priv, DPIO_PHY0) ||
+	    !broxton_phy_verify_state(dev_priv, DPIO_PHY1))
+		i915_report_error(dev_priv, "DDI PHY state mismatch\n");
+}

void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
@@ -2044,12 +2238,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
	intel_ddi_clock_get(encoder, pipe_config);
}

-static void intel_ddi_destroy(struct drm_encoder *encoder)
-{
-	/* HDMI has nothing special to destroy, so we can go with this. */
-	intel_dp_encoder_destroy(encoder);
-}
-
static bool intel_ddi_compute_config(struct intel_encoder *encoder,
				     struct intel_crtc_state *pipe_config)
{
@@ -2068,7 +2256,8 @@ static bool intel_ddi_compute_config(struct intel_encoder *encoder,
}

static const struct drm_encoder_funcs intel_ddi_funcs = {
-	.destroy = intel_ddi_destroy,
+	.reset = intel_dp_encoder_reset,
+	.destroy = intel_dp_encoder_destroy,
};

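With intel_ddi_destroy() gone, the vtable points straight at the DP encoder helpers. The new .reset hook matters because the DRM core calls it for every encoder from drm_mode_config_reset(), e.g. around suspend/resume; roughly (a sketch of the core's traversal, not i915 code):

	/* drm_mode_config_reset() walks all encoders and calls the hook: */
	drm_for_each_encoder(encoder, dev)
		if (encoder->funcs->reset)
			encoder->funcs->reset(encoder);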
static struct intel_connector *
@@ -2167,6 +2356,7 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
	intel_encoder->post_disable = intel_ddi_post_disable;
	intel_encoder->get_hw_state = intel_ddi_get_hw_state;
	intel_encoder->get_config = intel_ddi_get_config;
+	intel_encoder->suspend = intel_dp_encoder_suspend;

	intel_dig_port->port = port;
	intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &