MALI: rockchip: upgrade bifrost DDK to g25p0-00eac0, from g24p0-00eac0
mali_csffw.bin from Valhall DDK g25(r50) is included.

Change-Id: Ic454428c384456a14b29d9651f537eb59c11284d
Signed-off-by: Zhen Chen <chenzhen@rock-chips.com>
@@ -0,0 +1,163 @@
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# (C) COPYRIGHT 2022-2024 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
# Foundation, and any use by you of this program is subject to the terms
# of such GNU license.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, you can access it online at
# http://www.gnu.org/licenses/gpl-2.0.html.
#
#
%YAML 1.2
---
$id: http://devicetree.org/schemas/arm/arm,coresight-mali-source.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#

title: ARM CoreSight Mali Source integration

maintainers:
  - ARM Ltd.

description: |
  See Documentation/trace/coresight/coresight.rst for detailed information
  about CoreSight.

  This documentation covers Mali-specific devicetree integration.

  References to Sink ports are given as examples. Access to a Sink is specific
  to an implementation and would require dedicated kernel modules.

  Arm Mali GPUs support 3 different sources: ITM, ETM, ELA.

  ELA source configuration via SysFS entries:

  The register values used by CoreSight for ELA can be configured using SysFS
  interfaces. This implicitly includes configuring the ELA for independent or
  shared JCN request and response channels.

properties:
  compatible:
    enum:
      - arm,coresight-mali-source-itm
      - arm,coresight-mali-source-etm
      - arm,coresight-mali-source-ela

  gpu:
    minItems: 1
    maxItems: 1
    description:
      Phandle to a Mali GPU definition

  port:
    description:
      Output connection to CoreSight Sink Trace bus.

      Legacy binding between CoreSight Sources and CoreSight Sink.
      For Linux kernel < v4.20.
    $ref: /schemas/graph.yaml#/properties/port

  out-ports:
    description:
      Binding between CoreSight Sources and CoreSight Sink.
      For Linux kernel >= v4.20.
    $ref: /schemas/graph.yaml#/properties/ports

    properties:
      port:
        description: Output connection to CoreSight Sink Trace bus.
        $ref: /schemas/graph.yaml#/properties/port

required:
  - compatible
  - gpu
  - port
  - out-ports

additionalProperties: false

examples:

  # A Sink node without legacy CoreSight connections
  - |
    mali-source-itm {
        compatible = "arm,coresight-mali-source-itm";
        gpu = <&gpu>;

        out-ports {
            port {
                mali_source_itm_out_port0: endpoint {
                    remote-endpoint = <&mali_sink_in_port0>;
                };
            };
        };
    };

    mali-source-ela {
        compatible = "arm,coresight-mali-source-ela";
        gpu = <&gpu>;

        out-ports {
            port {
                mali_source_ela_out_port0: endpoint {
                    remote-endpoint = <&mali_sink_in_port1>;
                };
            };
        };
    };

    mali-source-etm {
        compatible = "arm,coresight-mali-source-etm";
        gpu = <&gpu>;

        out-ports {
            port {
                mali_source_etm_out_port0: endpoint {
                    remote-endpoint = <&mali_sink_in_port2>;
                };
            };
        };
    };

  # A Sink node with legacy CoreSight connections
  - |
    mali-source-itm {
        compatible = "arm,coresight-mali-source-itm";
        gpu = <&gpu>;

        port {
            mali_source_itm_out_port0: endpoint {
                remote-endpoint = <&mali_sink_in_port0>;
            };
        };
    };

    mali-source-etm {
        compatible = "arm,coresight-mali-source-etm";
        gpu = <&gpu>;

        port {
            mali_source_etm_out_port0: endpoint {
                remote-endpoint = <&mali_sink_in_port1>;
            };
        };
    };

    mali-source-ela {
        compatible = "arm,coresight-mali-source-ela";
        gpu = <&gpu>;

        port {
            mali_source_ela_out_port0: endpoint {
                remote-endpoint = <&mali_sink_in_port2>;
            };
        };
    };
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# (C) COPYRIGHT 2013-2023 ARM Limited. All rights reserved.
# (C) COPYRIGHT 2013-2024 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -132,6 +132,10 @@ for details.
  set and the setting corresponding to the SYSC_ALLOC register.
- propagate-bits: Used to write to L2_CONFIG.PBHA_HWU. This bitset establishes which
  PBHA bits are propagated on the AXI bus.
- mma-wa-id: Sets the PBHA ID to be used for the PBHA override based MMA violation workaround.
  The read and write allocation override bits for the PBHA are set to NONCACHEABLE
  and the driver encodes the PBHA ID in the PTEs where this workaround is to be applied.
  Valid values are from 1 to 15 (see the sketch below).
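For illustration, the PTE encoding that this property drives appears in the
memory_group_manager example change later in this commit; a minimal sketch
(mma_wa_id is a hypothetical stand-in for the value read from the property):

	/* Sketch: on an MMA violation, encode the configured PBHA ID (1-15)
	 * into the PTE's PBHA field so the NONCACHEABLE override takes
	 * effect on the bus for that mapping.
	 */
	if (pte_flags & BIT(MMA_VIOLATION))
		pte |= ((u64)mma_wa_id << PTE_PBHA_SHIFT) & PTE_PBHA_MASK;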

Example for a Mali GPU with 1 clock and 1 regulator:
@@ -241,6 +245,7 @@ gpu@0xfc010000 {
	pbha {
		int-id-override = <2 0x32>, <9 0x05>, <16 0x32>;
		propagate-bits = /bits/ 8 <0x03>;
		mma-wa-id = <2>;
	};
	...
};

@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# (C) COPYRIGHT 2021-2023 ARM Limited. All rights reserved.
# (C) COPYRIGHT 2021-2024 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software

@@ -299,7 +299,8 @@ static int example_mgm_get_import_memory_id(struct memory_group_manager_device *
}

static u64 example_mgm_update_gpu_pte(struct memory_group_manager_device *const mgm_dev,
				      unsigned int const group_id, int const mmu_level, u64 pte)
				      unsigned int const group_id, unsigned int const pbha_id,
				      unsigned int pte_flags, int const mmu_level, u64 pte)
{
	struct mgm_groups *const data = mgm_dev->data;

@@ -309,7 +310,10 @@ static u64 example_mgm_update_gpu_pte(struct memory_group_manager_device *const
	if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS))
		return pte;

	pte |= ((u64)group_id << PTE_PBHA_SHIFT) & PTE_PBHA_MASK;
	if (pte_flags & BIT(MMA_VIOLATION)) {
		pr_warn_once("MMA violation! Applying PBHA override workaround to PTE\n");
		pte |= ((u64)pbha_id << PTE_PBHA_SHIFT) & PTE_PBHA_MASK;
	}

	/* Address could be translated into a different bus address here */
	pte |= ((u64)1 << PTE_RES_BIT_MULTI_AS_SHIFT);
@@ -362,6 +366,16 @@ static vm_fault_t example_mgm_vmf_insert_pfn_prot(struct memory_group_manager_de
	return fault;
}

static bool example_mgm_get_import_memory_cached_access_permitted(
	struct memory_group_manager_device *mgm_dev,
	struct memory_group_manager_import_data *import_data)
{
	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(import_data);

	return true;
}

static int mgm_initialize_data(struct mgm_groups *mgm_data)
{
	int i;
@@ -408,6 +422,8 @@ static int memory_group_manager_probe(struct platform_device *pdev)
	mgm_dev->ops.mgm_vmf_insert_pfn_prot = example_mgm_vmf_insert_pfn_prot;
	mgm_dev->ops.mgm_update_gpu_pte = example_mgm_update_gpu_pte;
	mgm_dev->ops.mgm_pte_to_original_pte = example_mgm_pte_to_original_pte;
	mgm_dev->ops.mgm_get_import_memory_cached_access_permitted =
		example_mgm_get_import_memory_cached_access_permitted;

	mgm_data = kzalloc(sizeof(*mgm_data), GFP_KERNEL);
	if (!mgm_data) {

@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
#
# (C) COPYRIGHT 2012-2023 ARM Limited. All rights reserved.
# (C) COPYRIGHT 2012-2024 ARM Limited. All rights reserved.
#
# This program is free software and is provided to you under the terms of the
# GNU General Public License version 2 as published by the Free Software
@@ -69,7 +69,7 @@ endif
#

# Driver version string which is returned to userspace via an ioctl
MALI_RELEASE_NAME ?= '"g24p0-00eac0"'
MALI_RELEASE_NAME ?= '"g25p0-00eac0"'
# Set up defaults if not defined by build system
ifeq ($(CONFIG_MALI_BIFROST_DEBUG), y)
MALI_UNIT_TEST = 1
@@ -104,7 +104,6 @@ endif
#
# Experimental features must default to disabled, e.g.:
# MALI_EXPERIMENTAL_FEATURE ?= 0
MALI_INCREMENTAL_RENDERING_JM ?= 0

#
# ccflags
@@ -117,7 +116,6 @@ ccflags-y = \
	-DMALI_COVERAGE=$(MALI_COVERAGE) \
	-DMALI_RELEASE_NAME=$(MALI_RELEASE_NAME) \
	-DMALI_JIT_PRESSURE_LIMIT_BASE=$(MALI_JIT_PRESSURE_LIMIT_BASE) \
	-DMALI_INCREMENTAL_RENDERING_JM=$(MALI_INCREMENTAL_RENDERING_JM) \
	-DMALI_PLATFORM_DIR=$(MALI_PLATFORM_DIR)


@@ -212,6 +210,7 @@ endif


INCLUDE_SUBDIR = \
	$(src)/arbiter/Kbuild \
	$(src)/context/Kbuild \
	$(src)/debug/Kbuild \
	$(src)/device/Kbuild \
@@ -228,9 +227,6 @@ ifeq ($(CONFIG_MALI_CSF_SUPPORT),y)
INCLUDE_SUBDIR += $(src)/csf/Kbuild
endif

ifeq ($(CONFIG_MALI_ARBITER_SUPPORT),y)
INCLUDE_SUBDIR += $(src)/arbiter/Kbuild
endif

ifeq ($(CONFIG_MALI_BIFROST_DEVFREQ),y)
ifeq ($(CONFIG_DEVFREQ_THERMAL),y)

@@ -63,6 +63,8 @@ config MALI_BIFROST_NO_MALI
	  All calls to the simulated hardware will complete immediately as if the hardware
	  completed the task.

endchoice

config MALI_NO_MALI_DEFAULT_GPU
	string "Default GPU for No Mali"
	depends on MALI_BIFROST_NO_MALI
@@ -70,7 +72,12 @@ config MALI_NO_MALI_DEFAULT_GPU
	help
	  This option sets the default GPU to identify as for No Mali builds.

endchoice
config MALI_IS_FPGA
	bool "Enable build of Mali kernel driver for FPGA"
	depends on MALI_BIFROST
	default n
	help
	  This is the default HW backend.

menu "Platform specific options"
source "$(MALI_KCONFIG_EXT_PREFIX)drivers/gpu/arm/bifrost/platform/Kconfig"
@@ -340,7 +347,7 @@ config MALI_PWRSOFT_765
	  changes have been backported say Y to avoid compilation errors.

config MALI_HW_ERRATA_1485982_NOT_AFFECTED
	bool "Disable workaround for BASE_HW_ISSUE_GPU2017_1336"
	bool "Disable workaround for KBASE_HW_ISSUE_GPU2017_1336"
	depends on MALI_BIFROST && MALI_BIFROST_EXPERT
	default n
	help
@@ -352,7 +359,7 @@ config MALI_HW_ERRATA_1485982_NOT_AFFECTED
	  coherency mode requires the L2 to be turned off.

config MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE
	bool "Use alternative workaround for BASE_HW_ISSUE_GPU2017_1336"
	bool "Use alternative workaround for KBASE_HW_ISSUE_GPU2017_1336"
	depends on MALI_BIFROST && MALI_BIFROST_EXPERT && !MALI_HW_ERRATA_1485982_NOT_AFFECTED
	default n
	help

@@ -156,7 +156,6 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
		CONFIG_MALI_BIFROST \
		CONFIG_MALI_CSF_SUPPORT \
		CONFIG_MALI_BIFROST_GATOR_SUPPORT \
		CONFIG_MALI_ARBITER_SUPPORT \
		CONFIG_MALI_ARBITRATION \
		CONFIG_MALI_PARTITION_MANAGER \
		CONFIG_MALI_REAL_HW \
@@ -170,6 +169,7 @@ ifeq ($(MALI_KCONFIG_EXT_PREFIX),)
		CONFIG_MALI_PWRSOFT_765 \
		CONFIG_MALI_JOB_DUMP \
		CONFIG_MALI_BIFROST_NO_MALI \
		CONFIG_MALI_IS_FPGA \
		CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED \
		CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE \
		CONFIG_MALI_PRFCNT_SET_PRIMARY \

@@ -108,6 +108,7 @@ static void on_gpu_stop(struct device *dev)
	}

	KBASE_TLSTREAM_TL_ARBITER_STOP_REQUESTED(kbdev, kbdev);
	KBASE_KTRACE_ADD(kbdev, ARB_GPU_STOP_REQUESTED, NULL, 0);
	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_STOP_EVT);
}

@@ -133,6 +134,7 @@ static void on_gpu_granted(struct device *dev)
	}

	KBASE_TLSTREAM_TL_ARBITER_GRANTED(kbdev, kbdev);
	KBASE_KTRACE_ADD(kbdev, ARB_GPU_GRANTED, NULL, 0);
	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_GRANTED_EVT);
}

@@ -156,7 +158,8 @@ static void on_gpu_lost(struct device *dev)
		dev_err(dev, "%s(): kbdev is NULL", __func__);
		return;
	}

	KBASE_TLSTREAM_TL_ARBITER_LOST(kbdev, kbdev);
	KBASE_KTRACE_ADD(kbdev, ARB_GPU_LOST, NULL, 0);
	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_LOST_EVT);
}

@@ -178,7 +181,7 @@ static int kbase_arbif_of_init(struct kbase_device *kbdev)
	if (!arbiter_if_node)
		arbiter_if_node = of_parse_phandle(kbdev->dev->of_node, "arbiter_if", 0);
	if (!arbiter_if_node) {
		dev_dbg(kbdev->dev, "No arbiter_if in Device Tree\n");
		dev_dbg(kbdev->dev, "No arbiter_if in Device Tree");
		/* no arbiter interface defined in device tree */
		kbdev->arb.arb_dev = NULL;
		kbdev->arb.arb_if = NULL;
@@ -187,19 +190,19 @@ static int kbase_arbif_of_init(struct kbase_device *kbdev)

	pdev = of_find_device_by_node(arbiter_if_node);
	if (!pdev) {
		dev_err(kbdev->dev, "Failed to find arbiter_if device\n");
		dev_err(kbdev->dev, "Failed to find arbiter_if device");
		return -EPROBE_DEFER;
	}

	if (!pdev->dev.driver || !try_module_get(pdev->dev.driver->owner)) {
		dev_err(kbdev->dev, "arbiter_if driver not available\n");
		dev_err(kbdev->dev, "arbiter_if driver not available");
		put_device(&pdev->dev);
		return -EPROBE_DEFER;
	}
	kbdev->arb.arb_dev = &pdev->dev;
	arb_if = platform_get_drvdata(pdev);
	if (!arb_if) {
		dev_err(kbdev->dev, "arbiter_if driver not ready\n");
		dev_err(kbdev->dev, "arbiter_if driver not ready");
		module_put(pdev->dev.driver->owner);
		put_device(&pdev->dev);
		return -EPROBE_DEFER;
@@ -243,6 +246,10 @@ int kbase_arbif_init(struct kbase_device *kbdev)
	/* Tries to init with 'arbiter-if' if present in devicetree */
	err = kbase_arbif_of_init(kbdev);

	if (err == -ENODEV) {
		/* devicetree does not support arbitration */
		return -EPERM;
	}

	if (err)
		return err;
@@ -260,19 +267,19 @@ int kbase_arbif_init(struct kbase_device *kbdev)
	arb_if = kbdev->arb.arb_if;

	if (arb_if == NULL) {
		dev_err(kbdev->dev, "No arbiter interface present\n");
		dev_err(kbdev->dev, "No arbiter interface present");
		goto failure_term;
	}

	if (!arb_if->vm_ops.vm_arb_register_dev) {
		dev_err(kbdev->dev, "arbiter_if registration callback not present\n");
		dev_err(kbdev->dev, "arbiter_if registration callback not present");
		goto failure_term;
	}

	/* register kbase arbiter_if callbacks */
	err = arb_if->vm_ops.vm_arb_register_dev(arb_if, kbdev->dev, &ops);
	if (err) {
		dev_err(kbdev->dev, "Failed to register with arbiter. (err = %d)\n", err);
		dev_err(kbdev->dev, "Failed to register with arbiter. (err = %d)", err);
		goto failure_term;
	}

@@ -333,6 +340,7 @@ void kbase_arbif_gpu_request(struct kbase_device *kbdev)

	if (arb_if && arb_if->vm_ops.vm_arb_gpu_request) {
		KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
		KBASE_KTRACE_ADD(kbdev, ARB_GPU_REQUESTED, NULL, 0);
		arb_if->vm_ops.vm_arb_gpu_request(arb_if);
	}
}
@@ -349,8 +357,11 @@ void kbase_arbif_gpu_stopped(struct kbase_device *kbdev, u8 gpu_required)

	if (arb_if && arb_if->vm_ops.vm_arb_gpu_stopped) {
		KBASE_TLSTREAM_TL_ARBITER_STOPPED(kbdev, kbdev);
		if (gpu_required)
		KBASE_KTRACE_ADD(kbdev, ARB_GPU_STOPPED, NULL, 0);
		if (gpu_required) {
			KBASE_TLSTREAM_TL_ARBITER_REQUESTED(kbdev, kbdev);
			KBASE_KTRACE_ADD(kbdev, ARB_GPU_REQUESTED, NULL, 0);
		}
		arb_if->vm_ops.vm_arb_gpu_stopped(arb_if, gpu_required);
	}
}

@@ -199,6 +199,7 @@ static void kbase_arbiter_pm_resume_wq(struct work_struct *data)
	arb_vm_state->vm_arb_starting = false;
	mutex_unlock(&arb_vm_state->vm_state_lock);
	KBASE_TLSTREAM_TL_ARBITER_STARTED(kbdev, kbdev);
	KBASE_KTRACE_ADD(kbdev, ARB_GPU_STARTED, NULL, 0);
	dev_dbg(kbdev->dev, "<%s\n", __func__);
}

@@ -295,11 +296,13 @@ int kbase_arbiter_pm_early_init(struct kbase_device *kbdev)

	err = kbase_arbif_init(kbdev);
	if (err) {
		dev_err(kbdev->dev, "Failed to initialise arbif module. (err = %d)\n", err);
		if (err != -EPERM)
			dev_err(kbdev->dev, "Failed to initialise arbif module. (err = %d)", err);

		goto arbif_init_fail;
	}

	if (kbdev->arb.arb_if) {
	if (kbase_has_arbiter(kbdev)) {
		kbase_arbif_gpu_request(kbdev);
		dev_dbg(kbdev->dev, "Waiting for initial GPU assignment...\n");
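This hunk is the first of many in this commit that replace direct
kbdev->arb.arb_if checks with a kbase_has_arbiter() helper. The helper's
definition is not part of the shown hunks; given the one-for-one
substitutions, a minimal sketch of what it presumably reduces to
(hypothetical body):

	static inline bool kbase_has_arbiter(struct kbase_device *kbdev)
	{
		/* Assumption: an Arbiter is present exactly when the arbiter
		 * interface was bound during kbase_arbif_init().
		 */
		return kbdev->arb.arb_if != NULL;
	}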

@@ -345,6 +348,9 @@ void kbase_arbiter_pm_early_term(struct kbase_device *kbdev)
	if (arb_vm_state == NULL)
		return;

	if (!kbase_has_arbiter(kbdev))
		return;

	kbase_arbiter_pm_release_interrupts(kbdev);

	cancel_request_timer(kbdev);
@@ -475,6 +481,12 @@ int kbase_arbiter_pm_gpu_assigned(struct kbase_device *kbdev)
	if (!kbdev)
		return result;

	/* If there is no Arbiter, then there is no virtualization
	 * and the current VM always has access to the GPU.
	 */
	if (!kbase_has_arbiter(kbdev))
		return 1;

	/* First check the GPU_LOST state */
	kbase_pm_lock(kbdev);
	if (kbase_pm_is_gpu_lost(kbdev)) {
@@ -688,7 +700,7 @@ static inline bool kbase_arbiter_pm_vm_os_suspend_ready_state(struct kbase_devic
 * @kbdev: The kbase device structure for the device
 *
 * Prepares OS to be in suspend state until it receives GRANT message
 * from Arbiter asynchronously.
 * from Arbiter asynchronously. This function assumes there is an active Arbiter.
 */
static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
{
@@ -696,10 +708,8 @@ static void kbase_arbiter_pm_vm_os_prepare_suspend(struct kbase_device *kbdev)
	enum kbase_vm_state prev_state;

	lockdep_assert_held(&arb_vm_state->vm_state_lock);
	if (kbdev->arb.arb_if) {
		if (kbdev->pm.arb_vm_state->vm_state == KBASE_VM_STATE_SUSPENDED)
			return;
	}
	if (kbdev->pm.arb_vm_state->vm_state == KBASE_VM_STATE_SUSPENDED)
		return;
	/* Block suspend OS function until we are in a stable state
	 * with vm_state_lock
	 */
@@ -791,7 +801,7 @@ void kbase_arbiter_pm_vm_event(struct kbase_device *kbdev, enum kbase_arbif_evt
{
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	if (!kbdev->arb.arb_if)
	if (!kbase_has_arbiter(kbdev))
		return;

	mutex_lock(&arb_vm_state->vm_state_lock);
@@ -911,7 +921,8 @@ static inline bool kbase_arbiter_pm_vm_gpu_assigned_locked(struct kbase_device *
 *
 * This function handles a suspend event from the driver,
 * communicating with the arbiter and waiting synchronously for the GPU
 * to be granted again depending on the VM state.
 * to be granted again depending on the VM state. Returns immediately
 * with success if there is no Arbiter.
 *
 * Return: 0 on success, else 1 if the suspend handler is not possible.
 */
@@ -921,58 +932,58 @@ int kbase_arbiter_pm_ctx_active_handle_suspend(struct kbase_device *kbdev,
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
	int res = 0;

	if (kbdev->arb.arb_if) {
		mutex_lock(&arb_vm_state->vm_state_lock);
		while (!kbase_arbiter_pm_vm_gpu_assigned_locked(kbdev)) {
			/* Update VM state since we have GPU work to do */
			if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE)
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPING_ACTIVE);
			else if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPED) {
				kbase_arbiter_pm_vm_set_state(kbdev,
					KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
				kbase_arbif_gpu_request(kbdev);
				start_request_timer(kbdev);
			} else if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING_WITH_GPU)
	if (!kbase_has_arbiter(kbdev))
		return res;

	mutex_lock(&arb_vm_state->vm_state_lock);
	while (!kbase_arbiter_pm_vm_gpu_assigned_locked(kbdev)) {
		/* Update VM state since we have GPU work to do */
		if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPING_IDLE)
			kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STOPPING_ACTIVE);
		else if (arb_vm_state->vm_state == KBASE_VM_STATE_STOPPED) {
			kbase_arbiter_pm_vm_set_state(kbdev, KBASE_VM_STATE_STOPPED_GPU_REQUESTED);
			kbase_arbif_gpu_request(kbdev);
			start_request_timer(kbdev);
		} else if (arb_vm_state->vm_state == KBASE_VM_STATE_INITIALIZING_WITH_GPU)
			break;

		if (suspend_handler != KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE) {
			/* In case of GPU lost, even if
			 * active_count > 0, we no longer have GPU
			 * access
			 */
			if (kbase_pm_is_gpu_lost(kbdev))
				res = 1;

			switch (suspend_handler) {
			case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
				res = 1;
				break;

		if (suspend_handler != KBASE_PM_SUSPEND_HANDLER_NOT_POSSIBLE) {
			/* In case of GPU lost, even if
			 * active_count > 0, we no longer have GPU
			 * access
			 */
			if (kbase_pm_is_gpu_lost(kbdev))
			case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
				if (kbdev->pm.active_count == 0)
					res = 1;

			switch (suspend_handler) {
			case KBASE_PM_SUSPEND_HANDLER_DONT_INCREASE:
				res = 1;
				break;
			case KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE:
				if (kbdev->pm.active_count == 0)
					res = 1;
				break;
			case KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED:
				break;
			default:
				WARN(1, "Unknown suspend_handler\n");
				res = 1;
				break;
			}
			break;
			case KBASE_PM_SUSPEND_HANDLER_VM_GPU_GRANTED:
				break;
			default:
				WARN(1, "Unknown suspend_handler\n");
				res = 1;
				break;
			}

			/* Need to synchronously wait for GPU assignment */
			atomic_inc(&kbdev->pm.gpu_users_waiting);
			mutex_unlock(&arb_vm_state->vm_state_lock);
			kbase_pm_unlock(kbdev);
			kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
			kbase_pm_lock(kbdev);
			mutex_lock(&arb_vm_state->vm_state_lock);
			atomic_dec(&kbdev->pm.gpu_users_waiting);
			break;
		}

		/* Need to synchronously wait for GPU assignment */
		atomic_inc(&kbdev->pm.gpu_users_waiting);
		mutex_unlock(&arb_vm_state->vm_state_lock);
		kbase_pm_unlock(kbdev);
		kbase_arbiter_pm_vm_wait_gpu_assignment(kbdev);
		kbase_pm_lock(kbdev);
		mutex_lock(&arb_vm_state->vm_state_lock);
		atomic_dec(&kbdev->pm.gpu_users_waiting);
	}
	mutex_unlock(&arb_vm_state->vm_state_lock);

	return res;
}


@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2020-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2020-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -50,14 +50,22 @@ static struct kbase_clk_rate_trace_op_conf *
get_clk_rate_trace_callbacks(__maybe_unused struct kbase_device *kbdev)
{
	/* base case */
	const void *arbiter_if_node;
	struct kbase_clk_rate_trace_op_conf *callbacks =
		(struct kbase_clk_rate_trace_op_conf *)CLK_RATE_TRACE_OPS;
#if defined(CONFIG_MALI_ARBITER_SUPPORT) && defined(CONFIG_OF)
	const void *arbiter_if_node;

	/* Nothing left to do here if there is no Arbiter/virtualization or if
	 * CONFIG_OF is not enabled.
	 */
	if (!IS_ENABLED(CONFIG_OF))
		return callbacks;

	if (WARN_ON(!kbdev) || WARN_ON(!kbdev->dev))
		return callbacks;

	if (!kbase_has_arbiter(kbdev))
		return callbacks;

	arbiter_if_node = of_get_property(kbdev->dev->of_node, "arbiter-if", NULL);
	if (!arbiter_if_node)
		arbiter_if_node = of_get_property(kbdev->dev->of_node, "arbiter_if", NULL);
@@ -69,8 +77,6 @@ get_clk_rate_trace_callbacks(__maybe_unused struct kbase_device *kbdev)
		dev_dbg(kbdev->dev,
			"Arbitration supported but disabled by platform. Leaving clk rate callbacks as default.\n");

#endif

	return callbacks;
}

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -48,7 +48,7 @@ int kbase_backend_gpuprops_get(struct kbase_device *kbdev, struct kbasep_gpuprop
	/* Not a valid register on TMIX */

	/* TGOx specific register */
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_THREAD_TLS_ALLOC))
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_THREAD_TLS_ALLOC))
		regdump->thread_tls_alloc =
			kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(THREAD_TLS_ALLOC));
#endif /* !MALI_USE_CSF */
@@ -64,7 +64,7 @@ int kbase_backend_gpuprops_get(struct kbase_device *kbdev, struct kbasep_gpuprop
	/* AMBA_FEATURES enum is mapped to COHERENCY_FEATURES enum */
	regdump->coherency_features = KBASE_REG_READ(kbdev, GPU_CONTROL_ENUM(COHERENCY_FEATURES));

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_CORE_FEATURES))
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_CORE_FEATURES))
		regdump->core_features = KBASE_REG_READ(kbdev, GPU_CONTROL_ENUM(CORE_FEATURES));

#if MALI_USE_CSF
@@ -116,7 +116,7 @@ int kbase_backend_gpuprops_get_curr_config(struct kbase_device *kbdev,
int kbase_backend_gpuprops_get_l2_features(struct kbase_device *kbdev,
					   struct kbasep_gpuprops_regdump *regdump)
{
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG)) {
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_L2_CONFIG)) {
		regdump->l2_features = KBASE_REG_READ(kbdev, GPU_CONTROL_ENUM(L2_FEATURES));
		regdump->l2_config = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(L2_CONFIG));


@@ -98,82 +98,6 @@ static u64 kbase_job_write_affinity(struct kbase_device *kbdev, base_jd_core_req
	return affinity;
}

/**
 * select_job_chain() - Select which job chain to submit to the GPU
 * @katom: Pointer to the atom about to be submitted to the GPU
 *
 * Selects one of the fragment job chains attached to the special atom at the
 * end of a renderpass, or returns the address of the single job chain attached
 * to any other type of atom.
 *
 * Which job chain is selected depends upon whether the tiling phase of the
 * renderpass completed normally or was soft-stopped because it used too
 * much memory. It also depends upon whether one of the fragment job chains
 * has already been run as part of the same renderpass.
 *
 * Return: GPU virtual address of the selected job chain
 */
static u64 select_job_chain(struct kbase_jd_atom *katom)
{
	struct kbase_context *const kctx = katom->kctx;
	u64 jc = katom->jc;
	struct kbase_jd_renderpass *rp;

	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);

	if (!(katom->core_req & BASE_JD_REQ_END_RENDERPASS))
		return jc;

	compiletime_assert((1ull << (sizeof(katom->renderpass_id) * 8)) <=
				   ARRAY_SIZE(kctx->jctx.renderpasses),
			   "Should check invalid access to renderpasses");

	rp = &kctx->jctx.renderpasses[katom->renderpass_id];
	/* We can read a subset of renderpass state without holding
	 * higher-level locks (but not end_katom, for example).
	 * If the end-of-renderpass atom is running with as-yet indeterminate
	 * OOM state then assume that the start atom was not soft-stopped.
	 */
	switch (rp->state) {
	case KBASE_JD_RP_OOM:
		/* Tiling ran out of memory.
		 * Start of incremental rendering, used once.
		 */
		jc = katom->jc_fragment.norm_read_forced_write;
		break;
	case KBASE_JD_RP_START:
	case KBASE_JD_RP_PEND_OOM:
		/* Tiling completed successfully first time.
		 * Single-iteration rendering, used once.
		 */
		jc = katom->jc_fragment.norm_read_norm_write;
		break;
	case KBASE_JD_RP_RETRY_OOM:
		/* Tiling ran out of memory again.
		 * Continuation of incremental rendering, used as
		 * many times as required.
		 */
		jc = katom->jc_fragment.forced_read_forced_write;
		break;
	case KBASE_JD_RP_RETRY:
	case KBASE_JD_RP_RETRY_PEND_OOM:
		/* Tiling completed successfully this time.
		 * End of incremental rendering, used once.
		 */
		jc = katom->jc_fragment.forced_read_norm_write;
		break;
	default:
		WARN_ON(1);
		break;
	}

	dev_dbg(kctx->kbdev->dev, "Selected job chain 0x%llx for end atom %pK in state %d\n", jc,
		(void *)katom, (int)rp->state);

	katom->jc = jc;
	return jc;
}

static inline bool kbasep_jm_wait_js_free(struct kbase_device *kbdev, unsigned int js,
					  struct kbase_context *kctx)
{
@@ -196,7 +120,7 @@ int kbase_job_hw_submit(struct kbase_device *kbdev, struct kbase_jd_atom *katom,
{
	struct kbase_context *kctx;
	u32 cfg;
	u64 const jc_head = select_job_chain(katom);
	u64 jc_head = katom->jc;
	u64 affinity;
	struct slot_rb *ptr_slot_rb = &kbdev->hwaccess.backend.slot_rb[js];

@@ -220,21 +144,21 @@ int kbase_job_hw_submit(struct kbase_device *kbdev, struct kbase_jd_atom *katom,
	 */
	cfg = (u32)kctx->as_nr;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION) &&
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_FLUSH_REDUCTION) &&
	    !(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
		cfg |= JS_CONFIG_ENABLE_FLUSH_REDUCTION;

	if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_START)) {
		/* Force a cache maintenance operation if the newly submitted
		 * katom to the slot is from a different kctx. For a JM GPU
		 * that has the feature BASE_HW_FEATURE_FLUSH_INV_SHADER_OTHER,
		 * that has the feature KBASE_HW_FEATURE_FLUSH_INV_SHADER_OTHER,
		 * applies a FLUSH_INV_SHADER_OTHER. Otherwise, do a
		 * FLUSH_CLEAN_INVALIDATE.
		 */
		u64 tagged_kctx = ptr_slot_rb->last_kctx_tagged;

		if (tagged_kctx != SLOT_RB_NULL_TAG_VAL && tagged_kctx != SLOT_RB_TAG_KCTX(kctx)) {
			if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_INV_SHADER_OTHER))
			if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_FLUSH_INV_SHADER_OTHER))
				cfg |= JS_CONFIG_START_FLUSH_INV_SHADER_OTHER;
			else
				cfg |= JS_CONFIG_START_FLUSH_CLEAN_INVALIDATE;
@@ -246,15 +170,14 @@ int kbase_job_hw_submit(struct kbase_device *kbdev, struct kbase_jd_atom *katom,
	if (0 != (katom->core_req & BASE_JD_REQ_SKIP_CACHE_END) &&
	    !(kbdev->serialize_jobs & KBASE_SERIALIZE_RESET))
		cfg |= JS_CONFIG_END_FLUSH_NO_ACTION;
	else if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_CLEAN_ONLY_SAFE))
	else if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_CLEAN_ONLY_SAFE))
		cfg |= JS_CONFIG_END_FLUSH_CLEAN;
	else
		cfg |= JS_CONFIG_END_FLUSH_CLEAN_INVALIDATE;

	cfg |= JS_CONFIG_THREAD_PRI(8);

	if ((katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED) ||
	    (katom->core_req & BASE_JD_REQ_END_RENDERPASS))
	if (katom->atom_flags & KBASE_KATOM_FLAG_PROTECTED)
		cfg |= JS_CONFIG_DISABLE_DESCRIPTOR_WR_BK;

	if (!ptr_slot_rb->job_chain_flag) {
@@ -268,7 +191,7 @@ int kbase_job_hw_submit(struct kbase_device *kbdev, struct kbase_jd_atom *katom,

	kbase_reg_write32(kbdev, JOB_SLOT_OFFSET(js, CONFIG_NEXT), cfg);

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION))
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_FLUSH_REDUCTION))
		kbase_reg_write32(kbdev, JOB_SLOT_OFFSET(js, FLUSH_ID_NEXT), katom->flush_id);

	/* Write an approximate start timestamp.
@@ -440,7 +363,7 @@ void kbase_job_done(struct kbase_device *kbdev, u32 done)
					 * jobs to hang. Reset GPU before allowing
					 * any other jobs on the slot to continue.
					 */
					if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_3076)) {
					if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TTRX_3076)) {
						if (completion_code == BASE_JD_EVENT_JOB_BUS_FAULT) {
							if (kbase_prepare_to_reset_gpu_locked(
								    kbdev, RESET_FLAGS_NONE))
@@ -740,66 +663,6 @@ void kbase_job_slot_ctx_priority_check_locked(struct kbase_context *kctx,
	}
}

static int softstop_start_rp_nolock(struct kbase_context *kctx, struct kbase_va_region *reg)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	struct kbase_jd_atom *katom;
	struct kbase_jd_renderpass *rp;

	lockdep_assert_held(&kbdev->hwaccess_lock);

	katom = kbase_gpu_inspect(kbdev, 1, 0);

	if (!katom) {
		dev_dbg(kctx->kbdev->dev, "No atom on job slot\n");
		return -ESRCH;
	}

	if (!(katom->core_req & BASE_JD_REQ_START_RENDERPASS)) {
		dev_dbg(kctx->kbdev->dev, "Atom %pK on job slot is not start RP\n", (void *)katom);
		return -EPERM;
	}

	compiletime_assert((1ull << (sizeof(katom->renderpass_id) * 8)) <=
				   ARRAY_SIZE(kctx->jctx.renderpasses),
			   "Should check invalid access to renderpasses");

	rp = &kctx->jctx.renderpasses[katom->renderpass_id];
	if (WARN_ON(rp->state != KBASE_JD_RP_START && rp->state != KBASE_JD_RP_RETRY))
		return -EINVAL;

	dev_dbg(kctx->kbdev->dev, "OOM in state %d with region %pK\n", (int)rp->state, (void *)reg);

	if (WARN_ON(katom != rp->start_katom))
		return -EINVAL;

	dev_dbg(kctx->kbdev->dev, "Adding region %pK to list %pK\n", (void *)reg,
		(void *)&rp->oom_reg_list);
	list_move_tail(&reg->link, &rp->oom_reg_list);
	dev_dbg(kctx->kbdev->dev, "Added region to list\n");

	rp->state = (rp->state == KBASE_JD_RP_START ? KBASE_JD_RP_PEND_OOM :
						      KBASE_JD_RP_RETRY_PEND_OOM);

	kbase_job_slot_softstop(kbdev, 1, katom);

	return 0;
}

int kbase_job_slot_softstop_start_rp(struct kbase_context *const kctx,
				     struct kbase_va_region *const reg)
{
	struct kbase_device *const kbdev = kctx->kbdev;
	int err;
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	err = softstop_start_rp_nolock(kctx, reg);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

	return err;
}

void kbase_jm_wait_for_zero_jobs(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
@@ -839,7 +702,7 @@ u32 kbase_backend_get_current_flush_id(struct kbase_device *kbdev)
{
	u32 flush_id = 0;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_FLUSH_REDUCTION)) {
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_FLUSH_REDUCTION)) {
		mutex_lock(&kbdev->pm.lock);
		if (kbdev->pm.backend.gpu_powered)
			flush_id = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(LATEST_FLUSH));
@@ -1085,7 +948,7 @@ static void kbasep_reset_timeout_worker(struct work_struct *data)
	/* The flush has completed so reset the active indicator */
	kbdev->irq_reset_flush = false;

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8463)) {
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TMIX_8463)) {
		u64 val;
		const u32 timeout_us =
			kbase_get_timeout_ms(kbdev, KBASE_CLEAN_CACHE_TIMEOUT) * USEC_PER_MSEC;
@@ -1268,14 +1131,12 @@ bool kbase_prepare_to_reset_gpu_locked(struct kbase_device *kbdev, unsigned int
{
	unsigned int i;

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_pm_is_gpu_lost(kbdev)) {
		/* GPU access has been removed, reset will be done by
		 * Arbiter instead
		 */
		return false;
	}
#endif

	if (flags & RESET_FLAGS_HWC_UNRECOVERABLE_ERROR)
		kbase_instr_hwcnt_on_unrecoverable_error(kbdev);
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -425,7 +425,7 @@ static void kbase_gpu_release_atom(struct kbase_device *kbdev, struct kbase_jd_a
		}
	}

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TGOX_R1_1234)) {
		if (katom->atom_flags & KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT) {
			kbase_pm_protected_l2_override(kbdev, false);
			katom->atom_flags &= ~KBASE_KATOM_FLAG_HOLDING_L2_REF_PROT;
@@ -698,7 +698,7 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev, struct kbas

		kbase_pm_protected_entry_override_disable(kbdev);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
		if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TGOX_R1_1234)) {
			/*
			 * Power on L2 caches; this will also result in the
			 * correct value written to coherency enable register.
@@ -714,13 +714,13 @@ static int kbase_jm_enter_protected_mode(struct kbase_device *kbdev, struct kbas

		katom[idx]->protected_state.enter = KBASE_ATOM_ENTER_PROTECTED_FINISHED;

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234))
		if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TGOX_R1_1234))
			return -EAGAIN;

		/* ***TRANSITION TO HIGHER STATE*** */
		fallthrough;
	case KBASE_ATOM_ENTER_PROTECTED_FINISHED:
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TGOX_R1_1234)) {
		if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TGOX_R1_1234)) {
			/*
			 * Check that L2 caches are powered and, if so,
			 * enter protected mode.
@@ -864,11 +864,7 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)

	lockdep_assert_held(&kbdev->hwaccess_lock);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_reset_gpu_is_active(kbdev) || kbase_is_gpu_removed(kbdev))
#else
	if (kbase_reset_gpu_is_active(kbdev))
#endif
	if (kbase_reset_gpu_is_active(kbdev) || (kbase_is_gpu_removed(kbdev)))
		return;

	for (js = 0; js < kbdev->gpu_props.num_job_slots; js++) {
@@ -896,7 +892,7 @@ void kbase_backend_slot_update(struct kbase_device *kbdev)
				break;

			case KBASE_ATOM_GPU_RB_WAITING_BLOCKED:
				if (kbase_js_atom_blocked_on_x_dep(katom[idx]))
				if (katom[idx]->atom_flags & KBASE_KATOM_FLAG_X_DEP_BLOCKED)
					break;

				katom[idx]->gpu_rb_state =
@@ -1236,7 +1232,7 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, unsigned int js, u32 comp
	 * When a hard-stop is followed close after a soft-stop, the completion
	 * code may be set to STOPPED, even though the job is terminated
	 */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TMIX_8438)) {
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TMIX_8438)) {
		if (completion_code == BASE_JD_EVENT_STOPPED &&
		    (katom->atom_flags & KBASE_KATOM_FLAG_BEEN_HARD_STOPPED)) {
			completion_code = BASE_JD_EVENT_TERMINATED;
@@ -1331,6 +1327,9 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, unsigned int js, u32 comp
		dev_dbg(kbdev->dev, "Update job chain address of atom %pK to resume from 0x%llx\n",
			(void *)katom, job_tail);

		/* Some of the job has been executed, so we update the job chain address to where
		 * we should resume from
		 */
		katom->jc = job_tail;
		KBASE_KTRACE_ADD_JM_SLOT(kbdev, JM_UPDATE_HEAD, katom->kctx, katom, job_tail, js);
	}
@@ -1381,6 +1380,8 @@ void kbase_gpu_complete_hw(struct kbase_device *kbdev, unsigned int js, u32 comp
			dev_dbg(kbdev->dev, "Cross-slot dependency %pK has become runnable.\n",
				(void *)katom);

			/* Cross-slot dependency has now become runnable. Try to submit it. */

			/* Check if there are lower priority jobs to soft stop */
			kbase_job_slot_ctx_priority_check_locked(kctx, katom);

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2014-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2014-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -46,7 +46,7 @@ static inline bool timer_callback_should_run(struct kbase_device *kbdev, int nr_
}
#endif /* CONFIG_MALI_BIFROST_DEBUG */

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_9435)) {
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_9435)) {
		/* Timeouts would have to be 4x longer (due to micro-
		 * architectural design) to support OpenCL conformance tests, so
		 * only run the timer when there's:
@@ -100,7 +100,7 @@ static enum hrtimer_restart timer_callback(struct hrtimer *timer)
				/* The current version of the model doesn't support
				 * Soft-Stop
				 */
				if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_5736)) {
				if (!kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_5736)) {
					u32 ticks = atom->ticks++;

#if !defined(CONFIG_MALI_JOB_DUMP) && !defined(CONFIG_MALI_VECTOR_DUMP)

@@ -1953,7 +1953,8 @@ void midgard_model_read_reg(void *h, u32 addr, u32 *const value)
		*value = dummy->control_reg_values->gpu_features_lo;
	} else if (addr == GPU_CONTROL_REG(GPU_FEATURES_HI)) {
		*value = dummy->control_reg_values->gpu_features_hi;
	} else {
	}
	else {
		model_error_log(
			KBASE_CORE,
			"Dummy model register access: Reading unsupported register 0x%x. Returning 0\n",

@@ -36,9 +36,7 @@
#include <linux/version_compat_defs.h>
#include <linux/pm_runtime.h>
#include <mali_kbase_reset_gpu.h>
#ifdef CONFIG_MALI_ARBITER_SUPPORT
#include <csf/mali_kbase_csf_scheduler.h>
#endif /* !CONFIG_MALI_ARBITER_SUPPORT */
#endif /* !MALI_USE_CSF */
#include <hwcnt/mali_kbase_hwcnt_context.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
@@ -100,10 +98,8 @@ void kbase_pm_register_access_enable(struct kbase_device *kbdev)
	if (callbacks)
		callbacks->power_on_callback(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (WARN_ON(kbase_pm_is_gpu_lost(kbdev)))
		dev_err(kbdev->dev, "Attempting to power on while GPU lost\n");
#endif

	kbdev->pm.backend.gpu_powered = true;
}
@@ -136,9 +132,7 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
	INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work, kbase_pm_gpu_poweroff_wait_wq);

	kbdev->pm.backend.ca_cores_enabled = ~0ull;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	kbase_pm_set_gpu_lost(kbdev, false);
#endif
	init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);

#if !MALI_USE_CSF
@@ -180,15 +174,18 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
		kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	kbdev->pm.backend.gpu_sleep_supported =
		kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_GPU_SLEEP) &&
		!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_1997) &&
		kbdev->pm.backend.callback_power_runtime_gpu_active &&
		kbdev->pm.backend.callback_power_runtime_gpu_idle;
	kbdev->pm.backend.gpu_sleep_allowed = 0;
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_GPU_SLEEP) &&
	    !kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TURSEHW_1997) &&
	    kbdev->pm.backend.callback_power_runtime_gpu_active &&
	    kbdev->pm.backend.callback_power_runtime_gpu_idle)
		set_bit(KBASE_GPU_SUPPORTS_GPU_SLEEP, &kbdev->pm.backend.gpu_sleep_allowed);

	kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa =
		kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TITANHW_2938) &&
		kbdev->pm.backend.gpu_sleep_supported;
		kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TITANHW_2938) &&
		test_bit(KBASE_GPU_SUPPORTS_GPU_SLEEP, &kbdev->pm.backend.gpu_sleep_allowed);

	/* The FW Sleep-on-Idle feature is kept disabled */
#endif

	if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED))
@@ -196,14 +193,14 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)

	/* WA1: L2 always_on for GPUs being affected by GPU2017-1336 */
	if (!IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE)) {
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336))
		if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_GPU2017_1336))
			kbdev->pm.backend.l2_always_on = true;

		return 0;
	}

	/* WA3: Clock slow down for GPUs being affected by GPU2017-1336 */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_GPU2017_1336)) {
		kbdev->pm.backend.gpu_clock_slow_down_wa = true;
		kbdev->pm.backend.gpu_clock_slow_down_desired = true;
		INIT_WORK(&kbdev->pm.backend.gpu_clock_control_work,
@@ -348,13 +345,11 @@ static void pm_handle_power_off(struct kbase_device *kbdev)
		 */
		wait_for_mmu_fault_handling_in_gpu_poweroff_wait_wq(kbdev);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
		/* poweron_required may have changed while pm lock
		 * was released.
		 */
		if (kbase_pm_is_gpu_lost(kbdev))
			backend->poweron_required = false;
#endif

		/* Turn off clock now that fault have been handled. We
		 * dropped locks so poweron_required may have changed -
@@ -948,13 +943,11 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
	/* System resume callback has begun */
	kbdev->pm.resuming = true;
	kbdev->pm.suspending = false;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (kbase_pm_is_gpu_lost(kbdev)) {
		dev_dbg(kbdev->dev, "%s: GPU lost in progress\n", __func__);
		kbase_pm_unlock(kbdev);
		return;
	}
#endif
	kbase_pm_do_poweron(kbdev, true);

#if !MALI_USE_CSF
@@ -964,7 +957,6 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
	kbase_pm_unlock(kbdev);
}

#ifdef CONFIG_MALI_ARBITER_SUPPORT
void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
{
	unsigned long flags;
@@ -975,8 +967,10 @@ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
#endif
	struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

	if (!kbdev->arb.arb_if)
	if (!kbase_has_arbiter(kbdev)) {
		dev_warn(kbdev->dev, "%s called with no active arbiter!\n", __func__);
		return;
	}

	mutex_lock(&kbdev->pm.lock);
	mutex_lock(&arb_vm_state->vm_state_lock);
@@ -991,7 +985,8 @@ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)

#if MALI_USE_CSF
	/* Full GPU reset will have been done by hypervisor, so cancel */
	kbase_reset_gpu_prevent_and_wait(kbdev);
	if (kbase_reset_gpu_prevent_and_wait(kbdev))
		dev_warn(kbdev->dev, "Failed to prevent GPU reset.");

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	kbase_csf_scheduler_spin_lock(kbdev, &flags_sched);
@@ -1041,7 +1036,6 @@ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
	mutex_unlock(&arb_vm_state->vm_state_lock);
	mutex_unlock(&kbdev->pm.lock);
}
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev)
@@ -1253,4 +1247,5 @@ out:

	return ret;
}

#endif

@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2013-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2013-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -55,11 +55,18 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
	unsigned long flags;
#if MALI_USE_CSF
	u64 old_core_mask = 0;
#endif
	bool mmu_sync_needed = false;

	if (!IS_ENABLED(CONFIG_MALI_BIFROST_NO_MALI) &&
	    kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_GPU2019_3901)) {
		mmu_sync_needed = true;
		down_write(&kbdev->csf.mmu_sync_sem);
	}
#endif
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

#if MALI_USE_CSF

	if (!(core_mask & kbdev->pm.debug_core_mask)) {
		dev_err(kbdev->dev,
			"OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
@@ -98,6 +105,9 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
				old_core_mask, core_mask);
		}
	}

	if (mmu_sync_needed)
		up_write(&kbdev->csf.mmu_sync_sem);
#endif

	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n", pm_backend->ca_cores_enabled);
@@ -105,6 +115,10 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
	return;
unlock:
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
#if MALI_USE_CSF
	if (mmu_sync_needed)
		up_write(&kbdev->csf.mmu_sync_sem);
#endif
}
KBASE_EXPORT_TEST_API(kbase_devfreq_set_core_mask);
#endif

@@ -114,6 +114,27 @@ enum kbase_pm_runtime_suspend_abort_reason {
	ABORT_REASON_NON_IDLE_CGS
};

/* The following indices point to the corresponding bits stored in
 * &kbase_pm_backend_data.gpu_sleep_allowed. They denote the conditions that
 * would be checked against to determine the level of support for GPU sleep
 * and firmware sleep-on-idle.
 */
#define KBASE_GPU_SUPPORTS_GPU_SLEEP ((uint8_t)0)
#define KBASE_GPU_SUPPORTS_FW_SLEEP_ON_IDLE ((uint8_t)1)
#define KBASE_GPU_PERF_COUNTERS_COLLECTION_ENABLED ((uint8_t)2)
#define KBASE_GPU_IGNORE_IDLE_EVENT ((uint8_t)3)
#define KBASE_GPU_NON_IDLE_OFF_SLOT_GROUPS_AVAILABLE ((uint8_t)4)

/* FW sleep-on-idle could be enabled if
 * &kbase_pm_backend_data.gpu_sleep_allowed is equal to this value.
 */
#define KBASE_GPU_FW_SLEEP_ON_IDLE_ALLOWED                             \
	((uint8_t)((1 << KBASE_GPU_SUPPORTS_GPU_SLEEP) |               \
		   (1 << KBASE_GPU_SUPPORTS_FW_SLEEP_ON_IDLE) |        \
		   (0 << KBASE_GPU_PERF_COUNTERS_COLLECTION_ENABLED) | \
		   (0 << KBASE_GPU_IGNORE_IDLE_EVENT) |                \
		   (0 << KBASE_GPU_NON_IDLE_OFF_SLOT_GROUPS_AVAILABLE)))
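Per the comment above, firmware sleep-on-idle is permitted only when the
gpu_sleep_allowed bitmask matches this value exactly, i.e. both support bits
are set and every blocking condition is clear. A minimal sketch of such a
check (illustrative helper, not part of this commit):

	static inline bool fw_sleep_on_idle_allowed(const struct kbase_device *kbdev)
	{
		/* Exact match: support bits set, blocking-condition bits clear */
		return kbdev->pm.backend.gpu_sleep_allowed ==
		       KBASE_GPU_FW_SLEEP_ON_IDLE_ALLOWED;
	}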
|
||||
|
||||
/**
|
||||
* struct kbasep_pm_metrics - Metrics data collected for use by the power
|
||||
* management framework.
|
||||
@@ -304,7 +325,7 @@ union kbase_pm_policy_data {
|
||||
* called previously.
|
||||
* See &struct kbase_pm_callback_conf.
|
||||
* @ca_cores_enabled: Cores that are currently available
|
||||
* @apply_hw_issue_TITANHW_2938_wa: Indicates if the workaround for BASE_HW_ISSUE_TITANHW_2938
|
||||
* @apply_hw_issue_TITANHW_2938_wa: Indicates if the workaround for KBASE_HW_ISSUE_TITANHW_2938
|
||||
* needs to be applied when unmapping memory from GPU.
|
||||
* @mcu_state: The current state of the micro-control unit, only applicable
|
||||
* to GPUs that have such a component
|
||||
@@ -350,10 +371,9 @@ union kbase_pm_policy_data {
|
||||
* @core_idle_work: Work item used to wait for undesired cores to become inactive.
|
||||
* The work item is enqueued when Host controls the power for
|
||||
* shader cores and down scaling of cores is performed.
|
||||
* @gpu_sleep_supported: Flag to indicate that if GPU sleep feature can be
|
||||
* supported by the kernel driver or not. If this
|
||||
* flag is not set, then HW state is directly saved
|
||||
* when GPU idle notification is received.
|
||||
* @gpu_sleep_allowed: Bitmask to indicate the conditions that would be
|
||||
* used to determine what support for GPU sleep is
|
||||
* available.
|
||||
* @gpu_sleep_mode_active: Flag to indicate that the GPU needs to be in sleep
|
||||
* mode. It is set when the GPU idle notification is
|
||||
* received and is cleared when HW state has been
|
||||
@@ -497,7 +517,7 @@ struct kbase_pm_backend_data {
|
||||
struct work_struct core_idle_work;
|
||||
|
||||
#ifdef KBASE_PM_RUNTIME
|
||||
bool gpu_sleep_supported;
|
||||
unsigned long gpu_sleep_allowed;
|
||||
bool gpu_sleep_mode_active;
|
||||
bool exit_gpu_sleep_mode;
|
||||
bool gpu_idled;
|
||||
|
||||
@@ -47,9 +47,7 @@
|
||||
#include <backend/gpu/mali_kbase_pm_internal.h>
|
||||
#include <backend/gpu/mali_kbase_l2_mmu_config.h>
|
||||
#include <mali_kbase_dummy_job_wa.h>
|
||||
#ifdef CONFIG_MALI_ARBITER_SUPPORT
|
||||
#include <arbiter/mali_kbase_arbiter_pm.h>
|
||||
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
|
||||
|
||||
#if MALI_USE_CSF
|
||||
#include <linux/delay.h>
|
||||
@@ -615,11 +613,11 @@ static void kbase_pm_l2_config_override(struct kbase_device *kbdev)
	/*
	 * Skip if it is not supported
	 */
	if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_L2_CONFIG))
	if (!kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_L2_CONFIG))
		return;

#if MALI_USE_CSF
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PBHA_HWU)) {
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_PBHA_HWU)) {
		val = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(L2_CONFIG));
		kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(L2_CONFIG),
				  L2_CONFIG_PBHA_HWU_SET(val, kbdev->pbha_propagate_bits));
@@ -743,16 +741,8 @@ bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev, enum kbase_mcu_state s
}

#ifdef KBASE_PM_RUNTIME
/**
 * kbase_pm_enable_mcu_db_notification - Enable the Doorbell notification on
 *                                       MCU side
 *
 * @kbdev: Pointer to the device.
 *
 * This function is called to re-enable the Doorbell notification on MCU side
 * when MCU needs to become active again.
 */
static void kbase_pm_enable_mcu_db_notification(struct kbase_device *kbdev)

void kbase_pm_enable_mcu_db_notification(struct kbase_device *kbdev)
{
	u32 val = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(MCU_CONTROL));

@@ -778,7 +768,7 @@ static void wait_mcu_as_inactive(struct kbase_device *kbdev)
		kbase_get_timeout_ms(kbdev, KBASE_AS_INACTIVE_TIMEOUT) * USEC_PER_MSEC;
	lockdep_assert_held(&kbdev->hwaccess_lock);

	if (!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_2716))
	if (!kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TURSEHW_2716))
		return;

	/* Wait for the AS_ACTIVE_INT bit to become 0 for the AS used by MCU FW */
@@ -927,6 +917,18 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
		if (kbase_pm_is_mcu_desired(kbdev) &&
		    !backend->policy_change_clamp_state_to_off &&
		    backend->l2_state == KBASE_L2_ON) {
			kbdev->csf.mcu_halted = false;

			/* Ensure that FW would not go to sleep immediately after
			 * resumption.
			 */
			kbase_csf_firmware_global_input_mask(&kbdev->csf.global_iface,
							     GLB_REQ,
							     GLB_REQ_REQ_IDLE_DISABLE,
							     GLB_REQ_IDLE_DISABLE_MASK);
			atomic_set(&kbdev->csf.scheduler.gpu_idle_timer_enabled, false);
			atomic_set(&kbdev->csf.scheduler.fw_soi_enabled, false);

			kbase_csf_firmware_trigger_reload(kbdev);
			backend->mcu_state = KBASE_MCU_PEND_ON_RELOAD;
		}
@@ -1005,7 +1007,6 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)

	case KBASE_MCU_ON:
		backend->shaders_desired_mask = kbase_pm_ca_get_core_mask(kbdev);

		if (!kbase_pm_is_mcu_desired(kbdev))
			backend->mcu_state = KBASE_MCU_ON_HWCNT_DISABLE;
		else if (kbdev->csf.firmware_hctl_core_pwr) {
@@ -1185,7 +1186,7 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
		break;

	case KBASE_MCU_POWER_DOWN:
		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TITANHW_2922)) {
		if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TITANHW_2922)) {
			if (!kbdev->csf.firmware_hctl_core_pwr)
				kbasep_pm_toggle_power_interrupt(kbdev, true);
			backend->mcu_state = KBASE_MCU_OFF;
@@ -1206,7 +1207,20 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
#ifdef KBASE_PM_RUNTIME
	case KBASE_MCU_ON_SLEEP_INITIATE:
		if (!kbase_pm_is_mcu_desired(kbdev)) {
			kbase_csf_firmware_trigger_mcu_sleep(kbdev);
			bool db_notif_disabled = false;

			if (likely(test_bit(KBASE_GPU_SUPPORTS_FW_SLEEP_ON_IDLE,
					    &kbdev->pm.backend.gpu_sleep_allowed)))
				db_notif_disabled =
					kbase_reg_read32(kbdev,
							 GPU_CONTROL_ENUM(MCU_CONTROL)) &
					MCU_CNTRL_DOORBELL_DISABLE_MASK;

			/* If DB notification is enabled on FW side then send a sleep
			 * request to FW.
			 */
			if (!db_notif_disabled)
				kbase_csf_firmware_trigger_mcu_sleep(kbdev);
			backend->mcu_state = KBASE_MCU_ON_PEND_SLEEP;
		} else
			backend->mcu_state = KBASE_MCU_ON_HWCNT_ENABLE;
@@ -1240,6 +1254,16 @@ static int kbase_pm_mcu_update_state(struct kbase_device *kbdev)
	case KBASE_MCU_IN_SLEEP:
		if (kbase_pm_is_mcu_desired(kbdev) && backend->l2_state == KBASE_L2_ON) {
			wait_mcu_as_inactive(kbdev);
			/* Ensure that FW would not go to sleep immediately after
			 * resumption.
			 */
			kbase_csf_firmware_global_input_mask(&kbdev->csf.global_iface,
							     GLB_REQ,
							     GLB_REQ_REQ_IDLE_DISABLE,
							     GLB_REQ_IDLE_DISABLE_MASK);
			atomic_set(&kbdev->csf.scheduler.gpu_idle_timer_enabled, false);
			atomic_set(&kbdev->csf.scheduler.fw_soi_enabled, false);

			KBASE_TLSTREAM_TL_KBASE_CSFFW_FW_REQUEST_WAKEUP(
				kbdev, kbase_backend_get_cycle_cnt(kbdev));
			kbase_pm_enable_mcu_db_notification(kbdev);
@@ -1384,20 +1408,6 @@ static bool need_tiler_control(struct kbase_device *kbdev)
#endif
}

/**
 * hctl_l2_power_down - Initiate power down of L2 cache
 *
 * @kbdev: The kbase device structure for the device.
 *
 * This function initiates the power down of L2 cache when Host controls the power
 * for Tiler block. The function expects the power down of Tiler to already have
 * been initiated and it triggers the L2 power down only after the power down for
 * Tiler is complete.
 * The function shall be called only if L2 is in ready state.
 */
static void hctl_l2_power_down(struct kbase_device *kbdev)
{
}

/**
 * hctl_tiler_power_up_done - Check and/or initiate power up of Tiler
@@ -1444,7 +1454,6 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
	u64 l2_trans = kbase_pm_get_trans_cores(kbdev, KBASE_PM_CORE_L2);
	u64 l2_ready = kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_L2);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	/*
	 * kbase_pm_get_ready_cores and kbase_pm_get_trans_cores
	 * are vulnerable to corruption if gpu is lost
@@ -1473,7 +1482,6 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
		}
		break;
	}
#endif

	/* mask off ready from trans in case transitions finished
	 * between the register reads
@@ -1574,7 +1582,7 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)

	case KBASE_L2_RESTORE_CLOCKS:
		/* We always assume only GPUs being affected by
		 * BASE_HW_ISSUE_GPU2017_1336 fall into this state
		 * KBASE_HW_ISSUE_GPU2017_1336 fall into this state
		 */
		WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);

@@ -1676,7 +1684,7 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)

	case KBASE_L2_SLOW_DOWN_CLOCKS:
		/* We always assume only GPUs being affected by
		 * BASE_HW_ISSUE_GPU2017_1336 fall into this state
		 * KBASE_HW_ISSUE_GPU2017_1336 fall into this state
		 */
		WARN_ON_ONCE(!kbdev->pm.backend.gpu_clock_slow_down_wa);

@@ -1725,11 +1733,6 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)

	case KBASE_L2_PEND_OFF:
		if (likely(!backend->l2_always_on)) {
			if (need_tiler_control(kbdev) && l2_ready) {
				hctl_l2_power_down(kbdev);
				break;
			}

			if (l2_trans || l2_ready)
				break;
		} else if (kbdev->cache_clean_in_progress)
@@ -1744,11 +1747,10 @@ static int kbase_pm_l2_update_state(struct kbase_device *kbdev)
		}
#endif
		/* Disabling MCU after L2 cache power down is to address
		 * BASE_HW_ISSUE_TITANHW_2922 hardware issue.
		 * KBASE_HW_ISSUE_TITANHW_2922 hardware issue.
		 */
		if (backend->l2_force_off_after_mcu_halt) {
			kbase_csf_firmware_disable_mcu(kbdev);
			kbase_csf_firmware_disable_mcu_wait(kbdev);
			kbase_csf_stop_firmware_and_wait(kbdev);
			WARN_ON_ONCE(backend->mcu_state != KBASE_MCU_OFF);
			backend->l2_force_off_after_mcu_halt = false;
		}
@@ -1895,12 +1897,7 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
	 * kbase_pm_get_ready_cores and kbase_pm_get_trans_cores
	 * are vulnerable to corruption if gpu is lost
	 */
	if (kbase_is_gpu_removed(kbdev)
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	    || kbase_pm_is_gpu_lost(kbdev)) {
#else
	) {
#endif
	if (kbase_is_gpu_removed(kbdev) || kbase_pm_is_gpu_lost(kbdev)) {
		backend->shaders_state = KBASE_SHADERS_OFF_CORESTACK_OFF;
		dev_dbg(kbdev->dev, "GPU lost has occurred - shaders off\n");
		break;
@@ -2005,9 +2002,8 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
				kbdev, KBASE_PM_POLICY_EVENT_IDLE);

		if (kbdev->pm.backend.protected_transition_override ||
#ifdef CONFIG_MALI_ARBITER_SUPPORT
		    kbase_pm_is_suspending(kbdev) || kbase_pm_is_gpu_lost(kbdev) ||
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
		    (kbase_has_arbiter(kbdev) && (kbase_pm_is_suspending(kbdev) ||
						  kbase_pm_is_gpu_lost(kbdev))) ||
		    !stt->configured_ticks || WARN_ON(stt->cancel_queued)) {
			backend->shaders_state =
				KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
@@ -2074,10 +2070,9 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
				kbdev, KBASE_PM_POLICY_EVENT_TIMER_MISS);

			backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
		} else if (kbase_pm_is_suspending(kbdev) || kbase_pm_is_gpu_lost(kbdev)) {
		} else if (kbase_has_arbiter(kbdev) &&
			   (kbase_pm_is_suspending(kbdev) || kbase_pm_is_gpu_lost(kbdev))) {
			backend->shaders_state = KBASE_SHADERS_WAIT_FINISHED_CORESTACK_ON;
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
		}
		break;

@@ -2096,7 +2091,7 @@ static int kbase_pm_shaders_update_state(struct kbase_device *kbdev)
		if (!backend->partial_shaderoff)
			shader_poweroff_timer_queue_cancel(kbdev);

		if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_921)) {
		if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TTRX_921)) {
			kbase_gpu_start_cache_clean_nolock(kbdev,
							   GPU_COMMAND_CACHE_CLN_INV_L2);
			backend->shaders_state = KBASE_SHADERS_L2_FLUSHING_CORESTACK_ON;
@@ -2446,6 +2441,9 @@ void kbase_pm_reset_complete(struct kbase_device *kbdev)
	backend->in_reset = false;
#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
	backend->gpu_wakeup_override = false;
	backend->db_mirror_interrupt_enabled = false;
	backend->gpu_sleep_mode_active = false;
	backend->exit_gpu_sleep_mode = false;
#endif
	kbase_pm_update_state(kbdev);

@@ -2670,12 +2668,9 @@ static int pm_wait_for_poweroff_work_complete(struct kbase_device *kbdev, bool k
	const long timeout = kbase_csf_timeout_in_jiffies(
		kbase_get_timeout_ms(kbdev, CSF_PM_TIMEOUT) + extra_wait_time_ms);
#else
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	/* Handling of timeout error isn't supported for arbiter builds */
	const long timeout = MAX_SCHEDULE_TIMEOUT;
#else
	const long timeout = (long)msecs_to_jiffies(PM_TIMEOUT_MS);
#endif
	const long timeout = kbase_has_arbiter(kbdev) ? MAX_SCHEDULE_TIMEOUT :
							(long)msecs_to_jiffies(PM_TIMEOUT_MS);
#endif
	int err = 0;

@@ -2796,7 +2791,8 @@ static void update_user_reg_page_mapping(struct kbase_device *kbdev)
		 * when the context (user process) needs to access the page.
		 */
		unmap_mapping_range(kbdev->csf.user_reg.filp->f_inode->i_mapping,
				    kctx->csf.user_reg.file_offset << PAGE_SHIFT, PAGE_SIZE, 1);
				    (loff_t)kctx->csf.user_reg.file_offset << PAGE_SHIFT, PAGE_SIZE,
				    1);
		list_del_init(&kctx->csf.user_reg.link);
		dev_dbg(kbdev->dev, "Updated USER Reg page mapping of ctx %d_%d", kctx->tgid,
			kctx->id);
@@ -2823,12 +2819,10 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
#endif /* !MALI_USE_CSF */
	lockdep_assert_held(&kbdev->pm.lock);

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (WARN_ON(kbase_pm_is_gpu_lost(kbdev))) {
		dev_err(kbdev->dev, "%s: Cannot power up while GPU lost", __func__);
		return;
	}
#endif

	if (backend->gpu_powered) {
#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
@@ -2876,10 +2870,8 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
		 * consistent state
		 */
		kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
	}
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	else {
		if (kbdev->arb.arb_if) {
	} else {
		if (kbase_has_arbiter(kbdev)) {
			struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;

			/* In the case that the GPU has just been granted by
@@ -2895,8 +2887,8 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
	 * that a repartitioning occurred. In this case the current config
	 * should be read again.
	 */
	kbase_gpuprops_get_curr_config_props(kbdev, &kbdev->gpu_props.curr_config);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */
	if (kbase_has_arbiter(kbdev))
		kbase_gpuprops_get_curr_config_props(kbdev, &kbdev->gpu_props.curr_config);

	mutex_lock(&kbdev->mmu_hw_mutex);
	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -2988,12 +2980,7 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev)
	}
#endif

	if (kbase_is_gpu_removed(kbdev)
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	    || kbase_pm_is_gpu_lost(kbdev)) {
#else
	) {
#endif
	if (kbase_is_gpu_removed(kbdev) || kbase_pm_is_gpu_lost(kbdev)) {
		/* Ensure we unblock any threads that are stuck waiting
		 * for the GPU
		 */
@@ -3011,10 +2998,7 @@ bool kbase_pm_clock_off(struct kbase_device *kbdev)
	/* GPU is about to be turned off, switch to dummy page */
	update_user_reg_page_mapping(kbdev);
#endif

#ifdef CONFIG_MALI_ARBITER_SUPPORT
	kbase_arbiter_pm_vm_event(kbdev, KBASE_VM_GPU_IDLE_EVENT);
#endif /* CONFIG_MALI_ARBITER_SUPPORT */

	if (kbdev->pm.backend.callback_power_off)
		kbdev->pm.backend.callback_power_off(kbdev);
@@ -3068,6 +3052,7 @@ static enum hrtimer_restart kbasep_reset_timeout(struct hrtimer *timer)
	return HRTIMER_NORESTART;
}


static int kbase_set_gpu_quirks(struct kbase_device *kbdev)
{
#if MALI_USE_CSF
@@ -3097,7 +3082,7 @@ static int kbase_set_gpu_quirks(struct kbase_device *kbdev)
	kbdev->hw_quirks_gpu = hw_quirks_gpu;

#endif /* !MALI_USE_CSF */
	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_IDVS_GROUP_SIZE)) {
		u32 default_idvs_group_size = 0xF;
		u32 group_size = 0;

@@ -3131,10 +3116,10 @@ static int kbase_set_sc_quirks(struct kbase_device *kbdev)
	if (kbase_is_gpu_removed(kbdev))
		return -EIO;

	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TTRX_2968_TTRX_3162))
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_TTRX_2968_TTRX_3162))
		hw_quirks_sc |= SC_VAR_ALGORITHM;

	if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_TLS_HASHING))
	if (kbase_hw_has_feature(kbdev, KBASE_HW_FEATURE_TLS_HASHING))
		hw_quirks_sc |= SC_TLS_HASH_ENABLE;

	kbdev->hw_quirks_sc = hw_quirks_sc;
@@ -3153,7 +3138,7 @@ static int kbase_set_tiler_quirks(struct kbase_device *kbdev)
		return -EIO;

	/* Set tiler clock gate override if required */
	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_T76X_3953))
	if (kbase_hw_has_issue(kbdev, KBASE_HW_ISSUE_T76X_3953))
		hw_quirks_tiler |= TC_CLOCK_GATE_OVERRIDE;

	kbdev->hw_quirks_tiler = hw_quirks_tiler;
@@ -3370,9 +3355,8 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)
	/* The GPU doesn't seem to be responding to the reset so try a hard
	 * reset, but only when NOT in arbitration mode.
	 */
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	if (!kbdev->arb.arb_if) {
#endif

	if (!kbase_has_arbiter(kbdev)) {
		dev_err(kbdev->dev,
			"Failed to soft-reset GPU (timed out after %d ms), now attempting a hard reset\n",
			RESET_TIMEOUT);
@@ -3402,9 +3386,7 @@ static int kbase_pm_do_reset(struct kbase_device *kbdev)

	dev_err(kbdev->dev, "Failed to hard-reset the GPU (timed out after %d ms)\n",
		RESET_TIMEOUT);
#ifdef CONFIG_MALI_ARBITER_SUPPORT
	}
#endif

	return -EINVAL;
}
@@ -3487,6 +3469,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
	kbase_amba_set_shareable_cache_support(kbdev);
#if MALI_USE_CSF
	kbase_backend_update_gpu_timestamp_offset(kbdev);
	kbdev->csf.compute_progress_timeout_cc = 0;
#endif

	/* Sanity check protected mode was left after reset */

@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 * (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -821,6 +821,21 @@ bool kbase_pm_is_mcu_desired(struct kbase_device *kbdev);
 */
bool kbase_pm_is_mcu_inactive(struct kbase_device *kbdev, enum kbase_mcu_state state);

#ifdef KBASE_PM_RUNTIME

/**
 * kbase_pm_enable_mcu_db_notification - Enable the Doorbell notification on
 *                                       MCU side
 *
 * @kbdev: Pointer to the device.
 *
 * This function is called to re-enable the Doorbell notification on MCU side
 * when MCU needs to become active again.
 */
void kbase_pm_enable_mcu_db_notification(struct kbase_device *kbdev);

#endif /* KBASE_PM_RUNTIME */
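The .c hunk earlier in this commit shows only the first line of this function's body; a plausible completion (an assumption, not confirmed by this diff) would simply clear the doorbell-disable bit again:

void kbase_pm_enable_mcu_db_notification(struct kbase_device *kbdev)
{
	u32 val = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(MCU_CONTROL));

	/* Assumed write-back: clearing MCU_CNTRL_DOORBELL_DISABLE_MASK
	 * re-enables doorbell notifications on the MCU side.
	 */
	kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(MCU_CONTROL),
			  val & ~MCU_CNTRL_DOORBELL_DISABLE_MASK);
}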

/**
 * kbase_pm_idle_groups_sched_suspendable - Check whether the scheduler can be
 *                                          suspended to low power state when all
@@ -963,11 +978,29 @@ static inline bool kbase_pm_gpu_sleep_allowed(struct kbase_device *kbdev)
	 * A high positive value of autosuspend_delay can be used to keep the
	 * GPU in sleep state for a long time.
	 */
	if (unlikely(!kbdev->dev->power.autosuspend_delay ||
		     (kbdev->dev->power.autosuspend_delay < 0)))
	if (unlikely(kbdev->dev->power.autosuspend_delay <= 0))
		return false;

	return kbdev->pm.backend.gpu_sleep_supported;
	return test_bit(KBASE_GPU_SUPPORTS_GPU_SLEEP, &kbdev->pm.backend.gpu_sleep_allowed);
}

/**
 * kbase_pm_fw_sleep_on_idle_allowed - Check if FW sleep-on-idle could be enabled
 *
 * @kbdev: Device pointer
 *
 * This function should be called whenever the conditions that impact
 * FW sleep-on-idle support change so that it could be enabled/disabled
 * accordingly.
 *
 * Return: true if FW sleep-on-idle is allowed
 */
static inline bool kbase_pm_fw_sleep_on_idle_allowed(struct kbase_device *kbdev)
{
	if (unlikely(kbdev->dev->power.autosuspend_delay <= 0))
		return false;

	return kbdev->pm.backend.gpu_sleep_allowed == KBASE_GPU_FW_SLEEP_ON_IDLE_ALLOWED;
}
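Per the doc comment, callers are expected to re-evaluate this predicate whenever one of the gpu_sleep_allowed conditions changes. A hypothetical call site (the helper name is assumed, not from this diff) mirroring the fw_soi_enabled atomic toggled in the .c hunks above:

static void example_update_fw_soi(struct kbase_device *kbdev)
{
	bool allowed = kbase_pm_fw_sleep_on_idle_allowed(kbdev);

	/* Propagate the decision to the scheduler flag that the state
	 * machine sets via atomic_set() elsewhere in this commit.
	 */
	atomic_set(&kbdev->csf.scheduler.fw_soi_enabled, allowed);
}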

/**