Revert "FROMGIT: arm64: Work around Cortex-A510 erratum 2454944"

Revert submission 2302443

Reason for revert: Series is not queued in a maintainer tree and has not been posted to a public mailing list.
Reverted Changes:
Iffd38bf97:FROMGIT: arm64: Work around Cortex-A510 erratum 24...
I694523564:FROMGIT: mm/vmalloc: Add override for lazy vunmap

Change-Id: I254d427b9dad0791ca8df4dc51be92e458c58728
Signed-off-by: Will Deacon <willdeacon@google.com>
commit 450a37133d (parent 015859081a)
Author:    Will Deacon
Date:      2022-11-21 12:21:05 +00:00
Committer: Treehugger Robot

10 changed files with 6 additions and 686 deletions

@@ -94,8 +94,6 @@ stable kernels.
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A510     | #2051678        | ARM64_ERRATUM_2051678       |
 +----------------+-----------------+-----------------+-----------------------------+
-| ARM            | Cortex-A510     | #2454944        | ARM64_ERRATUM_2454944       |
-+----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Cortex-A710     | #2054223        | ARM64_ERRATUM_2054223       |
 +----------------+-----------------+-----------------+-----------------------------+
 | ARM            | Neoverse-N1     | #1188873,1418040| ARM64_ERRATUM_1418040       |

@@ -713,36 +713,6 @@ config ARM64_ERRATUM_2067961
 	  If unsure, say Y.
 
-config ARM64_ERRATUM_2454944
-	bool "Cortex-A510: 2454944: Unmodified cache line might be written back to memory"
-	select ARCH_HAS_TEARDOWN_DMA_OPS
-	default y
-	help
-	  This option adds the workaround for ARM Cortex-A510 erratum 2454944.
-
-	  Affected Cortex-A510 core might write unmodified cache lines back to
-	  memory, which breaks the assumptions upon which software coherency
-	  management for non-coherent DMA relies. If a cache line is
-	  speculatively fetched while a non-coherent device is writing directly
-	  to DRAM, and subsequently written back by natural eviction, data
-	  written by the device in the intervening period can be lost.
-
-	  The workaround is to enforce as far as reasonably possible that all
-	  non-coherent DMA transfers are bounced and/or remapped to minimise
-	  the chance that any Cacheable alias exists through which speculative
-	  cache fills could occur.
-
-	  This is quite involved and has unavoidable performance impact on
-	  affected systems.
-
-config ARM64_ERRATUM_2454944_DEBUG
-	bool "Extra debug checks for Cortex-A510 2454944"
-	depends on ARM64_ERRATUM_2454944
-	default y
-	help
-	  Enable additional checks and warnings to detect and mitigate driver
-	  bugs breaking the remapping workaround.
-
 config CAVIUM_ERRATUM_22375
 	bool "Cavium erratum 22375, 24313"
 	default y

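The Kconfig help text above describes the shape of the mitigation: keep non-coherent DMA away from any Cacheable alias by bouncing or remapping it. In this series that policy shows up concretely in the mem_init() hunk further down, where the new cpucap forces SWIOTLB on. Below is a minimal illustrative sketch of the same idea; it is not the reverted patch, and the helper name is made up, but the capability and SWIOTLB identifiers are the ones that appear in this commit.

#include <linux/init.h>
#include <linux/swiotlb.h>
#include <asm/cpufeature.h>

/*
 * Sketch only: on an affected core, force streaming DMA mappings through
 * the SWIOTLB bounce buffer so a device never writes to memory that also
 * has a Cacheable (linear map) alias which could be speculatively fetched
 * and later written back over the device's data.
 */
static void __init sketch_force_bounce_buffering(void)
{
	if (cpus_have_const_cap(ARM64_WORKAROUND_NO_DMA_ALIAS))
		swiotlb_force = SWIOTLB_FORCE;
}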

@@ -72,8 +72,7 @@
 #define ARM64_WORKAROUND_TSB_FLUSH_FAILURE 61
 #define ARM64_SPECTRE_BHB 62
-/* kabi: reserve 63 - 74 for future cpu capabilities */
-#define ARM64_WORKAROUND_NO_DMA_ALIAS 75
+/* kabi: reserve 63 - 76 for future cpu capabilities */
 #define ARM64_NCAPS 76
 
 #endif /* __ASM_CPUCAPS_H */

@@ -43,19 +43,6 @@ typedef struct page *pgtable_t;
 extern int pfn_valid(unsigned long);
 
-#ifdef CONFIG_ARM64_ERRATUM_2454944_DEBUG
-#include <asm/cpufeature.h>
-
-void page_check_nc(struct page *page, int order);
-
-static inline void arch_free_page(struct page *page, int order)
-{
-	if (cpus_have_const_cap(ARM64_WORKAROUND_NO_DMA_ALIAS))
-		page_check_nc(page, order);
-}
-#define HAVE_ARCH_FREE_PAGE
-#endif
-
 #include <asm/memory.h>
 
 #endif /* !__ASSEMBLY__ */

@@ -1,8 +1,4 @@
 #ifndef _ASM_ARM64_VMALLOC_H
 #define _ASM_ARM64_VMALLOC_H
 
-#include <asm/cpufeature.h>
-
-#define arch_disable_lazy_vunmap cpus_have_const_cap(ARM64_WORKAROUND_NO_DMA_ALIAS)
-
 #endif /* _ASM_ARM64_VMALLOC_H */

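The arch_disable_lazy_vunmap override removed above was consumed by the companion change "FROMGIT: mm/vmalloc: Add override for lazy vunmap", which is reverted in the same submission and whose diff is not shown here. Purely as a hypothetical illustration of how such an override could be wired up on the mm side (the names below are assumptions, not the actual patch): a generic default plus a helper that skips the lazy, batched unmap path, so a temporary remap alias of a DMA buffer is torn down immediately rather than lingering until a deferred flush.

/* Hypothetical generic-side sketch; not taken from the reverted patch. */
#ifndef arch_disable_lazy_vunmap
#define arch_disable_lazy_vunmap	false	/* default: keep lazy vunmap */
#endif

static bool sketch_lazy_vunmap_allowed(void)
{
	/* Unmap immediately when the architecture asks for it. */
	return !arch_disable_lazy_vunmap;
}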

@@ -555,14 +555,6 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
 		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
 	},
 #endif
-#ifdef CONFIG_ARM64_ERRATUM_2454944
-	{
-		.desc = "ARM erratum 2454944",
-		.capability = ARM64_WORKAROUND_NO_DMA_ALIAS,
-		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
-		MIDR_FIXED(MIDR_CPU_VAR_REV(1, 1), BIT(25)),
-	},
-#endif
 	{
 	}

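The removed errata-table entry applies the workaround to Cortex-A510 from r0p0 through r1p1, and MIDR_FIXED(MIDR_CPU_VAR_REV(1, 1), BIT(25)) additionally treats r1p1 parts that set the given REVIDR_EL1 bit (i.e. report the erratum fixed) as unaffected. For readers unfamiliar with the rNpM notation, a small stand-alone illustration of how the variant ("rN") and revision ("pM") fields are read out of MIDR_EL1; the example MIDR value is assumed for illustration, not taken from this commit.

#include <stdint.h>
#include <stdio.h>

/* MIDR_EL1 layout: [31:24] implementer, [23:20] variant, [19:16] architecture,
 * [15:4] part number, [3:0] revision. */
static unsigned int midr_variant(uint32_t midr)  { return (midr >> 20) & 0xf; }
static unsigned int midr_revision(uint32_t midr) { return midr & 0xf; }
static unsigned int midr_part(uint32_t midr)     { return (midr >> 4) & 0xfff; }

int main(void)
{
	uint32_t midr = 0x410fd461;	/* assumed example: implementer ARM, part 0xd46 (Cortex-A510), r0p1 */

	/* Prints "part 0xd46, r0p1" -- inside the r0p0..r1p1 range above. */
	printf("part 0x%03x, r%up%u\n",
	       midr_part(midr), midr_variant(midr), midr_revision(midr));
	return 0;
}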

@@ -125,13 +125,9 @@ SYM_CODE_END(primary_entry)
 SYM_CODE_START_LOCAL(preserve_boot_args)
 	mov	x21, x0				// x21=FDT
 
-	adr_l	x0, boot_args
-#ifdef CONFIG_ARM64_ERRATUM_2454944
-	dc	ivac, x0			// Cortex-A510 CWG is 64 bytes, so plenty
-	dsb	sy
-#endif
-	stp	x21, x1, [x0]			// record the contents of
-	stp	x2, x3, [x0, #16]		// x0 .. x3 at kernel entry
+	adr_l	x0, boot_args			// record the contents of
+	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
+	stp	x2, x3, [x0, #16]
 
 	dmb	sy				// needed before dc ivac with
 						// MMU off
@@ -286,17 +282,8 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 	 * the kernel image, and thus are clean to the PoC per the boot
 	 * protocol.
 	 */
-#ifndef CONFIG_ARM64_ERRATUM_2454944
 	adrp	x0, init_pg_dir
 	adrp	x1, init_pg_end
-#else
-	/*
-	 * However if we can't even trust "clean" cache lines shadowing rodata,
-	 * then nuke the entire image. It's the only way to be sure.
-	 */
-	adrp	x0, _text
-	adrp	x1, _end
-#endif
 	sub	x1, x1, x0
 	bl	__inval_dcache_area
@@ -544,10 +531,6 @@ SYM_FUNC_END(init_kernel_el)
  */
 SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 	adr_l	x1, __boot_cpu_mode
-#ifdef CONFIG_ARM64_ERRATUM_2454944
-	dc	ivac, x1
-	dsb	sy
-#endif
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4

File diff suppressed because it is too large.

@@ -581,8 +581,7 @@ static void __init free_unused_memmap(void)
 void __init mem_init(void)
 {
 	if (swiotlb_force == SWIOTLB_FORCE ||
-	    max_pfn > PFN_DOWN(arm64_dma_phys_limit) ||
-	    cpus_have_cap(ARM64_WORKAROUND_NO_DMA_ALIAS))
+	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
 		swiotlb_init(1);
 	else
 		swiotlb_force = SWIOTLB_NO_FORCE;

@@ -502,7 +502,7 @@ static void __init map_mem(pgd_t *pgdp)
 	u64 i;
 
 	if (rodata_full || debug_pagealloc_enabled() ||
-	    IS_ENABLED(CONFIG_KFENCE) || cpus_have_cap(ARM64_WORKAROUND_NO_DMA_ALIAS))
+	    IS_ENABLED(CONFIG_KFENCE))
 		flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*