diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 71a12660f747..33d93f800734 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -62,6 +62,12 @@ EXPORT_SYMBOL(memstart_addr);
  */
 phys_addr_t arm64_dma_phys_limit __ro_after_init;
 
+/*
+ * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
+ * CONFIG_ZONE_DMA32.
+ */
+static bool disable_dma32 __ro_after_init;
+
 #ifdef CONFIG_KEXEC_CORE
 /*
  * reserve_crashkernel() - reserves memory for crash kernel
@@ -207,7 +213,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+	max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : PFN_DOWN(dma32_phys_limit);
 	if (!arm64_dma_phys_limit)
 		arm64_dma_phys_limit = dma32_phys_limit;
 #endif
@@ -218,6 +224,18 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 	free_area_init(max_zone_pfns);
 }
 
+static int __init early_disable_dma32(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	if (!strcmp(buf, "on"))
+		disable_dma32 = true;
+
+	return 0;
+}
+early_param("disable_dma32", early_disable_dma32);
+
 int pfn_valid(unsigned long pfn)
 {
 	phys_addr_t addr = pfn << PAGE_SHIFT;
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index c7a47603537f..155c95dc1772 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -102,6 +102,12 @@ static unsigned long min_pfn_mapped;
 
 static bool __initdata can_use_brk_pgt = true;
 
+/*
+ * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
+ * CONFIG_ZONE_DMA32.
+ */
+static bool disable_dma32 __ro_after_init;
+
 /*
  * Pages returned are already directly mapped.
  *
@@ -996,7 +1002,7 @@ void __init zone_sizes_init(void)
 	max_zone_pfns[ZONE_DMA]		= min(MAX_DMA_PFN, max_low_pfn);
 #endif
 #ifdef CONFIG_ZONE_DMA32
-	max_zone_pfns[ZONE_DMA32]	= min(MAX_DMA32_PFN, max_low_pfn);
+	max_zone_pfns[ZONE_DMA32]	= disable_dma32 ? 0 : min(MAX_DMA32_PFN, max_low_pfn);
 #endif
 	max_zone_pfns[ZONE_NORMAL]	= max_low_pfn;
 #ifdef CONFIG_HIGHMEM
@@ -1006,6 +1012,18 @@ void __init zone_sizes_init(void)
 	free_area_init(max_zone_pfns);
 }
 
+static int __init early_disable_dma32(char *buf)
+{
+	if (!buf)
+		return -EINVAL;
+
+	if (!strcmp(buf, "on"))
+		disable_dma32 = true;
+
+	return 0;
+}
+early_param("disable_dma32", early_disable_dma32);
+
 __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
 	.loaded_mm = &init_mm,
 	.next_asid = 1,
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index 18aade195884..ed69618cf1fc 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -24,6 +24,28 @@ struct bus_dma_region {
 	u64		offset;
 };
 
+static inline bool zone_dma32_is_empty(int node)
+{
+#ifdef CONFIG_ZONE_DMA32
+	pg_data_t *pgdat = NODE_DATA(node);
+
+	return zone_is_empty(&pgdat->node_zones[ZONE_DMA32]);
+#else
+	return true;
+#endif
+}
+
+static inline bool zone_dma32_are_empty(void)
+{
+	int node;
+
+	for_each_node(node)
+		if (!zone_dma32_is_empty(node))
+			return false;
+
+	return true;
+}
+
 static inline dma_addr_t translate_phys_to_dma(struct device *dev,
 		phys_addr_t paddr)
 {
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index b6b106fb36bd..6f58cde1558b 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -61,7 +61,8 @@ static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	*phys_limit = dma_to_phys(dev, dma_limit);
 	if (*phys_limit <= DMA_BIT_MASK(zone_dma_bits))
 		return GFP_DMA;
-	if (*phys_limit <= DMA_BIT_MASK(32))
+	if (*phys_limit <= DMA_BIT_MASK(32) &&
+	    !zone_dma32_is_empty(dev_to_node(dev)))
 		return GFP_DMA32;
 	return 0;
 }
@@ -101,7 +102,8 @@ again:
 
 	if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
 	    phys_limit < DMA_BIT_MASK(64) &&
-	    !(gfp & (GFP_DMA32 | GFP_DMA))) {
+	    !(gfp & (GFP_DMA32 | GFP_DMA)) &&
+	    !zone_dma32_is_empty(node)) {
 		gfp |= GFP_DMA32;
 		goto again;
 	}
diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c
index d4637f72239b..cebfe3537218 100644
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -74,7 +74,7 @@ static bool cma_in_zone(gfp_t gfp)
 	end = cma_get_base(cma) + size - 1;
 	if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
 		return end <= DMA_BIT_MASK(zone_dma_bits);
-	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())
 		return end <= DMA_BIT_MASK(32);
 	return true;
 }
@@ -156,7 +156,7 @@ static void atomic_pool_work_fn(struct work_struct *work)
 	if (IS_ENABLED(CONFIG_ZONE_DMA))
 		atomic_pool_resize(atomic_pool_dma,
 				   GFP_KERNEL | GFP_DMA);
-	if (IS_ENABLED(CONFIG_ZONE_DMA32))
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && !zone_dma32_are_empty())
 		atomic_pool_resize(atomic_pool_dma32,
 				   GFP_KERNEL | GFP_DMA32);
 	atomic_pool_resize(atomic_pool_kernel, GFP_KERNEL);
@@ -212,7 +212,7 @@ static int __init dma_atomic_pool_init(void)
 		if (!atomic_pool_dma)
 			ret = -ENOMEM;
 	}
-	if (IS_ENABLED(CONFIG_ZONE_DMA32)) {
+	if (IS_ENABLED(CONFIG_ZONE_DMA32) && !zone_dma32_are_empty()) {
 		atomic_pool_dma32 = __dma_atomic_pool_init(atomic_pool_size,
 						GFP_KERNEL | GFP_DMA32);
 		if (!atomic_pool_dma32)
@@ -227,7 +227,7 @@ postcore_initcall(dma_atomic_pool_init);
 static inline struct gen_pool *dma_guess_pool(struct gen_pool *prev, gfp_t gfp)
 {
 	if (prev == NULL) {
-		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32))
+		if (IS_ENABLED(CONFIG_ZONE_DMA32) && (gfp & GFP_DMA32) && !zone_dma32_are_empty())
 			return atomic_pool_dma32;
 		if (IS_ENABLED(CONFIG_ZONE_DMA) && (gfp & GFP_DMA))
 			return atomic_pool_dma;
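
For illustration, a sketch of what a consumer sees after this change (hypothetical
driver code, not part of the patch; example_probe and its device are made up).
Booting a CONFIG_ZONE_DMA32=y kernel with disable_dma32=on leaves ZONE_DMA32
empty on every node, so dma_direct_optimal_gfp_mask() no longer hands back
GFP_DMA32 and such allocations fall back to the remaining zones, with the
atomic pools and swiotlb covering what the device cannot address:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical probe path: the caller-visible DMA API does not change. */
static int example_probe(struct device *dev)
{
	dma_addr_t handle;
	void *cpu_addr;

	/* Device can only address the low 4 GiB. */
	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return -EIO;

	/*
	 * With ZONE_DMA32 empty on all nodes, this allocation no longer
	 * picks up GFP_DMA32; dma_coherent_ok() still decides whether the
	 * resulting memory is usable by the device.
	 */
	cpu_addr = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	dma_free_coherent(dev, SZ_4K, cpu_addr, handle);
	return 0;
}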