ANDROID: vendor_hooks: Add hooks to avoid key threads stalled in memory allocations

We add these hooks to avoid key threads getting blocked in the memory
allocation path.

-android_vh_free_unref_page_bypass
----We create a memory pool for the key threads. This hook determines
whether a page should be freed to the pool or to the buddy freelist. It
works with an existing hook, `android_vh_alloc_pages_reclaim_bypass`,
which takes pages out of the pool.

-android_vh_kvmalloc_node_use_vmalloc
----For key threads, we prefer not to run into direct reclaim, so we
clear the __GFP_DIRECT_RECLAIM flag. For threads that are not as
important, we prefer to use vmalloc.

-android_vh_should_alloc_pages_retry
----Before key threads run into direct reclaim, we want to retry with a
lower watermark.

-android_vh_unreserve_highatomic_bypass
----We want to keep more highatomic pages when unreserving them, to
avoid highatomic allocation failures.

-android_vh_pageset_update
----We found the default per-cpu pageset is quite small on smartphones
with large RAM sizes. This hook is used to increase it and reduce
zone->lock contention.

-android_vh_rmqueue_bulk_bypass
----We found that when key threads run into rmqueue_bulk, they sometimes
spend several milliseconds spinning on zone->lock or filling per-cpu
pages. We use this hook to take pages from the mempool mentioned above
rather than grabbing zone->lock and filling a batch of pages into the
per-cpu lists.

Bug: 288216516
Change-Id: I1656032d6819ca627723341987b6094775bc345f
Signed-off-by: Oven <liyangouwen1@oppo.com>
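For context: vendor hooks declared with DECLARE_HOOK are ordinary tracepoints, so an OEM module consumes them by registering one probe per hook. Below is a minimal, hypothetical sketch (not part of this patch) of how such a module might attach to two of the hooks added here; oem_page_pool_put(), is_key_thread(), and the pool sizing are invented stand-ins for the OEM's own policy, and the allocation-side consumer of android_vh_alloc_pages_reclaim_bypass that would drain the pool is omitted.

/*
 * Hypothetical vendor module wiring up two of the new hooks.
 * The pool and the "key thread" policy below are assumptions for
 * illustration only, not something provided by this change.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <trace/hooks/mm.h>

static LIST_HEAD(oem_pool);
static DEFINE_SPINLOCK(oem_pool_lock);
static unsigned long oem_pool_count;
#define OEM_POOL_MAX 1024	/* arbitrary cap for this sketch */

/* Stash a just-freed order-0 page in a private list instead of buddy. */
static bool oem_page_pool_put(struct page *page)
{
	unsigned long flags;
	bool taken = false;

	spin_lock_irqsave(&oem_pool_lock, flags);
	if (oem_pool_count < OEM_POOL_MAX) {
		list_add(&page->lru, &oem_pool);
		oem_pool_count++;
		taken = true;
	}
	spin_unlock_irqrestore(&oem_pool_lock, flags);
	return taken;
}

/* Placeholder policy: treat high-priority tasks as "key" threads. */
static bool is_key_thread(void)
{
	return current->prio < DEFAULT_PRIO;
}

static void vh_free_unref_page_bypass(void *data, struct page *page,
				      int order, int migratetype, bool *bypass)
{
	/* Divert order-0 unmovable pages into the pool; caller then skips the normal free path. */
	if (order == 0 && migratetype == MIGRATE_UNMOVABLE &&
	    oem_page_pool_put(page))
		*bypass = true;
}

static void vh_kvmalloc_node_use_vmalloc(void *data, size_t size,
					 gfp_t *kmalloc_flags, bool *use_vmalloc)
{
	if (is_key_thread())
		*kmalloc_flags &= ~__GFP_DIRECT_RECLAIM;	/* keep key threads out of direct reclaim */
	else if (size > PAGE_SIZE)
		*use_vmalloc = true;	/* less important threads fall back to vmalloc */
}

static int __init oem_mm_hooks_init(void)
{
	register_trace_android_vh_free_unref_page_bypass(vh_free_unref_page_bypass, NULL);
	register_trace_android_vh_kvmalloc_node_use_vmalloc(vh_kvmalloc_node_use_vmalloc, NULL);
	return 0;
}
module_init(oem_mm_hooks_init);
MODULE_LICENSE("GPL");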
@@ -332,6 +332,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_do_page_trylock);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_page_referenced_check_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_cma_drain_all_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_free_unref_page_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_kvmalloc_node_use_vmalloc);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_should_alloc_pages_retry);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_unreserve_highatomic_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pageset_update);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_rmqueue_bulk_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_pcplist_add_cma_pages_bypass);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_event);
EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_psi_group);
@@ -192,6 +192,27 @@ DECLARE_HOOK(android_vh_mark_page_accessed,
DECLARE_HOOK(android_vh_cma_drain_all_pages_bypass,
        TP_PROTO(unsigned int migratetype, bool *bypass),
        TP_ARGS(migratetype, bypass));
DECLARE_HOOK(android_vh_free_unref_page_bypass,
        TP_PROTO(struct page *page, int order, int migratetype, bool *bypass),
        TP_ARGS(page, order, migratetype, bypass));
DECLARE_HOOK(android_vh_kvmalloc_node_use_vmalloc,
        TP_PROTO(size_t size, gfp_t *kmalloc_flags, bool *use_vmalloc),
        TP_ARGS(size, kmalloc_flags, use_vmalloc));
DECLARE_HOOK(android_vh_should_alloc_pages_retry,
        TP_PROTO(gfp_t gfp_mask, int order, int *alloc_flags,
        int migratetype, struct zone *preferred_zone, struct page **page, bool *should_alloc_retry),
        TP_ARGS(gfp_mask, order, alloc_flags,
        migratetype, preferred_zone, page, should_alloc_retry));
DECLARE_HOOK(android_vh_unreserve_highatomic_bypass,
        TP_PROTO(bool force, struct zone *zone, bool *skip_unreserve_highatomic),
        TP_ARGS(force, zone, skip_unreserve_highatomic));
DECLARE_HOOK(android_vh_pageset_update,
        TP_PROTO(unsigned long *high, unsigned long *batch),
        TP_ARGS(high, batch));
DECLARE_HOOK(android_vh_rmqueue_bulk_bypass,
        TP_PROTO(unsigned int order, struct per_cpu_pages *pcp, int migratetype,
        struct list_head *list),
        TP_ARGS(order, pcp, migratetype, list));
DECLARE_HOOK(android_vh_pcplist_add_cma_pages_bypass,
        TP_PROTO(int migratetype, bool *bypass),
        TP_ARGS(migratetype, bypass));
@@ -1608,11 +1608,16 @@ static void __free_pages_ok(struct page *page, unsigned int order,
        unsigned long flags;
        int migratetype;
        unsigned long pfn = page_to_pfn(page);
        bool skip_free_unref_page = false;

        if (!free_pages_prepare(page, order, true, fpi_flags))
                return;

        migratetype = get_pfnblock_migratetype(page, pfn);
        trace_android_vh_free_unref_page_bypass(page, order, migratetype, &skip_free_unref_page);
        if (skip_free_unref_page)
                return;

        local_irq_save(flags);
        __count_vm_events(PGFREE, 1 << order);
        free_one_page(page_zone(page), page, pfn, order, migratetype,
@@ -2791,6 +2796,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
        struct page *page;
        int order;
        bool ret;
        bool skip_unreserve_highatomic = false;

        for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
                                                                ac->nodemask) {
@@ -2802,6 +2808,11 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                                        pageblock_nr_pages)
                        continue;

                trace_android_vh_unreserve_highatomic_bypass(force, zone,
                                &skip_unreserve_highatomic);
                if (skip_unreserve_highatomic)
                        continue;

                spin_lock_irqsave(&zone->lock, flags);
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &(zone->free_area[order]);
@@ -3047,6 +3058,10 @@ static struct list_head *get_populated_pcp_list(struct zone *zone,
        struct list_head *list = &pcp->lists[migratetype];

        if (list_empty(list)) {
                trace_android_vh_rmqueue_bulk_bypass(order, pcp, migratetype, list);
                if (!list_empty(list))
                        return list;

                pcp->count += rmqueue_bulk(zone, order,
                                pcp->batch, list,
                                migratetype, alloc_flags);
@@ -3343,10 +3358,17 @@ void free_unref_page(struct page *page)
{
        unsigned long flags;
        unsigned long pfn = page_to_pfn(page);
        int migratetype;
        bool skip_free_unref_page = false;

        if (!free_unref_page_prepare(page, pfn))
                return;

        migratetype = get_pfnblock_migratetype(page, pfn);
        trace_android_vh_free_unref_page_bypass(page, 0, migratetype, &skip_free_unref_page);
        if (skip_free_unref_page)
                return;

        local_irq_save(flags);
        free_unref_page_commit(page, pfn);
        local_irq_restore(flags);
@@ -4822,6 +4844,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        unsigned int zonelist_iter_cookie;
        int reserve_flags;
        unsigned long vh_record;
        bool should_alloc_retry = false;

        trace_android_vh_alloc_pages_slowpath_begin(gfp_mask, order, &vh_record);
        /*
@@ -4962,6 +4985,12 @@ retry:
        if (page)
                goto got_pg;

        trace_android_vh_should_alloc_pages_retry(gfp_mask, order,
                &alloc_flags, ac->migratetype, ac->preferred_zoneref->zone,
                &page, &should_alloc_retry);
        if (should_alloc_retry)
                goto retry;

        /* Try direct reclaim and then allocating */
        page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
                                                        &did_some_progress);
@@ -6604,6 +6633,7 @@ static int zone_batchsize(struct zone *zone)
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
                unsigned long batch)
{
        trace_android_vh_pageset_update(&high, &batch);
        /* start with a fail safe value for batch */
        pcp->batch = 1;
        smp_wmb();
@@ -29,6 +29,7 @@
#include "internal.h"
#ifndef __GENKSYMS__
#include <trace/hooks/syscall_check.h>
#include <trace/hooks/mm.h>
#endif

/**
@@ -587,6 +588,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;
        bool use_vmalloc = false;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
@@ -595,6 +597,10 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        trace_android_vh_kvmalloc_node_use_vmalloc(size, &kmalloc_flags, &use_vmalloc);
        if (use_vmalloc)
                goto use_vmalloc_node;

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
@@ -624,6 +630,7 @@ void *kvmalloc_node(size_t size, gfp_t flags, int node)
                return NULL;
        }

use_vmalloc_node:
        return __vmalloc_node(size, 1, flags, node,
                        __builtin_return_address(0));
}