mm/damon/paddr: increment pa_stat damon address range by folio size

This is to avoid going through all the pages in a folio.  For folio_size >
PAGE_SIZE, damon_get_folio will return NULL for tail pages, so the for
loop in those instances will be a nop.  Have a more efficient loop by just
incrementing the address by folio_size.

Link: https://lkml.kernel.org/r/20250113190738.1156381-1-usamaarif642@gmail.com
Signed-off-by: Usama Arif <usamaarif642@gmail.com>
Reviewed-by: SeongJae Park <sj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Authored by Usama Arif <usamaarif642@gmail.com> on 2025-01-13 19:07:38 +00:00; committed by Andrew Morton.
parent bf069012df
commit bdbe1d7bc3

View File

@@ -504,17 +504,21 @@ static unsigned long damon_pa_stat(struct damon_region *r, struct damos *s,
 	if (!damon_pa_scheme_has_filter(s))
 		return 0;
 
-	for (addr = r->ar.start; addr < r->ar.end; addr += PAGE_SIZE) {
+	addr = r->ar.start;
+	while (addr < r->ar.end) {
 		struct folio *folio = damon_get_folio(PHYS_PFN(addr));
 
-		if (!folio)
+		if (!folio) {
+			addr += PAGE_SIZE;
 			continue;
+		}
 
 		if (damos_pa_filter_out(s, folio))
 			goto put_folio;
 		else
 			*sz_filter_passed += folio_size(folio);
 put_folio:
+		addr += folio_size(folio);
 		folio_put(folio);
 	}
 	return 0;