Skip to content

Commit 4c20019

Browse files
Kefeng Wang authored and akpm00 committed
mm: memory_hotplug: unify Huge/LRU/non-LRU movable folio isolation
Use isolate_folio_to_list() to unify hugetlb/LRU/non-LRU folio isolation, which cleans up the code a bit and saves a few calls to compound_head(). Link: https://lkml.kernel.org/r/[email protected] Signed-off-by: Kefeng Wang <[email protected]> Cc: Dan Carpenter <[email protected]> Cc: David Hildenbrand <[email protected]> Cc: Jonathan Cameron <[email protected]> Cc: Miaohe Lin <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Oscar Salvador <[email protected]> Signed-off-by: Andrew Morton <[email protected]>
1 parent 7e36929 commit 4c20019

File tree

1 file changed

+17
-28
lines changed

1 file changed

+17
-28
lines changed

mm/memory_hotplug.c

Lines changed: 17 additions & 28 deletions
Original file line numberDiff line numberDiff line change
@@ -1772,15 +1772,15 @@ static int scan_movable_pages(unsigned long start, unsigned long end,
17721772

17731773
static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
17741774
{
1775+
struct folio *folio;
17751776
unsigned long pfn;
1776-
struct page *page;
17771777
LIST_HEAD(source);
17781778
static DEFINE_RATELIMIT_STATE(migrate_rs, DEFAULT_RATELIMIT_INTERVAL,
17791779
DEFAULT_RATELIMIT_BURST);
17801780

17811781
for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1782-
struct folio *folio;
1783-
bool isolated;
1782+
struct page *page;
1783+
bool hugetlb;
17841784

17851785
if (!pfn_valid(pfn))
17861786
continue;
@@ -1811,34 +1811,22 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
18111811
continue;
18121812
}
18131813

1814-
if (folio_test_hugetlb(folio)) {
1815-
isolate_hugetlb(folio, &source);
1816-
continue;
1814+
hugetlb = folio_test_hugetlb(folio);
1815+
if (!hugetlb) {
1816+
folio = folio_get_nontail_page(page);
1817+
if (!folio)
1818+
continue;
18171819
}
18181820

1819-
if (!get_page_unless_zero(page))
1820-
continue;
1821-
/*
1822-
* We can skip free pages. And we can deal with pages on
1823-
* LRU and non-lru movable pages.
1824-
*/
1825-
if (PageLRU(page))
1826-
isolated = isolate_lru_page(page);
1827-
else
1828-
isolated = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
1829-
if (isolated) {
1830-
list_add_tail(&page->lru, &source);
1831-
if (!__PageMovable(page))
1832-
inc_node_page_state(page, NR_ISOLATED_ANON +
1833-
page_is_file_lru(page));
1834-
1835-
} else {
1821+
if (!isolate_folio_to_list(folio, &source)) {
18361822
if (__ratelimit(&migrate_rs)) {
18371823
pr_warn("failed to isolate pfn %lx\n", pfn);
18381824
dump_page(page, "isolation failed");
18391825
}
18401826
}
1841-
put_page(page);
1827+
1828+
if (!hugetlb)
1829+
folio_put(folio);
18421830
}
18431831
if (!list_empty(&source)) {
18441832
nodemask_t nmask = node_states[N_MEMORY];
@@ -1853,7 +1841,7 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
18531841
* We have checked that migration range is on a single zone so
18541842
* we can use the nid of the first page to all the others.
18551843
*/
1856-
mtc.nid = page_to_nid(list_first_entry(&source, struct page, lru));
1844+
mtc.nid = folio_nid(list_first_entry(&source, struct folio, lru));
18571845

18581846
/*
18591847
* try to allocate from a different node but reuse this node
@@ -1866,11 +1854,12 @@ static void do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
18661854
ret = migrate_pages(&source, alloc_migration_target, NULL,
18671855
(unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);
18681856
if (ret) {
1869-
list_for_each_entry(page, &source, lru) {
1857+
list_for_each_entry(folio, &source, lru) {
18701858
if (__ratelimit(&migrate_rs)) {
18711859
pr_warn("migrating pfn %lx failed ret:%d\n",
1872-
page_to_pfn(page), ret);
1873-
dump_page(page, "migration failure");
1860+
folio_pfn(folio), ret);
1861+
dump_page(&folio->page,
1862+
"migration failure");
18741863
}
18751864
}
18761865
putback_movable_pages(&source);

0 commit comments

Comments (0)