Commit a5ecf56

jcmvbkbc authored and gregkh committed
xtensa: fix high memory/reserved memory collision
commit 6ac5a11 upstream.

Xtensa memory initialization code frees high memory pages without
checking whether they are in the reserved memory regions or not. That
results in invalid value of totalram_pages and duplicate page usage by
CMA and highmem. It produces a bunch of BUGs at startup looking like
this:

BUG: Bad page state in process swapper  pfn:70800
page:be60c000 count:0 mapcount:-127 mapping: (null) index:0x1
flags: 0x80000000()
raw: 80000000 00000000 00000001 ffffff80 00000000 be60c014 be60c014 0000000a
page dumped because: nonzero mapcount
Modules linked in:
CPU: 0 PID: 1 Comm: swapper Tainted: G    B    4.16.0-rc1-00015-g7928b2cbe55b-dirty Freescale#23
Stack:
 bd839d33 00000000 00000018 ba97b64c a106578c bd839d70 be60c000 00000000
 a1378054 bd86a000 00000003 ba97b64c a1066166 bd839da0 be60c000 ffe00000
 a1066b58 bd839dc0 be504000 00000000 000002f4 bd838000 00000000 0000001e
Call Trace:
 [<a1065734>] bad_page+0xac/0xd0
 [<a106578c>] free_pages_check_bad+0x34/0x4c
 [<a1066166>] __free_pages_ok+0xae/0x14c
 [<a1066b58>] __free_pages+0x30/0x64
 [<a1365de5>] init_cma_reserved_pageblock+0x35/0x44
 [<a13682dc>] cma_init_reserved_areas+0xf4/0x148
 [<a10034b8>] do_one_initcall+0x80/0xf8
 [<a1361c16>] kernel_init_freeable+0xda/0x13c
 [<a125b59d>] kernel_init+0x9/0xd0
 [<a1004304>] ret_from_kernel_thread+0xc/0x18

Only free high memory pages that are not reserved.

Cc: [email protected]
Signed-off-by: Max Filippov <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
1 parent d58d78c commit a5ecf56

1 file changed: +63 -7 lines changed

arch/xtensa/mm/init.c

Lines changed: 63 additions & 7 deletions
@@ -77,19 +77,75 @@ void __init zones_init(void)
 	free_area_init_node(0, zones_size, ARCH_PFN_OFFSET, NULL);
 }
 
+#ifdef CONFIG_HIGHMEM
+static void __init free_area_high(unsigned long pfn, unsigned long end)
+{
+	for (; pfn < end; pfn++)
+		free_highmem_page(pfn_to_page(pfn));
+}
+
+static void __init free_highpages(void)
+{
+	unsigned long max_low = max_low_pfn;
+	struct memblock_region *mem, *res;
+
+	reset_all_zones_managed_pages();
+	/* set highmem page free */
+	for_each_memblock(memory, mem) {
+		unsigned long start = memblock_region_memory_base_pfn(mem);
+		unsigned long end = memblock_region_memory_end_pfn(mem);
+
+		/* Ignore complete lowmem entries */
+		if (end <= max_low)
+			continue;
+
+		if (memblock_is_nomap(mem))
+			continue;
+
+		/* Truncate partial highmem entries */
+		if (start < max_low)
+			start = max_low;
+
+		/* Find and exclude any reserved regions */
+		for_each_memblock(reserved, res) {
+			unsigned long res_start, res_end;
+
+			res_start = memblock_region_reserved_base_pfn(res);
+			res_end = memblock_region_reserved_end_pfn(res);
+
+			if (res_end < start)
+				continue;
+			if (res_start < start)
+				res_start = start;
+			if (res_start > end)
+				res_start = end;
+			if (res_end > end)
+				res_end = end;
+			if (res_start != start)
+				free_area_high(start, res_start);
+			start = res_end;
+			if (start == end)
+				break;
+		}
+
+		/* And now free anything which remains */
+		if (start < end)
+			free_area_high(start, end);
+	}
+}
+#else
+static void __init free_highpages(void)
+{
+}
+#endif
+
 /*
  * Initialize memory pages.
  */
 
 void __init mem_init(void)
 {
-#ifdef CONFIG_HIGHMEM
-	unsigned long tmp;
-
-	reset_all_zones_managed_pages();
-	for (tmp = max_low_pfn; tmp < max_pfn; tmp++)
-		free_highmem_page(pfn_to_page(tmp));
-#endif
+	free_highpages();
 
 	max_mapnr = max_pfn - ARCH_PFN_OFFSET;
 	high_memory = (void *)__va(max_low_pfn << PAGE_SHIFT);
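For readers who want to poke at the exclusion logic outside the kernel, below is a minimal userspace sketch of the interval-subtraction walk that the new free_highpages() performs. It models memblock's reserved list as a plain sorted, non-overlapping array; the helper name free_except_reserved() and the sample PFN values are made up for illustration and are not part of the patch.

/*
 * Illustration only: userspace sketch of the "free everything except the
 * reserved sub-ranges" walk used by free_highpages() above. Toy data model,
 * hypothetical names; not kernel code.
 */
#include <stdio.h>

struct range { unsigned long start, end; };     /* [start, end) in PFNs */

/* stand-in for free_area_high(): just report what would be freed */
static void free_area_high(unsigned long pfn, unsigned long end)
{
        printf("free PFNs [%#lx, %#lx)\n", pfn, end);
}

/*
 * Free the highmem range [start, end) except for the parts covered by the
 * sorted, non-overlapping reserved ranges, mirroring the inner
 * for_each_memblock(reserved, ...) loop of the patch.
 */
static void free_except_reserved(unsigned long start, unsigned long end,
                                 const struct range *res, int nres)
{
        for (int i = 0; i < nres; i++) {
                unsigned long res_start = res[i].start;
                unsigned long res_end = res[i].end;

                if (res_end < start)            /* reservation below our range */
                        continue;
                if (res_start < start)          /* clamp reservation to [start, end) */
                        res_start = start;
                if (res_start > end)
                        res_start = end;
                if (res_end > end)
                        res_end = end;
                if (res_start != start)         /* free the gap before the reservation */
                        free_area_high(start, res_start);
                start = res_end;                /* continue past the reservation */
                if (start == end)
                        break;
        }
        if (start < end)                        /* free whatever remains at the top */
                free_area_high(start, end);
}

int main(void)
{
        /* hypothetical highmem span with a CMA-style reserved block inside it */
        const struct range reserved[] = { { 0x70800, 0x70c00 } };

        free_except_reserved(0x70000, 0x80000, reserved, 1);
        return 0;
}

In the patch itself this subtraction runs once per highmem memblock region inside for_each_memblock(memory, ...), and the empty CONFIG_HIGHMEM=n stub lets mem_init() call free_highpages() unconditionally instead of wrapping the call in #ifdefs as the old code did.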
