
Commit 1a3e1f4

hnaz authored and torvalds committed
mm: memcontrol: decouple reference counting from page accounting
The reference counting of a memcg is currently coupled directly to how many 4k pages are charged to it. This doesn't work well with Roman's new slab controller, which maintains pools of objects and doesn't want to keep an extra balance sheet for the pages backing those objects.

This unusual refcounting design (reference counts usually track pointers to an object) exists only for historical reasons: memcg used to not take any css references and simply stalled offlining until all charges had been reparented and the page counters had dropped to zero. When we got rid of the reparenting requirement, the simple mechanical translation was to take a reference for every charge.

More historical context can be found in commit e8ea14c ("mm: memcontrol: take a css reference for each charged page"), commit 64f2199 ("mm: memcontrol: remove obsolete kmemcg pinning tricks") and commit b205256 ("mm: memcontrol: continue cache reclaim from offlined groups").

The new slab controller exposes the limitations of this scheme, so let's switch it to a more idiomatic reference counting model based on actual kernel pointers to the memcg:

- The per-cpu stock holds a reference to the memcg it's caching.

- User pages hold a reference for their page->mem_cgroup. Transparent huge pages will no longer acquire tail references in advance; we'll get them if needed during the split.

- Kernel pages hold a reference for their page->mem_cgroup.

- Pages allocated in the root cgroup will acquire and release css references for simplicity. css_get() and css_put() optimize that.

- The current memcg_charge_slab() already hacked around the per-charge references; this change gets rid of that as well.

- tcp accounting will handle the reference in mem_cgroup_sk_{alloc,free}.

Roman:
1) Rebased on top of the current mm tree: added css_get() in mem_cgroup_charge(), dropped the mem_cgroup_try_charge() part.
2) Reformatted commit references in the commit log to make checkpatch.pl happy.

[[email protected]: remove css_put_many() from __mem_cgroup_clear_mc()]
  Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Johannes Weiner <[email protected]>
Signed-off-by: Roman Gushchin <[email protected]>
Signed-off-by: Hugh Dickins <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Reviewed-by: Shakeel Butt <[email protected]>
Acked-by: Roman Gushchin <[email protected]>
Acked-by: Michal Hocko <[email protected]>
Cc: Christoph Lameter <[email protected]>
Cc: Tejun Heo <[email protected]>
Cc: Vlastimil Babka <[email protected]>
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Linus Torvalds <[email protected]>
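For readers new to the model, the difference is easiest to see in a small user-space sketch. Everything below (struct obj, obj_get()/obj_put(), struct stock, stock_refill()/stock_drain()) is an invented analogy for the css refcount and the per-cpu stock, not kernel code: the cache takes one reference for the pointer it stores, while the page count is plain arithmetic that takes no references.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-in for a refcounted css/memcg. */
struct obj {
	atomic_int refs;
};

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refs, 1);
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refs, 1) == 1)
		free(o);	/* last reference gone: release the object */
}

/*
 * Stand-in for the per-cpu stock: it holds ONE reference for the
 * cached pointer, no matter how many pages of charge it caches.
 */
struct stock {
	struct obj *cached;
	unsigned int nr_pages;	/* pure accounting, takes no references */
};

static void stock_refill(struct stock *s, struct obj *o, unsigned int pages)
{
	if (s->cached != o) {	/* reset if necessary */
		obj_get(o);	/* pin the pointer, not the pages */
		s->cached = o;
	}
	s->nr_pages += pages;
}

static void stock_drain(struct stock *s)
{
	if (!s->cached)
		return;
	s->nr_pages = 0;	/* "uncharge": accounting only */
	obj_put(s->cached);	/* drop the single pointer reference */
	s->cached = NULL;
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));
	struct stock s = { 0 };

	atomic_init(&o->refs, 1);	/* creator's reference */

	stock_refill(&s, o, 32);	/* refs: 1 -> 2 */
	stock_refill(&s, o, 32);	/* 64 pages cached, refs still 2 */
	printf("refs while cached: %d\n", atomic_load(&o->refs));

	stock_drain(&s);		/* refs: 2 -> 1 */
	obj_put(o);			/* refs: 1 -> 0, freed */
	return 0;
}

Under the old per-charge scheme the cache above would have ended up holding 64 references, one per page; under the pointer-based scheme it takes exactly one for stock->cached, which is what the drain_stock()/refill_stock() hunks below implement with css_get()/css_put().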
1 parent 4138fdf commit 1a3e1f4

File tree

2 files changed, +21 -20 lines changed


mm/memcontrol.c

Lines changed: 21 additions & 18 deletions
@@ -2094,13 +2094,17 @@ static void drain_stock(struct memcg_stock_pcp *stock)
 {
 	struct mem_cgroup *old = stock->cached;
 
+	if (!old)
+		return;
+
 	if (stock->nr_pages) {
 		page_counter_uncharge(&old->memory, stock->nr_pages);
 		if (do_memsw_account())
 			page_counter_uncharge(&old->memsw, stock->nr_pages);
-		css_put_many(&old->css, stock->nr_pages);
 		stock->nr_pages = 0;
 	}
+
+	css_put(&old->css);
 	stock->cached = NULL;
 }

@@ -2136,6 +2140,7 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages)
 	stock = this_cpu_ptr(&memcg_stock);
 	if (stock->cached != memcg) { /* reset if necessary */
 		drain_stock(stock);
+		css_get(&memcg->css);
 		stock->cached = memcg;
 	}
 	stock->nr_pages += nr_pages;
@@ -2594,12 +2599,10 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
-	css_get_many(&memcg->css, nr_pages);
 
 	return 0;
 
 done_restock:
-	css_get_many(&memcg->css, batch);
 	if (batch > nr_pages)
 		refill_stock(memcg, batch - nr_pages);

@@ -2657,8 +2660,6 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages)
 	page_counter_uncharge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_uncharge(&memcg->memsw, nr_pages);
-
-	css_put_many(&memcg->css, nr_pages);
 }
 #endif

@@ -2966,6 +2967,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order)
 		if (!ret) {
 			page->mem_cgroup = memcg;
 			__SetPageKmemcg(page);
+			return 0;
 		}
 	}
 	css_put(&memcg->css);
@@ -2988,12 +2990,11 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
 	VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page);
 	__memcg_kmem_uncharge(memcg, nr_pages);
 	page->mem_cgroup = NULL;
+	css_put(&memcg->css);
 
 	/* slab pages do not have PageKmemcg flag set */
 	if (PageKmemcg(page))
 		__ClearPageKmemcg(page);
-
-	css_put_many(&memcg->css, nr_pages);
 }
 #endif /* CONFIG_MEMCG_KMEM */

@@ -3005,13 +3006,16 @@ void __memcg_kmem_uncharge_page(struct page *page, int order)
  */
 void mem_cgroup_split_huge_fixup(struct page *head)
 {
+	struct mem_cgroup *memcg = head->mem_cgroup;
 	int i;
 
 	if (mem_cgroup_disabled())
 		return;
 
-	for (i = 1; i < HPAGE_PMD_NR; i++)
-		head[i].mem_cgroup = head->mem_cgroup;
+	for (i = 1; i < HPAGE_PMD_NR; i++) {
+		css_get(&memcg->css);
+		head[i].mem_cgroup = memcg;
+	}
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */

@@ -5452,7 +5456,10 @@ static int mem_cgroup_move_account(struct page *page,
 	 */
 	smp_mb();
 
-	page->mem_cgroup = to;	/* caller should have done css_get */
+	css_get(&to->css);
+	css_put(&from->css);
+
+	page->mem_cgroup = to;
 
 	__unlock_page_memcg(from);

@@ -5673,8 +5680,6 @@ static void __mem_cgroup_clear_mc(void)
 		if (!mem_cgroup_is_root(mc.to))
 			page_counter_uncharge(&mc.to->memory, mc.moved_swap);
 
-		css_put_many(&mc.to->css, mc.moved_swap);
-
 		mc.moved_swap = 0;
 	}
 	memcg_oom_recover(from);
@@ -6502,6 +6507,7 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 	if (ret)
 		goto out_put;
 
+	css_get(&memcg->css);
 	commit_charge(page, memcg);
 
 	local_irq_disable();
@@ -6556,9 +6562,6 @@ static void uncharge_batch(const struct uncharge_gather *ug)
 	__this_cpu_add(ug->memcg->vmstats_percpu->nr_page_events, ug->nr_pages);
 	memcg_check_events(ug->memcg, ug->dummy_page);
 	local_irq_restore(flags);
-
-	if (!mem_cgroup_is_root(ug->memcg))
-		css_put_many(&ug->memcg->css, ug->nr_pages);
 }
 
 static void uncharge_page(struct page *page, struct uncharge_gather *ug)
@@ -6596,6 +6599,7 @@ static void uncharge_page(struct page *page, struct uncharge_gather *ug)
 
 	ug->dummy_page = page;
 	page->mem_cgroup = NULL;
+	css_put(&ug->memcg->css);
 }
 
 static void uncharge_list(struct list_head *page_list)
@@ -6701,8 +6705,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 	page_counter_charge(&memcg->memory, nr_pages);
 	if (do_memsw_account())
 		page_counter_charge(&memcg->memsw, nr_pages);
-	css_get_many(&memcg->css, nr_pages);
 
+	css_get(&memcg->css);
 	commit_charge(newpage, memcg);
 
 	local_irq_save(flags);
@@ -6939,8 +6943,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
 	mem_cgroup_charge_statistics(memcg, page, -nr_entries);
 	memcg_check_events(memcg, page);
 
-	if (!mem_cgroup_is_root(memcg))
-		css_put_many(&memcg->css, nr_entries);
+	css_put(&memcg->css);
 }
 
 /**

mm/slab.h

Lines changed: 0 additions & 2 deletions
@@ -402,9 +402,7 @@ static __always_inline int memcg_charge_slab(struct page *page,
 	lruvec = mem_cgroup_lruvec(memcg, page_pgdat(page));
 	mod_lruvec_state(lruvec, cache_vmstat_idx(s), nr_pages << PAGE_SHIFT);
 
-	/* transer try_charge() page references to kmem_cache */
 	percpu_ref_get_many(&s->memcg_params.refcnt, nr_pages);
-	css_put_many(&memcg->css, nr_pages);
 out:
 	css_put(&memcg->css);
 	return ret;
