/*
 * NOTE(review): this is a unified-diff hunk, not plain C -- leading '-'/'+'
 * mark lines removed/added by the patch.  Tail of (presumably)
 * alloc_huge_page_node(); the function header is above this view.
 */
page = dequeue_huge_page_node(h, nid);
/* Drop hugetlb_lock before falling back to the buddy allocator. */
spin_unlock(&hugetlb_lock);
/*
 * The patch removes the open-coded "take lock, move fresh page onto
 * h->hugepage_activelist, drop lock" step after a successful buddy
 * allocation.  Presumably alloc_buddy_huge_page() itself now places the
 * page on the active list under hugetlb_lock -- TODO confirm against the
 * parts of the patch not visible in this fragment.
 */
- if (!page) {
+ if (!page)
page = alloc_buddy_huge_page(h, nid);
- if (page) {
- spin_lock(&hugetlb_lock);
- list_move(&page->lru, &h->hugepage_activelist);
- spin_unlock(&hugetlb_lock);
- }
- }
return page;
}
}
/*
 * NOTE(review): diff hunk inside (presumably) alloc_huge_page(); the
 * function header and the earlier cgroup-charge/reservation setup are
 * outside this view.  Net effect of the patch: the hugetlb cgroup charge
 * is committed to the page while hugetlb_lock is still held -- both on
 * the dequeue fast path and on the buddy-allocation slow path -- instead
 * of after the lock has been dropped (old L40-41 below).  That closes the
 * window in which the page could be observed on hugepage_activelist with
 * no cgroup assigned.
 */
spin_lock(&hugetlb_lock);
page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
- spin_unlock(&hugetlb_lock);
-
- if (!page) {
+ if (page) {
+ /* update page cgroup details */
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+ h_cg, page);
+ spin_unlock(&hugetlb_lock);
+ } else {
+ spin_unlock(&hugetlb_lock);
/* Slow path: lock dropped because the buddy allocation may sleep. */
page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
if (!page) {
/*
 * NOTE(review): this call is truncated in the fragment (its remaining
 * arguments, presumably h_cg, are on elided lines) -- roll back the
 * cgroup charge taken earlier before failing.
 */
hugetlb_cgroup_uncharge_cgroup(idx,
return ERR_PTR(-ENOSPC);
}
/*
 * Re-take the lock so that committing the charge and moving the page to
 * the active list are one atomic step w.r.t. hugetlb_lock.
 */
spin_lock(&hugetlb_lock);
+ hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+ h_cg, page);
list_move(&page->lru, &h->hugepage_activelist);
spin_unlock(&hugetlb_lock);
}
set_page_private(page, (unsigned long)spool);
vma_commit_reservation(h, vma, addr);
/* Old post-unlock commit point, removed by the patch (see note above). */
- /* update page cgroup details */
- hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
return page;
}
/*
 * NOTE(review): the trailing "return ret; }" below appears to belong to a
 * different, elided hunk -- diff context seems to have been dropped here.
 */
return ret;
}
+/* Should be called with hugetlb_lock held */
void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
struct hugetlb_cgroup *h_cg,
struct page *page)
/*
 * NOTE(review): the opening '{' of the body is missing here -- diff
 * context was presumably elided when this fragment was extracted.
 */
/* No-op when the controller is disabled or no cgroup was charged. */
if (hugetlb_cgroup_disabled() || !h_cg)
return;
/*
 * The internal lock/unlock pair is removed by the patch: per the comment
 * added above, callers now invoke this with hugetlb_lock already held,
 * so re-taking the (non-recursive) spinlock here would self-deadlock.
 */
- spin_lock(&hugetlb_lock);
set_hugetlb_cgroup(page, h_cg);
- spin_unlock(&hugetlb_lock);
return;
}
void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
{
struct hugetlb_cgroup *h_cg;
/* hstate of the page being replaced; used for its active list below. */
+ struct hstate *h = page_hstate(oldhpage);
if (hugetlb_cgroup_disabled())
return;
/*
 * NOTE(review): diff context appears elided between here and the lines
 * below -- h_cg is read without a visible assignment, and the
 * spin_unlock() at the end has no visible matching
 * spin_lock(&hugetlb_lock).  Presumably both live in the omitted
 * context; confirm against the full patch.
 */
/* move the h_cg details to new cgroup */
set_hugetlb_cgroup(newhpage, h_cg);
/*
 * Patch addition: also place the replacement page on the hstate's active
 * list, keeping "cgroup assigned" and "on hugepage_activelist" in sync
 * during migration (the unlock below implies hugetlb_lock is held here).
 */
+ list_move(&newhpage->lru, &h->hugepage_activelist);
spin_unlock(&hugetlb_lock);
cgroup_release_and_wakeup_rmdir(&h_cg->css);
return;