static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
- bool direct, bool split)
+ bool direct)
{
unsigned long next, pages = 0;
pte_t *pte;
/*
* Do not free direct mapping pages since they were
* freed when offlining, or simplely not in use.
- *
- * Do not free pages split from larger page since only
- * the _count of the 1st page struct is available.
- * Free the larger page when it is fulfilled with 0xFD.
*/
- if (!direct) {
- if (split) {
- /*
- * Fill the split 4KB page with 0xFD.
- * When the whole 2MB page is fulfilled
- * with 0xFD, it could be freed.
- */
- memset((void *)addr, PAGE_INUSE,
- PAGE_SIZE);
- } else
- free_pagetable(pte_page(*pte), 0);
- }
+ if (!direct)
+ free_pagetable(pte_page(*pte), 0);
spin_lock(&init_mm.page_table_lock);
pte_clear(&init_mm, addr, pte);
pages++;
} else {
/*
+ * If we are here, we are freeing vmemmap pages since
+ * direct mapped memory ranges to be freed are aligned.
+ *
* If we are not removing the whole page, it means
- * other ptes in this page are being used and we cannot
- * remove them. So fill the unused ptes with 0xFD, and
- * remove the page when it is wholly filled with 0xFD.
+ * other page structs in this page are being used and
+ * we cannot remove them. So fill the unused page_structs
+ * with 0xFD, and remove the page when it is wholly
+ * filled with 0xFD.
*/
memset((void *)addr, PAGE_INUSE, next - addr);
- /*
- * If the range is not aligned to PAGE_SIZE, then the
- * page is definitely not split from larger page.
- */
page_addr = page_address(pte_page(*pte));
if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
- if (!direct)
- free_pagetable(pte_page(*pte), 0);
+ free_pagetable(pte_page(*pte), 0);
spin_lock(&init_mm.page_table_lock);
pte_clear(&init_mm, addr, pte);
spin_unlock(&init_mm.page_table_lock);
- pages++;
}
}
}
static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
- bool direct, bool split)
+ bool direct)
{
- unsigned long pte_phys, next, pages = 0;
+ unsigned long next, pages = 0;
pte_t *pte_base;
pmd_t *pmd;
void *page_addr;
- bool split_pmd = split, split_pte = false;
pmd = pmd_start + pmd_index(addr);
for (; addr < end; addr = next, pmd++) {
if (pmd_large(*pmd)) {
if (IS_ALIGNED(addr, PMD_SIZE) &&
IS_ALIGNED(next, PMD_SIZE)) {
- if (!direct) {
- if (split_pmd) {
- /*
- * Fill the split 2MB page with
- * 0xFD. When the whole 1GB page
- * is fulfilled with 0xFD, it
- * could be freed.
- */
- memset((void *)addr, PAGE_INUSE,
- PMD_SIZE);
- } else {
- free_pagetable(pmd_page(*pmd),
+ if (!direct)
+ free_pagetable(pmd_page(*pmd),
get_order(PMD_SIZE));
- }
- }
spin_lock(&init_mm.page_table_lock);
pmd_clear(pmd);
spin_unlock(&init_mm.page_table_lock);
-
- /*
- * For non-direct mapping, pages means
- * nothing.
- */
pages++;
+ } else {
+ /* If here, we are freeing vmemmap pages. */
+ memset((void *)addr, PAGE_INUSE, next - addr);
+
+ page_addr = page_address(pmd_page(*pmd));
+ if (!memchr_inv(page_addr, PAGE_INUSE,
+ PMD_SIZE)) {
+ free_pagetable(pmd_page(*pmd),
+ get_order(PMD_SIZE));
- continue;
+ spin_lock(&init_mm.page_table_lock);
+ pmd_clear(pmd);
+ spin_unlock(&init_mm.page_table_lock);
+ }
}
- /*
- * We use 2M page, but we need to remove part of them,
- * so split 2M page to 4K page.
- */
- pte_base = (pte_t *)alloc_low_page(&pte_phys);
- BUG_ON(!pte_base);
- __split_large_page((pte_t *)pmd, addr,
- (pte_t *)pte_base);
- split_pte = true;
-
- spin_lock(&init_mm.page_table_lock);
- pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
- spin_unlock(&init_mm.page_table_lock);
-
- flush_tlb_all();
+ continue;
}
pte_base = (pte_t *)map_low_page((pte_t *)pmd_page_vaddr(*pmd));
- remove_pte_table(pte_base, addr, next, direct, split_pte);
-
- if (!direct && split_pte) {
- page_addr = page_address(pmd_page(*pmd));
- if (!memchr_inv(page_addr, PAGE_INUSE, PMD_SIZE)) {
- free_pagetable(pmd_page(*pmd),
- get_order(PMD_SIZE));
- }
- }
-
+ remove_pte_table(pte_base, addr, next, direct);
free_pte_table(pte_base, pmd);
unmap_low_page(pte_base);
}
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
bool direct)
{
- unsigned long pmd_phys, next, pages = 0;
+ unsigned long next, pages = 0;
pmd_t *pmd_base;
pud_t *pud;
void *page_addr;
- bool split_pmd = false;
pud = pud_start + pud_index(addr);
for (; addr < end; addr = next, pud++) {
pud_clear(pud);
spin_unlock(&init_mm.page_table_lock);
pages++;
- continue;
- }
+ } else {
+ /* If here, we are freeing vmemmap pages. */
+ memset((void *)addr, PAGE_INUSE, next - addr);
- /*
- * We use 1G page, but we need to remove part of them,
- * so split 1G page to 2M page.
- */
- pmd_base = (pmd_t *)alloc_low_page(&pmd_phys);
- BUG_ON(!pmd_base);
- __split_large_page((pte_t *)pud, addr,
- (pte_t *)pmd_base);
- split_pmd = true;
+ page_addr = page_address(pud_page(*pud));
+ if (!memchr_inv(page_addr, PAGE_INUSE,
+ PUD_SIZE)) {
+ free_pagetable(pud_page(*pud),
+ get_order(PUD_SIZE));
- spin_lock(&init_mm.page_table_lock);
- pud_populate(&init_mm, pud, __va(pmd_phys));
- spin_unlock(&init_mm.page_table_lock);
+ spin_lock(&init_mm.page_table_lock);
+ pud_clear(pud);
+ spin_unlock(&init_mm.page_table_lock);
+ }
+ }
- flush_tlb_all();
+ continue;
}
pmd_base = (pmd_t *)map_low_page((pmd_t *)pud_page_vaddr(*pud));
- remove_pmd_table(pmd_base, addr, next, direct, split_pmd);
-
- if (!direct && split_pmd) {
- page_addr = page_address(pud_page(*pud));
- if (!memchr_inv(page_addr, PAGE_INUSE, PUD_SIZE)) {
- free_pagetable(pud_page(*pud),
- get_order(PUD_SIZE));
- }
- }
-
+ remove_pmd_table(pmd_base, addr, next, direct);
free_pmd_table(pmd_base, pud);
unmap_low_page(pmd_base);
}