From 56dd47098abe1fdde598a8d8b7c04d775506f456 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 18 Dec 2009 16:24:34 +0000
Subject: [PATCH] ARM: make_coherent: fix problems with highpte, part 1

update_mmu_cache() is called with a page table already mapped.  We call
make_coherent(), which then calls adjust_pte() which wants to map other
page tables.  This causes kmap_atomic() to BUG() because the slot it's
trying to use is already taken.

Since do_adjust_pte() modifies the page tables, we are also missing any
form of locking, so we're risking corrupting the page tables.

Fix this by using pte_offset_map_nested(), and taking the pte page
table lock around do_adjust_pte().

Signed-off-by: Russell King
---
 arch/arm/mm/fault-armv.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 7a8efe1b37d8..8e9bc517132e 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -67,6 +67,7 @@ static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
 
 static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 {
+	spinlock_t *ptl;
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
@@ -80,11 +81,19 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 	if (pmd_none_or_clear_bad(pmd))
 		return 0;
 
-	pte = pte_offset_map(pmd, address);
+	/*
+	 * This is called while another page table is mapped, so we
+	 * must use the nested version.  This also means we need to
+	 * open-code the spin-locking.
+	 */
+	ptl = pte_lockptr(vma->vm_mm, pmd);
+	pte = pte_offset_map_nested(pmd, address);
+	spin_lock(ptl);
 
 	ret = do_adjust_pte(vma, address, pte);
 
-	pte_unmap(pte);
+	spin_unlock(ptl);
+	pte_unmap_nested(pte);
 
 	return ret;
 }
-- 
2.39.5
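
For context, a sketch of how adjust_pte() reads with this patch applied.  Only
the hunks above are authoritative; the pgd/pmd walk and the `int ret`
declaration are not visible in the diff and are assumed from the surrounding
code of that era, so treat them as a reconstruction rather than the exact file
contents:

static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
{
	spinlock_t *ptl;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int ret;

	/* Page table walk: assumed unchanged by this patch (not in the diff). */
	pgd = pgd_offset(vma->vm_mm, address);
	if (pgd_none_or_clear_bad(pgd))
		return 0;

	pmd = pmd_offset(pgd, address);
	if (pmd_none_or_clear_bad(pmd))
		return 0;

	/*
	 * This is called while another page table is mapped, so we
	 * must use the nested version.  This also means we need to
	 * open-code the spin-locking.
	 */
	ptl = pte_lockptr(vma->vm_mm, pmd);
	pte = pte_offset_map_nested(pmd, address);
	spin_lock(ptl);

	/* Modifies the PTE, hence the lock taken above. */
	ret = do_adjust_pte(vma, address, pte);

	spin_unlock(ptl);
	pte_unmap_nested(pte);

	return ret;
}

The key point is the pairing: pte_offset_map_nested()/pte_unmap_nested() use
the second atomic kmap slot so they do not collide with the mapping already
held by the caller, while pte_lockptr() + spin_lock()/spin_unlock() open-code
the locking that pte_offset_map_lock() would otherwise provide.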