git.karo-electronics.de Git - karo-tx-linux.git/commitdiff
Merge branch 'numa/misc'
author: Ingo Molnar <mingo@kernel.org>
Tue, 23 Oct 2012 09:45:06 +0000 (11:45 +0200)
committer: Ingo Molnar <mingo@kernel.org>
Tue, 23 Oct 2012 09:45:06 +0000 (11:45 +0200)
arch/x86/mm/gup.c
mm/huge_memory.c
mm/memory.c

index dd74e46828c0fc243740b61a18c2dea654fafb5e..6dc992125a8414ebc9b0fe0dbd54a0432c8eaf98 100644 (file)
@@ -150,7 +150,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 
        pmdp = pmd_offset(&pud, addr);
        do {
-               pmd_t pmd = *pmdp;
+               /*
+                * With THP and hugetlbfs the pmd can change from
+                * under us and it can be cleared as well by the TLB
+                * shootdown, so read it with ACCESS_ONCE to do all
+                * computations on the same sampling.
+                */
+               pmd_t pmd = ACCESS_ONCE(*pmdp);
 
                next = pmd_addr_end(addr, end);
                /*
@@ -220,7 +226,13 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
 
        pudp = pud_offset(&pgd, addr);
        do {
-               pud_t pud = *pudp;
+               /*
+                * With hugetlbfs giga pages the pud can change from
+                * under us and it can be cleared as well by the TLB
+                * shootdown, so read it with ACCESS_ONCE to do all
+                * computations on the same sampling.
+                */
+               pud_t pud = ACCESS_ONCE(*pudp);
 
                next = pud_addr_end(addr, end);
                if (pud_none(pud))
@@ -280,7 +292,12 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
        local_irq_save(flags);
        pgdp = pgd_offset(mm, addr);
        do {
-               pgd_t pgd = *pgdp;
+               /*
+                * The pgd could be cleared by the TLB shootdown from
+                * under us so read it with ACCESS_ONCE to do all
+                * computations on the same sampling.
+                */
+               pgd_t pgd = ACCESS_ONCE(*pgdp);
 
                next = pgd_addr_end(addr, end);
                if (pgd_none(pgd))
index e62d3c5c5586bcf3a10575e78862e687b2755072..c58a5f05f39949634e794d5229e91b34fda779a8 100644 (file)
@@ -711,7 +711,8 @@ out:
         * run pte_offset_map on the pmd, if an huge pmd could
         * materialize from under us from a different thread.
         */
-       if (unlikely(__pte_alloc(mm, vma, pmd, address)))
+       if (unlikely(pmd_none(*pmd)) &&
+           unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if an huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))
index 1ea7e5be2d9cc8d11c748943591b87154d2dee82..dbe32bd28642b0262a544f2932cec1f897111df1 100644 (file)
@@ -3538,7 +3538,7 @@ int handle_pte_fault(struct mm_struct *mm,
        pte_t entry;
        spinlock_t *ptl;
 
-       entry = *pte;
+       entry = ACCESS_ONCE(*pte);
        if (!pte_present(entry)) {
                if (pte_none(entry)) {
                        if (vma->vm_ops) {
@@ -3654,7 +3654,8 @@ retry:
         * run pte_offset_map on the pmd, if an huge pmd could
         * materialize from under us from a different thread.
         */
-       if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
+       if (unlikely(pmd_none(*pmd)) &&
+           unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        /* if an huge pmd materialized from under us just retry later */
        if (unlikely(pmd_trans_huge(*pmd)))