Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 63b50447bb62eb2e1e5fdd15fcdf670c30958b7a..3c40ebd50f928cbbbfe69c65c35810a78b30c53d 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -711,6 +711,54 @@ unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR     (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
+
+static void mmu_context_wrap(void)
+{
+       unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
+       unsigned long new_ver, new_ctx, old_ctx;
+       struct mm_struct *mm;
+       int cpu;
+
+       bitmap_zero(mmu_context_bmap, 1 << CTX_NR_BITS);
+
+       /* Reserve kernel context */
+       set_bit(0, mmu_context_bmap);
+
+       new_ver = (tlb_context_cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
+       if (unlikely(new_ver == 0))
+               new_ver = CTX_FIRST_VERSION;
+       tlb_context_cache = new_ver;
+
+       /*
+        * Make sure that any new mm that is added into per_cpu_secondary_mm
+        * goes through the get_new_mmu_context() path.
+        */
+       mb();
+
+       /*
+        * Update the version to current on those CPUs that had valid
+        * secondary contexts.
+        */
+       for_each_online_cpu(cpu) {
+               /*
+                * If a new mm is stored after we took this mm from the array,
+                * it will go through the get_new_mmu_context() path, because we
+                * already bumped the version in tlb_context_cache.
+                */
+               mm = per_cpu(per_cpu_secondary_mm, cpu);
+
+               if (unlikely(!mm || mm == &init_mm))
+                       continue;
+
+               old_ctx = mm->context.sparc64_ctx_val;
+               if (likely((old_ctx & CTX_VERSION_MASK) == old_ver)) {
+                       new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
+                       set_bit(new_ctx & CTX_NR_MASK, mmu_context_bmap);
+                       mm->context.sparc64_ctx_val = new_ctx;
+               }
+       }
+}
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
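
Aside: the wrap above relies on the context value packing a generation
("version") field in the bits above a small context-number field. Below is a
minimal standalone sketch of that split and of the overflow check, assuming
13-bit context numbers; the CTX_* constants are illustrative re-definitions
for the sketch, not the kernel's actual headers:

	#include <stdio.h>

	#define CTX_NR_BITS       13
	#define CTX_NR_MASK       ((1UL << CTX_NR_BITS) - 1)
	#define CTX_VERSION_MASK  (~0UL << CTX_NR_BITS)
	#define CTX_FIRST_VERSION (1UL << CTX_NR_BITS)

	int main(void)
	{
		/* tlb_context_cache as it would look at boot */
		unsigned long cache = CTX_FIRST_VERSION;
		unsigned long new_ver;

		/* A wrap leaves the number field zero and bumps only the
		 * version field, as mmu_context_wrap() does above. */
		new_ver = (cache & CTX_VERSION_MASK) + CTX_FIRST_VERSION;
		if (new_ver == 0)	/* the version field itself overflowed */
			new_ver = CTX_FIRST_VERSION;

		printf("version %lu, context number %lu\n",
		       new_ver >> CTX_NR_BITS, new_ver & CTX_NR_MASK);
		return 0;
	}

The reset to CTX_FIRST_VERSION on overflow matters because a freshly
initialized mm carries sparc64_ctx_val == 0; handing out version 0 would make
such an mm look as if it already held a valid context.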
@@ -726,50 +774,30 @@ void get_new_mmu_context(struct mm_struct *mm)
 {
        unsigned long ctx, new_ctx;
        unsigned long orig_pgsz_bits;
-       int new_version;
 
        spin_lock(&ctx_alloc_lock);
+retry:
+       /* A wrap may have happened; recheck whether our context became valid. */
+       if (unlikely(CTX_VALID(mm->context)))
+               goto out;
        orig_pgsz_bits = (mm->context.sparc64_ctx_val & CTX_PGSZ_MASK);
        ctx = (tlb_context_cache + 1) & CTX_NR_MASK;
        new_ctx = find_next_zero_bit(mmu_context_bmap, 1 << CTX_NR_BITS, ctx);
-       new_version = 0;
        if (new_ctx >= (1 << CTX_NR_BITS)) {
                new_ctx = find_next_zero_bit(mmu_context_bmap, ctx, 1);
                if (new_ctx >= ctx) {
-                       int i;
-                       new_ctx = (tlb_context_cache & CTX_VERSION_MASK) +
-                               CTX_FIRST_VERSION + 1;
-                       if (new_ctx == 1)
-                               new_ctx = CTX_FIRST_VERSION + 1;
-
-                       /* Don't call memset, for 16 entries that's just
-                        * plain silly...
-                        */
-                       mmu_context_bmap[0] = 3;
-                       mmu_context_bmap[1] = 0;
-                       mmu_context_bmap[2] = 0;
-                       mmu_context_bmap[3] = 0;
-                       for (i = 4; i < CTX_BMAP_SLOTS; i += 4) {
-                               mmu_context_bmap[i + 0] = 0;
-                               mmu_context_bmap[i + 1] = 0;
-                               mmu_context_bmap[i + 2] = 0;
-                               mmu_context_bmap[i + 3] = 0;
-                       }
-                       new_version = 1;
-                       goto out;
+                       mmu_context_wrap();
+                       goto retry;
                }
        }
        if (mm->context.sparc64_ctx_val)
                cpumask_clear(mm_cpumask(mm));
        mmu_context_bmap[new_ctx>>6] |= (1UL << (new_ctx & 63));
        new_ctx |= (tlb_context_cache & CTX_VERSION_MASK);
-out:
        tlb_context_cache = new_ctx;
        mm->context.sparc64_ctx_val = new_ctx | orig_pgsz_bits;
+out:
        spin_unlock(&ctx_alloc_lock);
-
-       if (unlikely(new_version))
-               smp_new_mmu_context_version();
 }
 
 static int numa_enabled = 1;
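
Aside: the allocation in get_new_mmu_context() is a circular scan over
mmu_context_bmap: search upward from one past the last allocated number, wrap
the scan around to 1 (bit 0 stays reserved for the kernel context), and only
when both scans fail wrap the version and retry. Below is a userspace sketch
of that scan, with a hand-rolled stand-in for the kernel's
find_next_zero_bit() and deliberately tiny constants; NR_CTX, next_zero() and
alloc_ctx() are illustrative names, not kernel APIs:

	#include <stdio.h>

	#define NR_CTX 64	/* tiny; the real bitmap has 1 << CTX_NR_BITS slots */

	static unsigned long bmap;	/* bit n set => context number n in use */
	static unsigned long last;	/* low bits of tlb_context_cache */

	/* Linear stand-in for find_next_zero_bit(). */
	static long next_zero(unsigned long map, long start, long end)
	{
		long i;

		for (i = start; i < end; i++)
			if (!(map & (1UL << i)))
				return i;
		return end;
	}

	static long alloc_ctx(void)
	{
		long ctx = (last + 1) % NR_CTX;
		long new_ctx = next_zero(bmap, ctx, NR_CTX);

		if (new_ctx >= NR_CTX) {
			/* Ran off the top: rescan [1, ctx). */
			new_ctx = next_zero(bmap, 1, ctx);
			if (new_ctx >= ctx)
				return -1;	/* exhausted: caller would wrap and retry */
		}
		bmap |= 1UL << new_ctx;
		last = new_ctx;
		return new_ctx;
	}

	int main(void)
	{
		int i;

		bmap = 1;	/* reserve context 0, as set_bit(0, ...) above does */
		for (i = 0; i < 4; i++)
			printf("allocated ctx %ld\n", alloc_ctx());
		return 0;
	}

In the kernel the exhausted case calls mmu_context_wrap() and jumps back to
retry, and the retry path re-checks CTX_VALID() first: the wrap revalidates
every mm that was live as a secondary context on some CPU, possibly including
the very mm this call is allocating for.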