sparc64: add per-cpu mm of secondary contexts
author     Pavel Tatashin <pasha.tatashin@oracle.com>
           Wed, 31 May 2017 15:25:23 +0000 (11:25 -0400)
committer  David S. Miller <davem@davemloft.net>
           Tue, 6 Jun 2017 20:45:29 +0000 (13:45 -0700)
The new wrap code is going to use information from this array to figure
out which mm's currently have valid secondary contexts set up.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
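
For context, a minimal sketch of how a wrap pass could consume this
array (the helper name is hypothetical and not part of this commit;
the actual wrap rework lands in a follow-up change):

/* Hypothetical sketch, not part of this commit: on a context-version
 * wrap, walk per_cpu_secondary_mm to find every mm whose secondary
 * context is still live on some CPU.  Each such mm needs a fresh
 * context before the old version's context numbers can be reused.
 */
static void sketch_handle_ctx_wrap(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		struct mm_struct *mm = per_cpu(per_cpu_secondary_mm, cpu);

		if (!mm || mm == &init_mm)
			continue;
		/* allocate and install a new context for mm here */
	}
}
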
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/mm/init_64.c

diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 734a1343d77d9380cc3e3c5ca8d8f1fcb6852473..edb45247bfa9f2b0a7e1c4e155fe9087b09a5e53 100644
@@ -19,6 +19,7 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
 #ifdef CONFIG_SMP
 void smp_new_mmu_context_version(void);
@@ -76,8 +77,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
        unsigned long ctx_valid, flags;
-       int cpu;
+       int cpu = smp_processor_id();
 
+       per_cpu(per_cpu_secondary_mm, cpu) = mm;
        if (unlikely(mm == &init_mm))
                return;
 
@@ -123,7 +125,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
         * for the first time, we must flush that context out of the
         * local TLB.
         */
-       cpu = smp_processor_id();
        if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
                cpumask_set_cpu(cpu, mm_cpumask(mm));
                __flush_tlb_mm(CTX_HWBITS(mm->context),
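
Note that each CPU only ever writes its own slot, in switch_mm() above,
so remote readers can inspect the array without IPIs.  A hedged
illustration, using a helper name that does not exist in the tree:

/* Hypothetical helper for illustration only: test whether @mm is the
 * secondary context currently loaded on @cpu.  The slot is written
 * exclusively by @cpu itself in switch_mm(), so a plain read suffices
 * for heuristic checks like the wrap scan sketched above.
 */
static inline bool mm_is_secondary_on_cpu(struct mm_struct *mm, int cpu)
{
	return per_cpu(per_cpu_secondary_mm, cpu) == mm;
}
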
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 63b50447bb62eb2e1e5fdd15fcdf670c30958b7a..a4c0bc8af82029e4d4d2207575c0d0e691a844be 100644
@@ -711,6 +711,7 @@ unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR     (1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.