/*
 * Derived from "include/asm-i386/mmu_context.h"
 */

#ifndef __S390_MMU_CONTEXT_H
#define __S390_MMU_CONTEXT_H

#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h>
#include <asm/ctl_reg.h>
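
/*
 * Set up the architecture part of a new mm_struct: reset the attach
 * count and deferred-flush state, initialize the ASCE bits and limit,
 * and initialize the top-level region/segment table.
 */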
static inline int init_new_context(struct task_struct *tsk,
				   struct mm_struct *mm)
{
	atomic_set(&mm->context.attach_count, 0);
	mm->context.flush_mm = 0;
	mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
#ifdef CONFIG_64BIT
	mm->context.asce_bits |= _ASCE_TYPE_REGION3;
#endif
	mm->context.has_pgste = 0;
	mm->context.asce_limit = STACK_TOP_MAX;
	crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
	return 0;
}

#define destroy_context(mm)	do { } while (0)

#ifndef CONFIG_64BIT
#define LCTL_OPCODE "lctl"
#else
#define LCTL_OPCODE "lctlg"
#endif
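
/*
 * Make mm the current primary address space: publish the ASCE in the
 * lowcore and load it into control register 1, then restore the
 * task's address space mode for uaccess.
 */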
static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
{
	pgd_t *pgd = mm->pgd;

	S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
	/* Load primary space page table origin. */
	asm volatile(LCTL_OPCODE" 1,1,%0\n" : : "m" (S390_lowcore.user_asce));
	set_fs(current->thread.mm_segment);
}
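
/*
 * Switch to the next mm: attach this CPU to the mm, maintain the
 * attach counts of the previous and next mm, and perform a TLB flush
 * for next if one is still pending.
 */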
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));
	update_mm(next, tsk);
	atomic_dec(&prev->context.attach_count);
	WARN_ON(atomic_read(&prev->context.attach_count) < 0);
	atomic_inc(&next->context.attach_count);
	/* Check for TLBs not flushed yet */
	__tlb_flush_mm_lazy(next);
}

#define enter_lazy_tlb(mm,tsk)	do { } while (0)
#define deactivate_mm(tsk,mm)	do { } while (0)

static inline void activate_mm(struct mm_struct *prev,
			       struct mm_struct *next)
{
	switch_mm(prev, next, current);
}
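
/*
 * On fork, if the parent runs with a reduced address space limit,
 * shrink the new mm's page table hierarchy to match.
 */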
static inline void arch_dup_mmap(struct mm_struct *oldmm,
				 struct mm_struct *mm)
{
#ifdef CONFIG_64BIT
	if (oldmm->context.asce_limit < mm->context.asce_limit)
		crst_table_downgrade(mm, oldmm->context.asce_limit);
#endif
}

static inline void arch_exit_mmap(struct mm_struct *mm)
{
}

#endif /* __S390_MMU_CONTEXT_H */