/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef _SPARC64_SPITFIRE_H
#define _SPARC64_SPITFIRE_H

#include <asm/asi.h>

/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is, there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000 /* All chips				*/
#define TLB_SFSR		0x0000000000000018 /* All chips				*/
#define TSB_REG			0x0000000000000028 /* All chips				*/
#define TLB_TAG_ACCESS		0x0000000000000030 /* All chips				*/
#define VIRT_WATCHPOINT		0x0000000000000038 /* All chips				*/
#define PHYS_WATCHPOINT		0x0000000000000040 /* All chips				*/
#define TSB_EXTENSION_P		0x0000000000000048 /* Ultra-III and later		*/
#define TSB_EXTENSION_S		0x0000000000000050 /* Ultra-III and later, D-TLB only	*/
#define TSB_EXTENSION_N		0x0000000000000058 /* Ultra-III and later		*/
#define TLB_TAG_ACCESS_EXT	0x0000000000000060 /* Ultra-III+ and later		*/
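
#ifndef __ASSEMBLY__
/* Illustrative sketch, not part of the original header: because each MMU
 * keeps its own copy of the registers above, the same offset is read via
 * ASI_DMMU for the D-TLB copy and via ASI_IMMU for the I-TLB copy.  The
 * helper names here are hypothetical.
 */
static __inline__ unsigned long example_get_dmmu_sfsr(void)
{
	unsigned long sfsr;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (sfsr)
			     : "r" (TLB_SFSR), "i" (ASI_DMMU));
	return sfsr;
}

static __inline__ unsigned long example_get_immu_sfsr(void)
{
	unsigned long sfsr;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (sfsr)
			     : "r" (TLB_SFSR), "i" (ASI_IMMU));
	return sfsr;
}
#endif /* !(__ASSEMBLY__) */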

/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040
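
#ifndef __ASSEMBLY__
/* Illustrative sketch, not part of the original header: these single-copy
 * registers are reached through ASI_DMMU only, e.g. when reading the
 * current primary context number.  The helper name is hypothetical.
 */
static __inline__ unsigned long example_get_primary_context(void)
{
	unsigned long ctx;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (ctx)
			     : "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
	return ctx;
}
#endif /* !(__ASSEMBLY__) */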

#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE		0x4000

#ifndef __ASSEMBLY__

enum ultra_tlb_layout {
	spitfire = 0,
	cheetah = 1,
	cheetah_plus = 2,
};

extern enum ultra_tlb_layout tlb_type;

extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);

#define sparc64_highest_locked_tlbent()	\
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)

/* The data cache is write through, so this just invalidates the
 * specified line.
 */
static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
}

/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline.
 * A flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
}

static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));

	/* Clear TTE diag bits. */
	data &= ~0x0003fe0000000000UL;

	return data;
}

static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (data), "r" (entry << 3),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has "all non-locked" tlb flushes. */
static __inline__ void cheetah_flush_dtlb_all(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_DMMU_DEMAP));
}

static __inline__ void cheetah_flush_itlb_all(void)
{
	__asm__ __volatile__("stxa	%%g0, [%0] %1\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (0x80), "i" (ASI_IMMU_DEMAP));
}

/* Cheetah has a 4-TLB layout, so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries each, and
 * are used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2-way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2-way
 * associative, and holds 128 entries.
 *
 * Cheetah has a bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads; doing the load twice fixes
 * the problem for me. -DaveM
 */
static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %%g0\n\t"
			     "ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %%g0\n\t"
			     "ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_TAG_READ));

	return tag;
}

static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_TAG_READ));

	return tag;
}

static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((0 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %%g0\n\t"
			     "ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
	return tag;
}

static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (data),
			       "r" ((tlb << 16) | (entry << 3)),
			       "i" (ASI_DTLB_DATA_ACCESS));
}

static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{
	unsigned long data;

	__asm__ __volatile__("ldxa	[%1] %2, %%g0\n\t"
			     "ldxa	[%1] %2, %0"
			     : "=r" (data)
			     : "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));

	return data;
}

static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{
	unsigned long tag;

	__asm__ __volatile__("ldxa	[%1] %2, %0"
			     : "=r" (tag)
			     : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
	return tag;
}

static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{
	__asm__ __volatile__("stxa	%0, [%1] %2\n\t"
			     "membar	#Sync"
			     : /* No outputs */
			     : "r" (data), "r" ((2 << 16) | (entry << 3)),
			       "i" (ASI_ITLB_DATA_ACCESS));
}

#endif /* !(__ASSEMBLY__) */

#endif /* !(_SPARC64_SPITFIRE_H) */