/* $Id: spitfire.h,v 1.18 2001/11/29 16:42:10 kanoj Exp $
 * spitfire.h: SpitFire/BlackBird/Cheetah inline MMU operations.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */
7 #ifndef _SPARC64_SPITFIRE_H
8 #define _SPARC64_SPITFIRE_H
/* The following register addresses are accessible via ASI_DMMU
 * and ASI_IMMU, that is there is a distinct and unique copy of
 * each of these registers for each TLB.
 */
#define TSB_TAG_TARGET		0x0000000000000000 /* All chips */
#define TLB_SFSR		0x0000000000000018 /* All chips */
#define TSB_REG			0x0000000000000028 /* All chips */
#define TLB_TAG_ACCESS		0x0000000000000030 /* All chips */
#define VIRT_WATCHPOINT		0x0000000000000038 /* All chips */
#define PHYS_WATCHPOINT		0x0000000000000040 /* All chips */
#define TSB_EXTENSION_P		0x0000000000000048 /* Ultra-III and later */
#define TSB_EXTENSION_S		0x0000000000000050 /* Ultra-III and later, D-TLB only */
#define TSB_EXTENSION_N		0x0000000000000058 /* Ultra-III and later */
#define TLB_TAG_ACCESS_EXT	0x0000000000000060 /* Ultra-III+ and later */
/* These registers only exist as one entity, and are accessed
 * via ASI_DMMU only.
 */
#define PRIMARY_CONTEXT		0x0000000000000008
#define SECONDARY_CONTEXT	0x0000000000000010
#define DMMU_SFAR		0x0000000000000020
/* NOTE(review): VIRT_WATCHPOINT and PHYS_WATCHPOINT are also defined,
 * with identical values, in the ASI_DMMU/ASI_IMMU block above.  The
 * identical redefinition is legal C but one copy should be removed.
 */
#define VIRT_WATCHPOINT		0x0000000000000038
#define PHYS_WATCHPOINT		0x0000000000000040
/* Highest TLB entry index usable for locked translations, per chip. */
#define SPITFIRE_HIGHEST_LOCKED_TLBENT	(64 - 1)
#define CHEETAH_HIGHEST_LOCKED_TLBENT	(16 - 1)

#define L1DCACHE_SIZE 0x4000
43 enum ultra_tlb_layout {
49 extern enum ultra_tlb_layout tlb_type;
/* Cheetah P-cache control; both are defined in the arch support code.
 * cheetah_enable_pcache() turns the P-cache on, cheetah_pcache_forced_on
 * records that it was forced on.
 */
extern int cheetah_pcache_forced_on;
extern void cheetah_enable_pcache(void);
/* Highest locked-entry index for the TLB type detected at boot. */
#define sparc64_highest_locked_tlbent() \
	(tlb_type == spitfire ? \
	 SPITFIRE_HIGHEST_LOCKED_TLBENT : \
	 CHEETAH_HIGHEST_LOCKED_TLBENT)
59 static __inline__ unsigned long spitfire_get_isfsr(void)
63 __asm__ __volatile__("ldxa [%1] %2, %0"
65 : "r" (TLB_SFSR), "i" (ASI_IMMU));
69 static __inline__ unsigned long spitfire_get_dsfsr(void)
73 __asm__ __volatile__("ldxa [%1] %2, %0"
75 : "r" (TLB_SFSR), "i" (ASI_DMMU));
79 static __inline__ unsigned long spitfire_get_sfar(void)
83 __asm__ __volatile__("ldxa [%1] %2, %0"
85 : "r" (DMMU_SFAR), "i" (ASI_DMMU));
89 static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
91 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
94 : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
97 static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
99 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
102 : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
/* The data cache is write through, so this just invalidates the
 * specified line.
 */
108 static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
110 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
113 : "r" (tag), "r" (addr), "i" (ASI_DCACHE_TAG));
/* The instruction cache lines are flushed with this, but note that
 * this does not flush the pipeline.  It is possible for a line to
 * get flushed but stale instructions to still be in the pipeline,
 * a flush instruction (to any address) is sufficient to handle
 * this issue after the line is invalidated.
 */
122 static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
124 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
127 : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
130 static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
134 __asm__ __volatile__("ldxa [%1] %2, %0"
136 : "r" (entry << 3), "i" (ASI_DTLB_DATA_ACCESS));
138 /* Clear TTE diag bits. */
139 data &= ~0x0003fe0000000000UL;
144 static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
148 __asm__ __volatile__("ldxa [%1] %2, %0"
150 : "r" (entry << 3), "i" (ASI_DTLB_TAG_READ));
154 static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
156 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
159 : "r" (data), "r" (entry << 3),
160 "i" (ASI_DTLB_DATA_ACCESS));
163 static __inline__ unsigned long spitfire_get_itlb_data(int entry)
167 __asm__ __volatile__("ldxa [%1] %2, %0"
169 : "r" (entry << 3), "i" (ASI_ITLB_DATA_ACCESS));
171 /* Clear TTE diag bits. */
172 data &= ~0x0003fe0000000000UL;
177 static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
181 __asm__ __volatile__("ldxa [%1] %2, %0"
183 : "r" (entry << 3), "i" (ASI_ITLB_TAG_READ));
187 static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
189 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
192 : "r" (data), "r" (entry << 3),
193 "i" (ASI_ITLB_DATA_ACCESS));
/* Spitfire hardware assisted TLB flushes. */

/* Context level flushes. */
199 static __inline__ void spitfire_flush_dtlb_primary_context(void)
201 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
204 : "r" (0x40), "i" (ASI_DMMU_DEMAP));
207 static __inline__ void spitfire_flush_itlb_primary_context(void)
209 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
212 : "r" (0x40), "i" (ASI_IMMU_DEMAP));
215 static __inline__ void spitfire_flush_dtlb_secondary_context(void)
217 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
220 : "r" (0x50), "i" (ASI_DMMU_DEMAP));
223 static __inline__ void spitfire_flush_itlb_secondary_context(void)
225 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
228 : "r" (0x50), "i" (ASI_IMMU_DEMAP));
231 static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
233 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
236 : "r" (0x60), "i" (ASI_DMMU_DEMAP));
239 static __inline__ void spitfire_flush_itlb_nucleus_context(void)
241 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
244 : "r" (0x60), "i" (ASI_IMMU_DEMAP));
/* Page level flushes. */
248 static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
250 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
253 : "r" (page), "i" (ASI_DMMU_DEMAP));
256 static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
258 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
261 : "r" (page), "i" (ASI_IMMU_DEMAP));
264 static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
266 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
269 : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
272 static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
274 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
277 : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
280 static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
282 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
285 : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
288 static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
290 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
293 : "r" (page | 0x20), "i" (ASI_IMMU_DEMAP));
/* Cheetah has "all non-locked" tlb flushes. */
297 static __inline__ void cheetah_flush_dtlb_all(void)
299 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
302 : "r" (0x80), "i" (ASI_DMMU_DEMAP));
305 static __inline__ void cheetah_flush_itlb_all(void)
307 __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
310 : "r" (0x80), "i" (ASI_IMMU_DEMAP));
/* Cheetah has a 4-tlb layout so direct access is a bit different.
 * The first two TLBs are fully associative, hold 16 entries, and are
 * used only for locked and >8K sized translations.  One exists for
 * data accesses and one for instruction accesses.
 *
 * The third TLB is for data accesses to 8K non-locked translations, is
 * 2 way associative, and holds 512 entries.  The fourth TLB is for
 * instruction accesses to 8K non-locked translations, is 2 way
 * associative, and holds 128 entries.
 *
 * Cheetah has some bug where bogus data can be returned from
 * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
 * the problem for me. -DaveM
 */
327 static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
331 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
334 : "r" ((0 << 16) | (entry << 3)),
335 "i" (ASI_DTLB_DATA_ACCESS));
340 static __inline__ unsigned long cheetah_get_litlb_data(int entry)
344 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
347 : "r" ((0 << 16) | (entry << 3)),
348 "i" (ASI_ITLB_DATA_ACCESS));
353 static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
357 __asm__ __volatile__("ldxa [%1] %2, %0"
359 : "r" ((0 << 16) | (entry << 3)),
360 "i" (ASI_DTLB_TAG_READ));
365 static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
369 __asm__ __volatile__("ldxa [%1] %2, %0"
371 : "r" ((0 << 16) | (entry << 3)),
372 "i" (ASI_ITLB_TAG_READ));
377 static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
379 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
383 "r" ((0 << 16) | (entry << 3)),
384 "i" (ASI_DTLB_DATA_ACCESS));
387 static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
389 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
393 "r" ((0 << 16) | (entry << 3)),
394 "i" (ASI_ITLB_DATA_ACCESS));
397 static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
401 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
404 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_DATA_ACCESS));
409 static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
413 __asm__ __volatile__("ldxa [%1] %2, %0"
415 : "r" ((tlb << 16) | (entry << 3)), "i" (ASI_DTLB_TAG_READ));
419 static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
421 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
425 "r" ((tlb << 16) | (entry << 3)),
426 "i" (ASI_DTLB_DATA_ACCESS));
429 static __inline__ unsigned long cheetah_get_itlb_data(int entry)
433 __asm__ __volatile__("ldxa [%1] %2, %%g0\n\t"
436 : "r" ((2 << 16) | (entry << 3)),
437 "i" (ASI_ITLB_DATA_ACCESS));
442 static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
446 __asm__ __volatile__("ldxa [%1] %2, %0"
448 : "r" ((2 << 16) | (entry << 3)), "i" (ASI_ITLB_TAG_READ));
452 static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
454 __asm__ __volatile__("stxa %0, [%1] %2\n\t"
457 : "r" (data), "r" ((2 << 16) | (entry << 3)),
458 "i" (ASI_ITLB_DATA_ACCESS));
461 #endif /* !(__ASSEMBLY__) */
463 #endif /* !(_SPARC64_SPITFIRE_H) */