/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl-mc/fsl_mc.h>
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;

#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use 4KB granule size, with 40 bits physical address, T0SZ=24:
 *   Level 0 IA[39], table address @0
 *   Level 1 IA[38:30], table address @0x1000, 0x2000
 *   Level 2 IA[29:21], table address @0x3000
 */

#define SECTION_SHIFT_L0	39UL
#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE	0x30000000
#define CONFIG_SYS_IFC_SIZE	0x10000000
#define CONFIG_SYS_IFC_BASE2	0x500000000
#define CONFIG_SYS_IFC_SIZE2	0x100000000
#define TCR_EL2_PS_40BIT	(2 << 16)
#define LSCH3_VA_BITS		(40)
#define LSCH3_TCR	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_NON		| \
			TCR_ORGN_NC		| \
			TCR_IRGN_NC		| \
			TCR_T0SZ(LSCH3_VA_BITS))
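
/*
 * Worked example of the walk above: for the QBMan address 0x8_1800_0000
 * (FINAL_QBMAN_CACHED_MEM below), VA[39] = 0 selects level 0 entry 0,
 * VA[38:30] = 0x20 selects the 1GB block at 0x8_0000_0000, and
 * VA[29:21] = 0xC0 selects the 2MB block at 0x8_1800_0000 within it.
 */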

/*
 * Final MMU
 * Let's start from the same layout as the early MMU and modify as needed.
 * IFC regions will be cache-inhibit.
 */
#define FINAL_QBMAN_CACHED_MEM	0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE	0x4000000

static inline void early_mmu_setup(void)
{
	int el;
	u64 i;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache enabled, covering 512GB to 1TB
	 * set level 2 table to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;
	section_l2 = 0;

	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table | PMD_TYPE_TABLE;
	level1_table_0[1] =
		0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_DEVICE_NGNRNE);
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache */
	set_pgtable_section(level2_table,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);
	for (i = CONFIG_SYS_IFC_BASE >> SECTION_SHIFT_L2;
	     i < (CONFIG_SYS_IFC_BASE + CONFIG_SYS_IFC_SIZE)
	     >> SECTION_SHIFT_L2; i++) {
		section_l2 = i << SECTION_SHIFT_L2;
		set_pgtable_section(level2_table, i,
				    section_l2, MT_NORMAL);
	}
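
	/*
	 * OCRAM holds the translation tables themselves and IFC holds the
	 * flash U-Boot runs from before relocation, which is presumably
	 * why these two regions are flipped back to MT_NORMAL above.
	 */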

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}

/*
 * The final table looks similar to the early table, but differs in detail.
 * These tables are in regular memory. Cache on IFC is disabled. One sub-table
 * is added to enable cache for QBMan.
 */
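/*
 * Note: unlike the early table, normal memory below carries
 * PMD_SECT_OUTER_SHARE; the assumption is that the final mapping must be
 * shareable once secondary cores are running.
 */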
static inline void final_mmu_setup(void)
{
	int el;
	u64 i, tbl_base, tbl_limit, section_base;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache enabled, covering 512GB to 1TB
	 * set level 2 table 0 to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
	section_l2 = 0;

	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

	/*
	 * Fill in other parts of the tables if cache is needed.
	 * If finer granularity than 1GB is needed, a sub-table
	 * has to be created.
	 */
	section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
	i = section_base >> SECTION_SHIFT_L1;
	level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2 = section_base;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level2_table_1, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l2 += BLOCK_SIZE_L2;
	}

	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
	tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
		    (BLOCK_SIZE_L1 - 1);
	for (i = tbl_base >> SECTION_SHIFT_L2;
	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
		section_l2 = section_base + (i << SECTION_SHIFT_L2);
		set_pgtable_section(level2_table_1, i,
				    section_l2, MT_NORMAL);
	}
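
	/*
	 * With FINAL_QBMAN_CACHED_MEM = 0x8_1800_0000 and a 0x400_0000 size,
	 * tbl_base >> SECTION_SHIFT_L2 is 0xC0 and tbl_limit >>
	 * SECTION_SHIFT_L2 is 0xE0, so the loop above switches entries
	 * 0xC0-0xDF (32 x 2MB = 64MB) of level2_table_1 to cached.
	 */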

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	asm volatile("dsb sy");
	if (el == 1) {
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 2) {
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 3) {
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else {
		hang();
	}
	asm volatile("isb");

	/*
	 * MMU is already enabled; we only need to invalidate the TLB to load
	 * the new table. The new table is compatible with the current one, so
	 * even if the MMU walks the new table before the TLB is invalidated,
	 * it still works. Hence there is no need to turn off the MMU here.
	 */
}

int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}

/*
 * The Dickens L3 cache can be flushed by transitioning from FAM to SFONLY
 * power state, by writing to the HN-F P-state request register.
 * Fixme: This function should be moved to a common file if other SoCs also
 * use Dickens.
 */
#define HNF0_PSTATE_REQ	0x04200010
#define HNF1_PSTATE_REQ	0x04210010
#define HNF2_PSTATE_REQ	0x04220010
#define HNF3_PSTATE_REQ	0x04230010
#define HNF4_PSTATE_REQ	0x04240010
#define HNF5_PSTATE_REQ	0x04250010
#define HNF6_PSTATE_REQ	0x04260010
#define HNF7_PSTATE_REQ	0x04270010
#define HNFPSTAT_MASK	(0xFFFFFFFFFFFFFFFC)
#define HNFPSTAT_FAM	0x3
#define HNFPSTAT_SFONLY	0x01
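
/*
 * The eight HN-F nodes are spaced 0x10000 apart, with the P-state request
 * register at offset 0x10 into each node, as the addresses above show.
 * hnf_pstate_req() below assumes the P-state status register is the next
 * 64-bit word after the request register.
 */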
static void hnf_pstate_req(u64 *ptr, u64 state)
{
	int timeout = 1000;

	out_le64(ptr, (in_le64(ptr) & HNFPSTAT_MASK) | (state & 0x3));
	ptr++;	/* advance to the P-state status register */
	/* checking if the transition is completed */
	while (timeout > 0) {
		if (((in_le64(ptr) & 0x0c) >> 2) == (state & 0x3))
			break;
		udelay(100);
		timeout--;
	}
}

void flush_l3_cache(void)
{
	hnf_pstate_req((u64 *)HNF0_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF1_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF2_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF3_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF4_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF5_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF6_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF7_PSTATE_REQ, HNFPSTAT_SFONLY);
	hnf_pstate_req((u64 *)HNF0_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF1_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF2_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF3_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF4_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF5_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF6_PSTATE_REQ, HNFPSTAT_FAM);
	hnf_pstate_req((u64 *)HNF7_PSTATE_REQ, HNFPSTAT_FAM);
}
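
/*
 * Since the request registers sit at a fixed 0x10000 stride, the sequence
 * above could equally be written as a loop, e.g.:
 *
 *	int i;
 *
 *	for (i = 0; i < 8; i++)
 *		hnf_pstate_req((u64 *)(HNF0_PSTATE_REQ + i * 0x10000),
 *			       HNFPSTAT_SFONLY);
 */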

/*
 * This function is called from lib/board.c.
 * It recreates the MMU table in main memory. MMU and d-cache are enabled
 * earlier, so there is no need to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif

static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_le32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
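
/*
 * Return a mask with one bit set for each ARM core present, as read from
 * the topology (TP) registers in GUTS.
 */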
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}

/*
 * Return the number of cores on this SOC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}

int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}

u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;

		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the core type */
}

#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type;

	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
			(type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus));
	puts("\n");

	return 0;
}
#endif

int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = mc_init(bis);
#endif

	return error;
}

int arch_early_init_r(void)
{
	int rv;

	rv = fsl_lsch3_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

	return 0;
}