/*
 * Copyright 2014 Freescale Semiconductor, Inc.
 *
 * SPDX-License-Identifier:	GPL-2.0+
 */

#include <common.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/armv8/mmu.h>
#include <asm/arch-fsl-lsch3/immap_lsch3.h>
#include <fsl_debug_server.h>
#include <fsl-mc/fsl_mc.h>
#include "cpu.h"
#include "mp.h"
#include "speed.h"

DECLARE_GLOBAL_DATA_PTR;
#ifndef CONFIG_SYS_DCACHE_OFF
/*
 * To start the MMU before DDR is available, we create an MMU table in SRAM.
 * The base address of SRAM is CONFIG_SYS_FSL_OCRAM_BASE. We use three
 * levels of translation tables here to cover the 40-bit address space.
 * We use a 4KB granule size, with 40 bits of physical address, T0SZ=24.
 *   Level 0 IA[39],    table address @0
 *   Level 1 IA[38:30], table address @0x1000, 0x2000
 *   Level 2 IA[29:21], table address @0x3000
 */
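/*
 * Illustrative note (not in the original source): with a 4KB granule and
 * T0SZ=24, a 40-bit input address decodes as
 *   Level 0 index = IA[39]     (2 entries,   512GB each)
 *   Level 1 index = IA[38:30]  (512 entries, 1GB each)
 *   Level 2 index = IA[29:21]  (512 entries, 2MB each)
 * e.g. 0x8_1800_0000 walks L0[0] -> L1[32] -> L2[192].
 */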
#define SECTION_SHIFT_L0	39UL
#define SECTION_SHIFT_L1	30UL
#define SECTION_SHIFT_L2	21UL
#define BLOCK_SIZE_L0		0x8000000000UL
#define BLOCK_SIZE_L1		(1 << SECTION_SHIFT_L1)
#define BLOCK_SIZE_L2		(1 << SECTION_SHIFT_L2)
#define CONFIG_SYS_IFC_BASE	0x30000000
#define CONFIG_SYS_IFC_SIZE	0x10000000
#define CONFIG_SYS_IFC_BASE2	0x500000000
#define CONFIG_SYS_IFC_SIZE2	0x100000000
#define TCR_EL2_PS_40BIT	(2 << 16)
#define LSCH3_VA_BITS		(40)
#define LSCH3_TCR	(TCR_TG0_4K		| \
			TCR_EL2_PS_40BIT	| \
			TCR_SHARED_NON		| \
			TCR_ORGN_NC		| \
			TCR_IRGN_NC		| \
			TCR_T0SZ(LSCH3_VA_BITS))
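/*
 * Illustrative note: TCR_T0SZ(LSCH3_VA_BITS) encodes T0SZ = 64 - 40 = 24,
 * matching the 40-bit address space above, and PS = 2 in TCR_EL2_PS_40BIT
 * selects a 40-bit physical address range per the ARMv8 TCR encoding.
 */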
/*
 * Final MMU:
 * Let's start from the same layout as the early MMU and modify as needed.
 * IFC regions will be cache-inhibited.
 */
#define FINAL_QBMAN_CACHED_MEM	0x818000000UL
#define FINAL_QBMAN_CACHED_SIZE	0x4000000
static inline void early_mmu_setup(void)
{
	int el;
	u64 i;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)CONFIG_SYS_FSL_OCRAM_BASE;
	u64 *level1_table_0 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x1000);
	u64 *level1_table_1 = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x2000);
	u64 *level2_table = (u64 *)(CONFIG_SYS_FSL_OCRAM_BASE + 0x3000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache enabled, covering 512GB to 1TB
	 * set level 2 table to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table | PMD_TYPE_TABLE;
	level1_table_0[1] =
		0x40000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_DEVICE_NGNRNE);
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache for OCRAM and IFC */
	set_pgtable_section(level2_table,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);
	for (i = CONFIG_SYS_IFC_BASE >> SECTION_SHIFT_L2;
	     i < (CONFIG_SYS_IFC_BASE + CONFIG_SYS_IFC_SIZE)
	     >> SECTION_SHIFT_L2; i++) {
		section_l2 = i << SECTION_SHIFT_L2;
		set_pgtable_section(level2_table, i,
				    section_l2, MT_NORMAL);
	}

	el = current_el();
	set_ttbr_tcr_mair(el, (u64)level0_table, LSCH3_TCR, MEMORY_ATTRIBUTES);
	set_sctlr(get_sctlr() | CR_M);
}
/*
 * This final table looks similar to the early table, but differs in detail.
 * These tables are in regular memory. Cache on IFC is disabled. One sub-table
 * is added to enable cache for QBMan.
 */
static inline void final_mmu_setup(void)
{
	int el;
	u64 i, tbl_base, tbl_limit, section_base;
	u64 section_l1t0, section_l1t1, section_l2;
	u64 *level0_table = (u64 *)gd->arch.tlb_addr;
	u64 *level1_table_0 = (u64 *)(gd->arch.tlb_addr + 0x1000);
	u64 *level1_table_1 = (u64 *)(gd->arch.tlb_addr + 0x2000);
	u64 *level2_table_0 = (u64 *)(gd->arch.tlb_addr + 0x3000);
	u64 *level2_table_1 = (u64 *)(gd->arch.tlb_addr + 0x4000);

	level0_table[0] =
		(u64)level1_table_0 | PMD_TYPE_TABLE;
	level0_table[1] =
		(u64)level1_table_1 | PMD_TYPE_TABLE;

	/*
	 * set level 1 table 0 to cache-inhibit, covering 0 to 512GB
	 * set level 1 table 1 to cache enabled, covering 512GB to 1TB
	 * set level 2 table 0 to cache-inhibit, covering 0 to 1GB
	 */
	section_l1t0 = 0;
	section_l1t1 = BLOCK_SIZE_L0 | PMD_SECT_OUTER_SHARE;
	section_l2 = 0;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level1_table_0, i, section_l1t0,
				    MT_DEVICE_NGNRNE);
		set_pgtable_section(level1_table_1, i, section_l1t1,
				    MT_NORMAL);
		set_pgtable_section(level2_table_0, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l1t0 += BLOCK_SIZE_L1;
		section_l1t1 += BLOCK_SIZE_L1;
		section_l2 += BLOCK_SIZE_L2;
	}

	level1_table_0[0] =
		(u64)level2_table_0 | PMD_TYPE_TABLE;
	level1_table_0[2] =
		0x80000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);
	level1_table_0[3] =
		0xc0000000 | PMD_SECT_AF | PMD_TYPE_SECT |
		PMD_SECT_OUTER_SHARE | PMD_ATTRINDX(MT_NORMAL);

	/* Rewrite table to enable cache for OCRAM */
	set_pgtable_section(level2_table_0,
			    CONFIG_SYS_FSL_OCRAM_BASE >> SECTION_SHIFT_L2,
			    CONFIG_SYS_FSL_OCRAM_BASE,
			    MT_NORMAL);

	/*
	 * Fill in other parts of the tables if cache is needed.
	 * If finer granularity than 1GB is needed, a sub-table
	 * should be created.
	 */
	section_base = FINAL_QBMAN_CACHED_MEM & ~(BLOCK_SIZE_L1 - 1);
	i = section_base >> SECTION_SHIFT_L1;
	level1_table_0[i] = (u64)level2_table_1 | PMD_TYPE_TABLE;
	section_l2 = section_base;
	for (i = 0; i < 512; i++) {
		set_pgtable_section(level2_table_1, i, section_l2,
				    MT_DEVICE_NGNRNE);
		section_l2 += BLOCK_SIZE_L2;
	}
	tbl_base = FINAL_QBMAN_CACHED_MEM & (BLOCK_SIZE_L1 - 1);
	tbl_limit = (FINAL_QBMAN_CACHED_MEM + FINAL_QBMAN_CACHED_SIZE) &
		    (BLOCK_SIZE_L1 - 1);
	for (i = tbl_base >> SECTION_SHIFT_L2;
	     i < tbl_limit >> SECTION_SHIFT_L2; i++) {
		section_l2 = section_base + (i << SECTION_SHIFT_L2);
		set_pgtable_section(level2_table_1, i,
				    section_l2, MT_NORMAL);
	}

	/* flush new MMU table */
	flush_dcache_range(gd->arch.tlb_addr,
			   gd->arch.tlb_addr + gd->arch.tlb_size);

	/* point TTBR to the new table */
	el = current_el();
	asm volatile("dsb sy");
	if (el == 1) {
		asm volatile("msr ttbr0_el1, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 2) {
		asm volatile("msr ttbr0_el2, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else if (el == 3) {
		asm volatile("msr ttbr0_el3, %0"
			     : : "r" ((u64)level0_table) : "memory");
	} else {
		hang();
	}
	asm volatile("isb");

	/*
	 * The MMU is already enabled; we just need to invalidate the TLB to
	 * load the new table. The new table is compatible with the current
	 * one, so even if the MMU walks through the new table before the TLB
	 * is invalidated, it still works. We therefore don't need to turn the
	 * MMU off here.
	 */
}
int arch_cpu_init(void)
{
	icache_enable();
	__asm_invalidate_dcache_all();
	__asm_invalidate_tlb_all();
	early_mmu_setup();
	set_sctlr(get_sctlr() | CR_C);

	return 0;
}
/*
 * This function is called from lib/board.c. It recreates the MMU table in
 * main memory. The MMU and d-cache are enabled earlier, so there is no need
 * to disable the d-cache for this operation.
 */
void enable_caches(void)
{
	final_mmu_setup();
	__asm_invalidate_tlb_all();
}
#endif
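/*
 * Look up the initiator in slot init_id of a topology cluster word and
 * return its type from the GUTS TP_ITYP array, or 0 if the initiator is
 * not available (TP_ITYP_AV not set).
 */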
static inline u32 initiator_type(u32 cluster, int init_id)
{
	struct ccsr_gur *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	u32 idx = (cluster >> (init_id * 8)) & TP_CLUSTER_INIT_MASK;
	u32 type = in_le32(&gur->tp_ityp[idx]);

	if (type & TP_ITYP_AV)
		return type;

	return 0;
}
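/*
 * Build a bitmask of available ARM cores by walking all topology clusters;
 * bit positions follow the discovery order of available initiators.
 */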
u32 cpu_mask(void)
{
	struct ccsr_gur __iomem *gur = (void *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type, mask = 0;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (TP_ITYP_TYPE(type) == TP_ITYP_TYPE_ARM)
					mask |= 1 << count;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return mask;
}
/*
 * Return the number of cores on this SoC.
 */
int cpu_numcores(void)
{
	return hweight32(cpu_mask());
}
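/* Map a logical core number to its cluster index, or -1 if not found. */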
int fsl_qoriq_core_to_cluster(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			if (initiator_type(cluster, j)) {
				if (count == core)
					return i;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the cluster */
}
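/*
 * Return the raw initiator type word for a logical core number; the caller
 * can extract the core version (e.g. A53/A57) with TP_ITYP_VER().
 */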
u32 fsl_qoriq_core_to_type(unsigned int core)
{
	struct ccsr_gur __iomem *gur =
		(void __iomem *)(CONFIG_SYS_FSL_GUTS_ADDR);
	int i = 0, count = 0;
	u32 cluster, type;

	do {
		int j;
		cluster = in_le32(&gur->tp_cluster[i].lower);
		for (j = 0; j < TP_INIT_PER_CLUSTER; j++) {
			type = initiator_type(cluster, j);
			if (type) {
				if (count == core)
					return type;
				count++;
			}
		}
		i++;
	} while ((cluster & TP_CLUSTER_EOC) != TP_CLUSTER_EOC);

	return -1;	/* cannot identify the core type */
}
#ifdef CONFIG_DISPLAY_CPUINFO
int print_cpuinfo(void)
{
	struct sys_info sysinfo;
	char buf[32];
	unsigned int i, core;
	u32 type;

	get_sys_info(&sysinfo);
	puts("Clock Configuration:");
	for_each_cpu(i, core, cpu_numcores(), cpu_mask()) {
		if (!(i % 3))
			puts("\n       ");
		type = TP_ITYP_VER(fsl_qoriq_core_to_type(core));
		printf("CPU%d(%s):%-4s MHz  ", core,
		       type == TY_ITYP_VER_A7 ? "A7 " :
		       (type == TY_ITYP_VER_A53 ? "A53" :
		       (type == TY_ITYP_VER_A57 ? "A57" : "   ")),
		       strmhz(buf, sysinfo.freq_processor[core]));
	}
	printf("\n       Bus:      %-4s MHz  ",
	       strmhz(buf, sysinfo.freq_systembus));
	printf("DDR:      %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus));
	printf("     DP-DDR:   %-4s MHz", strmhz(buf, sysinfo.freq_ddrbus2));
	puts("\n");

	return 0;
}
#endif
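/*
 * Descriptive note: with CONFIG_FSL_MC_ENET this hands Ethernet bring-up
 * to the DPAA2 Management Complex init; otherwise it is a no-op.
 */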
int cpu_eth_init(bd_t *bis)
{
	int error = 0;

#ifdef CONFIG_FSL_MC_ENET
	error = fsl_mc_ldpaa_init(bis);
#endif

	return error;
}
int arch_early_init_r(void)
{
	int rv;

	rv = fsl_lsch3_wake_seconday_cores();
	if (rv)
		printf("Did not wake secondary cores\n");

	return 0;
}
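/*
 * Program the generic timer: set an accurate counter frequency when
 * COUNTER_FREQUENCY_REAL is defined, enable the timebase for all clusters,
 * and start the global counter.
 */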
int timer_init(void)
{
	u32 __iomem *cntcr = (u32 *)CONFIG_SYS_FSL_TIMER_ADDR;
	u32 __iomem *cltbenr = (u32 *)CONFIG_SYS_FSL_PMU_CLTBENR;
#ifdef COUNTER_FREQUENCY_REAL
	unsigned long cntfrq = COUNTER_FREQUENCY_REAL;

	/* Update with accurate clock frequency */
	asm volatile("msr cntfrq_el0, %0" : : "r" (cntfrq) : "memory");
#endif

	/*
	 * Enable timebase for all clusters.
	 * It is safe to do so even if some clusters are not enabled.
	 */
	out_le32(cltbenr, 0xf);

	/*
	 * Enable clock for timer.
	 * This is a global setting.
	 */
	out_le32(cntcr, 0x1);

	return 0;
}
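/*
 * Assert RESET_REQ_B to reset the SoC; the addr argument of this standard
 * U-Boot hook is unused here.
 */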
void reset_cpu(ulong addr)
{
	u32 __iomem *rstcr = (u32 *)CONFIG_SYS_FSL_RST_ADDR;
	u32 val;

	/* Raise RESET_REQ_B */
	val = in_le32(rstcr);
	val |= 0x02;
	out_le32(rstcr, val);
}