/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * This code maintains the "home" for each page in the system.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>
/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);
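/*
 * Illustrative note: "noallocl2" arrives as a kernel command-line
 * parameter via early_param() above; once set, pte_set_home() below
 * applies hv_pte_set_no_alloc_l2() to every non-MMIO PTE it rewrites.
 */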
/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length)
		cpumask_or(&mask, &mask, tlb_cpumask);

	/* Each remote ASID names its tile by (x, y) grid coordinates. */
	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}
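/*
 * Worked example (values assumed for illustration): on a grid eight
 * tiles wide (smp_width == 8), an HV_Remote_ASID entry for the tile
 * at x = 3, y = 2 lands on linear cpu 2 * 8 + 3 == 19 in the mask.
 */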
/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes the arguments so that a length of zero makes the
 *    corresponding cpumask NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush could race, conclude the flush had already
 * completed, and start to use the page while it is still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (rc == 0)
		return;

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask,
	       cpumask_pr_args(&cache_cpumask_copy),
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
	       cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
	panic("Unsafe to continue.");
}
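/*
 * Illustrative call (this is exactly what homecache_evict() below
 * does): to evict any cached lines on a set of cpus with no TLB work,
 * pass a cache cpumask and leave every TLB argument zero or NULL:
 *
 *	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
 */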
/* Flush and invalidate a page's cache lines through its home cache. */
static void homecache_finv_page_va(void *va, int home)
{
	int cpu = get_cpu();
	if (home == cpu) {
		finv_buffer_local(va, PAGE_SIZE);
	} else if (home == PAGE_HOME_HASH) {
		finv_buffer_remote(va, PAGE_SIZE, 1);
	} else {
		BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(va, PAGE_SIZE, 0);
	}
	put_cpu();
}
/* Temporarily map a page via the fixmap and finv it at the given home. */
void homecache_finv_map_page(struct page *page, int home)
{
	unsigned long flags;
	unsigned long va;
	pte_t *ptep;
	pte_t pte;

	if (home == PAGE_HOME_UNCACHED)
		return;
	local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
	va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
			   (KM_TYPE_NR * smp_processor_id()));
#else
	va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
	ptep = virt_to_kpte(va);
	pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
	__set_pte(ptep, pte_set_home(pte, home));
	homecache_finv_page_va((void *)va, home);
	__pte_clear(ptep);
	hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
	kmap_atomic_idx_pop();
#endif
	local_irq_restore(flags);
}
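/*
 * Note: interrupts stay disabled across the temporary mapping above, so
 * the per-cpu fixmap slot selected via smp_processor_id() cannot be
 * reused or migrated away while the finv is in progress.
 */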
static void homecache_finv_page_home(struct page *page, int home)
{
	if (!PageHighMem(page) && home == page_home(page))
		homecache_finv_page_va(page_address(page), home);
	else
		homecache_finv_map_page(page, home);
}
static inline bool incoherent_home(int home)
{
	return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}
static void homecache_finv_page_internal(struct page *page, int force_map)
{
	int home = page_home(page);
	if (home == PAGE_HOME_UNCACHED)
		return;
	if (incoherent_home(home)) {
		int cpu;
		for_each_cpu(cpu, &cpu_cacheable_map)
			homecache_finv_map_page(page, cpu);
	} else if (force_map) {
		/* Force if, e.g., the normal mapping is migrating. */
		homecache_finv_map_page(page, home);
	} else {
		homecache_finv_page_home(page, home);
	}
	sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}
void homecache_finv_page(struct page *page)
{
	homecache_finv_page_internal(page, 0);
}
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
	}
	panic("Bad PTE %#llx\n", pte.val);
}
/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else if (hash_default) {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		} else {
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		}
		pte = hv_pte_set_nc(pte);
		break;

	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;

	default:
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);
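/*
 * Illustrative usage (mirroring homecache_finv_map_page() above):
 * construct a PTE and rewrite its home before installing it:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *	__set_pte(ptep, pte_set_home(pte, home));
 */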
/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return PAGE_HOME_HASH;
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_kpte(kva));
	}
}
EXPORT_SYMBOL(page_home);
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_kpte(kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}
EXPORT_SYMBOL(homecache_change_page_home);
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;

	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);
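/*
 * Illustrative usage (hypothetical caller, assuming cpu 1 is a valid
 * cache home): allocate one lowmem page homed on cpu 1's L2, use it,
 * then release it through the homecache-aware free path:
 *
 *	struct page *page = homecache_alloc_pages(GFP_KERNEL, 0, 1);
 *	if (page)
 *		__homecache_free_pages(page, 0);
 */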
struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;

	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
void __homecache_free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, PAGE_HOME_HASH);
		if (order == 0) {
			free_hot_cold_page(page, false);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
EXPORT_SYMBOL(__homecache_free_pages);
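/*
 * Note: rehoming to PAGE_HOME_HASH before freeing returns pages to the
 * buddy allocator in the default hash-for-home state, presumably so a
 * later allocation does not inherit a stale or remote home setting.
 */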
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__homecache_free_pages(virt_to_page((void *)addr), order);
	}
}
EXPORT_SYMBOL(homecache_free_pages);