        flush_dcache_page(page);
}
-#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF)
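+/*
+ * copy_{to,from}_user_page() handle any D-cache aliasing themselves,
+ * so they are provided unconditionally.
+ */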
extern void copy_to_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len);
extern void copy_from_user_page(struct vm_area_struct *vma,
        struct page *page, unsigned long vaddr, void *dst, const void *src,
        unsigned long len);
-#else
-#define copy_to_user_page(vma, page, vaddr, dst, src, len)     \
-        do {                                                    \
-                flush_cache_page(vma, vaddr, page_to_pfn(page));\
-                memcpy(dst, src, len);                          \
-                flush_icache_user_range(vma, page, vaddr, len); \
-        } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len)   \
-        do {                                                    \
-                flush_cache_page(vma, vaddr, page_to_pfn(page));\
-                memcpy(dst, src, len);                          \
-        } while (0)
-#endif
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
struct page;
struct vm_area_struct;
-#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \
- (defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
- defined(CONFIG_SH7705_CACHE_32KB))
+#if defined(CONFIG_CPU_SH5)
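+/* SH-5 keeps its own coloured-page copy/clear implementations. */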
extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void copy_user_page(void *to, void *from, unsigned long address,
                           struct page *page);
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
+
+#elif defined(CONFIG_MMU)
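+/* Everyone else with an MMU uses the highpage variants. */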
extern void copy_user_highpage(struct page *to, struct page *from,
                               unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage clear_user_highpage
-#endif
+
#else
+
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
#endif
/*
extern void page_table_range_init(unsigned long start, unsigned long end,
                                  pgd_t *pgd);
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
- defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU)
+#if defined(CONFIG_MMU) && !defined(CONFIG_CPU_SH5)
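+/* kmap_coherent_init() is a no-op on parts without aliasing D-caches. */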
extern void kmap_coherent_init(void);
#else
#define kmap_coherent_init() do { } while (0)
obj-y += $(cache-y)
mmu-y := tlb-nommu.o pg-nommu.o
-mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o
+mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o
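+# pg-mmu.o provides the user page copy/clear helpers for all MMU parts.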
obj-y += $(mmu-y)
obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o
tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o
obj-y += $(tlb-y)
-ifndef CONFIG_CACHE_OFF
-obj-$(CONFIG_CPU_SH4) += pg-sh4.o
-obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh4.o
-endif
endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
else
                sh64_clear_user_page_coloured(to, address);
}
+
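+/*
+ * These are used by things like access_process_vm(), which write a user
+ * page through a kernel alias: flush the user mapping first, and keep
+ * the I-cache consistent for executable mappings.
+ */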
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                       unsigned long vaddr, void *dst, const void *src,
+                       unsigned long len)
+{
+        flush_cache_page(vma, vaddr, page_to_pfn(page));
+        memcpy(dst, src, len);
+        flush_icache_user_range(vma, page, vaddr, len);
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+                         unsigned long vaddr, void *dst, const void *src,
+                         unsigned long len)
+{
+        flush_cache_page(vma, vaddr, page_to_pfn(page));
+        memcpy(dst, src, len);
+}
#endif
/*
- * arch/sh/mm/pg-sh4.c
+ * arch/sh/mm/pg-mmu.c
*
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt
void __init kmap_coherent_init(void)
{
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
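+        /* Only parts with aliasing D-caches need the coherent kmap pte. */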
        unsigned long vaddr;

        /* cache the first coherent kmap pte */
        vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
        kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
+#endif
}
static inline void *kmap_coherent(struct page *page, unsigned long addr)
                       unsigned long vaddr, void *dst, const void *src,
                       unsigned long len)
{
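+        /*
+         * On parts without D-cache aliases (n_aliases == 0) the kernel
+         * mapping is already coherent, so write through dst directly.
+         * Otherwise write through a kmap_coherent() mapping with the same
+         * cache colour as the user address, or mark the page dirty so it
+         * is flushed before the user mapping is next used.
+         */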
-        if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(vto, src, len);
                kunmap_coherent(vto);
        } else {
                memcpy(dst, src, len);
-                set_bit(PG_dcache_dirty, &page->flags);
+                if (boot_cpu_data.dcache.n_aliases)
+                        set_bit(PG_dcache_dirty, &page->flags);
        }

        if (vma->vm_flags & VM_EXEC)
                         unsigned long vaddr, void *dst, const void *src,
                         unsigned long len)
{
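+        /*
+         * Read through a coherent mapping when user-space cache lines at
+         * vaddr may not be visible through the kernel alias; otherwise
+         * read the kernel copy directly.
+         */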
-        if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+            !test_bit(PG_dcache_dirty, &page->flags)) {
                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                memcpy(dst, vfrom, len);
                kunmap_coherent(vfrom);
        } else {
                memcpy(dst, src, len);
-                set_bit(PG_dcache_dirty, &page->flags);
+                if (boot_cpu_data.dcache.n_aliases)
+                        set_bit(PG_dcache_dirty, &page->flags);
        }
}
        vto = kmap_atomic(to, KM_USER1);
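+        /* The coherent kmap is only worthwhile when aliasing is possible. */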
-        if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+            !test_bit(PG_dcache_dirty, &from->flags)) {
                vfrom = kmap_coherent(from, vaddr);
                copy_page(vto, vfrom);
                kunmap_coherent(vfrom);