Merge git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300
author     Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 28 Oct 2010 01:53:26 +0000 (18:53 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 28 Oct 2010 01:53:26 +0000 (18:53 -0700)
* git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-mn10300: (44 commits)
  MN10300: Save frame pointer in thread_info struct rather than global var
  MN10300: Change "Matsushita" to "Panasonic".
  MN10300: Create a defconfig for the ASB2364 board
  MN10300: Update the ASB2303 defconfig
  MN10300: ASB2364: Add support for SMSC911X and SMC911X
  MN10300: ASB2364: Handle the IRQ multiplexer in the FPGA
  MN10300: Generic time support
  MN10300: Specify an ELF HWCAP flag for MN10300 Atomic Operations Unit support
  MN10300: Map userspace atomic op regs as a vmalloc page
  MN10300: Add Panasonic AM34 subarch and implement SMP
  MN10300: Delete idle_timestamp from irq_cpustat_t
  MN10300: Make various interrupt priority settings configurable
  MN10300: Optimise do_csum()
  MN10300: Implement atomic ops using atomic ops unit
  MN10300: Make the FPU operate in non-lazy mode under SMP
  MN10300: SMP TLB flushing
  MN10300: Use the [ID]PTEL2 registers rather than [ID]PTEL for TLB control
  MN10300: Make the use of PIDR to mark TLB entries controllable
  MN10300: Rename __flush_tlb*() to local_flush_tlb*()
  MN10300: AM34 erratum requires MMUCTR read and write on exception entry
  ...

145 files changed:
Kbuild
MAINTAINERS
arch/mn10300/Kconfig
arch/mn10300/Makefile
arch/mn10300/boot/compressed/head.S
arch/mn10300/configs/asb2303_defconfig
arch/mn10300/configs/asb2364_defconfig [new file with mode: 0644]
arch/mn10300/include/asm/atomic.h
arch/mn10300/include/asm/bitops.h
arch/mn10300/include/asm/cache.h
arch/mn10300/include/asm/cacheflush.h
arch/mn10300/include/asm/cpu-regs.h
arch/mn10300/include/asm/dmactl-regs.h
arch/mn10300/include/asm/elf.h
arch/mn10300/include/asm/exceptions.h
arch/mn10300/include/asm/fpu.h
arch/mn10300/include/asm/frame.inc
arch/mn10300/include/asm/gdb-stub.h
arch/mn10300/include/asm/hardirq.h
arch/mn10300/include/asm/highmem.h
arch/mn10300/include/asm/intctl-regs.h
arch/mn10300/include/asm/io.h
arch/mn10300/include/asm/irq.h
arch/mn10300/include/asm/irq_regs.h
arch/mn10300/include/asm/irqflags.h
arch/mn10300/include/asm/mmu_context.h
arch/mn10300/include/asm/pgalloc.h
arch/mn10300/include/asm/pgtable.h
arch/mn10300/include/asm/processor.h
arch/mn10300/include/asm/ptrace.h
arch/mn10300/include/asm/reset-regs.h
arch/mn10300/include/asm/rtc.h
arch/mn10300/include/asm/rwlock.h [new file with mode: 0644]
arch/mn10300/include/asm/serial-regs.h
arch/mn10300/include/asm/serial.h
arch/mn10300/include/asm/smp.h
arch/mn10300/include/asm/smsc911x.h [new file with mode: 0644]
arch/mn10300/include/asm/spinlock.h
arch/mn10300/include/asm/spinlock_types.h [new file with mode: 0644]
arch/mn10300/include/asm/system.h
arch/mn10300/include/asm/thread_info.h
arch/mn10300/include/asm/timer-regs.h
arch/mn10300/include/asm/timex.h
arch/mn10300/include/asm/tlbflush.h
arch/mn10300/include/asm/uaccess.h
arch/mn10300/kernel/Makefile
arch/mn10300/kernel/asm-offsets.c
arch/mn10300/kernel/cevt-mn10300.c [new file with mode: 0644]
arch/mn10300/kernel/csrc-mn10300.c [new file with mode: 0644]
arch/mn10300/kernel/entry.S
arch/mn10300/kernel/fpu-low.S
arch/mn10300/kernel/fpu-nofpu-low.S [new file with mode: 0644]
arch/mn10300/kernel/fpu-nofpu.c [new file with mode: 0644]
arch/mn10300/kernel/fpu.c
arch/mn10300/kernel/gdb-io-serial-low.S
arch/mn10300/kernel/gdb-io-serial.c
arch/mn10300/kernel/gdb-io-ttysm.c
arch/mn10300/kernel/gdb-stub.c
arch/mn10300/kernel/head.S
arch/mn10300/kernel/internal.h
arch/mn10300/kernel/irq.c
arch/mn10300/kernel/kprobes.c
arch/mn10300/kernel/mn10300-serial-low.S
arch/mn10300/kernel/mn10300-serial.c
arch/mn10300/kernel/mn10300-watchdog-low.S
arch/mn10300/kernel/mn10300-watchdog.c
arch/mn10300/kernel/process.c
arch/mn10300/kernel/profile.c
arch/mn10300/kernel/rtc.c
arch/mn10300/kernel/setup.c
arch/mn10300/kernel/signal.c
arch/mn10300/kernel/smp-low.S [new file with mode: 0644]
arch/mn10300/kernel/smp.c [new file with mode: 0644]
arch/mn10300/kernel/switch_to.S
arch/mn10300/kernel/time.c
arch/mn10300/kernel/traps.c
arch/mn10300/lib/bitops.c
arch/mn10300/lib/delay.c
arch/mn10300/lib/do_csum.S
arch/mn10300/mm/Kconfig.cache [new file with mode: 0644]
arch/mn10300/mm/Makefile
arch/mn10300/mm/cache-flush-by-reg.S [new file with mode: 0644]
arch/mn10300/mm/cache-flush-by-tag.S [new file with mode: 0644]
arch/mn10300/mm/cache-flush-icache.c [new file with mode: 0644]
arch/mn10300/mm/cache-flush-mn10300.S [deleted file]
arch/mn10300/mm/cache-inv-by-reg.S [new file with mode: 0644]
arch/mn10300/mm/cache-inv-by-tag.S [new file with mode: 0644]
arch/mn10300/mm/cache-inv-icache.c [new file with mode: 0644]
arch/mn10300/mm/cache-mn10300.S [deleted file]
arch/mn10300/mm/cache-smp-flush.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp-inv.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp.c [new file with mode: 0644]
arch/mn10300/mm/cache-smp.h [new file with mode: 0644]
arch/mn10300/mm/cache.c
arch/mn10300/mm/fault.c
arch/mn10300/mm/init.c
arch/mn10300/mm/misalignment.c
arch/mn10300/mm/mmu-context.c
arch/mn10300/mm/pgtable.c
arch/mn10300/mm/tlb-mn10300.S
arch/mn10300/mm/tlb-smp.c [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/cache.h
arch/mn10300/proc-mn103e010/include/proc/clock.h
arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn103e010/include/proc/proc.h
arch/mn10300/proc-mn103e010/proc-init.c
arch/mn10300/proc-mn2ws0050/Makefile [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/cache.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/clock.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/irq.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/proc.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h [new file with mode: 0644]
arch/mn10300/proc-mn2ws0050/proc-init.c [new file with mode: 0644]
arch/mn10300/unit-asb2303/include/unit/clock.h
arch/mn10300/unit-asb2303/include/unit/serial.h
arch/mn10300/unit-asb2303/include/unit/timex.h
arch/mn10300/unit-asb2303/unit-init.c
arch/mn10300/unit-asb2305/include/unit/clock.h
arch/mn10300/unit-asb2305/include/unit/serial.h
arch/mn10300/unit-asb2305/include/unit/timex.h
arch/mn10300/unit-asb2305/pci-asb2305.c
arch/mn10300/unit-asb2305/pci.c
arch/mn10300/unit-asb2305/unit-init.c
arch/mn10300/unit-asb2364/Makefile [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/clock.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/fpga-regs.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/irq.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/leds.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/serial.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/smsc911x.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/include/unit/timex.h [new file with mode: 0644]
arch/mn10300/unit-asb2364/irq-fpga.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/leds.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/smsc911x.c [new file with mode: 0644]
arch/mn10300/unit-asb2364/unit-init.c [new file with mode: 0644]
drivers/net/Kconfig
drivers/net/smsc911x.c
drivers/net/smsc911x.h
include/linux/smp.h
kernel/smp.c
mm/maccess.c

diff --git a/Kbuild b/Kbuild
index 431f7ca2404cb2cf5bbc5b40050f032a8986888a..b00037ad7e03b2d5953572a440b5c6f1a1c5708e 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -53,7 +53,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
        "/^->/{s:->#\(.*\):/* \1 */:; \
-       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 (\2) /* \3 */:; \
+       s:^->\([^ ]*\) [\$$#]*\([-0-9]*\) \(.*\):#define \1 \2 /* \3 */:; \
        s:^->\([^ ]*\) [\$$#]*\([^ ]*\) \(.*\):#define \1 \2 /* \3 */:; \
        s:->::; p;}"
 endef
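
As a worked example (names and values hypothetical): an asm-offsets.s line such as "->THREAD_SP $24 offsetof(struct thread_struct, sp)" is now converted to "#define THREAD_SP 24 /* offsetof(struct thread_struct, sp) */", where the old rule would additionally have wrapped the numeric value as "(24)".
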
diff --git a/MAINTAINERS b/MAINTAINERS
index debde0128cd0f661937d759923d2dd0790cc2fac..1e6b6bdf63403950bc7cc573971f2399fe2c94ec 100644 (file)
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4448,7 +4448,7 @@ L:        platform-driver-x86@vger.kernel.org
 S:     Maintained
 F:     drivers/platform/x86/panasonic-laptop.c
 
-PANASONIC MN10300/AM33 PORT
+PANASONIC MN10300/AM33/AM34 PORT
 M:     David Howells <dhowells@redhat.com>
 M:     Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
 L:     linux-am33-list@redhat.com (moderated for non-subscribers)
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig
index 7c2a2f7f8dc143889b74605d2741f5707ad330e4..365766a3d536025ad0692643edbad8ffb6fee0d5 100644 (file)
--- a/arch/mn10300/Kconfig
+++ b/arch/mn10300/Kconfig
@@ -9,8 +9,19 @@ config MN10300
        def_bool y
        select HAVE_OPROFILE
 
-config AM33
-       def_bool y
+config AM33_2
+       def_bool n
+
+config AM33_3
+       def_bool n
+
+config AM34_2
+       def_bool n
+       select MN10300_HAS_ATOMIC_OPS_UNIT
+       select MN10300_HAS_CACHE_SNOOP
+
+config ERRATUM_NEED_TO_RELOAD_MMUCTR
+       def_bool y if AM33_3 || AM34_2
 
 config MMU
        def_bool y
@@ -37,7 +48,7 @@ config GENERIC_CALIBRATE_DELAY
        def_bool y
 
 config GENERIC_CMOS_UPDATE
-        def_bool y
+        def_bool n
 
 config GENERIC_FIND_NEXT_BIT
        def_bool y
@@ -45,6 +56,27 @@ config GENERIC_FIND_NEXT_BIT
 config GENERIC_HWEIGHT
        def_bool y
 
+config GENERIC_TIME
+       def_bool y
+
+config GENERIC_CLOCKEVENTS
+       def_bool y
+
+config GENERIC_CLOCKEVENTS_BUILD
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config GENERIC_CLOCKEVENTS_BROADCAST
+       bool
+
+config CEVT_MN10300
+       def_bool y
+       depends on GENERIC_CLOCKEVENTS
+
+config CSRC_MN10300
+       def_bool y
+       depends on GENERIC_TIME
+
 config GENERIC_BUG
        def_bool y
 
@@ -61,18 +93,14 @@ config GENERIC_HARDIRQS
 config HOTPLUG_CPU
        def_bool n
 
-config HZ
-       int
-       default 1000
-
-mainmenu "Matsushita MN10300/AM33 Kernel Configuration"
+mainmenu "Panasonic MN10300/AM33 Kernel Configuration"
 
 source "init/Kconfig"
 
 source "kernel/Kconfig.freezer"
 
 
-menu "Matsushita MN10300 system setup"
+menu "Panasonic MN10300 system setup"
 
 choice
        prompt "Unit type"
@@ -87,6 +115,10 @@ config MN10300_UNIT_ASB2303
 config MN10300_UNIT_ASB2305
        bool "ASB2305"
 
+config MN10300_UNIT_ASB2364
+       bool "ASB2364"
+       select SMSC911X_ARCH_HOOKS if SMSC911X
+
 endchoice
 
 choice
@@ -99,57 +131,51 @@ choice
 config MN10300_PROC_MN103E010
        bool "MN103E010"
        depends on MN10300_UNIT_ASB2303 || MN10300_UNIT_ASB2305
+       select AM33_2
+       select MN10300_PROC_HAS_TTYSM0
+       select MN10300_PROC_HAS_TTYSM1
+       select MN10300_PROC_HAS_TTYSM2
+
+config MN10300_PROC_MN2WS0050
+       bool "MN2WS0050"
+       depends on MN10300_UNIT_ASB2364
+       select AM34_2
        select MN10300_PROC_HAS_TTYSM0
        select MN10300_PROC_HAS_TTYSM1
        select MN10300_PROC_HAS_TTYSM2
 
 endchoice
 
-choice
-       prompt "Processor core support"
-       default MN10300_CPU_AM33V2
+config MN10300_HAS_ATOMIC_OPS_UNIT
+       def_bool n
        help
-         This option specifies the processor core for which the kernel will be
-         compiled. It affects the instruction set used.
-
-config MN10300_CPU_AM33V2
-       bool "AM33v2"
-
-endchoice
+         This should be enabled if the processor has an atomic ops unit
+         capable of doing LL/SC equivalent operations.
 
 config FPU
        bool "FPU present"
        default y
-       depends on MN10300_PROC_MN103E010
+       depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
 
-choice
-       prompt "CPU Caching mode"
-       default MN10300_CACHE_WBACK
+config LAZY_SAVE_FPU
+       bool "Save FPU state lazily"
+       default y
+       depends on FPU && !SMP
        help
-         This option determines the caching mode for the kernel.
-
-         Write-Back caching mode involves the all reads and writes causing
-         the affected cacheline to be read into the cache first before being
-         operated upon. Memory is not then updated by a write until the cache
-         is filled and a cacheline needs to be displaced from the cache to
-         make room. Only at that point is it written back.
-
-         Write-Through caching only fetches cachelines from memory on a
-         read. Writes always get written directly to memory. If the affected
-         cacheline is also in cache, it will be updated too.
-
-         The final option is to turn of caching entirely.
+         Enable this to be lazy in the saving of the FPU state to the owning
+         task's thread struct.  This is useful if most tasks on the system
+         don't use the FPU as only those tasks that use it will pass it
+         between them, and the state needn't be saved for a task that isn't
+         using it.
 
-config MN10300_CACHE_WBACK
-       bool "Write-Back"
+         This can't be so easily used on SMP as the process that owns the FPU
+         state on a CPU may be currently running on another CPU, so for the
+         moment, it is disabled.
 
-config MN10300_CACHE_WTHRU
-       bool "Write-Through"
+source "arch/mn10300/mm/Kconfig.cache"
 
-config MN10300_CACHE_DISABLED
-       bool "Disabled"
-
-endchoice
+config MN10300_TLB_USE_PIDR
+       def_bool y
 
 menu "Memory layout options"
 
@@ -170,24 +196,55 @@ config KERNEL_TEXT_ADDRESS
 
 config KERNEL_ZIMAGE_BASE_ADDRESS
        hex "Base address of compressed vmlinux image"
-       default "0x90700000"
+       default "0x50700000"
 
+config BOOT_STACK_OFFSET
+       hex
+       default "0xF00" if SMP
+       default "0xFF0" if !SMP
+
+config BOOT_STACK_SIZE
+       hex
+       depends on SMP
+       default "0x100"
 endmenu
 
-config PREEMPT
-       bool "Preemptible Kernel"
-       help
-         This option reduces the latency of the kernel when reacting to
-         real-time or interactive events by allowing a low priority process to
-         be preempted even if it is in kernel mode executing a system call.
-         This allows applications to run more reliably even when the system is
-         under load.
+config SMP
+       bool "Symmetric multi-processing support"
+       default y
+       depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050
+       ---help---
+         This enables support for systems with more than one CPU. If you have
+         a system with only one CPU, like most personal computers, say N. If
+         you have a system with more than one CPU, say Y.
+
+         If you say N here, the kernel will run on single and multiprocessor
+         machines, but will use only one CPU of a multiprocessor machine. If
+         you say Y here, the kernel will run on many, but not all,
+         singleprocessor machines. On a singleprocessor machine, the kernel
+         will run faster if you say N here.
 
-         Say Y here if you are building a kernel for a desktop, embedded
-         or real-time system.  Say N if you are unsure.
+         See also <file:Documentation/i386/IO-APIC.txt>,
+         <file:Documentation/nmi_watchdog.txt> and the SMP-HOWTO available at
+         <http://www.tldp.org/docs.html#howto>.
+
+         If you don't know what to do here, say N.
+
+config NR_CPUS
+       int
+       depends on SMP
+       default "2"
+
+config USE_GENERIC_SMP_HELPERS
+       bool
+       depends on SMP
+       default y
+
+source "kernel/Kconfig.preempt"
 
 config MN10300_CURRENT_IN_E2
        bool "Hold current task address in E2 register"
+       depends on !SMP
        default y
        help
          This option removes the E2/R2 register from the set available to gcc
@@ -209,12 +266,15 @@ config MN10300_USING_JTAG
          suppresses the use of certain hardware debugging features, such as
          single-stepping, which are taken over completely by the JTAG unit.
 
+source "kernel/Kconfig.hz"
+source "kernel/time/Kconfig"
+
 config MN10300_RTC
        bool "Using MN10300 RTC"
-       depends on MN10300_PROC_MN103E010
+       depends on MN10300_PROC_MN103E010 || MN10300_PROC_MN2WS0050
+       select GENERIC_CMOS_UPDATE
        default n
        help
-
          This option enables support for the RTC, thus enabling time to be
          tracked, even when system is powered down. This is available on-chip
          on the MN103E010.
@@ -306,14 +366,23 @@ config MN10300_TTYSM1
 
 choice
        prompt "Select the timer to supply the clock for SIF1"
-       default MN10300_TTYSM0_TIMER9
+       default MN10300_TTYSM1_TIMER12 \
+               if !(AM33_2 || AM33_3)
+       default MN10300_TTYSM1_TIMER9 \
+               if AM33_2 || AM33_3
        depends on MN10300_TTYSM1
 
+config MN10300_TTYSM1_TIMER12
+       bool "Use timer 12 (16-bit)"
+       depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM1_TIMER9
        bool "Use timer 9 (16-bit)"
+       depends on AM33_2 || AM33_3
 
 config MN10300_TTYSM1_TIMER3
        bool "Use timer 3 (8-bit)"
+       depends on AM33_2 || AM33_3
 
 endchoice
 
@@ -328,17 +397,107 @@ config MN10300_TTYSM2
 
 choice
        prompt "Select the timer to supply the clock for SIF2"
-       default MN10300_TTYSM0_TIMER10
+       default MN10300_TTYSM2_TIMER3 \
+               if !(AM33_2 || AM33_3)
+       default MN10300_TTYSM2_TIMER10 \
+               if AM33_2 || AM33_3
        depends on MN10300_TTYSM2
 
+config MN10300_TTYSM2_TIMER9
+       bool "Use timer 9 (16-bit)"
+       depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER1
+       bool "Use timer 1 (8-bit)"
+       depends on !(AM33_2 || AM33_3)
+
+config MN10300_TTYSM2_TIMER3
+       bool "Use timer 3 (8-bit)"
+       depends on !(AM33_2 || AM33_3)
+
 config MN10300_TTYSM2_TIMER10
        bool "Use timer 10 (16-bit)"
+       depends on AM33_2 || AM33_3
 
 endchoice
 
 config MN10300_TTYSM2_CTS
        bool "Enable the use of the CTS line /dev/ttySM2"
-       depends on MN10300_TTYSM2
+       depends on MN10300_TTYSM2 && AM33_2
+
+endmenu
+
+menu "Interrupt request priority options"
+
+comment "[!] NOTE: A lower number/level indicates a higher priority (0 is highest, 6 is lowest)"
+
+comment "____Non-maskable interrupt levels____"
+comment "The following must be set to a higher priority than local_irq_disable() and on-chip serial"
+
+config GDBSTUB_IRQ_LEVEL
+       int "GDBSTUB interrupt priority"
+       depends on GDBSTUB
+       range 0 1 if LINUX_CLI_LEVEL = 2
+       range 0 2 if LINUX_CLI_LEVEL = 3
+       range 0 3 if LINUX_CLI_LEVEL = 4
+       range 0 4 if LINUX_CLI_LEVEL = 5
+       range 0 5 if LINUX_CLI_LEVEL = 6
+       default 0
+
+comment "The following must be set to a higher priority than local_irq_disable()"
+
+config MN10300_SERIAL_IRQ_LEVEL
+       int "MN10300 on-chip serial interrupt priority"
+       depends on MN10300_TTYSM
+       range 1 1 if LINUX_CLI_LEVEL = 2
+       range 1 2 if LINUX_CLI_LEVEL = 3
+       range 1 3 if LINUX_CLI_LEVEL = 4
+       range 1 4 if LINUX_CLI_LEVEL = 5
+       range 1 5 if LINUX_CLI_LEVEL = 6
+       default 1
+
+comment "-"
+comment "____Maskable interrupt levels____"
+
+config LINUX_CLI_LEVEL
+       int "The highest interrupt priority excluded by local_irq_disable() (2-6)"
+       range 2 6
+       default 2
+       help
+         local_irq_disable() doesn't actually disable maskable interrupts -
+         what it does is restrict the levels of interrupt which are permitted
+         (a lower level indicates a higher priority) by lowering the value in
+         EPSW.IM from 7.  Any interrupt is permitted for which the level is
+         lower than EPSW.IM.
+
+         Certain interrupts, such as GDBSTUB and virtual MN10300 on-chip
+         serial DMA interrupts are allowed to interrupt normal disabled
+         sections.
+
+comment "The following must be set to a equal to or lower priority than LINUX_CLI_LEVEL"
+
+config TIMER_IRQ_LEVEL
+       int "Kernel timer interrupt priority"
+       range LINUX_CLI_LEVEL 6
+       default 4
+
+config PCI_IRQ_LEVEL
+       int "PCI interrupt priority"
+       depends on PCI
+       range LINUX_CLI_LEVEL 6
+       default 5
+
+config ETHERNET_IRQ_LEVEL
+       int "Ethernet interrupt priority"
+       depends on SMC91X || SMC911X || SMSC911X
+       range LINUX_CLI_LEVEL 6
+       default 6
+
+config EXT_SERIAL_IRQ_LEVEL
+       int "External serial port interrupt priority"
+       depends on SERIAL_8250
+       range LINUX_CLI_LEVEL 6
+       default 6
 
 endmenu
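
The interrupt priority options above all hang off the rule spelled out in the LINUX_CLI_LEVEL help text: an interrupt at level L is delivered only while L is numerically lower than the current EPSW.IM field, and local_irq_disable() only lowers EPSW.IM to LINUX_CLI_LEVEL rather than masking everything. A minimal C sketch of that rule, for illustration only (it is not code from this patch, and it assumes the 3-bit IM field at EPSW_IM_SHIFT = 8 added to cpu-regs.h further down):

#include <asm/cpu-regs.h>	/* for EPSW_IM_SHIFT / NUM2EPSW_IM() */

/* An interrupt configured at 'level' (0 = highest priority, 6 = lowest)
 * is deliverable only while its level is below the current EPSW.IM. */
static inline int irq_level_deliverable(unsigned int level, unsigned int epsw)
{
	unsigned int im = (epsw >> EPSW_IM_SHIFT) & 0x7;	/* assumed 3-bit field */

	return level < im;
}

/* local_irq_disable() conceptually replaces the IM field with
 * NUM2EPSW_IM(CONFIG_LINUX_CLI_LEVEL); with the default of 2, only the
 * GDB stub (level 0) and the on-chip serial ports (level 1) can still
 * interrupt a "disabled" section. */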
 
diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile
index ac5c6bdb2f05c6535ab802cf34000f206995badb..7120282bf0d89c3240886cbad32b973bda1bfc6f 100644 (file)
--- a/arch/mn10300/Makefile
+++ b/arch/mn10300/Makefile
@@ -36,6 +36,9 @@ endif
 ifeq ($(CONFIG_MN10300_PROC_MN103E010),y)
 PROCESSOR      := mn103e010
 endif
+ifeq ($(CONFIG_MN10300_PROC_MN2WS0050),y)
+PROCESSOR      := mn2ws0050
+endif
 
 ifeq ($(CONFIG_MN10300_UNIT_ASB2303),y)
 UNIT           := asb2303
@@ -43,6 +46,9 @@ endif
 ifeq ($(CONFIG_MN10300_UNIT_ASB2305),y)
 UNIT           := asb2305
 endif
+ifeq ($(CONFIG_MN10300_UNIT_ASB2364),y)
+UNIT           := asb2364
+endif
 
 
 head-y         := arch/mn10300/kernel/head.o arch/mn10300/kernel/init_task.o
diff --git a/arch/mn10300/boot/compressed/head.S b/arch/mn10300/boot/compressed/head.S
index 502e1eb56709685ce7e698e6a93bfb9ddb50e795..7b50345b9e840dccc0a299efed88c402324a9da2 100644 (file)
--- a/arch/mn10300/boot/compressed/head.S
+++ b/arch/mn10300/boot/compressed/head.S
 
 #include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/cache.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif
 
        .globl startup_32
 startup_32:
-       # first save off parameters from bootloader
+#ifdef CONFIG_SMP
+       #
+       # Secondary CPUs jump directly to the kernel entry point
+       #
+       # Must save primary CPU's D0-D2 registers as they hold boot parameters
+       #
+       mov     (CPUID), d3
+       and     CPUID_MASK,d3
+       beq     startup_primary
+       mov     CONFIG_KERNEL_TEXT_ADDRESS,a0
+       jmp     (a0)
+
+startup_primary:
+#endif /* CONFIG_SMP */
+
+       # first save parameters from bootloader
        mov     param_save_area,a0
        mov     d0,(a0)
        mov     d1,(4,a0)
@@ -37,8 +56,15 @@ startup_32:
        mov     (a0),d0
        btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy
        lne
-       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD,d0   # writethru dcache
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif /* WBACK */
        movhu   d0,(a0)                                 # enable
+#endif /* !ENABLED */
 
        # clear the BSS area
        mov     __bss_start,a0
@@ -54,6 +80,9 @@ bssclear_end:
 
        # decompress the kernel
        call    decompress_kernel[],0
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       call    mn10300_dcache_flush_inv[],0
+#endif
 
        # disable caches again
        mov     CHCTR,a0
@@ -69,10 +98,46 @@ bssclear_end:
        mov     (4,a0),d1
        mov     (8,a0),d2
 
+       # jump to the kernel proper entry point
        mov     a3,sp
        mov     CONFIG_KERNEL_TEXT_ADDRESS,a0
        jmp     (a0)
 
+
+###############################################################################
+#
+# Cache flush routines
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_WBACK
+mn10300_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_dcache_flush_inv_end
+
+       mov     L1_CACHE_NENTRIES,d1
+       clr     a1
+
+mn10300_dcache_flush_inv_loop:
+       mov     (DCACHE_PURGE_WAY0(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY1(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY2(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY3(0),a1),d0    # unconditional purge
+
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_dcache_flush_inv_loop
+
+mn10300_dcache_flush_inv_end:
+       ret     [],0
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+
+###############################################################################
+#
+# Data areas
+#
+###############################################################################
        .data
        .align          4
 param_save_area:
diff --git a/arch/mn10300/configs/asb2303_defconfig b/arch/mn10300/configs/asb2303_defconfig
index d80dfcb2c902302ce7e3b28004dd74b121df7bda..3f749b69ca71b732081ff166a35c1ec8cca43fc3 100644 (file)
--- a/arch/mn10300/configs/asb2303_defconfig
+++ b/arch/mn10300/configs/asb2303_defconfig
@@ -12,6 +12,8 @@ CONFIG_SLAB=y
 CONFIG_PROFILING=y
 # CONFIG_BLOCK is not set
 CONFIG_PREEMPT=y
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
 CONFIG_MN10300_RTC=y
 CONFIG_MN10300_TTYSM_CONSOLE=y
 CONFIG_MN10300_TTYSM0=y
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig
new file mode 100644 (file)
index 0000000..83ce2f2
--- /dev/null
+++ b/arch/mn10300/configs/asb2364_defconfig
@@ -0,0 +1,98 @@
+CONFIG_EXPERIMENTAL=y
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_BSD_PROCESS_ACCT=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_NS=y
+CONFIG_CGROUP_FREEZER=y
+CONFIG_CGROUP_DEVICE=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_RELAY=y
+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
+CONFIG_EMBEDDED=y
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+CONFIG_SLAB=y
+CONFIG_PROFILING=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLOCK is not set
+CONFIG_MN10300_UNIT_ASB2364=y
+CONFIG_PREEMPT=y
+# CONFIG_MN10300_USING_JTAG is not set
+CONFIG_NO_HZ=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_MN10300_TTYSM_CONSOLE=y
+CONFIG_MN10300_TTYSM0=y
+CONFIG_MN10300_TTYSM0_TIMER2=y
+CONFIG_MN10300_TTYSM1=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_BOOTP=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_INET_LRO is not set
+# CONFIG_INET_DIAG is not set
+CONFIG_IPV6=y
+# CONFIG_INET6_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET6_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET6_XFRM_MODE_BEET is not set
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_CONNECTOR=y
+CONFIG_MTD=y
+CONFIG_MTD_DEBUG=y
+CONFIG_MTD_PARTITIONS=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED=y
+CONFIG_MTD_CHAR=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_JEDECPROBE=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+CONFIG_MTD_CFI_I4=y
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+CONFIG_NET_ETHERNET=y
+CONFIG_SMSC911X=y
+# CONFIG_NETDEV_1000 is not set
+# CONFIG_NETDEV_10000 is not set
+# CONFIG_INPUT_MOUSEDEV is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_EXTENDED=y
+CONFIG_SERIAL_8250_SHARE_IRQ=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_HID_SUPPORT is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_PROC_KCORE=y
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_JFFS2_FS=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V3=y
+CONFIG_ROOT_NFS=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_KERNEL=y
+CONFIG_DETECT_HUNG_TASK=y
+# CONFIG_DEBUG_BUGVERBOSE is not set
+CONFIG_DEBUG_INFO=y
+# CONFIG_RCU_CPU_STALL_DETECTOR is not set
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index f0cc1f84a72f180c5492987327b855b36bf46561..92d2f9298e3832155b14bf21d1eb964e015b87f5 100644 (file)
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -1 +1,351 @@
+/* MN10300 Atomic counter operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _ASM_ATOMIC_H
+#define _ASM_ATOMIC_H
+
+#include <asm/irqflags.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+       unsigned long status;
+       unsigned long oldval;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       mov     %5,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(oldval), "=m"(*m)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m), "r"(val)
+               : "memory", "cc");
+
+       return oldval;
+}
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                     unsigned long old, unsigned long new)
+{
+       unsigned long status;
+       unsigned long oldval;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       cmp     %5,%1           \n"
+               "       bne     2f              \n"
+               "       mov     %6,(_ADR,%3)    \n"
+               "2:     mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(oldval), "=m"(*m)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(m),
+                 "r"(old), "r"(new)
+               : "memory", "cc");
+
+       return oldval;
+}
+#else  /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+#error "No SMP atomic operation support!"
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
+#else  /* CONFIG_SMP */
+
+/*
+ * Emulate xchg for non-SMP MN10300
+ */
+struct __xchg_dummy { unsigned long a[100]; };
+#define __xg(x) ((struct __xchg_dummy *)(x))
+
+static inline
+unsigned long __xchg(volatile unsigned long *m, unsigned long val)
+{
+       unsigned long oldval;
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       oldval = *m;
+       *m = val;
+       arch_local_irq_restore(flags);
+       return oldval;
+}
+
+/*
+ * Emulate cmpxchg for non-SMP MN10300
+ */
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+                                     unsigned long old, unsigned long new)
+{
+       unsigned long oldval;
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       oldval = *m;
+       if (oldval == old)
+               *m = new;
+       arch_local_irq_restore(flags);
+       return oldval;
+}
+
+#endif /* CONFIG_SMP */
+
+#define xchg(ptr, v)                                           \
+       ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
+                                    (unsigned long)(v)))
+
+#define cmpxchg(ptr, o, n)                                     \
+       ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+                                       (unsigned long)(o),     \
+                                       (unsigned long)(n)))
+
+#define atomic_xchg(ptr, v)            (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)    (cmpxchg(&((v)->counter), (old), (new)))
+
+#endif /* !__ASSEMBLY__ */
+
+#ifndef CONFIG_SMP
 #include <asm-generic/atomic.h>
+#else
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+#ifdef __KERNEL__
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.  Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
+
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
+{
+       int retval;
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       add     %5,%1           \n"
+               "       mov     %1,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+               : "memory", "cc");
+
+#else
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       retval = v->counter;
+       retval += i;
+       v->counter = retval;
+       arch_local_irq_restore(flags);
+#endif
+       return retval;
+}
+
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
+{
+       int retval;
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %4,(_AAR,%3)    \n"
+               "       mov     (_ADR,%3),%1    \n"
+               "       sub     %5,%1           \n"
+               "       mov     %1,(_ADR,%3)    \n"
+               "       mov     (_ADR,%3),%0    \n"     /* flush */
+               "       mov     (_ASR,%3),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=&r"(retval), "=m"(v->counter)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(&v->counter), "r"(i)
+               : "memory", "cc");
+
+#else
+       unsigned long flags;
+       flags = arch_local_cli_save();
+       retval = v->counter;
+       retval -= i;
+       v->counter = retval;
+       arch_local_irq_restore(flags);
+#endif
+       return retval;
+}
+
+static inline int atomic_add_negative(int i, atomic_t *v)
+{
+       return atomic_add_return(i, v) < 0;
+}
+
+static inline void atomic_add(int i, atomic_t *v)
+{
+       atomic_add_return(i, v);
+}
+
+static inline void atomic_sub(int i, atomic_t *v)
+{
+       atomic_sub_return(i, v);
+}
+
+static inline void atomic_inc(atomic_t *v)
+{
+       atomic_add_return(1, v);
+}
+
+static inline void atomic_dec(atomic_t *v)
+{
+       atomic_sub_return(1, v);
+}
+
+#define atomic_dec_return(v)           atomic_sub_return(1, (v))
+#define atomic_inc_return(v)           atomic_add_return(1, (v))
+
+#define atomic_sub_and_test(i, v)      (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v)         (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v)         (atomic_add_return(1, (v)) == 0)
+
+#define atomic_add_unless(v, a, u)                             \
+({                                                             \
+       int c, old;                                             \
+       c = atomic_read(v);                                     \
+       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+               c = old;                                        \
+       c != (u);                                               \
+})
+
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
+
+/**
+ * atomic_clear_mask - Atomically clear bits in memory
+ * @mask: Mask of the bits to be cleared
+ * @v: pointer to word in memory
+ *
+ * Atomically clears the bits set in mask from the memory word specified.
+ */
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %3,(_AAR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"
+               "       and     %4,%0           \n"
+               "       mov     %0,(_ADR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"     /* flush */
+               "       mov     (_ASR,%2),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=m"(*addr)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(~mask)
+               : "memory", "cc");
+#else
+       unsigned long flags;
+
+       mask = ~mask;
+       flags = arch_local_cli_save();
+       *addr &= mask;
+       arch_local_irq_restore(flags);
+#endif
+}
+
+/**
+ * atomic_set_mask - Atomically set bits in memory
+ * @mask: Mask of the bits to be set
+ * @v: pointer to word in memory
+ *
+ * Atomically sets the bits set in mask from the memory word specified.
+ */
+static inline void atomic_set_mask(unsigned long mask, unsigned long *addr)
+{
+#ifdef CONFIG_SMP
+       int status;
+
+       asm volatile(
+               "1:     mov     %3,(_AAR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"
+               "       or      %4,%0           \n"
+               "       mov     %0,(_ADR,%2)    \n"
+               "       mov     (_ADR,%2),%0    \n"     /* flush */
+               "       mov     (_ASR,%2),%0    \n"
+               "       or      %0,%0           \n"
+               "       bne     1b              \n"
+               : "=&r"(status), "=m"(*addr)
+               : "a"(ATOMIC_OPS_BASE_ADDR), "r"(addr), "r"(mask)
+               : "memory", "cc");
+#else
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+       *addr |= mask;
+       arch_local_irq_restore(flags);
+#endif
+}
+
+/* Atomic operations are already serializing on MN10300??? */
+#define smp_mb__before_atomic_dec()    barrier()
+#define smp_mb__after_atomic_dec()     barrier()
+#define smp_mb__before_atomic_inc()    barrier()
+#define smp_mb__after_atomic_inc()     barrier()
+
+#include <asm-generic/atomic-long.h>
+
+#endif /* __KERNEL__ */
+#endif /* CONFIG_SMP */
+#endif /* _ASM_ATOMIC_H */
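
As a usage illustration (not part of the patch), the cmpxchg() loop that atomic_add_unless() packs into a macro above looks like this when written out; the same pattern works with both the SMP atomic-ops-unit and the !SMP interrupt-disabling versions of __cmpxchg():

/* Hypothetical helper: take a reference only if the count is not already
 * zero, equivalent to atomic_inc_not_zero() defined above. */
static inline int take_ref_unless_zero(atomic_t *refcount)
{
	int cur = atomic_read(refcount);
	int old;

	while (cur != 0) {
		old = atomic_cmpxchg(refcount, cur, cur + 1);
		if (old == cur)
			return 1;	/* exchange happened: reference taken */
		cur = old;		/* raced with another update: retry */
	}
	return 0;			/* count was zero: no reference taken */
}
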
diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h
index 3f50e966107641f21f346a38e50cca97d2eda24b..3b8a868188f59e88f2926d48298b54c188f318f2 100644 (file)
--- a/arch/mn10300/include/asm/bitops.h
+++ b/arch/mn10300/include/asm/bitops.h
@@ -57,7 +57,7 @@
 #define clear_bit(nr, addr) ___clear_bit((nr), (addr))
 
 
-static inline void __clear_bit(int nr, volatile void *addr)
+static inline void __clear_bit(unsigned long nr, volatile void *addr)
 {
        unsigned int *a = (unsigned int *) addr;
        int mask;
@@ -70,15 +70,15 @@ static inline void __clear_bit(int nr, volatile void *addr)
 /*
  * test bit
  */
-static inline int test_bit(int nr, const volatile void *addr)
+static inline int test_bit(unsigned long nr, const volatile void *addr)
 {
-       return 1UL & (((const unsigned int *) addr)[nr >> 5] >> (nr & 31));
+       return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31));
 }
 
 /*
  * change bit
  */
-static inline void __change_bit(int nr, volatile void *addr)
+static inline void __change_bit(unsigned long nr, volatile void *addr)
 {
        int     mask;
        unsigned int *a = (unsigned int *) addr;
@@ -88,7 +88,7 @@ static inline void __change_bit(int nr, volatile void *addr)
        *a ^= mask;
 }
 
-extern void change_bit(int nr, volatile void *addr);
+extern void change_bit(unsigned long nr, volatile void *addr);
 
 /*
  * test and set bit
@@ -135,7 +135,7 @@ extern void change_bit(int nr, volatile void *addr);
 /*
  * test and change bit
  */
-static inline int __test_and_change_bit(int nr, volatile void *addr)
+static inline int __test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        int     mask, retval;
        unsigned int *a = (unsigned int *)addr;
@@ -148,7 +148,7 @@ static inline int __test_and_change_bit(int nr, volatile void *addr)
        return retval;
 }
 
-extern int test_and_change_bit(int nr, volatile void *addr);
+extern int test_and_change_bit(unsigned long nr, volatile void *addr);
 
 #include <asm-generic/bitops/lock.h>
 
diff --git a/arch/mn10300/include/asm/cache.h b/arch/mn10300/include/asm/cache.h
index 781bf613366d747dae1a5cbb9924970f65bbe4c9..f29cde2cfc91af51b21b1644c92eaa974266eb65 100644 (file)
--- a/arch/mn10300/include/asm/cache.h
+++ b/arch/mn10300/include/asm/cache.h
 
 /* instruction cache access registers */
 #define ICACHE_DATA(WAY, ENTRY, OFF) \
-       __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+       __SYSREG(0xc8000000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define ICACHE_TAG(WAY, ENTRY)  \
-       __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+       __SYSREG(0xc8100000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES, u32)
 
-/* instruction cache access registers */
+/* data cache access registers */
 #define DCACHE_DATA(WAY, ENTRY, OFF) \
-       __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10 + (OFF) * 4, u32)
+       __SYSREG(0xc8200000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES + (OFF) * 4, u32)
 #define DCACHE_TAG(WAY, ENTRY)  \
-       __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + (ENTRY) * 0x10, u32)
+       __SYSREG(0xc8300000 + (WAY) * L1_CACHE_WAYDISP + \
+               (ENTRY) * L1_CACHE_BYTES, u32)
 
 #endif /* _ASM_CACHE_H */
diff --git a/arch/mn10300/include/asm/cacheflush.h b/arch/mn10300/include/asm/cacheflush.h
index 29e692f7f030423076d2dd7d09ad765b8c350beb..faed90240dedd1f3f88ba34105a268efa1dd6fd2 100644 (file)
--- a/arch/mn10300/include/asm/cacheflush.h
+++ b/arch/mn10300/include/asm/cacheflush.h
 #include <linux/mm.h>
 
 /*
- * virtually-indexed cache management (our cache is physically indexed)
+ * Primitive routines
  */
-#define flush_cache_all()                      do {} while (0)
-#define flush_cache_mm(mm)                     do {} while (0)
-#define flush_cache_dup_mm(mm)                 do {} while (0)
-#define flush_cache_range(mm, start, end)      do {} while (0)
-#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
-#define flush_cache_vmap(start, end)           do {} while (0)
-#define flush_cache_vunmap(start, end)         do {} while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)                        do {} while (0)
-#define flush_dcache_mmap_lock(mapping)                do {} while (0)
-#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
-
-/*
- * physically-indexed cache management
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
-
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *pg);
-
-#else
-
-#define flush_icache_range(start, end)         do {} while (0)
-#define flush_icache_page(vma, pg)             do {} while (0)
-
-#endif
-
-#define flush_icache_user_range(vma, pg, adr, len) \
-       flush_icache_range(adr, adr + len)
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-       do {                                    \
-               memcpy(dst, src, len);          \
-               flush_icache_page(vma, page);   \
-       } while (0)
-
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-       memcpy(dst, src, len)
-
-/*
- * primitive routines
- */
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+extern void mn10300_local_icache_inv(void);
+extern void mn10300_local_icache_inv_page(unsigned long start);
+extern void mn10300_local_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_inv(void);
+extern void mn10300_local_dcache_inv_page(unsigned long start);
+extern void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_icache_inv(void);
+extern void mn10300_icache_inv_page(unsigned long start);
+extern void mn10300_icache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_icache_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_inv(void);
-extern void mn10300_dcache_inv_page(unsigned start);
-extern void mn10300_dcache_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_inv_page(unsigned long start);
+extern void mn10300_dcache_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_inv_range2(unsigned long start, unsigned long size);
 #ifdef CONFIG_MN10300_CACHE_WBACK
+extern void mn10300_local_dcache_flush(void);
+extern void mn10300_local_dcache_flush_page(unsigned long start);
+extern void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size);
+extern void mn10300_local_dcache_flush_inv(void);
+extern void mn10300_local_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush(void);
-extern void mn10300_dcache_flush_page(unsigned start);
-extern void mn10300_dcache_flush_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_page(unsigned long start);
+extern void mn10300_dcache_flush_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_range2(unsigned long start, unsigned long size);
 extern void mn10300_dcache_flush_inv(void);
-extern void mn10300_dcache_flush_inv_page(unsigned start);
-extern void mn10300_dcache_flush_inv_range(unsigned start, unsigned end);
-extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
+extern void mn10300_dcache_flush_inv_page(unsigned long start);
+extern void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end);
+extern void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size);
 #else
+#define mn10300_local_dcache_flush()                   do {} while (0)
+#define mn10300_local_dcache_flush_page(start)         do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)   do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_inv() \
+               mn10300_local_dcache_inv()
+#define mn10300_local_dcache_flush_inv_page(start) \
+               mn10300_local_dcache_inv_page(start)
+#define mn10300_local_dcache_flush_inv_range(start, end) \
+               mn10300_local_dcache_inv_range(start, end)
+#define mn10300_local_dcache_flush_inv_range2(start, size) \
+               mn10300_local_dcache_inv_range2(start, size)
 #define mn10300_dcache_flush()                         do {} while (0)
 #define mn10300_dcache_flush_page(start)               do {} while (0)
 #define mn10300_dcache_flush_range(start, end)         do {} while (0)
@@ -90,7 +79,26 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
        mn10300_dcache_inv_range2((start), (size))
 #endif /* CONFIG_MN10300_CACHE_WBACK */
 #else
+#define mn10300_local_icache_inv()                     do {} while (0)
+#define mn10300_local_icache_inv_page(start)           do {} while (0)
+#define mn10300_local_icache_inv_range(start, end)     do {} while (0)
+#define mn10300_local_icache_inv_range2(start, size)   do {} while (0)
+#define mn10300_local_dcache_inv()                     do {} while (0)
+#define mn10300_local_dcache_inv_page(start)           do {} while (0)
+#define mn10300_local_dcache_inv_range(start, end)     do {} while (0)
+#define mn10300_local_dcache_inv_range2(start, size)   do {} while (0)
+#define mn10300_local_dcache_flush()                   do {} while (0)
+#define mn10300_local_dcache_flush_inv_page(start)     do {} while (0)
+#define mn10300_local_dcache_flush_inv()               do {} while (0)
+#define mn10300_local_dcache_flush_inv_range(start, end)do {} while (0)
+#define mn10300_local_dcache_flush_inv_range2(start, size) do {} while (0)
+#define mn10300_local_dcache_flush_page(start)         do {} while (0)
+#define mn10300_local_dcache_flush_range(start, end)   do {} while (0)
+#define mn10300_local_dcache_flush_range2(start, size) do {} while (0)
 #define mn10300_icache_inv()                           do {} while (0)
+#define mn10300_icache_inv_page(start)                 do {} while (0)
+#define mn10300_icache_inv_range(start, end)           do {} while (0)
+#define mn10300_icache_inv_range2(start, size)         do {} while (0)
 #define mn10300_dcache_inv()                           do {} while (0)
 #define mn10300_dcache_inv_page(start)                 do {} while (0)
 #define mn10300_dcache_inv_range(start, end)           do {} while (0)
@@ -103,10 +111,56 @@ extern void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size);
 #define mn10300_dcache_flush_page(start)               do {} while (0)
 #define mn10300_dcache_flush_range(start, end)         do {} while (0)
 #define mn10300_dcache_flush_range2(start, size)       do {} while (0)
-#endif /* CONFIG_MN10300_CACHE_DISABLED */
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/*
+ * Virtually-indexed cache management (our cache is physically indexed)
+ */
+#define flush_cache_all()                      do {} while (0)
+#define flush_cache_mm(mm)                     do {} while (0)
+#define flush_cache_dup_mm(mm)                 do {} while (0)
+#define flush_cache_range(mm, start, end)      do {} while (0)
+#define flush_cache_page(vma, vmaddr, pfn)     do {} while (0)
+#define flush_cache_vmap(start, end)           do {} while (0)
+#define flush_cache_vunmap(start, end)         do {} while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#define flush_dcache_page(page)                        do {} while (0)
+#define flush_dcache_mmap_lock(mapping)                do {} while (0)
+#define flush_dcache_mmap_unlock(mapping)      do {} while (0)
+
+/*
+ * Physically-indexed cache management
+ */
+#if defined(CONFIG_MN10300_CACHE_FLUSH_ICACHE)
+extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#elif defined(CONFIG_MN10300_CACHE_INV_ICACHE)
+static inline void flush_icache_page(struct vm_area_struct *vma,
+                                    struct page *page)
+{
+       mn10300_icache_inv_page(page_to_phys(page));
+}
+extern void flush_icache_range(unsigned long start, unsigned long end);
+#else
+#define flush_icache_range(start, end)         do {} while (0)
+#define flush_icache_page(vma, pg)             do {} while (0)
+#endif
+
+
+#define flush_icache_user_range(vma, pg, adr, len) \
+       flush_icache_range(adr, adr + len)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+       do {                                    \
+               memcpy(dst, src, len);          \
+               flush_icache_page(vma, page);   \
+       } while (0)
+
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+       memcpy(dst, src, len)
 
 /*
- * internal debugging function
+ * Internal debugging function
  */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 extern void kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/mn10300/include/asm/cpu-regs.h b/arch/mn10300/include/asm/cpu-regs.h
index 757e9b5388ea5bec4230fad2d44f78e812e68ff6..90ed4a365c97cb57c0a2d071ecd1357ca453b48b 100644 (file)
--- a/arch/mn10300/include/asm/cpu-regs.h
+++ b/arch/mn10300/include/asm/cpu-regs.h
@@ -15,7 +15,6 @@
 #include <linux/types.h>
 #endif
 
-#ifdef CONFIG_MN10300_CPU_AM33V2
 /* we tell the compiler to pretend to be AM33 so that it doesn't try and use
  * the FP regs, but tell the assembler that we're actually allowed AM33v2
  * instructions */
@@ -24,7 +23,6 @@ asm(" .am33_2\n");
 #else
 .am33_2
 #endif
-#endif
 
 #ifdef __KERNEL__
 
@@ -58,6 +56,9 @@ asm(" .am33_2\n");
 #define EPSW_nAR               0x00040000      /* register bank control */
 #define EPSW_ML                        0x00080000      /* monitor level */
 #define EPSW_FE                        0x00100000      /* FPU enable */
+#define EPSW_IM_SHIFT          8               /* EPSW_IM_SHIFT determines the interrupt mode */
+
+#define NUM2EPSW_IM(num)       ((num) << EPSW_IM_SHIFT)
 
 /* FPU registers */
 #define FPCR_EF_I              0x00000001      /* inexact result FPU exception flag */
@@ -99,9 +100,11 @@ asm(" .am33_2\n");
 #define CPUREV                 __SYSREGC(0xc0000050, u32)      /* CPU revision register */
 #define CPUREV_TYPE            0x0000000f      /* CPU type */
 #define CPUREV_TYPE_S          0
-#define CPUREV_TYPE_AM33V1     0x00000000      /* - AM33 V1 core, AM33/1.00 arch */
-#define CPUREV_TYPE_AM33V2     0x00000001      /* - AM33 V2 core, AM33/2.00 arch */
-#define CPUREV_TYPE_AM34V1     0x00000002      /* - AM34 V1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_1     0x00000000      /* - AM33-1 core, AM33/1.00 arch */
+#define CPUREV_TYPE_AM33_2     0x00000001      /* - AM33-2 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_1     0x00000002      /* - AM34-1 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM33_3     0x00000003      /* - AM33-3 core, AM33/2.00 arch */
+#define CPUREV_TYPE_AM34_2     0x00000004      /* - AM34-2 core, AM33/3.00 arch */
 #define CPUREV_REVISION                0x000000f0      /* CPU revision */
 #define CPUREV_REVISION_S      4
 #define CPUREV_ICWAY           0x00000f00      /* number of instruction cache ways */
@@ -180,6 +183,21 @@ asm(" .am33_2\n");
 #define CHCTR_ICWMD            0x0f00          /* instruction cache way mode */
 #define CHCTR_DCWMD            0xf000          /* data cache way mode */
 
+#ifdef CONFIG_AM34_2
+#define ICIVCR                 __SYSREG(0xc0000c00, u32)       /* icache area invalidate control */
+#define ICIVCR_ICIVBSY         0x00000008                      /* icache area invalidate busy */
+#define ICIVCR_ICI             0x00000001                      /* icache area invalidate */
+
+#define ICIVMR                 __SYSREG(0xc0000c04, u32)       /* icache area invalidate mask */
+
+#define        DCPGCR                  __SYSREG(0xc0000c10, u32)       /* data cache area purge control */
+#define        DCPGCR_DCPGBSY          0x00000008                      /* data cache area purge busy */
+#define        DCPGCR_DCP              0x00000002                      /* data cache area purge */
+#define        DCPGCR_DCI              0x00000001                      /* data cache area invalidate */
+
+#define        DCPGMR                  __SYSREG(0xc0000c14, u32)       /* data cache area purge mask */
+#endif /* CONFIG_AM34_2 */
+
 /* MMU control registers */
 #define MMUCTR                 __SYSREG(0xc0000090, u32)       /* MMU control register */
 #define MMUCTR_IRP             0x0000003f      /* instruction TLB replace pointer */
@@ -203,6 +221,9 @@ asm(" .am33_2\n");
 #define MMUCTR_DTL_LOCK0_3     0x03000000      /* - entry 0-3 locked */
 #define MMUCTR_DTL_LOCK0_7     0x04000000      /* - entry 0-7 locked */
 #define MMUCTR_DTL_LOCK0_15    0x05000000      /* - entry 0-15 locked */
+#ifdef CONFIG_AM34_2
+#define MMUCTR_WTE             0x80000000      /* write-through cache TLB entry bit enable */
+#endif
 
 #define PIDR                   __SYSREG(0xc0000094, u16)       /* PID register */
 #define PIDR_PID               0x00ff          /* process identifier */
@@ -231,14 +252,6 @@ asm(" .am33_2\n");
 #define xPTEL_PS_4Mb           0x00000c00      /* - 4Mb page */
 #define xPTEL_PPN              0xfffff006      /* physical page number */
 
-#define xPTEL_V_BIT            0       /* bit numbers corresponding to above masks */
-#define xPTEL_UNUSED1_BIT      1
-#define xPTEL_UNUSED2_BIT      2
-#define xPTEL_C_BIT            3
-#define xPTEL_PV_BIT           4
-#define xPTEL_D_BIT            5
-#define xPTEL_G_BIT            9
-
 #define IPTEU                  __SYSREG(0xc00000a4, u32)       /* instruction TLB virtual addr */
 #define DPTEU                  __SYSREG(0xc00000b4, u32)       /* data TLB virtual addr */
 #define xPTEU_VPN              0xfffffc00      /* virtual page number */
@@ -262,7 +275,16 @@ asm(" .am33_2\n");
 #define xPTEL2_PS_128Kb                0x00000100      /* - 128Kb page */
 #define xPTEL2_PS_1Kb          0x00000200      /* - 1Kb page */
 #define xPTEL2_PS_4Mb          0x00000300      /* - 4Mb page */
-#define xPTEL2_PPN             0xfffffc00      /* physical page number */
+#define xPTEL2_CWT             0x00000400      /* cacheable write-through */
+#define xPTEL2_UNUSED1         0x00000800      /* unused bit (broadcast mask) */
+#define xPTEL2_PPN             0xfffff000      /* physical page number */
+
+#define xPTEL2_V_BIT           0       /* bit numbers corresponding to above masks */
+#define xPTEL2_C_BIT           1
+#define xPTEL2_PV_BIT          2
+#define xPTEL2_D_BIT           3
+#define xPTEL2_G_BIT           7
+#define xPTEL2_UNUSED1_BIT     11
 
 #define MMUFCR                 __SYSREGC(0xc000009c, u32)      /* MMU exception cause */
 #define MMUFCR_IFC             __SYSREGC(0xc000009c, u16)      /* MMU instruction excep cause */
@@ -285,6 +307,47 @@ asm(" .am33_2\n");
 #define MMUFCR_xFC_PR_RWK_RWU  0x01c0          /* - R/W kernel and R/W user */
 #define MMUFCR_xFC_ILLADDR     0x0200          /* illegal address excep flag */
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+/* atomic operation registers */
+#define AAR            __SYSREG(0xc0000a00, u32)       /* cacheable address */
+#define AAR2           __SYSREG(0xc0000a04, u32)       /* uncacheable address */
+#define ADR            __SYSREG(0xc0000a08, u32)       /* data */
+#define ASR            __SYSREG(0xc0000a0c, u32)       /* status */
+#define AARU           __SYSREG(0xd400aa00, u32)       /* user address */
+#define ADRU           __SYSREG(0xd400aa08, u32)       /* user data */
+#define ASRU           __SYSREG(0xd400aa0c, u32)       /* user status */
+
+#define ASR_RW         0x00000008      /* read */
+#define ASR_BW         0x00000004      /* bus error */
+#define ASR_IW         0x00000002      /* interrupt */
+#define ASR_LW         0x00000001      /* bus lock */
+
+#define ASRU_RW                ASR_RW          /* read */
+#define ASRU_BW                ASR_BW          /* bus error */
+#define ASRU_IW                ASR_IW          /* interrupt */
+#define ASRU_LW                ASR_LW          /* bus lock */
+
+/* in inline ASM, we stick the base pointer into a reg and use offsets from
+ * it */
+#define ATOMIC_OPS_BASE_ADDR 0xc0000a00
+#ifndef __ASSEMBLY__
+asm(
+       "_AAR   = 0\n"
+       "_AAR2  = 4\n"
+       "_ADR   = 8\n"
+       "_ASR   = 12\n");
+#else
+#define _AAR           0
+#define _AAR2          4
+#define _ADR           8
+#define _ASR           12
+#endif
+
+/* physical page address for userspace atomic operations registers */
+#define USER_ATOMIC_OPS_PAGE_ADDR  0xd400a000
+
+#endif /* CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_CPU_REGS_H */
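
The comment above describes the addressing trick used by the atomic operations helpers: keep ATOMIC_OPS_BASE_ADDR in an address register and reach AAR/AAR2/ADR/ASR through the _AAR/_AAR2/_ADR/_ASR displacements. The fragment below is only a hedged sketch of that operand style, not code from this series; it assumes the asm-level symbols defined above are visible in the same translation unit and shows nothing of the AM34 unit's actual update sequence.

/* Hedged sketch: read ASR as a displacement from the unit's base address. */
static inline unsigned long atomic_ops_unit_status(void)
{
        unsigned long status;

        asm volatile(
                "       mov     (_ASR,%1),%0    \n"     /* ASR lives at base + 12 */
                : "=d"(status)
                : "a"(ATOMIC_OPS_BASE_ADDR)
                : "memory");
        return status;
}
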
index 58a199da0f4a801f8f4418a01e964a647fa153f2..80337b339c903fecf8544498b154157babeae2b8 100644 (file)
 #ifndef _ASM_DMACTL_REGS_H
 #define _ASM_DMACTL_REGS_H
 
-#include <asm/cpu-regs.h>
-
-#ifdef __KERNEL__
-
-/* DMA registers */
-#define        DMxCTR(N)               __SYSREG(0xd2000000 + ((N) * 0x100), u32)       /* control reg */
-#define        DMxCTR_BG               0x0000001f      /* transfer request source */
-#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
-#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
-#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
-#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
-#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
-#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
-#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
-#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
-#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
-#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
-#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
-#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
-#define        DMxCTR_BG_AFE           0x0000000d      /* - analogue front-end interrupt source */
-#define        DMxCTR_BG_ADC           0x0000000e      /* - A/D conversion end interrupt source */
-#define        DMxCTR_BG_IRDA          0x0000000f      /* - IrDA interrupt source */
-#define        DMxCTR_BG_RTC           0x00000010      /* - RTC interrupt source */
-#define        DMxCTR_BG_XIRQ0         0x00000011      /* - XIRQ0 pin interrupt source */
-#define        DMxCTR_BG_XIRQ1         0x00000012      /* - XIRQ1 pin interrupt source */
-#define        DMxCTR_BG_XDMR0         0x00000013      /* - external request 0 source (XDMR0 pin) */
-#define        DMxCTR_BG_XDMR1         0x00000014      /* - external request 1 source (XDMR1 pin) */
-#define        DMxCTR_SAM              0x000000e0      /* DMA transfer src addr mode */
-#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
-#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
-#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
-#define        DMxCTR_DAM              0x00000000      /* DMA transfer dest addr mode */
-#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
-#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
-#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
-#define        DMxCTR_TM               0x00001800      /* DMA transfer mode */
-#define        DMxCTR_TM_BATCH         0x00000000      /* - batch transfer */
-#define        DMxCTR_TM_INTERM        0x00001000      /* - intermittent transfer */
-#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
-#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
-#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
-#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
-#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
-#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
-#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
-#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
-#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
-#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
-#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
-#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
-#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
-
-#define        DMxSRC(N)               __SYSREG(0xd2000004 + ((N) * 0x100), u32)       /* control reg */
-
-#define        DMxDST(N)               __SYSREG(0xd2000008 + ((N) * 0x100), u32)       /* src addr reg */
-
-#define        DMxSIZ(N)               __SYSREG(0xd200000c + ((N) * 0x100), u32)       /* dest addr reg */
-#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
-
-#define        DMxCYC(N)               __SYSREG(0xd2000010 + ((N) * 0x100), u32)       /* intermittent
-                                                                                * size reg */
-#define DMxCYC_CYC             0x000000ff      /* number of interrmittent transfers -1 */
-
-#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
-#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
-#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
-#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
-
-#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
-#define        DM1ICR                  GxICR(DM0IR1)   /* DMA channel 1 complete intr ctrl reg */
-#define        DM2ICR                  GxICR(DM0IR2)   /* DMA channel 2 complete intr ctrl reg */
-#define        DM3ICR                  GxICR(DM0IR3)   /* DMA channel 3 complete intr ctrl reg */
-
-#ifndef __ASSEMBLY__
-
-struct mn10300_dmactl_regs {
-       u32             ctr;
-       const void      *src;
-       void            *dst;
-       u32             siz;
-       u32             cyc;
-} __attribute__((aligned(0x100)));
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* __KERNEL__ */
+#include <proc/dmactl-regs.h>
 
 #endif /* _ASM_DMACTL_REGS_H */
index e5fa97cd9a147222c567b39f1e8bd01cec18ff4a..8157c9267f426ac7dabdd6f0dcef9a67bd2ff5a3 100644 (file)
 #define R_MN10300_SYM_DIFF     33      /* Adjustment when relaxing. */
 #define R_MN10300_ALIGN        34      /* Alignment requirement. */
 
+/*
+ * AM33/AM34 HW Capabilities
+ */
+#define HWCAP_MN10300_ATOMIC_OP_UNIT   1       /* Has AM34 Atomic Operations */
+
+
 /*
  * ELF register definitions..
  */
@@ -47,8 +53,6 @@ typedef struct {
        u_int32_t       fpcr;
 } elf_fpregset_t;
 
-extern int dump_fpu(struct pt_regs *, elf_fpregset_t *);
-
 /*
  * This is used to ensure we don't load something for the wrong architecture
  */
@@ -130,7 +134,11 @@ do {                                               \
  * instruction set this CPU supports.  This could be done in user space,
  * but it's not easy, and we've already done it here.
  */
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+#define ELF_HWCAP      (HWCAP_MN10300_ATOMIC_OP_UNIT)
+#else
 #define ELF_HWCAP      (0)
+#endif
 
 /*
  * This yields a string that ld.so will use to load implementation
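
Because ELF_HWCAP is exported through the auxiliary vector, userspace can test for the atomic operations unit at run time. The snippet below is a hedged userspace sketch, not part of this patch; it assumes a C library that provides getauxval() (glibc 2.16 onwards).

/* Hedged userspace sketch: check AT_HWCAP for the AM34 atomic ops unit. */
#include <stdio.h>
#include <sys/auxv.h>

#define HWCAP_MN10300_ATOMIC_OP_UNIT    1       /* mirrors the kernel define */

int main(void)
{
        unsigned long hwcap = getauxval(AT_HWCAP);

        if (hwcap & HWCAP_MN10300_ATOMIC_OP_UNIT)
                printf("atomic operations unit present\n");
        else
                printf("atomic operations unit absent\n");
        return 0;
}
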
index fa16466ef3f93cb38a4b325e08c46a2d4d7f1146..ca3e20508c77556a77492b85da8697d8eba4ced6 100644 (file)
@@ -15,8 +15,8 @@
 
 /*
  * define the breakpoint instruction opcode to use
- * - note that the JTAG unit steals 0xFF, so we want to avoid that if we can
- *   (can use 0xF7)
+ * - note that the JTAG unit steals 0xFF, so you can't use JTAG and GDBSTUB at
+ *   the same time.
  */
 #define GDBSTUB_BKPT           0xFF
 
@@ -90,7 +90,6 @@ enum exception_code {
 
 extern void __set_intr_stub(enum exception_code code, void *handler);
 extern void set_intr_stub(enum exception_code code, void *handler);
-extern void set_jtag_stub(enum exception_code code, void *handler);
 
 struct pt_regs;
 
@@ -102,7 +101,6 @@ extern asmlinkage void dtlb_aerror(void);
 extern asmlinkage void raw_bus_error(void);
 extern asmlinkage void double_fault(void);
 extern asmlinkage int  system_call(struct pt_regs *);
-extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void nmi(struct pt_regs *, enum exception_code);
 extern asmlinkage void uninitialised_exception(struct pt_regs *,
                                               enum exception_code);
@@ -116,6 +114,8 @@ extern void die(const char *, struct pt_regs *, enum exception_code)
 
 extern int die_if_no_fixup(const char *, struct pt_regs *, enum exception_code);
 
+#define NUM2EXCEP_IRQ_LEVEL(num)       (EXCEP_IRQ_LEVEL0 + (num) * 8)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_EXCEPTIONS_H */
index 64a2b83a7a6aefb09490f4e68a8a3d8f67fa7686..b7625de8eade6754edf14f5de0340d9a0aa76e57 100644 (file)
 #ifndef _ASM_FPU_H
 #define _ASM_FPU_H
 
-#include <asm/processor.h>
+#ifndef __ASSEMBLY__
+
+#include <linux/sched.h>
+#include <asm/exceptions.h>
 #include <asm/sigcontext.h>
-#include <asm/user.h>
 
 #ifdef __KERNEL__
 
-/* the task that owns the FPU state */
+extern asmlinkage void fpu_disabled(void);
+
+#ifdef CONFIG_FPU
+
+#ifdef CONFIG_LAZY_SAVE_FPU
+/* the task that currently owns the FPU state */
 extern struct task_struct *fpu_state_owner;
+#endif
 
-#define set_using_fpu(tsk)                             \
-do {                                                   \
-       (tsk)->thread.fpu_flags |= THREAD_USING_FPU;    \
-} while (0)
+#if (THREAD_USING_FPU & ~0xff)
+#error THREAD_USING_FPU must be smaller than 0x100.
+#endif
 
-#define clear_using_fpu(tsk)                           \
-do {                                                   \
-       (tsk)->thread.fpu_flags &= ~THREAD_USING_FPU;   \
-} while (0)
+static inline void set_using_fpu(struct task_struct *tsk)
+{
+       asm volatile(
+               "bset %0,(0,%1)"
+               :
+               : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+               : "memory", "cc");
+}
 
-#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
+static inline void clear_using_fpu(struct task_struct *tsk)
+{
+       asm volatile(
+               "bclr %0,(0,%1)"
+               :
+               : "i"(THREAD_USING_FPU), "a"(&tsk->thread.fpu_flags)
+               : "memory", "cc");
+}
 
-#define unlazy_fpu(tsk)                                        \
-do {                                                   \
-       preempt_disable();                              \
-       if (fpu_state_owner == (tsk))                   \
-               fpu_save(&tsk->thread.fpu_state);       \
-       preempt_enable();                               \
-} while (0)
-
-#define exit_fpu()                             \
-do {                                           \
-       struct task_struct *__tsk = current;    \
-       preempt_disable();                      \
-       if (fpu_state_owner == __tsk)           \
-               fpu_state_owner = NULL;         \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_fpu()                                    \
-do {                                                   \
-       struct task_struct *__tsk = current;            \
-       preempt_disable();                              \
-       if (fpu_state_owner == __tsk) {                 \
-               fpu_state_owner = NULL;                 \
-               __tsk->thread.uregs->epsw &= ~EPSW_FE;  \
-       }                                               \
-       preempt_enable();                               \
-       clear_using_fpu(__tsk);                         \
-} while (0)
+#define is_using_fpu(tsk) ((tsk)->thread.fpu_flags & THREAD_USING_FPU)
 
-extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_kill_state(struct task_struct *);
-extern asmlinkage void fpu_disabled(struct pt_regs *, enum exception_code);
 extern asmlinkage void fpu_exception(struct pt_regs *, enum exception_code);
-
-#ifdef CONFIG_FPU
+extern asmlinkage void fpu_invalid_op(struct pt_regs *, enum exception_code);
+extern asmlinkage void fpu_init_state(void);
 extern asmlinkage void fpu_save(struct fpu_state_struct *);
-extern asmlinkage void fpu_restore(struct fpu_state_struct *);
-#else
-#define fpu_save(a)
-#define fpu_restore(a)
-#endif /* CONFIG_FPU  */
-
-/*
- * signal frame handlers
- */
 extern int fpu_setup_sigcontext(struct fpucontext *buf);
 extern int fpu_restore_sigcontext(struct fpucontext *buf);
 
+static inline void unlazy_fpu(struct task_struct *tsk)
+{
+       preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               fpu_save(&tsk->thread.fpu_state);
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#else
+       if (fpu_state_owner == tsk)
+               fpu_save(&tsk->thread.fpu_state);
+#endif
+       preempt_enable();
+}
+
+static inline void exit_fpu(void)
+{
+#ifdef CONFIG_LAZY_SAVE_FPU
+       struct task_struct *tsk = current;
+
+       preempt_disable();
+       if (fpu_state_owner == tsk)
+               fpu_state_owner = NULL;
+       preempt_enable();
+#endif
+}
+
+static inline void flush_fpu(void)
+{
+       struct task_struct *tsk = current;
+
+       preempt_disable();
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#else
+       if (fpu_state_owner == tsk) {
+               fpu_state_owner = NULL;
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+       }
+#endif
+       preempt_enable();
+       clear_using_fpu(tsk);
+}
+
+#else /* CONFIG_FPU */
+
+extern asmlinkage
+void unexpected_fpu_exception(struct pt_regs *, enum exception_code);
+#define fpu_invalid_op unexpected_fpu_exception
+#define fpu_exception unexpected_fpu_exception
+
+struct task_struct;
+struct fpu_state_struct;
+static inline bool is_using_fpu(struct task_struct *tsk) { return false; }
+static inline void set_using_fpu(struct task_struct *tsk) {}
+static inline void clear_using_fpu(struct task_struct *tsk) {}
+static inline void fpu_init_state(void) {}
+static inline void fpu_save(struct fpu_state_struct *s) {}
+static inline void fpu_kill_state(struct task_struct *tsk) {}
+static inline void unlazy_fpu(struct task_struct *tsk) {}
+static inline void exit_fpu(void) {}
+static inline void flush_fpu(void) {}
+static inline int fpu_setup_sigcontext(struct fpucontext *buf) { return 0; }
+static inline int fpu_restore_sigcontext(struct fpucontext *buf) { return 0; }
+#endif /* CONFIG_FPU  */
+
 #endif /* __KERNEL__ */
+#endif /* !__ASSEMBLY__ */
 #endif /* _ASM_FPU_H */
index 5b1949bdf039a85854594e728dfe100e009b7ee3..2ee58e3eb6b355b75166bf57334c01498a66b8fd 100644 (file)
@@ -18,6 +18,7 @@
 #ifndef __ASM_OFFSETS_H__
 #include <asm/asm-offsets.h>
 #endif
+#include <asm/thread_info.h>
 
 #define pi break
 
        movm    [d2,d3,a2,a3,exreg0,exreg1,exother],(sp)
        mov     sp,fp                           # FRAME pointer in A3
        add     -12,sp                          # allow for calls to be made
-       mov     (__frame),a1
-       mov     a1,(REG_NEXT,fp)
-       mov     fp,(__frame)
 
-       and     ~EPSW_FE,epsw                   # disable the FPU inside the kernel
+       # push the exception frame onto the front of the list
+       GET_THREAD_INFO a1
+       mov     (TI_frame,a1),a0
+       mov     a0,(REG_NEXT,fp)
+       mov     fp,(TI_frame,a1)
+
+       # disable the FPU inside the kernel
+       and     ~EPSW_FE,epsw
 
        # we may be holding current in E2
 #ifdef CONFIG_MN10300_CURRENT_IN_E2
 .macro RESTORE_ALL
        # peel back the stack to the calling frame
        # - this permits execve() to discard extra frames due to kernel syscalls
-       mov     (__frame),fp
+       GET_THREAD_INFO a0
+       mov     (TI_frame,a0),fp
        mov     fp,sp
-       mov     (REG_NEXT,fp),d0                # userspace has regs->next == 0
-       mov     d0,(__frame)
+       mov     (REG_NEXT,fp),d0
+       mov     d0,(TI_frame,a0)                # userspace has regs->next == 0
 
 #ifndef CONFIG_MN10300_USING_JTAG
        mov     (REG_EPSW,fp),d0
index 41ed267639649b8e91fae62b31a80f3ae9d55763..f5495ad82b77bc753b218b3a3498ea089e511134 100644 (file)
@@ -110,7 +110,7 @@ extern asmlinkage void gdbstub_exception(struct pt_regs *, enum exception_code);
 extern asmlinkage void __gdbstub_bug_trap(void);
 extern asmlinkage void __gdbstub_pause(void);
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 extern asmlinkage void gdbstub_purge_cache(void);
 #else
 #define gdbstub_purge_cache()  do {} while (0)
index 54d950117674f60a9d7b920fba81c9c6fd8d65ee..0000d650b55f130892d59face4f3ff4fd1365d47 100644 (file)
 /* assembly code in softirq.h is sensitive to the offsets of these fields */
 typedef struct {
        unsigned int    __softirq_pending;
-       unsigned long   idle_timestamp;
+#ifdef CONFIG_MN10300_WD_TIMER
        unsigned int    __nmi_count;    /* arch dependent */
        unsigned int    __irq_count;    /* arch dependent */
+#endif
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
index e2155e686451f51eba912857995fc9ad79461fbc..bfe2d88604d9041821f33c11019477ff47bda07d 100644 (file)
@@ -87,7 +87,7 @@ static inline unsigned long __kmap_atomic(struct page *page)
                BUG();
 #endif
        set_pte(kmap_pte - idx, mk_pte(page, kmap_prot));
-       __flush_tlb_one(vaddr);
+       local_flush_tlb_one(vaddr);
 
        return vaddr;
 }
@@ -116,7 +116,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
                 * this pte without first remapping it
                 */
                pte_clear(kmap_pte - idx);
-               __flush_tlb_one(vaddr);
+               local_flush_tlb_one(vaddr);
        }
 #endif
 
index ba544c796c5a4786c5dce521b1201ec79f5d62ff..585b708c2bc0620fb37b56df22a33df91e8ad5a1 100644 (file)
 
 #ifdef __KERNEL__
 
-/* interrupt controller registers */
-#define GxICR(X)               __SYSREG(0xd4000000 + (X) * 4, u16)     /* group irq ctrl regs */
-
-#define IAGR                   __SYSREG(0xd4000100, u16)       /* intr acceptance group reg */
-#define IAGR_GN                        0x00fc          /* group number register
-                                                * (documentation _has_ to be wrong)
-                                                */
+/*
+ * Interrupt controller registers
+ * - Registers 64-191 are at addresses offset from the main array
+ */
+#define GxICR(X)                                               \
+       __SYSREG(0xd4000000 + (X) * 4 +                         \
+                (((X) >= 64) && ((X) < 192)) * 0xf00, u16)
 
-#define EXTMD                  __SYSREG(0xd4000200, u16)       /* external pin intr spec reg */
-#define GET_XIRQ_TRIGGER(X) ((EXTMD >> ((X) * 2)) & 3)
+#define GxICR_u8(X)                                                    \
+       __SYSREG(0xd4000000 + (X) * 4 +                                 \
+                (((X) >= 64) && ((X) < 192)) * 0xf00, u8)
 
-#define SET_XIRQ_TRIGGER(X,Y)                  \
-do {                                           \
-       u16 x = EXTMD;                          \
-       x &= ~(3 << ((X) * 2));                 \
-       x |= ((Y) & 3) << ((X) * 2);            \
-       EXTMD = x;                              \
-} while (0)
+#include <proc/intctl-regs.h>
 
 #define XIRQ_TRIGGER_LOWLEVEL  0
 #define XIRQ_TRIGGER_HILEVEL   1
@@ -59,10 +54,18 @@ do {                                                \
 #define GxICR_LEVEL_5          0x5000          /* - level 5 */
 #define GxICR_LEVEL_6          0x6000          /* - level 6 */
 #define GxICR_LEVEL_SHIFT      12
+#define GxICR_NMI              0x8000          /* nmi request flag */
+
+#define NUM2GxICR_LEVEL(num)   ((num) << GxICR_LEVEL_SHIFT)
 
 #ifndef __ASSEMBLY__
 extern void set_intr_level(int irq, u16 level);
-extern void set_intr_postackable(int irq);
+extern void mn10300_intc_set_level(unsigned int irq, unsigned int level);
+extern void mn10300_intc_clear(unsigned int irq);
+extern void mn10300_intc_set(unsigned int irq);
+extern void mn10300_intc_enable(unsigned int irq);
+extern void mn10300_intc_disable(unsigned int irq);
+extern void mn10300_set_lateack_irq_type(int irq);
 #endif
 
 /* external interrupts */
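
To make the split register layout concrete, the hypothetical host-side check below evaluates the same arithmetic as the new GxICR() macro: IRQs 0-63 and 192 upwards map linearly from 0xd4000000, while IRQs 64-191 sit a further 0xf00 above that.

/* Hypothetical sketch of the GxICR() address arithmetic; not kernel code. */
#include <assert.h>

static unsigned long gxicr_addr(unsigned int irq)
{
        return 0xd4000000UL + irq * 4 +
                ((irq >= 64 && irq < 192) ? 0xf00 : 0);
}

int main(void)
{
        assert(gxicr_addr(10)  == 0xd4000028UL);        /* below the remapped window */
        assert(gxicr_addr(64)  == 0xd4001000UL);        /* first remapped register */
        assert(gxicr_addr(191) == 0xd40011fcUL);        /* last remapped register */
        assert(gxicr_addr(192) == 0xd4000300UL);        /* back to the linear map */
        return 0;
}
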
index c1a4119e6497ee2b23a4460d3c2ef05294cec128..787255da744e2647a592b1aea12e20f3e76f83f9 100644 (file)
@@ -206,6 +206,19 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
 #define iowrite32_rep(p, src, count) \
        outsl((unsigned long) (p), (src), (count))
 
+#define readsb(p, dst, count) \
+       insb((unsigned long) (p), (dst), (count))
+#define readsw(p, dst, count) \
+       insw((unsigned long) (p), (dst), (count))
+#define readsl(p, dst, count) \
+       insl((unsigned long) (p), (dst), (count))
+
+#define writesb(p, src, count) \
+       outsb((unsigned long) (p), (src), (count))
+#define writesw(p, src, count) \
+       outsw((unsigned long) (p), (src), (count))
+#define writesl(p, src, count) \
+       outsl((unsigned long) (p), (src), (count))
 
 #define IO_SPACE_LIMIT 0xffffffff
 
index 25c045d16d1c49fd2d85be264429c3f2549ccfd9..1a73fb3f60c6607734a57470b391c9f5b4085ebc 100644 (file)
 /* this number is used when no interrupt has been assigned */
 #define NO_IRQ         INT_MAX
 
-/* hardware irq numbers */
-#define NR_IRQS                GxICR_NUM_IRQS
+/*
+ * hardware irq numbers
+ * - the ASB2364 has an FPGA with an IRQ multiplexer on it
+ */
+#ifdef CONFIG_MN10300_UNIT_ASB2364
+#include <unit/irq.h>
+#else
+#define NR_CPU_IRQS    GxICR_NUM_IRQS
+#define NR_IRQS                NR_CPU_IRQS
+#endif
 
 /* external hardware irq numbers */
 #define NR_XIRQS       GxICR_NUM_XIRQS
index a848cd232eb4a1c14b30d1bec0c5c87998bfd450..97d0cb5af80784c632ebc8442ecb1343931ff865 100644 (file)
 #define ARCH_HAS_OWN_IRQ_REGS
 
 #ifndef __ASSEMBLY__
-#define get_irq_regs() (__frame)
+static inline __attribute__((const))
+struct pt_regs *get_irq_regs(void)
+{
+       return current_frame();
+}
 #endif
 
 #endif /* _ASM_IRQ_REGS_H */
index 5e529a117cb29915276dd30a17e41a02d406bfd0..7a7ae12c7119e42f9c83943dec0ad14ab9dfa7a7 100644 (file)
@@ -13,6 +13,9 @@
 #define _ASM_IRQFLAGS_H
 
 #include <asm/cpu-regs.h>
+#ifndef __ASSEMBLY__
+#include <linux/smp.h>
+#endif
 
 /*
  * interrupt control
  *   - level 6 - timer interrupt
  * - "enabled":  run in IM7
  */
-#ifdef CONFIG_MN10300_TTYSM
-#define MN10300_CLI_LEVEL      EPSW_IM_2
-#else
-#define MN10300_CLI_LEVEL      EPSW_IM_1
-#endif
+#define MN10300_CLI_LEVEL      (CONFIG_LINUX_CLI_LEVEL << EPSW_IM_SHIFT)
 
 #ifndef __ASSEMBLY__
 
@@ -64,11 +63,12 @@ static inline unsigned long arch_local_irq_save(void)
 /*
  * we make sure arch_irq_enable() doesn't cause priority inversion
  */
-extern unsigned long __mn10300_irq_enabled_epsw;
+extern unsigned long __mn10300_irq_enabled_epsw[];
 
 static inline void arch_local_irq_enable(void)
 {
        unsigned long tmp;
+       int cpu = raw_smp_processor_id();
 
        asm volatile(
                "       mov     epsw,%0         \n"
@@ -76,8 +76,8 @@ static inline void arch_local_irq_enable(void)
                "       or      %2,%0           \n"
                "       mov     %0,epsw         \n"
                : "=&d"(tmp)
-               : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw)
-               : "memory");
+               : "i"(~EPSW_IM), "r"(__mn10300_irq_enabled_epsw[cpu])
+               : "memory", "cc");
 }
 
 static inline void arch_local_irq_restore(unsigned long flags)
@@ -94,7 +94,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
 
 static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-       return (flags & EPSW_IM) <= MN10300_CLI_LEVEL;
+       return (flags & (EPSW_IE | EPSW_IM)) != (EPSW_IE | EPSW_IM_7);
 }
 
 static inline bool arch_irqs_disabled(void)
@@ -109,6 +109,9 @@ static inline bool arch_irqs_disabled(void)
  */
 static inline void arch_safe_halt(void)
 {
+#ifdef CONFIG_SMP
+       arch_local_irq_enable();
+#else
        asm volatile(
                "       or      %0,epsw \n"
                "       nop             \n"
@@ -117,7 +120,97 @@ static inline void arch_safe_halt(void)
                :
                : "i"(EPSW_IE|EPSW_IM), "n"(&CPUM), "i"(CPUM_SLEEP)
                : "cc");
+#endif
 }
 
+#define __sleep_cpu()                          \
+do {                                           \
+       asm volatile(                           \
+               "       bset    %1,(%0)\n"      \
+               "1:     btst    %1,(%0)\n"      \
+               "       bne     1b\n"           \
+               :                               \
+               : "i"(&CPUM), "i"(CPUM_SLEEP)   \
+               : "cc"                          \
+               );                              \
+} while (0)
+
+static inline void arch_local_cli(void)
+{
+       asm volatile(
+               "       and     %0,epsw         \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               :
+               : "i"(~EPSW_IE)
+               : "memory"
+               );
+}
+
+static inline unsigned long arch_local_cli_save(void)
+{
+       unsigned long flags = arch_local_save_flags();
+       arch_local_cli();
+       return flags;
+}
+
+static inline void arch_local_sti(void)
+{
+       asm volatile(
+               "       or      %0,epsw         \n"
+               :
+               : "i"(EPSW_IE)
+               : "memory");
+}
+
+static inline void arch_local_change_intr_mask_level(unsigned long level)
+{
+       asm volatile(
+               "       and     %0,epsw         \n"
+               "       or      %1,epsw         \n"
+               :
+               : "i"(~EPSW_IM), "i"(EPSW_IE | level)
+               : "cc", "memory");
+}
+
+#else /* !__ASSEMBLY__ */
+
+#define LOCAL_SAVE_FLAGS(reg)                  \
+       mov     epsw,reg
+
+#define LOCAL_IRQ_DISABLE                              \
+       and     ~EPSW_IM,epsw;                          \
+       or      EPSW_IE|MN10300_CLI_LEVEL,epsw;         \
+       nop;                                            \
+       nop;                                            \
+       nop
+
+#define LOCAL_IRQ_ENABLE               \
+       or      EPSW_IE|EPSW_IM_7,epsw
+
+#define LOCAL_IRQ_RESTORE(reg) \
+       mov     reg,epsw
+
+#define LOCAL_CLI_SAVE(reg)    \
+       mov     epsw,reg;       \
+       and     ~EPSW_IE,epsw;  \
+       nop;                    \
+       nop;                    \
+       nop
+
+#define LOCAL_CLI              \
+       and     ~EPSW_IE,epsw;  \
+       nop;                    \
+       nop;                    \
+       nop
+
+#define LOCAL_STI              \
+       or      EPSW_IE,epsw
+
+#define LOCAL_CHANGE_INTR_MASK_LEVEL(level)    \
+       and     ~EPSW_IM,epsw;                  \
+       or      EPSW_IE|(level),epsw
+
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_IRQFLAGS_H */
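
The new arch_local_cli()/arch_local_sti() helpers touch only the EPSW IE bit and leave the interrupt mask level alone. Below is a hedged sketch of the usual save/disable/restore pairing, not code from this series; the critical-section body is a placeholder.

/* Hedged sketch: save EPSW, clear IE around a short critical section,
 * then write the saved EPSW back. */
static inline void example_protected_update(volatile u16 *reg, u16 val)
{
        unsigned long flags;

        flags = arch_local_cli_save();  /* save EPSW and clear the IE bit */
        *reg = val;                     /* ... critical section ... */
        arch_local_irq_restore(flags);  /* restore the saved EPSW */
}
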
index cb294c244de3395e94792912a186e0e36063635d..c8f6c82672adb46ceefb31f719c72d80bb53d3f3 100644 (file)
 #include <asm/tlbflush.h>
 #include <asm-generic/mm_hooks.h>
 
+#define MMU_CONTEXT_TLBPID_NR          256
 #define MMU_CONTEXT_TLBPID_MASK                0x000000ffUL
 #define MMU_CONTEXT_VERSION_MASK       0xffffff00UL
 #define MMU_CONTEXT_FIRST_VERSION      0x00000100UL
 #define MMU_NO_CONTEXT                 0x00000000UL
-
-extern unsigned long mmu_context_cache[NR_CPUS];
-#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+#define MMU_CONTEXT_TLBPID_LOCK_NR     0
 
 #define enter_lazy_tlb(mm, tsk)        do {} while (0)
 
+static inline void cpu_ran_vm(int cpu, struct mm_struct *mm)
+{
 #ifdef CONFIG_SMP
-#define cpu_ran_vm(cpu, mm) \
-       cpumask_set_cpu((cpu), mm_cpumask(mm))
-#define cpu_maybe_ran_vm(cpu, mm) \
-       cpumask_test_and_set_cpu((cpu), mm_cpumask(mm))
+       cpumask_set_cpu(cpu, mm_cpumask(mm));
+#endif
+}
+
+static inline bool cpu_maybe_ran_vm(int cpu, struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+       return cpumask_test_and_set_cpu(cpu, mm_cpumask(mm));
 #else
-#define cpu_ran_vm(cpu, mm)            do {} while (0)
-#define cpu_maybe_ran_vm(cpu, mm)      true
-#endif /* CONFIG_SMP */
+       return true;
+#endif
+}
 
-/*
- * allocate an MMU context
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+extern unsigned long mmu_context_cache[NR_CPUS];
+#define mm_context(mm) (mm->context.tlbpid[smp_processor_id()])
+
+/**
+ * allocate_mmu_context - Allocate storage for the arch-specific MMU data
+ * @mm: The userspace VM context being set up
  */
 static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
 {
@@ -58,7 +68,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm)
        if (!(mc & MMU_CONTEXT_TLBPID_MASK)) {
                /* we exhausted the TLB PIDs of this version on this CPU, so we
                 * flush this CPU's TLB in its entirety and start new cycle */
-               flush_tlb_all();
+               local_flush_tlb_all();
 
                /* fix the TLB version if needed (we avoid version #0 so as to
                 * distinguish MMU_NO_CONTEXT) */
@@ -100,23 +110,35 @@ static inline int init_new_context(struct task_struct *tsk,
        return 0;
 }
 
-/*
- * destroy context related info for an mm_struct that is about to be put to
- * rest
- */
-#define destroy_context(mm)    do { } while (0)
-
 /*
  * after we have set current->mm to a new value, this activates the context for
  * the new mm so we see the new mappings.
  */
-static inline void activate_context(struct mm_struct *mm, int cpu)
+static inline void activate_context(struct mm_struct *mm)
 {
        PIDR = get_mmu_context(mm) & MMU_CONTEXT_TLBPID_MASK;
 }
+#else  /* CONFIG_MN10300_TLB_USE_PIDR */
 
-/*
- * change between virtual memory sets
+#define init_new_context(tsk, mm)      (0)
+#define activate_context(mm)           local_flush_tlb()
+
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
+
+/**
+ * destroy_context - Destroy mm context information
+ * @mm: The MM being destroyed.
+ *
+ * Destroy context related info for an mm_struct that is about to be put to
+ * rest
+ */
+#define destroy_context(mm)    do {} while (0)
+
+/**
+ * switch_mm - Change between userspace virtual memory contexts
+ * @prev: The outgoing MM context.
+ * @next: The incoming MM context.
+ * @tsk: The incoming task.
  */
 static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
                             struct task_struct *tsk)
@@ -124,11 +146,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        int cpu = smp_processor_id();
 
        if (prev != next) {
+#ifdef CONFIG_SMP
+               per_cpu(cpu_tlbstate, cpu).active_mm = next;
+#endif
                cpu_ran_vm(cpu, next);
-               activate_context(next, cpu);
                PTBR = (unsigned long) next->pgd;
-       } else if (!cpu_maybe_ran_vm(cpu, next)) {
-               activate_context(next, cpu);
+               activate_context(next);
        }
 }
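
For reference, the context value packs an 8-bit TLB PID (what activate_context() loads into PIDR) underneath a 24-bit version that is bumped whenever the PIDs wrap. The helpers below are a hypothetical illustration of the split implied by the masks above, not part of the patch.

/* Hypothetical sketch: decompose a context value using the masks above. */
static inline unsigned long ctx_tlbpid(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_TLBPID_MASK;   /* low 8 bits -> loaded into PIDR */
}

static inline unsigned long ctx_version(unsigned long ctx)
{
        return ctx & MMU_CONTEXT_VERSION_MASK;  /* upper 24 bits, never 0 while in use */
}
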
 
index a19f11327cd87ce9dd2c578c1ecf25bf655d083f..146bacf193eac3f6c27536bd4b4b04b73942b60b 100644 (file)
@@ -11,7 +11,6 @@
 #ifndef _ASM_PGALLOC_H
 #define _ASM_PGALLOC_H
 
-#include <asm/processor.h>
 #include <asm/page.h>
 #include <linux/threads.h>
 #include <linux/mm.h>          /* for struct page */
index b049a8bd157774fd9c3f20d208913d04a8761c70..a1e894b5f65b9bab8e51d99010e80172c7696dd3 100644 (file)
@@ -90,46 +90,58 @@ extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
  * The vmalloc() routines also leave a hole of 4kB between each vmalloced
  * area to catch addressing errors.
  */
+#ifndef __ASSEMBLY__
+#define VMALLOC_OFFSET (8UL * 1024 * 1024)
+#define VMALLOC_START  (0x70000000UL)
+#define VMALLOC_END    (0x7C000000UL)
+#else
 #define VMALLOC_OFFSET (8 * 1024 * 1024)
 #define VMALLOC_START  (0x70000000)
 #define VMALLOC_END    (0x7C000000)
+#endif
 
 #ifndef __ASSEMBLY__
 extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #endif
 
-/* IPTEL/DPTEL bit assignments */
-#define _PAGE_BIT_VALID                xPTEL_V_BIT
-#define _PAGE_BIT_ACCESSED     xPTEL_UNUSED1_BIT       /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_NX           xPTEL_UNUSED2_BIT       /* mustn't be loaded into IPTEL/DPTEL */
-#define _PAGE_BIT_CACHE                xPTEL_C_BIT
-#define _PAGE_BIT_PRESENT      xPTEL_PV_BIT
-#define _PAGE_BIT_DIRTY                xPTEL_D_BIT
-#define _PAGE_BIT_GLOBAL       xPTEL_G_BIT
-
-#define _PAGE_VALID            xPTEL_V
-#define _PAGE_ACCESSED         xPTEL_UNUSED1
-#define _PAGE_NX               xPTEL_UNUSED2           /* no-execute bit */
-#define _PAGE_CACHE            xPTEL_C
-#define _PAGE_PRESENT          xPTEL_PV
-#define _PAGE_DIRTY            xPTEL_D
-#define _PAGE_PROT             xPTEL_PR
-#define _PAGE_PROT_RKNU                xPTEL_PR_ROK
-#define _PAGE_PROT_WKNU                xPTEL_PR_RWK
-#define _PAGE_PROT_RKRU                xPTEL_PR_ROK_ROU
-#define _PAGE_PROT_WKRU                xPTEL_PR_RWK_ROU
-#define _PAGE_PROT_WKWU                xPTEL_PR_RWK_RWU
-#define _PAGE_GLOBAL           xPTEL_G
-#define _PAGE_PSE              xPTEL_PS_4Mb            /* 4MB page */
-
-#define _PAGE_FILE             xPTEL_UNUSED1_BIT       /* set:pagecache unset:swap */
-
-#define __PAGE_PROT_UWAUX      0x040
-#define __PAGE_PROT_USER       0x080
-#define __PAGE_PROT_WRITE      0x100
+/* IPTEL2/DPTEL2 bit assignments */
+#define _PAGE_BIT_VALID                xPTEL2_V_BIT
+#define _PAGE_BIT_CACHE                xPTEL2_C_BIT
+#define _PAGE_BIT_PRESENT      xPTEL2_PV_BIT
+#define _PAGE_BIT_DIRTY                xPTEL2_D_BIT
+#define _PAGE_BIT_GLOBAL       xPTEL2_G_BIT
+#define _PAGE_BIT_ACCESSED     xPTEL2_UNUSED1_BIT      /* mustn't be loaded into IPTEL2/DPTEL2 */
+
+#define _PAGE_VALID            xPTEL2_V
+#define _PAGE_CACHE            xPTEL2_C
+#define _PAGE_PRESENT          xPTEL2_PV
+#define _PAGE_DIRTY            xPTEL2_D
+#define _PAGE_PROT             xPTEL2_PR
+#define _PAGE_PROT_RKNU                xPTEL2_PR_ROK
+#define _PAGE_PROT_WKNU                xPTEL2_PR_RWK
+#define _PAGE_PROT_RKRU                xPTEL2_PR_ROK_ROU
+#define _PAGE_PROT_WKRU                xPTEL2_PR_RWK_ROU
+#define _PAGE_PROT_WKWU                xPTEL2_PR_RWK_RWU
+#define _PAGE_GLOBAL           xPTEL2_G
+#define _PAGE_PS_MASK          xPTEL2_PS
+#define _PAGE_PS_4Kb           xPTEL2_PS_4Kb
+#define _PAGE_PS_128Kb         xPTEL2_PS_128Kb
+#define _PAGE_PS_1Kb           xPTEL2_PS_1Kb
+#define _PAGE_PS_4Mb           xPTEL2_PS_4Mb
+#define _PAGE_PSE              xPTEL2_PS_4Mb           /* 4MB page */
+#define _PAGE_CACHE_WT         xPTEL2_CWT
+#define _PAGE_ACCESSED         xPTEL2_UNUSED1
+#define _PAGE_NX               0                       /* no-execute bit */
+
+/* If _PAGE_VALID is clear, we use these: */
+#define _PAGE_FILE             xPTEL2_C        /* set:pagecache unset:swap */
+#define _PAGE_PROTNONE         0x000           /* If not present */
+
+#define __PAGE_PROT_UWAUX      0x010
+#define __PAGE_PROT_USER       0x020
+#define __PAGE_PROT_WRITE      0x040
 
 #define _PAGE_PRESENTV         (_PAGE_PRESENT|_PAGE_VALID)
-#define _PAGE_PROTNONE         0x000   /* If not present */
 
 #ifndef __ASSEMBLY__
 
@@ -170,6 +182,9 @@ extern pte_t kernel_vmalloc_ptes[(VMALLOC_END - VMALLOC_START) / PAGE_SIZE];
 #define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE)
 #define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
 
+#define __PAGE_USERIO          (__PAGE_KERNEL_BASE | _PAGE_PROT_WKWU | _PAGE_NX)
+#define PAGE_USERIO            __pgprot(__PAGE_USERIO)
+
 /*
  * Whilst the MN10300 can do page protection for execute (given separate data
  * and insn TLBs), we are not supporting it at the moment. Write permission,
@@ -323,11 +338,7 @@ static inline int pte_exec_kernel(pte_t pte)
        return 1;
 }
 
-/*
- * Bits 0 and 1 are taken, split up the 29 bits of offset
- * into this range:
- */
-#define PTE_FILE_MAX_BITS      29
+#define PTE_FILE_MAX_BITS      30
 
 #define pte_to_pgoff(pte)      (pte_val(pte) >> 2)
 #define pgoff_to_pte(off)      __pte((off) << 2 | _PAGE_FILE)
@@ -373,8 +384,13 @@ static inline void ptep_mkdirty(pte_t *ptep)
  * Macro to mark a page protection value as "uncacheable".  On processors which
  * do not support it, this is a no-op.
  */
-#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) _PAGE_CACHE)
+#define pgprot_noncached(prot) __pgprot(pgprot_val(prot) & ~_PAGE_CACHE)
 
+/*
+ * Macro to mark a page protection value as "Write-Through".
+ * On processors which do not support it, this is a no-op.
+ */
+#define pgprot_through(prot)   __pgprot(pgprot_val(prot) | _PAGE_CACHE_WT)
 
 /*
  * Conversion functions: convert a page and protection to a page entry,
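
The corrected pgprot_noncached() (which now really clears _PAGE_CACHE) and the new pgprot_through() are typically applied to a vma's protection before device memory is mapped. A hedged sketch of the common pattern follows; the mmap hook and EXAMPLE_DEVICE_PFN are made up for illustration.

/* Hedged sketch: map device registers to userspace uncached.  The pfn and
 * the file_operations wiring are assumed, not taken from this patch. */
static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long size = vma->vm_end - vma->vm_start;

        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        return remap_pfn_range(vma, vma->vm_start, EXAMPLE_DEVICE_PFN,
                               size, vma->vm_page_prot);
}
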
index f7d4b0d285e8dddb835781ac071898ff5d34dfe9..4c1b5cc14c190edc529c05844360f9fd52b1bb96 100644 (file)
 #ifndef _ASM_PROCESSOR_H
 #define _ASM_PROCESSOR_H
 
+#include <linux/threads.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
 #include <asm/cpu-regs.h>
-#include <linux/threads.h>
+#include <asm/uaccess.h>
+#include <asm/current.h>
 
 /* Forward declaration, a strange C thing */
 struct task_struct;
@@ -33,6 +36,8 @@ struct mm_struct;
        __pc;                                   \
 })
 
+extern void get_mem_info(unsigned long *mem_base, unsigned long *mem_size);
+
 extern void show_registers(struct pt_regs *regs);
 
 /*
@@ -43,17 +48,22 @@ extern void show_registers(struct pt_regs *regs);
 
 struct mn10300_cpuinfo {
        int             type;
-       unsigned long   loops_per_sec;
+       unsigned long   loops_per_jiffy;
        char            hard_math;
-       unsigned long   *pgd_quick;
-       unsigned long   *pte_quick;
-       unsigned long   pgtable_cache_sz;
 };
 
 extern struct mn10300_cpuinfo boot_cpu_data;
 
+#ifdef CONFIG_SMP
+#if CONFIG_NR_CPUS < 2 || CONFIG_NR_CPUS > 8
+# error Sorry, NR_CPUS should be 2 to 8
+#endif
+extern struct mn10300_cpuinfo cpu_data[];
+#define current_cpu_data cpu_data[smp_processor_id()]
+#else  /* CONFIG_SMP */
 #define cpu_data &boot_cpu_data
 #define current_cpu_data boot_cpu_data
+#endif /* CONFIG_SMP */
 
 extern void identify_cpu(struct mn10300_cpuinfo *);
 extern void print_cpu_info(struct mn10300_cpuinfo *);
@@ -76,10 +86,6 @@ extern void dodgy_tsc(void);
  */
 #define TASK_UNMAPPED_BASE     0x30000000
 
-typedef struct {
-       unsigned long   seg;
-} mm_segment_t;
-
 struct fpu_state_struct {
        unsigned long   fs[32];         /* fpu registers */
        unsigned long   fpcr;           /* fpu control register */
@@ -92,20 +98,19 @@ struct thread_struct {
        unsigned long           a3;             /* kernel FP */
        unsigned long           wchan;
        unsigned long           usp;
-       struct pt_regs          *__frame;
        unsigned long           fpu_flags;
 #define THREAD_USING_FPU       0x00000001      /* T if this task is using the FPU */
+#define THREAD_HAS_FPU         0x00000002      /* T if this task owns the FPU right now */
        struct fpu_state_struct fpu_state;
 };
 
-#define INIT_THREAD                            \
-{                                              \
-       .uregs          = init_uregs,           \
-       .pc             = 0,                    \
-       .sp             = 0,                    \
-       .a3             = 0,                    \
-       .wchan          = 0,                    \
-       .__frame        = NULL,                 \
+#define INIT_THREAD            \
+{                              \
+       .uregs  = init_uregs,   \
+       .pc     = 0,            \
+       .sp     = 0,            \
+       .a3     = 0,            \
+       .wchan  = 0,            \
 }
 
 #define INIT_MMAP \
@@ -117,13 +122,20 @@ struct thread_struct {
  * - need to discard the frame stacked by the kernel thread invoking the execve
  *   syscall (see RESTORE_ALL macro)
  */
-#define start_thread(regs, new_pc, new_sp) do {                \
-       set_fs(USER_DS);                                \
-       __frame = current->thread.uregs;                \
-       __frame->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;   \
-       __frame->pc = new_pc;                           \
-       __frame->sp = new_sp;                           \
-} while (0)
+static inline void start_thread(struct pt_regs *regs,
+                               unsigned long new_pc, unsigned long new_sp)
+{
+       struct thread_info *ti = current_thread_info();
+       struct pt_regs *frame0;
+       set_fs(USER_DS);
+
+       frame0 = thread_info_to_uregs(ti);
+       frame0->epsw = EPSW_nSL | EPSW_IE | EPSW_IM;
+       frame0->pc = new_pc;
+       frame0->sp = new_sp;
+       ti->frame = frame0;
+}
+
 
 /* Free all resources held by a thread. */
 extern void release_thread(struct task_struct *);
@@ -157,7 +169,7 @@ unsigned long get_wchan(struct task_struct *p);
 
 static inline void prefetch(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
        asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
@@ -168,7 +180,7 @@ static inline void prefetch(const void *x)
 
 static inline void prefetchw(const void *x)
 {
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_PROC_MN103E010
        asm volatile ("nop; nop; dcpf (%0)" : : "r"(x));
 #else
index 7c2e911052b64699a55d8d23517557db5eb5ae41..b6961811d4458d05a689908ac90686f13420c270 100644 (file)
@@ -40,7 +40,6 @@
 #define        PT_PC           26
 #define NR_PTREGS      27
 
-#ifndef __ASSEMBLY__
 /*
  * This defines the way registers are stored in the event of an exception
  * - the strange order is due to the MOVM instruction
@@ -75,7 +74,6 @@ struct pt_regs {
        unsigned long           epsw;
        unsigned long           pc;
 };
-#endif
 
 /* Arbitrarily choose the same ptrace numbers as used by the Sparc code. */
 #define PTRACE_GETREGS            12
@@ -86,12 +84,7 @@ struct pt_regs {
 /* options set using PTRACE_SETOPTIONS */
 #define PTRACE_O_TRACESYSGOOD     0x00000001
 
-#if defined(__KERNEL__)
-
-extern struct pt_regs *__frame;                /* current frame pointer */
-
-#if !defined(__ASSEMBLY__)
-struct task_struct;
+#ifdef __KERNEL__
 
 #define user_mode(regs)                        (((regs)->epsw & EPSW_nSL) == EPSW_nSL)
 #define instruction_pointer(regs)      ((regs)->pc)
@@ -100,9 +93,7 @@ extern void show_regs(struct pt_regs *);
 
 #define arch_has_single_step() (1)
 
-#endif  /*  !__ASSEMBLY  */
-
 #define profile_pc(regs) ((regs)->pc)
 
-#endif  /*  __KERNEL__  */
+#endif /* __KERNEL__  */
 #endif /* _ASM_PTRACE_H */
index 174523d501323e313a5c7d1e2a5f684e0ddc0bab..10c7502a113fbce1cfeb07b56eec061caa1523c8 100644 (file)
@@ -50,7 +50,7 @@ static inline void mn10300_proc_hard_reset(void)
        RSTCTR |= RSTCTR_CHIPRST;
 }
 
-extern unsigned int watchdog_alert_counter;
+extern unsigned int watchdog_alert_counter[];
 
 extern void watchdog_go(void);
 extern asmlinkage void watchdog_handler(void);
index c295194cc70330b3da8a34fe0db05e2f34a76044..6c14bb1d0d9b8c20f45fff9279052eb43ab113e2 100644 (file)
 
 #include <linux/init.h>
 
-extern void check_rtc_time(void);
 extern void __init calibrate_clock(void);
-extern unsigned long __init get_initial_rtc_time(void);
 
 #else /* !CONFIG_MN10300_RTC */
 
-static inline void check_rtc_time(void)
-{
-}
-
 static inline void calibrate_clock(void)
 {
 }
 
-static inline unsigned long get_initial_rtc_time(void)
-{
-       return 0;
-}
-
 #endif /* !CONFIG_MN10300_RTC */
 
 #include <asm-generic/rtc.h>
diff --git a/arch/mn10300/include/asm/rwlock.h b/arch/mn10300/include/asm/rwlock.h
new file mode 100644 (file)
index 0000000..6d594d4
--- /dev/null
@@ -0,0 +1,125 @@
+/*
+ * Helpers used by both rw spinlocks and rw semaphores.
+ *
+ * Based in part on code from semaphore.h and
+ * spinlock.h Copyright 1996 Linus Torvalds.
+ *
+ * Copyright 1999 Red Hat, Inc.
+ *
+ * Written by Benjamin LaHaise.
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ * 13-Nov-2006 MEI Temporarily delete lock functions for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#ifndef _ASM_RWLOCK_H
+#define _ASM_RWLOCK_H
+
+#define RW_LOCK_BIAS            0x01000000
+
+#ifndef CONFIG_SMP
+
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+#define RW_LOCK_BIAS_STR       "0x01000000"
+
+#define __build_read_lock_ptr(rw, helper)                              \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_read_lock_const(rw, helper)                            \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_read_lock(rw, helper) \
+       do {                                                            \
+               if (__builtin_constant_p(rw))                           \
+                       __build_read_lock_const(rw, helper);            \
+               else                                                    \
+                       __build_read_lock_ptr(rw, helper);              \
+       } while (0)
+
+#define __build_write_lock_ptr(rw, helper)                             \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_write_lock_const(rw, helper)                           \
+       do {                                                            \
+               asm volatile(                                           \
+                       "       mov     (%0),d3                 \n"     \
+                       "       sub     1,d3                    \n"     \
+                       "       mov     d3,(%0)                 \n"     \
+                       "       blt     1f                      \n"     \
+                       "       bra     2f                      \n"     \
+                       "1:     jmp     3f                      \n"     \
+                       "2:                                     \n"     \
+                       "       .section .text.lock,\"ax\"      \n"     \
+                       "3:     call    "helper"[],0            \n"     \
+                       "       jmp     2b                      \n"     \
+                       "       .previous"                              \
+                       :                                               \
+                       : "d" (rw)                                      \
+                       : "memory", "d3", "cc");                        \
+       } while (0)
+
+#define __build_write_lock(rw, helper)                                 \
+       do {                                                            \
+               if (__builtin_constant_p(rw))                           \
+                       __build_write_lock_const(rw, helper);           \
+               else                                                    \
+                       __build_write_lock_ptr(rw, helper);             \
+       } while (0)
+
+#endif /* CONFIG_SMP */
+#endif /* _ASM_RWLOCK_H */
index 6498469e93ac33390d7b2031c4a6781e9eb1c6cd..8320cda32f5aa04ed1cdad0a70dc182cc417c841 100644 (file)
 /* serial port 0 */
 #define        SC0CTR                  __SYSREG(0xd4002000, u16)       /* control reg */
 #define        SC01CTR_CK              0x0007  /* clock source select */
-#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow (serial port 1 only) */
 #define        SC01CTR_CK_IOCLK_8      0x0001  /* - 1/8 IOCLK */
 #define        SC01CTR_CK_IOCLK_32     0x0002  /* - 1/32 IOCLK */
+#define        SC01CTR_CK_EXTERN_8     0x0006  /* - 1/8 external clock */
+#define        SC01CTR_CK_EXTERN       0x0007  /* - external clock */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
 #define        SC0CTR_CK_TM2UFLOW_2    0x0003  /* - 1/2 timer 2 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow (serial port 1 only) */
-#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 1 underflow (serial port 0 only) */
-#define        SC1CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 2 underflow (serial port 1 only) */
+#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 0 underflow (serial port 0 only) */
 #define        SC0CTR_CK_TM2UFLOW_8    0x0005  /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define        SC1CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow (serial port 1 only) */
+#define        SC1CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow (serial port 1 only) */
+#define        SC1CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 1 underflow (serial port 1 only) */
 #define        SC1CTR_CK_TM3UFLOW_8    0x0005  /* - 1/8 timer 3 underflow (serial port 1 only) */
-#define        SC01CTR_CK_EXTERN_8     0x0006  /* - 1/8 external closk */
-#define        SC01CTR_CK_EXTERN       0x0007  /* - external closk */
+#else  /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#define        SC0CTR_CK_TM8UFLOW_8    0x0000  /* - 1/8 timer 8 underflow (serial port 0 only) */
+#define        SC0CTR_CK_TM0UFLOW_8    0x0004  /* - 1/8 timer 0 underflow (serial port 0 only) */
+#define        SC0CTR_CK_TM2UFLOW_8    0x0005  /* - 1/8 timer 2 underflow (serial port 0 only) */
+#define        SC1CTR_CK_TM12UFLOW_8   0x0000  /* - 1/8 timer 12 underflow (serial port 1 only) */
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
 #define        SC01CTR_STB             0x0008  /* stop bit select */
 #define        SC01CTR_STB_1BIT        0x0000  /* - 1 stop bit */
 #define        SC01CTR_STB_2BIT        0x0008  /* - 2 stop bits */
 
 /* serial port 2 */
 #define        SC2CTR                  __SYSREG(0xd4002020, u16)       /* control reg */
+#ifdef CONFIG_AM33_2
 #define        SC2CTR_CK               0x0003  /* clock source select */
 #define        SC2CTR_CK_TM10UFLOW     0x0000  /* - timer 10 underflow */
 #define        SC2CTR_CK_TM2UFLOW      0x0001  /* - timer 2 underflow */
 #define        SC2CTR_CK_EXTERN        0x0002  /* - external clock */
 #define        SC2CTR_CK_TM3UFLOW      0x0003  /* - timer 3 underflow */
+#else  /* CONFIG_AM33_2 */
+#define        SC2CTR_CK               0x0007  /* clock source select */
+#define        SC2CTR_CK_TM9UFLOW_8    0x0000  /* - 1/8 timer 9 underflow */
+#define        SC2CTR_CK_IOCLK_8       0x0001  /* - 1/8 IOCLK */
+#define        SC2CTR_CK_IOCLK_32      0x0002  /* - 1/32 IOCLK */
+#define        SC2CTR_CK_TM3UFLOW_2    0x0003  /* - 1/2 timer 3 underflow */
+#define        SC2CTR_CK_TM1UFLOW_8    0x0004  /* - 1/8 timer 1 underflow */
+#define        SC2CTR_CK_TM3UFLOW_8    0x0005  /* - 1/8 timer 3 underflow */
+#define        SC2CTR_CK_EXTERN_8      0x0006  /* - 1/8 external clock */
+#define        SC2CTR_CK_EXTERN        0x0007  /* - external clock */
+#endif /* CONFIG_AM33_2 */
 #define        SC2CTR_STB              0x0008  /* stop bit select */
 #define        SC2CTR_STB_1BIT         0x0000  /* - 1 stop bit */
 #define        SC2CTR_STB_2BIT         0x0008  /* - 2 stop bits */
 #define SC2ICR_RES             0x04    /* receive error select */
 #define SC2ICR_RI              0x01    /* receive interrupt cause */
 
-#define        SC2TXB                  __SYSREG(0xd4002018, u8)        /* transmit buffer reg */
-#define        SC2RXB                  __SYSREG(0xd4002019, u8)        /* receive buffer reg */
-#define        SC2STR                  __SYSREG(0xd400201c, u8)        /* status reg */
+#define        SC2TXB                  __SYSREG(0xd4002028, u8)        /* transmit buffer reg */
+#define        SC2RXB                  __SYSREG(0xd4002029, u8)        /* receive buffer reg */
+
+#ifdef CONFIG_AM33_2
+#define        SC2STR                  __SYSREG(0xd400202c, u8)        /* status reg */
+#else  /* CONFIG_AM33_2 */
+#define        SC2STR                  __SYSREG(0xd400202c, u16)       /* status reg */
+#endif /* CONFIG_AM33_2 */
 #define SC2STR_OEF             0x0001  /* overrun error found */
 #define SC2STR_PEF             0x0002  /* parity error found */
 #define SC2STR_FEF             0x0004  /* framing error found */
 #define SC2STR_RXF             0x0040  /* receive status */
 #define SC2STR_TXF             0x0080  /* transmit status */
 
+#ifdef CONFIG_AM33_2
 #define        SC2TIM                  __SYSREG(0xd400202d, u8)        /* status reg */
+#endif
 
+#ifdef CONFIG_AM33_2
 #define SC2RXIRQ               24      /* serial 2 Receive IRQ */
 #define SC2TXIRQ               25      /* serial 2 Transmit IRQ */
+#else  /* CONFIG_AM33_2 */
+#define SC2RXIRQ               68      /* serial 2 Receive IRQ */
+#define SC2TXIRQ               69      /* serial 2 Transmit IRQ */
+#endif /* CONFIG_AM33_2 */
 
 #define        SC2RXICR                GxICR(SC2RXIRQ) /* serial 2 receive intr ctrl reg */
 #define        SC2TXICR                GxICR(SC2TXIRQ) /* serial 2 transmit intr ctrl reg */
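
The SCnCTR clock-source values above form a multi-bit select field, so changing the source is a read-modify-write of the control register: clear the CK bits, then OR in the chosen value. A minimal sketch under the non-AM33_2 layout shown above; serial2_select_ioclk_8() is an illustrative helper, not part of this patch:

    static void serial2_select_ioclk_8(void)
    {
            u16 ctr = SC2CTR;               /* __SYSREG() defines are lvalue register accessors */

            ctr &= ~SC2CTR_CK;              /* clear the clock-source select field */
            ctr |= SC2CTR_CK_IOCLK_8;       /* 1/8 IOCLK (non-AM33_2 encoding only) */
            SC2CTR = ctr;
    }
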
index a29445cddd6fc559f631ba9a571aa4fcbbd0dde6..23a79929359943aef854b7ec332dec679efb18e6 100644 (file)
@@ -9,10 +9,8 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
-/*
- * The ASB2305 has an 18.432 MHz clock the UART
- */
-#define BASE_BAUD      (18432000 / 16)
+#ifndef _ASM_SERIAL_H
+#define _ASM_SERIAL_H
 
 /* Standard COM flags (except for COM4, because of the 8514 problem) */
 #ifdef CONFIG_SERIAL_DETECT_IRQ
@@ -34,3 +32,5 @@
 #endif
 
 #include <unit/serial.h>
+
+#endif /* _ASM_SERIAL_H */
index 4eb8c61b7dab42eef2c62a091de76b7edd72cdc6..a3930e43a958d1e4077c028afd94b122963032cb 100644 (file)
@@ -3,6 +3,16 @@
  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Define IPI-IRQ number and add inline/macro function
+ *                  for SMP support.
+ *  22-Jan-2007 MEI Add the define related to SMP_BOOT_IRQ.
+ *  23-Feb-2007 MEI Add the define related to SMP icache invalidate.
+ *  23-Jun-2008 MEI Delete INTC_IPI.
+ *  22-Jul-2008 MEI Add smp_nmi_call_function and related defines.
+ *  04-Aug-2008 MEI Delete USE_DOIRQ_CACHE_IPI.
+ *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public Licence
  * as published by the Free Software Foundation; either version
 #ifndef _ASM_SMP_H
 #define _ASM_SMP_H
 
-#ifdef CONFIG_SMP
-#error SMP not yet supported for MN10300
+#ifndef __ASSEMBLY__
+#include <linux/threads.h>
+#include <linux/cpumask.h>
 #endif
 
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+
+#define RESCHEDULE_IPI         63
+#define CALL_FUNC_SINGLE_IPI   192
+#define LOCAL_TIMER_IPI                193
+#define FLUSH_CACHE_IPI                194
+#define CALL_FUNCTION_NMI_IPI  195
+#define GDB_NMI_IPI            196
+
+#define SMP_BOOT_IRQ           195
+
+#define RESCHEDULE_GxICR_LV    GxICR_LEVEL_6
+#define CALL_FUNCTION_GxICR_LV GxICR_LEVEL_4
+#define LOCAL_TIMER_GxICR_LV   GxICR_LEVEL_4
+#define FLUSH_CACHE_GxICR_LV   GxICR_LEVEL_0
+#define SMP_BOOT_GxICR_LV      GxICR_LEVEL_0
+
+#define TIME_OUT_COUNT_BOOT_IPI        100
+#define DELAY_TIME_BOOT_IPI    75000
+
+
+#ifndef __ASSEMBLY__
+
+/**
+ * raw_smp_processor_id - Determine the raw CPU ID of the CPU running it
+ *
+ * What we really want to do is to use the CPUID hardware CPU register to get
+ * this information, but accesses to that aren't cached, and run at system bus
+ * speed, not CPU speed.  A copy of this value is, however, stored in the
+ * thread_info struct, and that can be cached.
+ *
+ * An alternate way of dealing with this could be to use the EPSW.S bits to
+ * cache this information for systems with up to four CPUs.
+ */
+#if 0
+#define raw_smp_processor_id() (CPUID)
+#else
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 #endif
+
+static inline int cpu_logical_map(int cpu)
+{
+       return cpu;
+}
+
+static inline int cpu_number_map(int cpu)
+{
+       return cpu;
+}
+
+
+extern cpumask_t cpu_boot_map;
+
+extern void smp_init_cpus(void);
+extern void smp_cache_interrupt(void);
+extern void send_IPI_allbutself(int irq);
+extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait);
+
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern int __cpu_disable(void);
+extern void __cpu_die(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#endif /* __ASSEMBLY__ */
+#else /* CONFIG_SMP */
+#ifndef __ASSEMBLY__
+
+static inline void smp_init_cpus(void) {}
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_SMP_H */
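
The raw_smp_processor_id() comment above carries the design point: the CPUID hardware register is uncached and runs at bus speed, so the CPU number is read from the copy cached in thread_info instead. A standalone illustration of that caching idea; the structure and value here are made up for the example:

    #include <stdio.h>

    /* Illustrative only: a per-thread block carrying a cached CPU number,
     * mirroring the thread_info->cpu approach described above. */
    struct demo_thread_info {
            unsigned int cpu;
    };

    static struct demo_thread_info demo_ti = { .cpu = 1 };

    #define demo_raw_smp_processor_id() (demo_ti.cpu)   /* cheap, cacheable read */

    int main(void)
    {
            printf("running on cpu %u\n", demo_raw_smp_processor_id());
            return 0;
    }
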
diff --git a/arch/mn10300/include/asm/smsc911x.h b/arch/mn10300/include/asm/smsc911x.h
new file mode 100644 (file)
index 0000000..2fcd108
--- /dev/null
@@ -0,0 +1 @@
+#include <unit/smsc911x.h>
index 4bf9c8b169e082dfe220e316bd70a7e40591a3f6..93429154e898e72c1f3e39a4e6ac71f8b7cc1ed5 100644 (file)
 #ifndef _ASM_SPINLOCK_H
 #define _ASM_SPINLOCK_H
 
-#error SMP spinlocks not implemented for MN10300
+#include <asm/atomic.h>
+#include <asm/rwlock.h>
+#include <asm/page.h>
 
+/*
+ * Simple spin lock operations.  There are two variants, one clears IRQ's
+ * on the local processor, one does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define arch_spin_is_locked(x) (*(volatile signed char *)(&(x)->slock) != 0)
+#define arch_spin_unlock_wait(x) do { barrier(); } while (arch_spin_is_locked(x))
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+       asm volatile(
+               "       bclr    1,(0,%0)        \n"
+               :
+               : "a"(&lock->slock)
+               : "memory", "cc");
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+       int ret;
+
+       asm volatile(
+               "       mov     1,%0            \n"
+               "       bset    %0,(%1)         \n"
+               "       bne     1f              \n"
+               "       clr     %0              \n"
+               "1:     xor     1,%0            \n"
+               : "=d"(ret)
+               : "a"(&lock->slock)
+               : "memory", "cc");
+
+       return ret;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+       asm volatile(
+               "1:     bset    1,(0,%0)        \n"
+               "       bne     1b              \n"
+               :
+               : "a"(&lock->slock)
+               : "memory", "cc");
+}
+
+static inline void arch_spin_lock_flags(arch_spinlock_t *lock,
+                                        unsigned long flags)
+{
+       int temp;
+
+       asm volatile(
+               "1:     bset    1,(0,%2)        \n"
+               "       beq     3f              \n"
+               "       mov     %1,epsw         \n"
+               "2:     mov     (0,%2),%0       \n"
+               "       or      %0,%0           \n"
+               "       bne     2b              \n"
+               "       mov     %3,%0           \n"
+               "       mov     %0,epsw         \n"
+               "       nop                     \n"
+               "       nop                     \n"
+               "       bra     1b\n"
+               "3:                             \n"
+               : "=&d" (temp)
+               : "d" (flags), "a"(&lock->slock), "i"(EPSW_IE | MN10300_CLI_LEVEL)
+               : "memory", "cc");
+}
+
+#ifdef __KERNEL__
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! it is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to get an
+ * irq-safe write-lock, but readers can get non-irqsafe
+ * read-locks.
+ */
+
+/**
+ * read_can_lock - would read_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_read_can_lock(x) ((int)(x)->lock > 0)
+
+/**
+ * write_can_lock - would write_trylock() succeed?
+ * @lock: the rwlock in question.
+ */
+#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS)
+
+/*
+ * On mn10300, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "contended" bit.
+ */
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_read_lock(rw, "__read_lock_failed");
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               while (atomic_dec_return(count) < 0)
+                       atomic_inc(count);
+       }
+#endif
+}
+
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_write_lock(rw, "__write_lock_failed");
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               while (!atomic_sub_and_test(RW_LOCK_BIAS, count))
+                       atomic_add(RW_LOCK_BIAS, count);
+       }
+#endif
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_read_unlock(rw);
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               atomic_inc(count);
+       }
+#endif
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *rw)
+{
+#if 0 //def CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       __build_write_unlock(rw);
+#else
+       {
+               atomic_t *count = (atomic_t *)rw;
+               atomic_add(RW_LOCK_BIAS, count);
+       }
+#endif
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+       atomic_t *count = (atomic_t *)lock;
+       atomic_dec(count);
+       if (atomic_read(count) >= 0)
+               return 1;
+       atomic_inc(count);
+       return 0;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+       atomic_t *count = (atomic_t *)lock;
+       if (atomic_sub_and_test(RW_LOCK_BIAS, count))
+               return 1;
+       atomic_add(RW_LOCK_BIAS, count);
+       return 0;
+}
+
+#define arch_read_lock_flags(lock, flags)  arch_read_lock(lock)
+#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
+
+#define _raw_spin_relax(lock)  cpu_relax()
+#define _raw_read_relax(lock)  cpu_relax()
+#define _raw_write_relax(lock) cpu_relax()
+
+#endif /* __KERNEL__ */
 #endif /* _ASM_SPINLOCK_H */
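
The rwlock comment above describes the whole scheme: a 32-bit counter starts at RW_LOCK_BIAS, each reader takes one unit, a writer takes the entire bias, and anything other than a clean result signals contention. A standalone sketch of that counting scheme using C11 atomics, for illustration only; the bias value here is the conventional 0x01000000 and all names are made up:

    #include <stdatomic.h>
    #include <stdbool.h>

    #define DEMO_RW_LOCK_BIAS 0x01000000            /* illustrative bias value */

    static atomic_int demo_rw = DEMO_RW_LOCK_BIAS;

    static bool demo_read_trylock(void)
    {
            /* A reader takes one unit; a non-negative remainder means no writer. */
            if (atomic_fetch_sub(&demo_rw, 1) - 1 >= 0)
                    return true;
            atomic_fetch_add(&demo_rw, 1);          /* back out on contention */
            return false;
    }

    static bool demo_write_trylock(void)
    {
            /* A writer needs the full bias: no readers, no other writer. */
            if (atomic_fetch_sub(&demo_rw, DEMO_RW_LOCK_BIAS) == DEMO_RW_LOCK_BIAS)
                    return true;
            atomic_fetch_add(&demo_rw, DEMO_RW_LOCK_BIAS);
            return false;
    }

    static void demo_read_unlock(void)  { atomic_fetch_add(&demo_rw, 1); }
    static void demo_write_unlock(void) { atomic_fetch_add(&demo_rw, DEMO_RW_LOCK_BIAS); }
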
diff --git a/arch/mn10300/include/asm/spinlock_types.h b/arch/mn10300/include/asm/spinlock_types.h
new file mode 100644 (file)
index 0000000..653dc51
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _ASM_SPINLOCK_TYPES_H
+#define _ASM_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct arch_spinlock {
+       unsigned int slock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED      { 0 }
+
+typedef struct {
+       unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED                { RW_LOCK_BIAS }
+
+#endif /* _ASM_SPINLOCK_TYPES_H */
index 9f7c7e17c01ee7d8560a568dd4f2d98636c7f130..8ff3e5aaca4124cba71e8baade685253e551c94c 100644 (file)
 #define _ASM_SYSTEM_H
 
 #include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
 
 #ifdef __KERNEL__
 #ifndef __ASSEMBLY__
 
 #include <linux/kernel.h>
 #include <linux/irqflags.h>
+#include <asm/atomic.h>
+
+#if !defined(CONFIG_LAZY_SAVE_FPU)
+struct fpu_state_struct;
+extern asmlinkage void fpu_save(struct fpu_state_struct *);
+#define switch_fpu(prev, next)                                         \
+       do {                                                            \
+               if ((prev)->thread.fpu_flags & THREAD_HAS_FPU) {        \
+                       (prev)->thread.fpu_flags &= ~THREAD_HAS_FPU;    \
+                       (prev)->thread.uregs->epsw &= ~EPSW_FE;         \
+                       fpu_save(&(prev)->thread.fpu_state);            \
+               }                                                       \
+       } while (0)
+#else
+#define switch_fpu(prev, next) do {} while (0)
+#endif
 
 struct task_struct;
 struct thread_struct;
@@ -30,6 +47,7 @@ struct task_struct *__switch_to(struct thread_struct *prev,
 /* context switching is now performed out-of-line in switch_to.S */
 #define switch_to(prev, next, last)                                    \
 do {                                                                   \
+       switch_fpu(prev, next);                                         \
        current->thread.wchan = (u_long) __builtin_return_address(0);   \
        (last) = __switch_to(&(prev)->thread, &(next)->thread, (prev)); \
        mb();                                                           \
@@ -40,8 +58,6 @@ do {                                                                  \
 
 #define nop() asm volatile ("nop")
 
-#endif /* !__ASSEMBLY__ */
-
 /*
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
@@ -68,64 +84,19 @@ do {                                                                        \
 #define smp_mb()       mb()
 #define smp_rmb()      rmb()
 #define smp_wmb()      wmb()
-#else
+#define set_mb(var, value)  do { xchg(&var, value); } while (0)
+#else  /* CONFIG_SMP */
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
-#endif
-
 #define set_mb(var, value)  do { var = value;  mb(); } while (0)
+#endif /* CONFIG_SMP */
+
 #define set_wmb(var, value) do { var = value; wmb(); } while (0)
 
 #define read_barrier_depends()         do {} while (0)
 #define smp_read_barrier_depends()     do {} while (0)
 
-/*****************************************************************************/
-/*
- * MN10300 doesn't actually have an exchange instruction
- */
-#ifndef __ASSEMBLY__
-
-struct __xchg_dummy { unsigned long a[100]; };
-#define __xg(x) ((struct __xchg_dummy *)(x))
-
-static inline
-unsigned long __xchg(volatile unsigned long *m, unsigned long val)
-{
-       unsigned long retval;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       retval = *m;
-       *m = val;
-       local_irq_restore(flags);
-       return retval;
-}
-
-#define xchg(ptr, v)                                           \
-       ((__typeof__(*(ptr))) __xchg((unsigned long *)(ptr),    \
-                                    (unsigned long)(v)))
-
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-                                     unsigned long old, unsigned long new)
-{
-       unsigned long retval;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       retval = *m;
-       if (retval == old)
-               *m = new;
-       local_irq_restore(flags);
-       return retval;
-}
-
-#define cmpxchg(ptr, o, n)                                     \
-       ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-                                       (unsigned long)(o),     \
-                                       (unsigned long)(n)))
-
 #endif /* !__ASSEMBLY__ */
-
 #endif /* __KERNEL__ */
 #endif /* _ASM_SYSTEM_H */
index 2001cb657a95029ddeb19f40f2706064876e1b55..aa07a4a5d7949406550ba001f7a8faaa631633ec 100644 (file)
 
 #include <asm/page.h>
 
-#ifndef __ASSEMBLY__
-#include <asm/processor.h>
-#endif
-
 #define PREEMPT_ACTIVE         0x10000000
 
 #ifdef CONFIG_4KSTACKS
  *   must also be changed
  */
 #ifndef __ASSEMBLY__
+typedef struct {
+       unsigned long   seg;
+} mm_segment_t;
 
 struct thread_info {
        struct task_struct      *task;          /* main task structure */
        struct exec_domain      *exec_domain;   /* execution domain */
+       struct pt_regs          *frame;         /* current exception frame */
        unsigned long           flags;          /* low level flags */
        __u32                   cpu;            /* current CPU */
        __s32                   preempt_count;  /* 0 => preemptable, <0 => BUG */
@@ -55,6 +55,10 @@ struct thread_info {
        __u8                    supervisor_stack[0];
 };
 
+#define thread_info_to_uregs(ti)                                       \
+       ((struct pt_regs *)                                             \
+        ((unsigned long)ti + THREAD_SIZE - sizeof(struct pt_regs)))
+
 #else /* !__ASSEMBLY__ */
 
 #ifndef __ASM_OFFSETS_H__
@@ -102,6 +106,12 @@ struct thread_info *current_thread_info(void)
        return ti;
 }
 
+static inline __attribute__((const))
+struct pt_regs *current_frame(void)
+{
+       return current_thread_info()->frame;
+}
+
 /* how to get the current stack pointer from C */
 static inline unsigned long current_stack_pointer(void)
 {
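
The new ->frame pointer and thread_info_to_uregs() above depend on a fixed layout: the exception register frame sits at the very top of the THREAD_SIZE block whose base holds the thread_info. A standalone sketch of that address arithmetic; the sizes and the register-frame struct are illustrative, not the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_THREAD_SIZE 0x2000                  /* illustrative 8 KiB stack block */

    struct demo_pt_regs { unsigned long regs[28]; }; /* stand-in for pt_regs */

    /* Same arithmetic as thread_info_to_uregs(): the frame is placed at the
     * top of the thread's stack region, just below the upper boundary. */
    static struct demo_pt_regs *demo_ti_to_uregs(void *ti)
    {
            return (struct demo_pt_regs *)
                    ((uintptr_t)ti + DEMO_THREAD_SIZE - sizeof(struct demo_pt_regs));
    }

    int main(void)
    {
            static unsigned char stack[DEMO_THREAD_SIZE];  /* thread_info would live at the base */
            printf("frame starts at byte offset %u of %u\n",
                   (unsigned int)((unsigned char *)demo_ti_to_uregs(stack) - stack),
                   (unsigned int)DEMO_THREAD_SIZE);
            return 0;
    }
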
index 1d883b7f94ab9b63776255c681d9001d690d28ad..c634977caf66d1338e52fae61f1dbe401fb249fe 100644 (file)
 
 #ifdef __KERNEL__
 
-/* timer prescalar control */
+/*
+ * Timer prescalar control
+ */
 #define        TMPSCNT                 __SYSREG(0xd4003071, u8) /* timer prescaler control */
 #define        TMPSCNT_ENABLE          0x80    /* timer prescaler enable */
 #define        TMPSCNT_DISABLE         0x00    /* timer prescaler disable */
 
-/* 8 bit timers */
+/*
+ * 8-bit timers
+ */
 #define        TM0MD                   __SYSREG(0xd4003000, u8) /* timer 0 mode register */
 #define        TM0MD_SRC               0x07    /* timer source */
 #define        TM0MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM0MD_SRC_IOCLK_8       0x01    /* - 1/8 IOCLK */
 #define        TM0MD_SRC_IOCLK_32      0x02    /* - 1/32 IOCLK */
-#define        TM0MD_SRC_TM2IO         0x03    /* - TM2IO pin input */
 #define        TM0MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM0MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if    defined(CONFIG_AM33_2)
+#define        TM0MD_SRC_TM2IO         0x03    /* - TM2IO pin input */
 #define        TM0MD_SRC_TM0IO         0x07    /* - TM0IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM0MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM0MD_COUNT_ENABLE      0x80    /* timer count enable */
 
@@ -43,7 +49,9 @@
 #define        TM1MD_SRC_TM0CASCADE    0x03    /* - cascade with timer 0 */
 #define        TM1MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM1MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM1MD_SRC_TM1IO         0x07    /* - TM1IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM1MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM1MD_COUNT_ENABLE      0x80    /* timer count enable */
 
@@ -55,7 +63,9 @@
 #define        TM2MD_SRC_TM1CASCADE    0x03    /* - cascade with timer 1 */
 #define        TM2MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM2MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM2MD_SRC_TM2IO         0x07    /* - TM2IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM2MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM2MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM3MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM3MD_SRC_IOCLK_8       0x01    /* - 1/8 IOCLK */
 #define        TM3MD_SRC_IOCLK_32      0x02    /* - 1/32 IOCLK */
-#define        TM3MD_SRC_TM1CASCADE    0x03    /* - cascade with timer 2 */
+#define        TM3MD_SRC_TM2CASCADE    0x03    /* - cascade with timer 2 */
 #define        TM3MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM3MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM3MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM3MD_SRC_TM3IO         0x07    /* - TM3IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM3MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM3MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM2ICR                  GxICR(TM2IRQ)   /* timer 2 uflow intr ctrl reg */
 #define        TM3ICR                  GxICR(TM3IRQ)   /* timer 3 uflow intr ctrl reg */
 
-/* 16-bit timers 4,5 & 7-11 */
+/*
+ * 16-bit timers 4,5 & 7-15
+ */
 #define        TM4MD                   __SYSREG(0xd4003080, u8)   /* timer 4 mode register */
 #define        TM4MD_SRC               0x07    /* timer source */
 #define        TM4MD_SRC_IOCLK         0x00    /* - IOCLK */
 #define        TM4MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM4MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM4MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM4MD_SRC_TM4IO         0x07    /* - TM4IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM4MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM4MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM5MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM5MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM5MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM5MD_SRC_TM5IO         0x07    /* - TM5IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM5MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM5MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM5MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM7MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM7MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM7MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM7MD_SRC_TM7IO         0x07    /* - TM7IO pin input */
+#endif /* CONFIG_AM33_2 */
 #define        TM7MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM7MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM8MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM8MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM8MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM8MD_SRC_TM8IO         0x07    /* - TM8IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM8MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM8MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM8MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM9MD_SRC_TM0UFLOW      0x04    /* - timer 0 underflow */
 #define        TM9MD_SRC_TM1UFLOW      0x05    /* - timer 1 underflow */
 #define        TM9MD_SRC_TM2UFLOW      0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM9MD_SRC_TM9IO         0x07    /* - TM9IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM9MD_SRC_TM7UFLOW      0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM9MD_INIT_COUNTER      0x40    /* initialize TMnBC = TMnBR */
 #define        TM9MD_COUNT_ENABLE      0x80    /* timer count enable */
 
 #define        TM10MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
 #define        TM10MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
 #define        TM10MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM10MD_SRC_TM10IO       0x07    /* - TM10IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM10MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM10MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
 #define        TM10MD_COUNT_ENABLE     0x80    /* timer count enable */
 
 #define        TM11MD_SRC_IOCLK        0x00    /* - IOCLK */
 #define        TM11MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
 #define        TM11MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
-#define        TM11MD_SRC_TM7CASCADE   0x03    /* - cascade with timer 7 */
 #define        TM11MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
 #define        TM11MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
 #define        TM11MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
 #define        TM11MD_SRC_TM11IO       0x07    /* - TM11IO pin input */
+#else  /* !CONFIG_AM33_2 */
+#define        TM11MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#endif /* CONFIG_AM33_2 */
 #define        TM11MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
 #define        TM11MD_COUNT_ENABLE     0x80    /* timer count enable */
 
+#if defined(CONFIG_AM34_2)
+#define        TM12MD                  __SYSREG(0xd4003180, u8)   /* timer 12 mode register */
+#define        TM12MD_SRC              0x07    /* timer source */
+#define        TM12MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM12MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM12MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM12MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM12MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM12MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM12MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM12MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM12MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM13MD                  __SYSREG(0xd4003182, u8)   /* timer 13 mode register */
+#define        TM13MD_SRC              0x07    /* timer source */
+#define        TM13MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM13MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM13MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM13MD_SRC_TM12CASCADE  0x03    /* - cascade with timer 12 */
+#define        TM13MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM13MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM13MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM13MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM13MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM13MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM14MD                  __SYSREG(0xd4003184, u8)   /* timer 14 mode register */
+#define        TM14MD_SRC              0x07    /* timer source */
+#define        TM14MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM14MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM14MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM14MD_SRC_TM13CASCADE  0x03    /* - cascade with timer 13 */
+#define        TM14MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM14MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM14MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM14MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM14MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM14MD_COUNT_ENABLE     0x80    /* timer count enable */
+
+#define        TM15MD                  __SYSREG(0xd4003186, u8)   /* timer 15 mode register */
+#define        TM15MD_SRC              0x07    /* timer source */
+#define        TM15MD_SRC_IOCLK        0x00    /* - IOCLK */
+#define        TM15MD_SRC_IOCLK_8      0x01    /* - 1/8 IOCLK */
+#define        TM15MD_SRC_IOCLK_32     0x02    /* - 1/32 IOCLK */
+#define        TM15MD_SRC_TM0UFLOW     0x04    /* - timer 0 underflow */
+#define        TM15MD_SRC_TM1UFLOW     0x05    /* - timer 1 underflow */
+#define        TM15MD_SRC_TM2UFLOW     0x06    /* - timer 2 underflow */
+#define        TM15MD_SRC_TM7UFLOW     0x07    /* - timer 7 underflow */
+#define        TM15MD_INIT_COUNTER     0x40    /* initialize TMnBC = TMnBR */
+#define        TM15MD_COUNT_ENABLE     0x80    /* timer count enable */
+#endif /* CONFIG_AM34_2 */
+
+
 #define        TM4BR                   __SYSREG(0xd4003090, u16)  /* timer 4 base register */
 #define        TM5BR                   __SYSREG(0xd4003092, u16)  /* timer 5 base register */
+#define        TM45BR                  __SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
 #define        TM7BR                   __SYSREG(0xd4003096, u16)  /* timer 7 base register */
 #define        TM8BR                   __SYSREG(0xd4003098, u16)  /* timer 8 base register */
 #define        TM9BR                   __SYSREG(0xd400309a, u16)  /* timer 9 base register */
+#define        TM89BR                  __SYSREG(0xd4003098, u32)  /* timer 8:9 base register */
 #define        TM10BR                  __SYSREG(0xd400309c, u16)  /* timer 10 base register */
 #define        TM11BR                  __SYSREG(0xd400309e, u16)  /* timer 11 base register */
-#define        TM45BR                  __SYSREG(0xd4003090, u32)  /* timer 4:5 base register */
+#if defined(CONFIG_AM34_2)
+#define        TM12BR                  __SYSREG(0xd4003190, u16)  /* timer 12 base register */
+#define        TM13BR                  __SYSREG(0xd4003192, u16)  /* timer 13 base register */
+#define        TM14BR                  __SYSREG(0xd4003194, u16)  /* timer 14 base register */
+#define        TM15BR                  __SYSREG(0xd4003196, u16)  /* timer 15 base register */
+#endif /* CONFIG_AM34_2 */
 
 #define        TM4BC                   __SYSREG(0xd40030a0, u16)  /* timer 4 binary counter */
 #define        TM5BC                   __SYSREG(0xd40030a2, u16)  /* timer 5 binary counter */
 #define        TM45BC                  __SYSREG(0xd40030a0, u32)  /* timer 4:5 binary counter */
-
 #define        TM7BC                   __SYSREG(0xd40030a6, u16)  /* timer 7 binary counter */
 #define        TM8BC                   __SYSREG(0xd40030a8, u16)  /* timer 8 binary counter */
 #define        TM9BC                   __SYSREG(0xd40030aa, u16)  /* timer 9 binary counter */
+#define        TM89BC                  __SYSREG(0xd40030a8, u32)  /* timer 8:9 binary counter */
 #define        TM10BC                  __SYSREG(0xd40030ac, u16)  /* timer 10 binary counter */
 #define        TM11BC                  __SYSREG(0xd40030ae, u16)  /* timer 11 binary counter */
+#if defined(CONFIG_AM34_2)
+#define        TM12BC                  __SYSREG(0xd40031a0, u16)  /* timer 12 binary counter */
+#define        TM13BC                  __SYSREG(0xd40031a2, u16)  /* timer 13 binary counter */
+#define        TM14BC                  __SYSREG(0xd40031a4, u16)  /* timer 14 binary counter */
+#define        TM15BC                  __SYSREG(0xd40031a6, u16)  /* timer 15 binary counter */
+#endif /* CONFIG_AM34_2 */
 
 #define TM4IRQ                 6       /* timer 4 IRQ */
 #define TM5IRQ                 7       /* timer 5 IRQ */
 #define TM9IRQ                 13      /* timer 9 IRQ */
 #define TM10IRQ                        14      /* timer 10 IRQ */
 #define TM11IRQ                        15      /* timer 11 IRQ */
+#if defined(CONFIG_AM34_2)
+#define TM12IRQ                        64      /* timer 12 IRQ */
+#define TM13IRQ                        65      /* timer 13 IRQ */
+#define TM14IRQ                        66      /* timer 14 IRQ */
+#define TM15IRQ                        67      /* timer 15 IRQ */
+#endif /* CONFIG_AM34_2 */
 
 #define        TM4ICR                  GxICR(TM4IRQ)   /* timer 4 uflow intr ctrl reg */
 #define        TM5ICR                  GxICR(TM5IRQ)   /* timer 5 uflow intr ctrl reg */
 #define        TM9ICR                  GxICR(TM9IRQ)   /* timer 9 uflow intr ctrl reg */
 #define        TM10ICR                 GxICR(TM10IRQ)  /* timer 10 uflow intr ctrl reg */
 #define        TM11ICR                 GxICR(TM11IRQ)  /* timer 11 uflow intr ctrl reg */
-
-/* 16-bit timer 6 */
+#if defined(CONFIG_AM34_2)
+#define        TM12ICR                 GxICR(TM12IRQ)  /* timer 12 uflow intr ctrl reg */
+#define        TM13ICR                 GxICR(TM13IRQ)  /* timer 13 uflow intr ctrl reg */
+#define        TM14ICR                 GxICR(TM14IRQ)  /* timer 14 uflow intr ctrl reg */
+#define        TM15ICR                 GxICR(TM15IRQ)  /* timer 15 uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
+/*
+ * 16-bit timer 6
+ */
 #define        TM6MD                   __SYSREG(0xd4003084, u16)  /* timer6 mode register */
 #define        TM6MD_SRC               0x0007  /* timer source */
 #define        TM6MD_SRC_IOCLK         0x0000  /* - IOCLK */
 #define        TM6MD_SRC_IOCLK_32      0x0002  /* - 1/32 IOCLK */
 #define        TM6MD_SRC_TM0UFLOW      0x0004  /* - timer 0 underflow */
 #define        TM6MD_SRC_TM1UFLOW      0x0005  /* - timer 1 underflow */
-#define        TM6MD_SRC_TM6IOB_BOTH   0x0006  /* - TM6IOB pin input (both edges) */
+#define        TM6MD_SRC_TM2UFLOW      0x0006  /* - timer 2 underflow */
+#if defined(CONFIG_AM33_2)
+/* #define     TM6MD_SRC_TM6IOB_BOTH   0x0006 */       /* - TM6IOB pin input (both edges) */
 #define        TM6MD_SRC_TM6IOB_SINGLE 0x0007  /* - TM6IOB pin input (single edge) */
-#define        TM6MD_CLR_ENABLE        0x0010  /* clear count enable */
+#endif /* CONFIG_AM33_2 */
 #define        TM6MD_ONESHOT_ENABLE    0x0040  /* oneshot count */
+#define        TM6MD_CLR_ENABLE        0x0010  /* clear count enable */
+#if    defined(CONFIG_AM33_2)
 #define        TM6MD_TRIG_ENABLE       0x0080  /* TM6IOB pin trigger enable */
 #define TM6MD_PWM              0x3800  /* PWM output mode */
 #define TM6MD_PWM_DIS          0x0000  /* - disabled */
 #define        TM6MD_PWM_11BIT         0x1800  /* - 11 bits mode */
 #define        TM6MD_PWM_12BIT         0x3000  /* - 12 bits mode */
 #define        TM6MD_PWM_14BIT         0x3800  /* - 14 bits mode */
+#endif /* CONFIG_AM33_2 */
+
 #define        TM6MD_INIT_COUNTER      0x4000  /* initialize TMnBC to zero */
 #define        TM6MD_COUNT_ENABLE      0x8000  /* timer count enable */
 
 #define        TM6MDA                  __SYSREG(0xd40030b4, u8)   /* timer6 cmp/cap A mode reg */
+#define        TM6MDA_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
+#define        TM6MDA_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
+#if    defined(CONFIG_AM33_2)
 #define TM6MDA_OUT             0x07    /* output select */
 #define        TM6MDA_OUT_SETA_RESETB  0x00    /* - set at match A, reset at match B */
 #define        TM6MDA_OUT_SETA_RESETOV 0x01    /* - set at match A, reset at overflow */
 #define        TM6MDA_OUT_RESETA       0x03    /* - reset at match A */
 #define        TM6MDA_OUT_TOGGLE       0x04    /* - toggle on match A */
 #define TM6MDA_MODE            0xc0    /* compare A register mode */
-#define        TM6MDA_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
-#define        TM6MDA_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
 #define        TM6MDA_MODE_CAP_S_EDGE  0x80    /* - capture, single edge mode */
 #define        TM6MDA_MODE_CAP_D_EDGE  0xc0    /* - capture, double edge mode */
 #define TM6MDA_EDGE            0x20    /* compare A edge select */
 #define        TM6MDA_EDGE_FALLING     0x00    /* capture on falling edge */
 #define        TM6MDA_EDGE_RISING      0x20    /* capture on rising edge */
 #define        TM6MDA_CAPTURE_ENABLE   0x10    /* capture enable */
+#else  /* !CONFIG_AM33_2 */
+#define        TM6MDA_MODE             0x40    /* compare A register mode */
+#endif /* CONFIG_AM33_2 */
 
 #define        TM6MDB                  __SYSREG(0xd40030b5, u8)   /* timer6 cmp/cap B mode reg */
+#define        TM6MDB_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
+#define        TM6MDB_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
+#if defined(CONFIG_AM33_2)
 #define TM6MDB_OUT             0x07    /* output select */
 #define        TM6MDB_OUT_SETB_RESETA  0x00    /* - set at match B, reset at match A */
 #define        TM6MDB_OUT_SETB_RESETOV 0x01    /* - set at match B */
 #define        TM6MDB_OUT_RESETB       0x03    /* - reset at match B */
 #define        TM6MDB_OUT_TOGGLE       0x04    /* - toggle on match B */
 #define TM6MDB_MODE            0xc0    /* compare B register mode */
-#define        TM6MDB_MODE_CMP_SINGLE  0x00    /* - compare, single buffer mode */
-#define        TM6MDB_MODE_CMP_DOUBLE  0x40    /* - compare, double buffer mode */
 #define        TM6MDB_MODE_CAP_S_EDGE  0x80    /* - capture, single edge mode */
 #define        TM6MDB_MODE_CAP_D_EDGE  0xc0    /* - capture, double edge mode */
 #define TM6MDB_EDGE            0x20    /* compare B edge select */
 #define        TM6MDB_EDGE_FALLING     0x00    /* capture on falling edge */
 #define        TM6MDB_EDGE_RISING      0x20    /* capture on rising edge */
 #define        TM6MDB_CAPTURE_ENABLE   0x10    /* capture enable */
+#else  /* !CONFIG_AM33_2 */
+#define        TM6MDB_MODE             0x40    /* compare B register mode */
+#endif /* CONFIG_AM33_2 */
 
 #define        TM6CA                   __SYSREG(0xd40030c4, u16)   /* timer6 cmp/capture reg A */
 #define        TM6CB                   __SYSREG(0xd40030d4, u16)   /* timer6 cmp/capture reg B */
 #define        TM6AICR                 GxICR(TM6AIRQ)  /* timer 6A intr control reg */
 #define        TM6BICR                 GxICR(TM6BIRQ)  /* timer 6B intr control reg */
 
+#if defined(CONFIG_AM34_2)
+/*
+ * MTM: OS Tick-Timer
+ */
+#define        TMTMD                   __SYSREG(0xd4004100, u8)        /* Tick Timer mode register */
+#define        TMTMD_TMTLDE            0x40    /* initialize TMTBC = TMTBR */
+#define        TMTMD_TMTCNE            0x80    /* timer count enable       */
+
+#define        TMTBR                   __SYSREG(0xd4004110, u32)       /* Tick Timer base register */
+#define        TMTBC                   __SYSREG(0xd4004120, u32)       /* Tick Timer binary counter */
+
+/*
+ * MTM: OS Timestamp-Timer
+ */
+#define        TMSMD                   __SYSREG(0xd4004140, u8)        /* Timestamp Timer mode register */
+#define        TMSMD_TMSLDE            0x40            /* initialize TMSBC = TMSBR */
+#define        TMSMD_TMSCNE            0x80            /* timer count enable       */
+
+#define        TMSBR                   __SYSREG(0xd4004150, u32)       /* Timestamp Timer base register */
+#define        TMSBC                   __SYSREG(0xd4004160, u32)       /* Timestamp Timer binary counter */
+
+#define TMTIRQ                 119             /* OS Tick timer   IRQ */
+#define TMSIRQ                 120             /* Timestamp timer IRQ */
+
+#define        TMTICR                  GxICR(TMTIRQ)   /* OS Tick timer   uflow intr ctrl reg */
+#define        TMSICR                  GxICR(TMSIRQ)   /* Timestamp timer uflow intr ctrl reg */
+#endif /* CONFIG_AM34_2 */
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMER_REGS_H */
index 8d031f9e117d7db554bf3802cd357d36bbbb110b..bd4e90dfe6c26d37c366abec8275a1d0d1e7b774 100644 (file)
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ - this should probably be set
-                                * to something appropriate, but what? */
-
-extern cycles_t cacheflush_time;
+#define CLOCK_TICK_RATE MN10300_JCCLK /* Underlying HZ */
 
 #ifdef __KERNEL__
 
+extern cycles_t cacheflush_time;
+
 static inline cycles_t get_cycles(void)
 {
        return read_timestamp_counter();
 }
 
+extern int init_clockevents(void);
+extern int init_clocksource(void);
+
+static inline void setup_jiffies_interrupt(int irq,
+                                          struct irqaction *action)
+{
+       u16 tmp;
+       setup_irq(irq, action);
+       set_intr_level(irq, NUM2GxICR_LEVEL(CONFIG_TIMER_IRQ_LEVEL));
+       GxICR(irq) |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
+       tmp = GxICR(irq);
+}
+
 #endif /* __KERNEL__ */
 
 #endif /* _ASM_TIMEX_H */
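
setup_jiffies_interrupt() above does three things in one place: install the irqaction, program the line's priority from CONFIG_TIMER_IRQ_LEVEL, and enable/request it in its GxICR register; the trailing read of GxICR(irq) looks intended to flush the write before returning. A hedged usage sketch with illustrative names; the real caller is init_clockevents() in cevt-mn10300.c further down:

    static irqreturn_t demo_tick_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static struct irqaction demo_tick_action = {
            .handler = demo_tick_handler,
            .flags   = IRQF_DISABLED | IRQF_TIMER,
            .name    = "demo tick",
    };

    /* e.g. from timer setup code: setup_jiffies_interrupt(TMJCIRQ, &demo_tick_action); */
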
index 1a7e29281c5d0642ca0e66b91ca054f8353989b1..efddd6e1adeadef731997d6b78214e23e962a20c 100644 (file)
 #ifndef _ASM_TLBFLUSH_H
 #define _ASM_TLBFLUSH_H
 
+#include <linux/mm.h>
 #include <asm/processor.h>
 
-#define __flush_tlb()                                          \
-do {                                                           \
-       int w;                                                  \
-       __asm__ __volatile__                                    \
-               ("      mov %1,%0               \n"             \
-                "      or %2,%0                \n"             \
-                "      mov %0,%1               \n"             \
-                : "=d"(w)                                      \
-                : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)      \
-                : "cc", "memory"                               \
-                );                                             \
-} while (0)
+struct tlb_state {
+       struct mm_struct        *active_mm;
+       int                     state;
+};
+DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
 
-#define __flush_tlb_all() __flush_tlb()
-#define __flush_tlb_one(addr) __flush_tlb()
+/**
+ * local_flush_tlb - Flush the current MM's entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb(void)
+{
+       int w;
+       asm volatile(
+               "       mov     %1,%0           \n"
+               "       or      %2,%0           \n"
+               "       mov     %0,%1           \n"
+               : "=d"(w)
+               : "m"(MMUCTR), "i"(MMUCTR_IIV|MMUCTR_DIV)
+               : "cc", "memory");
+}
+
+/**
+ * local_flush_tlb_all - Flush all entries from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_all(void)
+{
+       local_flush_tlb();
+}
 
+/**
+ * local_flush_tlb_one - Flush one entry from the local CPU's TLBs
+ */
+static inline void local_flush_tlb_one(unsigned long addr)
+{
+       local_flush_tlb();
+}
+
+/**
+ * local_flush_tlb_page - Flush a page's entry from the local CPU's TLBs
+ * @mm: The MM to flush for
+ * @addr: The address of the target page in RAM (not its page struct)
+ */
+static inline
+void local_flush_tlb_page(struct mm_struct *mm, unsigned long addr)
+{
+       unsigned long pteu, flags, cnx;
+
+       addr &= PAGE_MASK;
+
+       local_irq_save(flags);
+
+       cnx = 1;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+       cnx = mm->context.tlbpid[smp_processor_id()];
+#endif
+       if (cnx) {
+               pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+               pteu |= cnx & xPTEU_PID;
+#endif
+               IPTEU = pteu;
+               DPTEU = pteu;
+               if (IPTEL & xPTEL_V)
+                       IPTEL = 0;
+               if (DPTEL & xPTEL_V)
+                       DPTEL = 0;
+       }
+       local_irq_restore(flags);
+}
 
 /*
  * TLB flushing:
@@ -40,41 +94,61 @@ do {                                                                \
  *  - flush_tlb_range(mm, start, end) flushes a range of pages
  *  - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
  */
-#define flush_tlb_all()                                \
-do {                                           \
-       preempt_disable();                      \
-       __flush_tlb_all();                      \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_tlb_mm(mm)                       \
-do {                                           \
-       preempt_disable();                      \
-       __flush_tlb_all();                      \
-       preempt_enable();                       \
-} while (0)
-
-#define flush_tlb_range(vma, start, end)                       \
-do {                                                           \
-       unsigned long __s __attribute__((unused)) = (start);    \
-       unsigned long __e __attribute__((unused)) = (end);      \
-       preempt_disable();                                      \
-       __flush_tlb_all();                                      \
-       preempt_enable();                                       \
-} while (0)
-
-
-#define __flush_tlb_global()                   flush_tlb_all()
-#define flush_tlb()                            flush_tlb_all()
-#define flush_tlb_kernel_range(start, end)                     \
-do {                                                           \
-       unsigned long __s __attribute__((unused)) = (start);    \
-       unsigned long __e __attribute__((unused)) = (end);      \
-       flush_tlb_all();                                        \
-} while (0)
-
-extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
-
-#define flush_tlb_pgtables(mm, start, end)     do {} while (0)
+#ifdef CONFIG_SMP
+
+#include <asm/smp.h>
+
+extern void flush_tlb_all(void);
+extern void flush_tlb_current_task(void);
+extern void flush_tlb_mm(struct mm_struct *);
+extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
+
+#define flush_tlb()            flush_tlb_current_task()
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       flush_tlb_mm(vma->vm_mm);
+}
+
+#else   /* CONFIG_SMP */
+
+static inline void flush_tlb_all(void)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+                                  unsigned long start, unsigned long end)
+{
+       preempt_disable();
+       local_flush_tlb_all();
+       preempt_enable();
+}
+
+#define flush_tlb_page(vma, addr)      local_flush_tlb_page((vma)->vm_mm, addr)
+#define flush_tlb()                    flush_tlb_all()
+
+#endif /* CONFIG_SMP */
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+                                         unsigned long end)
+{
+       flush_tlb_all();
+}
+
+static inline void flush_tlb_pgtables(struct mm_struct *mm,
+                                     unsigned long start, unsigned long end)
+{
+}
 
 #endif /* _ASM_TLBFLUSH_H */
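
The page-granular flush above (local_flush_tlb_page()) writes the address, and optionally the PID, into IPTEU/DPTEU and clears the matching entries if they are valid. A hedged usage sketch, not part of the patch: after a PTE is rewritten, generic code drops the stale translation through flush_tlb_page(), which on !SMP builds reduces to local_flush_tlb_page(vma->vm_mm, addr):

    #include <linux/mm.h>

    /* demo_update_pte() is illustrative; the call pattern is the point. */
    static void demo_update_pte(struct vm_area_struct *vma, unsigned long addr,
                                pte_t *ptep, pte_t newpte)
    {
            set_pte_at(vma->vm_mm, addr, ptep, newpte);     /* install the new mapping */
            flush_tlb_page(vma, addr);                      /* invalidate any cached old entry */
    }
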
index 197a7af3dd8aeaa7014df5856877de3f0924ee71..679dee0bbd089dddabdbe230abf3874cebb174aa 100644 (file)
@@ -14,9 +14,8 @@
 /*
  * User space memory access functions
  */
-#include <linux/sched.h>
+#include <linux/thread_info.h>
 #include <asm/page.h>
-#include <asm/pgtable.h>
 #include <asm/errno.h>
 
 #define VERIFY_READ 0
@@ -29,7 +28,6 @@
  *
  * For historical reasons, these macros are grossly misnamed.
  */
-
 #define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
 
 #define KERNEL_XDS     MAKE_MM_SEG(0xBFFFFFFF)
@@ -377,7 +375,7 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from,
 
 
 #if 0
-#error don't use - these macros don't increment to & from pointers
+#error "don't use - these macros don't increment to & from pointers"
 /* Optimize just a little bit when we know the size of the move. */
 #define __constant_copy_user(to, from, size)   \
 do {                                           \
index 23f2ab67574c171b72bcecfc1199572c1604ebcd..8f5f1e81baf5c1527a2a82d1380f88918dd38d07 100644 (file)
@@ -3,13 +3,16 @@
 #
 extra-y := head.o init_task.o vmlinux.lds
 
-obj-y   := process.o signal.o entry.o fpu.o traps.o irq.o \
+fpu-obj-y := fpu-nofpu.o fpu-nofpu-low.o
+fpu-obj-$(CONFIG_FPU) := fpu.o fpu-low.o
+
+obj-y   := process.o signal.o entry.o traps.o irq.o \
           ptrace.o setup.o time.o sys_mn10300.o io.o kthread.o \
-          switch_to.o mn10300_ksyms.o kernel_execve.o
+          switch_to.o mn10300_ksyms.o kernel_execve.o $(fpu-obj-y)
 
-obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
+obj-$(CONFIG_SMP) += smp.o smp-low.o
 
-obj-$(CONFIG_FPU) += fpu-low.o
+obj-$(CONFIG_MN10300_WD_TIMER) += mn10300-watchdog.o mn10300-watchdog-low.o
 
 obj-$(CONFIG_MN10300_TTYSM) += mn10300-serial.o mn10300-serial-low.o \
                               mn10300-debug.o
@@ -17,7 +20,7 @@ obj-$(CONFIG_GDBSTUB) += gdb-stub.o gdb-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSx) += gdb-io-serial.o gdb-io-serial-low.o
 obj-$(CONFIG_GDBSTUB_ON_TTYSMx) += gdb-io-ttysm.o gdb-io-ttysm-low.o
 
-ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
+ifeq ($(CONFIG_MN10300_CACHE_ENABLED),y)
 obj-$(CONFIG_GDBSTUB) += gdb-cache.o
 endif
 
@@ -25,3 +28,5 @@ obj-$(CONFIG_MN10300_RTC) += rtc.o
 obj-$(CONFIG_PROFILE) += profile.o profile-low.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_CSRC_MN10300) += csrc-mn10300.o
+obj-$(CONFIG_CEVT_MN10300) += cevt-mn10300.o
index 02dc7e461fef68e8c505cd4e8eea2ae12bbe2b01..96f24fab7de6ee0e559369e195a2fa7b5120a8f2 100644 (file)
@@ -23,6 +23,7 @@ void foo(void)
 
        OFFSET(TI_task,                 thread_info, task);
        OFFSET(TI_exec_domain,          thread_info, exec_domain);
+       OFFSET(TI_frame,                thread_info, frame);
        OFFSET(TI_flags,                thread_info, flags);
        OFFSET(TI_cpu,                  thread_info, cpu);
        OFFSET(TI_preempt_count,        thread_info, preempt_count);
@@ -66,7 +67,15 @@ void foo(void)
        OFFSET(THREAD_SP,               thread_struct, sp);
        OFFSET(THREAD_A3,               thread_struct, a3);
        OFFSET(THREAD_USP,              thread_struct, usp);
-       OFFSET(THREAD_FRAME,            thread_struct, __frame);
+#ifdef CONFIG_FPU
+       OFFSET(THREAD_FPU_FLAGS,        thread_struct, fpu_flags);
+       OFFSET(THREAD_FPU_STATE,        thread_struct, fpu_state);
+       DEFINE(__THREAD_USING_FPU,      THREAD_USING_FPU);
+       DEFINE(__THREAD_HAS_FPU,        THREAD_HAS_FPU);
+#endif /* CONFIG_FPU */
+       BLANK();
+
+       OFFSET(TASK_THREAD,             task_struct, thread);
        BLANK();
 
        DEFINE(CLONE_VM_asm,            CLONE_VM);
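
The OFFSET()/DEFINE() entries above exist so assembly code can address C structure members by constant displacement; kbuild turns each entry into a #define holding the member's byte offset, which entry.S then uses in forms like "mov (TI_flags,a2),d2". A standalone illustration of the underlying offsetof() idea, with a made-up layout:

    #include <stddef.h>
    #include <stdio.h>

    /* Illustrative stand-in; field order and sizes are not the kernel's. */
    struct demo_ti_layout {
            void *task;
            void *frame;
            unsigned long flags;
    };

    int main(void)
    {
            /* asm-offsets effectively emits numbers like these as TI_frame, TI_flags, ... */
            printf("TI_frame = %zu\n", offsetof(struct demo_ti_layout, frame));
            printf("TI_flags = %zu\n", offsetof(struct demo_ti_layout, flags));
            return 0;
    }
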
diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c
new file mode 100644 (file)
index 0000000..d4cb535
--- /dev/null
@@ -0,0 +1,131 @@
+/* MN10300 clockevents
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+#ifdef CONFIG_SMP
+#if (CONFIG_NR_CPUS > 2) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+#error "This doesn't scale well! Need per-core local timers."
+#endif
+#else /* CONFIG_SMP */
+#define stop_jiffies_counter1()
+#define reload_jiffies_counter1(x)
+#define TMJC1IRQ TMJCIRQ
+#endif
+
+
+static int next_event(unsigned long delta,
+                     struct clock_event_device *evt)
+{
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0) {
+               stop_jiffies_counter();
+               reload_jiffies_counter(delta - 1);
+       } else {
+               stop_jiffies_counter1();
+               reload_jiffies_counter1(delta - 1);
+       }
+       return 0;
+}
+
+static void set_clock_mode(enum clock_event_mode mode,
+                          struct clock_event_device *evt)
+{
+       /* Nothing to do ...  */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mn10300_clockevent_device);
+static DEFINE_PER_CPU(struct irqaction, timer_irq);
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+       struct clock_event_device *cd;
+       unsigned int cpu = smp_processor_id();
+
+       if (cpu == 0)
+               stop_jiffies_counter();
+       else
+               stop_jiffies_counter1();
+
+       cd = &per_cpu(mn10300_clockevent_device, cpu);
+       cd->event_handler(cd);
+
+       return IRQ_HANDLED;
+}
+
+static void event_handler(struct clock_event_device *dev)
+{
+}
+
+int __init init_clockevents(void)
+{
+       struct clock_event_device *cd;
+       struct irqaction *iact;
+       unsigned int cpu = smp_processor_id();
+
+       cd = &per_cpu(mn10300_clockevent_device, cpu);
+
+       if (cpu == 0) {
+               stop_jiffies_counter();
+               cd->irq = TMJCIRQ;
+       } else {
+               stop_jiffies_counter1();
+               cd->irq = TMJC1IRQ;
+       }
+
+       cd->name                = "Timestamp";
+       cd->features            = CLOCK_EVT_FEAT_ONESHOT;
+
+       /* Calculate the min / max delta */
+       clockevent_set_clock(cd, MN10300_JCCLK);
+
+       cd->max_delta_ns        = clockevent_delta2ns(TMJCBR_MAX, cd);
+       cd->min_delta_ns        = clockevent_delta2ns(100, cd);
+
+       cd->rating              = 200;
+       cd->cpumask             = cpumask_of(smp_processor_id());
+       cd->set_mode            = set_clock_mode;
+       cd->event_handler       = event_handler;
+       cd->set_next_event      = next_event;
+
+       iact = &per_cpu(timer_irq, cpu);
+       iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER;
+       iact->handler = timer_interrupt;
+
+       clockevents_register_device(cd);
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+       /* setup timer irq affinity so it only runs on this cpu */
+       {
+               struct irq_desc *desc;
+               desc = irq_to_desc(cd->irq);
+               cpumask_copy(desc->affinity, cpumask_of(cpu));
+               iact->flags |= IRQF_NOBALANCING;
+       }
+#endif
+
+       if (cpu == 0) {
+               reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+               iact->name = "CPU0 Timer";
+       } else {
+               reload_jiffies_counter1(MN10300_JC_PER_HZ - 1);
+               iact->name = "CPU1 Timer";
+       }
+
+       setup_jiffies_interrupt(cd->irq, iact);
+
+       return 0;
+}
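
A brief usage note: init_clockevents() above is declared in <asm/timex.h> earlier in this diff, and presumably the arch time-init path runs init_clocksource() followed by init_clockevents() on each CPU it brings up. A hedged sketch of such a caller; the function name is illustrative and the real hook-up is outside this excerpt:

    #include <linux/init.h>
    #include <asm/timex.h>

    static void __init demo_time_setup(void)
    {
            init_clocksource();     /* register the TSC-backed clocksource (csrc-mn10300.c below) */
            init_clockevents();     /* register this CPU's one-shot clock event device */
    }
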
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
new file mode 100644 (file)
index 0000000..ba2f0c4
--- /dev/null
@@ -0,0 +1,35 @@
+/* MN10300 clocksource
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by Mark Salter (msalter@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/clocksource.h>
+#include <linux/init.h>
+#include <asm/timex.h>
+#include "internal.h"
+
+static cycle_t mn10300_read(struct clocksource *cs)
+{
+       return read_timestamp_counter();
+}
+
+static struct clocksource clocksource_mn10300 = {
+       .name   = "TSC",
+       .rating = 200,
+       .read   = mn10300_read,
+       .mask   = CLOCKSOURCE_MASK(32),
+       .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+int __init init_clocksource(void)
+{
+       startup_timestamp_counter();
+       clocksource_set_clock(&clocksource_mn10300, MN10300_TSCCLK);
+       clocksource_register(&clocksource_mn10300);
+       return 0;
+}
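
clocksource_set_clock() (also declared in internal.h below, body not shown) does the same job in the opposite direction: the clocksource's mult/shift turn timestamp-counter cycles into nanoseconds. Again a hedged sketch under the same assumption:

	#include <linux/clocksource.h>
	#include <linux/time.h>
	#include <asm/div64.h>

	/* Assumed sketch: pick mult/shift so that ns = (cycles * mult) >> shift
	 * for a counter running at "hz" -- the inverse of the clock event case. */
	void clocksource_set_clock(struct clocksource *cs, unsigned int hz)
	{
		u64 temp;
		u32 shift;

		for (shift = 32; shift > 0; shift--) {
			temp = (u64) NSEC_PER_SEC << shift;
			do_div(temp, hz);
			if ((temp >> 32) == 0)
				break;
		}
		cs->shift = shift;
		cs->mult = (u32) temp;
	}
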
index 3d394b4eefba1e5a90f14c22997b1ccbbcb3fc5e..f00b9bafcd3ebb7aa8d53e0e8db61aedf15652e2 100644 (file)
 #include <asm/asm-offsets.h>
 #include <asm/frame.inc>
 
+#if defined(CONFIG_SMP) && defined(CONFIG_GDBSTUB)
+#include <asm/gdb-stub.h>
+#endif /* CONFIG_SMP && CONFIG_GDBSTUB */
+
 #ifdef CONFIG_PREEMPT
-#define preempt_stop           __cli
+#define preempt_stop           LOCAL_IRQ_DISABLE
 #else
 #define preempt_stop
 #define resume_kernel          restore_all
 #endif
 
-       .macro __cli
-       and     ~EPSW_IM,epsw
-       or      EPSW_IE|MN10300_CLI_LEVEL,epsw
-       nop
-       nop
-       nop
-       .endm
-       .macro __sti
-       or      EPSW_IE|EPSW_IM_7,epsw
-       .endm
-
-
        .am33_2
 
 ###############################################################################
@@ -88,7 +80,7 @@ syscall_call:
 syscall_exit:
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
        mov     (TI_flags,a2),d2
        btst    _TIF_ALLWORK_MASK,d2
        bne     syscall_exit_work
@@ -105,7 +97,7 @@ restore_all:
 syscall_exit_work:
        btst    _TIF_SYSCALL_TRACE,d2
        beq     work_pending
-       __sti                           # could let syscall_trace_exit() call
+       LOCAL_IRQ_ENABLE                # could let syscall_trace_exit() call
                                        # schedule() instead
        mov     fp,d0
        call    syscall_trace_exit[],0  # do_syscall_trace(regs)
@@ -121,7 +113,7 @@ work_resched:
 
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
 
        # is there any work to be done other than syscall tracing?
        mov     (TI_flags,a2),d2
@@ -168,7 +160,7 @@ ret_from_intr:
 ENTRY(resume_userspace)
        # make sure we don't miss an interrupt setting need_resched or
        # sigpending between sampling and the rti
-       __cli
+       LOCAL_IRQ_DISABLE
 
        # is there any work to be done on int/exception return?
        mov     (TI_flags,a2),d2
@@ -178,7 +170,7 @@ ENTRY(resume_userspace)
 
 #ifdef CONFIG_PREEMPT
 ENTRY(resume_kernel)
-       __cli
+       LOCAL_IRQ_DISABLE
        mov     (TI_preempt_count,a2),d0        # non-zero preempt_count ?
        cmp     0,d0
        bne     restore_all
@@ -214,31 +206,6 @@ ENTRY(irq_handler)
 
        jmp     ret_from_intr
 
-###############################################################################
-#
-# Monitor Signal handler entry point
-#
-###############################################################################
-ENTRY(monitor_signal)
-       movbu   (0xae000001),d1
-       cmp     1,d1
-       beq     monsignal
-       ret     [],0
-
-monsignal:
-       or      EPSW_NMID,epsw
-       mov     d0,a0
-       mov     a0,sp
-       mov     (REG_EPSW,fp),d1
-       and     ~EPSW_nSL,d1
-       mov     d1,(REG_EPSW,fp)
-       movm    (sp),[d2,d3,a2,a3,exreg0,exreg1,exother]
-       mov     (sp),a1
-       mov     a1,usp
-       movm    (sp),[other]
-       add     4,sp
-here:  jmp     0x8e000008-here+0x8e000008
-
 ###############################################################################
 #
 # Double Fault handler entry point
@@ -276,6 +243,10 @@ double_fault_loop:
 ENTRY(raw_bus_error)
        add     -4,sp
        mov     d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d0
+       mov     d0,(MMUCTR)
+#endif
        mov     (BCBERR),d0             # what
        btst    BCBERR_BEMR_DMA,d0      # see if it was an external bus error
        beq     __common_exception_aux  # it wasn't
@@ -302,11 +273,88 @@ ENTRY(nmi_handler)
        add     -4,sp
        mov     d0,(sp)
        mov     (TBR),d0
+
+#ifdef CONFIG_SMP
+       add     -4,sp
+       mov     d0,(sp)                 # save d0(TBR)
+       movhu   (NMIAGR),d0
+       and     NMIAGR_GN,d0
+       lsr     0x2,d0
+       cmp     CALL_FUNCTION_NMI_IPI,d0
+       bne     5f                      # if not call function, jump
+
+       # function call nmi ipi
+       add     4,sp                    # no need to store TBR
+       mov     GxICR_DETECT,d0         # clear NMI request
+       movbu   d0,(GxICR(CALL_FUNCTION_NMI_IPI))
+       movhu   (GxICR(CALL_FUNCTION_NMI_IPI)),d0
+       and     ~EPSW_NMID,epsw         # enable NMI
+
+       mov     (sp),d0                 # restore d0
+       SAVE_ALL
+       call    smp_nmi_call_function_interrupt[],0
+       RESTORE_ALL
+
+5:
+#ifdef CONFIG_GDBSTUB
+       cmp     GDB_NMI_IPI,d0
+       bne     3f                      # if not gdb nmi ipi, jump
+
+       # gdb nmi ipi
+       add     4,sp                    # no need to store TBR
+       mov     GxICR_DETECT,d0         # clear NMI
+       movbu   d0,(GxICR(GDB_NMI_IPI))
+       movhu   (GxICR(GDB_NMI_IPI)),d0
+       and     ~EPSW_NMID,epsw         # enable NMI
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       mov     (gdbstub_nmi_opr_type),d0
+       cmp     GDBSTUB_NMI_CACHE_PURGE,d0
+       bne     4f                      # if not gdb cache purge, jump
+
+       # gdb cache purge nmi ipi
+       add     -20,sp
+       mov     d1,(4,sp)
+       mov     a0,(8,sp)
+       mov     a1,(12,sp)
+       mov     mdr,d0
+       mov     d0,(16,sp)
+       call    gdbstub_local_purge_cache[],0
+       mov     0x1,d0
+       mov     (CPUID),d1
+       asl     d1,d0
+       mov     gdbstub_nmi_cpumask,a0
+       bclr    d0,(a0)
+       mov     (4,sp),d1
+       mov     (8,sp),a0
+       mov     (12,sp),a1
+       mov     (16,sp),d0
+       mov     d0,mdr
+       add     20,sp
+       mov     (sp),d0
+       add     4,sp
+       rti
+4:
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+       # gdb wait nmi ipi
+       mov     (sp),d0
+       SAVE_ALL
+       call    gdbstub_nmi_wait[],0
+       RESTORE_ALL
+3:
+#endif /* CONFIG_GDBSTUB */
+       mov     (sp),d0                 # restore TBR to d0
+       add     4,sp
+#endif /* CONFIG_SMP */
+
        bra     __common_exception_nonmi
 
 ENTRY(__common_exception)
        add     -4,sp
        mov     d0,(sp)
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d0
+       mov     d0,(MMUCTR)
+#endif
 
 __common_exception_aux:
        mov     (TBR),d0
@@ -331,15 +379,21 @@ __common_exception_nonmi:
        mov     d0,(REG_ORIG_D0,fp)
 
 #ifdef CONFIG_GDBSTUB
+#ifdef CONFIG_SMP
+       call    gdbstub_busy_check[],0
+       and     d0,d0                   # check return value
+       beq     2f
+#else  /* CONFIG_SMP */
        btst    0x01,(gdbstub_busy)
        beq     2f
+#endif /* CONFIG_SMP */
        and     ~EPSW_IE,epsw
        mov     fp,d0
        mov     a2,d1
        call    gdbstub_exception[],0   # gdbstub itself caused an exception
        bra     restore_all
 2:
-#endif
+#endif /* CONFIG_GDBSTUB */
 
        mov     fp,d0                   # arg 0: stacked register file
        mov     a2,d1                   # arg 1: exception number
@@ -374,11 +428,7 @@ ENTRY(set_excp_vector)
        add     exception_table,d0
        mov     d1,(d0)
        mov     4,d1
-#if defined(CONFIG_MN10300_CACHE_WBACK)
-       jmp     mn10300_dcache_flush_inv_range2
-#else
        ret     [],0
-#endif
 
 ###############################################################################
 #
index 96cfd47e68d50354a4d279845b4f7f7cea40703a..78df25cfae2936f8071ab5d7f312dc363d58c2d6 100644 (file)
@@ -8,25 +8,14 @@
  * as published by the Free Software Foundation; either version
  * 2 of the Licence, or (at your option) any later version.
  */
+#include <linux/linkage.h>
 #include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
 
-###############################################################################
-#
-# void fpu_init_state(void)
-# - initialise the FPU
-#
-###############################################################################
-       .globl  fpu_init_state
-       .type   fpu_init_state,@function
-fpu_init_state:
-       mov     epsw,d0
-       or      EPSW_FE,epsw
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
-       nop
-       nop
-       nop
-#endif
+.macro FPU_INIT_STATE_ALL
        fmov    0,fs0
        fmov    fs0,fs1
        fmov    fs0,fs2
@@ -60,7 +49,100 @@ fpu_init_state:
        fmov    fs0,fs30
        fmov    fs0,fs31
        fmov    FPCR_INIT,fpcr
+.endm
+
+.macro FPU_SAVE_ALL areg,dreg
+       fmov    fs0,(\areg+)
+       fmov    fs1,(\areg+)
+       fmov    fs2,(\areg+)
+       fmov    fs3,(\areg+)
+       fmov    fs4,(\areg+)
+       fmov    fs5,(\areg+)
+       fmov    fs6,(\areg+)
+       fmov    fs7,(\areg+)
+       fmov    fs8,(\areg+)
+       fmov    fs9,(\areg+)
+       fmov    fs10,(\areg+)
+       fmov    fs11,(\areg+)
+       fmov    fs12,(\areg+)
+       fmov    fs13,(\areg+)
+       fmov    fs14,(\areg+)
+       fmov    fs15,(\areg+)
+       fmov    fs16,(\areg+)
+       fmov    fs17,(\areg+)
+       fmov    fs18,(\areg+)
+       fmov    fs19,(\areg+)
+       fmov    fs20,(\areg+)
+       fmov    fs21,(\areg+)
+       fmov    fs22,(\areg+)
+       fmov    fs23,(\areg+)
+       fmov    fs24,(\areg+)
+       fmov    fs25,(\areg+)
+       fmov    fs26,(\areg+)
+       fmov    fs27,(\areg+)
+       fmov    fs28,(\areg+)
+       fmov    fs29,(\areg+)
+       fmov    fs30,(\areg+)
+       fmov    fs31,(\areg+)
+       fmov    fpcr,\dreg
+       mov     \dreg,(\areg)
+.endm
+
+.macro FPU_RESTORE_ALL areg,dreg
+       fmov    (\areg+),fs0
+       fmov    (\areg+),fs1
+       fmov    (\areg+),fs2
+       fmov    (\areg+),fs3
+       fmov    (\areg+),fs4
+       fmov    (\areg+),fs5
+       fmov    (\areg+),fs6
+       fmov    (\areg+),fs7
+       fmov    (\areg+),fs8
+       fmov    (\areg+),fs9
+       fmov    (\areg+),fs10
+       fmov    (\areg+),fs11
+       fmov    (\areg+),fs12
+       fmov    (\areg+),fs13
+       fmov    (\areg+),fs14
+       fmov    (\areg+),fs15
+       fmov    (\areg+),fs16
+       fmov    (\areg+),fs17
+       fmov    (\areg+),fs18
+       fmov    (\areg+),fs19
+       fmov    (\areg+),fs20
+       fmov    (\areg+),fs21
+       fmov    (\areg+),fs22
+       fmov    (\areg+),fs23
+       fmov    (\areg+),fs24
+       fmov    (\areg+),fs25
+       fmov    (\areg+),fs26
+       fmov    (\areg+),fs27
+       fmov    (\areg+),fs28
+       fmov    (\areg+),fs29
+       fmov    (\areg+),fs30
+       fmov    (\areg+),fs31
+       mov     (\areg),\dreg
+       fmov    \dreg,fpcr
+.endm
 
+###############################################################################
+#
+# void fpu_init_state(void)
+# - initialise the FPU
+#
+###############################################################################
+       .globl  fpu_init_state
+       .type   fpu_init_state,@function
+fpu_init_state:
+       mov     epsw,d0
+       or      EPSW_FE,epsw
+
+#ifdef CONFIG_MN10300_PROC_MN103E010
+       nop
+       nop
+       nop
+#endif
+       FPU_INIT_STATE_ALL
 #ifdef CONFIG_MN10300_PROC_MN103E010
        nop
        nop
@@ -89,40 +171,7 @@ fpu_save:
        nop
 #endif
        mov     d0,a0
-       fmov    fs0,(a0+)
-       fmov    fs1,(a0+)
-       fmov    fs2,(a0+)
-       fmov    fs3,(a0+)
-       fmov    fs4,(a0+)
-       fmov    fs5,(a0+)
-       fmov    fs6,(a0+)
-       fmov    fs7,(a0+)
-       fmov    fs8,(a0+)
-       fmov    fs9,(a0+)
-       fmov    fs10,(a0+)
-       fmov    fs11,(a0+)
-       fmov    fs12,(a0+)
-       fmov    fs13,(a0+)
-       fmov    fs14,(a0+)
-       fmov    fs15,(a0+)
-       fmov    fs16,(a0+)
-       fmov    fs17,(a0+)
-       fmov    fs18,(a0+)
-       fmov    fs19,(a0+)
-       fmov    fs20,(a0+)
-       fmov    fs21,(a0+)
-       fmov    fs22,(a0+)
-       fmov    fs23,(a0+)
-       fmov    fs24,(a0+)
-       fmov    fs25,(a0+)
-       fmov    fs26,(a0+)
-       fmov    fs27,(a0+)
-       fmov    fs28,(a0+)
-       fmov    fs29,(a0+)
-       fmov    fs30,(a0+)
-       fmov    fs31,(a0+)
-       fmov    fpcr,d0
-       mov     d0,(a0)
+       FPU_SAVE_ALL    a0,d0
 #ifdef CONFIG_MN10300_PROC_MN103E010
        nop
        nop
@@ -135,63 +184,75 @@ fpu_save:
 
 ###############################################################################
 #
-# void fpu_restore(struct fpu_state_struct *)
-# - restore the fpu state
-# - note that an FPU Operational exception might occur during this process
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is enabled
 #
 ###############################################################################
-       .globl  fpu_restore
-       .type   fpu_restore,@function
-fpu_restore:
-       mov     epsw,d1
-       or      EPSW_FE,epsw            /* enable the FPU so we can access it */
-
-#ifdef CONFIG_MN10300_PROC_MN103E010
+       .type   fpu_disabled,@function
+       .globl  fpu_disabled
+fpu_disabled:
+       or      EPSW_nAR|EPSW_FE,epsw
        nop
        nop
-#endif
-       mov     d0,a0
-       fmov    (a0+),fs0
-       fmov    (a0+),fs1
-       fmov    (a0+),fs2
-       fmov    (a0+),fs3
-       fmov    (a0+),fs4
-       fmov    (a0+),fs5
-       fmov    (a0+),fs6
-       fmov    (a0+),fs7
-       fmov    (a0+),fs8
-       fmov    (a0+),fs9
-       fmov    (a0+),fs10
-       fmov    (a0+),fs11
-       fmov    (a0+),fs12
-       fmov    (a0+),fs13
-       fmov    (a0+),fs14
-       fmov    (a0+),fs15
-       fmov    (a0+),fs16
-       fmov    (a0+),fs17
-       fmov    (a0+),fs18
-       fmov    (a0+),fs19
-       fmov    (a0+),fs20
-       fmov    (a0+),fs21
-       fmov    (a0+),fs22
-       fmov    (a0+),fs23
-       fmov    (a0+),fs24
-       fmov    (a0+),fs25
-       fmov    (a0+),fs26
-       fmov    (a0+),fs27
-       fmov    (a0+),fs28
-       fmov    (a0+),fs29
-       fmov    (a0+),fs30
-       fmov    (a0+),fs31
-       mov     (a0),d0
-       fmov    d0,fpcr
-#ifdef CONFIG_MN10300_PROC_MN103E010
        nop
+
+       mov     sp,a1
+       mov     (a1),d1                 /* get epsw of user context */
+       and     ~(THREAD_SIZE-1),a1     /* a1: (thread_info *ti) */
+       mov     (TI_task,a1),a2         /* a2: (task_struct *tsk) */
+       btst    EPSW_nSL,d1
+       beq     fpu_used_in_kernel
+
+       or      EPSW_FE,d1
+       mov     d1,(sp)
+       mov     (TASK_THREAD+THREAD_FPU_FLAGS,a2),d1
+#ifndef CONFIG_LAZY_SAVE_FPU
+       or      __THREAD_HAS_FPU,d1
+       mov     d1,(TASK_THREAD+THREAD_FPU_FLAGS,a2)
+#else  /* !CONFIG_LAZY_SAVE_FPU */
+       mov     (fpu_state_owner),a0
+       cmp     0,a0
+       beq     fpu_regs_save_end
+
+       mov     (TASK_THREAD+THREAD_UREGS,a0),a1
+       add     TASK_THREAD+THREAD_FPU_STATE,a0
+       FPU_SAVE_ALL a0,d0
+
+       mov     (REG_EPSW,a1),d0
+       and     ~EPSW_FE,d0
+       mov     d0,(REG_EPSW,a1)
+
+fpu_regs_save_end:
+       mov     a2,(fpu_state_owner)
+#endif /* !CONFIG_LAZY_SAVE_FPU */
+
+       btst    __THREAD_USING_FPU,d1
+       beq     fpu_regs_init
+       add     TASK_THREAD+THREAD_FPU_STATE,a2
+       FPU_RESTORE_ALL a2,d0
+       rti
+
+fpu_regs_init:
+       FPU_INIT_STATE_ALL
+       add     TASK_THREAD+THREAD_FPU_FLAGS,a2
+       bset    __THREAD_USING_FPU,(0,a2)
+       rti
+
+fpu_used_in_kernel:
+       and     ~(EPSW_nAR|EPSW_FE),epsw
        nop
        nop
-#endif
 
-       mov     d1,epsw
-       ret     [],0
+       add     -4,sp
+       SAVE_ALL
+       mov     -1,d0
+       mov     d0,(REG_ORIG_D0,fp)
+
+       and     ~EPSW_NMID,epsw
+
+       mov     fp,d0
+       call    fpu_disabled_in_kernel[],0
+       jmp     ret_from_exception
 
-       .size   fpu_restore,.-fpu_restore
+       .size   fpu_disabled,.-fpu_disabled
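
For readers following the CONFIG_LAZY_SAVE_FPU branch of fpu_disabled above: it performs in assembly what the old C handler removed from arch/mn10300/kernel/fpu.c further down in this diff used to do: spill the previous owner's registers, claim the FPU for the faulting task, then either reload that task's saved context or hand it a freshly initialised FPU. A rough C restatement follows, reusing the names visible elsewhere in the patch (fpu_save(), fpu_state_owner, is_using_fpu()/set_using_fpu()); fpu_restore() and fpu_init_state() here stand for the FPU_RESTORE_ALL and FPU_INIT_STATE_ALL macro sequences above. This is a reading aid only, not part of the patch.

	#include <linux/sched.h>
	#include <asm/fpu.h>
	#include <asm/ptrace.h>

	/* Reading aid: C restatement of the lazy-save path coded in assembly above. */
	static void lazy_fpu_handover(struct task_struct *tsk, struct pt_regs *uregs)
	{
		/* spill the previous owner's FPU context, if any */
		if (fpu_state_owner) {
			fpu_save(&fpu_state_owner->thread.fpu_state);
			fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
		}

		/* the faulting task now owns the FPU */
		fpu_state_owner = tsk;
		uregs->epsw |= EPSW_FE;

		if (is_using_fpu(tsk)) {
			/* reload the task's previously saved FPU context */
			fpu_restore(&tsk->thread.fpu_state);
		} else {
			/* first FPU use: give the task a clean FPU */
			fpu_init_state();
			set_using_fpu(tsk);
		}
	}
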
diff --git a/arch/mn10300/kernel/fpu-nofpu-low.S b/arch/mn10300/kernel/fpu-nofpu-low.S
new file mode 100644 (file)
index 0000000..7ea087a
--- /dev/null
@@ -0,0 +1,39 @@
+/* MN10300 Low level FPU management operations
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/linkage.h>
+#include <asm/cpu-regs.h>
+#include <asm/smp.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+###############################################################################
+#
+# void fpu_disabled(void)
+# - handle an exception due to the FPU being disabled
+#   when CONFIG_FPU is disabled
+#
+###############################################################################
+       .type   fpu_disabled,@function
+       .globl  fpu_disabled
+fpu_disabled:
+       add     -4,sp
+       SAVE_ALL
+       mov     -1,d0
+       mov     d0,(REG_ORIG_D0,fp)
+
+       and     ~EPSW_NMID,epsw
+
+       mov     fp,d0
+       call    unexpected_fpu_exception[],0
+       jmp     ret_from_exception
+
+       .size   fpu_disabled,.-fpu_disabled
diff --git a/arch/mn10300/kernel/fpu-nofpu.c b/arch/mn10300/kernel/fpu-nofpu.c
new file mode 100644 (file)
index 0000000..31c765b
--- /dev/null
@@ -0,0 +1,30 @@
+/* MN10300 FPU management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <asm/fpu.h>
+
+/*
+ * handle an FPU operational exception
+ * - there's a possibility that if the FPU is asynchronous, the signal might
+ *   be meant for a process other than the current one
+ */
+asmlinkage
+void unexpected_fpu_exception(struct pt_regs *regs, enum exception_code code)
+{
+       panic("An FPU exception was received, but there's no FPU enabled.");
+}
+
+/*
+ * fill in the FPU structure for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpreg)
+{
+       return 0; /* not valid */
+}
index e705f25ad5ff7e5e44b8da939d989b778ca77f6e..5f9c3fa19a85fb4459987789229387352cf1a809 100644 (file)
 #include <asm/fpu.h>
 #include <asm/elf.h>
 #include <asm/exceptions.h>
+#include <asm/system.h>
 
+#ifdef CONFIG_LAZY_SAVE_FPU
 struct task_struct *fpu_state_owner;
+#endif
 
 /*
- * handle an exception due to the FPU being disabled
+ * error functions in FPU disabled exception
  */
-asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
+asmlinkage void fpu_disabled_in_kernel(struct pt_regs *regs)
 {
-       struct task_struct *tsk = current;
-
-       if (!user_mode(regs))
-               die_if_no_fixup("An FPU Disabled exception happened in"
-                               " kernel space\n",
-                               regs, code);
-
-#ifdef CONFIG_FPU
-       preempt_disable();
-
-       /* transfer the last process's FPU state to memory */
-       if (fpu_state_owner) {
-               fpu_save(&fpu_state_owner->thread.fpu_state);
-               fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
-       }
-
-       /* the current process now owns the FPU state */
-       fpu_state_owner = tsk;
-       regs->epsw |= EPSW_FE;
-
-       /* load the FPU with the current process's FPU state or invent a new
-        * clean one if the process doesn't have one */
-       if (is_using_fpu(tsk)) {
-               fpu_restore(&tsk->thread.fpu_state);
-       } else {
-               fpu_init_state();
-               set_using_fpu(tsk);
-       }
-
-       preempt_enable();
-#else
-       {
-               siginfo_t info;
-
-               info.si_signo = SIGFPE;
-               info.si_errno = 0;
-               info.si_addr = (void *) tsk->thread.uregs->pc;
-               info.si_code = FPE_FLTINV;
-
-               force_sig_info(SIGFPE, &info, tsk);
-       }
-#endif  /* CONFIG_FPU */
+       die_if_no_fixup("An FPU Disabled exception happened in kernel space\n",
+                       regs, EXCEP_FPU_DISABLED);
 }
 
 /*
@@ -71,15 +34,16 @@ asmlinkage void fpu_disabled(struct pt_regs *regs, enum exception_code code)
  */
 asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
 {
-       struct task_struct *tsk = fpu_state_owner;
+       struct task_struct *tsk = current;
        siginfo_t info;
+       u32 fpcr;
 
        if (!user_mode(regs))
                die_if_no_fixup("An FPU Operation exception happened in"
                                " kernel space\n",
                                regs, code);
 
-       if (!tsk)
+       if (!is_using_fpu(tsk))
                die_if_no_fixup("An FPU Operation exception happened,"
                                " but the FPU is not in use",
                                regs, code);
@@ -89,48 +53,45 @@ asmlinkage void fpu_exception(struct pt_regs *regs, enum exception_code code)
        info.si_addr = (void *) tsk->thread.uregs->pc;
        info.si_code = FPE_FLTINV;
 
-#ifdef CONFIG_FPU
-       {
-               u32 fpcr;
+       unlazy_fpu(tsk);
 
-               /* get FPCR (we need to enable the FPU whilst we do this) */
-               asm volatile("  or      %1,epsw         \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-                            "  nop                     \n"
-                            "  nop                     \n"
-                            "  nop                     \n"
-#endif
-                            "  fmov    fpcr,%0         \n"
-#ifdef CONFIG_MN10300_PROC_MN103E010
-                            "  nop                     \n"
-                            "  nop                     \n"
-                            "  nop                     \n"
-#endif
-                            "  and     %2,epsw         \n"
-                            : "=&d"(fpcr)
-                            : "i"(EPSW_FE), "i"(~EPSW_FE)
-                            );
-
-               if (fpcr & FPCR_EC_Z)
-                       info.si_code = FPE_FLTDIV;
-               else if (fpcr & FPCR_EC_O)
-                       info.si_code = FPE_FLTOVF;
-               else if (fpcr & FPCR_EC_U)
-                       info.si_code = FPE_FLTUND;
-               else if (fpcr & FPCR_EC_I)
-                       info.si_code = FPE_FLTRES;
-       }
-#endif
+       fpcr = tsk->thread.fpu_state.fpcr;
+
+       if (fpcr & FPCR_EC_Z)
+               info.si_code = FPE_FLTDIV;
+       else if (fpcr & FPCR_EC_O)
+               info.si_code = FPE_FLTOVF;
+       else if (fpcr & FPCR_EC_U)
+               info.si_code = FPE_FLTUND;
+       else if (fpcr & FPCR_EC_I)
+               info.si_code = FPE_FLTRES;
 
        force_sig_info(SIGFPE, &info, tsk);
 }
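
fpu_exception() now relies on unlazy_fpu() (a helper in asm/fpu.h that is not part of this hunk) to flush the task's live FPU registers back into tsk->thread.fpu_state before fpcr is decoded. Its body is not shown here; the sketch below is an assumed shape that mirrors the CONFIG_LAZY_SAVE_FPU split used by fpu_setup_sigcontext() below, and should be read as an illustration rather than the header's actual definition.

	#include <linux/preempt.h>
	#include <linux/sched.h>

	/* Assumed sketch of the helper called by fpu_exception() above: make
	 * sure tsk->thread.fpu_state holds the task's current FPU contents. */
	static inline void unlazy_fpu(struct task_struct *tsk)
	{
		preempt_disable();
	#ifndef CONFIG_LAZY_SAVE_FPU
		if (tsk->thread.fpu_flags & THREAD_HAS_FPU)
			fpu_save(&tsk->thread.fpu_state);
	#else
		if (fpu_state_owner == tsk)
			fpu_save(&tsk->thread.fpu_state);
	#endif
		preempt_enable();
	}
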
 
+/*
+ * handle an FPU invalid_op exception
+ * - Derived from DO_EINFO() macro in arch/mn10300/kernel/traps.c
+ */
+asmlinkage void fpu_invalid_op(struct pt_regs *regs, enum exception_code code)
+{
+       siginfo_t info;
+
+       if (!user_mode(regs))
+               die_if_no_fixup("FPU invalid opcode", regs, code);
+
+       info.si_signo = SIGILL;
+       info.si_errno = 0;
+       info.si_code = ILL_COPROC;
+       info.si_addr = (void *) regs->pc;
+       force_sig_info(info.si_signo, &info, current);
+}
+
 /*
  * save the FPU state to a signal context
  */
 int fpu_setup_sigcontext(struct fpucontext *fpucontext)
 {
-#ifdef CONFIG_FPU
        struct task_struct *tsk = current;
 
        if (!is_using_fpu(tsk))
@@ -142,11 +103,19 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
         */
        preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               fpu_save(&tsk->thread.fpu_state);
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+       }
+#else /* !CONFIG_LAZY_SAVE_FPU */
        if (fpu_state_owner == tsk) {
                fpu_save(&tsk->thread.fpu_state);
                fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
                fpu_state_owner = NULL;
        }
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
        preempt_enable();
 
@@ -161,9 +130,6 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
                return -1;
 
        return 1;
-#else
-       return 0;
-#endif
 }
 
 /*
@@ -171,17 +137,23 @@ int fpu_setup_sigcontext(struct fpucontext *fpucontext)
  */
 void fpu_kill_state(struct task_struct *tsk)
 {
-#ifdef CONFIG_FPU
        /* disown anything left in the FPU */
        preempt_disable();
 
+#ifndef CONFIG_LAZY_SAVE_FPU
+       if (tsk->thread.fpu_flags & THREAD_HAS_FPU) {
+               tsk->thread.uregs->epsw &= ~EPSW_FE;
+               tsk->thread.fpu_flags &= ~THREAD_HAS_FPU;
+       }
+#else /* !CONFIG_LAZY_SAVE_FPU */
        if (fpu_state_owner == tsk) {
                fpu_state_owner->thread.uregs->epsw &= ~EPSW_FE;
                fpu_state_owner = NULL;
        }
+#endif /* !CONFIG_LAZY_SAVE_FPU */
 
        preempt_enable();
-#endif
+
        /* we no longer have a valid current FPU state */
        clear_using_fpu(tsk);
 }
@@ -195,8 +167,7 @@ int fpu_restore_sigcontext(struct fpucontext *fpucontext)
        int ret;
 
        /* load up the old FPU state */
-       ret = copy_from_user(&tsk->thread.fpu_state,
-                            fpucontext,
+       ret = copy_from_user(&tsk->thread.fpu_state, fpucontext,
                             min(sizeof(struct fpu_state_struct),
                                 sizeof(struct fpucontext)));
        if (!ret)
index 4998b24f5d3a4e2081e3983b3b0e43cdf86bc670..b1d0152e96cb95741b0e31fd2ebf0782bbb0283e 100644 (file)
@@ -18,6 +18,7 @@
 #include <asm/thread_info.h>
 #include <asm/frame.inc>
 #include <asm/intctl-regs.h>
+#include <asm/irqflags.h>
 #include <unit/serial.h>
 
        .text
@@ -69,7 +70,7 @@ gdbstub_io_rx_overflow:
        bra     gdbstub_io_rx_done
 
 gdbstub_io_rx_enter:
-       or      EPSW_IE|EPSW_IM_1,epsw
+       LOCAL_CHANGE_INTR_MASK_LEVEL(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL+1))
        add     -4,sp
        SAVE_ALL
 
@@ -80,7 +81,7 @@ gdbstub_io_rx_enter:
        mov     fp,d0
        call    gdbstub_rx_irq[],0      # gdbstub_rx_irq(regs,excep)
 
-       and     ~EPSW_IE,epsw
+       LOCAL_CLI
        bclr    0x01,(gdbstub_busy)
 
        .globl gdbstub_return
index ae663dc717e94ab807fbeb3169955e8fbaebd04b..0d5d63c91dc3ee74457d183a11d59a149022d0c5 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/exceptions.h>
 #include <asm/serial-regs.h>
 #include <unit/serial.h>
+#include <asm/smp.h>
 
 /*
  * initialise the GDB stub
@@ -45,22 +46,34 @@ void gdbstub_io_init(void)
        XIRQxICR(GDBPORT_SERIAL_IRQ) = 0;
        tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
+#if   CONFIG_GDBSTUB_IRQ_LEVEL == 0
        IVAR0 = EXCEP_IRQ_LEVEL0;
-       set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 1
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 2
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 3
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 4
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+#elif CONFIG_GDBSTUB_IRQ_LEVEL == 5
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+#else
+#error "Unknown irq level for gdbstub."
+#endif
+
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+               gdbstub_io_rx_handler);
 
        XIRQxICR(GDBPORT_SERIAL_IRQ) &= ~GxICR_REQUEST;
-       XIRQxICR(GDBPORT_SERIAL_IRQ) = GxICR_ENABLE | GxICR_LEVEL_0;
+       XIRQxICR(GDBPORT_SERIAL_IRQ) =
+               GxICR_ENABLE | NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL);
        tmp = XIRQxICR(GDBPORT_SERIAL_IRQ);
 
        GDBPORT_SERIAL_IER = UART_IER_RDI | UART_IER_RLSI;
 
        /* permit IRQs up to and including the GDB stub's configured priority level */
-       asm volatile(
-               "       and %0,epsw     \n"
-               "       or %1,epsw      \n"
-               :
-               : "i"(~EPSW_IM), "i"(EPSW_IE | EPSW_IM_1)
-               );
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -87,6 +100,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
        unsigned ix;
        u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+       int cpu;
+#endif
 
        *_ch = 0xff;
 
@@ -104,8 +120,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
                if (nonblock)
                        return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-               watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+       for (cpu = 0; cpu < NR_CPUS; cpu++)
+               watchdog_alert_counter[cpu] = 0;
+#endif
                goto try_again;
        }
 
index a560bbc3137d3c93fd2a14eddbb79ed5b8abc23f..97dfda23342c8d9fe8aef19d8c9d4dfaeb170a7d 100644 (file)
@@ -58,9 +58,12 @@ void __init gdbstub_io_init(void)
        gdbstub_io_set_baud(115200);
 
        /* we want to get serial receive interrupts */
-       set_intr_level(gdbstub_port->rx_irq, GxICR_LEVEL_0);
-       set_intr_level(gdbstub_port->tx_irq, GxICR_LEVEL_0);
-       set_intr_stub(EXCEP_IRQ_LEVEL0, gdbstub_io_rx_handler);
+       set_intr_level(gdbstub_port->rx_irq,
+               NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+       set_intr_level(gdbstub_port->tx_irq,
+               NUM2GxICR_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL));
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_GDBSTUB_IRQ_LEVEL),
+               gdbstub_io_rx_handler);
 
        *gdbstub_port->rx_icr |= GxICR_ENABLE;
        tmp = *gdbstub_port->rx_icr;
@@ -84,12 +87,7 @@ void __init gdbstub_io_init(void)
        tmp = *gdbstub_port->_control;
 
        /* permit IRQs up to and including the GDB stub's configured priority level only */
-       asm volatile(
-               "       and %0,epsw     \n"
-               "       or %1,epsw      \n"
-               :
-               : "i"(~EPSW_IM), "i"(EPSW_IE|EPSW_IM_1)
-               );
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 }
 
 /*
@@ -184,6 +182,9 @@ int gdbstub_io_rx_char(unsigned char *_ch, int nonblock)
 {
        unsigned ix;
        u8 ch, st;
+#if defined(CONFIG_MN10300_WD_TIMER)
+       int cpu;
+#endif
 
        *_ch = 0xff;
 
@@ -201,8 +202,9 @@ try_again:
                if (nonblock)
                        return -EAGAIN;
 #ifdef CONFIG_MN10300_WD_TIMER
-               watchdog_alert_counter = 0;
-#endif /* CONFIG_MN10300_WD_TIMER */
+       for (cpu = 0; cpu < NR_CPUS; cpu++)
+               watchdog_alert_counter[cpu] = 0;
+#endif
                goto try_again;
        }
 
index 41b11706c8ed1ee70f0742bec2d9ac301187aea1..a5fc3f05309b2fc26960a36fe68ef1339683c4fd 100644 (file)
@@ -440,15 +440,11 @@ static const unsigned char gdbstub_insn_sizes[256] =
 
 static int __gdbstub_mark_bp(u8 *addr, int ix)
 {
-       if (addr < (u8 *) 0x70000000UL)
-               return 0;
-       /* 70000000-7fffffff: vmalloc area */
-       if (addr < (u8 *) 0x80000000UL)
+       /* vmalloc area */
+       if (((u8 *) VMALLOC_START <= addr) && (addr < (u8 *) VMALLOC_END))
                goto okay;
-       if (addr < (u8 *) 0x8c000000UL)
-               return 0;
-       /* 8c000000-93ffffff: SRAM, SDRAM */
-       if (addr < (u8 *) 0x94000000UL)
+       /* SRAM, SDRAM */
+       if (((u8 *) 0x80000000UL <= addr) && (addr < (u8 *) 0xa0000000UL))
                goto okay;
        return 0;
 
@@ -1197,9 +1193,8 @@ static int gdbstub(struct pt_regs *regs, enum exception_code excep)
        mn10300_set_gdbleds(1);
 
        asm volatile("mov mdr,%0" : "=d"(mdr));
-       asm volatile("mov epsw,%0" : "=d"(epsw));
-       asm volatile("mov %0,epsw"
-                    :: "d"((epsw & ~EPSW_IM) | EPSW_IE | EPSW_IM_1));
+       local_save_flags(epsw);
+       local_change_intr_mask_level(NUM2EPSW_IM(CONFIG_GDBSTUB_IRQ_LEVEL + 1));
 
        gdbstub_store_fpu();
 
index 14f27f3bfaf4f9007b86825cfb4ff5416acb196c..73e00fc78072d69623b5baf2e06f45ac4aa29a0b 100644 (file)
 #include <asm/frame.inc>
 #include <asm/param.h>
 #include <unit/serial.h>
+#ifdef CONFIG_SMP
+#include <asm/smp.h>
+#include <asm/intctl-regs.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
        __HEAD
 
        .globl  _start
        .type   _start,@function
 _start:
+#ifdef CONFIG_SMP
+       #
+       # If this is a secondary CPU (AP), then deal with that elsewhere
+       #
+       mov     (CPUID),d3
+       and     CPUID_MASK,d3
+       bne     startup_secondary
+
+       #
+       # We're dealing with the primary CPU (BP) here.
+       # Keep the BP's D0, D1 and D2 registers for the boot check.
+       #
+
+       # Set up the Boot IPI for each secondary CPU
+       mov     0x1,a0
+loop_set_secondary_icr:
+       mov     a0,a1
+       asl     CROSS_ICR_CPU_SHIFT,a1
+       add     CROSS_GxICR(SMP_BOOT_IRQ,0),a1
+       movhu   (a1),d3
+       or      GxICR_ENABLE|GxICR_LEVEL_0,d3
+       movhu   d3,(a1)
+       movhu   (a1),d3                         # flush
+       inc     a0
+       cmp     NR_CPUS,a0
+       bne     loop_set_secondary_icr
+#endif /* CONFIG_SMP */
+
        # save commandline pointer
        mov     d0,a3
 
        # preload the PGD pointer register
        mov     swapper_pg_dir,d0
        mov     d0,(PTBR)
+       clr     d0
+       movbu   d0,(PIDR)
 
        # turn on the TLBs
        mov     MMUCTR_IIV|MMUCTR_DIV,d0
        mov     d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
        mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
        mov     d0,(MMUCTR)
 
        # turn on AM33v2 exception handling mode and set the trap table base
@@ -51,6 +91,11 @@ _start:
        mov     d0,(TBR)
 
        # invalidate and enable both of the caches
+#ifdef CONFIG_SMP
+       mov     ECHCTR,a0
+       clr     d0
+       mov     d0,(a0)
+#endif
        mov     CHCTR,a0
        clr     d0
        movhu   d0,(a0)                                 # turn off first
@@ -61,18 +106,18 @@ _start:
        btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy
        lne
 
-#ifndef CONFIG_MN10300_CACHE_DISABLED
+#ifdef CONFIG_MN10300_CACHE_ENABLED
 #ifdef CONFIG_MN10300_CACHE_WBACK
 #ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
 #else
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
-#endif /* CACHE_DISABLED */
+#endif /* NOWRALLOC */
 #else
        mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
 #endif /* WBACK */
        movhu   d0,(a0)                                 # enable
-#endif /* NOWRALLOC */
+#endif /* ENABLED */
 
        # turn on RTS on the debug serial port if applicable
 #ifdef CONFIG_MN10300_UNIT_ASB2305
@@ -206,6 +251,44 @@ __no_parameters:
        call    processor_init[],0
        call    unit_init[],0
 
+#ifdef CONFIG_SMP
+       # mark the primary CPU in cpu_boot_map
+       mov     cpu_boot_map,a0
+       mov     0x1,d0
+       mov     d0,(a0)
+
+       # signal each secondary CPU to begin booting
+       mov     0x1,d2                          # CPU ID
+
+loop_request_boot_secondary:
+       mov     d2,a0
+       # send SMP_BOOT_IPI to secondary CPU
+       asl     CROSS_ICR_CPU_SHIFT,a0
+       add     CROSS_GxICR(SMP_BOOT_IRQ,0),a0
+       movhu   (a0),d0
+       or      GxICR_REQUEST|GxICR_DETECT,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0                         # flush
+
+       # wait up to 100ms for AP's IPI to be received
+       clr     d3
+wait_on_secondary_boot:
+       mov     DELAY_TIME_BOOT_IPI,d0
+       call    __delay[],0
+       inc     d3
+       mov     cpu_boot_map,a0
+       mov     (a0),d0
+       lsr     d2,d0
+       btst    0x1,d0
+       bne     1f
+       cmp     TIME_OUT_COUNT_BOOT_IPI,d3
+       bne     wait_on_secondary_boot
+1:
+       inc     d2
+       cmp     NR_CPUS,d2
+       bne     loop_request_boot_secondary
+#endif /* CONFIG_SMP */
+
 #ifdef CONFIG_GDBSTUB
        call    gdbstub_init[],0
 
@@ -217,7 +300,118 @@ __gdbstub_pause:
 #endif
 
        jmp     start_kernel
-       .size   _start, _start-.
+       .size   _start,.-_start
+
+###############################################################################
+#
+# Secondary CPU boot point
+#
+###############################################################################
+#ifdef CONFIG_SMP
+startup_secondary:
+       # preload the PGD pointer register
+       mov     swapper_pg_dir,d0
+       mov     d0,(PTBR)
+       clr     d0
+       movbu   d0,(PIDR)
+
+       # turn on the TLBs
+       mov     MMUCTR_IIV|MMUCTR_DIV,d0
+       mov     d0,(MMUCTR)
+#ifdef CONFIG_AM34_2
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE|MMUCTR_WTE,d0
+#else
+       mov     MMUCTR_ITE|MMUCTR_DTE|MMUCTR_CE,d0
+#endif
+       mov     d0,(MMUCTR)
+
+       # turn on AM33v2 exception handling mode and set the trap table base
+       movhu   (CPUP),d0
+       or      CPUP_EXM_AM33V2,d0
+       movhu   d0,(CPUP)
+
+       # set the interrupt vector table
+       mov     CONFIG_INTERRUPT_VECTOR_BASE,d0
+       mov     d0,(TBR)
+
+       # invalidate and enable both of the caches
+       mov     ECHCTR,a0
+       clr     d0
+       mov     d0,(a0)
+       mov     CHCTR,a0
+       clr     d0
+       movhu   d0,(a0)                                 # turn off first
+       mov     CHCTR_ICINV|CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       setlb
+       mov     (a0),d0
+       btst    CHCTR_ICBUSY|CHCTR_DCBUSY,d0            # wait till not busy (use CPU loop buffer)
+       lne
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+#ifndef CONFIG_MN10300_CACHE_WBACK_NOWRALLOC
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK,d0
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRBACK|CHCTR_DCALMD,d0
+#endif  /* !NOWRALLOC */
+#else
+       mov     CHCTR_ICEN|CHCTR_DCEN|CHCTR_DCWTMD_WRTHROUGH,d0
+#endif  /* WBACK */
+       movhu   d0,(a0)                                 # enable
+#endif  /* ENABLED */
+
+       # Clear the boot IPI interrupt for this CPU
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0
+       and     ~GxICR_REQUEST,d0
+       movhu   d0,(GxICR(SMP_BOOT_IRQ))
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0                # flush
+
+       /* get stack */
+       mov     CONFIG_INTERRUPT_VECTOR_BASE + CONFIG_BOOT_STACK_OFFSET,a0
+       mov     (CPUID),d0
+       and     CPUID_MASK,d0
+       mulu    CONFIG_BOOT_STACK_SIZE,d0
+       sub     d0,a0
+       mov     a0,sp
+
+       # init interrupt for AP
+       call    smp_prepare_cpu_init[],0
+
+       # mark this secondary CPU in cpu_boot_map
+       mov     (CPUID),d0
+       mov     0x1,d1
+       asl     d0,d1
+       mov     cpu_boot_map,a0
+       bset    d1,(a0)
+
+       or      EPSW_IE|EPSW_IM_1,epsw  # permit level 0 interrupts
+       nop
+       nop
+#ifdef  CONFIG_MN10300_CACHE_WBACK
+       # flush the local cache if it's in writeback mode
+       call    mn10300_local_dcache_flush_inv[],0
+       setlb
+       mov     (CHCTR),d0
+       btst    CHCTR_DCBUSY,d0         # wait till not busy (use CPU loop buffer)
+       lne
+#endif
+
+       # now sleep waiting for further instructions
+secondary_sleep:
+       mov     CPUM_SLEEP,d0
+       movhu   d0,(CPUM)
+       nop
+       nop
+       bra     secondary_sleep
+       .size   startup_secondary,.-startup_secondary
+#endif /* CONFIG_SMP */
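+
+# Taken together, the two CONFIG_SMP additions to head.S form a simple boot
+# handshake: the BP enables SMP_BOOT_IRQ in each AP's cross-CPU interrupt
+# control register, raises the request, and polls cpu_boot_map (a bitmap in
+# which bit N is set once CPU N has come up) with a bounded delay; each AP
+# clears the IPI, sets up its own MMU and caches, marks its bit and sleeps
+# until the SMP bring-up code wakes it.  A C-level restatement of the BP side
+# is given below purely as a reading aid; it assumes cpu_boot_map is an
+# unsigned long bitmap and reuses the constants referenced by the assembly.
+#
+#	#include <linux/delay.h>
+#	#include <linux/threads.h>
+#	#include <asm/intctl-regs.h>	/* GxICR_*, CROSS_GxICR() */
+#	#include <asm/smp.h>		/* SMP_BOOT_IRQ, boot IPI constants (assumed home) */
+#
+#	extern unsigned long cpu_boot_map;	/* bit N set once CPU N has booted */
+#
+#	/* Reading aid: what loop_request_boot_secondary / wait_on_secondary_boot do. */
+#	static void request_boot_of_secondaries(void)
+#	{
+#		int cpu, tries;
+#		u16 tmp;
+#
+#		for (cpu = 1; cpu < NR_CPUS; cpu++) {
+#			/* raise SMP_BOOT_IRQ on the target CPU's interrupt controller */
+#			tmp = CROSS_GxICR(SMP_BOOT_IRQ, cpu);
+#			CROSS_GxICR(SMP_BOOT_IRQ, cpu) = tmp | GxICR_REQUEST | GxICR_DETECT;
+#			tmp = CROSS_GxICR(SMP_BOOT_IRQ, cpu);	/* flush the write */
+#
+#			/* give the AP a bounded amount of time to mark itself booted */
+#			for (tries = 0; tries < TIME_OUT_COUNT_BOOT_IPI; tries++) {
+#				__delay(DELAY_TIME_BOOT_IPI);
+#				if (cpu_boot_map & (1UL << cpu))
+#					break;
+#			}
+#		}
+#	}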
+
+###############################################################################
+#
+#
+#
+###############################################################################
 ENTRY(__head_end)
 
 /*
index eee2eee86267bd961854bf6e91daddcb4e5961f3..6a064ab5af0774b15887d9432209a9e2f9abe208 100644 (file)
@@ -9,6 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 
+struct clocksource;
+struct clock_event_device;
+
 /*
  * kthread.S
  */
@@ -18,3 +21,25 @@ extern int kernel_thread_helper(int);
  * entry.S
  */
 extern void ret_from_fork(struct task_struct *) __attribute__((noreturn));
+
+/*
+ * smp-low.S
+ */
+#ifdef CONFIG_SMP
+extern void mn10300_low_ipi_handler(void);
+#endif
+
+/*
+ * time.c
+ */
+extern irqreturn_t local_timer_interrupt(void);
+
+/*
+ * time.c
+ */
+#ifdef CONFIG_CEVT_MN10300
+extern void clockevent_set_clock(struct clock_event_device *, unsigned int);
+#endif
+#ifdef CONFIG_CSRC_MN10300
+extern void clocksource_set_clock(struct clocksource *, unsigned int);
+#endif
index e2d5ed891f37b6c7471108f25c2e57dd425ba28f..c2e44597c22b1fc5b69ebf13cca307d43c2d1a52 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/kernel_stat.h>
 #include <linux/seq_file.h>
+#include <linux/cpumask.h>
 #include <asm/setup.h>
+#include <asm/serial-regs.h>
 
-unsigned long __mn10300_irq_enabled_epsw = EPSW_IE | EPSW_IM_7;
+unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
+       [0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
+};
 EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);
 
+#ifdef CONFIG_SMP
+static char irq_affinity_online[NR_IRQS] = {
+       [0 ... NR_IRQS - 1] = 0
+};
+
+#define NR_IRQ_WORDS   ((NR_IRQS + 31) / 32)
+static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
+       [0 ... NR_IRQ_WORDS - 1] = 0
+};
+#endif  /* CONFIG_SMP */
+
 atomic_t irq_err_count;
 
 /*
@@ -24,30 +39,67 @@ atomic_t irq_err_count;
  */
 static void mn10300_cpupic_ack(unsigned int irq)
 {
+       unsigned long flags;
        u16 tmp;
-       *(volatile u8 *) &GxICR(irq) = GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       GxICR_u8(irq) = GxICR_DETECT;
        tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
 }
 
-static void mn10300_cpupic_mask(unsigned int irq)
+static void __mask_and_set_icr(unsigned int irq,
+                              unsigned int mask, unsigned int set)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL);
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & mask) | set;
        tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
+}
+
+static void mn10300_cpupic_mask(unsigned int irq)
+{
+       __mask_and_set_icr(irq, GxICR_LEVEL, 0);
 }
 
 static void mn10300_cpupic_mask_ack(unsigned int irq)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
-       tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       if (!test_and_clear_bit(irq, irq_affinity_request)) {
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+               tmp = GxICR(irq);
+       } else {
+               u16 tmp2;
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL);
+               tmp2 = GxICR(irq);
+
+               irq_affinity_online[irq] =
+                       any_online_cpu(*irq_desc[irq].affinity);
+               CROSS_GxICR(irq, irq_affinity_online[irq]) =
+                       (tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
+               tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+       }
+
+       arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
 static void mn10300_cpupic_unmask(unsigned int irq)
 {
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
-       tmp = GxICR(irq);
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE);
 }
 
 static void mn10300_cpupic_unmask_clear(unsigned int irq)
@@ -56,11 +108,89 @@ static void mn10300_cpupic_unmask_clear(unsigned int irq)
         * device has ceased to assert its interrupt line and the interrupt
         * channel has been disabled in the PIC, so for level-triggered
         * interrupts we need to clear the request bit when we re-enable */
-       u16 tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
-       tmp = GxICR(irq);
+#ifdef CONFIG_SMP
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       if (!test_and_clear_bit(irq, irq_affinity_request)) {
+               tmp = GxICR(irq);
+               GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+               tmp = GxICR(irq);
+       } else {
+               tmp = GxICR(irq);
+
+               irq_affinity_online[irq] = any_online_cpu(*irq_desc[irq].affinity);
+               CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
+               tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
+       }
+
+       arch_local_irq_restore(flags);
+#else  /* CONFIG_SMP */
+       __mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
+#endif /* CONFIG_SMP */
 }
 
+#ifdef CONFIG_SMP
+static int
+mn10300_cpupic_setaffinity(unsigned int irq, const struct cpumask *mask)
+{
+       unsigned long flags;
+       int err;
+
+       flags = arch_local_cli_save();
+
+       /* check whether this IRQ is allowed to change affinity */
+       switch (irq) {
+       case TMJCIRQ:
+       case RESCHEDULE_IPI:
+       case CALL_FUNC_SINGLE_IPI:
+       case LOCAL_TIMER_IPI:
+       case FLUSH_CACHE_IPI:
+       case CALL_FUNCTION_NMI_IPI:
+       case GDB_NMI_IPI:
+#ifdef CONFIG_MN10300_TTYSM0
+       case SC0RXIRQ:
+       case SC0TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+       case TM8IRQ:
+#elif CONFIG_MN10300_TTYSM0_TIMER2
+       case TM2IRQ:
+#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+       case SC1RXIRQ:
+       case SC1TXIRQ:
+#ifdef CONFIG_MN10300_TTYSM1_TIMER12
+       case TM12IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER9
+       case TM9IRQ:
+#elif CONFIG_MN10300_TTYSM1_TIMER3
+       case TM3IRQ:
+#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+       case SC2RXIRQ:
+       case SC2TXIRQ:
+       case TM10IRQ:
+#endif /* CONFIG_MN10300_TTYSM2 */
+               err = -1;
+               break;
+
+       default:
+               set_bit(irq, irq_affinity_request);
+               err = 0;
+               break;
+       }
+
+       arch_local_irq_restore(flags);
+       return err;
+}
+#endif /* CONFIG_SMP */
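
mn10300_cpupic_setaffinity() deliberately leaves the hardware untouched: for migratable IRQs it only records the request in irq_affinity_request, and the mask_ack/unmask_clear paths above perform the actual move, re-pointing the interrupt at the newly chosen CPU through CROSS_GxICR() the next time the IRQ is serviced. From the rest of the kernel this is driven through the generic affinity API; a minimal illustration (not from the patch):

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	/* Illustration only: ask the generic IRQ layer to move an interrupt to
	 * CPU 1.  On MN10300 the ICR reprogramming is deferred until the IRQ is
	 * next acknowledged, as implemented above. */
	static int move_irq_to_cpu1(unsigned int irq)
	{
		return irq_set_affinity(irq, cpumask_of(1));
	}
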
+
 /*
  * MN10300 PIC level-triggered IRQ handling.
  *
@@ -79,6 +209,9 @@ static struct irq_chip mn10300_cpu_pic_level = {
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask,
        .unmask         = mn10300_cpupic_unmask_clear,
+#ifdef CONFIG_SMP
+       .set_affinity   = mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -94,6 +227,9 @@ static struct irq_chip mn10300_cpu_pic_edge = {
        .mask           = mn10300_cpupic_mask,
        .mask_ack       = mn10300_cpupic_mask_ack,
        .unmask         = mn10300_cpupic_unmask,
+#ifdef CONFIG_SMP
+       .set_affinity   = mn10300_cpupic_setaffinity,
+#endif
 };
 
 /*
@@ -111,14 +247,34 @@ void ack_bad_irq(int irq)
  */
 void set_intr_level(int irq, u16 level)
 {
-       u16 tmp;
+       BUG_ON(in_interrupt());
 
-       if (in_interrupt())
-               BUG();
+       __mask_and_set_icr(irq, GxICR_ENABLE, level);
+}
 
-       tmp = GxICR(irq);
-       GxICR(irq) = (tmp & GxICR_ENABLE) | level;
-       tmp = GxICR(irq);
+void mn10300_intc_set_level(unsigned int irq, unsigned int level)
+{
+       set_intr_level(irq, NUM2GxICR_LEVEL(level) & GxICR_LEVEL);
+}
+
+void mn10300_intc_clear(unsigned int irq)
+{
+       __mask_and_set_icr(irq, GxICR_LEVEL | GxICR_ENABLE, GxICR_DETECT);
+}
+
+void mn10300_intc_set(unsigned int irq)
+{
+       __mask_and_set_icr(irq, 0, GxICR_REQUEST | GxICR_DETECT);
+}
+
+void mn10300_intc_enable(unsigned int irq)
+{
+       mn10300_cpupic_unmask(irq);
+}
+
+void mn10300_intc_disable(unsigned int irq)
+{
+       mn10300_cpupic_mask(irq);
 }
 
 /*
@@ -126,7 +282,7 @@ void set_intr_level(int irq, u16 level)
  * than before
  * - see Documentation/mn10300/features.txt
  */
-void set_intr_postackable(int irq)
+void mn10300_set_lateack_irq_type(int irq)
 {
        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_level,
                                 handle_level_irq);
@@ -147,6 +303,7 @@ void __init init_IRQ(void)
                         * interrupts */
                        set_irq_chip_and_handler(irq, &mn10300_cpu_pic_edge,
                                                 handle_level_irq);
+
        unit_init_IRQ();
 }
 
@@ -156,20 +313,22 @@ void __init init_IRQ(void)
 asmlinkage void do_IRQ(void)
 {
        unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
+       unsigned int cpu_id = smp_processor_id();
        int irq;
 
        sp = current_stack_pointer();
-       if (sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN)
-               BUG();
+       BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);
 
        /* make sure local_irq_enable() doesn't muck up the interrupt priority
         * setting in EPSW */
-       old_irq_enabled_epsw = __mn10300_irq_enabled_epsw;
+       old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
        local_save_flags(epsw);
-       __mn10300_irq_enabled_epsw = EPSW_IE | (EPSW_IM & epsw);
+       __mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
        irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;
 
-       __IRQ_STAT(smp_processor_id(), __irq_count)++;
+#ifdef CONFIG_MN10300_WD_TIMER
+       __IRQ_STAT(cpu_id, __irq_count)++;
+#endif
 
        irq_enter();
 
@@ -189,7 +348,7 @@ asmlinkage void do_IRQ(void)
                local_irq_restore(epsw);
        }
 
-       __mn10300_irq_enabled_epsw = old_irq_enabled_epsw;
+       __mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;
 
        irq_exit();
 }
@@ -222,9 +381,16 @@ int show_interrupts(struct seq_file *p, void *v)
                        seq_printf(p, "%3d: ", i);
                        for_each_present_cpu(cpu)
                                seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
-                       seq_printf(p, " %14s.%u", irq_desc[i].chip->name,
-                                  (GxICR(i) & GxICR_LEVEL) >>
-                                  GxICR_LEVEL_SHIFT);
+
+                       if (i < NR_CPU_IRQS)
+                               seq_printf(p, " %14s.%u",
+                                          irq_desc[i].chip->name,
+                                          (GxICR(i) & GxICR_LEVEL) >>
+                                          GxICR_LEVEL_SHIFT);
+                       else
+                               seq_printf(p, " %14s",
+                                          irq_desc[i].chip->name);
+
                        seq_printf(p, "  %s", action->name);
 
                        for (action = action->next;
@@ -240,11 +406,13 @@ int show_interrupts(struct seq_file *p, void *v)
 
                /* polish off with NMI and error counters */
        case NR_IRQS:
+#ifdef CONFIG_MN10300_WD_TIMER
                seq_printf(p, "NMI: ");
                for (j = 0; j < NR_CPUS; j++)
                        if (cpu_online(j))
                                seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
+#endif
 
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
                break;
@@ -252,3 +420,51 @@ int show_interrupts(struct seq_file *p, void *v)
 
        return 0;
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+void migrate_irqs(void)
+{
+       irq_desc_t *desc;
+       int irq;
+       unsigned int self, new;
+       unsigned long flags;
+
+       self = smp_processor_id();
+       for (irq = 0; irq < NR_IRQS; irq++) {
+               desc = irq_desc + irq;
+
+               if (desc->status == IRQ_PER_CPU)
+                       continue;
+
+               if (cpu_isset(self, irq_desc[irq].affinity) &&
+                   !cpus_intersects(irq_affinity[irq], cpu_online_map)) {
+                       int cpu_id;
+                       cpu_id = first_cpu(cpu_online_map);
+                       cpu_set(cpu_id, irq_desc[irq].affinity);
+               }
+               /* We need to operate irq_affinity_online atomically. */
+               arch_local_cli_save(flags);
+               if (irq_affinity_online[irq] == self) {
+                       u16 x, tmp;
+
+                       x = GxICR(irq);
+                       GxICR(irq) = x & GxICR_LEVEL;
+                       tmp = GxICR(irq);
+
+                       new = any_online_cpu(irq_desc[irq].affinity);
+                       irq_affinity_online[irq] = new;
+
+                       CROSS_GxICR(irq, new) =
+                               (x & GxICR_LEVEL) | GxICR_DETECT;
+                       tmp = CROSS_GxICR(irq, new);
+
+                       x &= GxICR_LEVEL | GxICR_ENABLE;
+                       if (GxICR(irq) & GxICR_REQUEST) {
+                               x |= GxICR_REQUEST | GxICR_DETECT;
+                               CROSS_GxICR(irq, new) = x;
+                               tmp = CROSS_GxICR(irq, new);
+                       }
+               }
+               arch_local_irq_restore(flags);
+       }
+}
+#endif /* CONFIG_HOTPLUG_CPU */
index 67e6389d625a43a7cf62d4d1f5358233b5fef3d7..0311a7fcea16999c0ad6c6aa6dcfd23720ac953d 100644 (file)
@@ -377,8 +377,10 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 
 void __kprobes arch_disarm_kprobe(struct kprobe *p)
 {
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
+#endif
 }
 
 void arch_remove_kprobe(struct kprobe *p)
@@ -390,8 +392,10 @@ void __kprobes disarm_kprobe(struct kprobe *p, struct pt_regs *regs)
 {
        *p->addr = p->opcode;
        regs->pc = (unsigned long) p->addr;
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush();
        mn10300_icache_inv();
+#endif
 }
 
 static inline
index 66702d256610cd765785b0b0495388dc8ddf0af3..dfc1b6f2fa9a4b70f251d8766c773c39fc7733fe 100644 (file)
@@ -39,7 +39,7 @@
 ###############################################################################
        .balign L1_CACHE_BYTES
 ENTRY(mn10300_serial_vdma_interrupt)
-       or      EPSW_IE,psw                     # permit overriding by
+#      or      EPSW_IE,psw                     # permit overriding by
                                                # debugging interrupts
        movm    [d2,d3,a2,a3,exreg0],(sp)
 
@@ -164,7 +164,7 @@ mnsc_vdma_tx_noint:
        rti
 
 mnsc_vdma_tx_empty:
-       mov     +(GxICR_LEVEL_1|GxICR_DETECT),d2
+       mov     +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
        movhu   d2,(e3)                 # disable the interrupt
        movhu   (e3),d2                 # flush
 
@@ -175,7 +175,7 @@ mnsc_vdma_tx_break:
        movhu   (SCxCTR,e2),d2          # turn on break mode
        or      SC01CTR_BKE,d2
        movhu   d2,(SCxCTR,e2)
-       mov     +(GxICR_LEVEL_1|GxICR_DETECT),d2
+       mov     +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2
        movhu   d2,(e3)                 # disable transmit interrupts on this
                                        # channel
        movhu   (e3),d2                 # flush
index db509dd80565b9e91c2b661c08c5fd64bc52e8d4..996384dba45da8dabca666d78328a00811aa4931 100644 (file)
@@ -44,6 +44,11 @@ static const char serial_revdate[] = "2007-11-06";
 #include <unit/timex.h>
 #include "mn10300-serial.h"
 
+#ifdef CONFIG_SMP
+#undef  GxICR
+#define GxICR(X) CROSS_GxICR(X, 0)
+#endif /* CONFIG_SMP */
+
 #define kenter(FMT, ...) \
        printk(KERN_DEBUG "-->%s(" FMT ")\n", __func__, ##__VA_ARGS__)
 #define _enter(FMT, ...) \
@@ -57,6 +62,11 @@ static const char serial_revdate[] = "2007-11-06";
 #define _proto(FMT, ...) \
        no_printk(KERN_DEBUG "### MNSERIAL " FMT " ###\n", ##__VA_ARGS__)
 
+#ifndef CODMSB
+/* c_cflag bit meaning */
+#define CODMSB 004000000000    /* change Transfer bit-order */
+#endif
+
 #define NR_UARTS 3
 
 #ifdef CONFIG_MN10300_TTYSM_CONSOLE
@@ -152,26 +162,35 @@ struct mn10300_serial_port mn10300_serial_port_sif0 = {
        .name           = "ttySM0",
        ._iobase        = &SC0CTR,
        ._control       = &SC0CTR,
-       ._status        = (volatile u8 *) &SC0STR,
+       ._status        = (volatile u8 *)&SC0STR,
        ._intr          = &SC0ICR,
        ._rxb           = &SC0RXB,
        ._txb           = &SC0TXB,
        .rx_name        = "ttySM0:Rx",
        .tx_name        = "ttySM0:Tx",
-#ifdef CONFIG_MN10300_TTYSM0_TIMER8
+#if defined(CONFIG_MN10300_TTYSM0_TIMER8)
        .tm_name        = "ttySM0:Timer8",
        ._tmxmd         = &TM8MD,
        ._tmxbr         = &TM8BR,
        ._tmicr         = &TM8ICR,
        .tm_irq         = TM8IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM0_TIMER2 */
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+       .tm_name        = "ttySM0:Timer0",
+       ._tmxmd         = &TM0MD,
+       ._tmxbr         = (volatile u16 *)&TM0BR,
+       ._tmicr         = &TM0ICR,
+       .tm_irq         = TM0IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
        .tm_name        = "ttySM0:Timer2",
        ._tmxmd         = &TM2MD,
-       ._tmxbr         = (volatile u16 *) &TM2BR,
+       ._tmxbr         = (volatile u16 *)&TM2BR,
        ._tmicr         = &TM2ICR,
        .tm_irq         = TM2IRQ,
        .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM0"
 #endif
        .rx_irq         = SC0RXIRQ,
        .tx_irq         = SC0TXIRQ,
@@ -205,26 +224,35 @@ struct mn10300_serial_port mn10300_serial_port_sif1 = {
        .name           = "ttySM1",
        ._iobase        = &SC1CTR,
        ._control       = &SC1CTR,
-       ._status        = (volatile u8 *) &SC1STR,
+       ._status        = (volatile u8 *)&SC1STR,
        ._intr          = &SC1ICR,
        ._rxb           = &SC1RXB,
        ._txb           = &SC1TXB,
        .rx_name        = "ttySM1:Rx",
        .tx_name        = "ttySM1:Tx",
-#ifdef CONFIG_MN10300_TTYSM1_TIMER9
+#if defined(CONFIG_MN10300_TTYSM1_TIMER9)
        .tm_name        = "ttySM1:Timer9",
        ._tmxmd         = &TM9MD,
        ._tmxbr         = &TM9BR,
        ._tmicr         = &TM9ICR,
        .tm_irq         = TM9IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
-#else /* CONFIG_MN10300_TTYSM1_TIMER3 */
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
        .tm_name        = "ttySM1:Timer3",
        ._tmxmd         = &TM3MD,
-       ._tmxbr         = (volatile u16 *) &TM3BR,
+       ._tmxbr         = (volatile u16 *)&TM3BR,
        ._tmicr         = &TM3ICR,
        .tm_irq         = TM3IRQ,
        .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER12)
+       .tm_name        = "ttySM1/Timer12",
+       ._tmxmd         = &TM12MD,
+       ._tmxbr         = &TM12BR,
+       ._tmicr         = &TM12ICR,
+       .tm_irq         = TM12IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#else
+#error "Unknown config for ttySM1"
 #endif
        .rx_irq         = SC1RXIRQ,
        .tx_irq         = SC1TXIRQ,
@@ -260,20 +288,45 @@ struct mn10300_serial_port mn10300_serial_port_sif2 = {
        .uart.lock      =
        __SPIN_LOCK_UNLOCKED(mn10300_serial_port_sif2.uart.lock),
        .name           = "ttySM2",
-       .rx_name        = "ttySM2:Rx",
-       .tx_name        = "ttySM2:Tx",
-       .tm_name        = "ttySM2:Timer10",
        ._iobase        = &SC2CTR,
        ._control       = &SC2CTR,
-       ._status        = &SC2STR,
+       ._status        = (volatile u8 *)&SC2STR,
        ._intr          = &SC2ICR,
        ._rxb           = &SC2RXB,
        ._txb           = &SC2TXB,
+       .rx_name        = "ttySM2:Rx",
+       .tx_name        = "ttySM2:Tx",
+#if defined(CONFIG_MN10300_TTYSM2_TIMER10)
+       .tm_name        = "ttySM2/Timer10",
        ._tmxmd         = &TM10MD,
        ._tmxbr         = &TM10BR,
        ._tmicr         = &TM10ICR,
        .tm_irq         = TM10IRQ,
        .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER9)
+       .tm_name        = "ttySM2/Timer9",
+       ._tmxmd         = &TM9MD,
+       ._tmxbr         = &TM9BR,
+       ._tmicr         = &TM9ICR,
+       .tm_irq         = TM9IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_16BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+       .tm_name        = "ttySM2/Timer1",
+       ._tmxmd         = &TM1MD,
+       ._tmxbr         = (volatile u16 *)&TM1BR,
+       ._tmicr         = &TM1ICR,
+       .tm_irq         = TM1IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+       .tm_name        = "ttySM2/Timer3",
+       ._tmxmd         = &TM3MD,
+       ._tmxbr         = (volatile u16 *)&TM3BR,
+       ._tmicr         = &TM3ICR,
+       .tm_irq         = TM3IRQ,
+       .div_timer      = MNSCx_DIV_TIMER_8BIT,
+#else
+#error "Unknown config for ttySM2"
+#endif
        .rx_irq         = SC2RXIRQ,
        .tx_irq         = SC2TXIRQ,
        .rx_icr         = &GxICR(SC2RXIRQ),
@@ -322,9 +375,13 @@ struct mn10300_serial_port *mn10300_serial_ports[NR_UARTS + 1] = {
  */
 static void mn10300_serial_mask_ack(unsigned int irq)
 {
+       unsigned long flags;
        u16 tmp;
+
+       flags = arch_local_cli_save();
        GxICR(irq) = GxICR_LEVEL_6;
        tmp = GxICR(irq); /* flush write buffer */
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_nop(unsigned int irq)
@@ -348,23 +405,36 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS];
 
 static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->tx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        x = *port->tx_icr;
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->tx_icr = GxICR_LEVEL_1 | GxICR_ENABLE;
+
+       flags = arch_local_cli_save();
+       *port->tx_icr =
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE;
        x = *port->tx_icr;
+       arch_local_irq_restore(flags);
 }
 
 static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port)
 {
+       unsigned long flags;
        u16 x;
-       *port->rx_icr = GxICR_LEVEL_1 | GxICR_DETECT;
+
+       flags = arch_local_cli_save();
+       *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        x = *port->rx_icr;
+       arch_local_irq_restore(flags);
 }
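These helpers now all follow the same sequence: mask local interrupts, write the interrupt control register, read it back to flush the CPU's write buffer, then restore the interrupt state. The pattern, pulled out as a stand-alone sketch:

    /* Sketch of the recurring ICR update pattern used throughout this file. */
    static void icr_write_and_flush(volatile u16 *icr, u16 val)
    {
            unsigned long flags;
            u16 tmp;

            flags = arch_local_cli_save();  /* mask local interrupts */
            *icr = val;                     /* update the control register */
            tmp = *icr;                     /* read back: flush write buffer */
            (void)tmp;
            arch_local_irq_restore(flags);
    }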
 
 /*
@@ -650,7 +720,7 @@ static unsigned int mn10300_serial_tx_empty(struct uart_port *_port)
 static void mn10300_serial_set_mctrl(struct uart_port *_port,
                                     unsigned int mctrl)
 {
-       struct mn10300_serial_port *port =
+       struct mn10300_serial_port *port __attribute__ ((unused)) =
                container_of(_port, struct mn10300_serial_port, uart);
 
        _enter("%s,%x", port->name, mctrl);
@@ -706,6 +776,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
                        UART_XMIT_SIZE));
 
        /* kick the virtual DMA controller */
+       arch_local_cli();
        x = *port->tx_icr;
        x |= GxICR_ENABLE;
 
@@ -716,10 +787,14 @@ static void mn10300_serial_start_tx(struct uart_port *_port)
 
        _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx",
               *port->_control, *port->_intr, *port->_status,
-              *port->_tmxmd, *port->_tmxbr, *port->tx_icr);
+              *port->_tmxmd,
+              (port->div_timer == MNSCx_DIV_TIMER_8BIT) ?
+                  *(volatile u8 *)port->_tmxbr : *port->_tmxbr,
+              *port->tx_icr);
 
        *port->tx_icr = x;
        x = *port->tx_icr;
+       arch_local_sti();
 }
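The debug print above now reads the timer base register at the width that matches the divide timer in use. The same check, expressed as a small helper (sketch only):

    static u16 read_timer_base(struct mn10300_serial_port *port)
    {
            /* 8-bit divide timers get a byte access */
            if (port->div_timer == MNSCx_DIV_TIMER_8BIT)
                    return *(volatile u8 *)port->_tmxbr;
            return *port->_tmxbr;
    }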
 
 /*
@@ -842,8 +917,10 @@ static int mn10300_serial_startup(struct uart_port *_port)
        pint->port = port;
        pint->vdma = mn10300_serial_vdma_tx_handler;
 
-       set_intr_level(port->rx_irq, GxICR_LEVEL_1);
-       set_intr_level(port->tx_irq, GxICR_LEVEL_1);
+       set_intr_level(port->rx_irq,
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
+       set_intr_level(port->tx_irq,
+               NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL));
        set_irq_chip(port->tm_irq, &mn10300_serial_pic);
 
        if (request_irq(port->rx_irq, mn10300_serial_interrupt,
@@ -876,6 +953,7 @@ error:
  */
 static void mn10300_serial_shutdown(struct uart_port *_port)
 {
+       u16 x;
        struct mn10300_serial_port *port =
                container_of(_port, struct mn10300_serial_port, uart);
 
@@ -897,8 +975,12 @@ static void mn10300_serial_shutdown(struct uart_port *_port)
        free_irq(port->rx_irq, port);
        free_irq(port->tx_irq, port);
 
-       *port->rx_icr = GxICR_LEVEL_1;
-       *port->tx_icr = GxICR_LEVEL_1;
+       arch_local_cli();
+       *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+       x = *port->rx_icr;
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
+       x = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
@@ -947,11 +1029,66 @@ static void mn10300_serial_change_speed(struct mn10300_serial_port *port,
        /* Determine divisor based on baud rate */
        battempt = 0;
 
-       if (div_timer == MNSCx_DIV_TIMER_16BIT)
-               scxctr |= SC0CTR_CK_TM8UFLOW_8; /* ( == SC1CTR_CK_TM9UFLOW_8
-                                                *   == SC2CTR_CK_TM10UFLOW) */
-       else if (div_timer == MNSCx_DIV_TIMER_8BIT)
+       switch (port->uart.line) {
+#ifdef CONFIG_MN10300_TTYSM0
+       case 0: /* ttySM0 */
+#if   defined(CONFIG_MN10300_TTYSM0_TIMER8)
+               scxctr |= SC0CTR_CK_TM8UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER0)
+               scxctr |= SC0CTR_CK_TM0UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
                scxctr |= SC0CTR_CK_TM2UFLOW_8;
+#else
+#error "Unknown config for ttySM0"
+#endif
+               break;
+#endif /* CONFIG_MN10300_TTYSM0 */
+
+#ifdef CONFIG_MN10300_TTYSM1
+       case 1: /* ttySM1 */
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+#if   defined(CONFIG_MN10300_TTYSM1_TIMER9)
+               scxctr |= SC1CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
+               scxctr |= SC1CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+#if defined(CONFIG_MN10300_TTYSM1_TIMER12)
+               scxctr |= SC1CTR_CK_TM12UFLOW_8;
+#else
+#error "Unknown config for ttySM1"
+#endif
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+               break;
+#endif /* CONFIG_MN10300_TTYSM1 */
+
+#ifdef CONFIG_MN10300_TTYSM2
+       case 2: /* ttySM2 */
+#if defined(CONFIG_AM33_2)
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER10)
+               scxctr |= SC2CTR_CK_TM10UFLOW;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#else /* CONFIG_AM33_2 */
+#if   defined(CONFIG_MN10300_TTYSM2_TIMER9)
+               scxctr |= SC2CTR_CK_TM9UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER1)
+               scxctr |= SC2CTR_CK_TM1UFLOW_8;
+#elif defined(CONFIG_MN10300_TTYSM2_TIMER3)
+               scxctr |= SC2CTR_CK_TM3UFLOW_8;
+#else
+#error "Unknown config for ttySM2"
+#endif
+#endif /* CONFIG_AM33_2 */
+               break;
+#endif /* CONFIG_MN10300_TTYSM2 */
+
+       default:
+               break;
+       }
 
 try_alternative:
        baud = uart_get_baud_rate(&port->uart, new, old, 0,
@@ -1195,6 +1332,12 @@ static void mn10300_serial_set_termios(struct uart_port *_port,
                ctr &= ~SC2CTR_TWE;
                *port->_control = ctr;
        }
+
+       /* change Transfer bit-order (LSB/MSB) */
+       if (new->c_cflag & CODMSB)
+               *port->_control |= SC01CTR_OD_MSBFIRST; /* MSB MODE */
+       else
+               *port->_control &= ~SC01CTR_OD_MSBFIRST; /* LSB MODE */
 }
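The new CODMSB handling lets the transfer bit-order be chosen through termios. A minimal userspace sketch, assuming the CODMSB flag is (or is made) visible to applications with the same value the driver falls back to above:

    #include <termios.h>

    #ifndef CODMSB
    #define CODMSB 004000000000     /* same fallback value as in the driver */
    #endif

    /* Sketch: switch a ttySM port between MSB-first and LSB-first transfer. */
    static int set_msb_first(int fd, int on)
    {
            struct termios tio;

            if (tcgetattr(fd, &tio) < 0)
                    return -1;
            if (on)
                    tio.c_cflag |= CODMSB;          /* MSB first */
            else
                    tio.c_cflag &= ~CODMSB;         /* LSB first (default) */
            return tcsetattr(fd, TCSANOW, &tio);
    }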
 
 /*
@@ -1302,11 +1445,16 @@ static int __init mn10300_serial_init(void)
        printk(KERN_INFO "%s version %s (%s)\n",
               serial_name, serial_version, serial_revdate);
 
-#ifdef CONFIG_MN10300_TTYSM2
-       SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+#if defined(CONFIG_MN10300_TTYSM2) && defined(CONFIG_AM33_2)
+       {
+               int tmp;
+               SC2TIM = 8; /* make the baud base of timer 2 IOCLK/8 */
+               tmp = SC2TIM;
+       }
 #endif
 
-       set_intr_stub(EXCEP_IRQ_LEVEL1, mn10300_serial_vdma_interrupt);
+       set_intr_stub(NUM2EXCEP_IRQ_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL),
+               mn10300_serial_vdma_interrupt);
 
        ret = uart_register_driver(&mn10300_serial_driver);
        if (!ret) {
@@ -1366,9 +1514,11 @@ static void mn10300_serial_console_write(struct console *co,
        port = mn10300_serial_ports[co->index];
 
        /* firstly hijack the serial port from the "virtual DMA" controller */
+       arch_local_cli();
        txicr = *port->tx_icr;
-       *port->tx_icr = GxICR_LEVEL_1;
+       *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL);
        tmp = *port->tx_icr;
+       arch_local_sti();
 
        /* the transmitter may be disabled */
        scxctr = *port->_control;
@@ -1422,8 +1572,10 @@ static void mn10300_serial_console_write(struct console *co,
        if (!(scxctr & SC01CTR_TXE))
                *port->_control = scxctr;
 
+       arch_local_cli();
        *port->tx_icr = txicr;
        tmp = *port->tx_icr;
+       arch_local_sti();
 }
 
 /*
index 996244745ccae8f8fe990cac2e34fd502eb0545d..f2f5c9cfaabd803e02b95fad30267e1cde8a35c3 100644 (file)
@@ -16,6 +16,7 @@
 #include <asm/intctl-regs.h>
 #include <asm/timer-regs.h>
 #include <asm/frame.inc>
+#include <linux/threads.h>
 
        .text
 
@@ -53,7 +54,13 @@ watchdog_handler:
        .type   touch_nmi_watchdog,@function
 touch_nmi_watchdog:
        clr     d0
-       mov     d0,(watchdog_alert_counter)
+       clr     d1
+       mov     watchdog_alert_counter, a0
+       setlb
+       mov     d0, (a0+)
+       inc     d1
+       cmp     NR_CPUS, d1
+       lne
        ret     [],0
 
        .size   touch_nmi_watchdog,.-touch_nmi_watchdog
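The rewritten touch_nmi_watchdog above walks an array of per-CPU alert counters instead of clearing a single global word; a rough C equivalent of the setlb/lne loop:

    void touch_nmi_watchdog(void)
    {
            int cpu;

            /* clear the lockup alert counter for every possible CPU */
            for (cpu = 0; cpu < NR_CPUS; cpu++)
                    watchdog_alert_counter[cpu] = 0;
    }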
index f362d9d138f1ad90012d5fa5e69f1f8b387dbcba..c5e12bfd9fcdbb2e28d5b42d1916c2b08f2d20c5 100644 (file)
@@ -30,7 +30,7 @@
 static DEFINE_SPINLOCK(watchdog_print_lock);
 static unsigned int watchdog;
 static unsigned int watchdog_hz = 1;
-unsigned int watchdog_alert_counter;
+unsigned int watchdog_alert_counter[NR_CPUS];
 
 EXPORT_SYMBOL(touch_nmi_watchdog);
 
@@ -39,9 +39,6 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
  * is to check its timer makes IRQ counts. If they are not
  * changing then that CPU has some problem.
  *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
  * since NMIs don't listen to _any_ locks, we have to be extremely
  * careful not to rely on unsafe variables. The printk might lock
  * up though, so we have to break up any console locks first ...
@@ -69,8 +66,8 @@ int __init check_watchdog(void)
 
        printk(KERN_INFO "OK.\n");
 
-       /* now that we know it works we can reduce NMI frequency to
-        * something more reasonable; makes a difference in some configs
+       /* now that we know it works we can reduce NMI frequency to something
+        * more reasonable; makes a difference in some configs
         */
        watchdog_hz = 1;
 
@@ -121,15 +118,22 @@ void __init watchdog_go(void)
        }
 }
 
+#ifdef CONFIG_SMP
+static void watchdog_dump_register(void *dummy)
+{
+       printk(KERN_ERR "--- Register Dump (CPU%d) ---\n", CPUID);
+       show_registers(current_frame());
+}
+#endif
+
 asmlinkage
 void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
 {
-
        /*
         * Since current-> is always on the stack, and we always switch
         * the stack NMI-atomically, it's safe to use smp_processor_id().
         */
-       int sum, cpu = smp_processor_id();
+       int sum, cpu;
        int irq = NMIIRQ;
        u8 wdt, tmp;
 
@@ -138,43 +142,61 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep)
        tmp = WDCTR;
        NMICR = NMICR_WDIF;
 
-       nmi_count(cpu)++;
+       nmi_count(smp_processor_id())++;
        kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
-       sum = irq_stat[cpu].__irq_count;
-
-       if (last_irq_sums[cpu] == sum) {
-               /*
-                * Ayiee, looks like this CPU is stuck ...
-                * wait a few IRQs (5 seconds) before doing the oops ...
-                */
-               watchdog_alert_counter++;
-               if (watchdog_alert_counter == 5 * watchdog_hz) {
-                       spin_lock(&watchdog_print_lock);
+
+       for_each_online_cpu(cpu) {
+
+               sum = irq_stat[cpu].__irq_count;
+
+               if ((last_irq_sums[cpu] == sum)
+#if defined(CONFIG_GDBSTUB) && defined(CONFIG_SMP)
+                       && !(CHK_GDBSTUB_BUSY()
+                            || atomic_read(&cpu_doing_single_step))
+#endif
+                       ) {
                        /*
-                        * We are in trouble anyway, lets at least try
-                        * to get a message out.
+                        * Ayiee, looks like this CPU is stuck ...
+                        * wait a few IRQs (5 seconds) before doing the oops ...
                         */
-                       bust_spinlocks(1);
-                       printk(KERN_ERR
-                              "NMI Watchdog detected LOCKUP on CPU%d,"
-                              " pc %08lx, registers:\n",
-                              cpu, regs->pc);
-                       show_registers(regs);
-                       printk("console shuts up ...\n");
-                       console_silent();
-                       spin_unlock(&watchdog_print_lock);
-                       bust_spinlocks(0);
+                       watchdog_alert_counter[cpu]++;
+                       if (watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
+                               spin_lock(&watchdog_print_lock);
+                               /*
+                                * We are in trouble anyway, lets at least try
+                                * to get a message out.
+                                */
+                               bust_spinlocks(1);
+                               printk(KERN_ERR
+                                      "NMI Watchdog detected LOCKUP on CPU%d,"
+                                      " pc %08lx, registers:\n",
+                                      cpu, regs->pc);
+#ifdef CONFIG_SMP
+                               printk(KERN_ERR
+                                      "--- Register Dump (CPU%d) ---\n",
+                                      CPUID);
+#endif
+                               show_registers(regs);
+#ifdef CONFIG_SMP
+                               smp_nmi_call_function(watchdog_dump_register,
+                                       NULL, 1);
+#endif
+                               printk(KERN_NOTICE "console shuts up ...\n");
+                               console_silent();
+                               spin_unlock(&watchdog_print_lock);
+                               bust_spinlocks(0);
 #ifdef CONFIG_GDBSTUB
-                       if (gdbstub_busy)
-                               gdbstub_exception(regs, excep);
-                       else
-                               gdbstub_intercept(regs, excep);
+                               if (CHK_GDBSTUB_BUSY_AND_ACTIVE())
+                                       gdbstub_exception(regs, excep);
+                               else
+                                       gdbstub_intercept(regs, excep);
 #endif
-                       do_exit(SIGSEGV);
+                               do_exit(SIGSEGV);
+                       }
+               } else {
+                       last_irq_sums[cpu] = sum;
+                       watchdog_alert_counter[cpu] = 0;
                }
-       } else {
-               last_irq_sums[cpu] = sum;
-               watchdog_alert_counter = 0;
        }
 
        WDCTR = wdt | WDCTR_WDRST;
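In outline, whichever CPU takes the watchdog NMI now scans every online CPU for stalled IRQ counters rather than checking only itself. A condensed sketch of that loop (the oops/gdbstub reporting and the gdbstub-busy check are elided):

    static void check_cpus_for_lockup(void)
    {
            int cpu, sum;

            for_each_online_cpu(cpu) {
                    sum = irq_stat[cpu].__irq_count;
                    if (last_irq_sums[cpu] != sum) {
                            /* this CPU made IRQ progress: reset its state */
                            last_irq_sums[cpu] = sum;
                            watchdog_alert_counter[cpu] = 0;
                    } else if (++watchdog_alert_counter[cpu] == 5 * watchdog_hz) {
                            /* stuck for ~5 seconds: take the oops/gdbstub
                             * path shown in full in the hunk above */
                    }
            }
    }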
index f48373e2bc1cffab139be817cf6f540776c51b79..0d0f8049a17b557183be48f844bb4b100f5cc87a 100644 (file)
@@ -57,6 +57,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 void (*pm_power_off)(void);
 EXPORT_SYMBOL(pm_power_off);
 
+#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU)
 /*
  * we use this if we don't have any better idle routine
  */
@@ -69,6 +70,35 @@ static void default_idle(void)
                local_irq_enable();
 }
 
+#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU  */
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static inline void poll_idle(void)
+{
+       int oldval;
+
+       local_irq_enable();
+
+       /*
+        * Deal with another CPU just having chosen a thread to
+        * run here:
+        */
+       oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
+
+       if (!oldval) {
+               set_thread_flag(TIF_POLLING_NRFLAG);
+               while (!need_resched())
+                       cpu_relax();
+               clear_thread_flag(TIF_POLLING_NRFLAG);
+       } else {
+               set_need_resched();
+       }
+}
+#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */
+
 /*
  * the idle thread
  * - there's no useful work to be done, so just try to conserve power and have
@@ -77,8 +107,6 @@ static void default_idle(void)
  */
 void cpu_idle(void)
 {
-       int cpu = smp_processor_id();
-
        /* endless idle loop with no priority at all */
        for (;;) {
                while (!need_resched()) {
@@ -86,10 +114,13 @@ void cpu_idle(void)
 
                        smp_rmb();
                        idle = pm_idle;
-                       if (!idle)
+                       if (!idle) {
+#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
+                               idle = poll_idle;
+#else  /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
                                idle = default_idle;
-
-                       irq_stat[cpu].idle_timestamp = jiffies;
+#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */
+                       }
                        idle();
                }
 
@@ -197,6 +228,7 @@ int copy_thread(unsigned long clone_flags,
                unsigned long c_usp, unsigned long ustk_size,
                struct task_struct *p, struct pt_regs *kregs)
 {
+       struct thread_info *ti = task_thread_info(p);
        struct pt_regs *c_uregs, *c_kregs, *uregs;
        unsigned long c_ksp;
 
@@ -217,7 +249,7 @@ int copy_thread(unsigned long clone_flags,
 
        /* the new TLS pointer is passed in as arg #5 to sys_clone() */
        if (clone_flags & CLONE_SETTLS)
-               c_uregs->e2 = __frame->d3;
+               c_uregs->e2 = current_frame()->d3;
 
        /* set up the return kernel frame if called from kernel_thread() */
        c_kregs = c_uregs;
@@ -235,7 +267,7 @@ int copy_thread(unsigned long clone_flags,
        }
 
        /* set things up so the scheduler can start the new task */
-       p->thread.__frame = c_kregs;
+       ti->frame       = c_kregs;
        p->thread.a3    = (unsigned long) c_kregs;
        p->thread.sp    = c_ksp;
        p->thread.pc    = (unsigned long) ret_from_fork;
@@ -247,25 +279,26 @@ int copy_thread(unsigned long clone_flags,
 
 /*
  * clone a process
- * - tlsptr is retrieved by copy_thread() from __frame->d3
+ * - tlsptr is retrieved by copy_thread() from current_frame()->d3
  */
 asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
                          int __user *parent_tidptr, int __user *child_tidptr,
                          int __user *tlsptr)
 {
-       return do_fork(clone_flags, newsp ?: __frame->sp, __frame, 0,
-                      parent_tidptr, child_tidptr);
+       return do_fork(clone_flags, newsp ?: current_frame()->sp,
+                      current_frame(), 0, parent_tidptr, child_tidptr);
 }
 
 asmlinkage long sys_fork(void)
 {
-       return do_fork(SIGCHLD, __frame->sp, __frame, 0, NULL, NULL);
+       return do_fork(SIGCHLD, current_frame()->sp,
+                      current_frame(), 0, NULL, NULL);
 }
 
 asmlinkage long sys_vfork(void)
 {
-       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, __frame->sp, __frame,
-                      0, NULL, NULL);
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp,
+                      current_frame(), 0, NULL, NULL);
 }
 
 asmlinkage long sys_execve(const char __user *name,
@@ -279,7 +312,7 @@ asmlinkage long sys_execve(const char __user *name,
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
-       error = do_execve(filename, argv, envp, __frame);
+       error = do_execve(filename, argv, envp, current_frame());
        putname(filename);
        return error;
 }
index 20d7d0306b1680e023518da900305e79ae3205fe..4f342f75d00cd18c9428e02dbb8a0e849dc73f41 100644 (file)
@@ -41,7 +41,7 @@ static __init int profile_init(void)
        tmp = TM11ICR;
 
        printk(KERN_INFO "Profiling initiated on timer 11, priority 0, %uHz\n",
-              mn10300_ioclk / 8 / (TM11BR + 1));
+              MN10300_IOCLK / 8 / (TM11BR + 1));
        printk(KERN_INFO "Profile histogram stored %p-%p\n",
               prof_buffer, (u8 *)(prof_buffer + prof_len) - 1);
 
index 4eef0e7224f63728feb8f99216cbad2b5be9999f..e9e20f9a4dd37de605f93fef669195ecda900296 100644 (file)
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
-/* time for RTC to update itself in ioclks */
-static unsigned long mn10300_rtc_update_period;
-
+/*
+ * Read the current RTC time
+ */
 void read_persistent_clock(struct timespec *ts)
 {
        struct rtc_time tm;
 
        get_rtc_time(&tm);
 
-       ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
-                     tm.tm_hour, tm.tm_min, tm.tm_sec);
        ts->tv_nsec = 0;
+       ts->tv_sec = mktime(tm.tm_year, tm.tm_mon, tm.tm_mday,
+                           tm.tm_hour, tm.tm_min, tm.tm_sec);
+
+       /* if rtc is way off in the past, set something reasonable */
+       if (ts->tv_sec < 0)
+               ts->tv_sec = mktime(2009, 1, 1, 12, 0, 0);
 }
 
 /*
@@ -115,39 +119,14 @@ int update_persistent_clock(struct timespec now)
  */
 void __init calibrate_clock(void)
 {
-       unsigned long count0, counth, count1;
        unsigned char status;
 
        /* make sure the RTC is running and is set to operate in 24hr mode */
        status = RTSRC;
        RTCRB |= RTCRB_SET;
        RTCRB |= RTCRB_TM_24HR;
+       RTCRB &= ~RTCRB_DM_BINARY;
        RTCRA |= RTCRA_DVR;
        RTCRA &= ~RTCRA_DVR;
        RTCRB &= ~RTCRB_SET;
-
-       /* work out the clock speed by counting clock cycles between ends of
-        * the RTC update cycle - track the RTC through one complete update
-        * cycle (1 second)
-        */
-       startup_timestamp_counter();
-
-       while (!(RTCRA & RTCRA_UIP)) {}
-       while ((RTCRA & RTCRA_UIP)) {}
-
-       count0 = TMTSCBC;
-
-       while (!(RTCRA & RTCRA_UIP)) {}
-
-       counth = TMTSCBC;
-
-       while ((RTCRA & RTCRA_UIP)) {}
-
-       count1 = TMTSCBC;
-
-       shutdown_timestamp_counter();
-
-       MN10300_TSCCLK = count0 - count1; /* the timers count down */
-       mn10300_rtc_update_period = counth - count1;
-       MN10300_TSC_PER_HZ = MN10300_TSCCLK / HZ;
 }
index d464affcba0e31a0d201ac07f51fc7e4bd55cbf6..9e7a3209a3e1bc7297a9a9c7704cbcd46d6ae45c 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/init.h>
 #include <linux/bootmem.h>
 #include <linux/seq_file.h>
+#include <linux/cpu.h>
 #include <asm/processor.h>
 #include <linux/console.h>
 #include <asm/uaccess.h>
@@ -30,7 +31,6 @@
 #include <asm/io.h>
 #include <asm/smp.h>
 #include <proc/proc.h>
-#include <asm/busctl-regs.h>
 #include <asm/fpu.h>
 #include <asm/sections.h>
 
@@ -64,11 +64,13 @@ unsigned long memory_size;
 struct thread_info *__current_ti = &init_thread_union.thread_info;
 struct task_struct *__current = &init_task;
 
-#define mn10300_known_cpus 3
+#define mn10300_known_cpus 5
 static const char *const mn10300_cputypes[] = {
-       "am33v1",
-       "am33v2",
-       "am34v1",
+       "am33-1",
+       "am33-2",
+       "am34-1",
+       "am33-3",
+       "am34-2",
        "unknown"
 };
 
@@ -123,6 +125,7 @@ void __init setup_arch(char **cmdline_p)
 
        cpu_init();
        unit_setup();
+       smp_init_cpus();
        parse_mem_cmdline(cmdline_p);
 
        init_mm.start_code = (unsigned long)&_text;
@@ -179,57 +182,55 @@ void __init setup_arch(char **cmdline_p)
 void __init cpu_init(void)
 {
        unsigned long cpurev = CPUREV, type;
-       unsigned long base, size;
 
        type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
        if (type > mn10300_known_cpus)
                type = mn10300_known_cpus;
 
-       printk(KERN_INFO "Matsushita %s, rev %ld\n",
+       printk(KERN_INFO "Panasonic %s, rev %ld\n",
               mn10300_cputypes[type],
               (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S);
 
-       /* determine the memory size and base from the memory controller regs */
-       memory_size = 0;
-
-       base = SDBASE(0);
-       if (base & SDBASE_CE) {
-               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
-               size = ~size + 1;
-               base &= SDBASE_CBA;
+       get_mem_info(&phys_memory_base, &memory_size);
+       phys_memory_end = phys_memory_base + memory_size;
 
-               printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
-               memory_size += size;
-               phys_memory_base = base;
-       }
+       fpu_init_state();
+}
 
-       base = SDBASE(1);
-       if (base & SDBASE_CE) {
-               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
-               size = ~size + 1;
-               base &= SDBASE_CBA;
+static struct cpu cpu_devices[NR_CPUS];
 
-               printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
-               memory_size += size;
-               if (phys_memory_base == 0)
-                       phys_memory_base = base;
-       }
+static int __init topology_init(void)
+{
+       int i;
 
-       phys_memory_end = phys_memory_base + memory_size;
+       for_each_present_cpu(i)
+               register_cpu(&cpu_devices[i], i);
 
-#ifdef CONFIG_FPU
-       fpu_init_state();
-#endif
+       return 0;
 }
 
+subsys_initcall(topology_init);
+
 /*
  * Get CPU information for use by the procfs.
  */
 static int show_cpuinfo(struct seq_file *m, void *v)
 {
+#ifdef CONFIG_SMP
+       struct mn10300_cpuinfo *c = v;
+       unsigned long cpu_id = c - cpu_data;
+       unsigned long cpurev = c->type, type, icachesz, dcachesz;
+#else  /* CONFIG_SMP */
+       unsigned long cpu_id = 0;
        unsigned long cpurev = CPUREV, type, icachesz, dcachesz;
+#endif /* CONFIG_SMP */
 
-       type = (CPUREV & CPUREV_TYPE) >> CPUREV_TYPE_S;
+#ifdef CONFIG_SMP
+       if (!cpu_online(cpu_id))
+               return 0;
+#endif
+
+       type = (cpurev & CPUREV_TYPE) >> CPUREV_TYPE_S;
        if (type > mn10300_known_cpus)
                type = mn10300_known_cpus;
 
@@ -244,13 +245,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                1024;
 
        seq_printf(m,
-                  "processor  : 0\n"
-                  "vendor_id  : Matsushita\n"
+                  "processor  : %ld\n"
+                  "vendor_id  : " PROCESSOR_VENDOR_NAME "\n"
                   "cpu core   : %s\n"
                   "cpu rev    : %lu\n"
                   "model name : " PROCESSOR_MODEL_NAME         "\n"
                   "icache size: %lu\n"
                   "dcache size: %lu\n",
+                  cpu_id,
                   mn10300_cputypes[type],
                   (cpurev & CPUREV_REVISION) >> CPUREV_REVISION_S,
                   icachesz,
@@ -262,8 +264,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
                   "bogomips   : %lu.%02lu\n\n",
                   MN10300_IOCLK / 1000000,
                   (MN10300_IOCLK / 10000) % 100,
+#ifdef CONFIG_SMP
+                  c->loops_per_jiffy / (500000 / HZ),
+                  (c->loops_per_jiffy / (5000 / HZ)) % 100
+#else  /* CONFIG_SMP */
                   loops_per_jiffy / (500000 / HZ),
                   (loops_per_jiffy / (5000 / HZ)) % 100
+#endif /* CONFIG_SMP */
                   );
 
        return 0;
index d4de05ab786464cd585e7f1f0e6ed1652ad3de1c..690f4e9507d77811ef58f75a57960b1b078189a5 100644 (file)
@@ -91,7 +91,7 @@ asmlinkage long sys_sigaction(int sig,
  */
 asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss)
 {
-       return do_sigaltstack(uss, uoss, __frame->sp);
+       return do_sigaltstack(uss, uoss, current_frame()->sp);
 }
 
 /*
@@ -156,10 +156,11 @@ badframe:
  */
 asmlinkage long sys_sigreturn(void)
 {
-       struct sigframe __user *frame = (struct sigframe __user *) __frame->sp;
+       struct sigframe __user *frame;
        sigset_t set;
        long d0;
 
+       frame = (struct sigframe __user *) current_frame()->sp;
        if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.oldmask))
@@ -176,7 +177,7 @@ asmlinkage long sys_sigreturn(void)
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       if (restore_sigcontext(__frame, &frame->sc, &d0))
+       if (restore_sigcontext(current_frame(), &frame->sc, &d0))
                goto badframe;
 
        return d0;
@@ -191,11 +192,11 @@ badframe:
  */
 asmlinkage long sys_rt_sigreturn(void)
 {
-       struct rt_sigframe __user *frame =
-               (struct rt_sigframe __user *) __frame->sp;
+       struct rt_sigframe __user *frame;
        sigset_t set;
-       unsigned long d0;
+       long d0;
 
+       frame = (struct rt_sigframe __user *) current_frame()->sp;
        if (verify_area(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
@@ -207,10 +208,11 @@ asmlinkage long sys_rt_sigreturn(void)
        recalc_sigpending();
        spin_unlock_irq(&current->sighand->siglock);
 
-       if (restore_sigcontext(__frame, &frame->uc.uc_mcontext, &d0))
+       if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
                goto badframe;
 
-       if (do_sigaltstack(&frame->uc.uc_stack, NULL, __frame->sp) == -EFAULT)
+       if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) ==
+           -EFAULT)
                goto badframe;
 
        return d0;
@@ -572,7 +574,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
 
        if (thread_info_flags & _TIF_NOTIFY_RESUME) {
                clear_thread_flag(TIF_NOTIFY_RESUME);
-               tracehook_notify_resume(__frame);
+               tracehook_notify_resume(current_frame());
                if (current->replacement_session_keyring)
                        key_replace_session_keyring();
        }
diff --git a/arch/mn10300/kernel/smp-low.S b/arch/mn10300/kernel/smp-low.S
new file mode 100644 (file)
index 0000000..72938ce
--- /dev/null
@@ -0,0 +1,97 @@
+/* SMP IPI low-level handler
+ *
+ * Copyright (C) 2006-2007 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/system.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <proc/smp-regs.h>
+#include <asm/asm-offsets.h>
+#include <asm/frame.inc>
+
+       .am33_2
+
+###############################################################################
+#
+# IPI interrupt handler
+#
+###############################################################################
+       .globl mn10300_low_ipi_handler
+mn10300_low_ipi_handler:
+       add     -4,sp
+       mov     d0,(sp)
+       movhu   (IAGR),d0
+       and     IAGR_GN,d0
+       lsr     0x2,d0
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       cmp     FLUSH_CACHE_IPI,d0
+       beq     mn10300_flush_cache_ipi
+#endif
+       cmp     SMP_BOOT_IRQ,d0
+       beq     mn10300_smp_boot_ipi
+       /* OTHERS */
+       mov     (sp),d0
+       add     4,sp
+#ifdef CONFIG_GDBSTUB
+       jmp     gdbstub_io_rx_handler
+#else
+       jmp     end
+#endif
+
+###############################################################################
+#
+# Cache flush IPI interrupt handler
+#
+###############################################################################
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+mn10300_flush_cache_ipi:
+       mov     (sp),d0
+       add     4,sp
+
+       /* FLUSH_CACHE_IPI */
+       add     -4,sp
+       SAVE_ALL
+       mov     GxICR_DETECT,d2
+       movbu   d2,(GxICR(FLUSH_CACHE_IPI))     # ACK the interrupt
+       movhu   (GxICR(FLUSH_CACHE_IPI)),d2
+       call    smp_cache_interrupt[],0
+       RESTORE_ALL
+       jmp     end
+#endif
+
+###############################################################################
+#
+# SMP boot CPU IPI interrupt handler
+#
+###############################################################################
+mn10300_smp_boot_ipi:
+       /* clear interrupt */
+       movhu   (GxICR(SMP_BOOT_IRQ)),d0
+       and     ~GxICR_REQUEST,d0
+       movhu   d0,(GxICR(SMP_BOOT_IRQ))
+       mov     (sp),d0
+       add     4,sp
+
+       # get stack
+       mov     (CPUID),a0
+       add     -1,a0
+       add     a0,a0
+       add     a0,a0
+       mov     (start_stack,a0),a0
+       mov     a0,sp
+       jmp     initialize_secondary
+
+
+# Jump here after RTI to suppress the icache lookahead
+end:
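In C terms, the stack set-up at the end of mn10300_smp_boot_ipi amounts to indexing start_stack[] (filled in by do_boot_cpu() in smp.c below) by the application processor's ID before jumping to initialize_secondary(); roughly:

    /* Sketch of the assembly above: AP number N (N >= 1) picks up the stack
     * recorded for it by the boot CPU. */
    static unsigned long ap_boot_stack(unsigned int cpuid)
    {
            return start_stack[cpuid - 1];  /* the boot CPU has no entry */
    }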
diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c
new file mode 100644 (file)
index 0000000..0dcd1c6
--- /dev/null
@@ -0,0 +1,1152 @@
+/* SMP support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+#include "internal.h"
+
+#ifdef CONFIG_HOTPLUG_CPU
+#include <linux/cpu.h>
+#include <asm/cacheflush.h>
+
+static unsigned long sleep_mode[NR_CPUS];
+
+static void run_sleep_cpu(unsigned int cpu);
+static void run_wakeup_cpu(unsigned int cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/*
+ * Debug Message function
+ */
+
+#undef DEBUG_SMP
+#ifdef DEBUG_SMP
+#define Dprintk(fmt, ...) printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#else
+#define Dprintk(fmt, ...) no_printk(KERN_DEBUG fmt, ##__VA_ARGS__)
+#endif
+
+/* timeout value in msec for smp_nmi_call_function. zero is no timeout. */
+#define        CALL_FUNCTION_NMI_IPI_TIMEOUT   0
+
+/*
+ * Structure and data for smp_nmi_call_function().
+ */
+struct nmi_call_data_struct {
+       smp_call_func_t func;
+       void            *info;
+       cpumask_t       started;
+       cpumask_t       finished;
+       int             wait;
+       char            size_alignment[0]
+       __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
+
+static DEFINE_SPINLOCK(smp_nmi_call_lock);
+static struct nmi_call_data_struct *nmi_call_data;
+
+/*
+ * Data structures and variables
+ */
+static cpumask_t cpu_callin_map;       /* Bitmask of callin CPUs */
+static cpumask_t cpu_callout_map;      /* Bitmask of callout CPUs */
+cpumask_t cpu_boot_map;                        /* Bitmask of boot APs */
+unsigned long start_stack[NR_CPUS - 1];
+
+/*
+ * Per CPU parameters
+ */
+struct mn10300_cpuinfo cpu_data[NR_CPUS] __cacheline_aligned;
+
+static int cpucount;                   /* The count of boot CPUs */
+static cpumask_t smp_commenced_mask;
+cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+
+/*
+ * Function Prototypes
+ */
+static int do_boot_cpu(int);
+static void smp_show_cpu_info(int cpu_id);
+static void smp_callin(void);
+static void smp_online(void);
+static void smp_store_cpu_info(int);
+static void smp_cpu_init(void);
+static void smp_tune_scheduling(void);
+static void send_IPI_mask(const cpumask_t *cpumask, int irq);
+static void init_ipi(void);
+
+/*
+ * IPI Initialization interrupt definitions
+ */
+static void mn10300_ipi_disable(unsigned int irq);
+static void mn10300_ipi_enable(unsigned int irq);
+static void mn10300_ipi_ack(unsigned int irq);
+static void mn10300_ipi_nop(unsigned int irq);
+
+static struct irq_chip mn10300_ipi_type = {
+       .name           = "cpu_ipi",
+       .disable        = mn10300_ipi_disable,
+       .enable         = mn10300_ipi_enable,
+       .ack            = mn10300_ipi_ack,
+       .eoi            = mn10300_ipi_nop
+};
+
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id);
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id);
+
+static struct irqaction reschedule_ipi = {
+       .handler        = smp_reschedule_interrupt,
+       .name           = "smp reschedule IPI"
+};
+static struct irqaction call_function_ipi = {
+       .handler        = smp_call_function_interrupt,
+       .name           = "smp call function IPI"
+};
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id);
+static struct irqaction local_timer_ipi = {
+       .handler        = smp_ipi_timer_interrupt,
+       .flags          = IRQF_DISABLED,
+       .name           = "smp local timer IPI"
+};
+#endif
+
+/**
+ * init_ipi - Initialise the IPI mechanism
+ */
+static void init_ipi(void)
+{
+       unsigned long flags;
+       u16 tmp16;
+
+       /* set up the reschedule IPI */
+       set_irq_chip_and_handler(RESCHEDULE_IPI,
+                                &mn10300_ipi_type, handle_percpu_irq);
+       setup_irq(RESCHEDULE_IPI, &reschedule_ipi);
+       set_intr_level(RESCHEDULE_IPI, RESCHEDULE_GxICR_LV);
+       mn10300_ipi_enable(RESCHEDULE_IPI);
+
+       /* set up the call function IPI */
+       set_irq_chip_and_handler(CALL_FUNC_SINGLE_IPI,
+                                &mn10300_ipi_type, handle_percpu_irq);
+       setup_irq(CALL_FUNC_SINGLE_IPI, &call_function_ipi);
+       set_intr_level(CALL_FUNC_SINGLE_IPI, CALL_FUNCTION_GxICR_LV);
+       mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+       /* set up the local timer IPI */
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+       set_irq_chip_and_handler(LOCAL_TIMER_IPI,
+                                &mn10300_ipi_type, handle_percpu_irq);
+       setup_irq(LOCAL_TIMER_IPI, &local_timer_ipi);
+       set_intr_level(LOCAL_TIMER_IPI, LOCAL_TIMER_GxICR_LV);
+       mn10300_ipi_enable(LOCAL_TIMER_IPI);
+#endif
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       /* set up the cache flush IPI */
+       flags = arch_local_cli_save();
+       __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV),
+                       mn10300_low_ipi_handler);
+       GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(FLUSH_CACHE_IPI);
+       arch_local_irq_restore(flags);
+#endif
+
+       /* set up the NMI call function IPI */
+       flags = arch_local_cli_save();
+       GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+       tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+       arch_local_irq_restore(flags);
+
+       /* set up the SMP boot IPI */
+       flags = arch_local_cli_save();
+       __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV),
+                       mn10300_low_ipi_handler);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_shutdown - Shut down handling of an IPI
+ * @irq: The IPI to be shut down.
+ */
+static void mn10300_ipi_shutdown(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+       tmp = GxICR(irq);
+
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_enable - Enable an IPI
+ * @irq: The IPI to be enabled.
+ */
+static void mn10300_ipi_enable(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       tmp = GxICR(irq);
+       GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE;
+       tmp = GxICR(irq);
+
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_disable - Disable an IPI
+ * @irq: The IPI to be disabled.
+ */
+static void mn10300_ipi_disable(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+
+       tmp = GxICR(irq);
+       GxICR(irq) = tmp & GxICR_LEVEL;
+       tmp = GxICR(irq);
+
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_ack - Acknowledge an IPI interrupt in the PIC
+ * @irq: The IPI to be acknowledged.
+ *
+ * Clear the interrupt detection flag for the IPI on the appropriate interrupt
+ * channel in the PIC.
+ */
+static void mn10300_ipi_ack(unsigned int irq)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       flags = arch_local_cli_save();
+       GxICR_u8(irq) = GxICR_DETECT;
+       tmp = GxICR(irq);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * mn10300_ipi_nop - Dummy IPI action
+ * @irq: The IPI to be acted upon.
+ */
+static void mn10300_ipi_nop(unsigned int irq)
+{
+}
+
+/**
+ * send_IPI_mask - Send IPIs to all CPUs in list
+ * @cpumask: The list of CPUs to target.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all the CPUs in the list, not waiting for them to
+ * finish before returning.  The caller is responsible for synchronisation if
+ * that is needed.
+ */
+static void send_IPI_mask(const cpumask_t *cpumask, int irq)
+{
+       int i;
+       u16 tmp;
+
+       for (i = 0; i < NR_CPUS; i++) {
+               if (cpu_isset(i, *cpumask)) {
+                       /* send IPI */
+                       tmp = CROSS_GxICR(irq, i);
+                       CROSS_GxICR(irq, i) =
+                               tmp | GxICR_REQUEST | GxICR_DETECT;
+                       tmp = CROSS_GxICR(irq, i); /* flush write buffer */
+               }
+       }
+}
+
+/**
+ * send_IPI_self - Send an IPI to this CPU.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to the current CPU.
+ */
+void send_IPI_self(int irq)
+{
+       send_IPI_mask(cpumask_of(smp_processor_id()), irq);
+}
+
+/**
+ * send_IPI_allbutself - Send IPIs to all the other CPUs.
+ * @irq: The IPI request to be sent.
+ *
+ * Send the specified IPI to all CPUs in the system barring the current one,
+ * not waiting for them to finish before returning.  The caller is responsible
+ * for synchronisation if that is needed.
+ */
+void send_IPI_allbutself(int irq)
+{
+       cpumask_t cpumask;
+
+       cpumask = cpu_online_map;
+       cpu_clear(smp_processor_id(), cpumask);
+       send_IPI_mask(&cpumask, irq);
+}
+
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
+{
+       BUG();
+       /*send_IPI_mask(mask, CALL_FUNCTION_IPI);*/
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+       send_IPI_mask(cpumask_of(cpu), CALL_FUNC_SINGLE_IPI);
+}
+
+/**
+ * smp_send_reschedule - Send reschedule IPI to a CPU
+ * @cpu: The CPU to target.
+ */
+void smp_send_reschedule(int cpu)
+{
+       send_IPI_mask(cpumask_of(cpu), RESCHEDULE_IPI);
+}
+
+/**
+ * smp_nmi_call_function - Send a call function NMI IPI to all CPUs
+ * @func: The function to ask to be run.
+ * @info: The context data to pass to that function.
+ * @wait: If true, wait (atomically) until function is run on all CPUs.
+ *
+ * Send a non-maskable request to all CPUs in the system, requesting them to
+ * run the specified function with the given context data, and, potentially, to
+ * wait for completion of that function on all CPUs.
+ *
+ * Returns 0 if successful, -ETIMEDOUT if we were asked to wait, but hit the
+ * timeout.
+ */
+int smp_nmi_call_function(smp_call_func_t func, void *info, int wait)
+{
+       struct nmi_call_data_struct data;
+       unsigned long flags;
+       unsigned int cnt;
+       int cpus, ret = 0;
+
+       cpus = num_online_cpus() - 1;
+       if (cpus < 1)
+               return 0;
+
+       data.func = func;
+       data.info = info;
+       data.started = cpu_online_map;
+       cpu_clear(smp_processor_id(), data.started);
+       data.wait = wait;
+       if (wait)
+               data.finished = data.started;
+
+       spin_lock_irqsave(&smp_nmi_call_lock, flags);
+       nmi_call_data = &data;
+       smp_mb();
+
+       /* Send a message to all other CPUs and wait for them to respond */
+       send_IPI_allbutself(CALL_FUNCTION_NMI_IPI);
+
+       /* Wait for response */
+       if (CALL_FUNCTION_NMI_IPI_TIMEOUT > 0) {
+               for (cnt = 0;
+                    cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+                            !cpus_empty(data.started);
+                    cnt++)
+                       mdelay(1);
+
+               if (wait && cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT) {
+                       for (cnt = 0;
+                            cnt < CALL_FUNCTION_NMI_IPI_TIMEOUT &&
+                                    !cpus_empty(data.finished);
+                            cnt++)
+                               mdelay(1);
+               }
+
+               if (cnt >= CALL_FUNCTION_NMI_IPI_TIMEOUT)
+                       ret = -ETIMEDOUT;
+
+       } else {
+               /* If timeout value is zero, wait until cpumask has been
+                * cleared */
+               while (!cpus_empty(data.started))
+                       barrier();
+               if (wait)
+                       while (!cpus_empty(data.finished))
+                               barrier();
+       }
+
+       spin_unlock_irqrestore(&smp_nmi_call_lock, flags);
+       return ret;
+}
+
+/**
+ * stop_this_cpu - Callback to stop a CPU.
+ * @unused: Callback context (ignored).
+ */
+void stop_this_cpu(void *unused)
+{
+       static volatile int stopflag;
+       unsigned long flags;
+
+#ifdef CONFIG_GDBSTUB
+       /* If another CPU is single-stepping through smp_send_stop(),
+        * clear procindebug to avoid a deadlock.
+        */
+       atomic_set(&procindebug[smp_processor_id()], 0);
+#endif /* CONFIG_GDBSTUB */
+
+       flags = arch_local_cli_save();
+       cpu_clear(smp_processor_id(), cpu_online_map);
+
+       while (!stopflag)
+               cpu_relax();
+
+       cpu_set(smp_processor_id(), cpu_online_map);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_send_stop - Send a stop request to all CPUs.
+ */
+void smp_send_stop(void)
+{
+       smp_nmi_call_function(stop_this_cpu, NULL, 0);
+}
+
+/**
+ * smp_reschedule_interrupt - Reschedule IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * We need do nothing here, since the scheduling will be effected on our way
+ * back through entry.S.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id)
+{
+       /* do nothing */
+       return IRQ_HANDLED;
+}
+
+/**
+ * smp_call_function_interrupt - Call function IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id)
+{
+       /* generic_smp_call_function_interrupt(); */
+       generic_smp_call_function_single_interrupt();
+       return IRQ_HANDLED;
+}
+
+/**
+ * smp_nmi_call_function_interrupt - Non-maskable call function IPI handler
+ */
+void smp_nmi_call_function_interrupt(void)
+{
+       smp_call_func_t func = nmi_call_data->func;
+       void *info = nmi_call_data->info;
+       int wait = nmi_call_data->wait;
+
+       /* Notify the initiating CPU that I've grabbed the data and am about to
+        * execute the function
+        */
+       smp_mb();
+       cpu_clear(smp_processor_id(), nmi_call_data->started);
+       (*func)(info);
+
+       if (wait) {
+               smp_mb();
+               cpu_clear(smp_processor_id(), nmi_call_data->finished);
+       }
+}
+
+#if !defined(CONFIG_GENERIC_CLOCKEVENTS) || \
+    defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/**
+ * smp_ipi_timer_interrupt - Local timer IPI handler
+ * @irq: The interrupt number.
+ * @dev_id: The device ID.
+ *
+ * Returns IRQ_HANDLED to indicate we handled the interrupt successfully.
+ */
+static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id)
+{
+       return local_timer_interrupt();
+}
+#endif
+
+void __init smp_init_cpus(void)
+{
+       int i;
+       for (i = 0; i < NR_CPUS; i++) {
+               set_cpu_possible(i, true);
+               set_cpu_present(i, true);
+       }
+}
+
+/**
+ * smp_cpu_init - Initialise AP in start_secondary.
+ *
+ * For this Application Processor, set up init_mm, initialise the FPU and
+ * configure interrupt levels 0-6.
+ */
+static void __init smp_cpu_init(void)
+{
+       unsigned long flags;
+       int cpu_id = smp_processor_id();
+       u16 tmp16;
+
+       if (test_and_set_bit(cpu_id, &cpu_initialized)) {
+               printk(KERN_WARNING "CPU#%d already initialized!\n", cpu_id);
+               for (;;)
+                       local_irq_enable();
+       }
+       printk(KERN_INFO "Initializing CPU#%d\n", cpu_id);
+
+       atomic_inc(&init_mm.mm_count);
+       current->active_mm = &init_mm;
+       BUG_ON(current->mm);
+
+       enter_lazy_tlb(&init_mm, current);
+
+       /* Force FPU initialization */
+       clear_using_fpu(current);
+
+       GxICR(CALL_FUNC_SINGLE_IPI) = CALL_FUNCTION_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(CALL_FUNC_SINGLE_IPI);
+
+       GxICR(LOCAL_TIMER_IPI) = LOCAL_TIMER_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(LOCAL_TIMER_IPI);
+
+       GxICR(RESCHEDULE_IPI) = RESCHEDULE_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(RESCHEDULE_IPI);
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+       GxICR(FLUSH_CACHE_IPI) = FLUSH_CACHE_GxICR_LV | GxICR_DETECT;
+       mn10300_ipi_enable(FLUSH_CACHE_IPI);
+#endif
+
+       mn10300_ipi_shutdown(SMP_BOOT_IRQ);
+
+       /* Set up the non-maskable call function IPI */
+       flags = arch_local_cli_save();
+       GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+       tmp16 = GxICR(CALL_FUNCTION_NMI_IPI);
+       arch_local_irq_restore(flags);
+}
+
+/**
+ * smp_prepare_cpu_init - Initialise CPU in startup_secondary
+ *
+ * Configure interrupt levels 0-6 and initialise the gdbstub ICR.
+ */
+void smp_prepare_cpu_init(void)
+{
+       int loop;
+
+       /* Set the interrupt vector registers */
+       IVAR0 = EXCEP_IRQ_LEVEL0;
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+       IVAR6 = EXCEP_IRQ_LEVEL6;
+
+       /* Disable all interrupts and set to priority 6 (lowest) */
+       for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+               GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+
+#ifdef CONFIG_GDBSTUB
+       /* initialise GDB-stub */
+       do {
+               unsigned long flags;
+               u16 tmp16;
+
+               flags = arch_local_cli_save();
+               GxICR(GDB_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT;
+               tmp16 = GxICR(GDB_NMI_IPI);
+               arch_local_irq_restore(flags);
+       } while (0);
+#endif
+}
+
+/**
+ * start_secondary - Activate a secondary CPU (AP)
+ * @unused: Thread parameter (ignored).
+ */
+int __init start_secondary(void *unused)
+{
+       smp_cpu_init();
+       smp_callin();
+       while (!cpu_isset(smp_processor_id(), smp_commenced_mask))
+               cpu_relax();
+
+       local_flush_tlb();
+       preempt_disable();
+       smp_online();
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+       init_clockevents();
+#endif
+       cpu_idle();
+       return 0;
+}
+
+/**
+ * smp_prepare_cpus - Boot up secondary CPUs (APs)
+ * @max_cpus: Maximum number of CPUs to boot.
+ *
+ * Call do_boot_cpu, and boot up APs.
+ */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+       int phy_id;
+
+       /* Setup boot CPU information */
+       smp_store_cpu_info(0);
+       smp_tune_scheduling();
+
+       init_ipi();
+
+       /* If SMP should be disabled, then finish */
+       if (max_cpus == 0) {
+               printk(KERN_INFO "SMP mode deactivated.\n");
+               goto smp_done;
+       }
+
+       /* Boot secondary CPUs (for which phy_id > 0) */
+       for (phy_id = 0; phy_id < NR_CPUS; phy_id++) {
+               /* Don't boot primary CPU */
+               if (max_cpus <= cpucount + 1)
+                       continue;
+               if (phy_id != 0)
+                       do_boot_cpu(phy_id);
+               set_cpu_possible(phy_id, true);
+               smp_show_cpu_info(phy_id);
+       }
+
+smp_done:
+       Dprintk("Boot done.\n");
+}
+
+/**
+ * smp_store_cpu_info - Save a CPU's information
+ * @cpu: The CPU to save for.
+ *
+ * Copy boot_cpu_data and record loops_per_jiffy for the specified CPU.
+ */
+static void __init smp_store_cpu_info(int cpu)
+{
+       struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+       *ci = boot_cpu_data;
+       ci->loops_per_jiffy = loops_per_jiffy;
+       ci->type = CPUREV;
+}
+
+/**
+ * smp_tune_scheduling - Set time slice value
+ *
+ * Nothing to do here.
+ */
+static void __init smp_tune_scheduling(void)
+{
+}
+
+/**
+ * do_boot_cpu - Boot up one CPU
+ * @phy_id: Physical ID of CPU to boot.
+ *
+ * Send an IPI to a secondary CPU to boot it.  Returns 0 on success, 1
+ * otherwise.
+ */
+static int __init do_boot_cpu(int phy_id)
+{
+       struct task_struct *idle;
+       unsigned long send_status, callin_status;
+       int timeout, cpu_id;
+
+       send_status = GxICR_REQUEST;
+       callin_status = 0;
+       timeout = 0;
+       cpu_id = phy_id;
+
+       cpucount++;
+
+       /* Create idle thread for this CPU */
+       idle = fork_idle(cpu_id);
+       if (IS_ERR(idle))
+               panic("Failed fork for CPU#%d.", cpu_id);
+
+       idle->thread.pc = (unsigned long)start_secondary;
+
+       printk(KERN_NOTICE "Booting CPU#%d\n", cpu_id);
+       start_stack[cpu_id - 1] = idle->thread.sp;
+
+       task_thread_info(idle)->cpu = cpu_id;
+
+       /* Send boot IPI to AP */
+       send_IPI_mask(cpumask_of(phy_id), SMP_BOOT_IRQ);
+
+       Dprintk("Waiting for send to finish...\n");
+
+       /* Wait up to 100ms for the AP to acknowledge the boot IPI */
+       do {
+               udelay(1000);
+               send_status =
+                       CROSS_GxICR(SMP_BOOT_IRQ, phy_id) & GxICR_REQUEST;
+       } while (send_status == GxICR_REQUEST && timeout++ < 100);
+
+       Dprintk("Waiting for cpu_callin_map.\n");
+
+       if (send_status == 0) {
+               /* Allow AP to start initializing */
+               cpu_set(cpu_id, cpu_callout_map);
+
+               /* Wait up to 5s for the AP to set its bit in cpu_callin_map */
+               timeout = 0;
+               do {
+                       udelay(1000);
+                       callin_status = cpu_isset(cpu_id, cpu_callin_map);
+               } while (callin_status == 0 && timeout++ < 5000);
+
+               if (callin_status == 0)
+                       Dprintk("Not responding.\n");
+       } else {
+               printk(KERN_WARNING "IPI not delivered.\n");
+       }
+
+       if (send_status == GxICR_REQUEST || callin_status == 0) {
+               cpu_clear(cpu_id, cpu_callout_map);
+               cpu_clear(cpu_id, cpu_callin_map);
+               cpu_clear(cpu_id, cpu_initialized);
+               cpucount--;
+               return 1;
+       }
+       return 0;
+}
+
+/**
+ * smp_show_cpu_info - Show SMP CPU information
+ * @cpu: The CPU of interest.
+ */
+static void __init smp_show_cpu_info(int cpu)
+{
+       struct mn10300_cpuinfo *ci = &cpu_data[cpu];
+
+       printk(KERN_INFO
+              "CPU#%d : ioclk speed: %lu.%02luMHz : bogomips : %lu.%02lu\n",
+              cpu,
+              MN10300_IOCLK / 1000000,
+              (MN10300_IOCLK / 10000) % 100,
+              ci->loops_per_jiffy / (500000 / HZ),
+              (ci->loops_per_jiffy / (5000 / HZ)) % 100);
+}
+
+/**
+ * smp_callin - Set the current CPU's bit in cpu_callin_map
+ */
+static void __init smp_callin(void)
+{
+       unsigned long timeout;
+       int cpu;
+
+       cpu = smp_processor_id();
+       timeout = jiffies + (2 * HZ);
+
+       if (cpu_isset(cpu, cpu_callin_map)) {
+               printk(KERN_ERR "CPU#%d already present.\n", cpu);
+               BUG();
+       }
+       Dprintk("CPU#%d waiting for CALLOUT\n", cpu);
+
+       /* Wait up to 2s for the boot CPU to set our callout bit */
+       while (time_before(jiffies, timeout)) {
+               if (cpu_isset(cpu, cpu_callout_map))
+                       break;
+               cpu_relax();
+       }
+
+       if (!time_before(jiffies, timeout)) {
+               printk(KERN_ERR
+                      "BUG: CPU#%d started up but did not get a callout!\n",
+                      cpu);
+               BUG();
+       }
+
+#ifdef CONFIG_CALIBRATE_DELAY
+       calibrate_delay();              /* Get our bogomips */
+#endif
+
+       /* Save our processor parameters */
+       smp_store_cpu_info(cpu);
+
+       /* Allow the boot processor to continue */
+       cpu_set(cpu, cpu_callin_map);
+}
+
+/**
+ * smp_online - Set cpu_online_map
+ */
+static void __init smp_online(void)
+{
+       int cpu;
+
+       cpu = smp_processor_id();
+
+       local_irq_enable();
+
+       cpu_set(cpu, cpu_online_map);
+       smp_wmb();
+}
+
+/**
+ * smp_cpus_done - Hook called once all CPUs have been brought up
+ * @max_cpus: Maximum CPU count.
+ *
+ * Do nothing.
+ */
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * smp_prepare_boot_cpu - Set up the boot processor.
+ *
+ * Set the cpu_callout_map and cpu_callin_map bits of the boot processor
+ * (CPU 0) and record its CPU number in its thread_info.
+ */
+void __devinit smp_prepare_boot_cpu(void)
+{
+       cpu_set(0, cpu_callout_map);
+       cpu_set(0, cpu_callin_map);
+       current_thread_info()->cpu = 0;
+}
+
+/*
+ * initialize_secondary - Initialise a secondary CPU (Application Processor).
+ *
+ * Set SP register and jump to thread's PC address.
+ */
+void initialize_secondary(void)
+{
+       asm volatile (
+               "mov    %0,sp   \n"
+               "jmp    (%1)    \n"
+               :
+               : "a"(current->thread.sp), "a"(current->thread.pc));
+}
+
+/**
+ * __cpu_up - Set smp_commenced_mask for the nominated CPU
+ * @cpu: The target CPU.
+ */
+int __devinit __cpu_up(unsigned int cpu)
+{
+       int timeout;
+
+#ifdef CONFIG_HOTPLUG_CPU
+       if (num_online_cpus() == 1)
+               disable_hlt();
+       if (sleep_mode[cpu])
+               run_wakeup_cpu(cpu);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+       cpu_set(cpu, smp_commenced_mask);
+
+       /* Wait up to 5s for the CPU to come online */
+       for (timeout = 0 ; timeout < 5000 ; timeout++) {
+               if (cpu_isset(cpu, cpu_online_map))
+                       break;
+               udelay(1000);
+       }
+
+       BUG_ON(!cpu_isset(cpu, cpu_online_map));
+       return 0;
+}
+
+/**
+ * setup_profiling_timer - Set up the profiling timer
+ * @multiplier: The frequency multiplier to use
+ *
+ * The frequency of the profiling timer can be changed by writing a multiplier
+ * value into /proc/profile.  This is not supported here, so -EINVAL is
+ * returned.
+ */
+int setup_profiling_timer(unsigned int multiplier)
+{
+       return -EINVAL;
+}
+
+/*
+ * CPU hotplug routines
+ */
+#ifdef CONFIG_HOTPLUG_CPU
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+       int cpu, ret;
+
+       for_each_cpu(cpu) {
+               ret = register_cpu(&per_cpu(cpu_devices, cpu), cpu, NULL);
+               if (ret)
+                       printk(KERN_WARNING
+                              "topology_init: register_cpu %d failed (%d)\n",
+                              cpu, ret);
+       }
+       return 0;
+}
+
+subsys_initcall(topology_init);
+
+int __cpu_disable(void)
+{
+       int cpu = smp_processor_id();
+       if (cpu == 0)
+               return -EBUSY;
+
+       migrate_irqs();
+       cpu_clear(cpu, current->active_mm->cpu_vm_mask);
+       return 0;
+}
+
+void __cpu_die(unsigned int cpu)
+{
+       run_sleep_cpu(cpu);
+
+       if (num_online_cpus() == 1)
+               enable_hlt();
+}
+
+#ifdef CONFIG_MN10300_CACHE_ENABLED
+static inline void hotplug_cpu_disable_cache(void)
+{
+       int tmp;
+       asm volatile(
+               "       movhu   (%1),%0 \n"
+               "       and     %2,%0   \n"
+               "       movhu   %0,(%1) \n"
+               "1:     movhu   (%1),%0 \n"
+               "       btst    %3,%0   \n"
+               "       bne     1b      \n"
+               : "=&r"(tmp)
+               : "a"(&CHCTR),
+                 "i"(~(CHCTR_ICEN | CHCTR_DCEN)),
+                 "i"(CHCTR_ICBUSY | CHCTR_DCBUSY)
+               : "memory", "cc");
+}
+
+static inline void hotplug_cpu_enable_cache(void)
+{
+       int tmp;
+       asm volatile(
+               "movhu  (%1),%0 \n"
+               "or     %2,%0   \n"
+               "movhu  %0,(%1) \n"
+               : "=&r"(tmp)
+               : "a"(&CHCTR),
+                 "i"(CHCTR_ICEN | CHCTR_DCEN)
+               : "memory", "cc");
+}
+
+static inline void hotplug_cpu_invalidate_cache(void)
+{
+       int tmp;
+       asm volatile (
+               "movhu  (%1),%0 \n"
+               "or     %2,%0   \n"
+               "movhu  %0,(%1) \n"
+               : "=&r"(tmp)
+               : "a"(&CHCTR),
+                 "i"(CHCTR_ICINV | CHCTR_DCINV)
+               : "cc");
+}
+
+#else /* CONFIG_MN10300_CACHE_ENABLED */
+#define hotplug_cpu_disable_cache()    do {} while (0)
+#define hotplug_cpu_enable_cache()     do {} while (0)
+#define hotplug_cpu_invalidate_cache() do {} while (0)
+#endif /* CONFIG_MN10300_CACHE_ENABLED */
+
+/**
+ * hotplug_cpu_nmi_call_function - Call a function on other CPUs for hotplug
+ * @cpumask: List of target CPUs.
+ * @func: The function to call on those CPUs.
+ * @info: The context data for the function to be called.
+ * @wait: Whether to wait for the calls to complete.
+ *
+ * Non-maskably call a function on another CPU for hotplug purposes.
+ *
+ * This function must be called with maskable interrupts disabled.
+ */
+static int hotplug_cpu_nmi_call_function(cpumask_t cpumask,
+                                        smp_call_func_t func, void *info,
+                                        int wait)
+{
+       /*
+        * The address and size of nmi_call_func_mask_data need to be
+        * aligned to L1_CACHE_BYTES.
+        */
+       static struct nmi_call_data_struct nmi_call_func_mask_data
+               __cacheline_aligned;
+       unsigned long start, end;
+
+       start = (unsigned long)&nmi_call_func_mask_data;
+       end = start + sizeof(struct nmi_call_data_struct);
+
+       nmi_call_func_mask_data.func = func;
+       nmi_call_func_mask_data.info = info;
+       nmi_call_func_mask_data.started = cpumask;
+       nmi_call_func_mask_data.wait = wait;
+       if (wait)
+               nmi_call_func_mask_data.finished = cpumask;
+
+       spin_lock(&smp_nmi_call_lock);
+       nmi_call_data = &nmi_call_func_mask_data;
+       mn10300_local_dcache_flush_range(start, end);
+       smp_wmb();
+
+       send_IPI_mask(cpumask, CALL_FUNCTION_NMI_IPI);
+
+       do {
+               mn10300_local_dcache_inv_range(start, end);
+               barrier();
+       } while (!cpus_empty(nmi_call_func_mask_data.started));
+
+       if (wait) {
+               do {
+                       mn10300_local_dcache_inv_range(start, end);
+                       barrier();
+               } while (!cpus_empty(nmi_call_func_mask_data.finished));
+       }
+
+       spin_unlock(&smp_nmi_call_lock);
+       return 0;
+}
+
+static void restart_wakeup_cpu(void)
+{
+       unsigned int cpu = smp_processor_id();
+
+       cpu_set(cpu, cpu_callin_map);
+       local_flush_tlb();
+       cpu_set(cpu, cpu_online_map);
+       smp_wmb();
+}
+
+static void prepare_sleep_cpu(void *unused)
+{
+       sleep_mode[smp_processor_id()] = 1;
+       smp_mb();
+       mn10300_local_dcache_flush_inv();
+       hotplug_cpu_disable_cache();
+       hotplug_cpu_invalidate_cache();
+}
+
+/* When this function is called, IE=0 and NMID=0. */
+static void sleep_cpu(void *unused)
+{
+       unsigned int cpu_id = smp_processor_id();
+       /*
+        * The CALL_FUNCTION_NMI_IPI for wakeup_cpu() must not be requested
+        * before this CPU goes into SLEEP mode.
+        */
+       do {
+               smp_mb();
+               __sleep_cpu();
+       } while (sleep_mode[cpu_id]);
+       restart_wakeup_cpu();
+}
+
+static void run_sleep_cpu(unsigned int cpu)
+{
+       unsigned long flags;
+       cpumask_t cpumask = cpumask_of(cpu);
+
+       flags = arch_local_cli_save();
+       hotplug_cpu_nmi_call_function(cpumask, prepare_sleep_cpu, NULL, 1);
+       hotplug_cpu_nmi_call_function(cpumask, sleep_cpu, NULL, 0);
+       udelay(1);              /* delay for the cpu to sleep. */
+       arch_local_irq_restore(flags);
+}
+
+static void wakeup_cpu(void)
+{
+       hotplug_cpu_invalidate_cache();
+       hotplug_cpu_enable_cache();
+       smp_mb();
+       sleep_mode[smp_processor_id()] = 0;
+}
+
+static void run_wakeup_cpu(unsigned int cpu)
+{
+       unsigned long flags;
+
+       flags = arch_local_cli_save();
+#if NR_CPUS == 2
+       mn10300_local_dcache_flush_inv();
+#else
+       /*
+        * Before waking up the CPU, all online CPUs must stop and flush
+        * their D-caches so that global data is visible to it.
+        */
+#error NR_CPUS > 2 is not supported when CONFIG_HOTPLUG_CPU=y
+#endif
+       hotplug_cpu_nmi_call_function(cpumask_of(cpu), wakeup_cpu, NULL, 1);
+       arch_local_irq_restore(flags);
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
index 630aad71b9461513aa0db71445d9fc1adc2f4ed6..9074d0fb8788fbc5187d6d77b026a271c91050f5 100644 (file)
@@ -15,6 +15,9 @@
 #include <linux/linkage.h>
 #include <asm/thread_info.h>
 #include <asm/cpu-regs.h>
+#ifdef CONFIG_SMP
+#include <proc/smp-regs.h>
+#endif /* CONFIG_SMP */
 
        .text
 
@@ -35,8 +38,6 @@ ENTRY(__switch_to)
        mov     d1,a1
 
        # save prev context
-       mov     (__frame),d0
-       mov     d0,(THREAD_FRAME,a0)
        mov     __switch_back,d0
        mov     d0,(THREAD_PC,a0)
        mov     sp,a2
@@ -58,8 +59,6 @@ ENTRY(__switch_to)
        mov     a2,e2
 #endif
 
-       mov     (THREAD_FRAME,a1),a2
-       mov     a2,(__frame)
        mov     (THREAD_PC,a1),a2
        mov     d2,d0                   # for ret_from_fork
        mov     d0,a0                   # for __switch_to
index 8f7f6d22783d5065a61bb8bb4636c776b5892e5f..f860a340acc920e0565f4d341fa263b5c8ea49a5 100644 (file)
 #include <linux/smp.h>
 #include <linux/profile.h>
 #include <linux/cnt32_to_63.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <asm/irq.h>
 #include <asm/div64.h>
 #include <asm/processor.h>
 #include <asm/intctl-regs.h>
 #include <asm/rtc.h>
-
-#ifdef CONFIG_MN10300_RTC
-unsigned long mn10300_ioclk;           /* system I/O clock frequency */
-unsigned long mn10300_iobclk;          /* system I/O clock frequency */
-unsigned long mn10300_tsc_per_HZ;      /* number of ioclks per jiffy */
-#endif /* CONFIG_MN10300_RTC */
+#include "internal.h"
 
 static unsigned long mn10300_last_tsc; /* time-stamp counter at last time
                                         * interrupt occurred */
 
-static irqreturn_t timer_interrupt(int irq, void *dev_id);
-
-static struct irqaction timer_irq = {
-       .handler        = timer_interrupt,
-       .flags          = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
-       .name           = "timer",
-};
-
 static unsigned long sched_clock_multiplier;
 
 /*
@@ -54,9 +43,12 @@ unsigned long long sched_clock(void)
        unsigned long tsc, tmp;
        unsigned product[3]; /* 96-bit intermediate value */
 
+       /* cnt32_to_63() is not safe with preemption */
+       preempt_disable();
+
        /* read the TSC value
         */
-       tsc = 0 - get_cycles(); /* get_cycles() counts down */
+       tsc = get_cycles();
 
        /* expand to 64-bits.
         * - sched_clock() must be called once a minute or better or the
@@ -64,6 +56,8 @@ unsigned long long sched_clock(void)
         */
        tsc64.ll = cnt32_to_63(tsc) & 0x7fffffffffffffffULL;
 
+       preempt_enable();
+
        /* scale the 64-bit TSC value to a nanosecond value via a 96-bit
         * intermediate
         */
@@ -90,6 +84,20 @@ static void __init mn10300_sched_clock_init(void)
                __muldiv64u(NSEC_PER_SEC, 1 << 16, MN10300_TSCCLK);
 }
 
+/**
+ * local_timer_interrupt - Local timer interrupt handler
+ *
+ * Handle local timer interrupts for this CPU.  They may have been propagated
+ * to this CPU from the CPU that actually gets them by way of an IPI.
+ */
+irqreturn_t local_timer_interrupt(void)
+{
+       profile_tick(CPU_PROFILING);
+       update_process_times(user_mode(get_irq_regs()));
+       return IRQ_HANDLED;
+}
+
+#ifndef CONFIG_GENERIC_TIME
 /*
  * advance the kernel's time keeping clocks (xtime and jiffies)
  * - we use Timer 0 & 1 cascaded as a clock to nudge us the next time
@@ -98,27 +106,73 @@ static void __init mn10300_sched_clock_init(void)
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
        unsigned tsc, elapse;
+       irqreturn_t ret;
 
        write_seqlock(&xtime_lock);
 
        while (tsc = get_cycles(),
-              elapse = mn10300_last_tsc - tsc, /* time elapsed since last
+              elapse = tsc - mn10300_last_tsc, /* time elapsed since last
                                                 * tick */
               elapse > MN10300_TSC_PER_HZ
               ) {
-               mn10300_last_tsc -= MN10300_TSC_PER_HZ;
+               mn10300_last_tsc += MN10300_TSC_PER_HZ;
 
                /* advance the kernel's time tracking system */
-               profile_tick(CPU_PROFILING);
                do_timer(1);
        }
 
        write_sequnlock(&xtime_lock);
 
-       update_process_times(user_mode(get_irq_regs()));
+       ret = local_timer_interrupt();
+#ifdef CONFIG_SMP
+       send_IPI_allbutself(LOCAL_TIMER_IPI);
+#endif
+       return ret;
+}
 
-       return IRQ_HANDLED;
+static struct irqaction timer_irq = {
+       .handler        = timer_interrupt,
+       .flags          = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER,
+       .name           = "timer",
+};
+#endif /* !CONFIG_GENERIC_TIME */
+
+#ifdef CONFIG_CSRC_MN10300
+void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
+{
+       u64 temp;
+       u32 shift;
+
+       /* Find a shift value */
+       for (shift = 32; shift > 0; shift--) {
+               temp = (u64) NSEC_PER_SEC << shift;
+               do_div(temp, clock);
+               if ((temp >> 32) == 0)
+                       break;
+       }
+       cs->shift = shift;
+       cs->mult = (u32) temp;
 }
+#endif
+
+#ifdef CONFIG_CEVT_MN10300
+void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
+                                   unsigned int clock)
+{
+       u64 temp;
+       u32 shift;
+
+       /* Find a shift value */
+       for (shift = 32; shift > 0; shift--) {
+               temp = (u64) clock << shift;
+               do_div(temp, NSEC_PER_SEC);
+               if ((temp >> 32) == 0)
+                       break;
+       }
+       cd->shift = shift;
+       cd->mult = (u32) temp;
+}
+#endif
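
The two helpers above compute a fixed-point (mult, shift) pair so that the timekeeping core can convert between clock cycles and nanoseconds with one multiply and one shift: clocksource_set_clock() sizes mult for the cycles-to-ns direction, while clockevent_set_clock() sizes it for the inverse ns-to-cycles direction used when programming the timer. As an illustrative sketch only (not part of the patch; the 33 MHz clock rate is an assumed example value), the pair is applied roughly like this:

	/*
	 * Illustrative sketch only -- not part of the patch.  Shows how a
	 * (mult, shift) pair computed as above is typically applied.  The
	 * 33 MHz clock rate is an assumed example value.
	 */
	#include <stdint.h>
	#include <stdio.h>

	/* clocksource direction: cycles -> nanoseconds */
	static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
	{
		return (cycles * mult) >> shift;
	}

	/* clockevent direction: nanoseconds -> cycles, for programming the timer */
	static uint64_t ns_to_cycles(uint64_t ns, uint32_t mult, uint32_t shift)
	{
		return (ns * mult) >> shift;
	}

	int main(void)
	{
		uint32_t clock = 33000000;	/* assumed I/O clock rate */
		uint32_t shift = 24;		/* chosen so both mults fit in 32 bits */
		uint32_t cs_mult = (uint32_t)(((uint64_t)1000000000 << shift) / clock);
		uint32_t ce_mult = (uint32_t)(((uint64_t)clock << shift) / 1000000000);

		/* One second's worth of cycles converts to ~1e9 ns ... */
		printf("%llu ns\n",
		       (unsigned long long)cycles_to_ns(clock, cs_mult, shift));
		/* ... and 1 ms converts back to ~33000 cycles. */
		printf("%llu cycles\n",
		       (unsigned long long)ns_to_cycles(1000000, ce_mult, shift));
		return 0;
	}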
 
 /*
  * initialise the various timers used by the main part of the kernel
@@ -131,21 +185,25 @@ void __init time_init(void)
         */
        TMPSCNT |= TMPSCNT_ENABLE;
 
+#ifdef CONFIG_GENERIC_TIME
+       init_clocksource();
+#else
        startup_timestamp_counter();
+#endif
 
        printk(KERN_INFO
               "timestamp counter I/O clock running at %lu.%02lu"
               " (calibrated against RTC)\n",
               MN10300_TSCCLK / 1000000, (MN10300_TSCCLK / 10000) % 100);
 
-       mn10300_last_tsc = TMTSCBC;
-
-       /* use timer 0 & 1 cascaded to tick at as close to HZ as possible */
-       setup_irq(TMJCIRQ, &timer_irq);
+       mn10300_last_tsc = read_timestamp_counter();
 
-       set_intr_level(TMJCIRQ, TMJCICR_LEVEL);
-
-       startup_jiffies_counter();
+#ifdef CONFIG_GENERIC_CLOCKEVENTS
+       init_clockevents();
+#else
+       reload_jiffies_counter(MN10300_JC_PER_HZ - 1);
+       setup_jiffies_interrupt(TMJCIRQ, &timer_irq, CONFIG_TIMER_IRQ_LEVEL);
+#endif
 
 #ifdef CONFIG_MN10300_WD_TIMER
        /* start the watchdog timer */
index 91365adba4f5568925d3398545d3b9bcc9ed9f96..b90c3f160c77b0e598fe90578d9362de54e4a1f1 100644 (file)
@@ -45,9 +45,6 @@
 #error "INTERRUPT_VECTOR_BASE not aligned to 16MiB boundary!"
 #endif
 
-struct pt_regs *__frame; /* current frame pointer */
-EXPORT_SYMBOL(__frame);
-
 int kstack_depth_to_print = 24;
 
 spinlock_t die_lock = __SPIN_LOCK_UNLOCKED(die_lock);
@@ -101,7 +98,6 @@ DO_EINFO(SIGILL,  {}, "invalid opcode",              invalid_op,     ILL_ILLOPC);
 DO_EINFO(SIGILL,  {}, "invalid ex opcode",     invalid_exop,   ILL_ILLOPC);
 DO_EINFO(SIGBUS,  {}, "invalid address",       mem_error,      BUS_ADRERR);
 DO_EINFO(SIGBUS,  {}, "bus error",             bus_error,      BUS_ADRERR);
-DO_EINFO(SIGILL,  {}, "FPU invalid opcode",    fpu_invalid_op, ILL_COPROC);
 
 DO_ERROR(SIGTRAP,
 #ifndef CONFIG_MN10300_USING_JTAG
@@ -222,11 +218,14 @@ void show_registers_only(struct pt_regs *regs)
        printk(KERN_EMERG "threadinfo=%p task=%p)\n",
               current_thread_info(), current);
 
-       if ((unsigned long) current >= 0x90000000UL &&
-           (unsigned long) current < 0x94000000UL)
+       if ((unsigned long) current >= PAGE_OFFSET &&
+           (unsigned long) current < (unsigned long)high_memory)
                printk(KERN_EMERG "Process %s (pid: %d)\n",
                       current->comm, current->pid);
 
+#ifdef CONFIG_SMP
+       printk(KERN_EMERG "CPUID:  %08x\n", CPUID);
+#endif
        printk(KERN_EMERG "CPUP:   %04hx\n", CPUP);
        printk(KERN_EMERG "TBR:    %08x\n", TBR);
        printk(KERN_EMERG "DEAR:   %08x\n", DEAR);
@@ -522,8 +521,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
 {
        unsigned long addr;
        u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
+       unsigned long flags;
 
        addr = (unsigned long) handler - (unsigned long) vector;
+
+       flags = arch_local_cli_save();
+
        vector[0] = 0xdc;               /* JMP handler */
        vector[1] = addr;
        vector[2] = addr >> 8;
@@ -533,30 +536,12 @@ void __init set_intr_stub(enum exception_code code, void *handler)
        vector[6] = 0xcb;
        vector[7] = 0xcb;
 
-       mn10300_dcache_flush_inv();
-       mn10300_icache_inv();
-}
-
-/*
- * set an interrupt stub to invoke the JTAG unit and then jump to a handler
- */
-void __init set_jtag_stub(enum exception_code code, void *handler)
-{
-       unsigned long addr;
-       u8 *vector = (u8 *)(CONFIG_INTERRUPT_VECTOR_BASE + code);
-
-       addr = (unsigned long) handler - ((unsigned long) vector + 1);
-       vector[0] = 0xff;               /* PI to jump into JTAG debugger */
-       vector[1] = 0xdc;               /* jmp handler */
-       vector[2] = addr;
-       vector[3] = addr >> 8;
-       vector[4] = addr >> 16;
-       vector[5] = addr >> 24;
-       vector[6] = 0xcb;
-       vector[7] = 0xcb;
+       arch_local_irq_restore(flags);
 
+#ifndef CONFIG_MN10300_CACHE_SNOOP
        mn10300_dcache_flush_inv();
-       flush_icache_range((unsigned long) vector, (unsigned long) vector + 8);
+       mn10300_icache_inv();
+#endif
 }
 
 /*
@@ -581,7 +566,6 @@ void __init trap_init(void)
        set_excp_vector(EXCEP_PRIVINSACC,       insn_acc_error);
        set_excp_vector(EXCEP_PRIVDATACC,       data_acc_error);
        set_excp_vector(EXCEP_DATINSACC,        insn_acc_error);
-       set_excp_vector(EXCEP_FPU_DISABLED,     fpu_disabled);
        set_excp_vector(EXCEP_FPU_UNIMPINS,     fpu_invalid_op);
        set_excp_vector(EXCEP_FPU_OPERATION,    fpu_exception);
 
index 440a7dcbf87b52afb443451b5973d9ad16e48116..a66c6cdaf4424974de52cbc97583b294691e0d12 100644 (file)
@@ -15,7 +15,7 @@
 /*
  * try flipping a bit using BSET and BCLR
  */
-void change_bit(int nr, volatile void *addr)
+void change_bit(unsigned long nr, volatile void *addr)
 {
        if (test_bit(nr, addr))
                goto try_clear_bit;
@@ -34,7 +34,7 @@ try_clear_bit:
 /*
  * try flipping a bit using BSET and BCLR and returning the old value
  */
-int test_and_change_bit(int nr, volatile void *addr)
+int test_and_change_bit(unsigned long nr, volatile void *addr)
 {
        if (test_bit(nr, addr))
                goto try_clear_bit;
index fdf6f710f94ec388f8c230afc8bff6139cc6295b..8e7ceb8ba33ddaeaf7487139fb6d9eab5f9788b4 100644 (file)
@@ -38,14 +38,14 @@ EXPORT_SYMBOL(__delay);
  */
 void __udelay(unsigned long usecs)
 {
-       signed long ioclk, stop;
+       unsigned long start, stop, cnt;
 
        /* usecs * CLK / 1E6 */
        stop = __muldiv64u(usecs, MN10300_TSCCLK, 1000000);
-       stop = TMTSCBC - stop;
+       start = TMTSCBC;
 
        do {
-               ioclk = TMTSCBC;
-       } while (stop < ioclk);
+               cnt = start - TMTSCBC;
+       } while (cnt < stop);
 }
 EXPORT_SYMBOL(__udelay);
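
The rewritten __udelay() measures elapsed time as start - TMTSCBC instead of comparing against a precomputed stop value; because TMTSCBC counts down, the unsigned subtraction stays correct even if the counter wraps during the wait. A minimal sketch of the same idiom, with the hardware register read mocked out (read_down_counter() and the values are invented for illustration):

	/*
	 * Generic sketch of the wrap-safe delay idiom used above -- not
	 * MN10300 code.  read_down_counter() stands in for reading TMTSCBC
	 * and is simulated here with a plain variable.
	 */
	#include <stdint.h>

	static uint32_t fake_counter = 100;	/* simulated down-counter */

	static uint32_t read_down_counter(void)
	{
		return fake_counter--;		/* value decreases and may wrap */
	}

	static void delay_cycles(uint32_t cycles)
	{
		uint32_t start = read_down_counter();

		/*
		 * elapsed = start - now; 32-bit unsigned subtraction gives the
		 * right answer even if the counter wraps past zero during the
		 * wait.
		 */
		while ((uint32_t)(start - read_down_counter()) < cycles)
			;
	}

	int main(void)
	{
		delay_cycles(50);		/* spins until 50 "cycles" pass */
		return 0;
	}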
index e138994e1667dc9e45f4a1539b33ebe9ea70874a..1d27bba0cd8fced51a19fe2b77c539f95eaa2c76 100644 (file)
  */
 #include <asm/cache.h>
 
-        .section .text
-        .balign        L1_CACHE_BYTES
+       .section .text
+       .balign L1_CACHE_BYTES
 
 ###############################################################################
 #
-# unsigned int do_csum(const unsigned char *buff, size_t len)
+# unsigned int do_csum(const unsigned char *buff, int len)
 #
 ###############################################################################
        .globl  do_csum
-        .type  do_csum,@function
+       .type   do_csum,@function
 do_csum:
        movm    [d2,d3],(sp)
-       mov     d0,(12,sp)
-       mov     d1,(16,sp)
        mov     d1,d2                           # count
        mov     d0,a0                           # buff
+       mov     a0,a1
        clr     d1                              # accumulator
 
        cmp     +0,d2
-       beq     do_csum_done                    # return if zero-length buffer
+       ble     do_csum_done                    # check for zero length or negative
 
        # 4-byte align the buffer pointer
        btst    +3,a0
@@ -41,17 +40,15 @@ do_csum:
        inc     a0
        asl     +8,d0
        add     d0,d1
-       addc    +0,d1
        add     -1,d2
-do_csum_addr_not_odd:
 
+do_csum_addr_not_odd:
        cmp     +2,d2
        bcs     do_csum_fewer_than_4
        btst    +2,a0
        beq     do_csum_now_4b_aligned
        movhu   (a0+),d0
        add     d0,d1
-       addc    +0,d1
        add     -2,d2
        cmp     +4,d2
        bcs     do_csum_fewer_than_4
@@ -66,20 +63,20 @@ do_csum_now_4b_aligned:
 
 do_csum_loop:
        mov     (a0+),d0
-       add     d0,d1
        mov     (a0+),e0
-       addc    e0,d1
        mov     (a0+),e1
-       addc    e1,d1
        mov     (a0+),e3
+       add     d0,d1
+       addc    e0,d1
+       addc    e1,d1
        addc    e3,d1
        mov     (a0+),d0
-       addc    d0,d1
        mov     (a0+),e0
-       addc    e0,d1
        mov     (a0+),e1
-       addc    e1,d1
        mov     (a0+),e3
+       addc    d0,d1
+       addc    e0,d1
+       addc    e1,d1
        addc    e3,d1
        addc    +0,d1
 
@@ -94,12 +91,12 @@ do_csum_remainder:
        cmp     +16,d2
        bcs     do_csum_fewer_than_16
        mov     (a0+),d0
-       add     d0,d1
        mov     (a0+),e0
-       addc    e0,d1
        mov     (a0+),e1
-       addc    e1,d1
        mov     (a0+),e3
+       add     d0,d1
+       addc    e0,d1
+       addc    e1,d1
        addc    e3,d1
        addc    +0,d1
        add     -16,d2
@@ -131,9 +128,9 @@ do_csum_fewer_than_4:
        xor_cmp d0,d0,+2,d2
        bcs     do_csum_fewer_than_2
        movhu   (a0+),d0
-do_csum_fewer_than_2:
        and     +1,d2
        beq     do_csum_add_last_bit
+do_csum_fewer_than_2:
        movbu   (a0),d3
        add     d3,d0
 do_csum_add_last_bit:
@@ -142,21 +139,19 @@ do_csum_add_last_bit:
 
 do_csum_done:
        # compress the checksum down to 16 bits
-       mov     +0xffff0000,d2
-       and     d1,d2
+       mov     +0xffff0000,d0
+       and     d1,d0
        asl     +16,d1
-       add     d2,d1,d0
+       add     d1,d0
        addc    +0xffff,d0
        lsr     +16,d0
 
        # flip the halves of the word result if the buffer was oddly aligned
-       mov     (12,sp),d1
-       and     +1,d1
+       and     +1,a1
        beq     do_csum_not_oddly_aligned
        swaph   d0,d0                           # exchange bits 15:8 with 7:0
 
 do_csum_not_oddly_aligned:
        ret     [d2,d3],8
 
-do_csum_end:
-       .size   do_csum, do_csum_end-do_csum
+       .size   do_csum, .-do_csum
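
The do_csum_done tail folds the 32-bit running sum down to 16 bits with an end-around carry before optionally byte-swapping for odd alignment. Below is a C sketch of an equivalent fold (an illustration, not a transcription of the assembly above, which folds via the high half and an addc; the result is the same):

	/*
	 * Equivalent 16-bit fold of a 32-bit ones'-complement partial sum --
	 * illustrative sketch only.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t csum_fold32(uint32_t sum)
	{
		sum = (sum >> 16) + (sum & 0xffff);	/* fold high half into low */
		sum += sum >> 16;			/* add back any carry */
		return (uint16_t)sum;
	}

	int main(void)
	{
		/* Example: 0x0001fffe folds to 0xffff. */
		printf("%04x\n", csum_fold32(0x0001fffeU));
		return 0;
	}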
diff --git a/arch/mn10300/mm/Kconfig.cache b/arch/mn10300/mm/Kconfig.cache
new file mode 100644 (file)
index 0000000..c4fd923
--- /dev/null
@@ -0,0 +1,101 @@
+#
+# MN10300 CPU cache options
+#
+
+choice
+       prompt "CPU Caching mode"
+       default MN10300_CACHE_WBACK
+       help
+         This option determines the caching mode for the kernel.
+
+         Write-Back caching mode involves all reads and writes causing
+         the affected cacheline to be read into the cache first before being
+         operated upon. Memory is not then updated by a write until the cache
+         is filled and a cacheline needs to be displaced from the cache to
+         make room. Only at that point is it written back.
+
+         Write-Through caching only fetches cachelines from memory on a
+         read. Writes always get written directly to memory. If the affected
+         cacheline is also in cache, it will be updated too.
+
+         The final option is to turn off caching entirely.
+
+config MN10300_CACHE_WBACK
+       bool "Write-Back"
+       help
+         The dcache operates in delayed write-back mode.  It must be manually
+         flushed if writes are made that subsequently need to be executed or
+         to be DMA'd by a device.
+
+config MN10300_CACHE_WTHRU
+       bool "Write-Through"
+       help
+         The dcache operates in immediate write-through mode.  Writes are
+         committed to RAM immediately in addition to being stored in the
+         cache.  This means that the written data is immediately available for
+         execution or DMA.
+
+         This is not available for use with an SMP kernel if cache flushing
+         and invalidation by automatic purge register is not selected.
+
+config MN10300_CACHE_DISABLED
+       bool "Disabled"
+       help
+         The icache and dcache are disabled.
+
+endchoice
+
+config MN10300_CACHE_ENABLED
+       def_bool y if !MN10300_CACHE_DISABLED
+
+
+choice
+       prompt "CPU cache flush/invalidate method"
+       default MN10300_CACHE_MANAGE_BY_TAG if !AM34_2
+       default MN10300_CACHE_MANAGE_BY_REG if AM34_2
+       depends on MN10300_CACHE_ENABLED
+       help
+         This determines the method by which CPU cache flushing and
+         invalidation is performed.
+
+config MN10300_CACHE_MANAGE_BY_TAG
+       bool "Use the cache tag registers directly"
+       depends on !(SMP && MN10300_CACHE_WTHRU)
+
+config MN10300_CACHE_MANAGE_BY_REG
+       bool "Flush areas by way of automatic purge registers (AM34 only)"
+       depends on AM34_2
+
+endchoice
+
+config MN10300_CACHE_INV_BY_TAG
+       def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_INV_BY_REG
+       def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_ENABLED
+
+config MN10300_CACHE_FLUSH_BY_TAG
+       def_bool y if MN10300_CACHE_MANAGE_BY_TAG && MN10300_CACHE_WBACK
+
+config MN10300_CACHE_FLUSH_BY_REG
+       def_bool y if MN10300_CACHE_MANAGE_BY_REG && MN10300_CACHE_WBACK
+
+
+config MN10300_HAS_CACHE_SNOOP
+       def_bool n
+
+config MN10300_CACHE_SNOOP
+       bool "Use CPU Cache Snooping"
+       depends on MN10300_CACHE_ENABLED && MN10300_HAS_CACHE_SNOOP
+       default y
+
+config MN10300_CACHE_FLUSH_ICACHE
+       def_bool y if MN10300_CACHE_WBACK && !MN10300_CACHE_SNOOP
+       help
+         Set if the dcache needs to be flushed before the icache is invalidated.
+
+config MN10300_CACHE_INV_ICACHE
+       def_bool y if MN10300_CACHE_WTHRU && !MN10300_CACHE_SNOOP
+       help
+         Set if we need the icache to be invalidated, even if the dcache is in
+         write-through mode and doesn't need flushing.
index 1557277fbc5c03962c56f39b7d1a5687bdea80bd..203fee23f7d70efebe0b6c11eb1bfdb6dc3cae6c 100644 (file)
@@ -2,11 +2,21 @@
 # Makefile for the MN10300-specific memory management code
 #
 
-cacheflush-y   := cache.o cache-mn10300.o
-cacheflush-$(CONFIG_MN10300_CACHE_WBACK) += cache-flush-mn10300.o
+cache-smp-wback-$(CONFIG_MN10300_CACHE_WBACK) := cache-smp-flush.o
+
+cacheflush-y   := cache.o
+cacheflush-$(CONFIG_SMP) += cache-smp.o cache-smp-inv.o $(cache-smp-wback-y)
+cacheflush-$(CONFIG_MN10300_CACHE_INV_ICACHE) += cache-inv-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_ICACHE) += cache-flush-icache.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_TAG) += cache-inv-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_INV_BY_REG) += cache-inv-by-reg.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_TAG) += cache-flush-by-tag.o
+cacheflush-$(CONFIG_MN10300_CACHE_FLUSH_BY_REG) += cache-flush-by-reg.o
 
 cacheflush-$(CONFIG_MN10300_CACHE_DISABLED) := cache-disabled.o
 
 obj-y := \
        init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
        misalignment.o dma-alloc.o $(cacheflush-y)
+
+obj-$(CONFIG_SMP) += tlb-smp.o
diff --git a/arch/mn10300/mm/cache-flush-by-reg.S b/arch/mn10300/mm/cache-flush-by-reg.S
new file mode 100644 (file)
index 0000000..1dcae02
--- /dev/null
@@ -0,0 +1,308 @@
+/* MN10300 CPU core caching routines, using indirect regs on cache controller
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+       .am33_2
+
+#ifndef CONFIG_SMP
+       .globl mn10300_dcache_flush
+       .globl mn10300_dcache_flush_page
+       .globl mn10300_dcache_flush_range
+       .globl mn10300_dcache_flush_range2
+       .globl mn10300_dcache_flush_inv
+       .globl mn10300_dcache_flush_inv_page
+       .globl mn10300_dcache_flush_inv_range
+       .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush           = mn10300_local_dcache_flush
+mn10300_dcache_flush_page      = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range     = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2    = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv       = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page  = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2        = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush
+       .type   mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_end
+
+       mov     DCPGCR,a0
+
+       LOCAL_CLI_SAVE(d1)
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       # set mask
+       clr     d0
+       mov     d0,(DCPGMR)
+
+       # area purge
+       #
+       # DCPGCR = DCPGCR_DCP
+       #
+       mov     DCPGCR_DCP,d0
+       mov     d0,(a0)
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_page
+       .globl  mn10300_local_dcache_flush_range
+       .globl  mn10300_local_dcache_flush_range2
+       .type   mn10300_local_dcache_flush_page,@function
+       .type   mn10300_local_dcache_flush_range,@function
+       .type   mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_range:
+       movm    [d2,d3,a2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_range_end
+
+       # calculate alignsize
+       #
+       # alignsize = L1_CACHE_BYTES;
+       # for (i = (end - start - 1) / L1_CACHE_BYTES ;  i > 0; i >>= 1)
+       #     alignsize <<= 1;
+       # d2 = alignsize;
+       #
+       mov     L1_CACHE_BYTES,d2
+       sub     d0,d1,d3
+       add     -1,d3
+       lsr     L1_CACHE_SHIFT,d3
+       beq     2f
+1:
+       add     d2,d2
+       lsr     1,d3
+       bne     1b
+2:
+       mov     d1,a1           # a1 = end
+
+       LOCAL_CLI_SAVE(d3)
+       mov     DCPGCR,a0
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # determine the mask
+       mov     d2,d1
+       add     -1,d1
+       not     d1              # d1 = mask = ~(alignsize-1)
+       mov     d1,(DCPGMR)
+
+       and     d1,d0,a2        # a2 = mask & start
+
+dcpgloop:
+       # area purge
+       mov     a2,d0
+       or      DCPGCR_DCP,d0
+       mov     d0,(a0)         # DCPGCR = (mask & start) | DCPGCR_DCP
+
+       # wait for busy bit of area purge
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # check purge of end address
+       add     d2,a2           # a2 += alignsize
+       cmp     a1,a2           # if (a2 < end) goto dcpgloop
+       bns     dcpgloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_range_end:
+       ret     [d2,d3,a2],12
+
+       .size   mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+       .size   mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+       .size   mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv
+       .type   mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_inv_end
+
+       mov     DCPGCR,a0
+
+       LOCAL_CLI_SAVE(d1)
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       # set the mask to cover everything
+       clr     d0
+       mov     d0,(DCPGMR)
+
+       # area purge & invalidate
+       mov     DCPGCR_DCP|DCPGCR_DCI,d0
+       mov     d0,(a0)
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       LOCAL_IRQ_RESTORE(d1)
+
+mn10300_local_dcache_flush_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv_page
+       .globl  mn10300_local_dcache_flush_inv_range
+       .globl  mn10300_local_dcache_flush_inv_range2
+       .type   mn10300_local_dcache_flush_inv_page,@function
+       .type   mn10300_local_dcache_flush_inv_range,@function
+       .type   mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_inv_range:
+       movm    [d2,d3,a2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_inv_range_end
+
+       # calculate alignsize
+       #
+       # alignsize = L1_CACHE_BYTES;
+       # for (i = (end - start - 1) / L1_CACHE_BYTES; i > 0; i >>= 1)
+       #     alignsize <<= 1;
+       # d2 = alignsize
+       #
+       mov     L1_CACHE_BYTES,d2
+       sub     d0,d1,d3
+       add     -1,d3
+       lsr     L1_CACHE_SHIFT,d3
+       beq     2f
+1:
+       add     d2,d2
+       lsr     1,d3
+       bne     1b
+2:
+       mov     d1,a1           # a1 = end
+
+       LOCAL_CLI_SAVE(d3)
+       mov     DCPGCR,a0
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # set the mask
+       mov     d2,d1
+       add     -1,d1
+       not     d1              # d1 = mask = ~(alignsize-1)
+       mov     d1,(DCPGMR)
+
+       and     d1,d0,a2        # a2 = mask & start
+
+dcpgivloop:
+       # area purge & invalidate
+       mov     a2,d0
+       or      DCPGCR_DCP|DCPGCR_DCI,d0
+       mov     d0,(a0)         # DCPGCR = (mask & start)|DCPGCR_DCP|DCPGCR_DCI
+
+       # wait for busy bit of area purge & invalidate
+       setlb
+       mov     (a0),d1
+       btst    DCPGCR_DCPGBSY,d1
+       lne
+
+       # check purge & invalidate of end address
+       add     d2,a2           # a2 += alignsize
+       cmp     a1,a2           # if (a2 < end) goto dcpgivloop
+       bns     dcpgivloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_flush_inv_range_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+       .size   mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+       .size   mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-by-tag.S b/arch/mn10300/mm/cache-flush-by-tag.S
new file mode 100644 (file)
index 0000000..5cd6a27
--- /dev/null
@@ -0,0 +1,251 @@
+/* MN10300 CPU core caching routines, using direct tag flushing
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+
+       .am33_2
+
+#ifndef CONFIG_SMP
+       .globl mn10300_dcache_flush
+       .globl mn10300_dcache_flush_page
+       .globl mn10300_dcache_flush_range
+       .globl mn10300_dcache_flush_range2
+       .globl mn10300_dcache_flush_inv
+       .globl mn10300_dcache_flush_inv_page
+       .globl mn10300_dcache_flush_inv_range
+       .globl mn10300_dcache_flush_inv_range2
+
+mn10300_dcache_flush           = mn10300_local_dcache_flush
+mn10300_dcache_flush_page      = mn10300_local_dcache_flush_page
+mn10300_dcache_flush_range     = mn10300_local_dcache_flush_range
+mn10300_dcache_flush_range2    = mn10300_local_dcache_flush_range2
+mn10300_dcache_flush_inv       = mn10300_local_dcache_flush_inv
+mn10300_dcache_flush_inv_page  = mn10300_local_dcache_flush_inv_page
+mn10300_dcache_flush_inv_range = mn10300_local_dcache_flush_inv_range
+mn10300_dcache_flush_inv_range2        = mn10300_local_dcache_flush_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush(void)
+# Flush the entire data cache back to RAM
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush
+       .type   mn10300_local_dcache_flush,@function
+mn10300_local_dcache_flush:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_end
+
+       # read the addresses tagged in the cache's tag RAM and attempt to flush
+       # those addresses specifically
+       # - we rely on the hardware to filter out invalid tag entry addresses
+       mov     DCACHE_TAG(0,0),a0              # dcache tag RAM access address
+       mov     DCACHE_PURGE(0,0),a1            # dcache purge request address
+       mov     L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
+
+mn10300_local_dcache_flush_loop:
+       mov     (a0),d0
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+       or      L1_CACHE_TAG_VALID,d0           # retain valid entries in the
+                                               # cache
+       mov     d0,(a1)                         # conditional purge
+
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_loop
+
+mn10300_local_dcache_flush_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush,.-mn10300_local_dcache_flush
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_page(unsigned long start)
+# void mn10300_local_dcache_flush_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_range2(unsigned long start, unsigned long size)
+# Flush a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_page
+       .globl  mn10300_local_dcache_flush_range
+       .globl  mn10300_local_dcache_flush_range2
+       .type   mn10300_local_dcache_flush_page,@function
+       .type   mn10300_local_dcache_flush_range,@function
+       .type   mn10300_local_dcache_flush_range2,@function
+mn10300_local_dcache_flush_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_range:
+       movm    [d2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_range_end
+
+       sub     d0,d1,a0
+       cmp     MN10300_DCACHE_FLUSH_BORDER,a0
+       ble     1f
+
+       movm    (sp),[d2]
+       bra     mn10300_local_dcache_flush
+1:
+
+       # round start addr down
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
+       mov     d0,a1
+
+       add     L1_CACHE_BYTES,d1                       # round end addr up
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+       # write a request to flush all instances of an address from the cache
+       mov     DCACHE_PURGE(0,0),a0
+       mov     a1,d0
+       and     L1_CACHE_TAG_ENTRY,d0
+       add     d0,a0                           # starting dcache purge control
+                                               # reg address
+
+       sub     a1,d1
+       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
+                                               # examine
+
+       or      L1_CACHE_TAG_VALID,a1           # retain valid entries in the
+                                               # cache
+
+mn10300_local_dcache_flush_range_loop:
+       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
+                                               # all ways
+
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       and     ~L1_CACHE_WAYDISP,a0            # make sure we stay on way 0
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_range_loop
+
+mn10300_local_dcache_flush_range_end:
+       ret     [d2],4
+
+       .size   mn10300_local_dcache_flush_page,.-mn10300_local_dcache_flush_page
+       .size   mn10300_local_dcache_flush_range,.-mn10300_local_dcache_flush_range
+       .size   mn10300_local_dcache_flush_range2,.-mn10300_local_dcache_flush_range2
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv(void)
+# Flush the entire data cache and invalidate all entries
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv
+       .type   mn10300_local_dcache_flush_inv,@function
+mn10300_local_dcache_flush_inv:
+       movhu   (CHCTR),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_flush_inv_end
+
+       mov     L1_CACHE_NENTRIES,d1
+       clr     a1
+
+mn10300_local_dcache_flush_inv_loop:
+       mov     (DCACHE_PURGE_WAY0(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY1(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY2(0),a1),d0    # unconditional purge
+       mov     (DCACHE_PURGE_WAY3(0),a1),d0    # unconditional purge
+
+       add     L1_CACHE_BYTES,a1
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_inv_loop
+
+mn10300_local_dcache_flush_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_flush_inv,.-mn10300_local_dcache_flush_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_flush_inv_page(unsigned long start)
+# void mn10300_local_dcache_flush_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+# Flush and invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_flush_inv_page
+       .globl  mn10300_local_dcache_flush_inv_range
+       .globl  mn10300_local_dcache_flush_inv_range2
+       .type   mn10300_local_dcache_flush_inv_page,@function
+       .type   mn10300_local_dcache_flush_inv_range,@function
+       .type   mn10300_local_dcache_flush_inv_range2,@function
+mn10300_local_dcache_flush_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_flush_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_flush_inv_range:
+       movm    [d2],(sp)
+
+       movhu   (CHCTR),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_flush_inv_range_end
+
+       sub     d0,d1,a0
+       cmp     MN10300_DCACHE_FLUSH_INV_BORDER,a0
+       ble     1f
+
+       movm    (sp),[d2]
+       bra     mn10300_local_dcache_flush_inv
+1:
+
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
+                                                               # addr down
+       mov     d0,a1
+
+       add     L1_CACHE_BYTES,d1                       # round end addr up
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+
+       # write a request to flush and invalidate all instances of an address
+       # from the cache
+       mov     DCACHE_PURGE(0,0),a0
+       mov     a1,d0
+       and     L1_CACHE_TAG_ENTRY,d0
+       add     d0,a0                           # starting dcache purge control
+                                               # reg address
+
+       sub     a1,d1
+       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
+                                               # examine
+
+mn10300_local_dcache_flush_inv_range_loop:
+       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
+                                               # in all ways
+
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       and     ~L1_CACHE_WAYDISP,a0            # make sure we stay on way 0
+       add     -1,d1
+       bne     mn10300_local_dcache_flush_inv_range_loop
+
+mn10300_local_dcache_flush_inv_range_end:
+       ret     [d2],4
+       .size   mn10300_local_dcache_flush_inv_page,.-mn10300_local_dcache_flush_inv_page
+       .size   mn10300_local_dcache_flush_inv_range,.-mn10300_local_dcache_flush_inv_range
+       .size   mn10300_local_dcache_flush_inv_range2,.-mn10300_local_dcache_flush_inv_range2
diff --git a/arch/mn10300/mm/cache-flush-icache.c b/arch/mn10300/mm/cache-flush-icache.c
new file mode 100644 (file)
index 0000000..fdb1a9d
--- /dev/null
@@ -0,0 +1,155 @@
+/* Flush dcache and invalidate icache when the dcache is in writeback mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page - Flush a page from the dcache and invalidate the icache
+ * @vma: The VMA the page is part of.
+ * @page: The page to be flushed.
+ *
+ * Write a page back from the dcache and invalidate the icache so that we can
+ * run code from it that we've just written into it
+ */
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+       unsigned long start = page_to_phys(page);
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+
+       mn10300_local_dcache_flush_page(start);
+       mn10300_local_icache_inv_page(start);
+
+       smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+/**
+ * flush_icache_page_range - Flush dcache and invalidate icache for part of a
+ *                             single page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Flush the dcache and invalidate the icache for part of a single page, as
+ * determined by the virtual addresses given.  The page must be in the paged
+ * area.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+       unsigned long addr, size, off;
+       struct page *page;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ppte, pte;
+
+       /* work out how much of the page to flush */
+       off = start & ~PAGE_MASK;
+       size = end - start;
+
+       /* get the physical address the page is mapped to from the page
+        * tables */
+       pgd = pgd_offset(current->mm, start);
+       if (!pgd || !pgd_val(*pgd))
+               return;
+
+       pud = pud_offset(pgd, start);
+       if (!pud || !pud_val(*pud))
+               return;
+
+       pmd = pmd_offset(pud, start);
+       if (!pmd || !pmd_val(*pmd))
+               return;
+
+       ppte = pte_offset_map(pmd, start);
+       if (!ppte)
+               return;
+       pte = *ppte;
+       pte_unmap(ppte);
+
+       if (pte_none(pte))
+               return;
+
+       page = pte_page(pte);
+       if (!page)
+               return;
+
+       addr = page_to_phys(page);
+
+       /* flush the dcache and invalidate the icache coverage on that
+        * region */
+       mn10300_local_dcache_flush_range2(addr + off, size);
+       mn10300_local_icache_inv_range2(addr + off, size);
+       smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+       unsigned long start_page, end_page;
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               goto done;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses
+                * directly */
+               start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_local_dcache_flush_range(start_page, end);
+               mn10300_local_icache_inv_range(start_page, end);
+               smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start_page, end);
+               if (start_page == start)
+                       goto done;
+               end = start_page;
+       }
+
+       start_page = start & PAGE_MASK;
+       end_page = (end - 1) & PAGE_MASK;
+
+       if (start_page == end_page) {
+               /* the first and last bytes are on the same page */
+               flush_icache_page_range(start, end);
+       } else if (start_page + 1 == end_page) {
+               /* split over two virtually contiguous pages */
+               flush_icache_page_range(start, end_page);
+               flush_icache_page_range(end_page, end);
+       } else {
+               /* more than 2 pages; just flush the entire cache */
+               mn10300_dcache_flush();
+               mn10300_icache_inv();
+               smp_cache_call(SMP_IDCACHE_INV_FLUSH, 0, 0);
+       }
+
+done:
+       smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
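
For context, a minimal sketch of how kernel code that has just written instructions into memory would use the routine above; publish_code(), buf and len are hypothetical names, not part of this patch.

#include <linux/types.h>
#include <asm/cacheflush.h>

/* Hypothetical helper: make freshly written instructions at buf..buf+len
 * visible to instruction fetch on every CPU by writing the dcache back
 * and invalidating the icache over that range. */
static void publish_code(void *buf, size_t len)
{
	flush_icache_range((unsigned long)buf,
			   (unsigned long)buf + len);
}
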
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
deleted file mode 100644 (file)
index c8ed1cb..0000000
+++ /dev/null
@@ -1,192 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-       .am33_2
-       .globl mn10300_dcache_flush
-       .globl mn10300_dcache_flush_page
-       .globl mn10300_dcache_flush_range
-       .globl mn10300_dcache_flush_range2
-       .globl mn10300_dcache_flush_inv
-       .globl mn10300_dcache_flush_inv_page
-       .globl mn10300_dcache_flush_inv_range
-       .globl mn10300_dcache_flush_inv_range2
-
-###############################################################################
-#
-# void mn10300_dcache_flush(void)
-# Flush the entire data cache back to RAM
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush:
-       movhu   (CHCTR),d0
-       btst    CHCTR_DCEN,d0
-       beq     mn10300_dcache_flush_end
-
-       # read the addresses tagged in the cache's tag RAM and attempt to flush
-       # those addresses specifically
-       # - we rely on the hardware to filter out invalid tag entry addresses
-       mov     DCACHE_TAG(0,0),a0              # dcache tag RAM access address
-       mov     DCACHE_PURGE(0,0),a1            # dcache purge request address
-       mov     L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
-
-mn10300_dcache_flush_loop:
-       mov     (a0),d0
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
-       or      L1_CACHE_TAG_VALID,d0           # retain valid entries in the
-                                               # cache
-       mov     d0,(a1)                         # conditional purge
-
-mn10300_dcache_flush_skip:
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       add     -1,d1
-       bne     mn10300_dcache_flush_loop
-
-mn10300_dcache_flush_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_page(unsigned start)
-# void mn10300_dcache_flush_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
-# Flush a range of addresses on a page in the dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush_page:
-       mov     PAGE_SIZE,d1
-mn10300_dcache_flush_range2:
-       add     d0,d1
-mn10300_dcache_flush_range:
-       movm    [d2,d3],(sp)
-
-       movhu   (CHCTR),d2
-       btst    CHCTR_DCEN,d2
-       beq     mn10300_dcache_flush_range_end
-
-       # round start addr down
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
-       mov     d0,a1
-
-       add     L1_CACHE_BYTES,d1                       # round end addr up
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-       # write a request to flush all instances of an address from the cache
-       mov     DCACHE_PURGE(0,0),a0
-       mov     a1,d0
-       and     L1_CACHE_TAG_ENTRY,d0
-       add     d0,a0                           # starting dcache purge control
-                                               # reg address
-
-       sub     a1,d1
-       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
-                                               # examine
-
-       or      L1_CACHE_TAG_VALID,a1           # retain valid entries in the
-                                               # cache
-
-mn10300_dcache_flush_range_loop:
-       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
-                                               # all ways
-
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       and     ~L1_CACHE_WAYDISP,a0            # make sure way stay on way 0
-       add     -1,d1
-       bne     mn10300_dcache_flush_range_loop
-
-mn10300_dcache_flush_range_end:
-       ret     [d2,d3],8
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv(void)
-# Flush the entire data cache and invalidate all entries
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush_inv:
-       movhu   (CHCTR),d0
-       btst    CHCTR_DCEN,d0
-       beq     mn10300_dcache_flush_inv_end
-
-       # hit each line in the dcache with an unconditional purge
-       mov     DCACHE_PURGE(0,0),a1            # dcache purge request address
-       mov     L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries
-
-mn10300_dcache_flush_inv_loop:
-       mov     (a1),d0                         # unconditional purge
-
-       add     L1_CACHE_BYTES,a1
-       add     -1,d1
-       bne     mn10300_dcache_flush_inv_loop
-
-mn10300_dcache_flush_inv_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_flush_inv_page(unsigned start)
-# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
-# Flush and invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_flush_inv_page:
-       mov     PAGE_SIZE,d1
-mn10300_dcache_flush_inv_range2:
-       add     d0,d1
-mn10300_dcache_flush_inv_range:
-       movm    [d2,d3],(sp)
-       movhu   (CHCTR),d2
-       btst    CHCTR_DCEN,d2
-       beq     mn10300_dcache_flush_inv_range_end
-
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
-                                                               # addr down
-       mov     d0,a1
-
-       add     L1_CACHE_BYTES,d1                       # round end addr up
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-       # write a request to flush and invalidate all instances of an address
-       # from the cache
-       mov     DCACHE_PURGE(0,0),a0
-       mov     a1,d0
-       and     L1_CACHE_TAG_ENTRY,d0
-       add     d0,a0                           # starting dcache purge control
-                                               # reg address
-
-       sub     a1,d1
-       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
-                                               # examine
-
-mn10300_dcache_flush_inv_range_loop:
-       mov     a1,(L1_CACHE_WAYDISP*0,a0)      # conditionally purge this line
-                                               # in all ways
-
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       and     ~L1_CACHE_WAYDISP,a0            # make sure way stay on way 0
-       add     -1,d1
-       bne     mn10300_dcache_flush_inv_range_loop
-
-mn10300_dcache_flush_inv_range_end:
-       ret     [d2,d3],8
diff --git a/arch/mn10300/mm/cache-inv-by-reg.S b/arch/mn10300/mm/cache-inv-by-reg.S
new file mode 100644 (file)
index 0000000..c895086
--- /dev/null
@@ -0,0 +1,356 @@
+/* MN10300 CPU cache invalidation routines, using automatic purge registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+       +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+       .am33_2
+
+#ifndef CONFIG_SMP
+       .globl  mn10300_icache_inv
+       .globl  mn10300_icache_inv_page
+       .globl  mn10300_icache_inv_range
+       .globl  mn10300_icache_inv_range2
+       .globl  mn10300_dcache_inv
+       .globl  mn10300_dcache_inv_page
+       .globl  mn10300_dcache_inv_range
+       .globl  mn10300_dcache_inv_range2
+
+mn10300_icache_inv             = mn10300_local_icache_inv
+mn10300_icache_inv_page                = mn10300_local_icache_inv_page
+mn10300_icache_inv_range       = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2      = mn10300_local_icache_inv_range2
+mn10300_dcache_inv             = mn10300_local_dcache_inv
+mn10300_dcache_inv_page                = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range       = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2      = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_icache_inv
+        .type  mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_ICEN,d0
+       beq     mn10300_local_icache_inv_end
+
+       # invalidate
+       or      CHCTR_ICINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+mn10300_local_icache_inv_end:
+       ret     [],0
+       .size   mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv
+       .type   mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_inv_end
+
+       # invalidate
+       or      CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+mn10300_local_dcache_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv_page
+       .globl  mn10300_local_dcache_inv_range
+       .globl  mn10300_local_dcache_inv_range2
+       .type   mn10300_local_dcache_inv_page,@function
+       .type   mn10300_local_dcache_inv_range,@function
+       .type   mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_inv_range:
+       # If we are in writeback mode we check the start and end alignments,
+       # and if they're not cacheline-aligned, we must flush any bits outside
+       # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       btst    ~(L1_CACHE_BYTES-1),d0
+       bne     1f
+       btst    ~(L1_CACHE_BYTES-1),d1
+       beq     2f
+1:
+       bra     mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+       movm    [d2,d3,a2],(sp)
+
+       mov     CHCTR,a0
+       movhu   (a0),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_inv_range_end
+
+       # round the addresses out to be full cachelines, unless we're in
+       # writeback mode, in which case we would already have branched to
+       # the flush-and-invalidate routine by now
+#ifndef CONFIG_MN10300_CACHE_WBACK
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
+                                                               # addr down
+
+       mov     L1_CACHE_BYTES-1,d2
+       add     d2,d1
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1      # round end addr up
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+
+       sub     d0,d1,d2                # calculate the total size
+       mov     d0,a2                   # A2 = start address
+       mov     d1,a1                   # A1 = end address
+
+       LOCAL_CLI_SAVE(d3)
+
+       mov     DCPGCR,a0               # make sure the purger isn't busy
+       setlb
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       # skip initial address alignment calculation if address is zero
+       mov     d2,d1
+       cmp     0,a2
+       beq     1f
+
+dcivloop:
+       /* calculate alignsize
+        *
+        * alignsize = L1_CACHE_BYTES;
+        * while (! start & alignsize) {
+        *      alignsize <<=1;
+        * }
+        * d1 = alignsize;
+        */
+       mov     L1_CACHE_BYTES,d1
+       lsr     1,d1
+       setlb
+       add     d1,d1
+       mov     d1,d0
+       and     a2,d0
+       leq
+
+1:
+       /* calculate invsize
+        *
+        * if (totalsize > alignsize) {
+        *      invsize = alignsize;
+        * } else {
+        *      invsize = totalsize;
+        *      tmp = 0x80000000;
+        *      while (! invsize & tmp) {
+        *              tmp >>= 1;
+        *      }
+        *      invsize = tmp;
+        * }
+        * d1 = invsize
+        */
+       cmp     d2,d1
+       bns     2f
+       mov     d2,d1
+
+       mov     0x80000000,d0           # start with bit 31 set
+       setlb
+       lsr     1,d0
+       mov     d0,e0
+       and     d1,e0
+       leq
+       mov     d0,d1
+
+2:
+       /* set mask
+        *
+        * mask = ~(invsize-1);
+        * DCPGMR = mask;
+        */
+       mov     d1,d0
+       add     -1,d0
+       not     d0
+       mov     d0,(DCPGMR)
+
+       # invalidate area
+       mov     a2,d0
+       or      DCPGCR_DCI,d0
+       mov     d0,(a0)                 # DCPGCR = (mask & start) | DCPGCR_DCI
+
+       setlb                           # wait for the purge to complete
+       mov     (a0),d0
+       btst    DCPGCR_DCPGBSY,d0
+       lne
+
+       sub     d1,d2                   # decrease size remaining
+       add     d1,a2                   # increase next start address
+
+       /* check invalidating of end address
+        *
+        * a2 = a2 + invsize
+        * if (a2 < end) {
+        *     goto dcivloop;
+        * } */
+       cmp     a1,a2
+       bns     dcivloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_dcache_inv_range_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+       .size   mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+       .size   mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
+
+###############################################################################
+#
+# void mn10300_local_icache_inv_page(unsigned long start)
+# void mn10300_local_icache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_icache_inv_range(unsigned long start, unsigned long end)
+# Invalidate a range of addresses on a page in the icache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_icache_inv_page
+       .globl  mn10300_local_icache_inv_range
+       .globl  mn10300_local_icache_inv_range2
+       .type   mn10300_local_icache_inv_page,@function
+       .type   mn10300_local_icache_inv_range,@function
+       .type   mn10300_local_icache_inv_range2,@function
+mn10300_local_icache_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_icache_inv_range2:
+       add     d0,d1
+mn10300_local_icache_inv_range:
+       movm    [d2,d3,a2],(sp)
+
+       mov     CHCTR,a0
+       movhu   (a0),d2
+       btst    CHCTR_ICEN,d2
+       beq     mn10300_local_icache_inv_range_reg_end
+
+       /* calculate alignsize
+        *
+        * alignsize = L1_CACHE_BYTES;
+        * for (i = (end - start - 1) / L1_CACHE_BYTES ;  i > 0; i >>= 1) {
+        *     alignsize <<= 1;
+        * }
+        * d2 = alignsize;
+        */
+       mov     L1_CACHE_BYTES,d2
+       sub     d0,d1,d3
+       add     -1,d3
+       lsr     L1_CACHE_SHIFT,d3
+       beq     2f
+1:
+       add     d2,d2
+       lsr     1,d3
+       bne     1b
+2:
+
+       /* a1 = end */
+       mov     d1,a1
+
+       LOCAL_CLI_SAVE(d3)
+
+       mov     ICIVCR,a0
+       /* wait for busy bit of area invalidation */
+       setlb
+       mov     (a0),d1
+       btst    ICIVCR_ICIVBSY,d1
+       lne
+
+       /* set mask
+        *
+        * mask = ~(alignsize-1);
+        * ICIVMR = mask;
+        */
+       mov     d2,d1
+       add     -1,d1
+       not     d1
+       mov     d1,(ICIVMR)
+       /* a2 = mask & start */
+       and     d1,d0,a2
+
+icivloop:
+       /* area invalidate
+        *
+        * ICIVCR = (mask & start) | ICIVCR_ICI
+        */
+       mov     a2,d0
+       or      ICIVCR_ICI,d0
+       mov     d0,(a0)
+
+       /* wait for busy bit of area invalidation */
+       setlb
+       mov     (a0),d1
+       btst    ICIVCR_ICIVBSY,d1
+       lne
+
+       /* check invalidating of end address
+        *
+        * a2 = a2 + alignsize
+        * if (a2 < end) {
+        *     goto icivloop;
+        * } */
+       add     d2,a2
+       cmp     a1,a2
+       bns     icivloop
+
+       LOCAL_IRQ_RESTORE(d3)
+
+mn10300_local_icache_inv_range_reg_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_icache_inv_page,.-mn10300_local_icache_inv_page
+       .size   mn10300_local_icache_inv_range,.-mn10300_local_icache_inv_range
+       .size   mn10300_local_icache_inv_range2,.-mn10300_local_icache_inv_range2
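
The pseudo-code comments embedded in mn10300_local_dcache_inv_range above describe how the automatic purge registers are fed; the user-space C model below just transcribes that block-size selection. purge_block() stands in for the DCPGMR/DCPGCR writes, and the 32-byte line size is an assumption.

#include <stdio.h>

#define L1_CACHE_BYTES	32ul	/* assumed line size */

/* Stand-in for programming DCPGMR (the mask) and DCPGCR (start | DCI). */
static void purge_block(unsigned long start, unsigned long size)
{
	printf("DCPGMR = %#lx, DCPGCR = %#lx | DCI\n",
	       ~(size - 1), start & ~(size - 1));
}

/* Model of the purge loop: each pass picks the largest naturally aligned
 * power-of-two block that both starts at 'start' and fits in the
 * remaining size, purges it, then advances. */
static void dcache_inv_range_model(unsigned long start, unsigned long end)
{
	unsigned long total = end - start;

	while (total) {
		unsigned long align, inv;

		/* alignsize: lowest set bit of start (at least one line) */
		if (start) {
			align = L1_CACHE_BYTES;
			while (!(start & align))
				align <<= 1;
		} else {
			align = total;	/* start == 0: no alignment limit */
		}

		/* invsize: the alignment block, clamped to the remaining
		 * size rounded down to a power of two */
		if (total > align) {
			inv = align;
		} else {
			unsigned long tmp = 0x80000000ul;

			while (!(total & tmp))
				tmp >>= 1;
			inv = tmp;
		}

		purge_block(start, inv);
		start += inv;
		total -= inv;
	}
}

int main(void)
{
	dcache_inv_range_model(0x1000020, 0x1000100);
	return 0;
}
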
diff --git a/arch/mn10300/mm/cache-inv-by-tag.S b/arch/mn10300/mm/cache-inv-by-tag.S
new file mode 100644 (file)
index 0000000..e9713b4
--- /dev/null
@@ -0,0 +1,348 @@
+/* MN10300 CPU core caching routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/smp.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/irqflags.h>
+#include <asm/cacheflush.h>
+
+#define mn10300_local_dcache_inv_range_intr_interval \
+       +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
+
+#if mn10300_local_dcache_inv_range_intr_interval > 0xff
+#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
+#endif
+
+       .am33_2
+
+       .globl  mn10300_local_icache_inv_page
+       .globl  mn10300_local_icache_inv_range
+       .globl  mn10300_local_icache_inv_range2
+
+mn10300_local_icache_inv_page  = mn10300_local_icache_inv
+mn10300_local_icache_inv_range = mn10300_local_icache_inv
+mn10300_local_icache_inv_range2        = mn10300_local_icache_inv
+
+#ifndef CONFIG_SMP
+       .globl  mn10300_icache_inv
+       .globl  mn10300_icache_inv_page
+       .globl  mn10300_icache_inv_range
+       .globl  mn10300_icache_inv_range2
+       .globl  mn10300_dcache_inv
+       .globl  mn10300_dcache_inv_page
+       .globl  mn10300_dcache_inv_range
+       .globl  mn10300_dcache_inv_range2
+
+mn10300_icache_inv             = mn10300_local_icache_inv
+mn10300_icache_inv_page                = mn10300_local_icache_inv_page
+mn10300_icache_inv_range       = mn10300_local_icache_inv_range
+mn10300_icache_inv_range2      = mn10300_local_icache_inv_range2
+mn10300_dcache_inv             = mn10300_local_dcache_inv
+mn10300_dcache_inv_page                = mn10300_local_dcache_inv_page
+mn10300_dcache_inv_range       = mn10300_local_dcache_inv_range
+mn10300_dcache_inv_range2      = mn10300_local_dcache_inv_range2
+
+#endif /* !CONFIG_SMP */
+
+###############################################################################
+#
+# void mn10300_local_icache_inv(void)
+# Invalidate the entire icache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_icache_inv
+        .type  mn10300_local_icache_inv,@function
+mn10300_local_icache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_ICEN,d0
+       beq     mn10300_local_icache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+       LOCAL_CLI_SAVE(d1)
+
+       # disable the icache
+       and     ~CHCTR_ICEN,d0
+       movhu   d0,(a0)
+
+       # and wait for it to calm down
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_ICBUSY,d0
+       lne
+
+       # invalidate
+       or      CHCTR_ICINV,d0
+       movhu   d0,(a0)
+
+       # wait for the cache to finish
+       mov     CHCTR,a0
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_ICBUSY,d0
+       lne
+
+       # and reenable it
+       and     ~CHCTR_ICINV,d0
+       or      CHCTR_ICEN,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+       LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+       # invalidate
+       or      CHCTR_ICINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_icache_inv_end:
+       ret     [],0
+       .size   mn10300_local_icache_inv,.-mn10300_local_icache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv(void)
+# Invalidate the entire dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv
+       .type   mn10300_local_dcache_inv,@function
+mn10300_local_dcache_inv:
+       mov     CHCTR,a0
+
+       movhu   (a0),d0
+       btst    CHCTR_DCEN,d0
+       beq     mn10300_local_dcache_inv_end
+
+#if defined(CONFIG_AM33_2) || defined(CONFIG_AM33_3)
+       LOCAL_CLI_SAVE(d1)
+
+       # disable the dcache
+       and     ~CHCTR_DCEN,d0
+       movhu   d0,(a0)
+
+       # and wait for it to calm down
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+       # invalidate
+       or      CHCTR_DCINV,d0
+       movhu   d0,(a0)
+
+       # wait for the cache to finish
+       mov     CHCTR,a0
+       setlb
+       movhu   (a0),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+       # and reenable it
+       and     ~CHCTR_DCINV,d0
+       or      CHCTR_DCEN,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+
+       LOCAL_IRQ_RESTORE(d1)
+#else /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+       # invalidate
+       or      CHCTR_DCINV,d0
+       movhu   d0,(a0)
+       movhu   (a0),d0
+#endif /* CONFIG_AM33_2 || CONFIG_AM33_3 */
+
+mn10300_local_dcache_inv_end:
+       ret     [],0
+       .size   mn10300_local_dcache_inv,.-mn10300_local_dcache_inv
+
+###############################################################################
+#
+# void mn10300_local_dcache_inv_range(unsigned long start, unsigned long end)
+# void mn10300_local_dcache_inv_range2(unsigned long start, unsigned long size)
+# void mn10300_local_dcache_inv_page(unsigned long start)
+# Invalidate a range of addresses on a page in the dcache
+#
+###############################################################################
+       ALIGN
+       .globl  mn10300_local_dcache_inv_page
+       .globl  mn10300_local_dcache_inv_range
+       .globl  mn10300_local_dcache_inv_range2
+       .type   mn10300_local_dcache_inv_page,@function
+       .type   mn10300_local_dcache_inv_range,@function
+       .type   mn10300_local_dcache_inv_range2,@function
+mn10300_local_dcache_inv_page:
+       and     ~(PAGE_SIZE-1),d0
+       mov     PAGE_SIZE,d1
+mn10300_local_dcache_inv_range2:
+       add     d0,d1
+mn10300_local_dcache_inv_range:
+       # If we are in writeback mode we check the start and end alignments,
+       # and if they're not cacheline-aligned, we must flush any bits outside
+       # the range that share cachelines with stuff inside the range
+#ifdef CONFIG_MN10300_CACHE_WBACK
+       btst    ~(L1_CACHE_BYTES-1),d0
+       bne     1f
+       btst    ~(L1_CACHE_BYTES-1),d1
+       beq     2f
+1:
+       bra     mn10300_local_dcache_flush_inv_range
+2:
+#endif /* CONFIG_MN10300_CACHE_WBACK */
+
+       movm    [d2,d3,a2],(sp)
+
+       mov     CHCTR,a2
+       movhu   (a2),d2
+       btst    CHCTR_DCEN,d2
+       beq     mn10300_local_dcache_inv_range_end
+
+#ifndef CONFIG_MN10300_CACHE_WBACK
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
+                                                               # addr down
+
+       add     L1_CACHE_BYTES,d1               # round end addr up
+       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
+#endif /* !CONFIG_MN10300_CACHE_WBACK */
+       mov     d0,a1
+
+       clr     d2                              # we're going to clear tag RAM
+                                               # entries
+
+       # read the tags from the tag RAM, and if they indicate a valid dirty
+       # cache line then invalidate that line
+       mov     DCACHE_TAG(0,0),a0
+       mov     a1,d0
+       and     L1_CACHE_TAG_ENTRY,d0
+       add     d0,a0                           # starting dcache tag RAM
+                                               # access address
+
+       sub     a1,d1
+       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
+                                               # examine
+
+       and     ~(L1_CACHE_DISPARITY-1),a1      # determine comparator base
+
+mn10300_local_dcache_inv_range_outer_loop:
+       LOCAL_CLI_SAVE(d3)
+
+       # disable the dcache
+       movhu   (a2),d0
+       and     ~CHCTR_DCEN,d0
+       movhu   d0,(a2)
+
+       # and wait for it to calm down
+       setlb
+       movhu   (a2),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+mn10300_local_dcache_inv_range_loop:
+
+       # process the way 0 slot
+       mov     (L1_CACHE_WAYDISP*0,a0),d0      # read the tag in the way 0 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_0   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_0   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*0,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_0:
+
+       # process the way 1 slot
+       mov     (L1_CACHE_WAYDISP*1,a0),d0      # read the tag in the way 1 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_1   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_1   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*1,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_1:
+
+       # process the way 2 slot
+       mov     (L1_CACHE_WAYDISP*2,a0),d0      # read the tag in the way 2 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_2   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_2   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*2,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_2:
+
+       # process the way 3 slot
+       mov     (L1_CACHE_WAYDISP*3,a0),d0      # read the tag in the way 3 slot
+       btst    L1_CACHE_TAG_VALID,d0
+       beq     mn10300_local_dcache_inv_range_skip_3   # jump if this cacheline
+                                               # is not valid
+
+       xor     a1,d0
+       lsr     12,d0
+       bne     mn10300_local_dcache_inv_range_skip_3   # jump if not this cacheline
+
+       mov     d2,(L1_CACHE_WAYDISP*3,a0)      # kill the tag
+
+mn10300_local_dcache_inv_range_skip_3:
+
+       # approx every N steps we re-enable the cache and see if there are any
+       # interrupts to be processed
+       # we also break out if we've reached the end of the loop
+       # (the bottom nibble of the count is zero in both cases)
+       add     L1_CACHE_BYTES,a0
+       add     L1_CACHE_BYTES,a1
+       and     ~L1_CACHE_WAYDISP,a0
+       add     -1,d1
+       btst    mn10300_local_dcache_inv_range_intr_interval,d1
+       bne     mn10300_local_dcache_inv_range_loop
+
+       # wait for the cache to finish what it's doing
+       setlb
+       movhu   (a2),d0
+       btst    CHCTR_DCBUSY,d0
+       lne
+
+       # and reenable it
+       or      CHCTR_DCEN,d0
+       movhu   d0,(a2)
+       movhu   (a2),d0
+
+       # re-enable interrupts
+       # - we don't bother with delay NOPs as we'll have enough instructions
+       #   before we disable interrupts again to give the interrupts a chance
+       #   to happen
+       LOCAL_IRQ_RESTORE(d3)
+
+       # go around again if the counter hasn't yet reached zero
+       add     0,d1
+       bne     mn10300_local_dcache_inv_range_outer_loop
+
+mn10300_local_dcache_inv_range_end:
+       ret     [d2,d3,a2],12
+       .size   mn10300_local_dcache_inv_page,.-mn10300_local_dcache_inv_page
+       .size   mn10300_local_dcache_inv_range,.-mn10300_local_dcache_inv_range
+       .size   mn10300_local_dcache_inv_range2,.-mn10300_local_dcache_inv_range2
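
For readers who don't follow MN10300 assembly easily, this is a toy C model of the tag-RAM walk above: the four-way check mirrors the code, but the set count, the valid-bit position and the index calculation are illustrative assumptions rather than the real cache geometry.

#include <stdint.h>
#include <stdio.h>

#define NWAYS		4	/* the code checks four way slots */
#define NENTRIES	128	/* assumed number of sets */
#define LINE_BYTES	32u	/* assumed line size */
#define TAG_VALID	0x1u	/* assumed valid-bit position */

static uint32_t tag_ram[NWAYS][NENTRIES];

/* Model of the tag-walk invalidation: visit every set covered by the
 * range and, in each way, drop a line only if it is both valid and its
 * tag bits (address bits 12 and up) match the address being
 * invalidated. */
static void dcache_inv_range_by_tag(uint32_t start, uint32_t end)
{
	for (uint32_t addr = start & ~(LINE_BYTES - 1); addr < end;
	     addr += LINE_BYTES) {
		unsigned int set = (addr / LINE_BYTES) % NENTRIES;

		for (int way = 0; way < NWAYS; way++) {
			uint32_t tag = tag_ram[way][set];

			if (!(tag & TAG_VALID))
				continue;		/* line not valid */
			if ((tag ^ addr) >> 12)
				continue;		/* different tag */
			tag_ram[way][set] = 0;		/* kill the entry */
		}
	}
}

int main(void)
{
	tag_ram[1][1] = (0x1000020u & ~0xfffu) | TAG_VALID;
	dcache_inv_range_by_tag(0x1000000u, 0x1000100u);
	printf("way 1 set 1 tag after inv: %#x\n",
	       (unsigned int)tag_ram[1][1]);
	return 0;
}
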
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c
new file mode 100644 (file)
index 0000000..a8933a6
--- /dev/null
@@ -0,0 +1,129 @@
+/* Invalidate icache when dcache doesn't need invalidation as it's in
+ * write-through mode
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+/**
+ * flush_icache_page_range - Invalidate the icache for part of a single
+ *                             page
+ * @start: The starting virtual address of the page part.
+ * @end: The ending virtual address of the page part.
+ *
+ * Invalidate the icache for part of a single page, as determined by the
+ * virtual addresses given.  The page must be in the paged area.  The dcache is
+ * not flushed as the cache must be in write-through mode to get here.
+ */
+static void flush_icache_page_range(unsigned long start, unsigned long end)
+{
+       unsigned long addr, size, off;
+       struct page *page;
+       pgd_t *pgd;
+       pud_t *pud;
+       pmd_t *pmd;
+       pte_t *ppte, pte;
+
+       /* work out how much of the page to flush */
+       off = start & ~PAGE_MASK;
+       size = end - start;
+
+       /* get the physical address the page is mapped to from the page
+        * tables */
+       pgd = pgd_offset(current->mm, start);
+       if (!pgd || !pgd_val(*pgd))
+               return;
+
+       pud = pud_offset(pgd, start);
+       if (!pud || !pud_val(*pud))
+               return;
+
+       pmd = pmd_offset(pud, start);
+       if (!pmd || !pmd_val(*pmd))
+               return;
+
+       ppte = pte_offset_map(pmd, start);
+       if (!ppte)
+               return;
+       pte = *ppte;
+       pte_unmap(ppte);
+
+       if (pte_none(pte))
+               return;
+
+       page = pte_page(pte);
+       if (!page)
+               return;
+
+       addr = page_to_phys(page);
+
+       /* invalidate the icache coverage on that region */
+       mn10300_local_icache_inv_range2(addr + off, size);
+       smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+}
+
+/**
+ * flush_icache_range - Globally flush dcache and invalidate icache for region
+ * @start: The starting virtual address of the region.
+ * @end: The ending virtual address of the region.
+ *
+ * This is used by the kernel to globally flush some code it has just written
+ * from the dcache back to RAM and then to globally invalidate the icache over
+ * that region so that that code can be run on all CPUs in the system.
+ */
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+       unsigned long start_page, end_page;
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+
+       if (end > 0x80000000UL) {
+               /* addresses above 0xa0000000 do not go through the cache */
+               if (end > 0xa0000000UL) {
+                       end = 0xa0000000UL;
+                       if (start >= end)
+                               goto done;
+               }
+
+               /* kernel addresses between 0x80000000 and 0x9fffffff do not
+                * require page tables, so we just map such addresses
+                * directly */
+               start_page = (start >= 0x80000000UL) ? start : 0x80000000UL;
+               mn10300_icache_inv_range(start_page, end);
+               smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end);
+               if (start_page == start)
+                       goto done;
+               end = start_page;
+       }
+
+       start_page = start & PAGE_MASK;
+       end_page = (end - 1) & PAGE_MASK;
+
+       if (start_page == end_page) {
+               /* the first and last bytes are on the same page */
+               flush_icache_page_range(start, end);
+       } else if (start_page + 1 == end_page) {
+               /* split over two virtually contiguous pages */
+               flush_icache_page_range(start, end_page);
+               flush_icache_page_range(end_page, end);
+       } else {
+               /* more than 2 pages; just flush the entire cache */
+               mn10300_local_icache_inv();
+               smp_cache_call(SMP_ICACHE_INV, 0, 0);
+       }
+
+done:
+       smp_unlock_cache(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
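
A short worked model of the address-window handling in flush_icache_range() above: anything at or above 0xa0000000 is uncached and clipped off, the 0x80000000-0x9fffffff window is direct-mapped and handled without page tables, and only what remains below 0x80000000 takes the per-page path (collapsed to a single line here). The printouts stand in for the cache calls.

#include <stdio.h>

static void flush_icache_range_model(unsigned long start, unsigned long end)
{
	if (end > 0x80000000UL) {
		if (end > 0xa0000000UL) {
			end = 0xa0000000UL;	/* clip the uncached part */
			if (start >= end)
				return;
		}

		/* direct-mapped kernel window: no page-table walk needed */
		unsigned long s = start >= 0x80000000UL ? start : 0x80000000UL;

		printf("direct: inv %#lx-%#lx\n", s, end);
		if (s == start)
			return;
		end = s;	/* the rest falls through to the paged path */
	}

	printf("paged: per-page inv %#lx-%#lx\n", start, end);
}

int main(void)
{
	flush_icache_range_model(0x7ffff800UL, 0x80000800UL); /* straddles both */
	return 0;
}
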
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
deleted file mode 100644 (file)
index e839d0a..0000000
+++ /dev/null
@@ -1,289 +0,0 @@
-/* MN10300 CPU core caching routines
- *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
- * Written by David Howells (dhowells@redhat.com)
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public Licence
- * as published by the Free Software Foundation; either version
- * 2 of the Licence, or (at your option) any later version.
- */
-#include <linux/sys.h>
-#include <linux/linkage.h>
-#include <asm/smp.h>
-#include <asm/page.h>
-#include <asm/cache.h>
-
-#define mn10300_dcache_inv_range_intr_interval \
-       +((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)
-
-#if mn10300_dcache_inv_range_intr_interval > 0xff
-#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
-#endif
-
-       .am33_2
-
-       .globl mn10300_icache_inv
-       .globl mn10300_dcache_inv
-       .globl mn10300_dcache_inv_range
-       .globl mn10300_dcache_inv_range2
-       .globl mn10300_dcache_inv_page
-
-###############################################################################
-#
-# void mn10300_icache_inv(void)
-# Invalidate the entire icache
-#
-###############################################################################
-       ALIGN
-mn10300_icache_inv:
-       mov     CHCTR,a0
-
-       movhu   (a0),d0
-       btst    CHCTR_ICEN,d0
-       beq     mn10300_icache_inv_end
-
-       mov     epsw,d1
-       and     ~EPSW_IE,epsw
-       nop
-       nop
-
-       # disable the icache
-       and     ~CHCTR_ICEN,d0
-       movhu   d0,(a0)
-
-       # and wait for it to calm down
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_ICBUSY,d0
-       lne
-
-       # invalidate
-       or      CHCTR_ICINV,d0
-       movhu   d0,(a0)
-
-       # wait for the cache to finish
-       mov     CHCTR,a0
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_ICBUSY,d0
-       lne
-
-       # and reenable it
-       and     ~CHCTR_ICINV,d0
-       or      CHCTR_ICEN,d0
-       movhu   d0,(a0)
-       movhu   (a0),d0
-
-       mov     d1,epsw
-
-mn10300_icache_inv_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv(void)
-# Invalidate the entire dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_inv:
-       mov     CHCTR,a0
-
-       movhu   (a0),d0
-       btst    CHCTR_DCEN,d0
-       beq     mn10300_dcache_inv_end
-
-       mov     epsw,d1
-       and     ~EPSW_IE,epsw
-       nop
-       nop
-
-       # disable the dcache
-       and     ~CHCTR_DCEN,d0
-       movhu   d0,(a0)
-
-       # and wait for it to calm down
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-       # invalidate
-       or      CHCTR_DCINV,d0
-       movhu   d0,(a0)
-
-       # wait for the cache to finish
-       mov     CHCTR,a0
-       setlb
-       movhu   (a0),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-       # and reenable it
-       and     ~CHCTR_DCINV,d0
-       or      CHCTR_DCEN,d0
-       movhu   d0,(a0)
-       movhu   (a0),d0
-
-       mov     d1,epsw
-
-mn10300_dcache_inv_end:
-       ret     [],0
-
-###############################################################################
-#
-# void mn10300_dcache_inv_range(unsigned start, unsigned end)
-# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
-# void mn10300_dcache_inv_page(unsigned start)
-# Invalidate a range of addresses on a page in the dcache
-#
-###############################################################################
-       ALIGN
-mn10300_dcache_inv_page:
-       mov     PAGE_SIZE,d1
-mn10300_dcache_inv_range2:
-       add     d0,d1
-mn10300_dcache_inv_range:
-       movm    [d2,d3,a2],(sp)
-       mov     CHCTR,a2
-
-       movhu   (a2),d2
-       btst    CHCTR_DCEN,d2
-       beq     mn10300_dcache_inv_range_end
-
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0      # round start
-                                                               # addr down
-       mov     d0,a1
-
-       add     L1_CACHE_BYTES,d1                       # round end addr up
-       and     L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1
-
-       clr     d2                              # we're going to clear tag ram
-                                               # entries
-
-       # read the tags from the tag RAM, and if they indicate a valid dirty
-       # cache line then invalidate that line
-       mov     DCACHE_TAG(0,0),a0
-       mov     a1,d0
-       and     L1_CACHE_TAG_ENTRY,d0
-       add     d0,a0                           # starting dcache tag RAM
-                                               # access address
-
-       sub     a1,d1
-       lsr     L1_CACHE_SHIFT,d1               # total number of entries to
-                                               # examine
-
-       and     ~(L1_CACHE_DISPARITY-1),a1      # determine comparator base
-
-mn10300_dcache_inv_range_outer_loop:
-       # disable interrupts
-       mov     epsw,d3
-       and     ~EPSW_IE,epsw
-       nop                                     # note that reading CHCTR and
-                                               # AND'ing D0 occupy two delay
-                                               # slots after disabling
-                                               # interrupts
-
-       # disable the dcache
-       movhu   (a2),d0
-       and     ~CHCTR_DCEN,d0
-       movhu   d0,(a2)
-
-       # and wait for it to calm down
-       setlb
-       movhu   (a2),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-mn10300_dcache_inv_range_loop:
-
-       # process the way 0 slot
-       mov     (L1_CACHE_WAYDISP*0,a0),d0      # read the tag in the way 0 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_0 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_0 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_0:
-
-       # process the way 1 slot
-       mov     (L1_CACHE_WAYDISP*1,a0),d0      # read the tag in the way 1 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_1 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_1 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_1:
-
-       # process the way 2 slot
-       mov     (L1_CACHE_WAYDISP*2,a0),d0      # read the tag in the way 2 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_2 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_2 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_2:
-
-       # process the way 3 slot
-       mov     (L1_CACHE_WAYDISP*3,a0),d0      # read the tag in the way 3 slot
-       btst    L1_CACHE_TAG_VALID,d0
-       beq     mn10300_dcache_inv_range_skip_3 # jump if this cacheline is not
-                                               # valid
-
-       xor     a1,d0
-       lsr     12,d0
-       bne     mn10300_dcache_inv_range_skip_3 # jump if not this cacheline
-
-       mov     d2,(a0)                         # kill the tag
-
-mn10300_dcache_inv_range_skip_3:
-
-       # approx every N steps we re-enable the cache and see if there are any
-       # interrupts to be processed
-       # we also break out if we've reached the end of the loop
-       # (the bottom nibble of the count is zero in both cases)
-       add     L1_CACHE_BYTES,a0
-       add     L1_CACHE_BYTES,a1
-       add     -1,d1
-       btst    mn10300_dcache_inv_range_intr_interval,d1
-       bne     mn10300_dcache_inv_range_loop
-
-       # wait for the cache to finish what it's doing
-       setlb
-       movhu   (a2),d0
-       btst    CHCTR_DCBUSY,d0
-       lne
-
-       # and reenable it
-       or      CHCTR_DCEN,d0
-       movhu   d0,(a2)
-       movhu   (a2),d0
-
-       # re-enable interrupts
-       # - we don't bother with delay NOPs as we'll have enough instructions
-       #   before we disable interrupts again to give the interrupts a chance
-       #   to happen
-       mov     d3,epsw
-
-       # go around again if the counter hasn't yet reached zero
-       add     0,d1
-       bne     mn10300_dcache_inv_range_outer_loop
-
-mn10300_dcache_inv_range_end:
-       ret     [d2,d3,a2],12
diff --git a/arch/mn10300/mm/cache-smp-flush.c b/arch/mn10300/mm/cache-smp-flush.c
new file mode 100644 (file)
index 0000000..fd51af5
--- /dev/null
@@ -0,0 +1,156 @@
+/* Functions for global dcache flush when writeback caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_dcache_flush - Globally flush data cache
+ *
+ * Flush the data cache on all CPUs.
+ */
+void mn10300_dcache_flush(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush();
+       smp_cache_call(SMP_DCACHE_FLUSH, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_page - Globally flush a page of data cache
+ * @start: The address of the page of memory to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs covering
+ * the page that includes the given address.
+ */
+void mn10300_dcache_flush_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_page(start);
+       smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @end: The end address of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * end-1 inclusive.
+ */
+void mn10300_dcache_flush_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_range(start, end);
+       smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_range2 - Globally flush range of data cache
+ * @start: The start address of the region to be flushed.
+ * @size: The size of the region to be flushed.
+ *
+ * Flush a range of addresses in the data cache on all CPUs, between start and
+ * start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_range2(start, size);
+       smp_cache_call(SMP_DCACHE_FLUSH_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv - Globally flush and invalidate data cache
+ *
+ * Flush and invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_flush_inv(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv();
+       smp_cache_call(SMP_DCACHE_FLUSH_INV, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_page - Globally flush and invalidate a page of data
+ *     cache
+ * @start: The address of the page of memory to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_dcache_flush_inv_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv_page(start);
+       smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range - Globally flush and invalidate range of data
+ *     cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @end: The end address of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv_range(start, end);
+       smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_flush_inv_range2 - Globally flush and invalidate range of data
+ *     cache
+ * @start: The start address of the region to be flushed and invalidated.
+ * @size: The size of the region to be flushed and invalidated.
+ *
+ * Flush and invalidate a range of addresses in the data cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_dcache_flush_inv_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_flush_inv_range2(start, size);
+       smp_cache_call(SMP_DCACHE_FLUSH_INV_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
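
Each helper above follows the same shape: take the cache IPI lock, perform the operation locally, repeat it on the other CPUs via smp_cache_call(), then unlock. As a hedged example of a caller on the flush side, the sketch below writes a buffer back before a device reads it by DMA; dma_tx_prepare(), buf and len are hypothetical, and real drivers would normally go through the DMA mapping API rather than these helpers.

#include <asm/cacheflush.h>

/* Hypothetical transmit-path helper: write dirty dcache lines covering
 * the buffer back to RAM on every CPU so the device sees the latest
 * CPU stores when it DMAs the buffer out. */
static void dma_tx_prepare(void *buf, unsigned long len)
{
	mn10300_dcache_flush_range((unsigned long)buf,
				   (unsigned long)buf + len);
}
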
diff --git a/arch/mn10300/mm/cache-smp-inv.c b/arch/mn10300/mm/cache-smp-inv.c
new file mode 100644 (file)
index 0000000..ff17873
--- /dev/null
@@ -0,0 +1,153 @@
+/* Functions for global i/dcache invalidation when caching in SMP
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/mm.h>
+#include <asm/cacheflush.h>
+#include "cache-smp.h"
+
+/**
+ * mn10300_icache_inv - Globally invalidate instruction cache
+ *
+ * Invalidate the instruction cache on all CPUs.
+ */
+void mn10300_icache_inv(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv();
+       smp_cache_call(SMP_ICACHE_INV, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_page - Globally invalidate a page of instruction cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs
+ * covering the page that includes the given address.
+ */
+void mn10300_icache_inv_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv_page(start);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and end-1 inclusive.
+ */
+void mn10300_icache_inv_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv_range(start, end);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_icache_inv_range2 - Globally invalidate range of instruction cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the instruction cache on all CPUs,
+ * between start and start+size-1 inclusive.
+ */
+void mn10300_icache_inv_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_icache_inv_range2(start, size);
+       smp_cache_call(SMP_ICACHE_INV_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv - Globally invalidate data cache
+ *
+ * Invalidate the data cache on all CPUs.
+ */
+void mn10300_dcache_inv(void)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv();
+       smp_cache_call(SMP_DCACHE_INV, 0, 0);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_page - Globally invalidate a page of data cache
+ * @start: The address of the page of memory to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs covering the
+ * page that includes the given address.
+ */
+void mn10300_dcache_inv_page(unsigned long start)
+{
+       unsigned long flags;
+
+       start &= ~(PAGE_SIZE-1);
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv_page(start);
+       smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + PAGE_SIZE);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @end: The end address of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and end-1 inclusive.
+ */
+void mn10300_dcache_inv_range(unsigned long start, unsigned long end)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv_range(start, end);
+       smp_cache_call(SMP_DCACHE_INV_RANGE, start, end);
+       smp_unlock_cache(flags);
+}
+
+/**
+ * mn10300_dcache_inv_range2 - Globally invalidate range of data cache
+ * @start: The start address of the region to be invalidated.
+ * @size: The size of the region to be invalidated.
+ *
+ * Invalidate a range of addresses in the data cache on all CPUs, between start
+ * and start+size-1 inclusive.
+ */
+void mn10300_dcache_inv_range2(unsigned long start, unsigned long size)
+{
+       unsigned long flags;
+
+       flags = smp_lock_cache();
+       mn10300_local_dcache_inv_range2(start, size);
+       smp_cache_call(SMP_DCACHE_INV_RANGE, start, start + size);
+       smp_unlock_cache(flags);
+}
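
And the mirror image on the invalidate side: once a device has DMAed data into a buffer, stale dcache lines covering it must be dropped before the CPU reads it. Again dma_rx_complete(), buf and len are hypothetical names used only for illustration.

#include <asm/cacheflush.h>

/* Hypothetical completion handler: drop any stale dcache lines over the
 * freshly DMAed buffer on every CPU before the data is parsed. */
static void dma_rx_complete(void *buf, unsigned long len)
{
	mn10300_dcache_inv_range((unsigned long)buf,
				 (unsigned long)buf + len);
}
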
diff --git a/arch/mn10300/mm/cache-smp.c b/arch/mn10300/mm/cache-smp.c
new file mode 100644 (file)
index 0000000..4a6e9a4
--- /dev/null
@@ -0,0 +1,105 @@
+/* SMP global caching code
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/threads.h>
+#include <linux/interrupt.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/cacheflush.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
+
+DEFINE_SPINLOCK(smp_cache_lock);
+static unsigned long smp_cache_mask;
+static unsigned long smp_cache_start;
+static unsigned long smp_cache_end;
+static cpumask_t smp_cache_ipi_map;            /* Bitmask of cache IPI done CPUs */
+
+/**
+ * smp_cache_interrupt - Handle IPI request to flush caches.
+ *
+ * Handle a request delivered by IPI to flush the current CPU's
+ * caches.  The parameters are stored in smp_cache_*.
+ */
+void smp_cache_interrupt(void)
+{
+       unsigned long opr_mask = smp_cache_mask;
+
+       switch ((enum smp_dcache_ops)(opr_mask & SMP_DCACHE_OP_MASK)) {
+       case SMP_DCACHE_NOP:
+               break;
+       case SMP_DCACHE_INV:
+               mn10300_local_dcache_inv();
+               break;
+       case SMP_DCACHE_INV_RANGE:
+               mn10300_local_dcache_inv_range(smp_cache_start, smp_cache_end);
+               break;
+       case SMP_DCACHE_FLUSH:
+               mn10300_local_dcache_flush();
+               break;
+       case SMP_DCACHE_FLUSH_RANGE:
+               mn10300_local_dcache_flush_range(smp_cache_start,
+                                                smp_cache_end);
+               break;
+       case SMP_DCACHE_FLUSH_INV:
+               mn10300_local_dcache_flush_inv();
+               break;
+       case SMP_DCACHE_FLUSH_INV_RANGE:
+               mn10300_local_dcache_flush_inv_range(smp_cache_start,
+                                                    smp_cache_end);
+               break;
+       }
+
+       switch ((enum smp_icache_ops)(opr_mask & SMP_ICACHE_OP_MASK)) {
+       case SMP_ICACHE_NOP:
+               break;
+       case SMP_ICACHE_INV:
+               mn10300_local_icache_inv();
+               break;
+       case SMP_ICACHE_INV_RANGE:
+               mn10300_local_icache_inv_range(smp_cache_start, smp_cache_end);
+               break;
+       }
+
+       cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+}
+
+/**
+ * smp_cache_call - Issue an IPI to request the other CPUs flush caches
+ * @opr_mask: Cache operation flags
+ * @start: Start address of request
+ * @end: End address of request
+ *
+ * Send cache flush IPI to other CPUs.  This invokes smp_cache_interrupt()
+ * above on those other CPUs and then waits for them to finish.
+ *
+ * The caller must hold smp_cache_lock.
+ */
+void smp_cache_call(unsigned long opr_mask,
+                   unsigned long start, unsigned long end)
+{
+       smp_cache_mask = opr_mask;
+       smp_cache_start = start;
+       smp_cache_end = end;
+       smp_cache_ipi_map = cpu_online_map;
+       cpu_clear(smp_processor_id(), smp_cache_ipi_map);
+
+       send_IPI_allbutself(FLUSH_CACHE_IPI);
+
+       while (!cpus_empty(smp_cache_ipi_map))
+               /* nothing. lockup detection does not belong here */
+               mb();
+}
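
The wait loop above is one half of a simple request/acknowledge handshake: smp_cache_call() marks every other online CPU in smp_cache_ipi_map, raises FLUSH_CACHE_IPI, and spins until each target has cleared its own bit from inside smp_cache_interrupt(). A minimal stand-alone sketch of the same idea, using a single C11 atomic bitmap word instead of the kernel's cpumask API (all names below are illustrative only, not part of this patch):

#include <stdatomic.h>

static _Atomic unsigned long pending;		/* one bit per target CPU */

/* Initiator: post work for the CPUs in @targets, then wait for all acks. */
static void request_and_wait(unsigned long targets)
{
	atomic_store(&pending, targets);
	/* ... raise the IPI on the target CPUs here ... */
	while (atomic_load(&pending) != 0)
		;				/* spin until every target has acked */
}

/* Responder: run from the IPI handler on each target CPU. */
static void acknowledge(unsigned int cpu)
{
	/* ... perform the requested cache operation here ... */
	atomic_fetch_and(&pending, ~(1UL << cpu));	/* clear own bit */
}

The kernel code achieves the same effect with cpu_clear() and explicit memory barriers rather than C11 atomics.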
diff --git a/arch/mn10300/mm/cache-smp.h b/arch/mn10300/mm/cache-smp.h
new file mode 100644 (file)
index 0000000..cb52892
--- /dev/null
@@ -0,0 +1,69 @@
+/* SMP caching definitions
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+
+/*
+ * Operation requests for smp_cache_call().
+ *
+ * One of smp_icache_ops and one of smp_dcache_ops can be OR'd together.
+ */
+enum smp_icache_ops {
+       SMP_ICACHE_NOP                  = 0x0000,
+       SMP_ICACHE_INV                  = 0x0001,
+       SMP_ICACHE_INV_RANGE            = 0x0002,
+};
+#define SMP_ICACHE_OP_MASK             0x0003
+
+enum smp_dcache_ops {
+       SMP_DCACHE_NOP                  = 0x0000,
+       SMP_DCACHE_INV                  = 0x0004,
+       SMP_DCACHE_INV_RANGE            = 0x0008,
+       SMP_DCACHE_FLUSH                = 0x000c,
+       SMP_DCACHE_FLUSH_RANGE          = 0x0010,
+       SMP_DCACHE_FLUSH_INV            = 0x0014,
+       SMP_DCACHE_FLUSH_INV_RANGE      = 0x0018,
+};
+#define SMP_DCACHE_OP_MASK             0x001c
+
+#define        SMP_IDCACHE_INV_FLUSH           (SMP_ICACHE_INV | SMP_DCACHE_FLUSH)
+#define SMP_IDCACHE_INV_FLUSH_RANGE    (SMP_ICACHE_INV_RANGE | SMP_DCACHE_FLUSH_RANGE)
+
+/*
+ * cache-smp.c
+ */
+#ifdef CONFIG_SMP
+extern spinlock_t smp_cache_lock;
+
+extern void smp_cache_call(unsigned long opr_mask,
+                          unsigned long addr, unsigned long end);
+
+static inline unsigned long smp_lock_cache(void)
+       __acquires(&smp_cache_lock)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&smp_cache_lock, flags);
+       return flags;
+}
+
+static inline void smp_unlock_cache(unsigned long flags)
+       __releases(&smp_cache_lock)
+{
+       spin_unlock_irqrestore(&smp_cache_lock, flags);
+}
+
+#else
+static inline unsigned long smp_lock_cache(void) { return 0; }
+static inline void smp_unlock_cache(unsigned long flags) {}
+static inline void smp_cache_call(unsigned long opr_mask,
+                                 unsigned long addr, unsigned long end)
+{
+}
+#endif /* CONFIG_SMP */
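
As the comment in this header notes, one smp_icache_ops value and one smp_dcache_ops value can be OR'd into a single request, so a combined data-cache flush plus instruction-cache invalidate costs only one IPI round trip. A minimal sketch of such a caller, following the locking convention used by the global functions in cache-smp-inv.c above (the helper name itself is hypothetical, not something this patch adds):

/* Hypothetical: flush a dcache range and invalidate the matching icache
 * range on all CPUs with a single smp_cache_call() request.
 */
static void example_flush_dcache_inv_icache_range(unsigned long start,
						  unsigned long end)
{
	unsigned long flags;

	flags = smp_lock_cache();			/* takes smp_cache_lock */
	mn10300_local_dcache_flush_range(start, end);	/* this CPU's dcache */
	mn10300_local_icache_inv_range(start, end);	/* this CPU's icache */
	smp_cache_call(SMP_IDCACHE_INV_FLUSH_RANGE, start, end);
	smp_unlock_cache(flags);
}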
index 9261217e8d2c5741bb500b829bbd7663859b5541..0a1f0aa92ebc78ff9881993e16d3015cb524cb96 100644 (file)
 #include <asm/cacheflush.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
+#include <asm/smp.h>
+#include "cache-smp.h"
 
 EXPORT_SYMBOL(mn10300_icache_inv);
+EXPORT_SYMBOL(mn10300_icache_inv_range);
+EXPORT_SYMBOL(mn10300_icache_inv_range2);
+EXPORT_SYMBOL(mn10300_icache_inv_page);
 EXPORT_SYMBOL(mn10300_dcache_inv);
 EXPORT_SYMBOL(mn10300_dcache_inv_range);
 EXPORT_SYMBOL(mn10300_dcache_inv_range2);
@@ -36,96 +41,6 @@ EXPORT_SYMBOL(mn10300_dcache_flush_range2);
 EXPORT_SYMBOL(mn10300_dcache_flush_page);
 #endif
 
-/*
- * write a page back from the dcache and invalidate the icache so that we can
- * run code from it that we've just written into it
- */
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-       mn10300_dcache_flush_page(page_to_phys(page));
-       mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_page);
-
-/*
- * write some code we've just written back from the dcache and invalidate the
- * icache so that we can run that code
- */
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-#ifdef CONFIG_MN10300_CACHE_WBACK
-       unsigned long addr, size, base, off;
-       struct page *page;
-       pgd_t *pgd;
-       pud_t *pud;
-       pmd_t *pmd;
-       pte_t *ppte, pte;
-
-       if (end > 0x80000000UL) {
-               /* addresses above 0xa0000000 do not go through the cache */
-               if (end > 0xa0000000UL) {
-                       end = 0xa0000000UL;
-                       if (start >= end)
-                               return;
-               }
-
-               /* kernel addresses between 0x80000000 and 0x9fffffff do not
-                * require page tables, so we just map such addresses directly */
-               base = (start >= 0x80000000UL) ? start : 0x80000000UL;
-               mn10300_dcache_flush_range(base, end);
-               if (base == start)
-                       goto invalidate;
-               end = base;
-       }
-
-       for (; start < end; start += size) {
-               /* work out how much of the page to flush */
-               off = start & (PAGE_SIZE - 1);
-
-               size = end - start;
-               if (size > PAGE_SIZE - off)
-                       size = PAGE_SIZE - off;
-
-               /* get the physical address the page is mapped to from the page
-                * tables */
-               pgd = pgd_offset(current->mm, start);
-               if (!pgd || !pgd_val(*pgd))
-                       continue;
-
-               pud = pud_offset(pgd, start);
-               if (!pud || !pud_val(*pud))
-                       continue;
-
-               pmd = pmd_offset(pud, start);
-               if (!pmd || !pmd_val(*pmd))
-                       continue;
-
-               ppte = pte_offset_map(pmd, start);
-               if (!ppte)
-                       continue;
-               pte = *ppte;
-               pte_unmap(ppte);
-
-               if (pte_none(pte))
-                       continue;
-
-               page = pte_page(pte);
-               if (!page)
-                       continue;
-
-               addr = page_to_phys(page);
-
-               /* flush the dcache and invalidate the icache coverage on that
-                * region */
-               mn10300_dcache_flush_range2(addr + off, size);
-       }
-#endif
-
-invalidate:
-       mn10300_icache_inv();
-}
-EXPORT_SYMBOL(flush_icache_range);
-
 /*
  * allow userspace to flush the instruction cache
  */
index 81f153fa51b4a6ecb07111f1d20383fbf0b6eaf2..59c3da49d9d9e4ad08108bf2f1eebe8182f46e0c 100644 (file)
@@ -39,10 +39,6 @@ void bust_spinlocks(int yes)
 {
        if (yes) {
                oops_in_progress = 1;
-#ifdef CONFIG_SMP
-               /* Many serial drivers do __global_cli() */
-               global_irq_lock = 0;
-#endif
        } else {
                int loglevel_save = console_loglevel;
 #ifdef CONFIG_VT
@@ -100,8 +96,6 @@ static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
 }
 #endif
 
-asmlinkage void monitor_signal(struct pt_regs *);
-
 /*
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
@@ -279,7 +273,6 @@ good_area:
  */
 bad_area:
        up_read(&mm->mmap_sem);
-       monitor_signal(regs);
 
        /* User mode accesses just cause a SIGSEGV */
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
@@ -292,7 +285,6 @@ bad_area:
        }
 
 no_context:
-       monitor_signal(regs);
        /* Are we prepared to handle this kernel fault?  */
        if (fixup_exception(regs))
                return;
@@ -338,14 +330,13 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if ((fault_code & MMUFCR_xFC_ACCESS) != MMUFCR_xFC_ACCESS_USR)
-               goto no_context;
-       pagefault_out_of_memory();
-       return;
+       printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
+       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+               do_exit(SIGKILL);
+       goto no_context;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
-       monitor_signal(regs);
 
        /*
         * Send a sigbus, regardless of whether we were in kernel
index 6e6bc0e51521811895e4c3e11e73fdb2fdc49118..48907cc3bdb77311526c1b1ca173b20983091f8d 100644 (file)
@@ -41,6 +41,10 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 unsigned long highstart_pfn, highend_pfn;
 
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+static struct vm_struct user_iomap_vm;
+#endif
+
 /*
  * set up paging
  */
@@ -73,7 +77,24 @@ void __init paging_init(void)
        /* pass the memory from the bootmem allocator to the main allocator */
        free_area_init(zones_size);
 
-       __flush_tlb_all();
+#ifdef CONFIG_MN10300_HAS_ATOMIC_OPS_UNIT
+       /* The Atomic Operation Unit registers need to be mapped to userspace
+        * for all processes.  The following uses vm_area_register_early() to
+        * reserve the first page of the vmalloc area and sets the pte for that
+        * page.
+        *
+        * glibc hardcodes this virtual mapping, so we're pretty much stuck with
+        * it from now on.
+        */
+       user_iomap_vm.flags = VM_USERMAP;
+       user_iomap_vm.size = 1 << PAGE_SHIFT;
+       vm_area_register_early(&user_iomap_vm, PAGE_SIZE);
+       ppte = kernel_vmalloc_ptes;
+       set_pte(ppte, pfn_pte(USER_ATOMIC_OPS_PAGE_ADDR >> PAGE_SHIFT,
+                             PAGE_USERIO));
+#endif
+
+       local_flush_tlb_all();
 }
 
 /*
@@ -84,8 +105,7 @@ void __init mem_init(void)
        int codesize, reservedpages, datasize, initsize;
        int tmp;
 
-       if (!mem_map)
-               BUG();
+       BUG_ON(!mem_map);
 
 #define START_PFN      (contig_page_data.bdata->node_min_pfn)
 #define MAX_LOW_PFN    (contig_page_data.bdata->node_low_pfn)
index 6dffbf97ac2601d40ceb91a2e4a5116fd1bceeb8..eef989c1d0c10baf8614edbd8f87ea09c4d2cf91 100644 (file)
@@ -449,8 +449,7 @@ found_opcode:
               regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
 
        tmp = format_tbl[pop->format].opsz;
-       if (tmp > noc)
-               BUG(); /* match was less complete than it ought to have been */
+       BUG_ON(tmp > noc); /* match was less complete than it ought to have been */
 
        if (tmp < noc) {
                tmp = noc - tmp;
index 36ba02191d408251f9eaefa366610824e34d3d69..a4f7d3dcc6e6ff8d5028e6e60de4a8281b219702 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
 
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
 /*
  * list of the MMU contexts last allocated on each CPU
  */
 unsigned long mmu_context_cache[NR_CPUS] = {
-       [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
+       [0 ... NR_CPUS - 1] =
+       MMU_CONTEXT_FIRST_VERSION * 2 - (1 - MMU_CONTEXT_TLBPID_LOCK_NR),
 };
-
-/*
- * flush the specified TLB entry
- */
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
-{
-       unsigned long pteu, cnx, flags;
-
-       addr &= PAGE_MASK;
-
-       /* make sure the context doesn't migrate and defend against
-        * interference from vmalloc'd regions */
-       local_irq_save(flags);
-
-       cnx = mm_context(vma->vm_mm);
-
-       if (cnx != MMU_NO_CONTEXT) {
-               pteu = addr | (cnx & 0x000000ffUL);
-               IPTEU = pteu;
-               DPTEU = pteu;
-               if (IPTEL & xPTEL_V)
-                       IPTEL = 0;
-               if (DPTEL & xPTEL_V)
-                       DPTEL = 0;
-       }
-
-       local_irq_restore(flags);
-}
+#endif /* CONFIG_MN10300_TLB_USE_PIDR */
 
 /*
  * preemptively set a TLB entry
@@ -63,10 +38,16 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *pte
         * interference from vmalloc'd regions */
        local_irq_save(flags);
 
+       cnx = ~MMU_NO_CONTEXT;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
        cnx = mm_context(vma->vm_mm);
+#endif
 
        if (cnx != MMU_NO_CONTEXT) {
-               pteu = addr | (cnx & 0x000000ffUL);
+               pteu = addr;
+#ifdef CONFIG_MN10300_TLB_USE_PIDR
+               pteu |= cnx & MMU_CONTEXT_TLBPID_MASK;
+#endif
                if (!(pte_val(pte) & _PAGE_NX)) {
                        IPTEU = pteu;
                        if (IPTEL & xPTEL_V)
index 9c1624c9e4e9fa05f33d621877c931dbb9ae1abb..450f7ba3f8f2ed8aaa39cfa111782be77df2c834 100644 (file)
@@ -59,7 +59,7 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
-       __flush_tlb_one(vaddr);
+       local_flush_tlb_one(vaddr);
 }
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
index 7095147dcb8ba2f83bbad869b7b2e4366dd4a6a5..b9940177d81be5f6026f38e769c05163f6d46215 100644 (file)
@@ -27,7 +27,6 @@
 ###############################################################################
        .type   itlb_miss,@function
 ENTRY(itlb_miss)
-       and     ~EPSW_NMID,epsw
 #ifdef CONFIG_GDBSTUB
        movm    [d2,d3,a2],(sp)
 #else
@@ -38,6 +37,12 @@ ENTRY(itlb_miss)
        nop
 #endif
 
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d2
+       mov     d2,(MMUCTR)
+#endif
+
+       and     ~EPSW_NMID,epsw
        mov     (IPTEU),d3
        mov     (PTBR),a2
        mov     d3,d2
@@ -56,10 +61,16 @@ ENTRY(itlb_miss)
        btst    _PAGE_VALID,d2
        beq     itlb_miss_fault         # jump if doesn't point to a page
                                        # (might be a swap id)
+#if    ((_PAGE_ACCESSED & 0xffffff00) == 0)
        bset    _PAGE_ACCESSED,(0,a2)
-       and     ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif  ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+       bset    +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+       and     ~xPTEL2_UNUSED1,d2
 itlb_miss_set:
-       mov     d2,(IPTEL)              # change the TLB
+       mov     d2,(IPTEL2)             # change the TLB
 #ifdef CONFIG_GDBSTUB
        movm    (sp),[d2,d3,a2]
 #endif
@@ -79,7 +90,6 @@ itlb_miss_fault:
 ###############################################################################
        .type   dtlb_miss,@function
 ENTRY(dtlb_miss)
-       and     ~EPSW_NMID,epsw
 #ifdef CONFIG_GDBSTUB
        movm    [d2,d3,a2],(sp)
 #else
@@ -90,6 +100,12 @@ ENTRY(dtlb_miss)
        nop
 #endif
 
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d2
+       mov     d2,(MMUCTR)
+#endif
+
+       and     ~EPSW_NMID,epsw
        mov     (DPTEU),d3
        mov     (PTBR),a2
        mov     d3,d2
@@ -108,10 +124,16 @@ ENTRY(dtlb_miss)
        btst    _PAGE_VALID,d2
        beq     dtlb_miss_fault         # jump if doesn't point to a page
                                        # (might be a swap id)
+#if    ((_PAGE_ACCESSED & 0xffffff00) == 0)
        bset    _PAGE_ACCESSED,(0,a2)
-       and     ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
+#elif  ((_PAGE_ACCESSED & 0xffff00ff) == 0)
+       bset    +(_PAGE_ACCESSED >> 8),(1,a2)
+#else
+#error "_PAGE_ACCESSED value is out of range"
+#endif
+       and     ~xPTEL2_UNUSED1,d2
 dtlb_miss_set:
-       mov     d2,(DPTEL)              # change the TLB
+       mov     d2,(DPTEL2)             # change the TLB
 #ifdef CONFIG_GDBSTUB
        movm    (sp),[d2,d3,a2]
 #endif
@@ -130,9 +152,15 @@ dtlb_miss_fault:
 ###############################################################################
        .type   itlb_aerror,@function
 ENTRY(itlb_aerror)
-       and     ~EPSW_NMID,epsw
        add     -4,sp
        SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d1
+       mov     d1,(MMUCTR)
+#endif
+
+       and     ~EPSW_NMID,epsw
        add     -4,sp                           # need to pass three params
 
        # calculate the fault code
@@ -140,15 +168,13 @@ ENTRY(itlb_aerror)
        or      0x00010000,d1                   # it's an instruction fetch
 
        # determine the page address
-       mov     (IPTEU),a2
-       mov     a2,d0
+       mov     (IPTEU),d0
        and     PAGE_MASK,d0
        mov     d0,(12,sp)
 
        clr     d0
-       mov     d0,(IPTEL)
+       mov     d0,(IPTEL2)
 
-       and     ~EPSW_NMID,epsw
        or      EPSW_IE,epsw
        mov     fp,d0
        call    do_page_fault[],0               # do_page_fault(regs,code,addr)
@@ -163,10 +189,16 @@ ENTRY(itlb_aerror)
 ###############################################################################
        .type   dtlb_aerror,@function
 ENTRY(dtlb_aerror)
-       and     ~EPSW_NMID,epsw
        add     -4,sp
        SAVE_ALL
+
+#if defined(CONFIG_ERRATUM_NEED_TO_RELOAD_MMUCTR)
+       mov     (MMUCTR),d1
+       mov     d1,(MMUCTR)
+#endif
+
        add     -4,sp                           # need to pass three params
+       and     ~EPSW_NMID,epsw
 
        # calculate the fault code
        movhu   (MMUFCR_DFC),d1
@@ -178,9 +210,8 @@ ENTRY(dtlb_aerror)
        mov     d0,(12,sp)
 
        clr     d0
-       mov     d0,(DPTEL)
+       mov     d0,(DPTEL2)
 
-       and     ~EPSW_NMID,epsw
        or      EPSW_IE,epsw
        mov     fp,d0
        call    do_page_fault[],0               # do_page_fault(regs,code,addr)
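
The bset changes in the hunks above select the byte of the PTE that actually contains _PAGE_ACCESSED: a one-byte bset at offset 0 when the bit lies in bits 0-7, or at offset 1 with the value shifted down when it lies in bits 8-15. Roughly equivalent C, shown purely as an illustration (it assumes the PTE is addressed as little-endian bytes, which is what the byte offsets in the assembly imply):

/* Illustration of the byte-select trick used by the TLB miss handlers. */
static inline void example_set_accessed(unsigned char *pte_bytes)
{
#if ((_PAGE_ACCESSED & 0xffffff00) == 0)
	pte_bytes[0] |= _PAGE_ACCESSED;		/* accessed bit is in byte 0 */
#elif ((_PAGE_ACCESSED & 0xffff00ff) == 0)
	pte_bytes[1] |= _PAGE_ACCESSED >> 8;	/* accessed bit is in byte 1 */
#else
#error "_PAGE_ACCESSED value is out of range"
#endif
}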
diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c
new file mode 100644 (file)
index 0000000..0b6a5ad
--- /dev/null
@@ -0,0 +1,214 @@
+/* SMP TLB support routines.
+ *
+ * Copyright (C) 2006-2008 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <asm/tlbflush.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <asm/processor.h>
+#include <asm/bug.h>
+#include <asm/exceptions.h>
+#include <asm/hardirq.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/thread_info.h>
+#include <asm/cpu-regs.h>
+#include <asm/intctl-regs.h>
+
+/*
+ * For flush TLB
+ */
+#define FLUSH_ALL      0xffffffff
+
+static cpumask_t flush_cpumask;
+static struct mm_struct *flush_mm;
+static unsigned long flush_va;
+static DEFINE_SPINLOCK(tlbstate_lock);
+
+DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
+       &init_mm, 0
+};
+
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+                            unsigned long va);
+static void do_flush_tlb_all(void *info);
+
+/**
+ * smp_flush_tlb - Callback to invalidate the TLB.
+ * @unused: Callback context (ignored).
+ */
+void smp_flush_tlb(void *unused)
+{
+       unsigned long cpu_id;
+
+       cpu_id = get_cpu();
+
+       if (!cpu_isset(cpu_id, flush_cpumask))
+               /* This was a BUG() but until someone can quote me the line
+                * from the intel manual that guarantees an IPI to multiple
+                * CPUs is retried _only_ on the erroring CPUs it's staying as a
+                * return
+                *
+                * BUG();
+                */
+               goto out;
+
+       if (flush_va == FLUSH_ALL)
+               local_flush_tlb();
+       else
+               local_flush_tlb_page(flush_mm, flush_va);
+
+       smp_mb__before_clear_bit();
+       cpu_clear(cpu_id, flush_cpumask);
+       smp_mb__after_clear_bit();
+out:
+       put_cpu();
+}
+
+/**
+ * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
+ * @cpumask: The list of CPUs to target.
+ * @mm: The VM context to flush from (if va!=FLUSH_ALL).
+ * @va: Virtual address to flush or FLUSH_ALL to flush everything.
+ */
+static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
+                            unsigned long va)
+{
+       cpumask_t tmp;
+
+       /* A couple of sanity checks (to be removed):
+        * - mask must not be empty
+        * - current CPU must not be in mask
+        * - we do not send IPIs to as-yet unbooted CPUs.
+        */
+       BUG_ON(!mm);
+       BUG_ON(cpus_empty(cpumask));
+       BUG_ON(cpu_isset(smp_processor_id(), cpumask));
+
+       cpus_and(tmp, cpumask, cpu_online_map);
+       BUG_ON(!cpus_equal(cpumask, tmp));
+
+       /* I'm not happy about this global shared spinlock in the MM hot path,
+        * but we'll see how contended it is.
+        *
+        * Temporarily this turns IRQs off, so that lockups are detected by the
+        * NMI watchdog.
+        */
+       spin_lock(&tlbstate_lock);
+
+       flush_mm = mm;
+       flush_va = va;
+#if NR_CPUS <= BITS_PER_LONG
+       atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
+#else
+#error Not supported.
+#endif
+
+       /* FIXME: if NR_CPUS>=3, change send_IPI_mask */
+       smp_call_function(smp_flush_tlb, NULL, 1);
+
+       while (!cpus_empty(flush_cpumask))
+               /* Lockup detection does not belong here */
+               smp_mb();
+
+       flush_mm = NULL;
+       flush_va = 0;
+       spin_unlock(&tlbstate_lock);
+}
+
+/**
+ * flush_tlb_mm - Invalidate TLB of specified VM context
+ * @mm: The VM context to invalidate.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+       cpumask_t cpu_mask;
+
+       preempt_disable();
+       cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+
+       local_flush_tlb();
+       if (!cpus_empty(cpu_mask))
+               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+       preempt_enable();
+}
+
+/**
+ * flush_tlb_current_task - Invalidate TLB of current task
+ */
+void flush_tlb_current_task(void)
+{
+       struct mm_struct *mm = current->mm;
+       cpumask_t cpu_mask;
+
+       preempt_disable();
+       cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+
+       local_flush_tlb();
+       if (!cpus_empty(cpu_mask))
+               flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
+
+       preempt_enable();
+}
+
+/**
+ * flush_tlb_page - Invalidate TLB of page
+ * @vma: The VM context to invalidate the page for.
+ * @va: The virtual address of the page to invalidate.
+ */
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
+{
+       struct mm_struct *mm = vma->vm_mm;
+       cpumask_t cpu_mask;
+
+       preempt_disable();
+       cpu_mask = mm->cpu_vm_mask;
+       cpu_clear(smp_processor_id(), cpu_mask);
+
+       local_flush_tlb_page(mm, va);
+       if (!cpus_empty(cpu_mask))
+               flush_tlb_others(cpu_mask, mm, va);
+
+       preempt_enable();
+}
+
+/**
+ * do_flush_tlb_all - Callback to completely invalidate a TLB
+ * @unused: Callback context (ignored).
+ */
+static void do_flush_tlb_all(void *unused)
+{
+       local_flush_tlb_all();
+}
+
+/**
+ * flush_tlb_all - Completely invalidate TLBs on all CPUs
+ */
+void flush_tlb_all(void)
+{
+       on_each_cpu(do_flush_tlb_all, 0, 1);
+}
index bdc1f9a59b4ccb360fddd5f88898a06d83cedafe..c1528004163ce8fe54418368ce3aea757e31104f 100644 (file)
  */
 #define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL    4
 
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER    \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER        \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
 #endif /* _ASM_PROC_CACHE_H */
index aa23e147d620f3d1d3b3fbce9641862d2dcb7e95..704a819f1f4b123c0b2a37171aa429c0abe48c81 100644 (file)
@@ -13,6 +13,4 @@
 
 #include <unit/clock.h>
 
-#define MN10300_WDCLK          MN10300_IOCLK
-
 #endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/dmactl-regs.h
new file mode 100644 (file)
index 0000000..d72d328
--- /dev/null
@@ -0,0 +1,102 @@
+/* MN103E010 on-board DMA controller registers
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define        DMxCTR(N)               __SYSREG(0xd2000000 + ((N) * 0x100), u32)       /* control reg */
+#define        DMxCTR_BG               0x0000001f      /* transfer request source */
+#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
+#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
+#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
+#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
+#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
+#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
+#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
+#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
+#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
+#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
+#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
+#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
+#define        DMxCTR_BG_AFE           0x0000000d      /* - analogue front-end interrupt source */
+#define        DMxCTR_BG_ADC           0x0000000e      /* - A/D conversion end interrupt source */
+#define        DMxCTR_BG_IRDA          0x0000000f      /* - IrDA interrupt source */
+#define        DMxCTR_BG_RTC           0x00000010      /* - RTC interrupt source */
+#define        DMxCTR_BG_XIRQ0         0x00000011      /* - XIRQ0 pin interrupt source */
+#define        DMxCTR_BG_XIRQ1         0x00000012      /* - XIRQ1 pin interrupt source */
+#define        DMxCTR_BG_XDMR0         0x00000013      /* - external request 0 source (XDMR0 pin) */
+#define        DMxCTR_BG_XDMR1         0x00000014      /* - external request 1 source (XDMR1 pin) */
+#define        DMxCTR_SAM              0x000000e0      /* DMA transfer src addr mode */
+#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
+#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
+#define        DMxCTR_DAM              0x00000000      /* DMA transfer dest addr mode */
+#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
+#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
+#define        DMxCTR_TM               0x00001800      /* DMA transfer mode */
+#define        DMxCTR_TM_BATCH         0x00000000      /* - batch transfer */
+#define        DMxCTR_TM_INTERM        0x00001000      /* - intermittent transfer */
+#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
+#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
+#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
+#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
+#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
+#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
+#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
+#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
+#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
+#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
+#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
+#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
+#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
+
+#define        DMxSRC(N)               __SYSREG(0xd2000004 + ((N) * 0x100), u32)       /* source addr reg */
+
+#define        DMxDST(N)               __SYSREG(0xd2000008 + ((N) * 0x100), u32)       /* dest addr reg */
+
+#define        DMxSIZ(N)               __SYSREG(0xd200000c + ((N) * 0x100), u32)       /* transfer size reg */
+#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
+
+#define        DMxCYC(N)               __SYSREG(0xd2000010 + ((N) * 0x100), u32)       /* intermittent
+                                                                                * size reg */
+#define DMxCYC_CYC             0x000000ff      /* number of intermittent transfers - 1 */
+
+#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
+#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
+#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
+#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
+
+#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
+#define        DM1ICR                  GxICR(DM1IRQ)   /* DMA channel 1 complete intr ctrl reg */
+#define        DM2ICR                  GxICR(DM2IRQ)   /* DMA channel 2 complete intr ctrl reg */
+#define        DM3ICR                  GxICR(DM3IRQ)   /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+       u32             ctr;
+       const void      *src;
+       void            *dst;
+       u32             siz;
+       u32             cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
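
DMxCTR is a plain bit-field register, so a transfer is normally described by OR'ing together one value from each field. Purely as an illustration of how these definitions compose (the channel number and field choices are arbitrary, and whether setting DMxCTR_TEN alone is enough to start a software-kicked transfer is an assumption, not taken from the datasheet):

/* Sketch: describe a software-triggered batch copy on DMA channel 0. */
static void example_program_dma0(u32 src_phys, u32 dst_phys, u32 len)
{
	DMxSRC(0) = src_phys;			/* source address */
	DMxDST(0) = dst_phys;			/* destination address */
	DMxSIZ(0) = len & DMxSIZ_CT;		/* byte count */
	DMxCTR(0) = DMxCTR_BG_SOFT		/* software request source */
		  | DMxCTR_SAM_INCR		/* increment source address */
		  | DMxCTR_DAM_INCR		/* increment destination address */
		  | DMxCTR_TM_BATCH		/* batch transfer mode */
		  | DMxCTR_UT_4			/* 4-byte transfer unit */
		  | DMxCTR_TEN;			/* enable the channel */
}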
diff --git a/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h b/arch/mn10300/proc-mn103e010/include/proc/intctl-regs.h
new file mode 100644 (file)
index 0000000..f537801
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR                   __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN                        0x00fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z)            \
+({                                             \
+       typeof(Z) x = (Z);                      \
+       x &= ~(3 << ((X) * 2));                 \
+       x |= ((Y) & 3) << ((X) * 2);            \
+       (Z) = x;                                \
+})
+
+/* external pin intr spec reg */
+#define EXTMD                  __SYSREG(0xd4000200, u16)
+#define GET_XIRQ_TRIGGER(X)    __GET_XIRQ_TRIGGER(X, EXTMD)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
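
__SET_XIRQ_TRIGGER() is a read-modify-write of the two-bit trigger field for external IRQ pin X. Expanded by hand for pin 3 (the trigger value 2 below is only a placeholder; the real trigger encodings live in the unit/board code, not in this header):

/* What SET_XIRQ_TRIGGER(3, 2) boils down to, step by step. */
static void example_set_xirq3_trigger(void)
{
	u16 md = EXTMD;			/* current external-pin trigger modes */
	md &= ~(3 << (3 * 2));		/* clear bits 7:6, the field for XIRQ3 */
	md |= (2 & 3) << (3 * 2);	/* install the placeholder mode 2 */
	EXTMD = md;			/* write the register back */
}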
index 22a2b93f70b7415cc1b775b3bd2e92eb43d61d3d..39c4f8e7d2d322f3fd2b4bb3c25ab3ac8ca1ff6d 100644 (file)
@@ -12,7 +12,7 @@
 #ifndef _ASM_PROC_PROC_H
 #define _ASM_PROC_PROC_H
 
-#define PROCESSOR_VENDOR_NAME  "Matsushita"
+#define PROCESSOR_VENDOR_NAME  "Panasonic"
 #define PROCESSOR_MODEL_NAME   "mn103e010"
 
 #endif /* _ASM_PROC_PROC_H */
index 9a482efafa82d34c1b03c3d5b136eb7eda6551c8..27b97980dca4d15eb05080ceedb92f873009eabd 100644 (file)
@@ -9,7 +9,9 @@
  * 2 of the Licence, or (at your option) any later version.
  */
 #include <linux/kernel.h>
+#include <asm/fpu.h>
 #include <asm/rtc.h>
+#include <asm/busctl-regs.h>
 
 /*
  * initialise the on-silicon processor peripherals
@@ -28,6 +30,7 @@ asmlinkage void __init processor_init(void)
        __set_intr_stub(EXCEP_DAERROR,          dtlb_aerror);
        __set_intr_stub(EXCEP_BUSERROR,         raw_bus_error);
        __set_intr_stub(EXCEP_DOUBLE_FAULT,     double_fault);
+       __set_intr_stub(EXCEP_FPU_DISABLED,     fpu_disabled);
        __set_intr_stub(EXCEP_SYSCALL0,         system_call);
 
        __set_intr_stub(EXCEP_NMI,              nmi_handler);
@@ -73,3 +76,37 @@ asmlinkage void __init processor_init(void)
 
        calibrate_clock();
 }
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+       unsigned long base, size;
+
+       *mem_base = 0;
+       *mem_size = 0;
+
+       base = SDBASE(0);
+       if (base & SDBASE_CE) {
+               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+               size = ~size + 1;
+               base &= SDBASE_CBA;
+
+               printk(KERN_INFO "SDRAM[0]: %luMb @%08lx\n", size >> 20, base);
+               *mem_size += size;
+               *mem_base = base;
+       }
+
+       base = SDBASE(1);
+       if (base & SDBASE_CE) {
+               size = (base & SDBASE_CBAM) << SDBASE_CBAM_SHIFT;
+               size = ~size + 1;
+               base &= SDBASE_CBA;
+
+               printk(KERN_INFO "SDRAM[1]: %luMb @%08lx\n", size >> 20, base);
+               *mem_size += size;
+               if (*mem_base == 0)
+                       *mem_base = base;
+       }
+}
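
The size calculation above relies on the SDBASE address-mask field being the top bits that the chip select compares: masking, shifting and then negating in two's complement turns that mask directly into the bank size. A worked example with an assumed (illustrative, not datasheet-checked) field value:

	shifted mask = 0xfc000000        (chip select decodes the top 6 address bits)
	size         = ~0xfc000000 + 1
	             = 0x04000000        = 64 MiB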
diff --git a/arch/mn10300/proc-mn2ws0050/Makefile b/arch/mn10300/proc-mn2ws0050/Makefile
new file mode 100644 (file)
index 0000000..d4ca133
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Makefile for the linux kernel.
+#
+
+obj-y   := proc-init.o
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
new file mode 100644 (file)
index 0000000..cafd7b5
--- /dev/null
@@ -0,0 +1,48 @@
+/* Cache specification
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Add L1_CACHE_SHIFT_MAX definition.
+ *  29-Jul-2008 MEI Add define for MN10300_HAS_AREAPURGE_REG.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CACHE_H
+#define _ASM_PROC_CACHE_H
+
+/*
+ * L1 cache
+ */
+#define L1_CACHE_NWAYS         4               /* number of ways in caches */
+#define L1_CACHE_NENTRIES      128             /* number of entries in each way */
+#define L1_CACHE_BYTES         32              /* bytes per entry */
+#define L1_CACHE_SHIFT         5               /* shift for bytes per entry */
+#define L1_CACHE_WAYDISP       0x1000          /* distance from one way to the next */
+
+#define L1_CACHE_TAG_VALID     0x00000001      /* cache tag valid bit */
+#define L1_CACHE_TAG_DIRTY     0x00000008      /* data cache tag dirty bit */
+#define L1_CACHE_TAG_ENTRY     0x00000fe0      /* cache tag entry address mask */
+#define L1_CACHE_TAG_ADDRESS   0xfffff000      /* cache tag line address mask */
+
+/*
+ * specification of the interval between interrupt checks whilst managing the
+ * cache with interrupts disabled
+ */
+#define MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL    4
+
+/*
+ * The size of range at which it becomes more economical to just flush the
+ * whole cache rather than trying to flush the specified range.
+ */
+#define MN10300_DCACHE_FLUSH_BORDER \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+#define MN10300_DCACHE_FLUSH_INV_BORDER        \
+       +(L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES)
+
+#endif /* _ASM_PROC_CACHE_H */
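
With the geometry defined above, both borders evaluate to the full cache size, which is the point at which flushing everything becomes cheaper than walking the range:

	MN10300_DCACHE_FLUSH_BORDER = L1_CACHE_NWAYS * L1_CACHE_NENTRIES * L1_CACHE_BYTES
	                            = 4 * 128 * 32
	                            = 16384 bytes  (16 KiB, the whole dcache)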
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/clock.h b/arch/mn10300/proc-mn2ws0050/include/proc/clock.h
new file mode 100644 (file)
index 0000000..fe4c0a4
--- /dev/null
@@ -0,0 +1,20 @@
+/* clock.h: proc-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  23-Feb-2007 MEI Delete define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_PROC_CLOCK_H
+#define _ASM_PROC_CLOCK_H
+
+#include <unit/clock.h>
+
+#endif /* _ASM_PROC_CLOCK_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/dmactl-regs.h
new file mode 100644 (file)
index 0000000..4c4319e
--- /dev/null
@@ -0,0 +1,103 @@
+/* MN2WS0050 on-board DMA controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ */
+
+#ifndef _ASM_PROC_DMACTL_REGS_H
+#define _ASM_PROC_DMACTL_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+/* DMA registers */
+#define        DMxCTR(N)               __SYSREG(0xd4005000+(N*0x100), u32)     /* control reg */
+#define        DMxCTR_BG               0x0000001f      /* transfer request source */
+#define        DMxCTR_BG_SOFT          0x00000000      /* - software source */
+#define        DMxCTR_BG_SC0TX         0x00000002      /* - serial port 0 transmission */
+#define        DMxCTR_BG_SC0RX         0x00000003      /* - serial port 0 reception */
+#define        DMxCTR_BG_SC1TX         0x00000004      /* - serial port 1 transmission */
+#define        DMxCTR_BG_SC1RX         0x00000005      /* - serial port 1 reception */
+#define        DMxCTR_BG_SC2TX         0x00000006      /* - serial port 2 transmission */
+#define        DMxCTR_BG_SC2RX         0x00000007      /* - serial port 2 reception */
+#define        DMxCTR_BG_TM0UFLOW      0x00000008      /* - timer 0 underflow */
+#define        DMxCTR_BG_TM1UFLOW      0x00000009      /* - timer 1 underflow */
+#define        DMxCTR_BG_TM2UFLOW      0x0000000a      /* - timer 2 underflow */
+#define        DMxCTR_BG_TM3UFLOW      0x0000000b      /* - timer 3 underflow */
+#define        DMxCTR_BG_TM6ACMPCAP    0x0000000c      /* - timer 6A compare/capture */
+#define        DMxCTR_BG_RYBY          0x0000000d      /* - NAND Flash RY/BY request source */
+#define        DMxCTR_BG_RMC           0x0000000e      /* - remote controller output */
+#define        DMxCTR_BG_XIRQ12        0x00000011      /* - XIRQ12 pin interrupt source */
+#define        DMxCTR_BG_XIRQ13        0x00000012      /* - XIRQ13 pin interrupt source */
+#define        DMxCTR_BG_TCK           0x00000014      /* - tick timer underflow */
+#define        DMxCTR_BG_SC4TX         0x00000019      /* - serial port4 transmission */
+#define        DMxCTR_BG_SC4RX         0x0000001a      /* - serial port4 reception */
+#define        DMxCTR_BG_SC5TX         0x0000001b      /* - serial port5 transmission */
+#define        DMxCTR_BG_SC5RX         0x0000001c      /* - serial port5 reception */
+#define        DMxCTR_BG_SC6TX         0x0000001d      /* - serial port6 transmission */
+#define        DMxCTR_BG_SC6RX         0x0000001e      /* - serial port6 reception */
+#define        DMxCTR_BG_TMSUFLOW      0x0000001f      /* - timestamp timer underflow */
+#define        DMxCTR_SAM              0x00000060      /* DMA transfer src addr mode */
+#define        DMxCTR_SAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_SAM_DECR         0x00000020      /* - decrement */
+#define        DMxCTR_SAM_FIXED        0x00000040      /* - fixed */
+#define        DMxCTR_DAM              0x00000300      /* DMA transfer dest addr mode */
+#define        DMxCTR_DAM_INCR         0x00000000      /* - increment */
+#define        DMxCTR_DAM_DECR         0x00000100      /* - decrement */
+#define        DMxCTR_DAM_FIXED        0x00000200      /* - fixed */
+#define        DMxCTR_UT               0x00006000      /* DMA transfer unit */
+#define        DMxCTR_UT_1             0x00000000      /* - 1 byte */
+#define        DMxCTR_UT_2             0x00002000      /* - 2 byte */
+#define        DMxCTR_UT_4             0x00004000      /* - 4 byte */
+#define        DMxCTR_UT_16            0x00006000      /* - 16 byte */
+#define DMxCTR_RRE             0x00008000      /* DMA round robin enable */
+#define        DMxCTR_TEN              0x00010000      /* DMA channel transfer enable */
+#define        DMxCTR_RQM              0x00060000      /* external request input source mode */
+#define        DMxCTR_RQM_FALLEDGE     0x00000000      /* - falling edge */
+#define        DMxCTR_RQM_RISEEDGE     0x00020000      /* - rising edge */
+#define        DMxCTR_RQM_LOLEVEL      0x00040000      /* - low level */
+#define        DMxCTR_RQM_HILEVEL      0x00060000      /* - high level */
+#define        DMxCTR_RQF              0x01000000      /* DMA transfer request flag */
+#define        DMxCTR_PERR             0x40000000      /* DMA transfer parameter error flag */
+#define        DMxCTR_XEND             0x80000000      /* DMA transfer end flag */
+
+#define        DMxSRC(N)               __SYSREG(0xd4005004+(N*0x100), u32)     /* source addr reg */
+
+#define        DMxDST(N)               __SYSREG(0xd4005008+(N*0x100), u32)     /* dest addr reg */
+
+#define        DMxSIZ(N)               __SYSREG(0xd400500c+(N*0x100), u32)     /* transfer size reg */
+#define DMxSIZ_CT              0x000fffff      /* number of bytes to transfer */
+
+#define        DMxCYC(N)               __SYSREG(0xd4005010+(N*0x100), u32)     /* intermittent size reg */
+#define DMxCYC_CYC             0x000000ff      /* number of intermittent transfers - 1 */
+
+#define DM0IRQ                 16              /* DMA channel 0 complete IRQ */
+#define DM1IRQ                 17              /* DMA channel 1 complete IRQ */
+#define DM2IRQ                 18              /* DMA channel 2 complete IRQ */
+#define DM3IRQ                 19              /* DMA channel 3 complete IRQ */
+
+#define        DM0ICR                  GxICR(DM0IRQ)   /* DMA channel 0 complete intr ctrl reg */
+#define        DM1ICR                  GxICR(DM1IRQ)   /* DMA channel 1 complete intr ctrl reg */
+#define        DM2ICR                  GxICR(DM2IRQ)   /* DMA channel 2 complete intr ctrl reg */
+#define        DM3ICR                  GxICR(DM3IRQ)   /* DMA channel 3 complete intr ctrl reg */
+
+#ifndef __ASSEMBLY__
+
+struct mn10300_dmactl_regs {
+       u32             ctr;
+       const void      *src;
+       void            *dst;
+       u32             siz;
+       u32             cyc;
+} __attribute__((aligned(0x100)));
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_PROC_DMACTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/intctl-regs.h
new file mode 100644 (file)
index 0000000..a1e9772
--- /dev/null
@@ -0,0 +1,29 @@
+#ifndef _ASM_PROC_INTCTL_REGS_H
+#define _ASM_PROC_INTCTL_REGS_H
+
+#ifndef _ASM_INTCTL_REGS_H
+# error "please don't include this file directly"
+#endif
+
+/* intr acceptance group reg */
+#define IAGR                   __SYSREG(0xd4000100, u16)
+
+/* group number register */
+#define IAGR_GN                        0x003fc
+
+#define __GET_XIRQ_TRIGGER(X, Z) (((Z) >> ((X) * 2)) & 3)
+
+#define __SET_XIRQ_TRIGGER(X, Y, Z)            \
+({                                             \
+       typeof(Z) x = (Z);                      \
+       x &= ~(3 << ((X) * 2));                 \
+       x |= ((Y) & 3) << ((X) * 2);            \
+       (Z) = x;                                \
+})
+
+/* external pin intr spec reg */
+#define EXTMD0                 __SYSREG(0xd4000200, u32)
+#define GET_XIRQ_TRIGGER(X)    __GET_XIRQ_TRIGGER(X, EXTMD0)
+#define SET_XIRQ_TRIGGER(X, Y) __SET_XIRQ_TRIGGER(X, Y, EXTMD0)
+
+#endif /* _ASM_PROC_INTCTL_REGS_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/irq.h b/arch/mn10300/proc-mn2ws0050/include/proc/irq.h
new file mode 100644 (file)
index 0000000..37777a8
--- /dev/null
@@ -0,0 +1,49 @@
+/* MN2WS0050 on-board interrupt controller registers
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *  13-Nov-2006 MEI Define extended IRQ number for SMP support.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _PROC_IRQ_H
+#define _PROC_IRQ_H
+
+#ifdef __KERNEL__
+
+#define GxICR_NUM_IRQS         163
+#ifdef CONFIG_SMP
+#define GxICR_NUM_EXT_IRQS     197
+#endif  /* CONFIG_SMP */
+
+#define GxICR_NUM_XIRQS                16
+
+#define XIRQ0          34
+#define XIRQ1          35
+#define XIRQ2          36
+#define XIRQ3          37
+#define XIRQ4          38
+#define XIRQ5          39
+#define XIRQ6          40
+#define XIRQ7          41
+#define XIRQ8          42
+#define XIRQ9          43
+#define XIRQ10         44
+#define XIRQ11         45
+#define XIRQ12         46
+#define XIRQ13         47
+#define XIRQ14         48
+#define XIRQ15         49
+
+#define XIRQ2IRQ(num)  (XIRQ0 + num)
+
+#endif /* __KERNEL__ */
+
+#endif /* _PROC_IRQ_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/nand-regs.h
new file mode 100644 (file)
index 0000000..84448f3
--- /dev/null
@@ -0,0 +1,120 @@
+/* NAND flash interface register definitions
+ *
+ * Copyright (C) 2008-2009 Panasonic Corporation
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef        _PROC_NAND_REGS_H_
+#define        _PROC_NAND_REGS_H_
+
+/* command register */
+#define FCOMMAND_0             __SYSREG(0xd8f00000, u8) /* fcommand[24:31] */
+#define FCOMMAND_1             __SYSREG(0xd8f00001, u8) /* fcommand[16:23] */
+#define FCOMMAND_2             __SYSREG(0xd8f00002, u8) /* fcommand[8:15] */
+#define FCOMMAND_3             __SYSREG(0xd8f00003, u8) /* fcommand[0:7] */
+
+/* for dma 16 byte trans, use FCOMMAND2 register */
+#define FCOMMAND2_0            __SYSREG(0xd8f00110, u8) /* fcommand2[24:31] */
+#define FCOMMAND2_1            __SYSREG(0xd8f00111, u8) /* fcommand2[16:23] */
+#define FCOMMAND2_2            __SYSREG(0xd8f00112, u8) /* fcommand2[8:15] */
+#define FCOMMAND2_3            __SYSREG(0xd8f00113, u8) /* fcommand2[0:7] */
+
+#define FCOMMAND_FIEN          0x80            /* nand flash I/F enable */
+#define FCOMMAND_BW_8BIT       0x00            /* 8bit bus width */
+#define FCOMMAND_BW_16BIT      0x40            /* 16bit bus width */
+#define FCOMMAND_BLOCKSZ_SMALL 0x00            /* small block */
+#define FCOMMAND_BLOCKSZ_LARGE 0x20            /* large block */
+#define FCOMMAND_DMASTART      0x10            /* dma start */
+#define FCOMMAND_RYBY          0x08            /* ready/busy flag */
+#define FCOMMAND_RYBYINTMSK    0x04            /* mask ready/busy interrupt */
+#define FCOMMAND_XFWP          0x02            /* write protect enable */
+#define FCOMMAND_XFCE          0x01            /* flash device disable */
+#define FCOMMAND_SEQKILL       0x10            /* stop seq-read */
+#define FCOMMAND_ANUM          0x07            /* address cycle */
+#define FCOMMAND_ANUM_NONE     0x00            /* address cycle none */
+#define FCOMMAND_ANUM_1CYC     0x01            /* address cycle 1cycle */
+#define FCOMMAND_ANUM_2CYC     0x02            /* address cycle 2cycle */
+#define FCOMMAND_ANUM_3CYC     0x03            /* address cycle 3cycle */
+#define FCOMMAND_ANUM_4CYC     0x04            /* address cycle 4cycle */
+#define FCOMMAND_ANUM_5CYC     0x05            /* address cycle 5cycle */
+#define FCOMMAND_FCMD_READ0    0x00            /* read1 command */
+#define FCOMMAND_FCMD_SEQIN    0x80            /* page program 1st command */
+#define FCOMMAND_FCMD_PAGEPROG 0x10            /* page program 2nd command */
+#define FCOMMAND_FCMD_RESET    0xff            /* reset command */
+#define FCOMMAND_FCMD_ERASE1   0x60            /* erase 1st command */
+#define FCOMMAND_FCMD_ERASE2   0xd0            /* erase 2nd command */
+#define FCOMMAND_FCMD_STATUS   0x70            /* read status command */
+#define FCOMMAND_FCMD_READID   0x90            /* read id command */
+#define FCOMMAND_FCMD_READOOB  0x50            /* read3 command */
+/* address register */
+#define FADD                   __SYSREG(0xd8f00004, u32)
+/* address register 2 */
+#define FADD2                  __SYSREG(0xd8f00008, u32)
+/* error judgement register */
+#define FJUDGE                 __SYSREG(0xd8f0000c, u32)
+#define FJUDGE_NOERR           0x0             /* no error */
+#define FJUDGE_1BITERR         0x1             /* 1bit error in data area */
+#define FJUDGE_PARITYERR       0x2             /* parity error */
+#define FJUDGE_UNCORRECTABLE   0x3             /* uncorrectable error */
+#define FJUDGE_ERRJDG_MSK      0x3             /* mask of judgement result */
+/* 1st ECC store register */
+#define FECC11                 __SYSREG(0xd8f00010, u32)
+/* 2nd ECC store register */
+#define FECC12                 __SYSREG(0xd8f00014, u32)
+/* 3rd ECC store register */
+#define FECC21                 __SYSREG(0xd8f00018, u32)
+/* 4th ECC store register */
+#define FECC22                 __SYSREG(0xd8f0001c, u32)
+/* 5th ECC store register */
+#define FECC31                 __SYSREG(0xd8f00020, u32)
+/* 6th ECC store register */
+#define FECC32                 __SYSREG(0xd8f00024, u32)
+/* 7th ECC store register */
+#define FECC41                 __SYSREG(0xd8f00028, u32)
+/* 8th ECC store register */
+#define FECC42                 __SYSREG(0xd8f0002c, u32)
+/* data register */
+#define FDATA                  __SYSREG(0xd8f00030, u32)
+/* access pulse register */
+#define FPWS                   __SYSREG(0xd8f00100, u32)
+#define FPWS_PWS1W_2CLK                0x00000000 /* write pulse width 1clock */
+#define FPWS_PWS1W_3CLK                0x01000000 /* write pulse width 2clock */
+#define FPWS_PWS1W_4CLK                0x02000000 /* write pulse width 4clock */
+#define FPWS_PWS1W_5CLK                0x03000000 /* write pulse width 5clock */
+#define FPWS_PWS1W_6CLK                0x04000000 /* write pulse width 6clock */
+#define FPWS_PWS1W_7CLK                0x05000000 /* write pulse width 7clock */
+#define FPWS_PWS1W_8CLK                0x06000000 /* write pulse width 8clock */
+#define FPWS_PWS1R_3CLK                0x00010000 /* read pulse width 3clock */
+#define FPWS_PWS1R_4CLK                0x00020000 /* read pulse width 4clock */
+#define FPWS_PWS1R_5CLK                0x00030000 /* read pulse width 5clock */
+#define FPWS_PWS1R_6CLK                0x00040000 /* read pulse width 6clock */
+#define FPWS_PWS1R_7CLK                0x00050000 /* read pulse width 7clock */
+#define FPWS_PWS1R_8CLK                0x00060000 /* read pulse width 8clock */
+#define FPWS_PWS2W_2CLK                0x00000100 /* write pulse interval 2clock */
+#define FPWS_PWS2W_3CLK                0x00000200 /* write pulse interval 3clock */
+#define FPWS_PWS2W_4CLK                0x00000300 /* write pulse interval 4clock */
+#define FPWS_PWS2W_5CLK                0x00000400 /* write pulse interval 5clock */
+#define FPWS_PWS2W_6CLK                0x00000500 /* write pulse interval 6clock */
+#define FPWS_PWS2R_2CLK                0x00000001 /* read pulse interval 2clock */
+#define FPWS_PWS2R_3CLK                0x00000002 /* read pulse interval 3clock */
+#define FPWS_PWS2R_4CLK                0x00000003 /* read pulse interval 4clock */
+#define FPWS_PWS2R_5CLK                0x00000004 /* read pulse interval 5clock */
+#define FPWS_PWS2R_6CLK                0x00000005 /* read pulse interval 6clock */
+/* command register 2 */
+#define FCOMMAND2              __SYSREG(0xd8f00110, u32)
+/* transfer frequency register */
+#define FNUM                   __SYSREG(0xd8f00114, u32)
+#define FSDATA_ADDR            0xd8f00400
+/* active data register */
+#define FSDATA                 __SYSREG(FSDATA_ADDR, u32)
+
+#endif /* _PROC_NAND_REGS_H_ */
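
FJUDGE reports the ECC outcome of the last transfer as a two-bit code. A sketch of how a driver might decode it (illustration only; when the register is valid relative to a transfer, and what recovery each case needs, are controller and driver specific):

/* Decode the ECC judgement after a page read (sketch, return codes arbitrary). */
static int example_check_ecc(void)
{
	switch (FJUDGE & FJUDGE_ERRJDG_MSK) {
	case FJUDGE_NOERR:		/* data read back clean */
		return 0;
	case FJUDGE_1BITERR:		/* single-bit error, correctable */
		return 1;
	case FJUDGE_PARITYERR:		/* error in the parity area */
	default:			/* FJUDGE_UNCORRECTABLE */
		return -1;
	}
}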
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/proc.h b/arch/mn10300/proc-mn2ws0050/include/proc/proc.h
new file mode 100644 (file)
index 0000000..90d5cad
--- /dev/null
@@ -0,0 +1,18 @@
+/* proc.h: MN2WS0050 processor description
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_PROC_H
+#define _ASM_PROC_PROC_H
+
+#define PROCESSOR_VENDOR_NAME  "Panasonic"
+#define PROCESSOR_MODEL_NAME   "mn2ws0050"
+
+#endif /* _ASM_PROC_PROC_H */
diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h b/arch/mn10300/proc-mn2ws0050/include/proc/smp-regs.h
new file mode 100644 (file)
index 0000000..22f277f
--- /dev/null
@@ -0,0 +1,51 @@
+/* MN10300/AM33v2 Microcontroller SMP registers
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ * Created:
+ *  13-Nov-2006 MEI Add extended cache and atomic operation register
+ *                  for SMP support.
+ *  23-Feb-2007 MEI Add define for gdbstub SMP.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_PROC_SMP_REGS_H
+#define _ASM_PROC_SMP_REGS_H
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+#endif
+#include <asm/cpu-regs.h>
+
+/*
+ * Reference to the interrupt controllers of other CPUs
+ */
+#define CROSS_ICR_CPU_SHIFT    16
+
+#define CROSS_GxICR(X, CPU)    __SYSREG(0xc4000000 + (X) * 4 + \
+       ((X) >= 64 && (X) < 192) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u16)
+#define CROSS_GxICR_u8(X, CPU) __SYSREG(0xc4000000 + (X) * 4 +         \
+       (((X) >= 64) && ((X) < 192)) * 0xf00 + ((CPU) << CROSS_ICR_CPU_SHIFT), u8)
+
+/* CPU ID register */
+#define CPUID          __SYSREGC(0xc0000054, u32)
+#define CPUID_MASK     0x00000007      /* CPU ID mask */
+
+/* extended cache control register */
+#define ECHCTR         __SYSREG(0xc0000c20, u32)
+#define ECHCTR_IBCM    0x00000001      /* instruction cache broad cast mask */
+#define ECHCTR_DBCM    0x00000002      /* data cache broad cast mask */
+#define ECHCTR_ISPM    0x00000004      /* instruction cache snoop mask */
+#define ECHCTR_DSPM    0x00000008      /* data cache snoop mask */
+
+#define NMIAGR         __SYSREG(0xd400013c, u16)
+#define NMIAGR_GN      0x03fc
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_PROC_SMP_REGS_H */
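As a purely illustrative reading of the CROSS_GxICR() address arithmetic above (the interrupt number and CPU below are chosen arbitrarily), the extra 0xf00 offset only applies to group interrupts 64..191 and the target CPU index is folded in through the 16-bit shift:

        /* hypothetical example: X = 66, CPU = 1
         *   0xc4000000                    base of the cross-CPU ICR window
         * +   66 * 4     = 0x108          per-interrupt stride
         * +       0xf00                   because 64 <= X < 192
         * +   1 << 16    = 0x10000        CROSS_ICR_CPU_SHIFT
         * = 0xc4011008, accessed as a u16 via __SYSREG()
         */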
diff --git a/arch/mn10300/proc-mn2ws0050/proc-init.c b/arch/mn10300/proc-mn2ws0050/proc-init.c
new file mode 100644 (file)
index 0000000..c58249b
--- /dev/null
@@ -0,0 +1,134 @@
+/* MN2WS0050 processor initialisation
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/busctl-regs.h>
+#include <unit/timex.h>
+#include <asm/fpu.h>
+#include <asm/rtc.h>
+
+#define MEMCONF __SYSREGC(0xdf800400, u32)
+
+/*
+ * initialise the on-silicon processor peripherals
+ */
+asmlinkage void __init processor_init(void)
+{
+       int loop;
+
+       /* set up the exception table first */
+       for (loop = 0x000; loop < 0x400; loop += 8)
+               __set_intr_stub(loop, __common_exception);
+
+       __set_intr_stub(EXCEP_ITLBMISS,         itlb_miss);
+       __set_intr_stub(EXCEP_DTLBMISS,         dtlb_miss);
+       __set_intr_stub(EXCEP_IAERROR,          itlb_aerror);
+       __set_intr_stub(EXCEP_DAERROR,          dtlb_aerror);
+       __set_intr_stub(EXCEP_BUSERROR,         raw_bus_error);
+       __set_intr_stub(EXCEP_DOUBLE_FAULT,     double_fault);
+       __set_intr_stub(EXCEP_FPU_DISABLED,     fpu_disabled);
+       __set_intr_stub(EXCEP_SYSCALL0,         system_call);
+
+       __set_intr_stub(EXCEP_NMI,              nmi_handler);
+       __set_intr_stub(EXCEP_WDT,              nmi_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL0,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL1,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL2,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL3,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL4,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL5,       irq_handler);
+       __set_intr_stub(EXCEP_IRQ_LEVEL6,       irq_handler);
+
+       IVAR0 = EXCEP_IRQ_LEVEL0;
+       IVAR1 = EXCEP_IRQ_LEVEL1;
+       IVAR2 = EXCEP_IRQ_LEVEL2;
+       IVAR3 = EXCEP_IRQ_LEVEL3;
+       IVAR4 = EXCEP_IRQ_LEVEL4;
+       IVAR5 = EXCEP_IRQ_LEVEL5;
+       IVAR6 = EXCEP_IRQ_LEVEL6;
+
+#ifndef CONFIG_MN10300_HAS_CACHE_SNOOP
+       mn10300_dcache_flush_inv();
+       mn10300_icache_inv();
+#endif
+
+       /* disable all interrupts and set to priority 6 (lowest) */
+#ifdef CONFIG_SMP
+       for (loop = 0; loop < GxICR_NUM_IRQS; loop++)
+               GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#else  /* !CONFIG_SMP */
+       for (loop = 0; loop < NR_IRQS; loop++)
+               GxICR(loop) = GxICR_LEVEL_6 | GxICR_DETECT;
+#endif /* !CONFIG_SMP */
+
+       /* clear the timers */
+       TM0MD   = 0;
+       TM1MD   = 0;
+       TM2MD   = 0;
+       TM3MD   = 0;
+       TM4MD   = 0;
+       TM5MD   = 0;
+       TM6MD   = 0;
+       TM6MDA  = 0;
+       TM6MDB  = 0;
+       TM7MD   = 0;
+       TM8MD   = 0;
+       TM9MD   = 0;
+       TM10MD  = 0;
+       TM11MD  = 0;
+       TM12MD  = 0;
+       TM13MD  = 0;
+       TM14MD  = 0;
+       TM15MD  = 0;
+
+       calibrate_clock();
+}
+
+/*
+ * determine the memory size and base from the memory controller regs
+ */
+void __init get_mem_info(unsigned long *mem_base, unsigned long *mem_size)
+{
+       unsigned long memconf = MEMCONF;
+       unsigned long size = 0; /* order: MByte */
+
+       *mem_base = 0x90000000; /* fixed address */
+
+       switch (memconf & 0x00000003) {
+       case 0x01:
+               size = 256 / 8;         /* 256 Mbit per chip */
+               break;
+       case 0x02:
+               size = 512 / 8;         /* 512 Mbit per chip */
+               break;
+       case 0x03:
+               size = 1024 / 8;        /*   1 Gbit per chip */
+               break;
+       default:
+               panic("Invalid SDRAM size");
+               break;
+       }
+
+       printk(KERN_INFO "DDR2-SDRAM: %luMB x 2 @%08lx\n", size, *mem_base);
+
+       *mem_size = (size * 2) << 20;
+}
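To make the size arithmetic in get_mem_info() concrete, here is one hand-worked case; the MEMCONF reading is assumed for illustration, not taken from real hardware:

        /* assume memconf & 3 == 0x02 (512 Mbit per chip)
         *   size      = 512 / 8        = 64           (MB per chip)
         *   *mem_size = (64 * 2) << 20 = 0x08000000   (128 MB total)
         *   *mem_base = 0x90000000                    (fixed)
         * and the banner printed is "DDR2-SDRAM: 64MB x 2 @90000000"
         */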
index 2a0bf79ab968162eb514c2190dc9a8e136845645..0316907a012ef9ec14f5677ea9bdd3bb2de68917 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk;    /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK          mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK      mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
 #define MN10300_IOCLK          33333333UL
 /* #define MN10300_IOBCLK      66666666UL */
 
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK          MN10300_IOCLK
-#define MN10300_TSCCLK         MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ     mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ     (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
 #endif /* !__ASSEMBLY__ */
 
+#define MN10300_WDCLK          MN10300_IOCLK
+
 #endif /* _ASM_UNIT_CLOCK_H */
index 047566cd2e36d2677b33803c13384f906f17c0b7..991e356bac5f91c11a7efa563217b48f61b130bc 100644 (file)
 
 #define SERIAL_IRQ     XIRQ0   /* Dual serial (PC16552)        (Hi) */
 
+/*
+ * The ASB2303 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD      (18432000 / 16)
+
 /*
  * dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
  */
index f206b63c95b4f282114315870d12458a19c6a700..cc18fe7d8b90e2abc9061b430b7072386aeed0d8 100644 (file)
@@ -1,6 +1,6 @@
-/* ASB2303-specific timer specifcations
+/* ASB2303-specific timer specifications
  *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
+#include <asm/param.h>
 
 /*
  * jiffies counter specifications
  */
 
 #define        TMJCBR_MAX              0xffff
-#define        TMJCBC                  TM01BC
-
-#define        TMJCMD                  TM01MD
-#define        TMJCBR                  TM01BR
 #define        TMJCIRQ                 TM1IRQ
 #define        TMJCICR                 TM1ICR
-#define        TMJCICR_LEVEL           GxICR_LEVEL_5
 
 #ifndef __ASSEMBLY__
 
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK      MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                1
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                8
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                32
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK          (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK         (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ      ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ     ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
 {
-       unsigned rate;
-       u16 md, t16;
-
-       /* use as little prescaling as possible to avoid losing accuracy */
-       md = TM0MD_SRC_IOCLK;
-       rate = MN10300_JCCLK / HZ;
-
-       if (rate > TMJCBR_MAX) {
-               md = TM0MD_SRC_IOCLK_8;
-               rate = MN10300_JCCLK / 8 / HZ;
-
-               if (rate > TMJCBR_MAX) {
-                       md = TM0MD_SRC_IOCLK_32;
-                       rate = MN10300_JCCLK / 32 / HZ;
-
-                       if (rate > TMJCBR_MAX)
-                               BUG();
-               }
-       }
+       u16 tmp;
+       TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+       tmp = TM01MD;
+}
 
-       TMJCBR = rate - 1;
-       t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+       u32 tmp;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_INIT_COUNTER |
-               TM1MD_INIT_COUNTER << 8;
+       TM01BR = cnt;
+       tmp = TM01BR;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_COUNT_ENABLE |
-               TM1MD_COUNT_ENABLE << 8;
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_INIT_COUNTER |           \
+                TM1MD_INIT_COUNTER << 8;
 
-       t16 = TMJCMD;
 
-       TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       t16 = TMJCICR;
-}
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_COUNT_ENABLE |           \
+                TM1MD_COUNT_ENABLE << 8;
 
-static inline void shutdown_jiffies_counter(void)
-{
+       tmp = TM01MD;
 }
 
 #endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
 
 static inline void startup_timestamp_counter(void)
 {
+       u32 t32;
+
        /* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
         * - count down from 4Gig-1 to 0 and wrap at IOCLK rate
         */
        TM45BR = TMTSCBR_MAX;
+       t32 = TM45BR;
 
-       TM4MD = TM4MD_SRC_IOCLK;
+       TM4MD = TSC_TIMER_CLKSRC;
        TM4MD |= TM4MD_INIT_COUNTER;
        TM4MD &= ~TM4MD_INIT_COUNTER;
        TM4ICR = 0;
+       t32 = TM4ICR;
 
        TM5MD = TM5MD_SRC_TM4CASCADE;
        TM5MD |= TM5MD_INIT_COUNTER;
        TM5MD &= ~TM5MD_INIT_COUNTER;
        TM5ICR = 0;
+       t32 = TM5ICR;
 
        TM5MD |= TM5MD_COUNT_ENABLE;
        TM4MD |= TM4MD_COUNT_ENABLE;
+       t32 = TM5MD;
+       t32 = TM4MD;
 }
 
 static inline void shutdown_timestamp_counter(void)
 {
+       u8 t8;
        TM4MD = 0;
        TM5MD = 0;
+       t8 = TM4MD;
+       t8 = TM5MD;
 }
 
 /*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
 
 static inline cycles_t read_timestamp_counter(void)
 {
-       return (cycles_t)TMTSCBC;
+       return (cycles_t)~TMTSCBC;
 }
 
 #endif /* !__ASSEMBLY__ */
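A quick hand-worked check of the prescale ladder above, assuming the ASB2303's 33.333333 MHz IOCLK and HZ = 100 (the HZ value is an assumption for illustration): the unprescaled tick interval does not fit the 16-bit base register, but the divide-by-8 source does, so IOCLK_PRESCALE ends up as 8. The change to returning ~TMTSCBC follows from the same file: TM4/TM5 count down from 0xffffffff, so complementing the cascade count gives callers a value that increases with time.

        /* (33333333     + 50) / 100 - 1 = 333332  > TMJCBR_MAX (0xffff)
         * (33333333 / 8 + 50) / 100 - 1 =  41666 <= TMJCBR_MAX
         *   => IOCLK_PRESCALE = 8, MN10300_JC_PER_HZ = 41667
         */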
index 70e8cb4ea266153e0d0217933ff84e4f61bed7e1..834a76aa551a06d37ceeae41d2b2d65f7b33409b 100644 (file)
@@ -31,6 +31,14 @@ asmlinkage void __init unit_init(void)
        SET_XIRQ_TRIGGER(3, XIRQ_TRIGGER_HILEVEL);
        SET_XIRQ_TRIGGER(4, XIRQ_TRIGGER_LOWLEVEL);
        SET_XIRQ_TRIGGER(5, XIRQ_TRIGGER_LOWLEVEL);
+
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+       set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#endif
+
+#ifdef CONFIG_ETHERNET_IRQ_LEVEL
+       set_intr_level(XIRQ3, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
 }
 
 /*
@@ -51,7 +59,7 @@ void __init unit_init_IRQ(void)
                switch (GET_XIRQ_TRIGGER(extnum)) {
                case XIRQ_TRIGGER_HILEVEL:
                case XIRQ_TRIGGER_LOWLEVEL:
-                       set_intr_postackable(XIRQ2IRQ(extnum));
+                       mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
                        break;
                default:
                        break;
index 67be3f2eb18e928b68363601ae5440bb737b9e2b..29e3425431cfc868b57f73b7d59602f176aa9615 100644 (file)
 
 #ifndef __ASSEMBLY__
 
-#ifdef CONFIG_MN10300_RTC
-
-extern unsigned long mn10300_ioclk;    /* IOCLK (crystal speed) in HZ */
-extern unsigned long mn10300_iobclk;
-extern unsigned long mn10300_tsc_per_HZ;
-
-#define MN10300_IOCLK          mn10300_ioclk
-/* If this processors has a another clock, uncomment the below. */
-/* #define MN10300_IOBCLK      mn10300_iobclk */
-
-#else /* !CONFIG_MN10300_RTC */
-
 #define MN10300_IOCLK          33333333UL
 /* #define MN10300_IOBCLK      66666666UL */
 
-#endif /* !CONFIG_MN10300_RTC */
-
-#define MN10300_JCCLK          MN10300_IOCLK
-#define MN10300_TSCCLK         MN10300_IOCLK
-
-#ifdef CONFIG_MN10300_RTC
-#define MN10300_TSC_PER_HZ     mn10300_tsc_per_HZ
-#else /* !CONFIG_MN10300_RTC */
-#define MN10300_TSC_PER_HZ     (MN10300_TSCCLK/HZ)
-#endif /* !CONFIG_MN10300_RTC */
-
 #endif /* !__ASSEMBLY__ */
 
+#define MN10300_WDCLK          MN10300_IOCLK
+
 #endif /* _ASM_UNIT_CLOCK_H */
index 8086cc092cecca946aac943387c0a019e8be8c4a..88c08219315f5b175869120ae1dd8b4a8f9c1722 100644 (file)
 
 #define SERIAL_IRQ     XIRQ0   /* Dual serial (PC16552)        (Hi) */
 
+/*
+ * The ASB2305 has an 18.432 MHz clock for the UART
+ */
+#define BASE_BAUD      (18432000 / 16)
+
 /*
  * dispose of the /dev/ttyS0 serial port
  */
index d1c72d59fa9fa92e4ce0710979c14b9c879d5e6d..758af30d1a16aad5b74820431e5abda8cc4ed1df 100644 (file)
@@ -1,6 +1,6 @@
-/* ASB2305 timer specifcations
+/* ASB2305-specific timer specifications
  *
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2007, 2010 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
  *
  * This program is free software; you can redistribute it and/or
 
 #include <asm/timer-regs.h>
 #include <unit/clock.h>
+#include <asm/param.h>
 
 /*
  * jiffies counter specifications
  */
 
 #define        TMJCBR_MAX              0xffff
-#define        TMJCBC                  TM01BC
-
-#define        TMJCMD                  TM01MD
-#define        TMJCBR                  TM01BR
 #define        TMJCIRQ                 TM1IRQ
 #define        TMJCICR                 TM1ICR
-#define        TMJCICR_LEVEL           GxICR_LEVEL_5
 
 #ifndef __ASSEMBLY__
 
-static inline void startup_jiffies_counter(void)
+#define MN10300_SRC_IOCLK      MN10300_IOCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+/* use as little prescaling as possible to avoid losing accuracy */
+#if (MN10300_SRC_IOCLK + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                1
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK
+#elif (MN10300_SRC_IOCLK / 8 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                8
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_8
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_8
+#elif (MN10300_SRC_IOCLK / 32 + HZ / 2) / HZ - 1 <= TMJCBR_MAX
+# define IOCLK_PRESCALE                32
+# define JC_TIMER_CLKSRC       TM0MD_SRC_IOCLK_32
+# define TSC_TIMER_CLKSRC      TM4MD_SRC_IOCLK_32
+#else
+# error You lose.
+#endif
+
+#define MN10300_JCCLK          (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+#define MN10300_TSCCLK         (MN10300_SRC_IOCLK / IOCLK_PRESCALE)
+
+#define MN10300_JC_PER_HZ      ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ     ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+static inline void stop_jiffies_counter(void)
 {
-       unsigned rate;
-       u16 md, t16;
-
-       /* use as little prescaling as possible to avoid losing accuracy */
-       md = TM0MD_SRC_IOCLK;
-       rate = MN10300_JCCLK / HZ;
-
-       if (rate > TMJCBR_MAX) {
-               md = TM0MD_SRC_IOCLK_8;
-               rate = MN10300_JCCLK / 8 / HZ;
-
-               if (rate > TMJCBR_MAX) {
-                       md = TM0MD_SRC_IOCLK_32;
-                       rate = MN10300_JCCLK / 32 / HZ;
-
-                       if (rate > TMJCBR_MAX)
-                               BUG();
-               }
-       }
+       u16 tmp;
+       TM01MD = JC_TIMER_CLKSRC | TM1MD_SRC_TM0CASCADE << 8;
+       tmp = TM01MD;
+}
 
-       TMJCBR = rate - 1;
-       t16 = TMJCBR;
+static inline void reload_jiffies_counter(u32 cnt)
+{
+       u32 tmp;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_INIT_COUNTER |
-               TM1MD_INIT_COUNTER << 8;
+       TM01BR = cnt;
+       tmp = TM01BR;
 
-       TMJCMD =
-               md |
-               TM1MD_SRC_TM0CASCADE << 8 |
-               TM0MD_COUNT_ENABLE |
-               TM1MD_COUNT_ENABLE << 8;
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_INIT_COUNTER |           \
+                TM1MD_INIT_COUNTER << 8;
 
-       t16 = TMJCMD;
 
-       TMJCICR |= GxICR_ENABLE | GxICR_DETECT | GxICR_REQUEST;
-       t16 = TMJCICR;
-}
+       TM01MD = JC_TIMER_CLKSRC |              \
+                TM1MD_SRC_TM0CASCADE << 8 |    \
+                TM0MD_COUNT_ENABLE |           \
+                TM1MD_COUNT_ENABLE << 8;
 
-static inline void shutdown_jiffies_counter(void)
-{
+       tmp = TM01MD;
 }
 
 #endif /* !__ASSEMBLY__ */
@@ -94,29 +99,39 @@ static inline void shutdown_jiffies_counter(void)
 
 static inline void startup_timestamp_counter(void)
 {
+       u32 t32;
+
        /* set up timer 4 & 5 cascaded as a 32-bit counter to count real time
         * - count down from 4Gig-1 to 0 and wrap at IOCLK rate
         */
        TM45BR = TMTSCBR_MAX;
+       t32 = TM45BR;
 
-       TM4MD = TM4MD_SRC_IOCLK;
+       TM4MD = TSC_TIMER_CLKSRC;
        TM4MD |= TM4MD_INIT_COUNTER;
        TM4MD &= ~TM4MD_INIT_COUNTER;
        TM4ICR = 0;
+       t32 = TM4ICR;
 
        TM5MD = TM5MD_SRC_TM4CASCADE;
        TM5MD |= TM5MD_INIT_COUNTER;
        TM5MD &= ~TM5MD_INIT_COUNTER;
        TM5ICR = 0;
+       t32 = TM5ICR;
 
        TM5MD |= TM5MD_COUNT_ENABLE;
        TM4MD |= TM4MD_COUNT_ENABLE;
+       t32 = TM5MD;
+       t32 = TM4MD;
 }
 
 static inline void shutdown_timestamp_counter(void)
 {
+       u8 t8;
        TM4MD = 0;
        TM5MD = 0;
+       t8 = TM4MD;
+       t8 = TM5MD;
 }
 
 /*
@@ -127,7 +142,7 @@ typedef unsigned long cycles_t;
 
 static inline cycles_t read_timestamp_counter(void)
 {
-       return (cycles_t) TMTSCBC;
+       return (cycles_t)~TMTSCBC;
 }
 
 #endif /* !__ASSEMBLY__ */
index 45b40ac6c4647b3bb0751a1352e21e2e0a66ae3e..8e6763e6f25011f9d4eb68509c12ccb0dcb5014a 100644 (file)
@@ -93,7 +93,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
        struct pci_bus *bus;
        struct pci_dev *dev;
        int idx;
-       struct resource *r, *pr;
+       struct resource *r;
 
        /* Depth-First Search on bus tree */
        list_for_each_entry(bus, bus_list, node) {
@@ -105,10 +105,8 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
                                r = &dev->resource[idx];
                                if (!r->flags)
                                        continue;
-                               pr = pci_find_parent_resource(dev, r);
                                if (!r->start ||
-                                   !pr ||
-                                   request_resource(pr, r) < 0) {
+                                   pci_claim_resource(dev, idx) < 0) {
                                        printk(KERN_ERR "PCI:"
                                               " Cannot allocate resource"
                                               " region %d of bridge %s\n",
@@ -131,7 +129,7 @@ static void __init pcibios_allocate_resources(int pass)
        struct pci_dev *dev = NULL;
        int idx, disabled;
        u16 command;
-       struct resource *r, *pr;
+       struct resource *r;
 
        for_each_pci_dev(dev) {
                pci_read_config_word(dev, PCI_COMMAND, &command);
@@ -150,8 +148,7 @@ static void __init pcibios_allocate_resources(int pass)
                                    " (f=%lx, d=%d, p=%d)\n",
                                    pci_name(dev), r->start, r->end, r->flags,
                                    disabled, pass);
-                               pr = pci_find_parent_resource(dev, r);
-                               if (!pr || request_resource(pr, r) < 0) {
+                               if (pci_claim_resource(dev, idx) < 0) {
                                        printk(KERN_ERR "PCI:"
                                               " Cannot allocate resource"
                                               " region %d of device %s\n",
@@ -184,7 +181,7 @@ static void __init pcibios_allocate_resources(int pass)
 static int __init pcibios_assign_resources(void)
 {
        struct pci_dev *dev = NULL;
-       struct resource *r, *pr;
+       struct resource *r;
 
        if (!(pci_probe & PCI_ASSIGN_ROMS)) {
                /* Try to use BIOS settings for ROMs, otherwise let
@@ -194,8 +191,7 @@ static int __init pcibios_assign_resources(void)
                        r = &dev->resource[PCI_ROM_RESOURCE];
                        if (!r->flags || !r->start)
                                continue;
-                       pr = pci_find_parent_resource(dev, r);
-                       if (!pr || request_resource(pr, r) < 0) {
+                       if (pci_claim_resource(dev, PCI_ROM_RESOURCE) < 0) {
                                r->end -= r->start;
                                r->start = 0;
                        }
index 6d8720a0a59945bfa4b3c60d70c4d2ceb48c8a12..a4954fe82094be9404d8bdab02ea2bd1fc38ea3c 100644 (file)
@@ -503,7 +503,7 @@ asmlinkage void __init unit_pci_init(void)
        struct pci_ops *o = &pci_direct_ampci;
        u32 x;
 
-       set_intr_level(XIRQ1, GxICR_LEVEL_3);
+       set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_PCI_IRQ_LEVEL));
 
        memset(&bus, 0, sizeof(bus));
 
index a76c8e0ab90ff3aaf9a30d573b01259911d53edd..e1becd6b757132bd5664e82c5a676250ad660c3a 100644 (file)
@@ -26,8 +26,10 @@ asmlinkage void __init unit_init(void)
 {
 #ifndef CONFIG_GDBSTUB_ON_TTYSx
        /* set the 16550 interrupt line to level 3 if not being used for GDB */
-       set_intr_level(XIRQ0, GxICR_LEVEL_3);
+#ifdef CONFIG_EXT_SERIAL_IRQ_LEVEL
+       set_intr_level(XIRQ0, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
 #endif
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
 }
 
 /*
@@ -51,7 +53,7 @@ void __init unit_init_IRQ(void)
                switch (GET_XIRQ_TRIGGER(extnum)) {
                case XIRQ_TRIGGER_HILEVEL:
                case XIRQ_TRIGGER_LOWLEVEL:
-                       set_intr_postackable(XIRQ2IRQ(extnum));
+                       mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
                        break;
                default:
                        break;
diff --git a/arch/mn10300/unit-asb2364/Makefile b/arch/mn10300/unit-asb2364/Makefile
new file mode 100644 (file)
index 0000000..b3263ec
--- /dev/null
@@ -0,0 +1,12 @@
+#
+# Makefile for the linux kernel.
+#
+# Note! Dependencies are done automagically by 'make dep', which also
+# removes any old dependencies. DON'T put your own dependencies here
+# unless it's something special (ie not a .c file).
+#
+# Note 2! The CFLAGS definitions are now in the main makefile...
+
+obj-y   := unit-init.o leds.o irq-fpga.o
+
+obj-$(CONFIG_SMSC911X) += smsc911x.o
diff --git a/arch/mn10300/unit-asb2364/include/unit/clock.h b/arch/mn10300/unit-asb2364/include/unit/clock.h
new file mode 100644 (file)
index 0000000..d34ac9a
--- /dev/null
@@ -0,0 +1,29 @@
+/* clock.h: unit-specific clocks
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * Modified by Matsushita Electric Industrial Co., Ltd.
+ * Modifications:
+ *     23-Feb-2007 MEI Add define for watchdog timer.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_CLOCK_H
+#define _ASM_UNIT_CLOCK_H
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_IOCLK          100000000UL             /* for DDR800 */
+/*#define MN10300_IOCLK                83333333UL */           /* for DDR667 */
+#define MN10300_IOBCLK         MN10300_IOCLK           /* IOBCLK is equal to IOCLK */
+
+#endif /* !__ASSEMBLY__ */
+
+#define MN10300_WDCLK          27000000UL
+
+#endif /* _ASM_UNIT_CLOCK_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h b/arch/mn10300/unit-asb2364/include/unit/fpga-regs.h
new file mode 100644 (file)
index 0000000..7cf1205
--- /dev/null
@@ -0,0 +1,52 @@
+/* ASB2364 FPGA registers
+ */
+
+#ifndef _ASM_UNIT_FPGA_REGS_H
+#define _ASM_UNIT_FPGA_REGS_H
+
+#include <asm/cpu-regs.h>
+
+#ifdef __KERNEL__
+
+#define ASB2364_FPGA_REG_RESET_LAN     __SYSREG(0xa9001300, u16)
+#define ASB2364_FPGA_REG_RESET_UART    __SYSREG(0xa9001304, u16)
+#define ASB2364_FPGA_REG_RESET_I2C     __SYSREG(0xa9001308, u16)
+#define ASB2364_FPGA_REG_RESET_USB     __SYSREG(0xa900130c, u16)
+#define ASB2364_FPGA_REG_RESET_AV      __SYSREG(0xa9001310, u16)
+
+#define ASB2364_FPGA_REG_IRQ(X)                __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_IRQ_LAN       ASB2364_FPGA_REG_IRQ(0)
+#define ASB2364_FPGA_REG_IRQ_UART      ASB2364_FPGA_REG_IRQ(1)
+#define ASB2364_FPGA_REG_IRQ_I2C       ASB2364_FPGA_REG_IRQ(2)
+#define ASB2364_FPGA_REG_IRQ_USB       ASB2364_FPGA_REG_IRQ(3)
+#define ASB2364_FPGA_REG_IRQ_FPGA      ASB2364_FPGA_REG_IRQ(5)
+
+#define ASB2364_FPGA_REG_MASK(X)       __SYSREG(0xa9001590+((X)*4), u16)
+#define ASB2364_FPGA_REG_MASK_LAN      ASB2364_FPGA_REG_MASK(0)
+#define ASB2364_FPGA_REG_MASK_UART     ASB2364_FPGA_REG_MASK(1)
+#define ASB2364_FPGA_REG_MASK_I2C      ASB2364_FPGA_REG_MASK(2)
+#define ASB2364_FPGA_REG_MASK_USB      ASB2364_FPGA_REG_MASK(3)
+#define ASB2364_FPGA_REG_MASK_FPGA     ASB2364_FPGA_REG_MASK(5)
+
+#define ASB2364_FPGA_REG_CPLD5_SET1    __SYSREG(0xa9002500, u16)
+#define ASB2364_FPGA_REG_CPLD5_SET2    __SYSREG(0xa9002504, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET1    __SYSREG(0xa9002600, u16)
+#define ASB2364_FPGA_REG_CPLD6_SET2    __SYSREG(0xa9002604, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET1    __SYSREG(0xa9002700, u16)
+#define ASB2364_FPGA_REG_CPLD7_SET2    __SYSREG(0xa9002704, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET1    __SYSREG(0xa9002800, u16)
+#define ASB2364_FPGA_REG_CPLD8_SET2    __SYSREG(0xa9002804, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET1    __SYSREG(0xa9002900, u16)
+#define ASB2364_FPGA_REG_CPLD9_SET2    __SYSREG(0xa9002904, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET1   __SYSREG(0xa9002a00, u16)
+#define ASB2364_FPGA_REG_CPLD10_SET2   __SYSREG(0xa9002a04, u16)
+
+#define SyncExBus()                                    \
+       do {                                            \
+               unsigned short w;                       \
+               w = *(volatile short *)0xa9000000;      \
+       } while (0)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_UNIT_FPGA_REGS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/irq.h b/arch/mn10300/unit-asb2364/include/unit/irq.h
new file mode 100644 (file)
index 0000000..786148e
--- /dev/null
@@ -0,0 +1,35 @@
+/* ASB2364 FPGA irq numbers
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef _UNIT_IRQ_H
+#define _UNIT_IRQ_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_SMP
+#define NR_CPU_IRQS    GxICR_NUM_EXT_IRQS
+#else
+#define NR_CPU_IRQS    GxICR_NUM_IRQS
+#endif
+
+enum {
+       FPGA_LAN_IRQ    = NR_CPU_IRQS,
+       FPGA_UART_IRQ,
+       FPGA_I2C_IRQ,
+       FPGA_USB_IRQ,
+       FPGA_RESERVED_IRQ,
+       FPGA_FPGA_IRQ,
+       NR_IRQS
+};
+
+extern void __init irq_fpga_init(void);
+
+#endif /* !__ASSEMBLY__ */
+#endif /* _UNIT_IRQ_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/leds.h b/arch/mn10300/unit-asb2364/include/unit/leds.h
new file mode 100644 (file)
index 0000000..03a3933
--- /dev/null
@@ -0,0 +1,54 @@
+/* Unit-specific leds
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_LEDS_H
+#define _ASM_UNIT_LEDS_H
+
+#include <asm/pio-regs.h>
+#include <asm/cpu-regs.h>
+#include <asm/exceptions.h>
+
+#define MN10300_USE_7SEGLEDS   0
+
+#define ASB2364_7SEGLEDS       __SYSREG(0xA9001630, u32)
+
+/*
+ * use the 7-segment LEDs to indicate states
+ */
+
+#if MN10300_USE_7SEGLEDS
+/* flip the 7-segment LEDs between "Gdb-" and "----" */
+#define mn10300_set_gdbleds(ONOFF)                                     \
+       do {                                                            \
+               ASB2364_7SEGLEDS = (ONOFF) ? 0x8543077f : 0x7f7f7f7f;   \
+       } while (0)
+#else
+#define mn10300_set_gdbleds(ONOFF) do {} while (0)
+#endif
+
+#if MN10300_USE_7SEGLEDS
+/* indicate double-fault by displaying "db-f" on the LEDs */
+#define mn10300_set_dbfleds                    \
+       mov     0x43077f1d,d0           ;       \
+       mov     d0,(ASB2364_7SEGLEDS)
+#else
+#define mn10300_set_dbfleds
+#endif
+
+#ifndef __ASSEMBLY__
+extern void peripheral_leds_display_exception(enum exception_code);
+extern void peripheral_leds_led_chase(void);
+extern void peripheral_leds7x4_display_dec(unsigned int, unsigned int);
+extern void peripheral_leds7x4_display_hex(unsigned int, unsigned int);
+extern void debug_to_serial(const char *, int);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_LEDS_H */
diff --git a/arch/mn10300/unit-asb2364/include/unit/serial.h b/arch/mn10300/unit-asb2364/include/unit/serial.h
new file mode 100644 (file)
index 0000000..7f048bb
--- /dev/null
@@ -0,0 +1,151 @@
+/* Unit-specific 8250 serial ports
+ *
+ * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_UNIT_SERIAL_H
+#define _ASM_UNIT_SERIAL_H
+
+#include <asm/cpu-regs.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+#include <linux/serial_reg.h>
+
+#define SERIAL_PORT0_BASE_ADDRESS      0xA8200000
+
+#define SERIAL_IRQ     XIRQ1   /* single serial (TL16C550C)    (Lo) */
+
+/*
+ * The ASB2364 has a 12.288 MHz clock
+ * for your UART.
+ *
+ * It'd be nice if someone built a serial card with a 24.576 MHz
+ * clock, since the 16550A is capable of handling a top speed of 1.5
+ * megabits/second; but this requires the faster clock.
+ */
+#define BASE_BAUD (12288000 / 16)
+
+/*
+ * dispose of the /dev/ttyS0 and /dev/ttyS1 serial ports
+ */
+#ifndef CONFIG_GDBSTUB_ON_TTYSx
+
+#define SERIAL_PORT_DFNS                                               \
+       {                                                               \
+               .baud_base      = BASE_BAUD,                            \
+               .irq            = SERIAL_IRQ,                           \
+               .flags          = STD_COM_FLAGS,                        \
+               .iomem_base     = (u8 *) SERIAL_PORT0_BASE_ADDRESS,     \
+               .iomem_reg_shift = 1,                                   \
+               .io_type        = SERIAL_IO_MEM,                        \
+       },
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#else /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_PORT_DFNS /* stolen by gdb-stub */
+
+#if defined(CONFIG_GDBSTUB_ON_TTYS0)
+#define GDBPORT_SERIAL_RX      __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_RX  * 4, u8)
+#define GDBPORT_SERIAL_TX      __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_TX  * 4, u8)
+#define GDBPORT_SERIAL_DLL     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLL * 4, u8)
+#define GDBPORT_SERIAL_DLM     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_DLM * 4, u8)
+#define GDBPORT_SERIAL_IER     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IER * 4, u8)
+#define GDBPORT_SERIAL_IIR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_IIR * 4, u8)
+#define GDBPORT_SERIAL_FCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_FCR * 4, u8)
+#define GDBPORT_SERIAL_LCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LCR * 4, u8)
+#define GDBPORT_SERIAL_MCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MCR * 4, u8)
+#define GDBPORT_SERIAL_LSR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_LSR * 4, u8)
+#define GDBPORT_SERIAL_MSR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_MSR * 4, u8)
+#define GDBPORT_SERIAL_SCR     __SYSREG(SERIAL_PORT0_BASE_ADDRESS + UART_SCR * 4, u8)
+#define GDBPORT_SERIAL_IRQ     SERIAL_IRQ
+
+#elif defined(CONFIG_GDBSTUB_ON_TTYS1)
+#error The ASB2364 does not have a /dev/ttyS1
+#endif
+
+#ifndef __ASSEMBLY__
+
+static inline void __debug_to_serial(const char *p, int n)
+{
+       char ch;
+
+#define LSR_WAIT_FOR(STATE)    \
+       do {} while (!(GDBPORT_SERIAL_LSR & UART_LSR_##STATE))
+#define FLOWCTL_QUERY(LINE)    \
+       ({ GDBPORT_SERIAL_MSR & UART_MSR_##LINE; })
+#define FLOWCTL_WAIT_FOR(LINE) \
+       do {} while (!(GDBPORT_SERIAL_MSR & UART_MSR_##LINE))
+#define FLOWCTL_CLEAR(LINE)    \
+       do { GDBPORT_SERIAL_MCR &= ~UART_MCR_##LINE; } while (0)
+#define FLOWCTL_SET(LINE)      \
+       do { GDBPORT_SERIAL_MCR |= UART_MCR_##LINE; } while (0)
+
+       FLOWCTL_SET(DTR);
+
+       for (; n > 0; n--) {
+               LSR_WAIT_FOR(THRE);
+               FLOWCTL_WAIT_FOR(CTS);
+
+               ch = *p++;
+               if (ch == 0x0a) {
+                       GDBPORT_SERIAL_TX = 0x0d;
+                       LSR_WAIT_FOR(THRE);
+                       FLOWCTL_WAIT_FOR(CTS);
+               }
+               GDBPORT_SERIAL_TX = ch;
+       }
+
+       FLOWCTL_CLEAR(DTR);
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_GDBSTUB_ON_TTYSx */
+
+#define SERIAL_INITIALIZE                                      \
+do {                                                           \
+       /* release reset */                                     \
+       ASB2364_FPGA_REG_RESET_UART = 0x0001;                   \
+       SyncExBus();                                            \
+} while (0)
+
+#define SERIAL_CHECK_INTERRUPT                                 \
+do {                                                           \
+       if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) == 0x0001) {   \
+               return IRQ_NONE;                                \
+       }                                                       \
+} while (0)
+
+#define SERIAL_CLEAR_INTERRUPT                                 \
+do {                                                           \
+       ASB2364_FPGA_REG_IRQ_UART = 0x0001;                     \
+       SyncExBus();                                            \
+} while (0)
+
+#define SERIAL_SET_INT_MASK                                    \
+do {                                                           \
+       ASB2364_FPGA_REG_MASK_UART = 0x0001;                    \
+       SyncExBus();                                            \
+} while (0)
+
+#define SERIAL_CLEAR_INT_MASK                                  \
+do {                                                           \
+       ASB2364_FPGA_REG_MASK_UART = 0x0000;                    \
+       SyncExBus();                                            \
+} while (0)
+
+#endif /* _ASM_UNIT_SERIAL_H */
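As a rough check of the BASE_BAUD figure above, using the standard 8250 divisor relation (divisor = BASE_BAUD / baud):

        /* 12,288,000 / 16 = 768,000 = BASE_BAUD  (highest reachable baud rate)
         * divisor for 38,400 baud = 768,000 / 38,400 = 20
         * with the 24.576 MHz crystal wished for in the comment:
         *   24,576,000 / 16 = 1,536,000, i.e. the ~1.5 Mbit/s 16550A ceiling
         */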
diff --git a/arch/mn10300/unit-asb2364/include/unit/smsc911x.h b/arch/mn10300/unit-asb2364/include/unit/smsc911x.h
new file mode 100644 (file)
index 0000000..4c1ede5
--- /dev/null
@@ -0,0 +1,171 @@
+/* Support for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_SMSC911X_H
+#define _ASM_UNIT_SMSC911X_H
+
+#include <linux/netdevice.h>
+#include <proc/irq.h>
+#include <unit/fpga-regs.h>
+
+#define MN10300_USE_EXT_EEPROM
+
+
+#define SMSC911X_BASE          0xA8000000UL
+#define SMSC911X_BASE_END      0xA8000100UL
+#define SMSC911X_IRQ           FPGA_LAN_IRQ
+
+/*
+ * Allow the FPGA to be initialised by the SMSC911x driver
+ */
+#undef SMSC_INITIALIZE
+#define SMSC_INITIALIZE()                                      \
+do {                                                           \
+       /* release reset */                                     \
+       ASB2364_FPGA_REG_RESET_LAN = 0x0001;                    \
+       SyncExBus();                                            \
+} while (0)
+
+#ifdef MN10300_USE_EXT_EEPROM
+#include <linux/delay.h>
+#include <unit/clock.h>
+
+#define EEPROM_ADDRESS 0xA0
+#define MAC_OFFSET     0x0008
+#define USE_IIC_CH     0       /* 0 or 1 */
+#define IIC_OFFSET     (0x80000 * USE_IIC_CH)
+#define IIC_DTRM       __SYSREG(0xd8400000 + IIC_OFFSET, u32)
+#define IIC_DREC       __SYSREG(0xd8400004 + IIC_OFFSET, u32)
+#define IIC_MYADD      __SYSREG(0xd8400008 + IIC_OFFSET, u32)
+#define IIC_CLK                __SYSREG(0xd840000c + IIC_OFFSET, u32)
+#define IIC_BRST       __SYSREG(0xd8400010 + IIC_OFFSET, u32)
+#define IIC_HOLD       __SYSREG(0xd8400014 + IIC_OFFSET, u32)
+#define IIC_BSTS       __SYSREG(0xd8400018 + IIC_OFFSET, u32)
+#define IIC_ICR                __SYSREG(0xd4000080 + 4 * USE_IIC_CH, u16)
+
+#define IIC_CLK_PLS    ((unsigned short)(MN10300_IOCLK / 100000 - 1))
+#define IIC_CLK_LOW    ((unsigned short)(IIC_CLK_PLS / 2))
+
+#define SYS_IIC_DTRM_Bit_STA   ((unsigned short)0x0400)
+#define SYS_IIC_DTRM_Bit_STO   ((unsigned short)0x0200)
+#define SYS_IIC_DTRM_Bit_ACK   ((unsigned short)0x0100)
+#define SYS_IIC_DTRM_Bit_DATA  ((unsigned short)0x00FF)
+
+static inline void POLL_INT_REQ(volatile u16 *icr)
+{
+       unsigned long flags;
+       u16 tmp;
+
+       while (!(*icr & GxICR_REQUEST))
+               ;
+       flags = arch_local_cli_save();
+       tmp = *icr;
+       *icr = (tmp & GxICR_LEVEL) | GxICR_DETECT;
+       tmp = *icr;
+       arch_local_irq_restore(flags);
+}
+
+/*
+ * Implement the SMSC911x hook for MAC address retrieval
+ */
+#undef smsc_get_mac
+static inline int smsc_get_mac(struct net_device *dev)
+{
+       unsigned char *mac_buf = dev->dev_addr;
+       int i;
+       unsigned short value;
+       unsigned int data;
+       int mac_length = 6;
+       int check;
+       u16 orig_gicr, tmp;
+       unsigned long flags;
+
+       /* save original GnICR and clear GnICR.IE */
+       flags = arch_local_cli_save();
+       orig_gicr = IIC_ICR;
+       IIC_ICR = orig_gicr & GxICR_LEVEL;
+       tmp = IIC_ICR;
+       arch_local_irq_restore(flags);
+
+       IIC_MYADD = 0x00000008;
+       IIC_CLK = (IIC_CLK_LOW << 16) + (IIC_CLK_PLS);
+       /* bus hung recovery */
+
+       while (1) {
+               check = 0;
+               for (i = 0; i < 3; i++) {
+                       if ((IIC_BSTS & 0x00000003) == 0x00000003)
+                               check++;
+                       udelay(3);
+               }
+
+               if (check == 3) {
+                       IIC_BRST = 0x00000003;
+                       break;
+               } else {
+                       for (i = 0; i < 3; i++) {
+                               IIC_BRST = 0x00000002;
+                               udelay(8);
+                               IIC_BRST = 0x00000003;
+                               udelay(8);
+                       }
+               }
+       }
+
+       IIC_BRST = 0x00000002;
+       IIC_BRST = 0x00000003;
+
+       value   =  SYS_IIC_DTRM_Bit_STA | SYS_IIC_DTRM_Bit_ACK;
+       value   |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+                   (unsigned short)0x0000);
+       IIC_DTRM = value;
+       POLL_INT_REQ(&IIC_ICR);
+
+       /** send offset of MAC address in EEPROM **/
+       IIC_DTRM = (unsigned char)((MAC_OFFSET & 0xFF00) >> 8);
+       POLL_INT_REQ(&IIC_ICR);
+
+       IIC_DTRM = (unsigned char)(MAC_OFFSET & 0x00FF);
+       POLL_INT_REQ(&IIC_ICR);
+
+       udelay(1000);
+
+       value   =  SYS_IIC_DTRM_Bit_STA;
+       value   |= (((unsigned short)EEPROM_ADDRESS & SYS_IIC_DTRM_Bit_DATA) |
+                   (unsigned short)0x0001);
+       IIC_DTRM = value;
+       POLL_INT_REQ(&IIC_ICR);
+
+       IIC_DTRM = 0x00000000;
+       while (mac_length > 0) {
+               POLL_INT_REQ(&IIC_ICR);
+
+               data = IIC_DREC;
+               mac_length--;
+               if (mac_length == 0)
+                       value = 0x00000300;     /* stop IIC bus */
+               else if (mac_length == 1)
+                       value = 0x00000100;     /* no ack */
+               else
+                       value = 0x00000000;     /* ack */
+               IIC_DTRM = value;
+               *mac_buf++ = (unsigned char)(data & 0xff);
+       }
+
+       /* restore GnICR.LV and GnICR.IE */
+       flags = arch_local_cli_save();
+       IIC_ICR = (orig_gicr & (GxICR_LEVEL | GxICR_ENABLE));
+       tmp = IIC_ICR;
+       arch_local_irq_restore(flags);
+
+       return 0;
+}
+#endif /* MN10300_USE_EXT_EEPROM */
+#endif /* _ASM_UNIT_SMSC911X_H */
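Roughly, the transaction that smsc_get_mac() drives over the on-chip I2C controller above is the usual two-phase EEPROM read; the outline below paraphrases the byte-level framing from the code in this header rather than quoting a datasheet:

        /* START + 0xA0 (EEPROM_ADDRESS, write)   select the EEPROM
         * 0x00, 0x08                             16-bit MAC_OFFSET
         * repeated START + 0xA1 (read)           switch direction
         * 6 data bytes into dev->dev_addr[],
         *   ACK after each byte except the last (NACK), then STOP
         */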
diff --git a/arch/mn10300/unit-asb2364/include/unit/timex.h b/arch/mn10300/unit-asb2364/include/unit/timex.h
new file mode 100644 (file)
index 0000000..ddb7ed0
--- /dev/null
@@ -0,0 +1,159 @@
+/* timex.h: MN2WS0038 architecture timer specifications
+ *
+ * Copyright (C) 2002, 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_UNIT_TIMEX_H
+#define _ASM_UNIT_TIMEX_H
+
+#ifndef __ASSEMBLY__
+#include <linux/irq.h>
+#endif /* __ASSEMBLY__ */
+
+#include <asm/timer-regs.h>
+#include <unit/clock.h>
+#include <asm/param.h>
+
+/*
+ * jiffies counter specifications
+ */
+
+#define        TMJCBR_MAX              0xffffff        /* 24bit */
+#define        TMJCIRQ                 TMTIRQ
+
+#ifndef __ASSEMBLY__
+
+#define MN10300_SRC_IOBCLK     MN10300_IOBCLK
+
+#ifndef HZ
+# error HZ undeclared.
+#endif /* !HZ */
+
+#define MN10300_JCCLK          (MN10300_SRC_IOBCLK)
+#define MN10300_TSCCLK         (MN10300_SRC_IOBCLK)
+
+#define MN10300_JC_PER_HZ      ((MN10300_JCCLK + HZ / 2) / HZ)
+#define MN10300_TSC_PER_HZ     ((MN10300_TSCCLK + HZ / 2) / HZ)
+
+/* Check that the MTM tick interval value fits the bit width of the base register */
+#if (MN10300_JC_PER_HZ - 1) > TMJCBR_MAX
+# error MTM tick timer interval value overflows.
+#endif
+
+static inline void stop_jiffies_counter(void)
+{
+       u16 tmp;
+       TMTMD = 0;
+       tmp = TMTMD;
+}
+
+static inline void reload_jiffies_counter(u32 cnt)
+{
+       u32 tmp;
+
+       TMTBR = cnt;
+       tmp = TMTBR;
+
+       TMTMD = TMTMD_TMTLDE;
+       TMTMD = TMTMD_TMTCNE;
+       tmp = TMTMD;
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_CLOCKEVENTS) && \
+    !defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST)
+/*
+ * If we aren't using broadcasting, each core needs its own event timer.
+ * Since CPU0 uses the tick timer which is 24-bits, we use timer 4 & 5
+ * cascaded to 32-bits for CPU1 (but only really use 24-bits to match
+ * CPU0).
+ */
+
+#define        TMJC1IRQ                TM5IRQ
+
+static inline void stop_jiffies_counter1(void)
+{
+       u8 tmp;
+       TM4MD = 0;
+       TM5MD = 0;
+       tmp = TM4MD;
+       tmp = TM5MD;
+}
+
+static inline void reload_jiffies_counter1(u32 cnt)
+{
+       u32 tmp;
+
+       TM45BR = cnt;
+       tmp = TM45BR;
+
+       TM4MD = TM4MD_INIT_COUNTER;
+       tmp = TM4MD;
+
+       TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_INIT_COUNTER;
+       TM5MD = TM5MD_SRC_TM4CASCADE | TM5MD_COUNT_ENABLE;
+       tmp = TM5MD;
+
+       TM4MD = TM4MD_COUNT_ENABLE;
+       tmp = TM4MD;
+}
+#endif /* CONFIG_SMP&GENERIC_CLOCKEVENTS&!GENERIC_CLOCKEVENTS_BROADCAST */
+
+#endif /* !__ASSEMBLY__ */
+
+
+/*
+ * timestamp counter specifications
+ */
+#define        TMTSCBR_MAX     0xffffffff
+
+#ifndef __ASSEMBLY__
+
+/* Use 32-bit timestamp counter */
+#define        TMTSCMD         TMSMD
+#define        TMTSCBR         TMSBR
+#define        TMTSCBC         TMSBC
+#define        TMTSCICR        TMSICR
+
+static inline void startup_timestamp_counter(void)
+{
+       u32 sync;
+
+       /* set up TMS(Timestamp) 32bit timer register to count real time
+        * - count down from 4Gig-1 to 0 and wrap at IOBCLK rate
+        */
+
+       TMTSCBR = TMTSCBR_MAX;
+       sync = TMTSCBR;
+
+       TMTSCICR = 0;
+       sync = TMTSCICR;
+
+       TMTSCMD = TMTMD_TMTLDE;
+       TMTSCMD = TMTMD_TMTCNE;
+       sync = TMTSCMD;
+}
+
+static inline void shutdown_timestamp_counter(void)
+{
+       TMTSCMD = 0;
+}
+
+/*
+ * we use a cascaded pair of 16-bit down-counting timers to count I/O
+ * clock cycles for the purposes of time keeping
+ */
+typedef unsigned long cycles_t;
+
+static inline cycles_t read_timestamp_counter(void)
+{
+       return (cycles_t)~TMTSCBC;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_UNIT_TIMEX_H */
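A hand-worked pass through the overflow guard above, assuming the 100 MHz IOBCLK from unit/clock.h and HZ = 100 (the HZ value is assumed): the tick interval fits the 24-bit base register without any prescaling, which is why this unit's timex.h has no prescale ladder.

        /* MN10300_JC_PER_HZ - 1 = (100,000,000 + 50) / 100 - 1 = 999,999
         * TMJCBR_MAX            = 0xffffff                     = 16,777,215
         * 999,999 <= 16,777,215, so the #error above is not triggered
         */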
diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c
new file mode 100644 (file)
index 0000000..fcf2975
--- /dev/null
@@ -0,0 +1,96 @@
+/* ASB2364 FPGA interrupt multiplexing
+ *
+ * Copyright (C) 2010 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * FPGA PIC operations
+ */
+static void asb2364_fpga_mask(unsigned int irq)
+{
+       ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+}
+
+static void asb2364_fpga_ack(unsigned int irq)
+{
+       ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+}
+
+static void asb2364_fpga_mask_ack(unsigned int irq)
+{
+       ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+       ASB2364_FPGA_REG_IRQ(irq - NR_CPU_IRQS) = 0x0001;
+       SyncExBus();
+}
+
+static void asb2364_fpga_unmask(unsigned int irq)
+{
+       ASB2364_FPGA_REG_MASK(irq - NR_CPU_IRQS) = 0x0000;
+       SyncExBus();
+}
+
+static struct irq_chip asb2364_fpga_pic = {
+       .name           = "fpga",
+       .ack            = asb2364_fpga_ack,
+       .mask           = asb2364_fpga_mask,
+       .mask_ack       = asb2364_fpga_mask_ack,
+       .unmask         = asb2364_fpga_unmask,
+};
+
+/*
+ * FPGA PIC interrupt handler
+ */
+static irqreturn_t fpga_interrupt(int irq, void *_mask)
+{
+       if ((ASB2364_FPGA_REG_IRQ_LAN  & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_LAN_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_UART & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_UART_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_I2C  & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_I2C_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_USB  & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_USB_IRQ);
+       if ((ASB2364_FPGA_REG_IRQ_FPGA & 0x0001) != 0x0001)
+               generic_handle_irq(FPGA_FPGA_IRQ);
+
+       return IRQ_HANDLED;
+}
+
+/*
+ * Define an interrupt action for each FPGA PIC output
+ */
+static struct irqaction fpga_irq[]  = {
+       [0] = {
+               .handler        = fpga_interrupt,
+               .flags          = IRQF_DISABLED | IRQF_SHARED,
+               .name           = "fpga",
+       },
+};
+
+/*
+ * Initialise the FPGA's PIC
+ */
+void __init irq_fpga_init(void)
+{
+       int irq;
+
+       for (irq = NR_CPU_IRQS; irq < NR_IRQS; irq++)
+               set_irq_chip_and_handler(irq, &asb2364_fpga_pic, handle_level_irq);
+
+       /* the FPGA drives the XIRQ1 input on the CPU PIC */
+       setup_irq(XIRQ1, &fpga_irq[0]);
+}
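A minimal sketch of how a driver on this board would sit behind the FPGA demultiplexer above; the handler and names are hypothetical, while request_irq() is the standard <linux/interrupt.h> API of this kernel vintage and FPGA_LAN_IRQ comes from unit/irq.h:

        /* hypothetical consumer of one demuxed line */
        static irqreturn_t example_lan_interrupt(int irq, void *dev_id)
        {
                /* ... service the device ... */
                return IRQ_HANDLED;
        }

        static int __init example_lan_probe(void)
        {
                /* the CPU only sees the shared XIRQ1; fpga_interrupt() above
                 * fans it out to FPGA_LAN_IRQ, and asb2364_fpga_pic masks and
                 * acks the per-source FPGA register */
                return request_irq(FPGA_LAN_IRQ, example_lan_interrupt, 0,
                                   "example-lan", NULL);
        }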
diff --git a/arch/mn10300/unit-asb2364/leds.c b/arch/mn10300/unit-asb2364/leds.c
new file mode 100644 (file)
index 0000000..1ff830c
--- /dev/null
@@ -0,0 +1,98 @@
+/* leds.c: ASB2364 peripheral 7seg LEDs x4 support
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+#include <asm/processor.h>
+#include <asm/intctl-regs.h>
+#include <asm/rtc-regs.h>
+#include <unit/leds.h>
+
+#if MN10300_USE_7SEGLEDS
+static const u8 asb2364_led_hex_tbl[16] = {
+       0x80, 0xf2, 0x48, 0x60, 0x32, 0x24, 0x04, 0xf0,
+       0x00, 0x20, 0x10, 0x06, 0x8c, 0x42, 0x0c, 0x1c
+};
+
+static const u32 asb2364_led_chase_tbl[6] = {
+       ~0x02020202,    /* top          - segA */
+       ~0x04040404,    /* right top    - segB */
+       ~0x08080808,    /* right bottom - segC */
+       ~0x10101010,    /* bottom       - segD */
+       ~0x20202020,    /* left bottom  - segE */
+       ~0x40404040,    /* left top     - segF */
+};
+
+static unsigned asb2364_led_chase;
+
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points)
+{
+       u32 leds;
+
+       leds = asb2364_led_hex_tbl[(val/1000) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val/100) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val/10) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[val % 10];
+       leds |= points^0x01010101;
+
+       ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points)
+{
+       u32 leds;
+
+       leds = asb2364_led_hex_tbl[(val/1000) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val/100) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(val/10) % 10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[val % 10];
+       leds |= points^0x01010101;
+
+       ASB2364_7SEGLEDS = leds;
+}
+
+/* display triple horizontal bar and exception code */
+void peripheral_leds_display_exception(enum exception_code code)
+{
+       u32 leds;
+
+       leds = asb2364_led_hex_tbl[(code/0x100) % 0x10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[(code/0x10) % 0x10];
+       leds <<= 8;
+       leds |= asb2364_led_hex_tbl[code % 0x10];
+       leds |= 0x6d010101;
+
+       ASB2364_7SEGLEDS = leds;
+}
+
+void peripheral_leds_led_chase(void)
+{
+       ASB2364_7SEGLEDS = asb2364_led_chase_tbl[asb2364_led_chase];
+       asb2364_led_chase++;
+       if (asb2364_led_chase >= 6)
+               asb2364_led_chase = 0;
+}
+#else  /* MN10300_USE_7SEGLEDS */
+void peripheral_leds7x4_display_dec(unsigned int val, unsigned int points) { }
+void peripheral_leds7x4_display_hex(unsigned int val, unsigned int points) { }
+void peripheral_leds_display_exception(enum exception_code code) { }
+void peripheral_leds_led_chase(void) { }
+#endif /* MN10300_USE_7SEGLEDS */
diff --git a/arch/mn10300/unit-asb2364/smsc911x.c b/arch/mn10300/unit-asb2364/smsc911x.c
new file mode 100644 (file)
index 0000000..544a73e
--- /dev/null
@@ -0,0 +1,58 @@
+/* Specification for the SMSC911x NIC
+ *
+ * Copyright (C) 2006 Matsushita Electric Industrial Co., Ltd.
+ * All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/smsc911x.h>
+#include <unit/smsc911x.h>
+
+static struct smsc911x_platform_config smsc911x_config = {
+       .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
+       .irq_type       = SMSC911X_IRQ_TYPE_OPEN_DRAIN,
+       .flags          = SMSC911X_USE_32BIT,
+};
+
+static struct resource smsc911x_resources[] = {
+       [0] = {
+               .start  = SMSC911X_BASE,
+               .end    = SMSC911X_BASE_END,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = SMSC911X_IRQ,
+               .end    = SMSC911X_IRQ,
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device smsc911x_device = {
+       .name           = "smsc911x",
+       .id             = 0,
+       .num_resources  = ARRAY_SIZE(smsc911x_resources),
+       .resource       = smsc911x_resources,
+       .dev            = {
+               .platform_data = &smsc911x_config,
+       }
+};
+
+/*
+ * add platform devices
+ */
+static int __init unit_device_init(void)
+{
+       platform_device_register(&smsc911x_device);
+       return 0;
+}
+
+device_initcall(unit_device_init);
diff --git a/arch/mn10300/unit-asb2364/unit-init.c b/arch/mn10300/unit-asb2364/unit-init.c
new file mode 100644 (file)
index 0000000..1144080
--- /dev/null
@@ -0,0 +1,88 @@
+/* ASB2364 initialisation
+ *
+ * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/param.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+#include <asm/setup.h>
+#include <asm/processor.h>
+#include <asm/irq.h>
+#include <asm/intctl-regs.h>
+#include <unit/fpga-regs.h>
+
+/*
+ * initialise some of the unit hardware before gdbstub is set up
+ */
+asmlinkage void __init unit_init(void)
+{
+       /* set up the external interrupts */
+
+       /* XIRQ[0]: NAND RXBY */
+       /* SET_XIRQ_TRIGGER(0, XIRQ_TRIGGER_LOWLEVEL); */
+
+       /* XIRQ[1]: LAN, UART, I2C, USB, PCI, FPGA */
+       SET_XIRQ_TRIGGER(1, XIRQ_TRIGGER_LOWLEVEL);
+
+       /* XIRQ[2]: Extend Slot 1-9 */
+       /* SET_XIRQ_TRIGGER(2, XIRQ_TRIGGER_LOWLEVEL); */
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL) &&    \
+    defined(CONFIG_ETHERNET_IRQ_LEVEL) &&      \
+    (CONFIG_EXT_SERIAL_IRQ_LEVEL != CONFIG_ETHERNET_IRQ_LEVEL)
+# error CONFIG_EXT_SERIAL_IRQ_LEVEL and CONFIG_ETHERNET_IRQ_LEVEL must be equal
+#endif
+
+#if defined(CONFIG_EXT_SERIAL_IRQ_LEVEL)
+       set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_EXT_SERIAL_IRQ_LEVEL));
+#elif defined(CONFIG_ETHERNET_IRQ_LEVEL)
+       set_intr_level(XIRQ1, NUM2GxICR_LEVEL(CONFIG_ETHERNET_IRQ_LEVEL));
+#endif
+}
+
+/*
+ * initialise the rest of the unit hardware after gdbstub is ready
+ */
+asmlinkage void __init unit_setup(void)
+{
+
+}
+
+/*
+ * initialise the external interrupts used by a unit of this type
+ */
+void __init unit_init_IRQ(void)
+{
+       unsigned int extnum;
+
+       for (extnum = 0 ; extnum < NR_XIRQS ; extnum++) {
+               switch (GET_XIRQ_TRIGGER(extnum)) {
+                       /* LEVEL triggered interrupts should be made
+                        * post-ACK'able as they hold their lines until
+                        * serviced
+                        */
+               case XIRQ_TRIGGER_HILEVEL:
+               case XIRQ_TRIGGER_LOWLEVEL:
+                       mn10300_set_lateack_irq_type(XIRQ2IRQ(extnum));
+                       break;
+               default:
+                       break;
+               }
+       }
+
+#define IRQCTL __SYSREG(0xd5000090, u32)
+       IRQCTL |= 0x02;
+
+       irq_fpga_init();
+}
index 86fe67fd49ba0c1dd8d794de61839766c2efacb5..9334539ebf75bcb01b91b088bedd7aca9199a03d 100644 (file)
@@ -1041,7 +1041,7 @@ config SMC911X
        tristate "SMSC LAN911[5678] support"
        select CRC32
        select MII
-       depends on ARM || SUPERH
+       depends on ARM || SUPERH || MN10300
        help
          This is a driver for SMSC's LAN911x series of Ethernet chipsets
          including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -1055,7 +1055,7 @@ config SMC911X
 
 config SMSC911X
        tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
-       depends on ARM || SUPERH || BLACKFIN || MIPS
+       depends on ARM || SUPERH || BLACKFIN || MIPS || MN10300
        select CRC32
        select MII
        select PHYLIB
@@ -1067,6 +1067,14 @@ config SMSC911X
          <file:Documentation/networking/net-modules.txt>. The module
          will be called smsc911x.
 
+config SMSC911X_ARCH_HOOKS
+       def_bool n
+       depends on SMSC911X
+       help
+         If the arch enables this, it can implement various hooks for more
+         comprehensive interrupt control and also override the source of
+         the MAC address.
+
 config NET_VENDOR_RACAL
        bool "Racal-Interlan (Micom) NI cards"
        depends on ISA
index a8e5856ce8821b4f441173be994d90bd021b9bcb..64bfdae5956fee14160d26bca07670c5025b74eb 100644 (file)
@@ -2075,7 +2075,7 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
        } else {
                /* Try reading mac address from device. if EEPROM is present
                 * it will already have been set */
-               smsc911x_read_mac_address(dev);
+               smsc_get_mac(dev);
 
                if (is_valid_ether_addr(dev->dev_addr)) {
                        /* eeprom values are valid  so use them */
@@ -2176,6 +2176,7 @@ static struct platform_driver smsc911x_driver = {
 /* Entry point for loading the module */
 static int __init smsc911x_init_module(void)
 {
+       SMSC_INITIALIZE();
        return platform_driver_register(&smsc911x_driver);
 }
 
index 016360c65ce2ded5afe129f91139a9582fbef485..52f38e12a879db9d344fb80f169f05eba31fa860 100644 (file)
 #define LPA_PAUSE_ALL                  (LPA_PAUSE_CAP | \
                                         LPA_PAUSE_ASYM)
 
+/*
+ * Provide hooks to let the arch add to the initialisation procedure
+ * and to override the source of the MAC address.
+ */
+#define SMSC_INITIALIZE()              do {} while (0)
+#define smsc_get_mac(dev)              smsc911x_read_mac_address((dev))
+
+#ifdef CONFIG_SMSC911X_ARCH_HOOKS
+#include <asm/smsc911x.h>
+#endif
+
 #endif                         /* __SMSC911X_H__ */
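A hedged sketch of what an arch-supplied <asm/smsc911x.h> might contain when CONFIG_SMSC911X_ARCH_HOOKS is enabled: the board_smsc911x_irq_setup() and fpga_read_mac() helpers are invented for illustration, and the #undef of the driver's defaults is an assumption about how an override would be wired up; only the SMSC_INITIALIZE and smsc_get_mac hook names come from the hunk above.

/* Hypothetical <asm/smsc911x.h> for a board that keeps the MAC address
 * in board-level storage (e.g. an FPGA register) instead of the chip's
 * EEPROM.  The two helpers declared below are illustrative only.
 */
#ifndef _ASM_SMSC911X_H
#define _ASM_SMSC911X_H

#include <linux/types.h>

/* drop the driver's default no-op hooks before overriding them */
#undef SMSC_INITIALIZE
#undef smsc_get_mac

/* extra one-time setup run from smsc911x_init_module() */
#define SMSC_INITIALIZE()       board_smsc911x_irq_setup()

/* fill dev->dev_addr from board storage instead of reading the EEPROM */
#define smsc_get_mac(dev)       fpga_read_mac((dev)->dev_addr)

extern void board_smsc911x_irq_setup(void);
extern void fpga_read_mac(u8 *mac);

#endif /* _ASM_SMSC911X_H */

The probe hunk above then checks whatever smsc_get_mac() produced with is_valid_ether_addr() before using it.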
index cfa2d20e35f152a8bad87ec32bcd27f206214c69..6dc95cac6b3dabc390ac271cca9a0e4fb16436ac 100644 (file)
 
 extern void cpu_idle(void);
 
+typedef void (*smp_call_func_t)(void *info);
 struct call_single_data {
        struct list_head list;
-       void (*func) (void *info);
+       smp_call_func_t func;
        void *info;
        u16 flags;
        u16 priv;
@@ -24,8 +25,8 @@ struct call_single_data {
 /* total number of cpus in this system (may exceed NR_CPUS) */
 extern unsigned int total_cpus;
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                               int wait);
+int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
+                            int wait);
 
 #ifdef CONFIG_SMP
 
@@ -69,15 +70,15 @@ extern void smp_cpus_done(unsigned int max_cpus);
 /*
  * Call a function on all other processors
  */
-int smp_call_function(void(*func)(void *info), void *info, int wait);
+int smp_call_function(smp_call_func_t func, void *info, int wait);
 void smp_call_function_many(const struct cpumask *mask,
-                           void (*func)(void *info), void *info, bool wait);
+                           smp_call_func_t func, void *info, bool wait);
 
 void __smp_call_function_single(int cpuid, struct call_single_data *data,
                                int wait);
 
 int smp_call_function_any(const struct cpumask *mask,
-                         void (*func)(void *info), void *info, int wait);
+                         smp_call_func_t func, void *info, int wait);
 
 /*
  * Generic and arch helpers
@@ -94,7 +95,7 @@ void ipi_call_unlock_irq(void);
 /*
  * Call a function on all processors
  */
-int on_each_cpu(void (*func) (void *info), void *info, int wait);
+int on_each_cpu(smp_call_func_t func, void *info, int wait);
 
 #define MSG_ALL_BUT_SELF       0x8000  /* Assume <32768 CPU's */
 #define MSG_ALL                        0x8001
@@ -122,7 +123,7 @@ static inline void smp_send_stop(void) { }
  *     These macros fold the SMP functionality into a single CPU system
  */
 #define raw_smp_processor_id()                 0
-static inline int up_smp_call_function(void (*func)(void *), void *info)
+static inline int up_smp_call_function(smp_call_func_t func, void *info)
 {
        return 0;
 }
@@ -143,7 +144,7 @@ static inline void smp_send_reschedule(int cpu) { }
 static inline void init_call_single_data(void) { }
 
 static inline int
-smp_call_function_any(const struct cpumask *mask, void (*func)(void *info),
+smp_call_function_any(const struct cpumask *mask, smp_call_func_t func,
                      void *info, int wait)
 {
        return smp_call_function_single(0, func, info, wait);
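As a usage sketch (not taken from this patch), any handler with the void (*)(void *) shape now satisfies smp_call_func_t directly; the local_hits counter and the two functions below are invented for illustration, while on_each_cpu(), DEFINE_PER_CPU() and this_cpu_write() are existing kernel APIs.

#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, local_hits);

/* matches smp_call_func_t, i.e. void (*)(void *info) */
static void reset_local_hits(void *info)
{
        /* runs with IRQs off, possibly from an IPI, so only touch
         * this CPU's own state */
        this_cpu_write(local_hits, 0);
}

static void reset_all_hits(void)
{
        /* run the handler on every online CPU, this one included,
         * and wait until all of them have finished */
        on_each_cpu(reset_local_hits, NULL, 1);
}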
index ed6aacfcb7efb307fe313ea798e7074f2c8f4f92..12ed8b013e2d7cd8be438a808f3a6981ab4cd70a 100644 (file)
@@ -267,7 +267,7 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
  *
  * Returns 0 on success, else a negative status code.
  */
-int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
                             int wait)
 {
        struct call_single_data d = {
@@ -336,7 +336,7 @@ EXPORT_SYMBOL(smp_call_function_single);
  *     3) any other online cpu in @mask
  */
 int smp_call_function_any(const struct cpumask *mask,
-                         void (*func)(void *info), void *info, int wait)
+                         smp_call_func_t func, void *info, int wait)
 {
        unsigned int cpu;
        const struct cpumask *nodemask;
@@ -416,7 +416,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
  * must be disabled when calling this function.
  */
 void smp_call_function_many(const struct cpumask *mask,
-                           void (*func)(void *), void *info, bool wait)
+                           smp_call_func_t func, void *info, bool wait)
 {
        struct call_function_data *data;
        unsigned long flags;
@@ -500,7 +500,7 @@ EXPORT_SYMBOL(smp_call_function_many);
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *), void *info, int wait)
+int smp_call_function(smp_call_func_t func, void *info, int wait)
 {
        preempt_disable();
        smp_call_function_many(cpu_online_mask, func, info, wait);
index 4e348dbaecd75a297e59101aa9cb5b9ffc872968..e2b6f5634e0d38bf4b4a8df34816e86af72a07e9 100644 (file)
@@ -1,9 +1,9 @@
 /*
  * Access kernel memory without faulting.
  */
-#include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/mm.h>
+#include <linux/uaccess.h>
 
 /**
  * probe_kernel_read(): safely attempt to read from a location