From: Linus Torvalds
Date: Sun, 3 Mar 2013 20:06:09 +0000 (-0800)
Subject: Merge tag 'metag-v3.9-rc1-v4' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan...
X-Git-Tag: v3.9-rc1~7
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=8fd5e7a2d9574b3cac1c9264ad1aed3b613ed6fe;p=karo-tx-linux.git

Merge tag 'metag-v3.9-rc1-v4' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag

Pull new ImgTec Meta architecture from James Hogan:
 "This adds core architecture support for Imagination's Meta processor
  cores, followed by some later miscellaneous arch/metag cleanups and
  fixes which I kept separate to ease review:

   - Support for basic Meta 1 (ATP) and Meta 2 (HTP) core architecture
   - A few fixes all over, particularly for symbol prefixes
   - A few privilege protection fixes
   - Several cleanups (setup.c includes, split out a lot of metag_ksyms.c)
   - Fix some missing exports
   - Convert hugetlb to use vm_unmapped_area()
   - Copy device tree to non-init memory
   - Provide dma_get_sgtable()"

* tag 'metag-v3.9-rc1-v4' of git://git.kernel.org/pub/scm/linux/kernel/git/jhogan/metag: (61 commits)
  metag: Provide dma_get_sgtable()
  metag: prom.h: remove declaration of metag_dt_memblock_reserve()
  metag: copy devicetree to non-init memory
  metag: cleanup metag_ksyms.c includes
  metag: move mm/init.c exports out of metag_ksyms.c
  metag: move usercopy.c exports out of metag_ksyms.c
  metag: move setup.c exports out of metag_ksyms.c
  metag: move kick.c exports out of metag_ksyms.c
  metag: move traps.c exports out of metag_ksyms.c
  metag: move irq enable out of irqflags.h on SMP
  genksyms: fix metag symbol prefix on crc symbols
  metag: hugetlb: convert to vm_unmapped_area()
  metag: export clear_page and copy_page
  metag: export metag_code_cache_flush_all
  metag: protect more non-MMU memory regions
  metag: make TXPRIVEXT bits explicit
  metag: kernel/setup.c: sort includes
  perf: Enable building perf tools for Meta
  metag: add boot time LNKGET/LNKSET check
  metag: add __init to metag_cache_probe()
  ...

---

8fd5e7a2d9574b3cac1c9264ad1aed3b613ed6fe
diff --cc Documentation/00-INDEX
index 0f3e8bbab8d7,f61be4a2b38b..45b3df936d2f
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@@ -299,16 -226,12 +299,18 @@@ memory-hotplug.tx
  	- Hotpluggable memory support, how to use and current status.
  memory.txt
  	- info on typical Linux memory problems.
+ metag/
+ 	- directory with info about Linux on Meta architecture.
  mips/
  	- directory with info about Linux on MIPS architecture.
 +misc-devices/
 +	- directory with info about devices using the misc dev subsystem
  mmc/
  	- directory with info about the MMC subsystem
 +mn10300/
 +	- directory with info about the mn10300 architecture port
 +mtd/
 +	- directory with info about memory technology devices (flash)
  mono.txt
  	- how to execute Mono-based .NET binaries with the help of BINFMT_MISC.
mutex-design.txt diff --cc arch/metag/Kconfig index 000000000000,30adc7875daa..afc8973d1488 mode 000000,100644..100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@@ -1,0 -1,295 +1,290 @@@ + config SYMBOL_PREFIX + string + default "_" + + config METAG + def_bool y + select EMBEDDED + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_SHOW - select GENERIC_SIGALTSTACK + select GENERIC_SMP_IDLE_THREAD + select HAVE_64BIT_ALIGNED_ACCESS + select HAVE_ARCH_TRACEHOOK + select HAVE_C_RECORDMCOUNT + select HAVE_DEBUG_KMEMLEAK + select HAVE_DYNAMIC_FTRACE + select HAVE_FTRACE_MCOUNT_RECORD + select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_TRACE_MCOUNT_TEST + select HAVE_GENERIC_HARDIRQS - select HAVE_IRQ_WORK + select HAVE_KERNEL_BZIP2 + select HAVE_KERNEL_GZIP + select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ + select HAVE_MEMBLOCK + select HAVE_MEMBLOCK_NODE_MAP + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_PERF_EVENTS + select HAVE_SYSCALL_TRACEPOINTS + select IRQ_DOMAIN + select MODULES_USE_ELF_RELA + select OF + select OF_EARLY_FLATTREE + select SPARSE_IRQ + -config ARCH_NO_VIRT_TO_BUS - def_bool y - + config STACKTRACE_SUPPORT + def_bool y + + config LOCKDEP_SUPPORT + def_bool y + + config HAVE_LATENCYTOP_SUPPORT + def_bool y + + config RWSEM_GENERIC_SPINLOCK + def_bool y + + config RWSEM_XCHGADD_ALGORITHM + bool + + config GENERIC_HWEIGHT + def_bool y + + config GENERIC_CALIBRATE_DELAY + def_bool y + + config GENERIC_GPIO + def_bool n + + config NO_IOPORT + def_bool y + + source "init/Kconfig" + + source "kernel/Kconfig.freezer" + + menu "Processor type and features" + + config MMU + def_bool y + + config STACK_GROWSUP + def_bool y + + config HOTPLUG_CPU + bool "Enable CPU hotplug support" + depends on SMP + help + Say Y here to allow turning CPUs off and on. CPUs can be + controlled through /sys/devices/system/cpu. + + Say N if you want to disable CPU hotplug. + + config HIGHMEM + bool "High Memory Support" + help + The address space of Meta processors is only 4 Gigabytes large + and it has to accommodate user address space, kernel address + space as well as some memory mapped IO. That means that, if you + have a large amount of physical memory and/or IO, not all of the + memory can be "permanently mapped" by the kernel. The physical + memory that is not permanently mapped is called "high memory". + + Depending on the selected kernel/user memory split, minimum + vmalloc space and actual amount of RAM, you may not need this + option which should result in a slightly faster kernel. + + If unsure, say n. + + source "arch/metag/mm/Kconfig" + + source "arch/metag/Kconfig.soc" + + config METAG_META12 + bool + help + Select this from the SoC config symbol to indicate that it contains a + Meta 1.2 core. + + config METAG_META21 + bool + help + Select this from the SoC config symbol to indicate that it contains a + Meta 2.1 core. + + config SMP + bool "Symmetric multi-processing support" + depends on METAG_META21 && METAG_META21_MMU + select USE_GENERIC_SMP_HELPERS + help + This enables support for systems with more than one thread running + Linux. If you have a system with only one thread running Linux, + say N. Otherwise, say Y. + + config NR_CPUS + int "Maximum number of CPUs (2-4)" if SMP + range 2 4 if SMP + default "1" if !SMP + default "4" if SMP + + config METAG_SMP_WRITE_REORDERING + bool + help + This attempts to prevent cache-memory incoherence due to external + reordering of writes from different hardware threads when SMP is + enabled. 
It adds fences (system event 0) to smp_mb and smp_rmb in an + attempt to catch some of the cases, and also before writes to shared + memory in LOCK1 protected atomics and spinlocks. + This will not completely prevent cache incoherency on affected cores. + + config METAG_LNKGET_AROUND_CACHE + bool + depends on METAG_META21 + help + This indicates that the LNKGET/LNKSET instructions go around the + cache, which requires some extra cache flushes when the memory needs + to be accessed by normal GET/SET instructions too. + + choice + prompt "Atomicity primitive" + default METAG_ATOMICITY_LNKGET + help + This option selects the mechanism for performing atomic operations. + + config METAG_ATOMICITY_IRQSOFF + depends on !SMP + bool "irqsoff" + help + This option disables interrupts to achieve atomicity. This mechanism + is not SMP-safe. + + config METAG_ATOMICITY_LNKGET + depends on METAG_META21 + bool "lnkget/lnkset" + help + This option uses the LNKGET and LNKSET instructions to achieve + atomicity. LNKGET/LNKSET are load-link/store-conditional instructions. + Choose this option if your system requires low latency. + + config METAG_ATOMICITY_LOCK1 + depends on SMP + bool "lock1" + help + This option uses the LOCK1 instruction for atomicity. This is mainly + provided as a debugging aid if the lnkget/lnkset atomicity primitive + isn't working properly. + + endchoice + + config METAG_FPU + bool "FPU Support" + depends on METAG_META21 + default y + help + This option allows processes to use FPU hardware available with this + CPU. If this option is not enabled FPU registers will not be saved + and restored on context-switch. + + If you plan on running programs which are compiled to use hard floats + say Y here. + + config METAG_DSP + bool "DSP Support" + help + This option allows processes to use DSP hardware available + with this CPU. If this option is not enabled DSP registers + will not be saved and restored on context-switch. + + If you plan on running DSP programs say Y here. + + config METAG_PERFCOUNTER_IRQS + bool "PerfCounters interrupt support" + depends on METAG_META21 + help + This option enables using interrupts to collect information from + Performance Counters. This option is supported in new META21 + (starting from HTP265). + + When disabled, Performance Counters information will be collected + based on Timer Interrupt. + + config METAG_DA + bool "DA support" + help + Say Y if you plan to use a DA debug adapter with Linux. The presence + of the DA will be detected automatically at boot, so it is safe to say + Y to this option even when booting without a DA. + + This enables support for services provided by DA JTAG debug adapters, + such as: + - communication over DA channels (such as the console driver). + - use of the DA filesystem. + + menu "Boot options" + + config METAG_BUILTIN_DTB + bool "Embed DTB in kernel image" + default y + help + Embeds a device tree binary in the kernel image. + + config METAG_BUILTIN_DTB_NAME + string "Built in DTB" + depends on METAG_BUILTIN_DTB + help + Set the name of the DTB to embed (leave blank to pick one + automatically based on kernel configuration). + + config CMDLINE_BOOL + bool "Default bootloader kernel arguments" + + config CMDLINE + string "Kernel command line" + depends on CMDLINE_BOOL + help + On some architectures there is currently no way for the boot loader + to pass arguments to the kernel. For these architectures, you should + supply some command-line options at build time by entering them + here. 
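(Aside on the atomicity choice above: the lnkget/lnkset help text describes LNKGET/LNKSET as load-link/store-conditional instructions. The sketch below shows, in portable C11, the retry pattern such primitives imply, using a weak compare-exchange as a stand-in because LL/SC cannot be written directly in portable C; it is illustrative only and is not the arch/metag implementation.)

#include <stdatomic.h>

/*
 * Illustrative only: the load-link/store-conditional retry pattern that
 * METAG_ATOMICITY_LNKGET relies on, approximated with a C11 weak
 * compare-exchange. A weak CAS may fail spuriously, much as an LNKSET
 * can fail when the link flag has been lost, hence the loop.
 */
static void atomic_add_sketch(atomic_int *v, int i)
{
	int old = atomic_load_explicit(v, memory_order_relaxed);

	while (!atomic_compare_exchange_weak_explicit(v, &old, old + i,
						      memory_order_relaxed,
						      memory_order_relaxed))
		;	/* 'old' is refreshed on failure; retry the update */
}

(If the hardware primitive misbehaves, loops like this can spin or lose updates, which is why the lock1 option is offered as a debugging fallback.)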
+ + config CMDLINE_FORCE + bool "Force default kernel command string" + depends on CMDLINE_BOOL + help + Set this to have arguments from the default kernel command string + override those passed by the boot loader. + + endmenu + + source "kernel/Kconfig.preempt" + + source kernel/Kconfig.hz + + endmenu + + menu "Power management options" + + source kernel/power/Kconfig + + endmenu + + menu "Executable file formats" + + source "fs/Kconfig.binfmt" + + endmenu + + source "net/Kconfig" + + source "drivers/Kconfig" + + source "fs/Kconfig" + + source "arch/metag/Kconfig.debug" + + source "security/Kconfig" + + source "crypto/Kconfig" + + source "lib/Kconfig" diff --cc arch/metag/configs/meta1_defconfig index 000000000000,ad663ca53208..c35a75e8ecfe mode 000000,100644..100644 --- a/arch/metag/configs/meta1_defconfig +++ b/arch/metag/configs/meta1_defconfig @@@ -1,0 -1,41 +1,40 @@@ -CONFIG_EXPERIMENTAL=y + # CONFIG_LOCALVERSION_AUTO is not set + # CONFIG_SWAP is not set + CONFIG_LOG_BUF_SHIFT=13 + CONFIG_SYSFS_DEPRECATED=y + CONFIG_SYSFS_DEPRECATED_V2=y + CONFIG_KALLSYMS_ALL=y + # CONFIG_ELF_CORE is not set + CONFIG_SLAB=y + # CONFIG_BLK_DEV_BSG is not set + CONFIG_PARTITION_ADVANCED=y + # CONFIG_MSDOS_PARTITION is not set + # CONFIG_IOSCHED_DEADLINE is not set + # CONFIG_IOSCHED_CFQ is not set + CONFIG_FLATMEM_MANUAL=y + CONFIG_META12_FPGA=y + CONFIG_METAG_DA=y + CONFIG_HZ_100=y + CONFIG_DEVTMPFS=y + CONFIG_DEVTMPFS_MOUNT=y + # CONFIG_STANDALONE is not set + # CONFIG_PREVENT_FIRMWARE_BUILD is not set + # CONFIG_FW_LOADER is not set + CONFIG_BLK_DEV_RAM=y + CONFIG_BLK_DEV_RAM_COUNT=1 + CONFIG_BLK_DEV_RAM_SIZE=16384 + # CONFIG_INPUT is not set + # CONFIG_SERIO is not set + # CONFIG_VT is not set + # CONFIG_LEGACY_PTYS is not set + CONFIG_DA_TTY=y + CONFIG_DA_CONSOLE=y + # CONFIG_DEVKMEM is not set + # CONFIG_HW_RANDOM is not set + # CONFIG_HWMON is not set + # CONFIG_USB_SUPPORT is not set + # CONFIG_DNOTIFY is not set + CONFIG_TMPFS=y + # CONFIG_MISC_FILESYSTEMS is not set + # CONFIG_SCHED_DEBUG is not set + CONFIG_DEBUG_INFO=y diff --cc arch/metag/configs/meta2_defconfig index 000000000000,47922e9306af..fb3148410183 mode 000000,100644..100644 --- a/arch/metag/configs/meta2_defconfig +++ b/arch/metag/configs/meta2_defconfig @@@ -1,0 -1,42 +1,41 @@@ -CONFIG_EXPERIMENTAL=y + # CONFIG_LOCALVERSION_AUTO is not set + # CONFIG_SWAP is not set + CONFIG_SYSVIPC=y + CONFIG_LOG_BUF_SHIFT=13 + CONFIG_SYSFS_DEPRECATED=y + CONFIG_SYSFS_DEPRECATED_V2=y + CONFIG_KALLSYMS_ALL=y + # CONFIG_ELF_CORE is not set + CONFIG_SLAB=y + # CONFIG_BLK_DEV_BSG is not set + CONFIG_PARTITION_ADVANCED=y + # CONFIG_MSDOS_PARTITION is not set + # CONFIG_IOSCHED_DEADLINE is not set + # CONFIG_IOSCHED_CFQ is not set + CONFIG_METAG_L2C=y + CONFIG_FLATMEM_MANUAL=y + CONFIG_METAG_HALT_ON_PANIC=y + CONFIG_METAG_DA=y + CONFIG_HZ_100=y + CONFIG_DEVTMPFS=y + # CONFIG_STANDALONE is not set + # CONFIG_PREVENT_FIRMWARE_BUILD is not set + # CONFIG_FW_LOADER is not set + CONFIG_BLK_DEV_RAM=y + CONFIG_BLK_DEV_RAM_COUNT=1 + CONFIG_BLK_DEV_RAM_SIZE=16384 + # CONFIG_INPUT is not set + # CONFIG_SERIO is not set + # CONFIG_VT is not set + # CONFIG_LEGACY_PTYS is not set + CONFIG_DA_TTY=y + CONFIG_DA_CONSOLE=y + # CONFIG_DEVKMEM is not set + # CONFIG_HW_RANDOM is not set + # CONFIG_HWMON is not set + # CONFIG_USB_SUPPORT is not set + # CONFIG_DNOTIFY is not set + CONFIG_TMPFS=y + # CONFIG_MISC_FILESYSTEMS is not set + # CONFIG_SCHED_DEBUG is not set + CONFIG_DEBUG_INFO=y diff --cc arch/metag/configs/meta2_smp_defconfig index 
000000000000,f5082505872d..6c7b777ac276 mode 000000,100644..100644 --- a/arch/metag/configs/meta2_smp_defconfig +++ b/arch/metag/configs/meta2_smp_defconfig @@@ -1,0 -1,43 +1,42 @@@ -CONFIG_EXPERIMENTAL=y + # CONFIG_LOCALVERSION_AUTO is not set + # CONFIG_SWAP is not set + CONFIG_SYSVIPC=y + CONFIG_LOG_BUF_SHIFT=13 + CONFIG_SYSFS_DEPRECATED=y + CONFIG_SYSFS_DEPRECATED_V2=y + CONFIG_KALLSYMS_ALL=y + # CONFIG_ELF_CORE is not set + CONFIG_SLAB=y + # CONFIG_BLK_DEV_BSG is not set + CONFIG_PARTITION_ADVANCED=y + # CONFIG_MSDOS_PARTITION is not set + # CONFIG_IOSCHED_DEADLINE is not set + # CONFIG_IOSCHED_CFQ is not set + CONFIG_METAG_L2C=y + CONFIG_FLATMEM_MANUAL=y + CONFIG_METAG_HALT_ON_PANIC=y + CONFIG_SMP=y + CONFIG_METAG_DA=y + CONFIG_HZ_100=y + CONFIG_DEVTMPFS=y + # CONFIG_STANDALONE is not set + # CONFIG_PREVENT_FIRMWARE_BUILD is not set + # CONFIG_FW_LOADER is not set + CONFIG_BLK_DEV_RAM=y + CONFIG_BLK_DEV_RAM_COUNT=1 + CONFIG_BLK_DEV_RAM_SIZE=16384 + # CONFIG_INPUT is not set + # CONFIG_SERIO is not set + # CONFIG_VT is not set + # CONFIG_LEGACY_PTYS is not set + CONFIG_DA_TTY=y + CONFIG_DA_CONSOLE=y + # CONFIG_DEVKMEM is not set + # CONFIG_HW_RANDOM is not set + # CONFIG_HWMON is not set + # CONFIG_USB_SUPPORT is not set + # CONFIG_DNOTIFY is not set + CONFIG_TMPFS=y + # CONFIG_MISC_FILESYSTEMS is not set + # CONFIG_SCHED_DEBUG is not set + CONFIG_DEBUG_INFO=y diff --cc arch/metag/kernel/traps.c index 000000000000,5cad15601cff..8961f247b500 mode 000000,100644..100644 --- a/arch/metag/kernel/traps.c +++ b/arch/metag/kernel/traps.c @@@ -1,0 -1,995 +1,995 @@@ + /* + * Meta exception handling. + * + * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file COPYING in the main directory of this archive + * for more details. + */ + + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + + #include + #include + #include + #include + #include + #include + #include + #include + #include + #include + + /* Passing syscall arguments as long long is quicker. */ + typedef unsigned int (*LPSYSCALL) (unsigned long long, + unsigned long long, + unsigned long long); + + /* + * Users of LNKSET should compare the bus error bits obtained from DEFR + * against TXDEFR_LNKSET_SUCCESS only as the failure code will vary between + * different cores revisions. + */ + #define TXDEFR_LNKSET_SUCCESS 0x02000000 + #define TXDEFR_LNKSET_FAILURE 0x04000000 + + /* + * Our global TBI handle. Initialised from setup.c/setup_arch. 
+ */ + DECLARE_PER_CPU(PTBI, pTBI); + + #ifdef CONFIG_SMP + static DEFINE_PER_CPU(unsigned int, trigger_mask); + #else + unsigned int global_trigger_mask; + EXPORT_SYMBOL(global_trigger_mask); + #endif + + unsigned long per_cpu__stack_save[NR_CPUS]; + + static const char * const trap_names[] = { + [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault", + [TBIXXF_SIGNUM_PGF] = "Privilege violation", + [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault", + [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure", + [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault", + [TBIXXF_SIGNUM_IPF] = "Code fetch page fault", + [TBIXXF_SIGNUM_DPF] = "Data access page fault", + [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint", + [TBIXXF_SIGNUM_DWF] = "Read-only data access fault", + }; + + const char *trap_name(int trapno) + { + if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names) + && trap_names[trapno]) + return trap_names[trapno]; + return "Unknown fault"; + } + + static DEFINE_SPINLOCK(die_lock); + + void die(const char *str, struct pt_regs *regs, long err, + unsigned long addr) + { + static int die_counter; + + oops_enter(); + + spin_lock_irq(&die_lock); + console_verbose(); + bust_spinlocks(1); + pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff, + trap_name(err & 0xffff), addr, ++die_counter); + + print_modules(); + show_regs(regs); + + pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm, + task_pid_nr(current), task_stack_page(current) + THREAD_SIZE); + + bust_spinlocks(0); - add_taint(TAINT_DIE); ++ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); + if (kexec_should_crash(current)) + crash_kexec(regs); + + if (in_interrupt()) + panic("Fatal exception in interrupt"); + + if (panic_on_oops) + panic("Fatal exception"); + + spin_unlock_irq(&die_lock); + oops_exit(); + do_exit(SIGSEGV); + } + + #ifdef CONFIG_METAG_DSP + /* + * The ECH encoding specifies the size of a DSPRAM as, + * + * "slots" / 4 + * + * A "slot" is the size of two DSPRAM bank entries; an entry from + * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank + * entry is 4 bytes. + */ + #define SLOT_SZ 8 + static inline unsigned int decode_dspram_size(unsigned int size) + { + unsigned int _sz = size & 0x7f; + + return _sz * SLOT_SZ * 4; + } + + static void dspram_save(struct meta_ext_context *dsp_ctx, + unsigned int ramA_sz, unsigned int ramB_sz) + { + unsigned int ram_sz[2]; + int i; + + ram_sz[0] = ramA_sz; + ram_sz[1] = ramB_sz; + + for (i = 0; i < 2; i++) { + if (ram_sz[i] != 0) { + unsigned int sz; + + if (i == 0) + sz = decode_dspram_size(ram_sz[i] >> 8); + else + sz = decode_dspram_size(ram_sz[i]); + + if (dsp_ctx->ram[i] == NULL) { + dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL); + + if (dsp_ctx->ram[i] == NULL) + panic("couldn't save DSP context"); + } else { + if (ram_sz[i] > dsp_ctx->ram_sz[i]) { + kfree(dsp_ctx->ram[i]); + + dsp_ctx->ram[i] = kmalloc(sz, + GFP_KERNEL); + + if (dsp_ctx->ram[i] == NULL) + panic("couldn't save DSP context"); + } + } + + if (i == 0) + __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]); + else + __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]); + + dsp_ctx->ram_sz[i] = ram_sz[i]; + } + } + } + #endif /* CONFIG_METAG_DSP */ + + /* + * Allow interrupts to be nested and save any "extended" register + * context state, e.g. DSP regs and RAMs. + */ + static void nest_interrupts(TBIRES State, unsigned long mask) + { + #ifdef CONFIG_METAG_DSP + struct meta_ext_context *dsp_ctx; + unsigned int D0_8; + + /* + * D0.8 may contain an ECH encoding. 
The upper 16 bits + * tell us what DSP resources the current process is + * using. OR the bits into the SaveMask so that + * __TBINestInts() knows what resources to save as + * part of this context. + * + * Don't save the context if we're nesting interrupts in the + * kernel because the kernel doesn't use DSP hardware. + */ + D0_8 = __core_reg_get(D0.8); + + if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) { + State.Sig.SaveMask |= (D0_8 >> 16); + + dsp_ctx = current->thread.dsp_context; + if (dsp_ctx == NULL) { + dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL); + if (dsp_ctx == NULL) + panic("couldn't save DSP context: ENOMEM"); + + current->thread.dsp_context = dsp_ctx; + } + + current->thread.user_flags |= (D0_8 & 0xffff0000); + __TBINestInts(State, &dsp_ctx->regs, mask); + dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f); + } else + __TBINestInts(State, NULL, mask); + #else + __TBINestInts(State, NULL, mask); + #endif + } + + void head_end(TBIRES State, unsigned long mask) + { + unsigned int savemask = (unsigned short)State.Sig.SaveMask; + unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask; + + if (savemask & TBICTX_PRIV_BIT) { + ctx_savemask |= TBICTX_PRIV_BIT; + current->thread.user_flags = savemask; + } + + /* Always undo the sleep bit */ + ctx_savemask &= ~TBICTX_WAIT_BIT; + + /* Always save the catch buffer and RD pipe if they are dirty */ + savemask |= TBICTX_XCBF_BIT; + + /* Only save the catch and RD if we have not already done so. + * Note - the RD bits are in the pCtx only, and not in the + * State.SaveMask. + */ + if ((savemask & TBICTX_CBUF_BIT) || + (ctx_savemask & TBICTX_CBRP_BIT)) { + /* Have we already saved the buffers though? + * - See TestTrack 5071 */ + if (ctx_savemask & TBICTX_XCBF_BIT) { + /* Strip off the bits so the call to __TBINestInts + * won't save the buffers again. */ + savemask &= ~TBICTX_CBUF_BIT; + ctx_savemask &= ~TBICTX_CBRP_BIT; + } + } + + #ifdef CONFIG_METAG_META21 + { + unsigned int depth, txdefr; + + /* + * Save TXDEFR state. + * + * The process may have been interrupted after a LNKSET, but + * before it could read the DEFR state, so we mustn't lose that + * state or it could end up retrying an atomic operation that + * succeeded. + * + * All interrupts are disabled at this point so we + * don't need to perform any locking. We must do this + * dance before we use LNKGET or LNKSET. + */ + BUG_ON(current->thread.int_depth > HARDIRQ_BITS); + + depth = current->thread.int_depth++; + + txdefr = __core_reg_get(TXDEFR); + + txdefr &= TXDEFR_BUS_STATE_BITS; + if (txdefr & TXDEFR_LNKSET_SUCCESS) + current->thread.txdefr_failure &= ~(1 << depth); + else + current->thread.txdefr_failure |= (1 << depth); + } + #endif + + State.Sig.SaveMask = savemask; + State.Sig.pCtx->SaveMask = ctx_savemask; + + nest_interrupts(State, mask); + + #ifdef CONFIG_METAG_POISON_CATCH_BUFFERS + /* Poison the catch registers. This shows up any mistakes we have + * made in their handling MUCH quicker. 
+ */ + __core_reg_set(TXCATCH0, 0x87650021); + __core_reg_set(TXCATCH1, 0x87654322); + __core_reg_set(TXCATCH2, 0x87654323); + __core_reg_set(TXCATCH3, 0x87654324); + #endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */ + } + + TBIRES tail_end_sys(TBIRES State, int syscall, int *restart) + { + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + unsigned long flags; + + local_irq_disable(); + + if (user_mode(regs)) { + flags = current_thread_info()->flags; + if (flags & _TIF_WORK_MASK && + do_work_pending(regs, flags, syscall)) { + *restart = 1; + return State; + } + + #ifdef CONFIG_METAG_FPU + if (current->thread.fpu_context && + current->thread.fpu_context->needs_restore) { + __TBICtxFPURestore(State, current->thread.fpu_context); + /* + * Clearing this bit ensures the FP unit is not made + * active again unless it is used. + */ + State.Sig.SaveMask &= ~TBICTX_FPAC_BIT; + current->thread.fpu_context->needs_restore = false; + } + State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR); + #endif + } + + /* TBI will turn interrupts back on at some point. */ + if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask)) + trace_hardirqs_on(); + + #ifdef CONFIG_METAG_DSP + /* + * If we previously saved an extended context then restore it + * now. Otherwise, clear D0.8 because this process is not + * using DSP hardware. + */ + if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) { + unsigned int D0_8; + struct meta_ext_context *dsp_ctx = current->thread.dsp_context; + + /* Make sure we're going to return to userland. */ + BUG_ON(current->thread.int_depth != 1); + + if (dsp_ctx->ram_sz[0] > 0) + __TBIDspramRestoreA(dsp_ctx->ram_sz[0], + dsp_ctx->ram[0]); + if (dsp_ctx->ram_sz[1] > 0) + __TBIDspramRestoreB(dsp_ctx->ram_sz[1], + dsp_ctx->ram[1]); + + State.Sig.SaveMask |= State.Sig.pCtx->SaveMask; + __TBICtxRestore(State, current->thread.dsp_context); + D0_8 = __core_reg_get(D0.8); + D0_8 |= current->thread.user_flags & 0xffff0000; + D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff; + __core_reg_set(D0.8, D0_8); + } else + __core_reg_set(D0.8, 0); + #endif /* CONFIG_METAG_DSP */ + + #ifdef CONFIG_METAG_META21 + { + unsigned int depth, txdefr; + + /* + * If there hasn't been a LNKSET since the last LNKGET then the + * link flag will be set, causing the next LNKSET to succeed if + * the addresses match. The two LNK operations may not be a pair + * (e.g. see atomic_read()), so the LNKSET should fail. + * We use a conditional-never LNKSET to clear the link flag + * without side effects. + */ + asm volatile("LNKSETDNV [D0Re0],D0Re0"); + + depth = --current->thread.int_depth; + + BUG_ON(user_mode(regs) && depth); + + txdefr = __core_reg_get(TXDEFR); + + txdefr &= ~TXDEFR_BUS_STATE_BITS; + + /* Do we need to restore a failure code into TXDEFR? */ + if (current->thread.txdefr_failure & (1 << depth)) + txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT); + else + txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT); + + __core_reg_set(TXDEFR, txdefr); + } + #endif + return State; + } + + #ifdef CONFIG_SMP + /* + * If we took an interrupt in the middle of __kuser_get_tls then we need + * to rewind the PC to the start of the function in case the process + * gets migrated to another thread (SMP only) and it reads the wrong tls + * data. 
+ */ + static inline void _restart_critical_section(TBIRES State) + { + unsigned long get_tls_start; + unsigned long get_tls_end; + + get_tls_start = (unsigned long)__kuser_get_tls - + (unsigned long)&__user_gateway_start; + + get_tls_start += USER_GATEWAY_PAGE; + + get_tls_end = (unsigned long)__kuser_get_tls_end - + (unsigned long)&__user_gateway_start; + + get_tls_end += USER_GATEWAY_PAGE; + + if ((State.Sig.pCtx->CurrPC >= get_tls_start) && + (State.Sig.pCtx->CurrPC < get_tls_end)) + State.Sig.pCtx->CurrPC = get_tls_start; + } + #else + /* + * If we took an interrupt in the middle of + * __kuser_cmpxchg then we need to rewind the PC to the + * start of the function. + */ + static inline void _restart_critical_section(TBIRES State) + { + unsigned long cmpxchg_start; + unsigned long cmpxchg_end; + + cmpxchg_start = (unsigned long)__kuser_cmpxchg - + (unsigned long)&__user_gateway_start; + + cmpxchg_start += USER_GATEWAY_PAGE; + + cmpxchg_end = (unsigned long)__kuser_cmpxchg_end - + (unsigned long)&__user_gateway_start; + + cmpxchg_end += USER_GATEWAY_PAGE; + + if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) && + (State.Sig.pCtx->CurrPC < cmpxchg_end)) + State.Sig.pCtx->CurrPC = cmpxchg_start; + } + #endif + + /* Used by kick_handler() */ + void restart_critical_section(TBIRES State) + { + _restart_critical_section(State); + } + + TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst, + PTBI pTBI) + { + head_end(State, ~INTS_OFF_MASK); + + /* If we interrupted user code handle any critical sections. */ + if (State.Sig.SaveMask & TBICTX_PRIV_BIT) + _restart_critical_section(State); + + trace_hardirqs_off(); + + do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx); + + return tail_end(State); + } + + static unsigned int load_fault(PTBICTXEXTCB0 pbuf) + { + return pbuf->CBFlags & TXCATCH0_READ_BIT; + } + + static unsigned long fault_address(PTBICTXEXTCB0 pbuf) + { + return pbuf->CBAddr; + } + + static void unhandled_fault(struct pt_regs *regs, unsigned long addr, + int signo, int code, int trapno) + { + if (user_mode(regs)) { + siginfo_t info; + + if (show_unhandled_signals && unhandled_signal(current, signo) + && printk_ratelimit()) { + + pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n", + current->pid, regs->ctx.CurrPC, addr, + trapno, trap_name(trapno)); + print_vma_addr(" in ", regs->ctx.CurrPC); + print_vma_addr(" rtp in ", regs->ctx.DX[4].U1); + printk("\n"); + show_regs(regs); + } + + info.si_signo = signo; + info.si_errno = 0; + info.si_code = code; + info.si_addr = (__force void __user *)addr; + info.si_trapno = trapno; + force_sig_info(signo, &info, current); + } else { + die("Oops", regs, trapno, addr); + } + } + + static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs, + unsigned int data_address, int trapno) + { + int ret; + + ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno); + + return ret; + } + + static unsigned long get_inst_fault_address(struct pt_regs *regs) + { + return regs->ctx.CurrPC; + } + + TBIRES fault_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI) + { + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)®s->extcb0; + unsigned long data_address; + + head_end(State, ~INTS_OFF_MASK); + + /* Hardware breakpoint or data watch */ + if ((SigNum == TBIXXF_SIGNUM_IHF) || + ((SigNum == TBIXXF_SIGNUM_DHF) && + (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT | + TXCATCH0_WATCH0_BIT)))) { + State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, + 
pTBI); + return tail_end(State); + } + + local_irq_enable(); + + data_address = fault_address(pcbuf); + + switch (SigNum) { + case TBIXXF_SIGNUM_IGF: + /* 1st-level entry invalid (instruction fetch) */ + case TBIXXF_SIGNUM_IPF: { + /* 2nd-level entry invalid (instruction fetch) */ + unsigned long addr = get_inst_fault_address(regs); + do_page_fault(regs, addr, 0, SigNum); + break; + } + + case TBIXXF_SIGNUM_DGF: + /* 1st-level entry invalid (data access) */ + case TBIXXF_SIGNUM_DPF: + /* 2nd-level entry invalid (data access) */ + case TBIXXF_SIGNUM_DWF: + /* Write to read only page */ + handle_data_fault(pcbuf, regs, data_address, SigNum); + break; + + case TBIXXF_SIGNUM_IIF: + /* Illegal instruction */ + unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC, + SigNum); + break; + + case TBIXXF_SIGNUM_DHF: + /* Unaligned access */ + unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN, + SigNum); + break; + case TBIXXF_SIGNUM_PGF: + /* Privilege violation */ + unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR, + SigNum); + break; + default: + BUG(); + break; + } + + return tail_end(State); + } + + static bool switch_is_syscall(unsigned int inst) + { + return inst == __METAG_SW_ENCODING(SYS); + } + + static bool switch_is_legacy_syscall(unsigned int inst) + { + return inst == __METAG_SW_ENCODING(SYS_LEGACY); + } + + static inline void step_over_switch(struct pt_regs *regs, unsigned int inst) + { + regs->ctx.CurrPC += 4; + } + + static inline int test_syscall_work(void) + { + return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK; + } + + TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI) + { + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + unsigned int sysnumber; + unsigned long long a1_a2, a3_a4, a5_a6; + LPSYSCALL syscall_entry; + int restart; + + head_end(State, ~INTS_OFF_MASK); + + /* + * If this is not a syscall SWITCH it could be a breakpoint. + */ + if (!switch_is_syscall(Inst)) { + /* + * Alert the user if they're trying to use legacy system + * calls. This suggests they need to update their C + * library and build against up to date kernel headers. + */ + if (switch_is_legacy_syscall(Inst)) + pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n"); + /* + * We don't know how to handle the SWITCH and cannot + * safely ignore it, so treat all unknown switches + * (including breakpoints) as traps. + */ + force_sig(SIGTRAP, current); + return tail_end(State); + } + + local_irq_enable(); + + restart_syscall: + restart = 0; + sysnumber = regs->ctx.DX[0].U1; + + if (test_syscall_work()) + sysnumber = syscall_trace_enter(regs); + + /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */ + step_over_switch(regs, Inst); + + if (sysnumber >= __NR_syscalls) { + pr_debug("unknown syscall number: %d\n", sysnumber); + syscall_entry = (LPSYSCALL) sys_ni_syscall; + } else { + syscall_entry = (LPSYSCALL) sys_call_table[sysnumber]; + } + + /* Use 64bit loads for speed. 
*/ + a5_a6 = *(unsigned long long *)®s->ctx.DX[1]; + a3_a4 = *(unsigned long long *)®s->ctx.DX[2]; + a1_a2 = *(unsigned long long *)®s->ctx.DX[3]; + + /* here is the actual call to the syscall handler functions */ + regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6); + + if (test_syscall_work()) + syscall_trace_leave(regs); + + State = tail_end_sys(State, sysnumber, &restart); + /* Handlerless restarts shouldn't go via userland */ + if (restart) + goto restart_syscall; + return State; + } + + TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers, + int Inst, PTBI pTBI) + { + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + + /* + * This can be caused by any user process simply executing an unusual + * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the + * thread to stop, so signal a SIGTRAP instead. + */ + head_end(State, ~INTS_OFF_MASK); + if (user_mode(regs)) + force_sig(SIGTRAP, current); + else + State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI); + return tail_end(State); + } + + #ifdef CONFIG_METAG_META21 + TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI) + { + struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx; + unsigned int error_state = Triggers; + siginfo_t info; + + head_end(State, ~INTS_OFF_MASK); + + local_irq_enable(); + + info.si_signo = SIGFPE; + + if (error_state & TXSTAT_FPE_INVALID_BIT) + info.si_code = FPE_FLTINV; + else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT) + info.si_code = FPE_FLTDIV; + else if (error_state & TXSTAT_FPE_OVERFLOW_BIT) + info.si_code = FPE_FLTOVF; + else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT) + info.si_code = FPE_FLTUND; + else if (error_state & TXSTAT_FPE_INEXACT_BIT) + info.si_code = FPE_FLTRES; + else + info.si_code = 0; + info.si_errno = 0; + info.si_addr = (__force void __user *)regs->ctx.CurrPC; + force_sig_info(SIGFPE, &info, current); + + return tail_end(State); + } + #endif + + #ifdef CONFIG_METAG_SUSPEND_MEM + struct traps_context { + PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1]; + }; + + static struct traps_context *metag_traps_context; + + int traps_save_context(void) + { + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + struct traps_context *context; + + context = kzalloc(sizeof(*context), GFP_ATOMIC); + if (!context) + return -ENOMEM; + + memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs)); + + metag_traps_context = context; + return 0; + } + + int traps_restore_context(void) + { + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + struct traps_context *context = metag_traps_context; + + metag_traps_context = NULL; + + memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs)); + + kfree(context); + return 0; + } + #endif + + #ifdef CONFIG_SMP + static inline unsigned int _get_trigger_mask(void) + { + unsigned long cpu = smp_processor_id(); + return per_cpu(trigger_mask, cpu); + } + + unsigned int get_trigger_mask(void) + { + return _get_trigger_mask(); + } + EXPORT_SYMBOL(get_trigger_mask); + + static void set_trigger_mask(unsigned int mask) + { + unsigned long cpu = smp_processor_id(); + per_cpu(trigger_mask, cpu) = mask; + } + + void arch_local_irq_enable(void) + { + preempt_disable(); + arch_local_irq_restore(_get_trigger_mask()); + preempt_enable_no_resched(); + } + EXPORT_SYMBOL(arch_local_irq_enable); + #else + static void set_trigger_mask(unsigned int mask) + { + global_trigger_mask = mask; + } + #endif + + void __cpuinit per_cpu_trap_init(unsigned long cpu) 
+ { + TBIRES int_context; + unsigned int thread = cpu_2_hwthread_id[cpu]; + + set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */ + TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */ + TBI_TRIG_BIT(TBID_SIGNUM_SW1) | + TBI_TRIG_BIT(TBID_SIGNUM_SWS)); + + /* non-priv - use current stack */ + int_context.Sig.pCtx = NULL; + /* Start with interrupts off */ + int_context.Sig.TrigMask = INTS_OFF_MASK; + int_context.Sig.SaveMask = 0; + + /* And call __TBIASyncTrigger() */ + __TBIASyncTrigger(int_context); + } + + void __init trap_init(void) + { + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + + _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler; + _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler; + _pTBI->fnSigs[TBID_SIGNUM_SWK] = kick_handler; + + #ifdef CONFIG_METAG_META21 + _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR; + _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler; + #endif + + per_cpu_trap_init(cpu); + } + + void tbi_startup_interrupt(int irq) + { + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + + BUG_ON(irq > TBID_SIGNUM_MAX); + + /* For TR1 and TR2, the thread id is encoded in the irq number */ + if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3) + cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4]; + + set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq)); + + _pTBI->fnSigs[irq] = trigger_handler; + } + + void tbi_shutdown_interrupt(int irq) + { + unsigned long cpu = smp_processor_id(); + PTBI _pTBI = per_cpu(pTBI, cpu); + + BUG_ON(irq > TBID_SIGNUM_MAX); + + set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq)); + + _pTBI->fnSigs[irq] = __TBIUnExpXXX; + } + + int ret_from_fork(TBIRES arg) + { + struct task_struct *prev = arg.Switch.pPara; + struct task_struct *tsk = current; + struct pt_regs *regs = task_pt_regs(tsk); + int (*fn)(void *); + TBIRES Next; + + schedule_tail(prev); + + if (tsk->flags & PF_KTHREAD) { + fn = (void *)regs->ctx.DX[4].U1; + BUG_ON(!fn); + + fn((void *)regs->ctx.DX[3].U1); + } + + if (test_syscall_work()) + syscall_trace_leave(regs); + + preempt_disable(); + + Next.Sig.TrigMask = get_trigger_mask(); + Next.Sig.SaveMask = 0; + Next.Sig.pCtx = ®s->ctx; + + set_gateway_tls(current->thread.tls_ptr); + + preempt_enable_no_resched(); + + /* And interrupts should come back on when we resume the real usermode + * code. Call __TBIASyncResume() + */ + __TBIASyncResume(tail_end(Next)); + /* ASyncResume should NEVER return */ + BUG(); + return 0; + } + + void show_trace(struct task_struct *tsk, unsigned long *sp, + struct pt_regs *regs) + { + unsigned long addr; + #ifdef CONFIG_FRAME_POINTER + unsigned long fp, fpnew; + unsigned long stack; + #endif + + if (regs && user_mode(regs)) + return; + + printk("\nCall trace: "); + #ifdef CONFIG_KALLSYMS + printk("\n"); + #endif + + if (!tsk) + tsk = current; + + #ifdef CONFIG_FRAME_POINTER + if (regs) { + print_ip_sym(regs->ctx.CurrPC); + fp = regs->ctx.AX[1].U0; + } else { + fp = __core_reg_get(A0FrP); + } + + /* detect when the frame pointer has been used for other purposes and + * doesn't point to the stack (it may point completely elsewhere which + * kstack_end may not detect). 
+ */ + stack = (unsigned long)task_stack_page(tsk); + while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) { + addr = __raw_readl((unsigned long *)(fp + 4)) - 4; + if (kernel_text_address(addr)) + print_ip_sym(addr); + else + break; + /* stack grows up, so frame pointers must decrease */ + fpnew = __raw_readl((unsigned long *)(fp + 0)); + if (fpnew >= fp) + break; + fp = fpnew; + } + #else + while (!kstack_end(sp)) { + addr = (*sp--) - 4; + if (kernel_text_address(addr)) + print_ip_sym(addr); + } + #endif + + printk("\n"); + + debug_show_held_locks(tsk); + } + + void show_stack(struct task_struct *tsk, unsigned long *sp) + { + if (!tsk) + tsk = current; + if (tsk == current) + sp = (unsigned long *)current_stack_pointer; + else + sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0; + + show_trace(tsk, sp, NULL); + } + + void dump_stack(void) + { + show_stack(NULL, NULL); + } + EXPORT_SYMBOL(dump_stack); diff --cc drivers/clocksource/Kconfig index e920cbe519fa,75bc7520ace5..e507ab7df60b --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@@ -60,5 -54,12 +60,10 @@@ config CLKSRC_DBX500_PRCMU_SCHED_CLOC help Use the always on PRCMU Timer as sched_clock -config CLKSRC_ARM_GENERIC - def_bool y if ARM64 - help - This option enables support for the ARM generic timer. +config ARM_ARCH_TIMER + bool + + config CLKSRC_METAG_GENERIC + def_bool y if METAG + help + This option enables support for the Meta per-thread timers. diff --cc drivers/clocksource/Makefile index 7d671b85a98e,09dcd49b7e31..4d8283aec5b5 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@@ -17,7 -16,6 +17,8 @@@ obj-$(CONFIG_CLKSRC_DBX500_PRCMU) += cl obj-$(CONFIG_ARMADA_370_XP_TIMER) += time-armada-370-xp.o obj-$(CONFIG_ARCH_BCM2835) += bcm2835_timer.o obj-$(CONFIG_SUNXI_TIMER) += sunxi_timer.o +obj-$(CONFIG_ARCH_TEGRA) += tegra20_timer.o +obj-$(CONFIG_VT8500_TIMER) += vt8500_timer.o -obj-$(CONFIG_CLKSRC_ARM_GENERIC) += arm_generic.o +obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o + obj-$(CONFIG_CLKSRC_METAG_GENERIC) += metag_generic.o diff --cc drivers/irqchip/Makefile index e65fbf2cdf71,ff02f6b98863..98e3b87bdf1b --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@@ -1,9 -1,6 +1,11 @@@ +obj-$(CONFIG_IRQCHIP) += irqchip.o + obj-$(CONFIG_ARCH_BCM2835) += irq-bcm2835.o +obj-$(CONFIG_ARCH_EXYNOS) += exynos-combiner.o + obj-$(CONFIG_METAG) += irq-metag-ext.o + obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi.o -obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o +obj-$(CONFIG_ARM_GIC) += irq-gic.o +obj-$(CONFIG_ARM_VIC) += irq-vic.o +obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o diff --cc tools/perf/perf.h index d5818c98d051,768507e079e2..74659ecf93e0 --- a/tools/perf/perf.h +++ b/tools/perf/perf.h @@@ -94,12 -98,12 +94,18 @@@ #define CPUINFO_PROC "cpu model" #endif +#ifdef __arc__ +#define rmb() asm volatile("" ::: "memory") +#define cpu_relax() rmb() +#define CPUINFO_PROC "Processor" +#endif + + #ifdef __metag__ + #define rmb() asm volatile("" ::: "memory") + #define cpu_relax() asm volatile("" ::: "memory") + #define CPUINFO_PROC "CPU" + #endif + #include #include #include
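(Aside on the tools/perf hunk above: the __metag__ block defines rmb() and cpu_relax() because the perf tool polls the mmap'd event ring buffer that the kernel fills concurrently. The sketch below shows that usage pattern in outline; struct perf_event_mmap_page is the standard perf mmap ABI, but the function itself is a simplified illustration, not the code in tools/perf.)

#include <linux/perf_event.h>

/*
 * Wait for new records in the perf mmap ring buffer. rmb() orders the
 * read of data_head before the reads of the records it covers, and
 * cpu_relax() keeps the empty-buffer poll loop polite. Both macros come
 * from tools/perf/perf.h as extended in the diff above.
 */
static unsigned long long wait_for_events(volatile struct perf_event_mmap_page *pc,
					  unsigned long long tail)
{
	unsigned long long head;

	while ((head = pc->data_head) == tail)
		cpu_relax();		/* no new records yet */

	rmb();				/* see the data_head update before its data */
	return head;			/* caller consumes records in [tail, head) */
}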