select HAVE_DMA_API_DEBUG
select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
- - select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
+ + select HAVE_DMA_CONTIGUOUS if MMU
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
select HAVE_ARCH_KGDB
select HAVE_ARCH_TRACEHOOK
++++++ select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES if !XIP_KERNEL
select HAVE_KRETPROBES if (HAVE_KPROBES)
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
select HARDIRQS_SW_RESEND
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
- - select GENERIC_IRQ_PROBE
select ARCH_WANT_IPC_PARSE_VERSION
select HARDIRQS_SW_RESEND
select CPU_PM if (SUSPEND || CPU_IDLE)
bool
default y
- - config GENERIC_LOCKBREAK
- - bool
- - default y
- - depends on SMP && PREEMPT
- -
config RWSEM_GENERIC_SPINLOCK
bool
default y
on systems with an outer cache, the store buffer is drained
explicitly.
++ ++++config ARM_ERRATA_775420
++ ++++ bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
++ ++++ depends on CPU_V7
++ ++++ help
++ ++++ This option enables the workaround for the 775420 Cortex-A9 (r2p2,
++ ++++	  r2p6,r2p8,r2p10,r3p0) erratum. In case a data cache maintenance
++ ++++ operation aborts with MMU exception, it might cause the processor
++ ++++ to deadlock. This workaround puts DSB before executing ISB if
++ ++++ an abort may occur on cache maintenance.
++ ++++
endmenu
source "arch/arm/common/Kconfig"
configuration it is safe to say N, otherwise say Y.
config UACCESS_WITH_MEMCPY
---- -- bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user() (EXPERIMENTAL)"
---- -- depends on MMU && EXPERIMENTAL
++++ ++ bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
++++ ++ depends on MMU
default y if CPU_FEROCEON
help
Implement faster copy_to_user and clear_user methods for CPU
neutralized via a kernel panic.
This feature requires gcc version 4.2 or above.
--- ---config DEPRECATED_PARAM_STRUCT
--- --- bool "Provide old way to pass kernel parameters"
--- --- help
--- --- This was deprecated in 2001 and announced to live on for 5 years.
--- --- Some old boot loaders still use this way.
--- ---
endmenu
menu "Boot options"
help
Include support for flattened device tree machine descriptions.
+++ +++config ATAGS
+++ +++ bool "Support for the traditional ATAGS boot data passing" if USE_OF
+++ +++ default y
+++ +++ help
+++ +++ This is the traditional way of passing data to the kernel at boot
+++ +++ time. If you are solely relying on the flattened device tree (or
+++ +++ the ARM_ATAG_DTB_COMPAT option) then you may unselect this option
+++ +++ to remove ATAGS support from your kernel binary. If unsure,
+++ +++ leave this to y.
+++ +++
+++ +++config DEPRECATED_PARAM_STRUCT
+++ +++ bool "Provide old way to pass kernel parameters"
+++ +++ depends on ATAGS
+++ +++ help
+++ +++ This was deprecated in 2001 and announced to live on for 5 years.
+++ +++ Some old boot loaders still use this way.
+++ +++
# Compressed boot loader in ROM. Yes, we really want to ask about
# TEXT and BSS so we preserve their values in the config files.
config ZBOOT_ROM_TEXT
choice
prompt "Kernel command line type" if CMDLINE != ""
default CMDLINE_FROM_BOOTLOADER
+++ +++ depends on ATAGS
config CMDLINE_FROM_BOOTLOADER
bool "Use bootloader kernel arguments if available"
config ATAGS_PROC
bool "Export atags in procfs"
--- --- depends on KEXEC
+++ +++ depends on ATAGS && KEXEC
default y
help
Should the atags used to boot the kernel be exported in an "atags"
config CPU_FREQ_IMX
tristate "CPUfreq driver for i.MX CPUs"
depends on ARCH_MXC && CPU_FREQ
+ + select CPU_FREQ_TABLE
help
This enables the CPUfreq driver for i.MX CPUs.
KBUILD_IMAGE := zImage
endif
---- --all: $(KBUILD_IMAGE)
++++ ++# Build the DT binary blobs if we have OF configured
++++ ++ifeq ($(CONFIG_USE_OF),y)
++++ ++KBUILD_DTBS := dtbs
++++ ++endif
++++ ++
++++ ++all: $(KBUILD_IMAGE) $(KBUILD_DTBS)
boot := arch/arm/boot
zinstall uinstall install: vmlinux
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
-- --- %.dtb:
++ +++ %.dtb: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
-- --- dtbs:
++ +++ dtbs: scripts
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
# We use MRPROPER_FILES and CLEAN_FILES now
echo ' uImage - U-Boot wrapped zImage'
echo ' bootpImage - Combined zImage and initial RAM disk'
echo ' (supply initrd image via make variable INITRD=<path>)'
---- -- echo ' dtbs - Build device tree blobs for enabled boards'
++++ ++ echo '* dtbs - Build device tree blobs for enabled boards'
echo ' install - Install uncompressed kernel'
echo ' zinstall - Install compressed kernel'
echo ' uinstall - Install U-Boot wrapped compressed kernel'
__u32 syscall; /* syscall number */
__u8 used_cp[16]; /* thread used copro */
unsigned long tp_value;
++++++#ifdef CONFIG_CRUNCH
struct crunch_state crunchstate;
++++++#endif
union fp_state fpstate __attribute__((aligned(8)));
union vfp_state vfpstate;
#ifdef CONFIG_ARM_THUMBEE
#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
#define TIF_SYSCALL_TRACE 8
#define TIF_SYSCALL_AUDIT 9
++++++ #define TIF_SYSCALL_TRACEPOINT 10
#define TIF_POLLING_NRFLAG 16
#define TIF_USING_IWMMXT 17
#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
++++++ #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
#define _TIF_SECCOMP (1 << TIF_SECCOMP)
/* Checks for any syscall work in entry-common.S */
------ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
++++++ #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT)
/*
* Change these and you break ASM code in entry-common.S
#define __NR_setns (__NR_SYSCALL_BASE+375)
#define __NR_process_vm_readv (__NR_SYSCALL_BASE+376)
#define __NR_process_vm_writev (__NR_SYSCALL_BASE+377)
++ ++++ /* 378 for kcmp */
++ +++
++++++ /*
++++++ * This may need to be greater than __NR_last_syscall+1 in order to
++++++ * account for the padding in the syscall table
++++++ */
++++++ #ifdef __KERNEL__
++++++ #define __NR_syscalls (380)
++++++ #endif /* __KERNEL__ */
+
/*
* The following SWIs are ARM private.
*/
*/
#define __IGNORE_fadvise64_64
#define __IGNORE_migrate_pages
++ ++++#define __IGNORE_kcmp
#endif /* __KERNEL__ */
#endif /* __ASM_ARM_UNISTD_H */
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
++++ ++#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/syscore_ops.h>
#include <linux/timer.h>
u32 epoch_cyc_copy;
u32 mult;
u32 shift;
+ + bool suspended;
+ + bool needs_suspend;
};
static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
++++ ++static int irqtime = -1;
++++ ++
++++ ++core_param(irqtime, irqtime, int, 0400);
static struct clock_data cd = {
.mult = NSEC_PER_SEC / HZ,
u64 epoch_ns;
u32 epoch_cyc;
+ + if (cd.suspended)
+ + return cd.epoch_ns;
+ +
/*
* Load the epoch_cyc and epoch_ns atomically. We do this by
* ensuring that we always write epoch_cyc, epoch_ns and
update_sched_clock();
}
+ + void __init setup_sched_clock_needs_suspend(u32 (*read)(void), int bits,
+ + unsigned long rate)
+ + {
+ + setup_sched_clock(read, bits, rate);
+ + cd.needs_suspend = true;
+ + }
+ +
void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
{
unsigned long r, w;
*/
cd.epoch_ns = 0;
++++ ++ /* Enable IRQ time accounting if we have a fast enough sched_clock */
++++ ++ if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
++++ ++ enable_sched_clock_irqtime();
++++ ++
pr_debug("Registered %pF as sched_clock source\n", read);
}
static int sched_clock_suspend(void)
{
sched_clock_poll(sched_clock_timer.data);
+ + if (cd.needs_suspend)
+ + cd.suspended = true;
return 0;
}
+ + static void sched_clock_resume(void)
+ + {
+ + if (cd.needs_suspend) {
+ + cd.epoch_cyc = read_sched_clock();
+ + cd.epoch_cyc_copy = cd.epoch_cyc;
+ + cd.suspended = false;
+ + }
+ + }
+ +
static struct syscore_ops sched_clock_ops = {
.suspend = sched_clock_suspend,
+ + .resume = sched_clock_resume,
};
static int __init sched_clock_syscore_init(void)