select HAVE_IDE if PCI || ISA || PCMCIA
select HAVE_DMA_ATTRS
select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
- -- select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
select GENERIC_SMP_IDLE_THREAD
select KTIME_SCALAR
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
++++ + select GENERIC_STRNCPY_FROM_USER
++++ + select GENERIC_STRNLEN_USER
++++ + select DCACHE_WORD_ACCESS if (CPU_V6 || CPU_V6K || CPU_V7) && !CPU_BIG_ENDIAN
help
The ARM series is a line of low-power-consumption RISC chip designs
licensed by ARM Ltd and targeted at embedded applications and
select ICST
select GENERIC_CLOCKEVENTS
select ARCH_WANT_OPTIONAL_GPIOLIB
+ ++ select NEED_MACH_IO_H if PCI
select PLAT_VERSATILE
select PLAT_VERSATILE_CLCD
select PLAT_VERSATILE_FPGA_IRQ
select PCI
select ARCH_REQUIRE_GPIOLIB
select GENERIC_CLOCKEVENTS
+ ++ select NEED_MACH_IO_H
select PLAT_ORION
help
Support for the following Marvell Orion 5x series SoCs:
bootloaders, this option allows zImage to extract the information
from the ATAG list and store it at run time into the appended DTB.
++++ +choice
++++ + prompt "Kernel command line type" if ARM_ATAG_DTB_COMPAT
++++ + default ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER
++++ +
++++ +config ARM_ATAG_DTB_COMPAT_CMDLINE_FROM_BOOTLOADER
++++ + bool "Use bootloader kernel arguments if available"
++++ + help
++++ + Uses the command-line options passed by the boot loader instead of
++++ + the device tree bootargs property. If the boot loader doesn't provide
++++ + any, the device tree bootargs property will be used.
++++ +
++++ +config ARM_ATAG_DTB_COMPAT_CMDLINE_EXTEND
++++ + bool "Extend with bootloader kernel arguments"
++++ + help
++++ + The command-line arguments provided by the boot loader will be
++++ + appended to the device tree bootargs property.
++++ +
++++ +endchoice
++++ +
config CMDLINE
string "Default kernel command string"
default ""
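
The two policies above amount to a string merge that the zImage decompressor performs when patching the appended DTB. A minimal userspace sketch of that choice; merge_cmdline() and its argument names are illustrative, not the in-tree ATAG-to-FDT code:

/* Sketch of the two ARM_ATAG_DTB_COMPAT command line policies.
 * merge_cmdline() is hypothetical; the real merge happens while the
 * zImage decompressor patches the appended DTB. */
#include <stdio.h>

#define COMMAND_LINE_SIZE 1024

enum cmdline_policy { FROM_BOOTLOADER, EXTEND };

static void merge_cmdline(char *out, size_t len,
			  const char *dt_bootargs,	/* /chosen/bootargs */
			  const char *atag_cmdline,	/* ATAG_CMDLINE */
			  enum cmdline_policy policy)
{
	if (policy == FROM_BOOTLOADER)
		/* bootloader wins; fall back to the DT property if empty */
		snprintf(out, len, "%s",
			 atag_cmdline && *atag_cmdline ? atag_cmdline
						       : dt_bootargs);
	else
		/* EXTEND: append the bootloader arguments to the DT ones */
		snprintf(out, len, "%s %s", dt_bootargs, atag_cmdline);
}

int main(void)
{
	char buf[COMMAND_LINE_SIZE];

	merge_cmdline(buf, sizeof(buf), "console=ttyAMA0",
		      "root=/dev/mmcblk0p2", EXTEND);
	printf("%s\n", buf);	/* console=ttyAMA0 root=/dev/mmcblk0p2 */
	return 0;
}
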
static struct clock_event_device __percpu **arch_timer_evt;
++ +++extern void init_current_timer_delay(unsigned long freq);
++ +++
/*
* Architected system timer support.
*/
/* Be safe... */
arch_timer_disable();
---- - clk->features = CLOCK_EVT_FEAT_ONESHOT;
++++ + clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->set_mode = arch_timer_set_mode;
return arch_counter_get_cntpct();
}
++ +++int read_current_timer(unsigned long *timer_val)
++ +++{
++ +++ if (!arch_timer_rate)
++ +++ return -ENXIO;
++ +++ *timer_val = arch_counter_get_cntpct();
++ +++ return 0;
++ +++}
++ +++
static struct clocksource clocksource_counter = {
.name = "arch_sys_counter",
.rating = 400,
if (err)
goto out_free_irq;
++ +++ init_current_timer_delay(arch_timer_rate);
return 0;
out_free_irq:
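
With read_current_timer() wired to the architected counter and init_current_timer_delay() passing it the counter frequency, udelay() can poll hardware instead of spinning in a calibrated instruction loop. A sketch of the idea, assuming a hypothetical timer_freq_hz in place of the kernel's fixed-point lpj arithmetic:

/* Sketch only: a counter-backed udelay() in the spirit of the timer
 * path behind arm_delay_ops. timer_freq_hz is a hypothetical stand-in
 * for what init_current_timer_delay() records; the in-tree code uses
 * fixed-point lpj arithmetic instead of this division. */
static unsigned long timer_freq_hz;

static void timer_udelay(unsigned long usecs)
{
	unsigned long start, now;
	unsigned long ticks = usecs * (timer_freq_hz / 1000000);

	if (read_current_timer(&start))
		return;		/* no timer yet: use the loop-based delay */
	do {
		if (read_current_timer(&now))
			return;
	} while ((now - start) < ticks);	/* unsigned math handles wrap */
}
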
extern void fpundefinstr(void);
/* platform dependent support */
-- ---EXPORT_SYMBOL(__udelay);
-- ---EXPORT_SYMBOL(__const_udelay);
++ +++EXPORT_SYMBOL(arm_delay_ops);
/* networking */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(memchr);
EXPORT_SYMBOL(__memzero);
---- - /* user mem (segment) */
---- -EXPORT_SYMBOL(__strnlen_user);
---- -EXPORT_SYMBOL(__strncpy_from_user);
---- -
#ifdef CONFIG_MMU
EXPORT_SYMBOL(copy_page);
/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *cpu_pmu;
---- -enum arm_perf_pmu_ids
---- -armpmu_get_pmu_id(void)
++++ +const char *perf_pmu_name(void)
{
---- - int id = -ENODEV;
---- -
---- - if (cpu_pmu != NULL)
---- - id = cpu_pmu->id;
++++ + if (!cpu_pmu)
++++ + return NULL;
---- - return id;
++++ + return cpu_pmu->pmu.name;
}
---- -EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
++++ +EXPORT_SYMBOL_GPL(perf_pmu_name);
int perf_num_counters(void)
{
event_requires_mode_exclusion(&event->attr)) {
pr_debug("ARM performance counters do not support "
"mode exclusion\n");
- -- return -EPERM;
+ ++ return -EOPNOTSUPP;
}
/*
cpu_pmu->name, cpu_pmu->num_events);
cpu_pmu_init(cpu_pmu);
register_cpu_notifier(&pmu_cpu_notifier);
---- - armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
++++ + armpmu_register(cpu_pmu, cpu_pmu->name, PERF_TYPE_RAW);
} else {
pr_info("no hardware support available\n");
}
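
With the numeric PMU id gone, consumers get the registered name string from perf_pmu_name(), which returns NULL until a PMU has probed. A hypothetical caller showing that contract:

/* Hypothetical caller: the numeric id is gone, so consumers report the
 * registered name, checking for NULL before any PMU has probed. */
static void report_pmu(void)
{
	const char *name = perf_pmu_name();

	pr_info("active PMU: %s\n", name ? name : "none");
}
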
#define S_ISA " ARM"
#endif
---- -static int __die(const char *str, int err, struct thread_info *thread, struct pt_regs *regs)
++++ +static int __die(const char *str, int err, struct pt_regs *regs)
{
---- - struct task_struct *tsk = thread->task;
++++ + struct task_struct *tsk = current;
static int die_counter;
int ret;
/* trap and error numbers are mostly meaningless on ARM */
ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
if (ret == NOTIFY_STOP)
---- - return ret;
++++ + return 1;
print_modules();
__show_regs(regs);
printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
---- - TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);
++++ + TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
if (!user_mode(regs) || in_interrupt()) {
dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
dump_instr(KERN_EMERG, regs);
}
---- - return ret;
++++ + return 0;
}
---- -static DEFINE_RAW_SPINLOCK(die_lock);
++++ +static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
++++ +static int die_owner = -1;
++++ +static unsigned int die_nest_count;
---- -/*
---- - * This function is protected against re-entrancy.
---- - */
---- -void die(const char *str, struct pt_regs *regs, int err)
++++ +static unsigned long oops_begin(void)
{
---- - struct thread_info *thread = current_thread_info();
---- - int ret;
---- - enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
++++ + int cpu;
++++ + unsigned long flags;
oops_enter();
---- - raw_spin_lock_irq(&die_lock);
++++ + /* racy, but better than risking deadlock. */
++++ + raw_local_irq_save(flags);
++++ + cpu = smp_processor_id();
++++ + if (!arch_spin_trylock(&die_lock)) {
++++ + if (cpu == die_owner)
++++ + /* nested oops. should stop eventually */;
++++ + else
++++ + arch_spin_lock(&die_lock);
++++ + }
++++ + die_nest_count++;
++++ + die_owner = cpu;
console_verbose();
bust_spinlocks(1);
---- - if (!user_mode(regs))
---- - bug_type = report_bug(regs->ARM_pc, regs);
---- - if (bug_type != BUG_TRAP_TYPE_NONE)
---- - str = "Oops - BUG";
---- - ret = __die(str, err, thread, regs);
++++ + return flags;
++++ +}
---- - if (regs && kexec_should_crash(thread->task))
++++ +static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
++++ +{
++++ + if (regs && kexec_should_crash(current))
crash_kexec(regs);
bust_spinlocks(0);
++++ + die_owner = -1;
add_taint(TAINT_DIE);
---- - raw_spin_unlock_irq(&die_lock);
++++ + die_nest_count--;
++++ + if (!die_nest_count)
++++ + /* Nest count reaches zero, release the lock. */
++++ + arch_spin_unlock(&die_lock);
++++ + raw_local_irq_restore(flags);
oops_exit();
if (in_interrupt())
panic("Fatal exception in interrupt");
if (panic_on_oops)
panic("Fatal exception");
---- - if (ret != NOTIFY_STOP)
---- - do_exit(SIGSEGV);
++++ + if (signr)
++++ + do_exit(signr);
++++ +}
++++ +
++++ +/*
++++ + * This function is protected against re-entrancy.
++++ + */
++++ +void die(const char *str, struct pt_regs *regs, int err)
++++ +{
++++ + enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
++++ + unsigned long flags = oops_begin();
++++ + int sig = SIGSEGV;
++++ +
++++ + if (!user_mode(regs))
++++ + bug_type = report_bug(regs->ARM_pc, regs);
++++ + if (bug_type != BUG_TRAP_TYPE_NONE)
++++ + str = "Oops - BUG";
++++ +
++++ + if (__die(str, err, regs))
++++ + sig = 0;
++++ +
++++ + oops_end(flags, regs, sig);
}
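
The rework trades the single raw spinlock for the x86-style owner-tracking pattern: a second oops on the CPU that already holds die_lock falls through rather than deadlocking, while other CPUs still serialise. A toy model of just that rule, in userspace C11 atomics rather than kernel primitives:

/* Toy model of the nesting rule; names are illustrative, not the
 * kernel's primitives. The key point: the CPU that already owns the
 * lock must not spin on it again, or a second oops on that CPU would
 * deadlock inside the first. */
#include <stdatomic.h>

static atomic_flag toy_die_lock = ATOMIC_FLAG_INIT;
static int toy_die_owner = -1;
static unsigned int toy_die_nest;

static void toy_oops_begin(int cpu)
{
	if (atomic_flag_test_and_set(&toy_die_lock)) {
		/* lock was already held */
		if (cpu != toy_die_owner)
			while (atomic_flag_test_and_set(&toy_die_lock))
				;	/* another CPU owns it: wait */
		/* else: nested oops on this CPU, fall through */
	}
	toy_die_owner = cpu;
	toy_die_nest++;
}

static void toy_oops_end(void)
{
	toy_die_owner = -1;
	if (--toy_die_nest == 0)
		atomic_flag_clear(&toy_die_lock);
}
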
void arm_notify_die(const char *str, struct pt_regs *regs,
*/
memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
sigreturn_codes, sizeof(sigreturn_codes));
+ ++ memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
+ ++ syscall_restart_code, sizeof(syscall_restart_code));
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-- --- delay.o findbit.o memchr.o memcpy.o \
++ +++ delay.o delay-loop.o findbit.o memchr.o memcpy.o \
memmove.o memset.o memzero.o setbit.o \
---- - strncpy_from_user.o strnlen_user.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
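
The Makefile now splits the delay dispatcher (delay.o) from the legacy software loop (delay-loop.o), matching the single arm_delay_ops export that replaced __udelay/__const_udelay above. A sketch of the resulting indirection, under the assumption that the ops struct carries delay, const-udelay and udelay hooks as in arch/arm/include/asm/delay.h:

/* Rough shape of the indirection: one exported ops struct that boot
 * code can repoint from the calibrated software loop to a hardware
 * timer implementation. */
struct arm_delay_ops {
	void (*delay)(unsigned long loops);
	void (*const_udelay)(unsigned long xloops);
	void (*udelay)(unsigned long usecs);
};

extern struct arm_delay_ops arm_delay_ops;

static inline void udelay_sketch(unsigned long usecs)
{
	arm_delay_ops.udelay(usecs);	/* loop-based or timer-based */
}
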
#include <linux/slab.h>
#include <linux/iommu.h>
#include <linux/vmalloc.h>
+++++ #include <linux/sizes.h>
#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
----- #include <asm/sizes.h>
#include <asm/mach/arch.h>
#include <asm/dma-iommu.h>
#include <asm/mach/map.h>
#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
- --unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
+ ++static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
void __init init_consistent_dma_size(unsigned long size)
{
unsigned long base = consistent_base;
unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
- --#ifndef CONFIG_ARM_DMA_USE_IOMMU
- -- if (cpu_architecture() >= CPU_ARCH_ARMv6)
+ ++ if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
return 0;
- --#endif
consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
if (!consistent_pte) {
.vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
};
- --size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
+ ++static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
static int __init early_coherent_pool(char *p)
{
struct page *page;
void *ptr;
- -- if (cpu_architecture() < CPU_ARCH_ARMv6)
+ ++ if (!IS_ENABLED(CONFIG_CMA))
return 0;
ptr = __alloc_from_contiguous(NULL, size, prot, &page);
if (arch_is_coherent() || nommu())
addr = __alloc_simple_buffer(dev, size, gfp, &page);
- -- else if (cpu_architecture() < CPU_ARCH_ARMv6)
+ ++ else if (!IS_ENABLED(CONFIG_CMA))
addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
else if (gfp & GFP_ATOMIC)
addr = __alloc_from_pool(dev, size, &page, caller);
if (arch_is_coherent() || nommu()) {
__dma_free_buffer(page, size);
- -- } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
+ ++ } else if (!IS_ENABLED(CONFIG_CMA)) {
__dma_free_remap(cpu_addr, size);
__dma_free_buffer(page, size);
} else {
return NULL;
while (count) {
- -- int j, order = __ffs(count);
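+ ++		/* pick the largest order that still fits in the remaining count */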
+ ++ int j, order = __fls(count);
pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
while (!pages[i] && order)
while (i--)
if (pages[i])
__free_pages(pages[i], 0);
----- if (array_size < PAGE_SIZE)
+++++ if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
for (i = 0; i < count; i++)
if (pages[i])
__free_pages(pages[i], 0);
----- if (array_size < PAGE_SIZE)
+++++ if (array_size <= PAGE_SIZE)
kfree(pages);
else
vfree(pages);
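
The deleted cpu_architecture() tests were runtime proxies for "is CMA available here"; IS_ENABLED(CONFIG_CMA) answers that at compile time, evaluating to 1 when the option is y or m, so the dead branch is discarded by constant folding yet still type-checked. The pattern in isolation, with hypothetical helper names:

/* IS_ENABLED(CONFIG_FOO) is an integer constant expression, so the
 * compiler checks both branches and drops the dead one. The helpers
 * below are hypothetical. */
#include <linux/kconfig.h>
#include <linux/types.h>

void *alloc_via_remap(size_t size);		/* hypothetical */
void *alloc_via_contiguous(size_t size);	/* hypothetical */

static void *alloc_buffer_sketch(size_t size)
{
	if (!IS_ENABLED(CONFIG_CMA))
		return alloc_via_remap(size);
	return alloc_via_contiguous(size);
}
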
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/dma-contiguous.h>
+++++ #include <linux/sizes.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
----- #include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
- --u32 arm_dma_limit;
+ ++phys_addr_t arm_dma_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>
+++++ #include <linux/sizes.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
----- #include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
---- - /*
---- - * Only use write-through for non-SMP systems
---- - */
---- - if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
---- - vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
---- -
/*
* Enable CPU-specific coherency if supported.
* (Only available on XSC3 at the moment.)
}
}
+ ++#ifndef CONFIG_ARM_LPAE
+ ++
+ ++/*
+ ++ * The Linux PMD is made of two consecutive section entries covering 2MB
+ ++ * (see definition in include/asm/pgtable-2level.h). However a call to
+ ++ * create_mapping() may optimize static mappings by using individual
+ ++ * 1MB section mappings. This leaves the actual PMD potentially half
+ ++ * initialized if the top or bottom section entry isn't used, leaving it
+ ++ * open to problems if a subsequent ioremap() or vmalloc() tries to use
+ ++ * the virtual space left free by that unused section entry.
+ ++ *
+ ++ * Let's avoid the issue by inserting dummy vm entries covering the unused
+ ++ * PMD halves once the static mappings are in place.
+ ++ */
+ ++
+ ++static void __init pmd_empty_section_gap(unsigned long addr)
+ ++{
+ ++ struct vm_struct *vm;
+ ++
+ ++ vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+ ++ vm->addr = (void *)addr;
+ ++ vm->size = SECTION_SIZE;
+ ++ vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+ ++ vm->caller = pmd_empty_section_gap;
+ ++ vm_area_add_early(vm);
+ ++}
+ ++
+ ++static void __init fill_pmd_gaps(void)
+ ++{
+ ++ struct vm_struct *vm;
+ ++ unsigned long addr, next = 0;
+ ++ pmd_t *pmd;
+ ++
+ ++ /* we're still single threaded hence no lock needed here */
+ ++ for (vm = vmlist; vm; vm = vm->next) {
+ ++ if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+ ++ continue;
+ ++ addr = (unsigned long)vm->addr;
+ ++ if (addr < next)
+ ++ continue;
+ ++
+ ++ /*
+ ++ * Check if this vm starts on an odd section boundary.
+ ++ * If so and the first section entry for this PMD is free
+ ++ * then we block the corresponding virtual address.
+ ++ */
+ ++ if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+ ++ pmd = pmd_off_k(addr);
+ ++ if (pmd_none(*pmd))
+ ++ pmd_empty_section_gap(addr & PMD_MASK);
+ ++ }
+ ++
+ ++ /*
+ ++ * Then check if this vm ends on an odd section boundary.
+ ++ * If so and the second section entry for this PMD is empty
+ ++ * then we block the corresponding virtual address.
+ ++ */
+ ++ addr += vm->size;
+ ++ if ((addr & ~PMD_MASK) == SECTION_SIZE) {
+ ++ pmd = pmd_off_k(addr) + 1;
+ ++ if (pmd_none(*pmd))
+ ++ pmd_empty_section_gap(addr);
+ ++ }
+ ++
+ ++ /* no need to look at any vm entry until we hit the next PMD */
+ ++ next = (addr + PMD_SIZE - 1) & PMD_MASK;
+ ++ }
+ ++}
+ ++
+ ++#else
+ ++#define fill_pmd_gaps() do { } while (0)
+ ++#endif
+ ++
static void * __initdata vmalloc_min =
(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);
*/
if (mdesc->map_io)
mdesc->map_io();
+ ++ fill_pmd_gaps();
/*
* Finally flush the caches and tlb to ensure that we're in a
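
A concrete instance of the test in fill_pmd_gaps(): with 1MB sections in a 2MB Linux PMD, a static mapping that starts or ends on an odd megabyte leaves the PMD's other section entry empty, and that empty megabyte is what the dummy vm entry must cover. Worked through in standalone C with the 2-level constants assumed:

/* Worked example of the odd-section test: a 1MB static mapping at
 * 0xe0100000 occupies only the second section entry of its 2MB PMD,
 * so 0xe0000000 is the gap to reserve. */
#include <stdio.h>

#define SECTION_SIZE	0x00100000UL	/* 1MB section */
#define PMD_SIZE	0x00200000UL	/* 2MB Linux PMD */
#define PMD_MASK	(~(PMD_SIZE - 1))

int main(void)
{
	unsigned long addr = 0xe0100000UL;

	if ((addr & ~PMD_MASK) == SECTION_SIZE)
		printf("dummy vm entry needed at %#lx\n", addr & PMD_MASK);
	return 0;
}
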
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/pinctrl/consumer.h>
+++++ #include <linux/sizes.h>
#include <asm/io.h>
----- #include <asm/sizes.h>
#define UART_NR 14
struct uart_amba_port {
struct uart_port port;
struct clk *clk;
+ ++ /* Two optional pin states - default & sleep */
+ ++ struct pinctrl *pinctrl;
+ ++ struct pinctrl_state *pins_default;
+ ++ struct pinctrl_state *pins_sleep;
const struct vendor_data *vendor;
unsigned int dmacr; /* dma control reg */
unsigned int im; /* interrupt mask */
unsigned int cr;
int retval;
+ ++ /* Optionally enable pins to be muxed in and configured */
+ ++ if (!IS_ERR(uap->pins_default)) {
+ ++ retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ ++ if (retval)
+ ++ dev_err(port->dev,
+ ++ "could not set default pins\n");
+ ++ }
+ ++
retval = clk_prepare(uap->clk);
if (retval)
goto out;
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int cr;
+ ++ int retval;
/*
* disable all interrupts
*/
clk_disable(uap->clk);
clk_unprepare(uap->clk);
+ ++ /* Optionally let pins go into sleep states */
+ ++ if (!IS_ERR(uap->pins_sleep)) {
+ ++ retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
+ ++ if (retval)
+ ++ dev_err(port->dev,
+ ++ "could not set pins to sleep state\n");
+ ++ }
+ ++
if (uap->port.dev->platform_data) {
struct amba_pl011_data *plat;
if (!uap)
return -ENODEV;
+ ++ /* Allow pins to be muxed in and configured */
+ ++ if (!IS_ERR(uap->pins_default)) {
+ ++ ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
+ ++ if (ret)
+ ++ dev_err(uap->port.dev,
+ ++ "could not set default pins\n");
+ ++ }
+ ++
ret = clk_prepare(uap->clk);
if (ret)
return ret;
{
struct uart_amba_port *uap;
struct vendor_data *vendor = id->data;
- -- struct pinctrl *pinctrl;
void __iomem *base;
int i, ret;
goto free;
}
- -- pinctrl = devm_pinctrl_get_select_default(&dev->dev);
- -- if (IS_ERR(pinctrl)) {
- -- ret = PTR_ERR(pinctrl);
+ ++ uap->pinctrl = devm_pinctrl_get(&dev->dev);
+ ++ if (IS_ERR(uap->pinctrl)) {
+ ++ ret = PTR_ERR(uap->pinctrl);
goto unmap;
}
+ ++ uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
+ ++ PINCTRL_STATE_DEFAULT);
+ ++ if (IS_ERR(uap->pins_default))
+ ++ dev_err(&dev->dev, "could not get default pinstate\n");
+ ++
+ ++ uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
+ ++ PINCTRL_STATE_SLEEP);
+ ++ if (IS_ERR(uap->pins_sleep))
+ ++ dev_dbg(&dev->dev, "could not get sleep pinstate\n");
uap->clk = clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
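
Taken together, the pl011 hunks follow the standard pinctrl consumer lifecycle: get the handle once at probe, look up both states, and select them at power transitions (default at startup/resume, sleep at shutdown). Condensed into one hypothetical helper:

/* Condensed lifecycle sketch; a hypothetical helper, but the field
 * names match the pl011 hunks above and error handling is trimmed. */
static void pl011_pins_set(struct uart_amba_port *uap, bool sleeping)
{
	struct pinctrl_state *s = sleeping ? uap->pins_sleep
					   : uap->pins_default;

	if (!IS_ERR(s) && pinctrl_select_state(uap->pinctrl, s))
		dev_err(uap->port.dev, "could not set %s pins\n",
			sleeping ? "sleep" : "default");
}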