help
The base address of exception vectors.
-- bool "Patch physical to virtual translations at runtime"
--- bool "Patch physical to virtual translations at runtime (EXPERIMENTAL)"
--- depends on EXPERIMENTAL
+config ARM_PATCH_PHYS_VIRT
----- of physical memory is at a 16MB boundary, or theoretically 64K
----- for the MSM machine class.
+++++ + bool "Patch physical to virtual translations at runtime" if EMBEDDED
+++++ + default y
+ depends on !XIP_KERNEL && MMU
+ depends on !ARCH_REALVIEW || !SPARSEMEM
+ help
+ Patch phys-to-virt and virt-to-phys translation functions at
+ boot and module load time according to the position of the
+ kernel in system memory.
+
+ This can only be used with non-XIP MMU kernels where the base
----- config ARM_PATCH_PHYS_VIRT_16BIT
----- def_bool y
----- depends on ARM_PATCH_PHYS_VIRT && ARCH_MSM
----- help
----- This option extends the physical to virtual translation patching
----- to allow physical memory down to a theoretical minimum of 64K
----- boundaries.
+++++ + of physical memory is at a 16MB boundary.
+++++ +
+++++ + Only disable this option if you know that you do not require
+++++ + this feature (eg, building a kernel for a single machine) and
+++++ + you need to shrink the kernel to the minimal size.
+
+
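
As a quick illustration of what the option above buys, here is a minimal user-space sketch (not the kernel implementation) of the idea: the phys/virt conversion applies an offset discovered at boot instead of a compile-time constant, and the 16MB alignment requirement exists because that offset is patched into add/sub immediates. All names and addresses below are illustrative.

#include <stdint.h>
#include <stdio.h>

/* Offset between the physical RAM base and the kernel's virtual base,
 * filled in at "boot" rather than baked in at compile time. */
static uint32_t phys_virt_offset;

static uint32_t virt_to_phys(uint32_t va) { return va + phys_virt_offset; }
static uint32_t phys_to_virt(uint32_t pa) { return pa - phys_virt_offset; }

int main(void)
{
	uint32_t page_offset = 0xc0000000;	/* typical kernel PAGE_OFFSET */
	uint32_t phys_offset = 0x20000000;	/* RAM base found at boot, 16MB aligned */

	phys_virt_offset = phys_offset - page_offset;
	printf("virt 0x%08x -> phys 0x%08x\n",
	       page_offset + 0x1000, virt_to_phys(page_offset + 0x1000));
	printf("phys 0x%08x -> virt 0x%08x\n",
	       phys_offset + 0x1000, phys_to_virt(phys_offset + 0x1000));
	return 0;
}
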
source "init/Kconfig"
source "kernel/Kconfig.freezer"
bool "Atmel AT91"
select ARCH_REQUIRE_GPIOLIB
select HAVE_CLK
----- select ARM_PATCH_PHYS_VIRT if MMU
+ select CLKDEV_LOOKUP
help
This enables support for systems based on the Atmel AT91RM9200,
AT91SAM9 and AT91CAP9 processors.
visible impact on the overall performance or power consumption of the
processor.
+config ARM_ERRATA_751472
+ bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 751472 Cortex-A9 (prior
+ to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
+ completion of a following broadcasted operation if the second
+ operation is received by a CPU before the ICIALLUIS has completed,
+ potentially leading to corrupted entries in the cache or TLB.
+
+config ARM_ERRATA_753970
+ bool "ARM errata: cache sync operation may be faulty"
+ depends on CACHE_PL310
+ help
+ This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+
+ Under some conditions the effect of the cache sync operation on
+ the store buffer still remains when the operation completes.
+ This means that the store buffer is always asked to drain and
+ this prevents it from merging any further writes. The workaround
+ is to replace the normal offset of cache sync operation (0x730)
+ by another offset targeting an unmapped PL310 register 0x740.
+ This has the same effect as the cache sync operation: store buffer
+ drain and waiting for all buffers empty.
+
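
As a rough sketch of the mechanism described above (illustrative, not the code in this patch): the cache sync is just a write into the PL310 register space, and the workaround simply writes to the unmapped offset 0x740 instead of 0x730, which still drains the store buffer without leaving the sync marked as pending.

#define L2X0_CACHE_SYNC	0x730	/* normal cache sync register */
#define L2X0_DUMMY_REG	0x740	/* unmapped on PL310 r3p0, used by the workaround */

/* 'base' is the virtual address the PL310 registers are mapped at. */
static inline void pl310_cache_sync(volatile unsigned char *base, int has_erratum_753970)
{
	unsigned int offset = has_erratum_753970 ? L2X0_DUMMY_REG : L2X0_CACHE_SYNC;

	*(volatile unsigned int *)(base + offset) = 0;	/* any write triggers the drain */
}
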
+config ARM_ERRATA_754322
+ bool "ARM errata: possible faulty MMU translations following an ASID switch"
+ depends on CPU_V7
+ help
+ This option enables the workaround for the 754322 Cortex-A9 (r2p*,
+ r3p*) erratum. A speculative memory access may cause a page table walk
+ which starts prior to an ASID switch but completes afterwards. This
+ can populate the micro-TLB with a stale entry which may be hit with
+ the new ASID. This workaround places two dsb instructions in the mm
+ switching code so that no page table walks can cross the ASID switch.
+
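
The mm-switch sequence with the extra barriers looks roughly like the inline-assembly sketch below; the real fix lives in the ARMv7 context-switch assembly, so treat this as an illustration of where the dsb instructions sit rather than the patch itself.

static inline void switch_mm_asid(unsigned long ttb, unsigned int asid)
{
	asm volatile(
		"dsb\n\t"				/* finish table walks started under the old ASID */
		"mcr p15, 0, %0, c2, c0, 0\n\t"		/* write TTBR0 with the new table base */
		"dsb\n\t"				/* erratum 754322: make the TTBR write visible */
		"mcr p15, 0, %1, c13, c0, 1\n\t"	/* write CONTEXTIDR with the new ASID */
		"isb"
		: : "r" (ttb), "r" (asid) : "memory");
}
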
+config ARM_ERRATA_754327
+ bool "ARM errata: no automatic Store Buffer drain"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 754327 Cortex-A9 (prior to
+ r2p0) erratum. The Store Buffer does not have any automatic draining
+ mechanism and therefore a livelock may occur if an external agent
+ continuously polls a memory location waiting to observe an update.
+ This workaround defines cpu_relax() as smp_mb(), preventing correctly
+ written polling loops from denying visibility of updates to memory.
+
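
To make the livelock concrete, the sketch below shows the kind of polling loop that can trigger it; with the workaround, cpu_relax() expands to a full barrier (a dmb here), which gives the remote store a chance to become visible. The flag variable is a stand-in.

/* With erratum 754327 enabled, cpu_relax() becomes a full memory barrier. */
#define cpu_relax()	__asm__ __volatile__("dmb" ::: "memory")

static volatile int flag;	/* written by another CPU or external agent */

static void wait_for_flag(void)
{
	while (!flag)
		cpu_relax();	/* barrier lets the pending store drain */
}
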
+++++config ARM_ERRATA_364296
+++++ bool "ARM errata: Possible cache data corruption with hit-under-miss enabled"
+++++ depends on CPU_V6 && !SMP
+++++ help
+++++ This option enables the workaround for the 364296 ARM1136
+++++ r0p2 erratum (possible cache data corruption with
+++++ hit-under-miss enabled). It sets the undocumented bit 31 in
+++++ the auxiliary control register and the FI bit in the control
+++++ register, thus disabling hit-under-miss without putting the
+++++ processor into full low interrupt latency mode. ARM11MPCore
+++++ is not affected.
+++++
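
A hedged sketch of the register pokes the help text describes: set bit 31 of the ARM1136 auxiliary control register and the FI bit of the control register. The FI bit position used below (bit 21) is the usual ARM1136 encoding but should be taken as an assumption for illustration, not as part of this patch.

static void arm1136_erratum_364296(void)
{
	unsigned int aux, ctrl;

	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux));	/* auxiliary control register */
	aux |= 1u << 31;				/* undocumented bit 31: disable hit-under-miss */
	asm volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (aux));

	asm("mrc p15, 0, %0, c1, c0, 0" : "=r" (ctrl));	/* control register */
	ctrl |= 1u << 21;				/* FI bit (assumed position) */
	asm volatile("mcr p15, 0, %0, c1, c0, 0" : : "r" (ctrl));
}
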
endmenu
source "arch/arm/common/Kconfig"
If you don't know what to do here, say Y.
++++ ++config ARM_CPU_TOPOLOGY
++++ ++ bool "Support cpu topology definition"
++++ ++ depends on SMP && CPU_V7
++++ ++ default y
++++ ++ help
++++ ++ Support ARM cpu topology definition. The MPIDR register defines
++++ ++ affinity between processors which is then used to describe the cpu
++++ ++ topology of an ARM System.
++++ ++
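
The MPIDR affinity fields split into byte-sized levels, and a simplified view of how they can map onto the thread/core/cluster ids consumed by the SCHED_SMT and SCHED_MC options below is sketched here; the real topology code also consults the multithreading bit, so this only shows the basic idea.

#define MPIDR_AFF0(m)	((m) & 0xff)		/* lowest affinity level */
#define MPIDR_AFF1(m)	(((m) >> 8) & 0xff)
#define MPIDR_AFF2(m)	(((m) >> 16) & 0xff)

struct cpu_topo { int thread_id; int core_id; int socket_id; };

/* Simplified: treat Aff0 as thread, Aff1 as core, Aff2 as cluster. */
static void parse_mpidr(unsigned int mpidr, struct cpu_topo *t)
{
	t->thread_id = MPIDR_AFF0(mpidr);
	t->core_id   = MPIDR_AFF1(mpidr);
	t->socket_id = MPIDR_AFF2(mpidr);
}
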
++++ ++config SCHED_MC
++++ ++ bool "Multi-core scheduler support"
++++ ++ depends on ARM_CPU_TOPOLOGY
++++ ++ help
++++ ++ Multi-core scheduler support improves the CPU scheduler's decision
++++ ++ making when dealing with multi-core CPU chips at a cost of slightly
++++ ++ increased overhead in some places. If unsure say N here.
++++ ++
++++ ++config SCHED_SMT
++++ ++ bool "SMT scheduler support"
++++ ++ depends on ARM_CPU_TOPOLOGY
++++ ++ help
++++ ++ Improves the CPU scheduler's decision making when dealing with
++++ ++ MultiThreading at a cost of slightly increased overhead in some
++++ ++ places. If unsure say N here.
++++ ++
config HAVE_ARM_SCU
bool
- depends on SMP
help
This option enables support for the ARM system coherency unit
#define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6)
-#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
+
+#define L2X0_AUX_CTRL_MASK 0xc0000fff
++++ ++#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
++++ ++#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7
++++ ++#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
++++ ++#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3)
++++ ++#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
++++ ++#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6)
++++ ++#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
++++ ++#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9)
+#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16
+#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17
---- #define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x3 << 17)
+++++#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17)
+#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22
+#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26
+#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27
+#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28
+#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29
+#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30
+
++++ ++#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0
++++ ++#define L2X0_LATENCY_CTRL_RD_SHIFT 4
++++ ++#define L2X0_LATENCY_CTRL_WR_SHIFT 8
++++ ++
++++ ++#define L2X0_ADDR_FILTER_EN 1
++++ +
#ifndef __ASSEMBLY__
extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
++++ ++extern int l2x0_of_init(__u32 aux_val, __u32 aux_mask);
#endif
#endif
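
As a usage sketch (not part of the patch), a board file could combine the new shift/mask definitions above when calling l2x0_init(): aux_val carries the field values to set, while aux_mask preserves every bit except the fields being overridden. The way-size and latency values below are purely illustrative.

static void __init board_l2x0_setup(void __iomem *l2x0_base)
{
	u32 aux_val  = (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |		/* example way size */
		       (0x2 << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT);	/* example read latency */
	u32 aux_mask = ~(L2X0_AUX_CTRL_WAY_SIZE_MASK |
			 L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK);

	l2x0_init(l2x0_base, aux_val, aux_mask);
}
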
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
- if (!tlb->fullmm && tlb->range_end > 0)
- flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+ if (!tlb->fullmm)
+ tlb_flush(tlb);
+}
+
+static inline int __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ if (tlb_fast_mode(tlb)) {
+ free_page_and_swap_cache(page);
+ return 1; /* avoid calling tlb_flush_mmu */
+ }
+
+ tlb->pages[tlb->nr++] = page;
+ VM_BUG_ON(tlb->nr > tlb->max);
+ return tlb->max - tlb->nr;
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ if (!__tlb_remove_page(tlb, page))
+ tlb_flush_mmu(tlb);
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long addr)
+{
+ pgtable_page_dtor(pte);
+ tlb_add_flush(tlb, addr);
+ tlb_remove_page(tlb, pte);
}
-#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
++++++ #define pud_free_tlb(tlb, pudp, addr) pud_free((tlb)->mm, pudp)
#define tlb_migrate_finish(mm) do { } while (0)
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/smsc911x.h>
+#include <linux/usb/msm_hsusb.h>
+#include <linux/clkdev.h>
+++++ +#include <linux/memblock.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
+#include <asm/memory.h>
#include <asm/setup.h>
-- ----#include <mach/gpio.h>
#include <mach/board.h>
-#include <mach/memory.h>
#include <mach/msm_iomap.h>
#include <mach/dma.h>
extern struct sys_timer msm_timer;
+++++ +static void __init msm7x30_fixup(struct machine_desc *desc, struct tag *tag,
+++++ + char **cmdline, struct meminfo *mi)
+++++ +{
+++++ + for (; tag->hdr.size; tag = tag_next(tag))
+++++ + if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
+++++ + tag->u.mem.start = 0;
+++++ + tag->u.mem.size += SZ_2M;
+++++ + }
+++++ +}
+++++ +
+++++ +static void __init msm7x30_reserve(void)
+++++ +{
+++++ + memblock_remove(0x0, SZ_2M);
+++++ +}
+++++ +
+static int hsusb_phy_init_seq[] = {
+ 0x30, 0x32, /* Enable and set Pre-Emphasis Depth to 20% */
+ 0x02, 0x36, /* Disable CDR Auto Reset feature */
+ -1
+};
+
+static struct msm_otg_platform_data msm_otg_pdata = {
+ .phy_init_seq = hsusb_phy_init_seq,
+ .mode = USB_PERIPHERAL,
+ .otg_control = OTG_PHY_CONTROL,
+};
+
+struct msm_gpiomux_config msm_gpiomux_configs[GPIOMUX_NGPIOS] = {
+#ifdef CONFIG_SERIAL_MSM_CONSOLE
+ [49] = { /* UART2 RFR */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+ [50] = { /* UART2 CTS */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+ [51] = { /* UART2 RX */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+ [52] = { /* UART2 TX */
+ .suspended = GPIOMUX_DRV_2MA | GPIOMUX_PULL_DOWN |
+ GPIOMUX_FUNC_2 | GPIOMUX_VALID,
+ },
+#endif
+};
+
static struct platform_device *devices[] __initdata = {
#if defined(CONFIG_SERIAL_MSM) || defined(CONFIG_MSM_SERIAL_DEBUGGER)
&msm_device_uart2,
}
MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
+++++ + .fixup = msm7x30_fixup,
+++++ + .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
MACHINE_END
MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
+++++ + .fixup = msm7x30_fixup,
+++++ + .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,
MACHINE_END
MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
-#ifdef CONFIG_MSM_DEBUG_UART
-#endif
- .boot_params = PHYS_OFFSET + 0x100,
+ .boot_params = PLAT_PHYS_OFFSET + 0x100,
+++++ + .fixup = msm7x30_fixup,
+++++ + .reserve = msm7x30_reserve,
.map_io = msm7x30_map_io,
.init_irq = msm7x30_init_irq,
.init_machine = msm7x30_init,