- prefetch-instr : Instruction prefetch. Value: <0> (forcibly disable),
<1> (forcibly enable), property absent (retain settings set by
firmware)
+- arm,dynamic-clock-gating : L2 dynamic clock gating. Value: <0> (forcibly
+ disable), <1> (forcibly enable), property absent (OS-specific behavior,
+ preferably retain firmware settings)
+- arm,standby-mode : L2 standby mode enable. Value: <0> (forcibly disable),
+ <1> (forcibly enable), property absent (OS-specific behavior,
+ preferably retain firmware settings)
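A minimal device tree fragment showing the two new properties; the node name, address, and the other property values are illustrative only:

	cache-controller@fffef000 {
		compatible = "arm,pl310-cache";
		reg = <0xfffef000 0x1000>;
		cache-unified;
		cache-level = <2>;
		arm,dynamic-clock-gating = <1>;
		arm,standby-mode = <1>;
	};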
Example:
crashkernel=<range1>:<size1>[,<range2>:<size2>,...][@offset]
range=start-[end]
-Please note, on arm, the offset is required.
- crashkernel=<range1>:<size1>[,<range2>:<size2>,...]@offset
- range=start-[end]
-
- 'start' is inclusive and 'end' is exclusive.
-
For example:
crashkernel=512M-2G:64M,2G-:128M
on the memory consumption of the kdump system. In general this is not
dependent on the memory size of the production system.
- On arm, use "crashkernel=Y@X". Note that the start address of the kernel
- will be aligned to 128MiB (0x08000000), so if the start address is not then
- any space below the alignment point may be overwritten by the dump-capture kernel,
- which means it is possible that the vmcore is not that precise as expected.
+ On arm, the use of "crashkernel=Y@X" is no longer necessary; the
+ kernel will automatically locate the crash kernel image within the
+ first 512MB of RAM if X is not given.
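For instance, with the automatic placement described above, a plain

	crashkernel=64M

reserves 64MB for the dump-capture kernel at a base address chosen by the
kernel itself (aligned to 128MiB), while "crashkernel=64M@0x30000000" still
forces a fixed base if one is required. The size and address values here are
only illustrative.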
Load the Dump-capture Kernel
for broken drivers that don't call it.
skip_isa_align [X86] do not align io start addr, so can
handle more pci cards
- firmware [ARM] Do not re-enumerate the bus but instead
- just use the configuration from the
- bootloader. This is currently used on
- IXP2000 systems where the bus has to be
- configured a certain way for adjunct CPUs.
noearly [X86] Don't do any early type 1 scanning.
This might help on some broken boards which
machine check when some devices' config space
$(call if_changed,objcopy)
@$(kecho) ' Kernel: $@ is ready'
-PHONY += initrd
+PHONY += initrd install zinstall uinstall
initrd:
@test "$(INITRD_PHYS)" != "" || \
(echo This machine does not support INITRD; exit -1)
static inline void dma_mark_clean(void *addr, size_t size) { }
-extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
-
/**
* arm_dma_alloc - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
#define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x)))
#define pfn_to_kaddr(pfn) __va((phys_addr_t)(pfn) << PAGE_SHIFT)
-extern unsigned long (*arch_virt_to_idmap)(unsigned long x);
+extern long long arch_phys_to_idmap_offset;
/*
- * These are for systems that have a hardware interconnect supported alias of
- * physical memory for idmap purposes. Most cases should leave these
+ * These are for systems that have a hardware interconnect supported alias
+ * of physical memory for idmap purposes. Most cases should leave these
* untouched. Note: this can only return addresses less than 4GiB.
*/
+static inline bool arm_has_idmap_alias(void)
+{
+ return IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset != 0;
+}
+
+#define IDMAP_INVALID_ADDR ((u32)~0)
+
+static inline unsigned long phys_to_idmap(phys_addr_t addr)
+{
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset) {
+ addr += arch_phys_to_idmap_offset;
+ if (addr > (u32)~0)
+ addr = IDMAP_INVALID_ADDR;
+ }
+ return addr;
+}
+
+static inline phys_addr_t idmap_to_phys(unsigned long idmap)
+{
+ phys_addr_t addr = idmap;
+
+ if (IS_ENABLED(CONFIG_MMU) && arch_phys_to_idmap_offset)
+ addr -= arch_phys_to_idmap_offset;
+
+ return addr;
+}
+
static inline unsigned long __virt_to_idmap(unsigned long x)
{
- if (IS_ENABLED(CONFIG_MMU) && arch_virt_to_idmap)
- return arch_virt_to_idmap(x);
- else
- return __virt_to_phys(x);
+ return phys_to_idmap(__virt_to_phys(x));
}
#define virt_to_idmap(x) __virt_to_idmap((unsigned long)(x))
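A minimal sketch of how a platform with such an interconnect alias might
populate the new offset; the SoC name and addresses below are hypothetical,
and the Keystone 2 change later in this series does the equivalent from its
pv fixup:

	/* Hypothetical platform setup code -- a sketch only, not part of this patch. */
	#include <linux/init.h>
	#include <asm/memory.h>

	#define SOC_HIGH_PHYS_START	0x800000000ULL	/* RAM as the CPU addresses it */
	#define SOC_LOW_PHYS_START	0x080000000ULL	/* 32-bit alias on the interconnect */

	static void __init soc_setup_idmap_alias(void)
	{
		/*
		 * phys_to_idmap() adds this (negative) offset, so physical
		 * addresses in the high range are translated onto the
		 * sub-4GiB alias and arm_has_idmap_alias() returns true.
		 */
		arch_phys_to_idmap_offset = (long long)SOC_LOW_PHYS_START -
					    (long long)SOC_HIGH_PHYS_START;
	}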
if (!strcmp(str, "debug")) {
debug_pci = 1;
return NULL;
- } else if (!strcmp(str, "firmware")) {
- pci_add_flags(PCI_PROBE_ONLY);
- return NULL;
}
return str;
}
{
local_irq_disable();
smp_send_stop();
-
- local_irq_disable();
while (1);
}
/* Whoops - the platform was unable to reboot. Tell the user! */
printk("Reboot failed -- System halted\n");
- local_irq_disable();
while (1);
}
late_initcall(init_machine_late);
#ifdef CONFIG_KEXEC
+/*
+ * The crash region must be aligned to 128MB to avoid
+ * zImage relocating below the reserved region.
+ */
+#define CRASH_ALIGN (128 << 20)
+
static inline unsigned long long get_total_mem(void)
{
unsigned long total;
if (ret)
return;
+ if (crash_base <= 0) {
+ unsigned long long crash_max = idmap_to_phys((u32)~0);
+ crash_base = memblock_find_in_range(CRASH_ALIGN, crash_max,
+ crash_size, CRASH_ALIGN);
+ if (!crash_base) {
+ pr_err("crashkernel reservation failed - No suitable area found.\n");
+ return;
+ }
+ } else {
+ unsigned long long start;
+
+ start = memblock_find_in_range(crash_base,
+ crash_base + crash_size,
+ crash_size, SECTION_SIZE);
+ if (start != crash_base) {
+ pr_err("crashkernel reservation failed - memory is in use.\n");
+ return;
+ }
+ }
+
ret = memblock_reserve(crash_base, crash_size);
if (ret < 0) {
pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
-static unsigned long keystone_virt_to_idmap(unsigned long x)
-{
- return (phys_addr_t)(x) - CONFIG_PAGE_OFFSET + KEYSTONE_LOW_PHYS_START;
-}
-
static long long __init keystone_pv_fixup(void)
{
long long offset;
offset = KEYSTONE_HIGH_PHYS_START - KEYSTONE_LOW_PHYS_START;
/* Populate the arch idmap hook */
- arch_virt_to_idmap = keystone_virt_to_idmap;
+ arch_phys_to_idmap_offset = -offset;
return offset;
}
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
- /* r3p0 or later has power control register */
- if (rev >= L310_CACHE_ID_RTL_R3P0)
- l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
- L310_STNDBY_MODE_EN;
-
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
u32 filter[2] = { 0, 0 };
u32 assoc;
u32 prefetch;
+ u32 power;
u32 val;
int ret;
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
+
+ power = l2x0_saved_regs.pwr_ctrl |
+ L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN;
+
+ ret = of_property_read_u32(np, "arm,dynamic-clock-gating", &val);
+ if (!ret) {
+ if (!val)
+ power &= ~L310_DYNAMIC_CLK_GATING_EN;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF dynamic-clock-gating property value is missing or invalid\n");
+ }
+ ret = of_property_read_u32(np, "arm,standby-mode", &val);
+ if (!ret) {
+ if (!val)
+ power &= ~L310_STNDBY_MODE_EN;
+ } else if (ret != -EINVAL) {
+ pr_err("L2C-310 OF standby-mode property value is missing or invalid\n");
+ }
+
+ l2x0_saved_regs.pwr_ctrl = power;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
void __iomem *ctrl_base;
void __iomem *rev_base;
void __iomem *op_base;
+ void __iomem *way_ctrl_base;
u32 way_present_mask;
u32 way_locked_mask;
u32 nsets;
struct uniphier_cache_data *data,
u32 way_mask)
{
+ unsigned int cpu;
+
data->way_locked_mask = way_mask & data->way_present_mask;
- writel_relaxed(~data->way_locked_mask & data->way_present_mask,
- data->ctrl_base + UNIPHIER_SSCLPDAWCR);
+ for_each_possible_cpu(cpu)
+ writel_relaxed(~data->way_locked_mask & data->way_present_mask,
+ data->way_ctrl_base + 4 * cpu);
}
static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
goto err;
}
+ data->way_ctrl_base = data->ctrl_base + 0xc00;
+
if (*cache_level == 2) {
u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
/*
*/
if (revision <= 0x16)
data->range_op_max_size = (u32)1 << 22;
+
+ /*
+ * Unfortunately, the offset address of the active way control base
+ * varies from SoC to SoC.
+ */
+ switch (revision) {
+ case 0x11: /* sLD3 */
+ data->way_ctrl_base = data->ctrl_base + 0x870;
+ break;
+ case 0x12: /* LD4 */
+ case 0x16: /* sld8 */
+ data->way_ctrl_base = data->ctrl_base + 0x840;
+ break;
+ default:
+ break;
+ }
}
data->range_op_max_size -= data->line_size;
.sync_single_for_device = arm_dma_sync_single_for_device,
.sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
.sync_sg_for_device = arm_dma_sync_sg_for_device,
- .set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);
.get_sgtable = arm_dma_get_sgtable,
.map_page = arm_coherent_dma_map_page,
.map_sg = arm_dma_map_sg,
- .set_dma_mask = arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
}
EXPORT_SYMBOL(dma_supported);
-int arm_dma_set_mask(struct device *dev, u64 dma_mask)
-{
- if (!dev->dma_mask || !dma_supported(dev, dma_mask))
- return -EIO;
-
- *dev->dma_mask = dma_mask;
-
- return 0;
-}
-
#define PREALLOC_DMA_DEBUG_ENTRIES 4096
static int __init dma_debug_do_init(void)
.unmap_sg = arm_iommu_unmap_sg,
.sync_sg_for_cpu = arm_iommu_sync_sg_for_cpu,
.sync_sg_for_device = arm_iommu_sync_sg_for_device,
-
- .set_dma_mask = arm_dma_set_mask,
};
struct dma_map_ops iommu_coherent_ops = {
.map_sg = arm_coherent_iommu_map_sg,
.unmap_sg = arm_coherent_iommu_unmap_sg,
-
- .set_dma_mask = arm_dma_set_mask,
};
/**
* page tables.
*/
pgd_t *idmap_pgd;
-unsigned long (*arch_virt_to_idmap)(unsigned long x);
+long long arch_phys_to_idmap_offset;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
# Copyright (C) 2001 Russell King
#
-include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types
- @$(kecho) ' Generating $@'
- @mkdir -p $(dir $@)
- $(Q)$(AWK) -f $^ > $@ || { rm -f $@; /bin/false; }
+quiet_cmd_gen_mach = GEN $@
+ cmd_gen_mach = mkdir -p $(dir $@) && \
+ $(AWK) -f $(filter-out $(PHONY),$^) > $@ || \
+ { rm -f $@; /bin/false; }
+
+include/generated/mach-types.h: $(src)/gen-mach-types $(src)/mach-types FORCE
+ $(call if_changed,gen_mach)
kfree(d);
}
-/**
- * amba_device_add - add a previously allocated AMBA device structure
- * @dev: AMBA device allocated by amba_device_alloc
- * @parent: resource parent for this devices resources
- *
- * Claim the resource, and read the device cell ID if not already
- * initialized. Register the AMBA device with the Linux device
- * manager.
- */
-int amba_device_add(struct amba_device *dev, struct resource *parent)
+static int amba_device_try_add(struct amba_device *dev, struct resource *parent)
{
u32 size;
void __iomem *tmp;
goto err_release;
}
+ ret = dev_pm_domain_attach(&dev->dev, true);
+ if (ret == -EPROBE_DEFER) {
+ iounmap(tmp);
+ goto err_release;
+ }
+
ret = amba_get_enable_pclk(dev);
if (ret == 0) {
u32 pid, cid;
}
iounmap(tmp);
+ dev_pm_domain_detach(&dev->dev, true);
if (ret)
goto err_release;
err_out:
return ret;
}
+
+/*
+ * Registration of an AMBA device requires reading its pid and cid registers.
+ * To do this, the device must be turned on (if it is part of a power domain)
+ * and have its clocks enabled. However, in some cases those resources might
+ * not yet be available. Returning -EPROBE_DEFER is not a solution in such a
+ * case, because callers don't handle this special error code. Instead, such
+ * devices are added to a special list and their registration is retried from
+ * a periodic worker until all resources are available and registration
+ * succeeds.
+ */
+struct deferred_device {
+ struct amba_device *dev;
+ struct resource *parent;
+ struct list_head node;
+};
+
+static LIST_HEAD(deferred_devices);
+static DEFINE_MUTEX(deferred_devices_lock);
+
+static void amba_deferred_retry_func(struct work_struct *dummy);
+static DECLARE_DELAYED_WORK(deferred_retry_work, amba_deferred_retry_func);
+
+#define DEFERRED_DEVICE_TIMEOUT (msecs_to_jiffies(5 * 1000))
+
+static void amba_deferred_retry_func(struct work_struct *dummy)
+{
+ struct deferred_device *ddev, *tmp;
+
+ mutex_lock(&deferred_devices_lock);
+
+ list_for_each_entry_safe(ddev, tmp, &deferred_devices, node) {
+ int ret = amba_device_try_add(ddev->dev, ddev->parent);
+
+ if (ret == -EPROBE_DEFER)
+ continue;
+
+ list_del_init(&ddev->node);
+ kfree(ddev);
+ }
+
+ if (!list_empty(&deferred_devices))
+ schedule_delayed_work(&deferred_retry_work,
+ DEFERRED_DEVICE_TIMEOUT);
+
+ mutex_unlock(&deferred_devices_lock);
+}
+
+/**
+ * amba_device_add - add a previously allocated AMBA device structure
+ * @dev: AMBA device allocated by amba_device_alloc
+ * @parent: resource parent for this device's resources
+ *
+ * Claim the resource, and read the device cell ID if not already
+ * initialized. Register the AMBA device with the Linux device
+ * manager.
+ */
+int amba_device_add(struct amba_device *dev, struct resource *parent)
+{
+ int ret = amba_device_try_add(dev, parent);
+
+ if (ret == -EPROBE_DEFER) {
+ struct deferred_device *ddev;
+
+ ddev = kmalloc(sizeof(*ddev), GFP_KERNEL);
+ if (!ddev)
+ return -ENOMEM;
+
+ ddev->dev = dev;
+ ddev->parent = parent;
+ ret = 0;
+
+ mutex_lock(&deferred_devices_lock);
+
+ if (list_empty(&deferred_devices))
+ schedule_delayed_work(&deferred_retry_work,
+ DEFERRED_DEVICE_TIMEOUT);
+ list_add_tail(&ddev->node, &deferred_devices);
+
+ mutex_unlock(&deferred_devices_lock);
+ }
+ return ret;
+}
EXPORT_SYMBOL_GPL(amba_device_add);
static struct amba_device *
static int all_symbols = 0;
static int absolute_percpu = 0;
static char symbol_prefix_char = '\0';
-static unsigned long long kernel_start_addr = 0;
static int base_relative = 0;
int token_profit[0x10000];
static char *special_suffixes[] = {
"_veneer", /* arm */
+ "_from_arm", /* arm */
+ "_from_thumb", /* arm */
NULL };
int i;
char *sym_name = (char *)s->sym + 1;
-
- if (s->addr < kernel_start_addr)
- return 0;
-
/* skip prefix char */
if (symbol_prefix_char && *sym_name == symbol_prefix_char)
sym_name++;
if ((*p == '"' && *(p+2) == '"') || (*p == '\'' && *(p+2) == '\''))
p++;
symbol_prefix_char = *p;
- } else if (strncmp(argv[i], "--page-offset=", 14) == 0) {
- const char *p = &argv[i][14];
- kernel_start_addr = strtoull(p, NULL, 16);
} else if (strcmp(argv[i], "--base-relative") == 0)
base_relative = 1;
else
kallsymopt="${kallsymopt} --all-symbols"
fi
- if [ -n "${CONFIG_ARM}" ] && [ -z "${CONFIG_XIP_KERNEL}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
- kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
- fi
-
if [ -n "${CONFIG_KALLSYMS_ABSOLUTE_PERCPU}" ]; then
kallsymopt="${kallsymopt} --absolute-percpu"
fi