From: Matt Redfearn
Date: Tue, 20 Sep 2016 08:47:26 +0000 (+0100)
Subject: MIPS: smp.c: Introduce mechanism for freeing and allocating IPIs
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=7688c5391038e60377275f078e6d7043dc115efc;p=linux-beck.git

MIPS: smp.c: Introduce mechanism for freeing and allocating IPIs

For the MIPS remote processor implementation, we need additional IPIs to
talk to the remote processor. Since the MIPS GIC reserves exactly the
number of IPI IRQs required by Linux for the number of VPs in the system,
reserving additional IPIs is not possible without first releasing some of
those resources.

This commit introduces mips_smp_ipi_allocate(), which allocates IPIs for a
given cpumask. It is called as normal with cpu_possible_mask at bootup to
initialise IPIs to all CPUs. mips_smp_ipi_free() may then be used to free
the IPIs of a subset of those CPUs so that their hardware resources can be
reused.

Signed-off-by: Matt Redfearn
Cc: Bjorn Andersson
Cc: Ohad Ben-Cohen
Cc: Thomas Gleixner
Cc: Lisa Parratt
Cc: James Hogan
Cc: Qais Yousef
Cc: Paul Burton
Cc: linux-mips@linux-mips.org
Cc: linux-remoteproc@vger.kernel.org
Cc: lisa.parratt@imgtec.com
Cc: linux-kernel@vger.kernel.org
Patchwork: https://patchwork.linux-mips.org/patch/14285/
Signed-off-by: Ralf Baechle
---

diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 8bc6c70a4030..060f23ff1817 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -85,6 +85,20 @@ static inline void __cpu_die(unsigned int cpu)
 extern void play_dead(void);
 #endif
 
+/*
+ * This function will set up the necessary IPIs for Linux to communicate
+ * with the CPUs in mask.
+ * Return 0 on success.
+ */
+int mips_smp_ipi_free(const struct cpumask *mask);
+
 static inline void arch_send_call_function_single_ipi(int cpu)
 {
 	extern struct plat_smp_ops *mp_ops;	/* private */
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index cb02df215365..0e131c9c39f6 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -231,7 +231,7 @@ static struct irqaction irq_call = {
 	.name		= "IPI call"
 };
 
-static __init void smp_ipi_init_one(unsigned int virq,
+static void smp_ipi_init_one(unsigned int virq,
 				    struct irqaction *action)
 {
 	int ret;
@@ -241,9 +241,11 @@ static __init void smp_ipi_init_one(unsigned int virq,
 	BUG_ON(ret);
 }
 
-static int __init mips_smp_ipi_init(void)
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
 {
-	unsigned int call_virq, sched_virq;
+	int virq;
 	struct irq_domain *ipidomain;
 	struct device_node *node;
 
@@ -270,16 +272,20 @@ static int __init mips_smp_ipi_init(void)
 	if (!ipidomain)
 		return 0;
 
-	call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!call_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!call_virq)
+		call_virq = virq;
 
-	sched_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
-	BUG_ON(!sched_virq);
+	virq = irq_reserve_ipi(ipidomain, mask);
+	BUG_ON(!virq);
+	if (!sched_virq)
+		sched_virq = virq;
 
 	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
 		int cpu;
 
-		for_each_cpu(cpu, cpu_possible_mask) {
+		for_each_cpu(cpu, mask) {
 			smp_ipi_init_one(call_virq + cpu, &irq_call);
 			smp_ipi_init_one(sched_virq + cpu, &irq_resched);
 		}
@@ -288,6 +294,45 @@ static int __init mips_smp_ipi_init(void)
 		smp_ipi_init_one(sched_virq, &irq_resched);
 	}
 
+	return 0;
+}
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+	struct irq_domain *ipidomain;
+	struct device_node *node;
+
+	node = of_irq_find_parent(of_root);
+	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+	/*
+	 * Some platforms have half DT setup. So if we found irq node but
+	 * didn't find an ipidomain, try to search for one that is not in the
+	 * DT.
+	 */
+	if (node && !ipidomain)
+		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+	BUG_ON(!ipidomain);
+
+	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+		int cpu;
+
+		for_each_cpu(cpu, mask) {
+			remove_irq(call_virq + cpu, &irq_call);
+			remove_irq(sched_virq + cpu, &irq_resched);
+		}
+	}
+	irq_destroy_ipi(call_virq, mask);
+	irq_destroy_ipi(sched_virq, mask);
+	return 0;
+}
+
+
+static int __init mips_smp_ipi_init(void)
+{
+	mips_smp_ipi_allocate(cpu_possible_mask);
+
 	call_desc = irq_to_desc(call_virq);
 	sched_desc = irq_to_desc(sched_virq);
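
Usage note (editorial, not part of the patch): a minimal sketch of how a caller
such as the MIPS remote processor code might pair the two new functions. The
helper names and the rproc_cpus mask are assumptions for illustration; only
mips_smp_ipi_allocate() and mips_smp_ipi_free() come from this patch.

#include <linux/cpumask.h>
#include <asm/smp.h>

/*
 * Hypothetical helpers, not part of this patch: rproc_cpus is assumed to
 * be a driver-owned cpumask naming the CPUs handed to remote firmware.
 */
static int example_release_linux_ipis(const struct cpumask *rproc_cpus)
{
	/*
	 * Free the Linux IPIs of the CPUs being given away so their GIC
	 * IPI resources become available for the remote processor.
	 */
	return mips_smp_ipi_free(rproc_cpus);
}

static int example_restore_linux_ipis(const struct cpumask *rproc_cpus)
{
	/* Re-allocate Linux IPIs when the CPUs are returned to Linux. */
	return mips_smp_ipi_allocate(rproc_cpus);
}

Both calls return 0 on success, per the new comments added to asm/smp.h, and
the mask passed to mips_smp_ipi_free() must be a subset of the CPUs whose IPIs
were previously configured.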