/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cpc.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>
int __cpu_number_map[NR_CPUS];          /* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];         /* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_running);
/*
 * A logical CPU mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

cpumask_t cpu_coherent_mask;

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

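/*
 * Record @cpu in the sibling maps: on MT systems every CPU sharing the
 * same package and core (i.e. the VPEs/TCs of one core) is a sibling;
 * otherwise the CPU is its own sole sibling.
 */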
static inline void set_cpu_sibling_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpu_data[cpu].package == cpu_data[i].package &&
                                    cpu_data[cpu].core == cpu_data[i].core) {
                                cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
                                cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
                cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

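/*
 * Record @cpu in the core maps: all CPUs within the same physical
 * package are marked as sharing it.
 */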
static inline void set_cpu_core_map(int cpu)
{
        int i;

        cpumask_set_cpu(cpu, &cpu_core_setup_map);

        for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
                        cpumask_set_cpu(i, &cpu_core_map[cpu]);
                        cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
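/*
 * Two passes: first collect one representative CPU (the first one seen
 * online) for each physical core into temp_foreign_map, then give each
 * CPU that set minus its own siblings, so a send over cpu_foreign_map
 * hits every other core exactly once.
 */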
void calculate_cpu_foreign_map(void)
{
        int i, k, core_present;
        cpumask_t temp_foreign_map;

        /* Re-calculate the mask */
        cpumask_clear(&temp_foreign_map);
        for_each_online_cpu(i) {
                core_present = 0;
                for_each_cpu(k, &temp_foreign_map)
                        if (cpu_data[i].package == cpu_data[k].package &&
                            cpu_data[i].core == cpu_data[k].core)
                                core_present = 1;
                if (!core_present)
                        cpumask_set_cpu(i, &temp_foreign_map);
        }

        for_each_online_cpu(i)
                cpumask_andnot(&cpu_foreign_map[i],
                               &temp_foreign_map, &cpu_sibling_map[i]);
}

struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(struct plat_smp_ops *ops)
{
        if (mp_ops)
                pr_warn("Overriding previously set SMP ops\n");

        mp_ops = ops;
}

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
        mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

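/*
 * Send an IPI of type @action to every CPU in @mask. If a Cluster
 * Power Controller (CPC) is present, cores that have powered down (and
 * thus dropped out of cpu_coherent_mask) are commanded to power back
 * up so that the interrupt is not lost.
 */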
void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
        unsigned long flags;
        unsigned int core;
        int cpu;

        local_irq_save(flags);

        switch (action) {
        case SMP_CALL_FUNCTION:
                __ipi_send_mask(call_desc, mask);
                break;

        case SMP_RESCHEDULE_YOURSELF:
                __ipi_send_mask(sched_desc, mask);
                break;

        default:
                BUG();
        }

        if (mips_cpc_present()) {
                for_each_cpu(cpu, mask) {
                        core = cpu_data[cpu].core;

                        if (core == current_cpu_data.core)
                                continue;

                        while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
                                mips_cm_lock_other(core, 0);
                                mips_cpc_lock_other(core);
                                write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
                                mips_cpc_unlock_other();
                                mips_cm_unlock_other();
                        }
                }
        }

        local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
        scheduler_ipi();

        return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
        generic_smp_call_function_interrupt();

        return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
        .handler        = ipi_resched_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI resched"
};

static struct irqaction irq_call = {
        .handler        = ipi_call_interrupt,
        .flags          = IRQF_PERCPU,
        .name           = "IPI call"
};

static void smp_ipi_init_one(unsigned int virq,
                             struct irqaction *action)
{
        int ret;

        irq_set_handler(virq, handle_percpu_irq);
        ret = setup_irq(virq, action);
        BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

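/*
 * Reserve the call-function and reschedule IPIs for the CPUs in @mask
 * and install their handlers. Only the first reservation of each kind
 * is recorded in call_virq/sched_virq, which later lookups and
 * teardown rely upon.
 */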
int mips_smp_ipi_allocate(const struct cpumask *mask)
{
        int virq;
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have only a partial DT setup, so if we found an
         * irq node but didn't find an ipidomain, search for one that is
         * not in the DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        /*
         * There are systems which use IPI IRQ domains, but only have one
         * registered when some runtime condition is met. For example a Malta
         * kernel may include support for GIC & CPU interrupt controller IPI
         * IRQ domains, but if run on a system with no GIC & no MT ASE then
         * neither will be supported or registered.
         *
         * We only have a problem if we're actually using multiple CPUs, so
         * fail loudly if that is the case. Otherwise simply return, skipping
         * IPI setup, if we're running with only a single CPU.
         */
        if (!ipidomain) {
                BUG_ON(num_present_cpus() > 1);
                return 0;
        }

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!call_virq)
                call_virq = virq;

        virq = irq_reserve_ipi(ipidomain, mask);
        BUG_ON(!virq);
        if (!sched_virq)
                sched_virq = virq;

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        smp_ipi_init_one(call_virq + cpu, &irq_call);
                        smp_ipi_init_one(sched_virq + cpu, &irq_resched);
                }
        } else {
                smp_ipi_init_one(call_virq, &irq_call);
                smp_ipi_init_one(sched_virq, &irq_resched);
        }

        return 0;
}

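/*
 * Tear down the IPIs that mips_smp_ipi_allocate() reserved for the
 * CPUs in @mask.
 */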
int mips_smp_ipi_free(const struct cpumask *mask)
{
        struct irq_domain *ipidomain;
        struct device_node *node;

        node = of_irq_find_parent(of_root);
        ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

        /*
         * Some platforms have only a partial DT setup, so if we found an
         * irq node but didn't find an ipidomain, search for one that is
         * not in the DT.
         */
        if (node && !ipidomain)
                ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

        BUG_ON(!ipidomain);

        if (irq_domain_is_ipi_per_cpu(ipidomain)) {
                int cpu;

                for_each_cpu(cpu, mask) {
                        remove_irq(call_virq + cpu, &irq_call);
                        remove_irq(sched_virq + cpu, &irq_resched);
                }
        }
        irq_destroy_ipi(call_virq, mask);
        irq_destroy_ipi(sched_virq, mask);
        return 0;
}

static int __init mips_smp_ipi_init(void)
{
        mips_smp_ipi_allocate(cpu_possible_mask);

        call_desc = irq_to_desc(call_virq);
        sched_desc = irq_to_desc(sched_virq);

        return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
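/*
 * Note the ordering near the end: the boot CPU is released via
 * cpu_running before the counters are synchronised, and the CPU is
 * marked online only after that. This is why __cpu_up() below must use
 * a timeout rather than waiting for the CPU to appear online.
 */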
asmlinkage void start_secondary(void)
{
        unsigned int cpu;

        cpu_probe();
        per_cpu_trap_init(false);
        mips_clockevent_init();
        mp_ops->init_secondary();
        cpu_report();
        maar_init();

        /*
         * XXX parity protection should be folded in here when it's converted
         * to an option instead of something based on .cputype
         */

        calibrate_delay();
        preempt_disable();
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;

        cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);

        complete(&cpu_running);
        synchronise_count_slave(cpu);

        set_cpu_online(cpu, true);

        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);

        calculate_cpu_foreign_map();

        /*
         * IRQs will be enabled in ->smp_finish(); enabling them too early
         * is dangerous.
         */
        WARN_ON_ONCE(!irqs_disabled());
        mp_ops->smp_finish();

        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
        /*
         * Remove this CPU:
         */

        set_cpu_online(smp_processor_id(), false);
        calculate_cpu_foreign_map();
        local_irq_disable();
        while (1);
}

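/*
 * Ask every other CPU to take itself offline. The final argument of 0
 * means we do not wait for the other CPUs to acknowledge the IPI.
 */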
void smp_send_stop(void)
{
        smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
        init_new_context(current, &init_mm);
        current_thread_info()->cpu = 0;
        mp_ops->prepare_cpus(max_cpus);
        set_cpu_sibling_map(0);
        set_cpu_core_map(0);
        calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
        init_cpu_present(cpu_possible_mask);
#endif
        cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
        mp_ops->boot_secondary(cpu, tidle);

        /*
         * We must check for timeout here, as the CPU will not be marked
         * online until the counters are synchronised.
         */
        if (!wait_for_completion_timeout(&cpu_running,
                                         msecs_to_jiffies(1000))) {
                pr_crit("CPU%u: failed to start\n", cpu);
                return -EIO;
        }

        synchronise_count_master(cpu);
        return 0;
}

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
        return 0;
}

static void flush_tlb_all_ipi(void *info)
{
        local_flush_tlb_all();
}

void flush_tlb_all(void)
{
        on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
        local_flush_tlb_mm((struct mm_struct *)mm);
}

/*
 * Special variant of smp_call_function for use by TLB functions:
 *
 *  o No return value
 *  o collapses to normal function call on UP kernels
 *  o collapses to normal function call on systems with a single shared
 *    primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
        smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
        preempt_disable();

        smp_on_other_tlbs(func, info);
        func(info);

        preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and the
 * tlb contexts on other cpus are invalidated to force a new context
 * allocation at switch_mm time, should the mm ever be used on other
 * cpus. For multithreaded address spaces, intercpu interrupts have to
 * be sent. Another case where intercpu interrupts are required is when
 * the target mm might be active on another cpu (eg debuggers doing the
 * flushes on behalf of debuggees, kswapd stealing pages from another
 * process etc).
 * Kanoj 07/00.
 */

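/*
 * flush_tlb_mm() implements both cases described above: if the mm may
 * be live on other CPUs it sends the IPIs, otherwise it just zeroes
 * the remote ASIDs so that switch_mm() will allocate fresh ones.
 */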
void flush_tlb_mm(struct mm_struct *mm)
{
        preempt_disable();

        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = 0;
                }
        }
        local_flush_tlb_mm(mm);

        preempt_enable();
}

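/* Argument bundle passed to the ranged TLB flush IPI handlers. */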
struct flush_tlb_data {
        struct vm_area_struct *vma;
        unsigned long addr1;
        unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = start,
                        .addr2 = end,
                };

                smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
        } else {
                unsigned int cpu;
                int exec = vma->vm_flags & VM_EXEC;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_range() will only fully flush icache if
                         * the VMA is executable; otherwise we must invalidate
                         * the ASID without making it appear to
                         * has_valid_asid() as if the mm has been completely
                         * unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, mm))
                                cpu_context(cpu, mm) = !exec;
                }
        }
        local_flush_tlb_range(vma, start, end);
        preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        struct flush_tlb_data fd = {
                .addr1 = start,
                .addr2 = end,
        };

        on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
        struct flush_tlb_data *fd = info;

        local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        preempt_disable();
        if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
                struct flush_tlb_data fd = {
                        .vma = vma,
                        .addr1 = page,
                };

                smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
        } else {
                unsigned int cpu;

                for_each_online_cpu(cpu) {
                        /*
                         * flush_cache_page() only does partial flushes, so we
                         * must invalidate the ASID without making it appear to
                         * has_valid_asid() as if the mm has been completely
                         * unused by that CPU.
                         */
                        if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
                                cpu_context(cpu, vma->vm_mm) = 1;
                }
        }
        local_flush_tlb_page(vma, page);
        preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
        unsigned long vaddr = (unsigned long) info;

        local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
        smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static DEFINE_PER_CPU(atomic_t, tick_broadcast_count);
static DEFINE_PER_CPU(struct call_single_data, tick_broadcast_csd);

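/*
 * tick_broadcast() may be invoked for a CPU again before the IPI from
 * a previous invocation has been handled. tick_broadcast_count ensures
 * at most one call_single_data is in flight per CPU, since a csd must
 * not be reused while it is still pending.
 */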
void tick_broadcast(const struct cpumask *mask)
{
        atomic_t *count;
        struct call_single_data *csd;
        int cpu;

        for_each_cpu(cpu, mask) {
                count = &per_cpu(tick_broadcast_count, cpu);
                csd = &per_cpu(tick_broadcast_csd, cpu);

                if (atomic_inc_return(count) == 1)
                        smp_call_function_single_async(cpu, csd);
        }
}

static void tick_broadcast_callee(void *info)
{
        int cpu = smp_processor_id();
        tick_receive_broadcast();
        atomic_set(&per_cpu(tick_broadcast_count, cpu), 0);
}

static int __init tick_broadcast_init(void)
{
        struct call_single_data *csd;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                csd = &per_cpu(tick_broadcast_csd, cpu);
                csd->func = tick_broadcast_callee;
        }

        return 0;
}
early_initcall(tick_broadcast_init);

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */