kernel/cpu.c (linux-beck.git, commit: "cpu/hotplug: Unpark smpboot threads from the state machine")
1 /* CPU control.
2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
3  *
4  * This code is licenced under the GPL.
5  */
6 #include <linux/proc_fs.h>
7 #include <linux/smp.h>
8 #include <linux/init.h>
9 #include <linux/notifier.h>
10 #include <linux/sched.h>
11 #include <linux/unistd.h>
12 #include <linux/cpu.h>
13 #include <linux/oom.h>
14 #include <linux/rcupdate.h>
15 #include <linux/export.h>
16 #include <linux/bug.h>
17 #include <linux/kthread.h>
18 #include <linux/stop_machine.h>
19 #include <linux/mutex.h>
20 #include <linux/gfp.h>
21 #include <linux/suspend.h>
22 #include <linux/lockdep.h>
23 #include <linux/tick.h>
24 #include <linux/irq.h>
25
26 #include <trace/events/power.h>
27 #define CREATE_TRACE_POINTS
28 #include <trace/events/cpuhp.h>
29
30 #include "smpboot.h"
31
32 /**
33  * cpuhp_cpu_state - Per cpu hotplug state storage
34  * @state:      The current cpu state
35  * @target:     The target state
36  */
37 struct cpuhp_cpu_state {
38         enum cpuhp_state        state;
39         enum cpuhp_state        target;
40 };
41
42 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state);
43
44 /**
45  * cpuhp_step - Hotplug state machine step
46  * @name:       Name of the step
47  * @startup:    Startup function of the step
48  * @teardown:   Teardown function of the step
49  * @skip_onerr: Do not invoke the functions on error rollback
50  *              Will go away once the notifiers are gone
51  * @cant_stop:  Bringup/teardown can't be stopped at this step
52  */
53 struct cpuhp_step {
54         const char      *name;
55         int             (*startup)(unsigned int cpu);
56         int             (*teardown)(unsigned int cpu);
57         bool            skip_onerr;
58         bool            cant_stop;
59 };
60
61 static DEFINE_MUTEX(cpuhp_state_mutex);
62 static struct cpuhp_step cpuhp_bp_states[];
63 static struct cpuhp_step cpuhp_ap_states[];
64
65 /**
66  * cpuhp_invoke_callback - Invoke the callbacks for a given state
67  * @cpu:        The cpu for which the callback should be invoked
68  * @step:       The step in the state machine
69  * @cb:         The callback function to invoke
70  *
71  * Called from cpu hotplug and from the state register machinery
72  */
73 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state step,
74                                  int (*cb)(unsigned int))
75 {
76         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
77         int ret = 0;
78
79         if (cb) {
80                 trace_cpuhp_enter(cpu, st->target, step, cb);
81                 ret = cb(cpu);
82                 trace_cpuhp_exit(cpu, st->state, step, ret);
83         }
84         return ret;
85 }
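/*
 * Illustrative only: the bringup/teardown paths further down drive the state
 * machine by stepping st->state towards st->target and handing each step's
 * callback to cpuhp_invoke_callback(), roughly:
 *
 *	while (st->state < st->target) {
 *		st->state++;
 *		step = cpuhp_bp_states + st->state;
 *		ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
 *	}
 */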
86
87 #ifdef CONFIG_SMP
88 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
89 static DEFINE_MUTEX(cpu_add_remove_lock);
90 bool cpuhp_tasks_frozen;
91 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
92
93 /*
94  * The following two APIs (cpu_maps_update_begin/done) must be used when
95  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
96  * The APIs cpu_notifier_register_begin/done() must be used to protect CPU
97  * hotplug callback (un)registration performed using __register_cpu_notifier()
98  * or __unregister_cpu_notifier().
99  */
100 void cpu_maps_update_begin(void)
101 {
102         mutex_lock(&cpu_add_remove_lock);
103 }
104 EXPORT_SYMBOL(cpu_notifier_register_begin);
105
106 void cpu_maps_update_done(void)
107 {
108         mutex_unlock(&cpu_add_remove_lock);
109 }
110 EXPORT_SYMBOL(cpu_notifier_register_done);
111
112 static RAW_NOTIFIER_HEAD(cpu_chain);
113
114 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
115  * Should always be manipulated under cpu_add_remove_lock
116  */
117 static int cpu_hotplug_disabled;
118
119 #ifdef CONFIG_HOTPLUG_CPU
120
121 static struct {
122         struct task_struct *active_writer;
123         /* wait queue to wake up the active_writer */
124         wait_queue_head_t wq;
125         /* verifies that no writer will get active while readers are active */
126         struct mutex lock;
127         /*
128          * Also blocks the new readers during
129          * an ongoing cpu hotplug operation.
130          */
131         atomic_t refcount;
132
133 #ifdef CONFIG_DEBUG_LOCK_ALLOC
134         struct lockdep_map dep_map;
135 #endif
136 } cpu_hotplug = {
137         .active_writer = NULL,
138         .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
139         .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
140 #ifdef CONFIG_DEBUG_LOCK_ALLOC
141         .dep_map = {.name = "cpu_hotplug.lock" },
142 #endif
143 };
144
145 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
146 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
147 #define cpuhp_lock_acquire_tryread() \
148                                   lock_map_acquire_tryread(&cpu_hotplug.dep_map)
149 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
150 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
151
152
153 void get_online_cpus(void)
154 {
155         might_sleep();
156         if (cpu_hotplug.active_writer == current)
157                 return;
158         cpuhp_lock_acquire_read();
159         mutex_lock(&cpu_hotplug.lock);
160         atomic_inc(&cpu_hotplug.refcount);
161         mutex_unlock(&cpu_hotplug.lock);
162 }
163 EXPORT_SYMBOL_GPL(get_online_cpus);
164
165 void put_online_cpus(void)
166 {
167         int refcount;
168
169         if (cpu_hotplug.active_writer == current)
170                 return;
171
172         refcount = atomic_dec_return(&cpu_hotplug.refcount);
173         if (WARN_ON(refcount < 0)) /* try to fix things up */
174                 atomic_inc(&cpu_hotplug.refcount);
175
176         if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
177                 wake_up(&cpu_hotplug.wq);
178
179         cpuhp_lock_release();
180
181 }
182 EXPORT_SYMBOL_GPL(put_online_cpus);
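/*
 * Illustrative reader section (not part of this file): code that walks the
 * online cpus and must not race with a cpu going away brackets the work
 * like this:
 *
 *	get_online_cpus();
 *	for_each_online_cpu(cpu)
 *		... per-cpu work that relies on the cpu staying online ...
 *	put_online_cpus();
 */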
183
184 /*
185  * This ensures that the hotplug operation can begin only when the
186  * refcount goes to zero.
187  *
188  * Note that during a cpu-hotplug operation, the new readers, if any,
189  * will be blocked by the cpu_hotplug.lock
190  *
191  * Since cpu_hotplug_begin() is always called after invoking
192  * cpu_maps_update_begin(), we can be sure that only one writer is active.
193  *
194  * Note that theoretically, there is a possibility of a livelock:
195  * - Refcount goes to zero, last reader wakes up the sleeping
196  *   writer.
197  * - Last reader unlocks the cpu_hotplug.lock.
198  * - A new reader arrives at this moment, bumps up the refcount.
199  * - The writer acquires the cpu_hotplug.lock, finds the refcount
200  *   non-zero and goes to sleep again.
201  *
202  * However, this is very difficult to achieve in practice since
203  * get_online_cpus() is not an API which is called all that often.
204  *
205  */
206 void cpu_hotplug_begin(void)
207 {
208         DEFINE_WAIT(wait);
209
210         cpu_hotplug.active_writer = current;
211         cpuhp_lock_acquire();
212
213         for (;;) {
214                 mutex_lock(&cpu_hotplug.lock);
215                 prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
216                 if (likely(!atomic_read(&cpu_hotplug.refcount)))
217                                 break;
218                 mutex_unlock(&cpu_hotplug.lock);
219                 schedule();
220         }
221         finish_wait(&cpu_hotplug.wq, &wait);
222 }
223
224 void cpu_hotplug_done(void)
225 {
226         cpu_hotplug.active_writer = NULL;
227         mutex_unlock(&cpu_hotplug.lock);
228         cpuhp_lock_release();
229 }
230
231 /*
232  * Wait for currently running CPU hotplug operations to complete (if any) and
233  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
234  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
235  * hotplug path before performing hotplug operations. So acquiring that lock
236  * guarantees mutual exclusion from any currently running hotplug operations.
237  */
238 void cpu_hotplug_disable(void)
239 {
240         cpu_maps_update_begin();
241         cpu_hotplug_disabled++;
242         cpu_maps_update_done();
243 }
244 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
245
246 void cpu_hotplug_enable(void)
247 {
248         cpu_maps_update_begin();
249         WARN_ON(--cpu_hotplug_disabled < 0);
250         cpu_maps_update_done();
251 }
252 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
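/*
 * Illustrative pairing (not part of this file): unlike get/put_online_cpus()
 * this does not block a hotplug writer, it simply makes cpu_up()/cpu_down()
 * return -EBUSY until re-enabled:
 *
 *	cpu_hotplug_disable();
 *	... no cpu can be plugged or unplugged here ...
 *	cpu_hotplug_enable();
 */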
253 #endif  /* CONFIG_HOTPLUG_CPU */
254
255 /* Need to know about CPUs going up/down? */
256 int register_cpu_notifier(struct notifier_block *nb)
257 {
258         int ret;
259         cpu_maps_update_begin();
260         ret = raw_notifier_chain_register(&cpu_chain, nb);
261         cpu_maps_update_done();
262         return ret;
263 }
264
265 int __register_cpu_notifier(struct notifier_block *nb)
266 {
267         return raw_notifier_chain_register(&cpu_chain, nb);
268 }
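/*
 * Illustrative only, names are made up: the legacy notifier interface
 * registered above is typically used like this:
 *
 *	static int mydrv_cpu_callback(struct notifier_block *nb,
 *				      unsigned long action, void *hcpu)
 *	{
 *		unsigned int cpu = (unsigned long)hcpu;
 *
 *		switch (action & ~CPU_TASKS_FROZEN) {
 *		case CPU_ONLINE:
 *			... set up per-cpu resources for cpu ...
 *			break;
 *		case CPU_DOWN_PREPARE:
 *			... tear them down again ...
 *			break;
 *		}
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block mydrv_cpu_nb = {
 *		.notifier_call = mydrv_cpu_callback,
 *	};
 *
 *	register_cpu_notifier(&mydrv_cpu_nb);
 */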
269
270 static int __cpu_notify(unsigned long val, unsigned int cpu, int nr_to_call,
271                         int *nr_calls)
272 {
273         unsigned long mod = cpuhp_tasks_frozen ? CPU_TASKS_FROZEN : 0;
274         void *hcpu = (void *)(long)cpu;
275
276         int ret;
277
278         ret = __raw_notifier_call_chain(&cpu_chain, val | mod, hcpu, nr_to_call,
279                                         nr_calls);
280
281         return notifier_to_errno(ret);
282 }
283
284 static int cpu_notify(unsigned long val, unsigned int cpu)
285 {
286         return __cpu_notify(val, cpu, -1, NULL);
287 }
288
289 /* Notifier wrappers for transitioning to state machine */
290 static int notify_prepare(unsigned int cpu)
291 {
292         int nr_calls = 0;
293         int ret;
294
295         ret = __cpu_notify(CPU_UP_PREPARE, cpu, -1, &nr_calls);
296         if (ret) {
297                 nr_calls--;
298                 printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
299                                 __func__, cpu);
300                 __cpu_notify(CPU_UP_CANCELED, cpu, nr_calls, NULL);
301         }
302         return ret;
303 }
304
305 static int notify_online(unsigned int cpu)
306 {
307         cpu_notify(CPU_ONLINE, cpu);
308         return 0;
309 }
310
311 static int notify_starting(unsigned int cpu)
312 {
313         cpu_notify(CPU_STARTING, cpu);
314         return 0;
315 }
316
317 static int bringup_cpu(unsigned int cpu)
318 {
319         struct task_struct *idle = idle_thread_get(cpu);
320         int ret;
321
322         /* Arch-specific enabling code. */
323         ret = __cpu_up(cpu, idle);
324         if (ret) {
325                 cpu_notify(CPU_UP_CANCELED, cpu);
326                 return ret;
327         }
328         BUG_ON(!cpu_online(cpu));
329         return 0;
330 }
331
332 #ifdef CONFIG_HOTPLUG_CPU
333 EXPORT_SYMBOL(register_cpu_notifier);
334 EXPORT_SYMBOL(__register_cpu_notifier);
335
336 void unregister_cpu_notifier(struct notifier_block *nb)
337 {
338         cpu_maps_update_begin();
339         raw_notifier_chain_unregister(&cpu_chain, nb);
340         cpu_maps_update_done();
341 }
342 EXPORT_SYMBOL(unregister_cpu_notifier);
343
344 void __unregister_cpu_notifier(struct notifier_block *nb)
345 {
346         raw_notifier_chain_unregister(&cpu_chain, nb);
347 }
348 EXPORT_SYMBOL(__unregister_cpu_notifier);
349
350 /**
351  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
352  * @cpu: a CPU id
353  *
354  * This function walks all processes, finds a valid mm struct for each one and
355  * then clears a corresponding bit in mm's cpumask.  While this all sounds
356  * trivial, there are various non-obvious corner cases, which this function
357  * tries to solve in a safe manner.
358  *
359  * Also note that the function uses a somewhat relaxed locking scheme, so it may
360  * be called only for an already offlined CPU.
361  */
362 void clear_tasks_mm_cpumask(int cpu)
363 {
364         struct task_struct *p;
365
366         /*
367          * This function is called after the cpu is taken down and marked
368          * offline, so it's not like new tasks will ever get this cpu set in
369          * their mm mask. -- Peter Zijlstra
370          * Thus, we may use rcu_read_lock() here, instead of grabbing
371          * full-fledged tasklist_lock.
372          */
373         WARN_ON(cpu_online(cpu));
374         rcu_read_lock();
375         for_each_process(p) {
376                 struct task_struct *t;
377
378                 /*
379                  * Main thread might exit, but other threads may still have
380                  * a valid mm. Find one.
381                  */
382                 t = find_lock_task_mm(p);
383                 if (!t)
384                         continue;
385                 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
386                 task_unlock(t);
387         }
388         rcu_read_unlock();
389 }
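/*
 * Sketch of a typical caller (arch specific, details vary): an architecture's
 * __cpu_disable() runs on the dying cpu after it has been marked offline, e.g.:
 *
 *	set_cpu_online(cpu, false);
 *	... migrate irqs away, flush caches/TLBs ...
 *	clear_tasks_mm_cpumask(cpu);
 */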
390
391 static inline void check_for_tasks(int dead_cpu)
392 {
393         struct task_struct *g, *p;
394
395         read_lock(&tasklist_lock);
396         for_each_process_thread(g, p) {
397                 if (!p->on_rq)
398                         continue;
399                 /*
400                  * We do the check with task_rq(p)->lock unlocked.
401                  * Order the reads so that we do not warn about a task
402                  * which was running on this cpu in the past and has
403                  * just been woken on another cpu.
404                  */
405                 rmb();
406                 if (task_cpu(p) != dead_cpu)
407                         continue;
408
409                 pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
410                         p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
411         }
412         read_unlock(&tasklist_lock);
413 }
414
415 static void cpu_notify_nofail(unsigned long val, unsigned int cpu)
416 {
417         BUG_ON(cpu_notify(val, cpu));
418 }
419
420 static int notify_down_prepare(unsigned int cpu)
421 {
422         int err, nr_calls = 0;
423
424         err = __cpu_notify(CPU_DOWN_PREPARE, cpu, -1, &nr_calls);
425         if (err) {
426                 nr_calls--;
427                 __cpu_notify(CPU_DOWN_FAILED, cpu, nr_calls, NULL);
428                 pr_warn("%s: attempt to take down CPU %u failed\n",
429                                 __func__, cpu);
430         }
431         return err;
432 }
433
434 static int notify_dying(unsigned int cpu)
435 {
436         cpu_notify(CPU_DYING, cpu);
437         return 0;
438 }
439
440 /* Take this CPU down. */
441 static int take_cpu_down(void *_param)
442 {
443         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
444         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
445         int err, cpu = smp_processor_id();
446
447         /* Ensure this CPU doesn't handle any more interrupts. */
448         err = __cpu_disable();
449         if (err < 0)
450                 return err;
451
452         /* Invoke the former CPU_DYING callbacks */
453         for (; st->state > target; st->state--) {
454                 struct cpuhp_step *step = cpuhp_ap_states + st->state;
455
456                 cpuhp_invoke_callback(cpu, st->state, step->teardown);
457         }
458         /* Give up timekeeping duties */
459         tick_handover_do_timer();
460         /* Park the stopper thread */
461         stop_machine_park(cpu);
462         return 0;
463 }
464
465 static int takedown_cpu(unsigned int cpu)
466 {
467         int err;
468
469         /*
470          * By now we've cleared cpu_active_mask, wait for all preempt-disabled
471          * and RCU users of this state to go away such that all new such users
472          * will observe it.
473          *
474          * For CONFIG_PREEMPT we have preemptible RCU and its sync_rcu() might
475          * not imply sync_sched(), so wait for both.
476          *
477          * Do the sync before parking smpboot threads to handle the RCU boost case.
478          */
479         if (IS_ENABLED(CONFIG_PREEMPT))
480                 synchronize_rcu_mult(call_rcu, call_rcu_sched);
481         else
482                 synchronize_rcu();
483
484         /*
485          * Prevent irq alloc/free while the dying cpu reorganizes the
486          * interrupt affinities.
487          */
488         irq_lock_sparse();
489
490         /*
491          * So now all preempt/rcu users must observe !cpu_active().
492          */
493         err = stop_machine(take_cpu_down, NULL, cpumask_of(cpu));
494         if (err) {
495                 /* CPU didn't die: tell everyone.  Can't complain. */
496                 cpu_notify_nofail(CPU_DOWN_FAILED, cpu);
497                 irq_unlock_sparse();
498                 return err;
499         }
500         BUG_ON(cpu_online(cpu));
501
502         /*
503          * The migration_call() CPU_DYING callback will have removed all
504          * runnable tasks from the cpu, there's only the idle task left now
505          * that the migration thread is done doing the stop_machine thing.
506          *
507          * Wait for the stop thread to go away.
508          */
509         while (!per_cpu(cpu_dead_idle, cpu))
510                 cpu_relax();
511         smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
512         per_cpu(cpu_dead_idle, cpu) = false;
513
514         /* Interrupts are moved away from the dying cpu, reenable alloc/free */
515         irq_unlock_sparse();
516
517         hotplug_cpu__broadcast_tick_pull(cpu);
518         /* This actually kills the CPU. */
519         __cpu_die(cpu);
520
521         tick_cleanup_dead_cpu(cpu);
522         return 0;
523 }
524
525 static int notify_dead(unsigned int cpu)
526 {
527         cpu_notify_nofail(CPU_DEAD, cpu);
528         check_for_tasks(cpu);
529         return 0;
530 }
531
532 #else
533 #define notify_down_prepare     NULL
534 #define takedown_cpu            NULL
535 #define notify_dead             NULL
536 #define notify_dying            NULL
537 #endif
538
539 #ifdef CONFIG_HOTPLUG_CPU
540 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
541 {
542         for (st->state++; st->state < st->target; st->state++) {
543                 struct cpuhp_step *step = cpuhp_bp_states + st->state;
544
545                 if (!step->skip_onerr)
546                         cpuhp_invoke_callback(cpu, st->state, step->startup);
547         }
548 }
549
550 /* Requires cpu_add_remove_lock to be held */
551 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
552                            enum cpuhp_state target)
553 {
554         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
555         int prev_state, ret = 0;
556         bool hasdied = false;
557
558         if (num_online_cpus() == 1)
559                 return -EBUSY;
560
561         if (!cpu_present(cpu))
562                 return -EINVAL;
563
564         cpu_hotplug_begin();
565
566         cpuhp_tasks_frozen = tasks_frozen;
567
568         prev_state = st->state;
569         st->target = target;
570         for (; st->state > st->target; st->state--) {
571                 struct cpuhp_step *step = cpuhp_bp_states + st->state;
572
573                 ret = cpuhp_invoke_callback(cpu, st->state, step->teardown);
574                 if (ret) {
575                         st->target = prev_state;
576                         undo_cpu_down(cpu, st);
577                         break;
578                 }
579         }
580         hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
581
582         cpu_hotplug_done();
583         /* This post dead nonsense must die */
584         if (!ret && hasdied)
585                 cpu_notify_nofail(CPU_POST_DEAD, cpu);
586         return ret;
587 }
588
589 static int do_cpu_down(unsigned int cpu, enum cpuhp_state target)
590 {
591         int err;
592
593         cpu_maps_update_begin();
594
595         if (cpu_hotplug_disabled) {
596                 err = -EBUSY;
597                 goto out;
598         }
599
600         err = _cpu_down(cpu, 0, target);
601
602 out:
603         cpu_maps_update_done();
604         return err;
605 }
606 int cpu_down(unsigned int cpu)
607 {
608         return do_cpu_down(cpu, CPUHP_OFFLINE);
609 }
610 EXPORT_SYMBOL(cpu_down);
611 #endif /*CONFIG_HOTPLUG_CPU*/
612
613 /**
614  * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
615  * @cpu: cpu that just started
616  *
617  * This function calls the cpu_chain notifiers with CPU_STARTING.
618  * It must be called by the arch code on the new cpu, before the new cpu
619  * enables interrupts and before the "boot" cpu returns from __cpu_up().
620  */
621 void notify_cpu_starting(unsigned int cpu)
622 {
623         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
624         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
625
626         while (st->state < target) {
627                 struct cpuhp_step *step;
628
629                 st->state++;
630                 step = cpuhp_ap_states + st->state;
631                 cpuhp_invoke_callback(cpu, st->state, step->startup);
632         }
633 }
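/*
 * Roughly what an architecture's secondary startup path does (sketch only,
 * function and helper names vary per arch):
 *
 *	unsigned int cpu = smp_processor_id();
 *	...
 *	notify_cpu_starting(cpu);
 *	set_cpu_online(cpu, true);
 *	local_irq_enable();
 *	cpu_startup_entry(CPUHP_ONLINE);
 */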
634
635 /*
636  * Called from the idle task. We need to set active here, so we can kick off
637  * the stopper thread.
638  */
639 static int cpuhp_set_cpu_active(unsigned int cpu)
640 {
641         /* The cpu is marked online, set it active now */
642         set_cpu_active(cpu, true);
643         /* Unpark the stopper thread */
644         stop_machine_unpark(cpu);
645         return 0;
646 }
647
648 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
649 {
650         for (st->state--; st->state > st->target; st->state--) {
651                 struct cpuhp_step *step = cpuhp_bp_states + st->state;
652
653                 if (!step->skip_onerr)
654                         cpuhp_invoke_callback(cpu, st->state, step->teardown);
655         }
656 }
657
658 /* Requires cpu_add_remove_lock to be held */
659 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
660 {
661         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
662         struct task_struct *idle;
663         int prev_state, ret = 0;
664
665         cpu_hotplug_begin();
666
667         if (!cpu_present(cpu)) {
668                 ret = -EINVAL;
669                 goto out;
670         }
671
672         /*
673          * The caller of do_cpu_up might have raced with another
674          * caller. Ignore it for now.
675          */
676         if (st->state >= target)
677                 goto out;
678
679         if (st->state == CPUHP_OFFLINE) {
680                 /* Let it fail before we try to bring the cpu up */
681                 idle = idle_thread_get(cpu);
682                 if (IS_ERR(idle)) {
683                         ret = PTR_ERR(idle);
684                         goto out;
685                 }
686         }
687
688         cpuhp_tasks_frozen = tasks_frozen;
689
690         prev_state = st->state;
691         st->target = target;
692         while (st->state < st->target) {
693                 struct cpuhp_step *step;
694
695                 st->state++;
696                 step = cpuhp_bp_states + st->state;
697                 ret = cpuhp_invoke_callback(cpu, st->state, step->startup);
698                 if (ret) {
699                         st->target = prev_state;
700                         undo_cpu_up(cpu, st);
701                         break;
702                 }
703         }
704 out:
705         cpu_hotplug_done();
706         return ret;
707 }
708
709 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
710 {
711         int err = 0;
712
713         if (!cpu_possible(cpu)) {
714                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
715                        cpu);
716 #if defined(CONFIG_IA64)
717                 pr_err("please check additional_cpus= boot parameter\n");
718 #endif
719                 return -EINVAL;
720         }
721
722         err = try_online_node(cpu_to_node(cpu));
723         if (err)
724                 return err;
725
726         cpu_maps_update_begin();
727
728         if (cpu_hotplug_disabled) {
729                 err = -EBUSY;
730                 goto out;
731         }
732
733         err = _cpu_up(cpu, 0, target);
734 out:
735         cpu_maps_update_done();
736         return err;
737 }
738
739 int cpu_up(unsigned int cpu)
740 {
741         return do_cpu_up(cpu, CPUHP_ONLINE);
742 }
743 EXPORT_SYMBOL_GPL(cpu_up);
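/*
 * Illustrative only: taking a cpu offline and back online from kernel code
 * (the caller must not hold get_online_cpus(), that would deadlock against
 * cpu_hotplug_begin()):
 *
 *	err = cpu_down(2);
 *	if (!err)
 *		err = cpu_up(2);
 */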
744
745 #ifdef CONFIG_PM_SLEEP_SMP
746 static cpumask_var_t frozen_cpus;
747
748 int disable_nonboot_cpus(void)
749 {
750         int cpu, first_cpu, error = 0;
751
752         cpu_maps_update_begin();
753         first_cpu = cpumask_first(cpu_online_mask);
754         /*
755          * We take down all of the non-boot CPUs in one shot to avoid races
756          * with userspace trying to use CPU hotplug at the same time.
757          */
758         cpumask_clear(frozen_cpus);
759
760         pr_info("Disabling non-boot CPUs ...\n");
761         for_each_online_cpu(cpu) {
762                 if (cpu == first_cpu)
763                         continue;
764                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
765                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
766                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
767                 if (!error)
768                         cpumask_set_cpu(cpu, frozen_cpus);
769                 else {
770                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
771                         break;
772                 }
773         }
774
775         if (!error)
776                 BUG_ON(num_online_cpus() > 1);
777         else
778                 pr_err("Non-boot CPUs are not disabled\n");
779
780         /*
781          * Make sure the CPUs won't be enabled by someone else. We need to do
782          * this even in case of failure as all disable_nonboot_cpus() users are
783          * supposed to do enable_nonboot_cpus() on the failure path.
784          */
785         cpu_hotplug_disabled++;
786
787         cpu_maps_update_done();
788         return error;
789 }
790
791 void __weak arch_enable_nonboot_cpus_begin(void)
792 {
793 }
794
795 void __weak arch_enable_nonboot_cpus_end(void)
796 {
797 }
798
799 void enable_nonboot_cpus(void)
800 {
801         int cpu, error;
802
803         /* Allow everyone to use the CPU hotplug again */
804         cpu_maps_update_begin();
805         WARN_ON(--cpu_hotplug_disabled < 0);
806         if (cpumask_empty(frozen_cpus))
807                 goto out;
808
809         pr_info("Enabling non-boot CPUs ...\n");
810
811         arch_enable_nonboot_cpus_begin();
812
813         for_each_cpu(cpu, frozen_cpus) {
814                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
815                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
816                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
817                 if (!error) {
818                         pr_info("CPU%d is up\n", cpu);
819                         continue;
820                 }
821                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
822         }
823
824         arch_enable_nonboot_cpus_end();
825
826         cpumask_clear(frozen_cpus);
827 out:
828         cpu_maps_update_done();
829 }
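/*
 * Sketch of the intended caller (the suspend/hibernate core, with user space
 * already frozen, so _cpu_down()/_cpu_up() run with tasks_frozen == 1):
 *
 *	error = disable_nonboot_cpus();
 *	if (!error) {
 *		... enter the sleep state with only the boot cpu online ...
 *	}
 *	enable_nonboot_cpus();
 */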
830
831 static int __init alloc_frozen_cpus(void)
832 {
833         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
834                 return -ENOMEM;
835         return 0;
836 }
837 core_initcall(alloc_frozen_cpus);
838
839 /*
840  * When callbacks for CPU hotplug notifications are being executed, we must
841  * ensure that the state of the system with respect to the tasks being frozen
842  * or not, as reported by the notification, remains unchanged *throughout the
843  * duration* of the execution of the callbacks.
844  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
845  *
846  * This synchronization is implemented by mutually excluding regular CPU
847  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
848  * Hibernate notifications.
849  */
850 static int
851 cpu_hotplug_pm_callback(struct notifier_block *nb,
852                         unsigned long action, void *ptr)
853 {
854         switch (action) {
855
856         case PM_SUSPEND_PREPARE:
857         case PM_HIBERNATION_PREPARE:
858                 cpu_hotplug_disable();
859                 break;
860
861         case PM_POST_SUSPEND:
862         case PM_POST_HIBERNATION:
863                 cpu_hotplug_enable();
864                 break;
865
866         default:
867                 return NOTIFY_DONE;
868         }
869
870         return NOTIFY_OK;
871 }
872
873
874 static int __init cpu_hotplug_pm_sync_init(void)
875 {
876         /*
877          * cpu_hotplug_pm_callback has higher priority than x86
878          * bsp_pm_callback which depends on cpu_hotplug_pm_callback
879          * to disable cpu hotplug to avoid cpu hotplug race.
880          */
881         pm_notifier(cpu_hotplug_pm_callback, 0);
882         return 0;
883 }
884 core_initcall(cpu_hotplug_pm_sync_init);
885
886 #endif /* CONFIG_PM_SLEEP_SMP */
887
888 #endif /* CONFIG_SMP */
889
890 /* Boot processor state steps */
891 static struct cpuhp_step cpuhp_bp_states[] = {
892         [CPUHP_OFFLINE] = {
893                 .name                   = "offline",
894                 .startup                = NULL,
895                 .teardown               = NULL,
896         },
897 #ifdef CONFIG_SMP
898         [CPUHP_CREATE_THREADS] = {
899                 .name                   = "threads:create",
900                 .startup                = smpboot_create_threads,
901                 .teardown               = NULL,
902                 .cant_stop              = true,
903         },
904         [CPUHP_NOTIFY_PREPARE] = {
905                 .name                   = "notify:prepare",
906                 .startup                = notify_prepare,
907                 .teardown               = notify_dead,
908                 .skip_onerr             = true,
909                 .cant_stop              = true,
910         },
911         [CPUHP_BRINGUP_CPU] = {
912                 .name                   = "cpu:bringup",
913                 .startup                = bringup_cpu,
914                 .teardown               = NULL,
915                 .cant_stop              = true,
916         },
917         [CPUHP_TEARDOWN_CPU] = {
918                 .name                   = "cpu:teardown",
919                 .startup                = NULL,
920                 .teardown               = takedown_cpu,
921                 .cant_stop              = true,
922         },
923         [CPUHP_CPU_SET_ACTIVE] = {
924                 .name                   = "cpu:active",
925                 .startup                = cpuhp_set_cpu_active,
926                 .teardown               = NULL,
927         },
928         [CPUHP_SMPBOOT_THREADS] = {
929                 .name                   = "smpboot:threads",
930                 .startup                = smpboot_unpark_threads,
931                 .teardown               = smpboot_park_threads,
932         },
933         [CPUHP_NOTIFY_ONLINE] = {
934                 .name                   = "notify:online",
935                 .startup                = notify_online,
936                 .teardown               = notify_down_prepare,
937                 .cant_stop              = true,
938         },
939 #endif
940         [CPUHP_ONLINE] = {
941                 .name                   = "online",
942                 .startup                = NULL,
943                 .teardown               = NULL,
944         },
945 };
946
947 /* Application processor state steps */
948 static struct cpuhp_step cpuhp_ap_states[] = {
949 #ifdef CONFIG_SMP
950         [CPUHP_AP_NOTIFY_STARTING] = {
951                 .name                   = "notify:starting",
952                 .startup                = notify_starting,
953                 .teardown               = notify_dying,
954                 .skip_onerr             = true,
955                 .cant_stop              = true,
956         },
957 #endif
958         [CPUHP_ONLINE] = {
959                 .name                   = "online",
960                 .startup                = NULL,
961                 .teardown               = NULL,
962         },
963 };
964
965 /* Sanity check for callbacks */
966 static int cpuhp_cb_check(enum cpuhp_state state)
967 {
968         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
969                 return -EINVAL;
970         return 0;
971 }
972
973 static bool cpuhp_is_ap_state(enum cpuhp_state state)
974 {
975         return (state > CPUHP_AP_OFFLINE && state < CPUHP_AP_ONLINE);
976 }
977
978 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
979 {
980         struct cpuhp_step *sp;
981
982         sp = cpuhp_is_ap_state(state) ? cpuhp_ap_states : cpuhp_bp_states;
983         return sp + state;
984 }
985
986 static void cpuhp_store_callbacks(enum cpuhp_state state,
987                                   const char *name,
988                                   int (*startup)(unsigned int cpu),
989                                   int (*teardown)(unsigned int cpu))
990 {
991         /* (Un)Install the callbacks for further cpu hotplug operations */
992         struct cpuhp_step *sp;
993
994         mutex_lock(&cpuhp_state_mutex);
995         sp = cpuhp_get_step(state);
996         sp->startup = startup;
997         sp->teardown = teardown;
998         sp->name = name;
999         mutex_unlock(&cpuhp_state_mutex);
1000 }
1001
1002 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1003 {
1004         return cpuhp_get_step(state)->teardown;
1005 }
1006
1007 /* Helper function to run callback on the target cpu */
1008 static void cpuhp_on_cpu_cb(void *__cb)
1009 {
1010         int (*cb)(unsigned int cpu) = __cb;
1011
1012         BUG_ON(cb(smp_processor_id()));
1013 }
1014
1015 /*
1016  * Call the startup/teardown function for a step either on the AP or
1017  * on the current CPU.
1018  */
1019 static int cpuhp_issue_call(int cpu, enum cpuhp_state state,
1020                             int (*cb)(unsigned int), bool bringup)
1021 {
1022         int ret;
1023
1024         if (!cb)
1025                 return 0;
1026
1027         /*
1028          * This invokes the callback directly for now. In a later step we
1029          * convert that to use cpuhp_invoke_callback().
1030          */
1031         if (cpuhp_is_ap_state(state)) {
1032                 /*
1033                  * Note that a function called on the AP is not
1034                  * allowed to fail.
1035                  */
1036                 if (cpu_online(cpu))
1037                         smp_call_function_single(cpu, cpuhp_on_cpu_cb, cb, 1);
1038                 return 0;
1039         }
1040
1041         /*
1042          * The non-AP-bound callbacks can fail on bringup. On teardown,
1043          * e.g. module removal, we crash for now.
1044          */
1045         ret = cb(cpu);
1046         BUG_ON(ret && !bringup);
1047         return ret;
1048 }
1049
1050 /*
1051  * Called from __cpuhp_setup_state on a recoverable failure.
1052  *
1053  * Note: The teardown callbacks for rollback are not allowed to fail!
1054  */
1055 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1056                                    int (*teardown)(unsigned int cpu))
1057 {
1058         int cpu;
1059
1060         if (!teardown)
1061                 return;
1062
1063         /* Roll back the already executed steps on the other cpus */
1064         for_each_present_cpu(cpu) {
1065                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1066                 int cpustate = st->state;
1067
1068                 if (cpu >= failedcpu)
1069                         break;
1070
1071                 /* Did we invoke the startup call on that cpu ? */
1072                 if (cpustate >= state)
1073                         cpuhp_issue_call(cpu, state, teardown, false);
1074         }
1075 }
1076
1077 /*
1078  * Returns a free slot for dynamic assignment in the ONLINE state range. The
1079  * states are protected by the cpuhp_state_mutex and an empty slot is identified
1080  * by having no name assigned.
1081  */
1082 static int cpuhp_reserve_state(enum cpuhp_state state)
1083 {
1084         enum cpuhp_state i;
1085
1086         mutex_lock(&cpuhp_state_mutex);
1087         for (i = CPUHP_ONLINE_DYN; i <= CPUHP_ONLINE_DYN_END; i++) {
1088                 if (cpuhp_bp_states[i].name)
1089                         continue;
1090
1091                 cpuhp_bp_states[i].name = "Reserved";
1092                 mutex_unlock(&cpuhp_state_mutex);
1093                 return i;
1094         }
1095         mutex_unlock(&cpuhp_state_mutex);
1096         WARN(1, "No more dynamic states available for CPU hotplug\n");
1097         return -ENOSPC;
1098 }
1099
1100 /**
1101  * __cpuhp_setup_state - Setup the callbacks for a hotplug machine state
1102  * @state:      The state to setup (CPUHP_ONLINE_DYN for a dynamic slot)
 * @name:       Name of the state
1103  * @invoke:     If true, the startup function is invoked for cpus where
1104  *              cpu state >= @state
1105  * @startup:    startup callback function
1106  * @teardown:   teardown callback function
1107  *
1108  * Returns 0 on success, a negative error code on failure. If @state was
 * CPUHP_ONLINE_DYN, the dynamically allocated state number is returned instead.
1109  */
1110 int __cpuhp_setup_state(enum cpuhp_state state,
1111                         const char *name, bool invoke,
1112                         int (*startup)(unsigned int cpu),
1113                         int (*teardown)(unsigned int cpu))
1114 {
1115         int cpu, ret = 0;
1116         int dyn_state = 0;
1117
1118         if (cpuhp_cb_check(state) || !name)
1119                 return -EINVAL;
1120
1121         get_online_cpus();
1122
1123         /* Dynamic state assignment is currently only supported for the ONLINE state */
1124         if (state == CPUHP_ONLINE_DYN) {
1125                 dyn_state = 1;
1126                 ret = cpuhp_reserve_state(state);
1127                 if (ret < 0)
1128                         goto out;
1129                 state = ret;
1130         }
1131
1132         cpuhp_store_callbacks(state, name, startup, teardown);
1133
1134         if (!invoke || !startup)
1135                 goto out;
1136
1137         /*
1138          * Try to call the startup callback for each present cpu
1139          * depending on the hotplug state of the cpu.
1140          */
1141         for_each_present_cpu(cpu) {
1142                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1143                 int cpustate = st->state;
1144
1145                 if (cpustate < state)
1146                         continue;
1147
1148                 ret = cpuhp_issue_call(cpu, state, startup, true);
1149                 if (ret) {
1150                         cpuhp_rollback_install(cpu, state, teardown);
1151                         cpuhp_store_callbacks(state, NULL, NULL, NULL);
1152                         goto out;
1153                 }
1154         }
1155 out:
1156         put_online_cpus();
1157         if (!ret && dyn_state)
1158                 return state;
1159         return ret;
1160 }
1161 EXPORT_SYMBOL(__cpuhp_setup_state);
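/*
 * Illustrative use of the registration API, names below are made up:
 *
 *	static int mydrv_online(unsigned int cpu)  { ...; return 0; }
 *	static int mydrv_offline(unsigned int cpu) { ...; return 0; }
 *
 *	int state = __cpuhp_setup_state(CPUHP_ONLINE_DYN, "mydrv:online", true,
 *					mydrv_online, mydrv_offline);
 *	if (state < 0)
 *		return state;
 *
 * With invoke == true, mydrv_online() also runs immediately for every present
 * cpu that is already at or beyond the new state. On module removal the state
 * is undone with __cpuhp_remove_state(state, true).
 */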
1162
1163 /**
1164  * __cpuhp_remove_state - Remove the callbacks for a hotplug machine state
1165  * @state:      The state to remove
1166  * @invoke:     If true, the teardown function is invoked for cpus where
1167  *              cpu state >= @state
1168  *
1169  * The teardown callback is currently not allowed to fail. Think
1170  * about module removal!
1171  */
1172 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
1173 {
1174         int (*teardown)(unsigned int cpu) = cpuhp_get_teardown_cb(state);
1175         int cpu;
1176
1177         BUG_ON(cpuhp_cb_check(state));
1178
1179         get_online_cpus();
1180
1181         if (!invoke || !teardown)
1182                 goto remove;
1183
1184         /*
1185          * Call the teardown callback for each present cpu depending
1186          * on the hotplug state of the cpu. This function is not
1187          * allowed to fail currently!
1188          */
1189         for_each_present_cpu(cpu) {
1190                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1191                 int cpustate = st->state;
1192
1193                 if (cpustate >= state)
1194                         cpuhp_issue_call(cpu, state, teardown, false);
1195         }
1196 remove:
1197         cpuhp_store_callbacks(state, NULL, NULL, NULL);
1198         put_online_cpus();
1199 }
1200 EXPORT_SYMBOL(__cpuhp_remove_state);
1201
1202 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
1203 static ssize_t show_cpuhp_state(struct device *dev,
1204                                 struct device_attribute *attr, char *buf)
1205 {
1206         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1207
1208         return sprintf(buf, "%d\n", st->state);
1209 }
1210 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
1211
1212 static ssize_t write_cpuhp_target(struct device *dev,
1213                                   struct device_attribute *attr,
1214                                   const char *buf, size_t count)
1215 {
1216         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1217         struct cpuhp_step *sp;
1218         int target, ret;
1219
1220         ret = kstrtoint(buf, 10, &target);
1221         if (ret)
1222                 return ret;
1223
1224 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
1225         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
1226                 return -EINVAL;
1227 #else
1228         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
1229                 return -EINVAL;
1230 #endif
1231
1232         ret = lock_device_hotplug_sysfs();
1233         if (ret)
1234                 return ret;
1235
1236         mutex_lock(&cpuhp_state_mutex);
1237         sp = cpuhp_get_step(target);
1238         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
1239         mutex_unlock(&cpuhp_state_mutex);
1240         if (ret)
1241                 return ret;
1242
1243         if (st->state < target)
1244                 ret = do_cpu_up(dev->id, target);
1245         else
1246                 ret = do_cpu_down(dev->id, target);
1247
1248         unlock_device_hotplug();
1249         return ret ? ret : count;
1250 }
1251
1252 static ssize_t show_cpuhp_target(struct device *dev,
1253                                  struct device_attribute *attr, char *buf)
1254 {
1255         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
1256
1257         return sprintf(buf, "%d\n", st->target);
1258 }
1259 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
1260
1261 static struct attribute *cpuhp_cpu_attrs[] = {
1262         &dev_attr_state.attr,
1263         &dev_attr_target.attr,
1264         NULL
1265 };
1266
1267 static struct attribute_group cpuhp_cpu_attr_group = {
1268         .attrs = cpuhp_cpu_attrs,
1269         .name = "hotplug",
1270         NULL
1271 };
1272
1273 static ssize_t show_cpuhp_states(struct device *dev,
1274                                  struct device_attribute *attr, char *buf)
1275 {
1276         ssize_t cur, res = 0;
1277         int i;
1278
1279         mutex_lock(&cpuhp_state_mutex);
1280         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
1281                 struct cpuhp_step *sp = cpuhp_get_step(i);
1282
1283                 if (sp->name) {
1284                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
1285                         buf += cur;
1286                         res += cur;
1287                 }
1288         }
1289         mutex_unlock(&cpuhp_state_mutex);
1290         return res;
1291 }
1292 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
1293
1294 static struct attribute *cpuhp_cpu_root_attrs[] = {
1295         &dev_attr_states.attr,
1296         NULL
1297 };
1298
1299 static struct attribute_group cpuhp_cpu_root_attr_group = {
1300         .attrs = cpuhp_cpu_root_attrs,
1301         .name = "hotplug",
1302         NULL
1303 };
1304
1305 static int __init cpuhp_sysfs_init(void)
1306 {
1307         int cpu, ret;
1308
1309         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
1310                                  &cpuhp_cpu_root_attr_group);
1311         if (ret)
1312                 return ret;
1313
1314         for_each_possible_cpu(cpu) {
1315                 struct device *dev = get_cpu_device(cpu);
1316
1317                 if (!dev)
1318                         continue;
1319                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
1320                 if (ret)
1321                         return ret;
1322         }
1323         return 0;
1324 }
1325 device_initcall(cpuhp_sysfs_init);
1326 #endif
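/*
 * Resulting sysfs layout (illustrative):
 *
 *	/sys/devices/system/cpu/hotplug/states		all known states, "<nr>: <name>"
 *	/sys/devices/system/cpu/cpuN/hotplug/state	current state of cpuN (read only)
 *	/sys/devices/system/cpu/cpuN/hotplug/target	write a state number to move cpuN
 *							towards it; writing 0 (CPUHP_OFFLINE)
 *							takes the cpu down. Unless
 *							CONFIG_CPU_HOTPLUG_STATE_CONTROL is
 *							set, only CPUHP_OFFLINE and
 *							CPUHP_ONLINE are accepted.
 */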
1327
1328 /*
1329  * cpu_bit_bitmap[] is a special, "compressed" data structure that
1330  * represents all NR_CPUS bits binary values of 1<<nr.
1331  *
1332  * It is used by cpumask_of() to get a constant address to a CPU
1333  * mask value that has a single bit set only.
1334  */
1335
1336 /* cpu_bit_bitmap[0] is empty - so we can back into it */
1337 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
1338 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
1339 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
1340 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
1341
1342 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
1343
1344         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
1345         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
1346 #if BITS_PER_LONG > 32
1347         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
1348         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
1349 #endif
1350 };
1351 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
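/*
 * Roughly how cpumask_of()/get_cpu_mask() in <linux/cpumask.h> use this
 * table (sketch):
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 *
 * E.g. for cpu 66 on a 64-bit kernel: row 1 + 66 % 64 == 3 has only bit 2 of
 * its first word set; backing the pointer up by 66 / 64 == 1 long makes that
 * word appear as word 1 of the returned mask, i.e. only bit 66 is set. The
 * words backed into are the all-zero tail of the preceding row, and the empty
 * row 0 exists so that backing up always stays inside the array.
 */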
1352
1353 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
1354 EXPORT_SYMBOL(cpu_all_bits);
1355
1356 #ifdef CONFIG_INIT_ALL_POSSIBLE
1357 struct cpumask __cpu_possible_mask __read_mostly
1358         = {CPU_BITS_ALL};
1359 #else
1360 struct cpumask __cpu_possible_mask __read_mostly;
1361 #endif
1362 EXPORT_SYMBOL(__cpu_possible_mask);
1363
1364 struct cpumask __cpu_online_mask __read_mostly;
1365 EXPORT_SYMBOL(__cpu_online_mask);
1366
1367 struct cpumask __cpu_present_mask __read_mostly;
1368 EXPORT_SYMBOL(__cpu_present_mask);
1369
1370 struct cpumask __cpu_active_mask __read_mostly;
1371 EXPORT_SYMBOL(__cpu_active_mask);
1372
1373 void init_cpu_present(const struct cpumask *src)
1374 {
1375         cpumask_copy(&__cpu_present_mask, src);
1376 }
1377
1378 void init_cpu_possible(const struct cpumask *src)
1379 {
1380         cpumask_copy(&__cpu_possible_mask, src);
1381 }
1382
1383 void init_cpu_online(const struct cpumask *src)
1384 {
1385         cpumask_copy(&__cpu_online_mask, src);
1386 }
1387
1388 /*
1389  * Activate the first processor.
1390  */
1391 void __init boot_cpu_init(void)
1392 {
1393         int cpu = smp_processor_id();
1394
1395         /* Mark the boot cpu "present", "online" etc for SMP and UP case */
1396         set_cpu_online(cpu, true);
1397         set_cpu_active(cpu, true);
1398         set_cpu_present(cpu, true);
1399         set_cpu_possible(cpu, true);
1400 }
1401
1402 /*
1403  * Must be called _AFTER_ setting up the per_cpu areas
1404  */
1405 void __init boot_cpu_state_init(void)
1406 {
1407         per_cpu_ptr(&cpuhp_state, smp_processor_id())->state = CPUHP_ONLINE;
1408 }