#include "sched.h"

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/kallsyms.h>
#include <linux/utsname.h>
#include <linux/security.h>
#include <linux/export.h>

unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
static struct autogroup autogroup_default;
static atomic_t autogroup_seq_nr;
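
/*
 * Attach the boot init task to the default autogroup, which simply
 * wraps the root task group. Runs once during scheduler bring-up.
 */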
void __init autogroup_init(struct task_struct *init_task)
{
	autogroup_default.tg = &root_task_group;
	kref_init(&autogroup_default.kref);
	init_rwsem(&autogroup_default.lock);
	init_task->signal->autogroup = &autogroup_default;
}
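
/* Release the autogroup struct once its backing task group is gone. */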
void autogroup_free(struct task_group *tg)
{
	kfree(tg->autogroup);
}

static inline void autogroup_destroy(struct kref *kref)
{
	struct autogroup *ag = container_of(kref, struct autogroup, kref);

#ifdef CONFIG_RT_GROUP_SCHED
	/* We've redirected RT tasks to the root task group... */
	ag->tg->rt_se = NULL;
	ag->tg->rt_rq = NULL;
#endif
	sched_offline_group(ag->tg);
	sched_destroy_group(ag->tg);
}

static inline void autogroup_kref_put(struct autogroup *ag)
{
	kref_put(&ag->kref, autogroup_destroy);
}

static inline struct autogroup *autogroup_kref_get(struct autogroup *ag)
{
	kref_get(&ag->kref);
	return ag;
}
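
/*
 * Grab a reference to p's autogroup under siglock. If the sighand is
 * already gone (task is exiting), fall back to the default autogroup.
 */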
static inline struct autogroup *autogroup_task_get(struct task_struct *p)
{
	struct autogroup *ag;
	unsigned long flags;

	if (!lock_task_sighand(p, &flags))
		return autogroup_kref_get(&autogroup_default);
	ag = autogroup_kref_get(p->signal->autogroup);
	unlock_task_sighand(p, &flags);
	return ag;
}
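
/*
 * Create a new autogroup backed by a fresh task group under the root
 * task group. On failure, return a reference to the default autogroup
 * instead, so callers always get something usable back.
 */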
static inline struct autogroup *autogroup_create(void)
{
	struct autogroup *ag = kzalloc(sizeof(*ag), GFP_KERNEL);
	struct task_group *tg;

	if (!ag)
		goto out_fail;
	tg = sched_create_group(&root_task_group);
	if (IS_ERR(tg))
		goto out_free;

	kref_init(&ag->kref);
	init_rwsem(&ag->lock);
	ag->id = atomic_inc_return(&autogroup_seq_nr);
	ag->tg = tg;
#ifdef CONFIG_RT_GROUP_SCHED
	/*
	 * Autogroup RT tasks are redirected to the root task group
	 * so we don't have to move tasks around upon policy change,
	 * or flail around trying to allocate bandwidth on the fly.
	 * A bandwidth exception in __sched_setscheduler() allows
	 * the policy change to proceed.
	 */
	free_rt_sched_group(tg);
	tg->rt_se = root_task_group.rt_se;
	tg->rt_rq = root_task_group.rt_rq;
#endif
	tg->autogroup = ag;
	sched_online_group(tg, &root_task_group);
	return ag;

out_free:
	kfree(ag);
out_fail:
	if (printk_ratelimit()) {
		printk(KERN_WARNING "autogroup_create: %s failure.\n",
			ag ? "sched_create_group()" : "kmalloc()");
	}
	return autogroup_kref_get(&autogroup_default);
}
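
/*
 * Decide whether p should run in its autogroup rather than tg: only
 * the root task group is ever overridden, and exiting tasks are left
 * where they are.
 */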
bool task_wants_autogroup(struct task_struct *p, struct task_group *tg)
{
	if (tg != &root_task_group)
		return false;
	/* We can only assume the task group can't go away on us if
	 * autogroup_move_group() can see us on ->thread_group list. */
	if (p->flags & PF_EXITING)
		return false;
	return true;
}
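
/*
 * Move p's entire thread group into the new autogroup and drop the
 * reference to the previous one. Runs under p's siglock.
 */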
static void autogroup_move_group(struct task_struct *p, struct autogroup *ag)
{
	struct autogroup *prev;
	struct task_struct *t;
	unsigned long flags;

	BUG_ON(!lock_task_sighand(p, &flags));
	prev = p->signal->autogroup;
	if (prev == ag) {
		unlock_task_sighand(p, &flags);
		return;
	}

	p->signal->autogroup = autogroup_kref_get(ag);
	if (!READ_ONCE(sysctl_sched_autogroup_enabled))
		goto out;
	for_each_thread(p, t)
		sched_move_task(t);
out:
	unlock_task_sighand(p, &flags);
	autogroup_kref_put(prev);
}

/* Allocates GFP_KERNEL, cannot be called under any spinlock */
void sched_autogroup_create_attach(struct task_struct *p)
{
	struct autogroup *ag = autogroup_create();

	autogroup_move_group(p, ag);
	/* drop extra reference added by autogroup_create() */
	autogroup_kref_put(ag);
}
EXPORT_SYMBOL(sched_autogroup_create_attach);

/* Cannot be called under siglock. Currently has no users */
void sched_autogroup_detach(struct task_struct *p)
{
	autogroup_move_group(p, &autogroup_default);
}
EXPORT_SYMBOL(sched_autogroup_detach);
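
/* A freshly forked signal_struct inherits the parent's autogroup. */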
void sched_autogroup_fork(struct signal_struct *sig)
{
	sig->autogroup = autogroup_task_get(current);
}
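
/* Drop the signal_struct's autogroup reference at exit. */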
void sched_autogroup_exit(struct signal_struct *sig)
{
	autogroup_kref_put(sig->autogroup);
}
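
/* Booting with "noautogroup" on the command line disables the feature. */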
static int __init setup_autogroup(char *str)
{
	sysctl_sched_autogroup_enabled = 0;
	return 1;
}
__setup("noautogroup", setup_autogroup);

#ifdef CONFIG_PROC_FS
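
/*
 * Backs writes to /proc/<pid>/autogroup: map the requested nice value
 * to a CFS share for the whole group. Rate-limited for unprivileged
 * callers, since reweighting a group takes global locks.
 */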
int proc_sched_autogroup_set_nice(struct task_struct *p, int nice)
{
	static unsigned long next = INITIAL_JIFFIES;
	struct autogroup *ag;
	int err;

	if (nice < MIN_NICE || nice > MAX_NICE)
		return -EINVAL;
	err = security_task_setnice(current, nice);
	if (err)
		return err;
	if (nice < 0 && !can_nice(current, nice))
		return -EPERM;
	/* this is a heavy operation taking global locks.. */
	if (!capable(CAP_SYS_ADMIN) && time_before(jiffies, next))
		return -EAGAIN;

	next = HZ / 10 + jiffies;
	ag = autogroup_task_get(p);
	down_write(&ag->lock);
	err = sched_group_set_shares(ag->tg, prio_to_weight[nice + 20]);
	if (!err)
		ag->nice = nice;
	up_write(&ag->lock);
	autogroup_kref_put(ag);
	return err;
}
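
/* Backs reads of /proc/<pid>/autogroup: report group id and nice. */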
void proc_sched_autogroup_show_task(struct task_struct *p, struct seq_file *m)
{
	struct autogroup *ag = autogroup_task_get(p);

	if (!task_group_is_autogroup(ag->tg))
		goto out;

	down_read(&ag->lock);
	seq_printf(m, "/autogroup-%ld nice %d\n", ag->id, ag->nice);
	up_read(&ag->lock);
out:
	autogroup_kref_put(ag);
}
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_SCHED_DEBUG
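/* Format "/autogroup-<id>" into buf for scheduler debug output. */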
int autogroup_path(struct task_group *tg, char *buf, int buflen)
{
	if (!task_group_is_autogroup(tg))
		return 0;

	return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
}
#endif /* CONFIG_SCHED_DEBUG */