#include <net/tcp.h>
#include <net/tcp_memcontrol.h>
#include <net/sock.h>
#include <linux/nsproxy.h>
#include <linux/memcontrol.h>
#include <linux/module.h>
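
/*
 * Glue between the memory controller and TCP memory accounting: each
 * non-root memcg gets a cg_proto whose res_counter starts out mirroring
 * the global sysctl_tcp_mem limits, exposed to userspace through the
 * kmem.tcp.* control files registered at the bottom of this file.
 */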

static void memcg_tcp_enter_memory_pressure(struct sock *sk)
{
	/* Unconditionally mark this memcg as under socket memory pressure. */
	sk->sk_cgrp->memory_pressure = 1;
}
EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
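
/*
 * memcg_tcp_enter_memory_pressure() is the per-memcg counterpart of
 * tcp_enter_memory_pressure(): it is invoked from the socket
 * memory-pressure path for sockets that sock_update_memcg() attached
 * to a memcg (sk->sk_cgrp). The generic socket code is expected to
 * clear the flag again once memory usage drops.
 */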

int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
	/*
	 * The root cgroup does not use res_counters, but rather
	 * relies on the data already collected by the networking
	 * subsystem.
	 */
	struct res_counter *res_parent = NULL;
	struct cg_proto *cg_proto, *parent_cg;
	struct mem_cgroup *parent = parent_mem_cgroup(memcg);

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return 0;

	cg_proto->sysctl_mem[0] = sysctl_tcp_mem[0];
	cg_proto->sysctl_mem[1] = sysctl_tcp_mem[1];
	cg_proto->sysctl_mem[2] = sysctl_tcp_mem[2];
	cg_proto->memory_pressure = 0;
	cg_proto->memcg = memcg;

	parent_cg = tcp_prot.proto_cgroup(parent);
	if (parent_cg)
		res_parent = &parent_cg->memory_allocated;

	res_counter_init(&cg_proto->memory_allocated, res_parent);
	percpu_counter_init(&cg_proto->sockets_allocated, 0);

	return 0;
}
EXPORT_SYMBOL(tcp_init_cgroup);
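
/*
 * Since tcp_init_cgroup() initializes each counter with its parent's
 * counter, the res_counter core charges the whole ancestor chain, so a
 * parent's kmem.tcp limit also bounds all of its descendants.
 */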

void tcp_destroy_cgroup(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return;

	percpu_counter_destroy(&cg_proto->sockets_allocated);
}
EXPORT_SYMBOL(tcp_destroy_cgroup);

static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
{
	struct cg_proto *cg_proto;
	int i;
	int ret;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return -EINVAL;

	if (val > RES_COUNTER_MAX)
		val = RES_COUNTER_MAX;

	ret = res_counter_set_limit(&cg_proto->memory_allocated, val);
	if (ret)
		return ret;

	for (i = 0; i < 3; i++)
		cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT,
						sysctl_tcp_mem[i]);

	if (val == RES_COUNTER_MAX)
		clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	else {
		/*
		 * The active bit needs to be written after the static_key
		 * update. This is what guarantees that the socket activation
		 * function is the last one to run. See sock_update_memcg() for
		 * details, and note that we don't mark any socket as belonging
		 * to this memcg until that flag is up.
		 *
		 * We need to do this, because static_keys will span multiple
		 * sites, but we can't control their order. If we mark a socket
		 * as accounted, but the accounting functions are not patched in
		 * yet, we'll lose accounting.
		 *
		 * We never race with the readers in sock_update_memcg(),
		 * because when this value changes, the code to process it is
		 * not patched in yet.
		 *
		 * The activated bit is used to guarantee that no two writers
		 * will do the update in the same memcg. Without that, we can't
		 * properly shut down the static key.
		 */
		if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
			static_key_slow_inc(&memcg_socket_limit_enabled);
		set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
	}

	return 0;
}
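
/*
 * Note that tcp_update_limit() only ever increments the static key (at
 * most once per memcg, thanks to the MEMCG_SOCK_ACTIVATED guard);
 * raising the limit back to RES_COUNTER_MAX clears MEMCG_SOCK_ACTIVE
 * but leaves the key alone. The matching static_key_slow_dec() is
 * expected to run at memcg teardown (see memcontrol.c).
 */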

static int tcp_cgroup_write(struct cgroup_subsys_state *css, struct cftype *cft,
			    const char *buffer)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	unsigned long long val;
	int ret = 0;

	switch (cft->private) {
	case RES_LIMIT:
		/* see memcontrol.c */
		ret = res_counter_memparse_write_strategy(buffer, &val);
		if (ret)
			break;
		ret = tcp_update_limit(memcg, val);
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}
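
/*
 * res_counter_memparse_write_strategy() understands memparse() suffixes
 * (k, M, G, ...) as well as the special string "-1", which it maps to
 * RES_COUNTER_MAX, i.e. "no limit".
 */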

static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return default_val;

	return res_counter_read_u64(&cg_proto->memory_allocated, type);
}

static u64 tcp_read_usage(struct mem_cgroup *memcg)
{
	struct cg_proto *cg_proto;

	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT;

	return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE);
}

static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	struct mem_cgroup *memcg = mem_cgroup_from_css(css);
	u64 val;

	switch (cft->private) {
	case RES_LIMIT:
		val = tcp_read_stat(memcg, RES_LIMIT, RES_COUNTER_MAX);
		break;
	case RES_USAGE:
		val = tcp_read_usage(memcg);
		break;
	case RES_FAILCNT:
	case RES_MAX_USAGE:
		val = tcp_read_stat(memcg, cft->private, 0);
		break;
	default:
		BUG();
	}
	return val;
}

static int tcp_cgroup_reset(struct cgroup_subsys_state *css, unsigned int event)
{
	struct mem_cgroup *memcg;
	struct cg_proto *cg_proto;

	memcg = mem_cgroup_from_css(css);
	cg_proto = tcp_prot.proto_cgroup(memcg);
	if (!cg_proto)
		return 0;

	switch (event) {
	case RES_MAX_USAGE:
		res_counter_reset_max(&cg_proto->memory_allocated);
		break;
	case RES_FAILCNT:
		res_counter_reset_failcnt(&cg_proto->memory_allocated);
		break;
	}

	return 0;
}

static struct cftype tcp_files[] = {
	{
		.name = "kmem.tcp.limit_in_bytes",
		.write_string = tcp_cgroup_write,
		.read_u64 = tcp_cgroup_read,
		.private = RES_LIMIT,
	},
	{
		.name = "kmem.tcp.usage_in_bytes",
		.read_u64 = tcp_cgroup_read,
		.private = RES_USAGE,
	},
	{
		.name = "kmem.tcp.failcnt",
		.private = RES_FAILCNT,
		.trigger = tcp_cgroup_reset,
		.read_u64 = tcp_cgroup_read,
	},
	{
		.name = "kmem.tcp.max_usage_in_bytes",
		.private = RES_MAX_USAGE,
		.trigger = tcp_cgroup_reset,
		.read_u64 = tcp_cgroup_read,
	},
	{ }	/* terminate */
};
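
/*
 * Illustrative userspace view (paths depend on where the memory
 * controller is mounted, and "grp" stands in for any cgroup): the
 * cgroup core prefixes each name with the subsystem, so the files
 * above appear as, e.g.:
 *
 *	echo 16M > /sys/fs/cgroup/memory/grp/memory.kmem.tcp.limit_in_bytes
 *	cat /sys/fs/cgroup/memory/grp/memory.kmem.tcp.usage_in_bytes
 *	echo 0 > /sys/fs/cgroup/memory/grp/memory.kmem.tcp.failcnt
 *
 * The first write lands in tcp_cgroup_write() -> tcp_update_limit();
 * the last one is a reset trigger handled by tcp_cgroup_reset().
 */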

static int __init tcp_memcontrol_init(void)
{
	WARN_ON(cgroup_add_cftypes(&mem_cgroup_subsys, tcp_files));
	return 0;
}
__initcall(tcp_memcontrol_init);
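
/*
 * tcp_memcontrol_init() runs at boot as a plain initcall; a failure to
 * register the files is reported with a warning rather than failing the
 * boot, since the rest of the memory controller works without the
 * kmem.tcp.* knobs.
 */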