Each namespace should have its own __icmp(v6)_sk, so it should be
allocated dynamically. However, alloc_percpu does not fit this case, as it
implies an additional dereference for no benefit.
Allocate the pointer array just like __percpu_alloc_mask does and place
the pointers to struct sock into this array.
Signed-off-by: Denis V. Lunev <den@openvz.org>
Acked-by: Daniel Lezcano <dlezcano@fr.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
*
* On SMP we have one ICMP socket per-cpu.
*/
*
* On SMP we have one ICMP socket per-cpu.
*/
-static DEFINE_PER_CPU(struct sock *, __icmp_sk) = NULL;
-#define icmp_sk __get_cpu_var(__icmp_sk)
+static struct sock **__icmp_sk = NULL;
+#define icmp_sk (__icmp_sk[smp_processor_id()])
static inline int icmp_xmit_lock(struct sock *sk)
{
static inline int icmp_xmit_lock(struct sock *sk)
{
for_each_possible_cpu(i) {
struct sock *sk;
for_each_possible_cpu(i) {
struct sock *sk;
- sk = per_cpu(__icmp_sk, i);
if (sk == NULL)
continue;
if (sk == NULL)
continue;
- per_cpu(__icmp_sk, i) = NULL;
sock_release(sk->sk_socket);
}
sock_release(sk->sk_socket);
}
+ kfree(__icmp_sk);
+ __icmp_sk = NULL;
}
int __init icmp_init(void)
{
int i, err;
}
int __init icmp_init(void)
{
int i, err;
+ __icmp_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
+ if (__icmp_sk == NULL)
+ return -ENOMEM;
+
for_each_possible_cpu(i) {
struct sock *sk;
struct socket *sock;
for_each_possible_cpu(i) {
struct sock *sk;
struct socket *sock;
- per_cpu(__icmp_sk, i) = sk = sock->sk;
+ __icmp_sk[i] = sk = sock->sk;
sk->sk_allocation = GFP_ATOMIC;
/* Enough space for 2 64K ICMP packets, including
sk->sk_allocation = GFP_ATOMIC;
/* Enough space for 2 64K ICMP packets, including
*
* On SMP we have one ICMP socket per-cpu.
*/
*
* On SMP we have one ICMP socket per-cpu.
*/
-static DEFINE_PER_CPU(struct sock *, __icmpv6_sk) = NULL;
-#define icmpv6_sk __get_cpu_var(__icmpv6_sk)
+static struct sock **__icmpv6_sk = NULL;
+#define icmpv6_sk (__icmpv6_sk[smp_processor_id()])
static int icmpv6_rcv(struct sk_buff *skb);
static int icmpv6_rcv(struct sk_buff *skb);
struct sock *sk;
int err, i, j;
struct sock *sk;
int err, i, j;
+ __icmpv6_sk = kzalloc(nr_cpu_ids * sizeof(struct sock *), GFP_KERNEL);
+ if (__icmpv6_sk == NULL)
+ return -ENOMEM;
+
for_each_possible_cpu(i) {
struct socket *sock;
err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
for_each_possible_cpu(i) {
struct socket *sock;
err = sock_create_kern(PF_INET6, SOCK_RAW, IPPROTO_ICMPV6,
- per_cpu(__icmpv6_sk, i) = sk = sock->sk;
+ __icmpv6_sk[i] = sk = sock->sk;
sk->sk_allocation = GFP_ATOMIC;
/*
* Split off their lock-class, because sk->sk_dst_lock
sk->sk_allocation = GFP_ATOMIC;
/*
* Split off their lock-class, because sk->sk_dst_lock
for (j = 0; j < i; j++) {
if (!cpu_possible(j))
continue;
for (j = 0; j < i; j++) {
if (!cpu_possible(j))
continue;
- sock_release(per_cpu(__icmpv6_sk, j)->sk_socket);
+ sock_release(__icmpv6_sk[j]->sk_socket);
int i;
for_each_possible_cpu(i) {
int i;
for_each_possible_cpu(i) {
- sock_release(per_cpu(__icmpv6_sk, i)->sk_socket);
+ sock_release(__icmpv6_sk[i]->sk_socket);
}
inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}
}
inet6_del_protocol(&icmpv6_protocol, IPPROTO_ICMPV6);
}