tcp/dccp: fix race at listener dismantle phase
author Eric Dumazet <edumazet@google.com>
Wed, 14 Oct 2015 18:16:28 +0000 (11:16 -0700)
committer David S. Miller <davem@davemloft.net>
Fri, 16 Oct 2015 07:52:19 +0000 (00:52 -0700)
Under stress, a close() on a listener can trigger the
WARN_ON(sk->sk_ack_backlog) in inet_csk_listen_stop().

We need to test whether the listener is still active before
queueing a child in inet_csk_reqsk_queue_add().

Create a common inet_child_forget() helper, and use it
from inet_csk_reqsk_queue_add() and inet_csk_listen_stop().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
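
The essence of the fix is that the "is the listener still live?" check and the enqueue of the child now happen under the same lock that serializes the accept queue (rskq_lock), so a concurrent close() can no longer slip in between the two. A minimal user-space sketch of that locking pattern follows; it is purely illustrative, every identifier in it (toy_listener, toy_queue_add, toy_child_forget, toy_listen_stop) is hypothetical, and none of it is part of the patch itself.

/*
 * Purely illustrative user-space model, not part of the patch.
 * It shows the pattern the fix applies: the "listener still live?"
 * test and the enqueue are done under the same lock, so close()
 * cannot sneak in between them and strand a child on a dead
 * listener's queue.  All toy_* names are hypothetical.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct toy_child { struct toy_child *next; };

struct toy_listener {
    pthread_mutex_t lock;           /* stands in for rskq_lock          */
    bool listening;                 /* stands in for the sk_state test  */
    struct toy_child *head, *tail;
    int backlog;                    /* stands in for sk_ack_backlog     */
};

/* Rough analogue of inet_child_forget(): undo the work for a child
 * that will never be accepted. */
static void toy_child_forget(struct toy_child *child)
{
    free(child);
}

/* Rough analogue of the new inet_csk_reqsk_queue_add(): the listener
 * state is tested inside the queue lock, so it cannot change between
 * the test and the enqueue. */
static void toy_queue_add(struct toy_listener *l, struct toy_child *child)
{
    pthread_mutex_lock(&l->lock);
    if (!l->listening) {
        toy_child_forget(child);    /* listener already closing: drop  */
    } else {
        child->next = NULL;
        if (!l->head)
            l->head = child;
        else
            l->tail->next = child;
        l->tail = child;
        l->backlog++;
    }
    pthread_mutex_unlock(&l->lock);
}

/* Rough analogue of inet_csk_listen_stop(): mark the listener dead
 * under the lock, then drain anything queued before the flag flipped. */
static void toy_listen_stop(struct toy_listener *l)
{
    struct toy_child *child;

    pthread_mutex_lock(&l->lock);
    l->listening = false;
    while ((child = l->head) != NULL) {
        l->head = child->next;
        l->backlog--;
        toy_child_forget(child);
    }
    l->tail = NULL;
    pthread_mutex_unlock(&l->lock);
}

int main(void)
{
    struct toy_listener l = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .listening = true,
    };
    struct toy_child *c = calloc(1, sizeof(*c));

    if (!c)
        return 1;
    toy_listen_stop(&l);            /* listener closes first...        */
    toy_queue_add(&l, c);           /* ...so the late child is dropped */
    printf("backlog after close: %d\n", l.backlog);      /* prints 0   */
    return 0;
}

If toy_queue_add() enqueued without re-checking l->listening under the lock (as the old inline inet_csk_reqsk_queue_add() effectively did), a racing toy_listen_stop() could finish its drain before the late child arrived and leave the backlog counter nonzero, which is the user-space analogue of the WARN_ON(sk->sk_ack_backlog) this patch addresses.
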
include/net/inet_connection_sock.h
include/net/request_sock.h
net/ipv4/inet_connection_sock.c

include/net/inet_connection_sock.h
index e84ea9f2498ffeba25c997ae8f2436b717ec5906..63615709839d1c958142e0c4ad27512aa570a1da 100644
@@ -268,13 +268,8 @@ struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req);
 
-static inline void inet_csk_reqsk_queue_add(struct sock *sk,
-                                           struct request_sock *req,
-                                           struct sock *child)
-{
-       reqsk_queue_add(&inet_csk(sk)->icsk_accept_queue, req, sk, child);
-}
-
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+                             struct sock *child);
 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout);
 
include/net/request_sock.h
index 2e73748956d590ffe9cfd536dea119c189ee197f..a0dde04eb178015ac97d17a6243cbe50cb83a885 100644
@@ -186,25 +186,6 @@ static inline bool reqsk_queue_empty(const struct request_sock_queue *queue)
        return queue->rskq_accept_head == NULL;
 }
 
-static inline void reqsk_queue_add(struct request_sock_queue *queue,
-                                  struct request_sock *req,
-                                  struct sock *parent,
-                                  struct sock *child)
-{
-       spin_lock(&queue->rskq_lock);
-       req->sk = child;
-       sk_acceptq_added(parent);
-
-       if (queue->rskq_accept_head == NULL)
-               queue->rskq_accept_head = req;
-       else
-               queue->rskq_accept_tail->dl_next = req;
-
-       queue->rskq_accept_tail = req;
-       req->dl_next = NULL;
-       spin_unlock(&queue->rskq_lock);
-}
-
 static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue *queue,
                                                      struct sock *parent)
 {
net/ipv4/inet_connection_sock.c
index b85c720956a996210dc070cdbeb05d20b6e773ee..8430bc8ccd58c5f666cb0e5d0abeb2ca21e0156e 100644
@@ -764,6 +764,53 @@ int inet_csk_listen_start(struct sock *sk, int backlog)
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_start);
 
+static void inet_child_forget(struct sock *sk, struct request_sock *req,
+                             struct sock *child)
+{
+       sk->sk_prot->disconnect(child, O_NONBLOCK);
+
+       sock_orphan(child);
+
+       percpu_counter_inc(sk->sk_prot->orphan_count);
+
+       if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
+               BUG_ON(tcp_sk(child)->fastopen_rsk != req);
+               BUG_ON(sk != req->rsk_listener);
+
+               /* Paranoid, to prevent race condition if
+                * an inbound pkt destined for child is
+                * blocked by sock lock in tcp_v4_rcv().
+                * Also to satisfy an assertion in
+                * tcp_v4_destroy_sock().
+                */
+               tcp_sk(child)->fastopen_rsk = NULL;
+       }
+       inet_csk_destroy_sock(child);
+       reqsk_put(req);
+}
+
+void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
+                             struct sock *child)
+{
+       struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
+
+       spin_lock(&queue->rskq_lock);
+       if (unlikely(sk->sk_state != TCP_LISTEN)) {
+               inet_child_forget(sk, req, child);
+       } else {
+               req->sk = child;
+               req->dl_next = NULL;
+               if (queue->rskq_accept_head == NULL)
+                       queue->rskq_accept_head = req;
+               else
+                       queue->rskq_accept_tail->dl_next = req;
+               queue->rskq_accept_tail = req;
+               sk_acceptq_added(sk);
+       }
+       spin_unlock(&queue->rskq_lock);
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
+
 /*
  *     This routine closes sockets which have been at least partially
  *     opened, but not yet accepted.
@@ -790,31 +837,11 @@ void inet_csk_listen_stop(struct sock *sk)
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);
 
-               sk->sk_prot->disconnect(child, O_NONBLOCK);
-
-               sock_orphan(child);
-
-               percpu_counter_inc(sk->sk_prot->orphan_count);
-
-               if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
-                       BUG_ON(tcp_sk(child)->fastopen_rsk != req);
-                       BUG_ON(sk != req->rsk_listener);
-
-                       /* Paranoid, to prevent race condition if
-                        * an inbound pkt destined for child is
-                        * blocked by sock lock in tcp_v4_rcv().
-                        * Also to satisfy an assertion in
-                        * tcp_v4_destroy_sock().
-                        */
-                       tcp_sk(child)->fastopen_rsk = NULL;
-               }
-               inet_csk_destroy_sock(child);
-
+               inet_child_forget(sk, req, child);
                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);
 
-               reqsk_put(req);
                cond_resched();
        }
        if (queue->fastopenq.rskq_rst_head) {
@@ -829,7 +856,7 @@ void inet_csk_listen_stop(struct sock *sk)
                        req = next;
                }
        }
-       WARN_ON(sk->sk_ack_backlog);
+       WARN_ON_ONCE(sk->sk_ack_backlog);
 }
 EXPORT_SYMBOL_GPL(inet_csk_listen_stop);