diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 383d17359d01f1a81080df5aabef22266c1dd1c4..a9b62ee588f0b26a3f9b9bbbcedc4be40f8b5154 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -302,7 +302,7 @@ static struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
        return result;
 }
 
-static inline struct sock *udp_v4_mcast_next(struct sock *sk,
+static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
                                             __be16 loc_port, __be32 loc_addr,
                                             __be16 rmt_port, __be32 rmt_addr,
                                             int dif)
@@ -314,7 +314,8 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
        sk_for_each_from(s, node) {
                struct inet_sock *inet = inet_sk(s);
 
-               if (s->sk_hash != hnum                                  ||
+               if (!net_eq(sock_net(s), net)                           ||
+                   s->sk_hash != hnum                                  ||
                    (inet->daddr && inet->daddr != rmt_addr)            ||
                    (inet->dport != rmt_port && inet->dport)            ||
                    (inet->rcv_saddr && inet->rcv_saddr != loc_addr)    ||
@@ -951,6 +952,29 @@ int udp_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
+static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+       int is_udplite = IS_UDPLITE(sk);
+       int rc;
+
+       if ((rc = sock_queue_rcv_skb(sk, skb)) < 0) {
+               /* Note that an ENOMEM error is charged twice */
+               if (rc == -ENOMEM) {
+                       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                        is_udplite);
+                       atomic_inc(&sk->sk_drops);
+               }
+               goto drop;
+       }
+
+       return 0;
+
+drop:
+       UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
+       kfree_skb(skb);
+       return -1;
+}
+
 /* returns:
  *  -1: error
  *   0: success
@@ -1042,17 +1066,16 @@ int udp_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
                        goto drop;
        }
 
-       if ((rc = sock_queue_rcv_skb(sk,skb)) < 0) {
-               /* Note that an ENOMEM error is charged twice */
-               if (rc == -ENOMEM) {
-                       UDP_INC_STATS_BH(sock_net(sk),
-                                       UDP_MIB_RCVBUFERRORS, is_udplite);
-                       atomic_inc(&sk->sk_drops);
-               }
-               goto drop;
-       }
+       rc = 0;
 
-       return 0;
+       bh_lock_sock(sk);
+       if (!sock_owned_by_user(sk))
+               rc = __udp_queue_rcv_skb(sk, skb);
+       else
+               sk_add_backlog(sk, skb);
+       bh_unlock_sock(sk);
+
+       return rc;
 
 drop:
        UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
@@ -1077,28 +1100,21 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
        read_lock(&udp_hash_lock);
        sk = sk_head(&udptable[udp_hashfn(net, ntohs(uh->dest))]);
        dif = skb->dev->ifindex;
-       sk = udp_v4_mcast_next(sk, uh->dest, daddr, uh->source, saddr, dif);
+       sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
        if (sk) {
                struct sock *sknext = NULL;
 
                do {
                        struct sk_buff *skb1 = skb;
 
-                       sknext = udp_v4_mcast_next(sk_next(sk), uh->dest, daddr,
-                                                  uh->source, saddr, dif);
+                       sknext = udp_v4_mcast_next(net, sk_next(sk), uh->dest,
+                                                  daddr, uh->source, saddr,
+                                                  dif);
                        if (sknext)
                                skb1 = skb_clone(skb, GFP_ATOMIC);
 
                        if (skb1) {
-                               int ret = 0;
-
-                               bh_lock_sock_nested(sk);
-                               if (!sock_owned_by_user(sk))
-                                       ret = udp_queue_rcv_skb(sk, skb1);
-                               else
-                                       sk_add_backlog(sk, skb1);
-                               bh_unlock_sock(sk);
-
+                               int ret = udp_queue_rcv_skb(sk, skb1);
                                if (ret > 0)
                                        /* we should probably re-process instead
                                         * of dropping packets here. */
@@ -1158,7 +1174,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
                   int proto)
 {
        struct sock *sk;
-       struct udphdr *uh = udp_hdr(skb);
+       struct udphdr *uh;
        unsigned short ulen;
        struct rtable *rt = (struct rtable*)skb->dst;
        __be32 saddr = ip_hdr(skb)->saddr;
@@ -1171,6 +1187,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
        if (!pskb_may_pull(skb, sizeof(struct udphdr)))
                goto drop;              /* No space for header. */
 
+       uh   = udp_hdr(skb);
        ulen = ntohs(uh->len);
        if (ulen > skb->len)
                goto short_packet;
@@ -1193,13 +1210,7 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct hlist_head udptable[],
                        uh->dest, inet_iif(skb), udptable);
 
        if (sk != NULL) {
-               int ret = 0;
-               bh_lock_sock_nested(sk);
-               if (!sock_owned_by_user(sk))
-                       ret = udp_queue_rcv_skb(sk, skb);
-               else
-                       sk_add_backlog(sk, skb);
-               bh_unlock_sock(sk);
+               int ret = udp_queue_rcv_skb(sk, skb);
                sock_put(sk);
 
                /* a return value > 0 means to resubmit the input, but
@@ -1492,7 +1503,7 @@ struct proto udp_prot = {
        .sendmsg           = udp_sendmsg,
        .recvmsg           = udp_recvmsg,
        .sendpage          = udp_sendpage,
-       .backlog_rcv       = udp_queue_rcv_skb,
+       .backlog_rcv       = __udp_queue_rcv_skb,
        .hash              = udp_lib_hash,
        .unhash            = udp_lib_unhash,
        .get_port          = udp_v4_get_port,
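
Note on the receive-path hunks and the .backlog_rcv change above: the bh_lock_sock()/sock_owned_by_user()/sk_add_backlog() decision moves out of the two call sites (__udp4_lib_rcv() and __udp4_lib_mcast_deliver()) into udp_queue_rcv_skb() itself, and a packet parked on the backlog is later handled by the new lock-free __udp_queue_rcv_skb(), which just queues the skb with sock_queue_rcv_skb() and accounts errors. The sketch below is a minimal user-space model of that split, not kernel code; toy_sock, queue_rcv() and the other names are invented for illustration, and a pthread mutex plus an owned_by_user flag merely stand in for the socket spinlock and owner state (build with cc -pthread).

/*
 * Toy user-space model of the lock/backlog split that this patch
 * centralizes in udp_queue_rcv_skb().  Illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { int id; struct pkt *next; };

struct toy_sock {
        pthread_mutex_t slock;  /* stands in for the bh_lock_sock() spinlock */
        int owned_by_user;      /* stands in for sock_owned_by_user()        */
        struct pkt *backlog_head;
        struct pkt *backlog_tail;
};

/* Role of __udp_queue_rcv_skb(): plain enqueue/accounting, no socket lock. */
static void backlog_rcv(struct toy_sock *sk, struct pkt *p)
{
        (void)sk;
        printf("delivered packet %d\n", p->id);
        free(p);
}

/* Role of udp_queue_rcv_skb() after this patch: decide under the spinlock
 * between direct delivery and parking the packet on the backlog. */
static void queue_rcv(struct toy_sock *sk, struct pkt *p)
{
        pthread_mutex_lock(&sk->slock);
        if (!sk->owned_by_user) {
                backlog_rcv(sk, p);
        } else {
                p->next = NULL;
                if (sk->backlog_tail)
                        sk->backlog_tail->next = p;
                else
                        sk->backlog_head = p;
                sk->backlog_tail = p;
        }
        pthread_mutex_unlock(&sk->slock);
}

/* Role of lock_sock(): user context marks the socket as owned. */
static void user_lock(struct toy_sock *sk)
{
        pthread_mutex_lock(&sk->slock);
        sk->owned_by_user = 1;
        pthread_mutex_unlock(&sk->slock);
}

/* Role of release_sock(): drain the backlog through the lock-free handler,
 * then give up ownership.  A backlogged packet has already been through
 * queue_rcv()'s checks, so only the plain enqueue remains -- which is why
 * .backlog_rcv points at __udp_queue_rcv_skb() rather than at the locking
 * udp_queue_rcv_skb(). */
static void user_release(struct toy_sock *sk)
{
        pthread_mutex_lock(&sk->slock);
        while (sk->backlog_head) {
                struct pkt *p = sk->backlog_head;

                sk->backlog_head = p->next;
                if (!sk->backlog_head)
                        sk->backlog_tail = NULL;
                backlog_rcv(sk, p);
        }
        sk->owned_by_user = 0;
        pthread_mutex_unlock(&sk->slock);
}

int main(void)
{
        struct toy_sock sk = { .slock = PTHREAD_MUTEX_INITIALIZER };
        struct pkt *p;

        user_lock(&sk);                 /* user context holds the socket    */
        for (int i = 0; i < 3; i++) {   /* softirq-side arrivals while busy */
                p = malloc(sizeof(*p));
                p->id = i;
                queue_rcv(&sk, p);      /* parked on the backlog            */
        }
        user_release(&sk);              /* backlog drained here             */
        p = malloc(sizeof(*p));
        p->id = 3;
        queue_rcv(&sk, p);              /* socket unowned: direct delivery  */
        return 0;
}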