diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 5317fd3e66910ca25f5d73bf9dbd63e7fe400183..962df0ea31aa75dbaa0071ed278bec30b6d004da 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -9,7 +9,6 @@
  *     published by the Free Software Foundation.
  */
 
-#include <linux/config.h>
 #include <linux/dccp.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -485,7 +484,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
-                                                    (struct dccp_so_feat *)
+                                                    (struct dccp_so_feat __user *)
                                                     optval);
                break;
 
@@ -494,7 +493,7 @@ static int do_dccp_setsockopt(struct sock *sk, int level, int optname,
                        err = -EINVAL;
                else
                        err = dccp_setsockopt_change(sk, DCCPO_CHANGE_R,
-                                                    (struct dccp_so_feat *)
+                                                    (struct dccp_so_feat __user *)
                                                     optval);
                break;
 
@@ -663,17 +662,8 @@ int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (rc != 0)
                goto out_discard;
 
-       rc = dccp_write_xmit(sk, skb, &timeo);
-       /*
-        * XXX we don't use sk_write_queue, so just discard the packet.
-        *     Current plan however is to _use_ sk_write_queue with
-        *     an algorith similar to tcp_sendmsg, where the main difference
-        *     is that in DCCP we have to respect packet boundaries, so
-        *     no coalescing of skbs.
-        *
-        *     This bug was _quickly_ found & fixed by just looking at an OSTRA
-        *     generated callgraph 8) -acme
-        */
+       skb_queue_tail(&sk->sk_write_queue, skb);
+       dccp_write_xmit(sk, 0);
 out_release:
        release_sock(sk);
        return rc ? : len;
@@ -847,6 +837,7 @@ static int dccp_close_state(struct sock *sk)
 
 void dccp_close(struct sock *sk, long timeout)
 {
+       struct dccp_sock *dp = dccp_sk(sk);
        struct sk_buff *skb;
        int state;
 
@@ -863,6 +854,8 @@ void dccp_close(struct sock *sk, long timeout)
                goto adjudge_to_death;
        }
 
+       sk_stop_timer(sk, &dp->dccps_xmit_timer);
+
        /*
         * We need to flush the recv. buffs.  We do this only on the
         * descriptor close, not protocol-sourced closes, because the