Merge remote-tracking branch 'net-next/master'

diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 47b8ab7dce9cc5620eaef1468bedaeee05b2f19e..b935397c703c569bf01e6ebfd85e6bfd1b474227 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -755,7 +755,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
        if (tp->srtt > 8 + 2)
                do_div(rate, tp->srtt);
 
-       sk->sk_pacing_rate = min_t(u64, rate, sk->sk_max_pacing_rate);
+       /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
+        * without any lock. We want to make sure the compiler won't store
+        * intermediate values in this location.
+        */
+       ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate,
+                                               sk->sk_max_pacing_rate);
 }
 
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
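
The new comment describes a lockless publish/consume pattern: sch_fq reads sk_pacing_rate from another context without holding the socket lock, so the writer must guarantee a single store of the final value. Below is a minimal sketch of that pairing, using a hypothetical rate_holder structure rather than the real socket and qdisc code:

#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/types.h>

/* Illustration only, not code from this patch. */
struct rate_holder {
	u32	pacing_rate;
};

static void rate_publish(struct rate_holder *rh, u64 rate, u64 cap)
{
	/* ACCESS_ONCE() forces one store of the clamped result, so a
	 * concurrent lockless reader can never observe an intermediate
	 * value the compiler might otherwise leave in this location.
	 */
	ACCESS_ONCE(rh->pacing_rate) = min_t(u64, rate, cap);
}

static u32 rate_fetch(struct rate_holder *rh)
{
	/* the matching single load on the reader side */
	return ACCESS_ONCE(rh->pacing_rate);
}

Later kernels spell the same store/load pair as WRITE_ONCE()/READ_ONCE(), but the intent is identical.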
@@ -3333,7 +3338,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                        tcp_init_cwnd_reduction(sk, true);
                        tcp_set_ca_state(sk, TCP_CA_CWR);
                        tcp_end_cwnd_reduction(sk);
-                       tcp_set_ca_state(sk, TCP_CA_Open);
+                       tcp_try_keep_open(sk);
                        NET_INC_STATS_BH(sock_net(sk),
                                         LINUX_MIB_TCPLOSSPROBERECOVERY);
                }
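
The second hunk stops forcing the congestion-avoidance state back to TCP_CA_Open once a loss-probe (TLP) episode is resolved. tcp_try_keep_open() only returns to Open when nothing is outstanding; if segments are still sacked out or have been retransmitted it moves to TCP_CA_Disorder instead, so their bookkeeping is not discarded. A simplified sketch of the helper, which lives elsewhere in tcp_input.c (the exact body differs slightly across kernel versions):

static void tcp_try_keep_open(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int state = TCP_CA_Open;

	/* stay in (or fall back to) Disorder while segments are still
	 * sacked/lost or a retransmission is outstanding
	 */
	if (tcp_left_out(tp) || tcp_any_retrans_done(sk))
		state = TCP_CA_Disorder;

	if (inet_csk(sk)->icsk_ca_state != state) {
		tcp_set_ca_state(sk, state);
		tp->high_seq = tp->snd_nxt;
	}
}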
@@ -5746,6 +5751,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                } else
                        tcp_init_metrics(sk);
 
+               tcp_update_pacing_rate(sk);
+
                /* Prevent spurious tcp_cwnd_restart() on first data packet */
                tp->lsndtime = tcp_time_stamp;
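
The last hunk initializes the pacing rate for connections completing the handshake in tcp_rcv_state_process(). The call is placed after tcp_init_metrics() so that srtt is already seeded when tcp_update_pacing_rate() divides by it (see the first hunk). A rough userspace illustration of that dependency; the 200% headroom factor and the variable names are assumptions for the example, not taken from this diff:

#include <stdint.h>
#include <stdio.h>

/* Approximate cwnd*mss/srtt pacing arithmetic, clamped to a cap. */
static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd,
			    uint32_t srtt_us, uint64_t max_rate)
{
	uint64_t rate = (uint64_t)mss * cwnd * 2 * 1000000ULL;

	if (srtt_us)			/* mirrors the srtt guard in the first hunk */
		rate /= srtt_us;	/* bytes per second */

	return rate < max_rate ? rate : max_rate;
}

int main(void)
{
	/* mss 1448, initial cwnd 10, srtt 20 ms, cap of 125 MB/s */
	printf("srtt seeded:  %llu bytes/s\n",
	       (unsigned long long)pacing_rate(1448, 10, 20000, 125000000ULL));
	printf("srtt still 0: %llu bytes/s (hits the cap)\n",
	       (unsigned long long)pacing_rate(1448, 10, 0, 125000000ULL));
	return 0;
}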