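/* net/ipv4/tcp_metrics.c - cache of per-destination TCP metrics.
 *
 * Maintains a per-netns, RCU-protected hash table of tcp_metrics_block
 * entries keyed by peer address, caching RTT/RTTVAR, ssthresh, cwnd and
 * reordering, the peer's last timestamp (for PAWS checks across
 * connections), and TCP Fast Open state.  A generic netlink family
 * exposes the cache for dumping and flushing.
 */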
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/tcp.h>
#include <linux/hash.h>
#include <linux/tcp_metrics.h>
#include <linux/vmalloc.h>

#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/request_sock.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ipv6.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/genetlink.h>

int sysctl_tcp_nometrics_save __read_mostly;

struct tcp_fastopen_metrics {
        u16     mss;
        u16     syn_loss:10;            /* Recurring Fast Open SYN losses */
        unsigned long   last_syn_loss;  /* Last Fast Open SYN loss */
        struct  tcp_fastopen_cookie     cookie;
};

struct tcp_metrics_block {
        struct tcp_metrics_block __rcu  *tcpm_next;
        struct inetpeer_addr            tcpm_addr;
        unsigned long                   tcpm_stamp;
        u32                             tcpm_ts;
        u32                             tcpm_ts_stamp;
        u32                             tcpm_lock;
        u32                             tcpm_vals[TCP_METRIC_MAX + 1];
        struct tcp_fastopen_metrics     tcpm_fastopen;

        struct rcu_head                 rcu_head;
};
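
/* tcpm_lock is a bitmask, indexed by enum tcp_metric_index, of the
 * metrics locked by a route attribute (see the dst_metric_locked()
 * tests in tcpm_suck_dst()); a locked metric is never overwritten with
 * values learned from live connections.
 */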
static bool tcp_metric_locked(struct tcp_metrics_block *tm,
                              enum tcp_metric_index idx)
{
        return tm->tcpm_lock & (1 << idx);
}

static u32 tcp_metric_get(struct tcp_metrics_block *tm,
                          enum tcp_metric_index idx)
{
        return tm->tcpm_vals[idx];
}

static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
                                  enum tcp_metric_index idx)
{
        return msecs_to_jiffies(tm->tcpm_vals[idx]);
}

static void tcp_metric_set(struct tcp_metrics_block *tm,
                           enum tcp_metric_index idx,
                           u32 val)
{
        tm->tcpm_vals[idx] = val;
}

static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
                                 enum tcp_metric_index idx,
                                 u32 val)
{
        tm->tcpm_vals[idx] = jiffies_to_msecs(val);
}

static bool addr_same(const struct inetpeer_addr *a,
                      const struct inetpeer_addr *b)
{
        const struct in6_addr *a6, *b6;

        if (a->family != b->family)
                return false;
        if (a->family == AF_INET)
                return a->addr.a4 == b->addr.a4;

        a6 = (const struct in6_addr *) &a->addr.a6[0];
        b6 = (const struct in6_addr *) &b->addr.a6[0];

        return ipv6_addr_equal(a6, b6);
}

struct tcpm_hash_bucket {
        struct tcp_metrics_block __rcu  *chain;
};

static DEFINE_SPINLOCK(tcp_metrics_lock);
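
/* (Re)initialize a metrics block from the route: note which metrics the
 * dst has locked, seed the cached values from the raw dst metrics, and
 * clear the timestamp state (and, on request, the Fast Open state).
 */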
static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
                          bool fastopen_clear)
{
        u32 val;

        tm->tcpm_stamp = jiffies;

        val = 0;
        if (dst_metric_locked(dst, RTAX_RTT))
                val |= 1 << TCP_METRIC_RTT;
        if (dst_metric_locked(dst, RTAX_RTTVAR))
                val |= 1 << TCP_METRIC_RTTVAR;
        if (dst_metric_locked(dst, RTAX_SSTHRESH))
                val |= 1 << TCP_METRIC_SSTHRESH;
        if (dst_metric_locked(dst, RTAX_CWND))
                val |= 1 << TCP_METRIC_CWND;
        if (dst_metric_locked(dst, RTAX_REORDERING))
                val |= 1 << TCP_METRIC_REORDERING;
        tm->tcpm_lock = val;

        tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
        tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
        tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
        tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
        tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
        tm->tcpm_ts = 0;
        tm->tcpm_ts_stamp = 0;
        if (fastopen_clear) {
                tm->tcpm_fastopen.mss = 0;
                tm->tcpm_fastopen.syn_loss = 0;
                tm->tcpm_fastopen.cookie.len = 0;
        }
}
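
/* Install a metrics block for @addr under tcp_metrics_lock.  When
 * @reclaim is set the hash chain is already at full depth, so rather
 * than allocating, the least recently stamped block is reused in place.
 */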
static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst,
                                          struct inetpeer_addr *addr,
                                          unsigned int hash,
                                          bool reclaim)
{
        struct tcp_metrics_block *tm;
        struct net *net;

        spin_lock_bh(&tcp_metrics_lock);
        net = dev_net(dst->dev);
        if (unlikely(reclaim)) {
                struct tcp_metrics_block *oldest;

                oldest = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain);
                for (tm = rcu_dereference(oldest->tcpm_next); tm;
                     tm = rcu_dereference(tm->tcpm_next)) {
                        if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp))
                                oldest = tm;
                }
                tm = oldest;
        } else {
                tm = kmalloc(sizeof(*tm), GFP_ATOMIC);
                if (!tm)
                        goto out_unlock;
        }
        tm->tcpm_addr = *addr;

        tcpm_suck_dst(tm, dst, true);

        if (likely(!reclaim)) {
                tm->tcpm_next = net->ipv4.tcp_metrics_hash[hash].chain;
                rcu_assign_pointer(net->ipv4.tcp_metrics_hash[hash].chain, tm);
        }

out_unlock:
        spin_unlock_bh(&tcp_metrics_lock);
        return tm;
}

#define TCP_METRICS_TIMEOUT             (60 * 60 * HZ)

static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst)
{
        if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT)))
                tcpm_suck_dst(tm, dst, false);
}
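
/* Lookups walk a hash chain and count its depth.  A miss on an overlong
 * chain is encoded as the TCP_METRICS_RECLAIM_PTR sentinel rather than
 * NULL, telling tcp_get_metrics() to recycle the oldest entry instead
 * of letting the chain grow further.
 */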
#define TCP_METRICS_RECLAIM_DEPTH       5
#define TCP_METRICS_RECLAIM_PTR         (struct tcp_metrics_block *) 0x1UL

static struct tcp_metrics_block *tcp_get_encode(struct tcp_metrics_block *tm, int depth)
{
        if (tm)
                return tm;
        if (depth > TCP_METRICS_RECLAIM_DEPTH)
                return TCP_METRICS_RECLAIM_PTR;
        return NULL;
}

static struct tcp_metrics_block *__tcp_get_metrics(const struct inetpeer_addr *addr,
                                                   struct net *net, unsigned int hash)
{
        struct tcp_metrics_block *tm;
        int depth = 0;

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, addr))
                        break;
                depth++;
        }
        return tcp_get_encode(tm, depth);
}

static struct tcp_metrics_block *__tcp_get_metrics_req(struct request_sock *req,
                                                       struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = req->rsk_ops->family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_rsk(req)->rmt_addr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_rsk(req)->rmt_addr;
                hash = ipv6_addr_hash(&inet6_rsk(req)->rmt_addr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        tcpm_check_stamp(tm, dst);
        return tm;
}

static struct tcp_metrics_block *__tcp_get_metrics_tw(struct inet_timewait_sock *tw)
{
        struct inet6_timewait_sock *tw6;
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;

        addr.family = tw->tw_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = tw->tw_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                tw6 = inet6_twsk((struct sock *)tw);
                *(struct in6_addr *)addr.addr.a6 = tw6->tw_v6_daddr;
                hash = ipv6_addr_hash(&tw6->tw_v6_daddr);
                break;
        default:
                return NULL;
        }

        net = twsk_net(tw);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr))
                        break;
        }
        return tm;
}

static struct tcp_metrics_block *tcp_get_metrics(struct sock *sk,
                                                 struct dst_entry *dst,
                                                 bool create)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net;
        bool reclaim;

        addr.family = sk->sk_family;
        switch (addr.family) {
        case AF_INET:
                addr.addr.a4 = inet_sk(sk)->inet_daddr;
                hash = (__force unsigned int) addr.addr.a4;
                break;
        case AF_INET6:
                *(struct in6_addr *)addr.addr.a6 = inet6_sk(sk)->daddr;
                hash = ipv6_addr_hash(&inet6_sk(sk)->daddr);
                break;
        default:
                return NULL;
        }

        net = dev_net(dst->dev);
        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);

        tm = __tcp_get_metrics(&addr, net, hash);
        reclaim = false;
        if (tm == TCP_METRICS_RECLAIM_PTR) {
                reclaim = true;
                tm = NULL;
        }
        if (!tm && create)
                tm = tcpm_new(dst, &addr, hash, reclaim);
        else
                tcpm_check_stamp(tm, dst);

        return tm;
}

/* Save metrics learned by this TCP session.  This function is called
 * only when TCP finishes successfully, i.e. when it enters TIME-WAIT
 * or goes from LAST-ACK to CLOSE.
 */
void tcp_update_metrics(struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        unsigned long rtt;
        u32 val;
        int m;

        if (sysctl_tcp_nometrics_save || !dst)
                return;

        if (dst->flags & DST_HOST)
                dst_confirm(dst);

        rcu_read_lock();
        if (icsk->icsk_backoff || !tp->srtt) {
                /* This session failed to estimate rtt.  Why?  Most
                 * likely no packets were returned in time.  Reset our
                 * results.
                 */
                tm = tcp_get_metrics(sk, dst, false);
                if (tm && !tcp_metric_locked(tm, TCP_METRIC_RTT))
                        tcp_metric_set(tm, TCP_METRIC_RTT, 0);
                goto out_unlock;
        } else
                tm = tcp_get_metrics(sk, dst, true);

        if (!tm)
                goto out_unlock;

        rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        m = rtt - tp->srtt;

        /* If the newly calculated RTT is larger than the stored one,
         * store the new one.  Otherwise, use EWMA.  Remember, RTT
         * overestimation is always better than underestimation.
         */
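        /* Note on fixed point: both the cached value and tp->srtt carry
         * srtt's <<3 scaling, so "rtt -= (m >> 3)" below is the usual
         * EWMA rtt = 7/8 * rtt + 1/8 * srtt, the same gain that
         * tcp_rtt_estimator() uses; the rttvar smoothing uses gain 1/4.
         */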
        if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
                if (m <= 0)
                        rtt = tp->srtt;
                else
                        rtt -= (m >> 3);
                tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
        }

        if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
                unsigned long var;

                if (m < 0)
                        m = -m;

                /* Scale deviation to rttvar fixed point */
                m >>= 1;
                if (m < tp->mdev)
                        m = tp->mdev;

                var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
                if (m >= var)
                        var = m;
                else
                        var -= (var - m) >> 2;

                tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
        }

        if (tcp_in_initial_slowstart(tp)) {
                /* Slow start still did not finish. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && (tp->snd_cwnd >> 1) > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_cwnd >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        if (tp->snd_cwnd > val)
                                tcp_metric_set(tm, TCP_METRIC_CWND,
                                               tp->snd_cwnd);
                }
        } else if (tp->snd_cwnd > tp->snd_ssthresh &&
                   icsk->icsk_ca_state == TCP_CA_Open) {
                /* Cong. avoidance phase, cwnd is reliable. */
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
                        tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
                }
        } else {
                /* Else slow start did not finish: cwnd is not
                 * meaningful and ssthresh may also be invalid.
                 */
                if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
                        val = tcp_metric_get(tm, TCP_METRIC_CWND);
                        tcp_metric_set(tm, TCP_METRIC_CWND,
                                       (val + tp->snd_ssthresh) >> 1);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
                        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
                        if (val && tp->snd_ssthresh > val)
                                tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
                                               tp->snd_ssthresh);
                }
                if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
                        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
                        if (val < tp->reordering &&
                            tp->reordering != sysctl_tcp_reordering)
                                tcp_metric_set(tm, TCP_METRIC_REORDERING,
                                               tp->reordering);
                }
        }
        tm->tcpm_stamp = jiffies;
out_unlock:
        rcu_read_unlock();
}

/* Initialize metrics on socket. */

void tcp_init_metrics(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_metrics_block *tm;
        u32 val, crtt = 0; /* cached RTT scaled by 8 */

        if (dst == NULL)
                goto reset;

        dst_confirm(dst);

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (!tm) {
                rcu_read_unlock();
                goto reset;
        }

        if (tcp_metric_locked(tm, TCP_METRIC_CWND))
                tp->snd_cwnd_clamp = tcp_metric_get(tm, TCP_METRIC_CWND);

        val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
        if (val) {
                tp->snd_ssthresh = val;
                if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
                        tp->snd_ssthresh = tp->snd_cwnd_clamp;
        } else {
                /* ssthresh may have been reduced unnecessarily during
                 * 3WHS.  Restore it back to its initial default.
                 */
                tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        }
        val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
        if (val && tp->reordering != val) {
                tcp_disable_fack(tp);
                tcp_disable_early_retrans(tp);
                tp->reordering = val;
        }

        crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
        rcu_read_unlock();
reset:
        /* The initial RTT measurement from the SYN/SYN-ACK is not ideal
         * to seed the RTO for later data packets because SYN packets are
         * small.  Use the per-dst cached values to seed the RTO but keep
         * the RTT estimator variables intact (e.g., srtt, mdev, rttvar).
         * Later the RTO will be updated immediately upon obtaining the first
         * data RTT sample (tcp_rtt_estimator()).  Hence the cached RTT only
         * influences the first RTO but not later RTT estimation.
         *
         * But if RTT is not available from the SYN (due to retransmits or
         * syn cookies) or the cache, force a conservative 3secs timeout.
         *
         * A bit of theory.  RTT is the time that passes after a "normal"
         * sized packet is sent until it is ACKed.  In normal circumstances
         * sending small packets forces the peer to delay ACKs, and the
         * calculation is still correct.  The algorithm is adaptive and,
         * provided we follow specs, it NEVER underestimates RTT.  BUT!
         * If the peer plays clever tricks, sending "quick acks" for long
         * enough to drive the RTT down to a low value and then abruptly
         * switching to delayed ACKs, expect trouble.
         */
        if (crtt > tp->srtt) {
                /* Set RTO like tcp_rtt_estimator(), but from cached RTT;
                 * the cached value keeps srtt's <<3 fixed-point scaling,
                 * so unscale it before use.
                 */
                crtt >>= 3;
                inet_csk(sk)->icsk_rto = crtt + max(crtt >> 2, tcp_rto_min(sk));
        } else if (tp->srtt == 0) {
                /* RFC6298: 5.7 We've failed to get a valid RTT sample from
                 * 3WHS.  This is most likely due to retransmission,
                 * including spurious ones.  Reset the RTO back to 3secs
                 * from the more aggressive 1sec to avoid more spurious
                 * retransmission.
                 */
                tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
                inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
        }
        /* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
         * retransmitted.  In light of RFC6298 more aggressive 1sec
         * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
         * retransmission has occurred.
         */
        if (tp->total_retrans > 1)
                tp->snd_cwnd = 1;
        else
                tp->snd_cwnd = tcp_init_cwnd(tp, dst);
        tp->snd_cwnd_stamp = tcp_time_stamp;
}
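
/* Judge whether a peer is "proven".  With @paws_check, the request is
 * vetted against the cached timestamp per PAWS: it fails if the cached
 * tsval is both recent (younger than TCP_PAWS_MSL) and ahead of the
 * request's ts_recent by more than TCP_PAWS_WINDOW.  Without it, the
 * peer counts as proven only when we hold both an RTT sample and a
 * timestamp for it.
 */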
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst, bool paws_check)
{
        struct tcp_metrics_block *tm;
        bool ret;

        if (!dst)
                return false;

        rcu_read_lock();
        tm = __tcp_get_metrics_req(req, dst);
        if (paws_check) {
                if (tm &&
                    (u32)get_seconds() - tm->tcpm_ts_stamp < TCP_PAWS_MSL &&
                    (s32)(tm->tcpm_ts - req->ts_recent) > TCP_PAWS_WINDOW)
                        ret = false;
                else
                        ret = true;
        } else {
                if (tm && tcp_metric_get(tm, TCP_METRIC_RTT) && tm->tcpm_ts_stamp)
                        ret = true;
                else
                        ret = false;
        }
        rcu_read_unlock();

        return ret;
}
EXPORT_SYMBOL_GPL(tcp_peer_is_proven);

void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, dst, true);
        if (tm) {
                struct tcp_sock *tp = tcp_sk(sk);

                if ((u32)get_seconds() - tm->tcpm_ts_stamp <= TCP_PAWS_MSL) {
                        tp->rx_opt.ts_recent_stamp = tm->tcpm_ts_stamp;
                        tp->rx_opt.ts_recent = tm->tcpm_ts;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(tcp_fetch_timewait_stamp);

/* VJ's idea.  Save the last timestamp seen from this destination and
 * hold it at least for the normal timewait interval, to use for
 * duplicate segment detection in subsequent connections, before they
 * enter synchronized state.
 */
bool tcp_remember_stamp(struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_get(sk);
        bool ret = false;

        if (dst) {
                struct tcp_metrics_block *tm;

                rcu_read_lock();
                tm = tcp_get_metrics(sk, dst, true);
                if (tm) {
                        struct tcp_sock *tp = tcp_sk(sk);

                        if ((s32)(tm->tcpm_ts - tp->rx_opt.ts_recent) <= 0 ||
                            ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                             tm->tcpm_ts_stamp <= (u32)tp->rx_opt.ts_recent_stamp)) {
                                tm->tcpm_ts_stamp = (u32)tp->rx_opt.ts_recent_stamp;
                                tm->tcpm_ts = tp->rx_opt.ts_recent;
                        }
                        ret = true;
                }
                rcu_read_unlock();
        }
        return ret;
}

bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw)
{
        struct tcp_metrics_block *tm;
        bool ret = false;

        rcu_read_lock();
        tm = __tcp_get_metrics_tw(tw);
        if (tm) {
                const struct tcp_timewait_sock *tcptw;
                struct sock *sk = (struct sock *) tw;

                tcptw = tcp_twsk(sk);
                if ((s32)(tm->tcpm_ts - tcptw->tw_ts_recent) <= 0 ||
                    ((u32)get_seconds() - tm->tcpm_ts_stamp > TCP_PAWS_MSL &&
                     tm->tcpm_ts_stamp <= (u32)tcptw->tw_ts_recent_stamp)) {
                        tm->tcpm_ts_stamp = (u32)tcptw->tw_ts_recent_stamp;
                        tm->tcpm_ts        = tcptw->tw_ts_recent;
                }
                ret = true;
        }
        rcu_read_unlock();

        return ret;
}
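
/* The Fast Open fields are updated outside tcp_metrics_lock; this
 * seqlock lets readers snapshot the mss/cookie/SYN-loss state
 * consistently while writers update it, on top of the usual RCU
 * protection of the block itself.
 */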
static DEFINE_SEQLOCK(fastopen_seqlock);

void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
                            struct tcp_fastopen_cookie *cookie,
                            int *syn_loss, unsigned long *last_syn_loss)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), false);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        if (tfom->mss)
                                *mss = tfom->mss;
                        *cookie = tfom->cookie;
                        *syn_loss = tfom->syn_loss;
                        *last_syn_loss = *syn_loss ? tfom->last_syn_loss : 0;
                } while (read_seqretry(&fastopen_seqlock, seq));
        }
        rcu_read_unlock();
}

void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
                            struct tcp_fastopen_cookie *cookie, bool syn_lost)
{
        struct tcp_metrics_block *tm;

        rcu_read_lock();
        tm = tcp_get_metrics(sk, __sk_dst_get(sk), true);
        if (tm) {
                struct tcp_fastopen_metrics *tfom = &tm->tcpm_fastopen;

                write_seqlock_bh(&fastopen_seqlock);
                tfom->mss = mss;
                if (cookie->len > 0)
                        tfom->cookie = *cookie;
                if (syn_lost) {
                        ++tfom->syn_loss;
                        tfom->last_syn_loss = jiffies;
                } else
                        tfom->syn_loss = 0;
                write_sequnlock_bh(&fastopen_seqlock);
        }
        rcu_read_unlock();
}
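
/* Generic netlink interface to the cache: TCP_METRICS_CMD_GET fetches
 * or dumps entries, and TCP_METRICS_CMD_DEL removes one entry or, with
 * no address attribute, flushes the whole table.  This is the family
 * that e.g. iproute2's "ip tcp_metrics" talks to.
 */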
static struct genl_family tcp_metrics_nl_family = {
        .id             = GENL_ID_GENERATE,
        .hdrsize        = 0,
        .name           = TCP_METRICS_GENL_NAME,
        .version        = TCP_METRICS_GENL_VERSION,
        .maxattr        = TCP_METRICS_ATTR_MAX,
        .netnsok        = true,
};

static struct nla_policy tcp_metrics_nl_policy[TCP_METRICS_ATTR_MAX + 1] = {
        [TCP_METRICS_ATTR_ADDR_IPV4]    = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_ADDR_IPV6]    = { .type = NLA_BINARY,
                                            .len = sizeof(struct in6_addr), },
        /* The following attributes are not received for GET/DEL;
         * we keep them here for reference.
         */
#if 0
        [TCP_METRICS_ATTR_AGE]          = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_TW_TSVAL]     = { .type = NLA_U32, },
        [TCP_METRICS_ATTR_TW_TS_STAMP]  = { .type = NLA_S32, },
        [TCP_METRICS_ATTR_VALS]         = { .type = NLA_NESTED, },
        [TCP_METRICS_ATTR_FOPEN_MSS]    = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROPS]      = { .type = NLA_U16, },
        [TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS]    = { .type = NLA_MSECS, },
        [TCP_METRICS_ATTR_FOPEN_COOKIE] = { .type = NLA_BINARY,
                                            .len = TCP_FASTOPEN_COOKIE_MAX, },
#endif
};

/* Add attributes, caller cancels its header on failure */
static int tcp_metrics_fill_info(struct sk_buff *msg,
                                 struct tcp_metrics_block *tm)
{
        struct nlattr *nest;
        int i;

        switch (tm->tcpm_addr.family) {
        case AF_INET:
                if (nla_put_be32(msg, TCP_METRICS_ATTR_ADDR_IPV4,
                                tm->tcpm_addr.addr.a4) < 0)
                        goto nla_put_failure;
                break;
        case AF_INET6:
                if (nla_put(msg, TCP_METRICS_ATTR_ADDR_IPV6, 16,
                            tm->tcpm_addr.addr.a6) < 0)
                        goto nla_put_failure;
                break;
        default:
                return -EAFNOSUPPORT;
        }

        if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE,
                          jiffies - tm->tcpm_stamp) < 0)
                goto nla_put_failure;
        if (tm->tcpm_ts_stamp) {
                if (nla_put_s32(msg, TCP_METRICS_ATTR_TW_TS_STAMP,
                                (s32) (get_seconds() - tm->tcpm_ts_stamp)) < 0)
                        goto nla_put_failure;
                if (nla_put_u32(msg, TCP_METRICS_ATTR_TW_TSVAL,
                                tm->tcpm_ts) < 0)
                        goto nla_put_failure;
        }

        {
                int n = 0;

                nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
                if (!nest)
                        goto nla_put_failure;
                for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
                        if (!tm->tcpm_vals[i])
                                continue;
                        if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
                                goto nla_put_failure;
                        n++;
                }
                if (n)
                        nla_nest_end(msg, nest);
                else
                        nla_nest_cancel(msg, nest);
        }

        {
                struct tcp_fastopen_metrics tfom_copy[1], *tfom;
                unsigned int seq;

                do {
                        seq = read_seqbegin(&fastopen_seqlock);
                        tfom_copy[0] = tm->tcpm_fastopen;
                } while (read_seqretry(&fastopen_seqlock, seq));

                tfom = tfom_copy;
                if (tfom->mss &&
                    nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_MSS,
                                tfom->mss) < 0)
                        goto nla_put_failure;
                if (tfom->syn_loss &&
                    (nla_put_u16(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROPS,
                                tfom->syn_loss) < 0 ||
                     nla_put_msecs(msg, TCP_METRICS_ATTR_FOPEN_SYN_DROP_TS,
                                jiffies - tfom->last_syn_loss) < 0))
                        goto nla_put_failure;
                if (tfom->cookie.len > 0 &&
                    nla_put(msg, TCP_METRICS_ATTR_FOPEN_COOKIE,
                            tfom->cookie.len, tfom->cookie.val) < 0)
                        goto nla_put_failure;
        }

        return 0;

nla_put_failure:
        return -EMSGSIZE;
}

static int tcp_metrics_dump_info(struct sk_buff *skb,
                                 struct netlink_callback *cb,
                                 struct tcp_metrics_block *tm)
{
        void *hdr;

        hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
                          &tcp_metrics_nl_family, NLM_F_MULTI,
                          TCP_METRICS_CMD_GET);
        if (!hdr)
                return -EMSGSIZE;

        if (tcp_metrics_fill_info(skb, tm) < 0)
                goto nla_put_failure;

        return genlmsg_end(skb, hdr);

nla_put_failure:
        genlmsg_cancel(skb, hdr);
        return -EMSGSIZE;
}
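
/* Dump the whole table.  cb->args[0] and cb->args[1] record the hash
 * row and the position within the chain, so a dump interrupted by a
 * full skb resumes where it left off.
 */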
static int tcp_metrics_nl_dump(struct sk_buff *skb,
                               struct netlink_callback *cb)
{
        struct net *net = sock_net(skb->sk);
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        unsigned int row, s_row = cb->args[0];
        int s_col = cb->args[1], col = s_col;

        for (row = s_row; row < max_rows; row++, s_col = 0) {
                struct tcp_metrics_block *tm;
                struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash + row;

                rcu_read_lock();
                for (col = 0, tm = rcu_dereference(hb->chain); tm;
                     tm = rcu_dereference(tm->tcpm_next), col++) {
                        if (col < s_col)
                                continue;
                        if (tcp_metrics_dump_info(skb, cb, tm) < 0) {
                                rcu_read_unlock();
                                goto done;
                        }
                }
                rcu_read_unlock();
        }

done:
        cb->args[0] = row;
        cb->args[1] = col;
        return skb->len;
}

static int parse_nl_addr(struct genl_info *info, struct inetpeer_addr *addr,
                         unsigned int *hash, int optional)
{
        struct nlattr *a;

        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV4];
        if (a) {
                addr->family = AF_INET;
                addr->addr.a4 = nla_get_be32(a);
                *hash = (__force unsigned int) addr->addr.a4;
                return 0;
        }
        a = info->attrs[TCP_METRICS_ATTR_ADDR_IPV6];
        if (a) {
                if (nla_len(a) != sizeof(struct in6_addr))
                        return -EINVAL;
                addr->family = AF_INET6;
                memcpy(addr->addr.a6, nla_data(a), sizeof(addr->addr.a6));
                *hash = ipv6_addr_hash((struct in6_addr *) addr->addr.a6);
                return 0;
        }
        return optional ? 1 : -EAFNOSUPPORT;
}

static int tcp_metrics_nl_cmd_get(struct sk_buff *skb, struct genl_info *info)
{
        struct tcp_metrics_block *tm;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct sk_buff *msg;
        struct net *net = genl_info_net(info);
        void *reply;
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 0);
        if (ret < 0)
                return ret;

        msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        reply = genlmsg_put_reply(msg, info, &tcp_metrics_nl_family, 0,
                                  info->genlhdr->cmd);
        if (!reply)
                goto nla_put_failure;

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        ret = -ESRCH;
        rcu_read_lock();
        for (tm = rcu_dereference(net->ipv4.tcp_metrics_hash[hash].chain); tm;
             tm = rcu_dereference(tm->tcpm_next)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        ret = tcp_metrics_fill_info(msg, tm);
                        break;
                }
        }
        rcu_read_unlock();
        if (ret < 0)
                goto out_free;

        genlmsg_end(msg, reply);
        return genlmsg_reply(msg, info);

nla_put_failure:
        ret = -EMSGSIZE;

out_free:
        nlmsg_free(msg);
        return ret;
}
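
/* deref_locked_genl() asserts that both the genl mutex and
 * tcp_metrics_lock are held; deref_genl() asserts just the genl mutex.
 * Both document the writer-side locking to lockdep when dereferencing
 * the RCU-protected chain pointers.
 */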
#define deref_locked_genl(p)    \
        rcu_dereference_protected(p, lockdep_genl_is_held() && \
                                     lockdep_is_held(&tcp_metrics_lock))

#define deref_genl(p)   rcu_dereference_protected(p, lockdep_genl_is_held())

static int tcp_metrics_flush_all(struct net *net)
{
        unsigned int max_rows = 1U << net->ipv4.tcp_metrics_hash_log;
        struct tcpm_hash_bucket *hb = net->ipv4.tcp_metrics_hash;
        struct tcp_metrics_block *tm;
        unsigned int row;

        for (row = 0; row < max_rows; row++, hb++) {
                spin_lock_bh(&tcp_metrics_lock);
                tm = deref_locked_genl(hb->chain);
                if (tm)
                        hb->chain = NULL;
                spin_unlock_bh(&tcp_metrics_lock);
                while (tm) {
                        struct tcp_metrics_block *next;

                        next = deref_genl(tm->tcpm_next);
                        kfree_rcu(tm, rcu_head);
                        tm = next;
                }
        }
        return 0;
}

static int tcp_metrics_nl_cmd_del(struct sk_buff *skb, struct genl_info *info)
{
        struct tcpm_hash_bucket *hb;
        struct tcp_metrics_block *tm;
        struct tcp_metrics_block __rcu **pp;
        struct inetpeer_addr addr;
        unsigned int hash;
        struct net *net = genl_info_net(info);
        int ret;

        ret = parse_nl_addr(info, &addr, &hash, 1);
        if (ret < 0)
                return ret;
        if (ret > 0)
                return tcp_metrics_flush_all(net);

        hash = hash_32(hash, net->ipv4.tcp_metrics_hash_log);
        hb = net->ipv4.tcp_metrics_hash + hash;
        pp = &hb->chain;
        spin_lock_bh(&tcp_metrics_lock);
        for (tm = deref_locked_genl(*pp); tm;
             pp = &tm->tcpm_next, tm = deref_locked_genl(*pp)) {
                if (addr_same(&tm->tcpm_addr, &addr)) {
                        *pp = tm->tcpm_next;
                        break;
                }
        }
        spin_unlock_bh(&tcp_metrics_lock);
        if (!tm)
                return -ESRCH;
        kfree_rcu(tm, rcu_head);
        return 0;
}

static struct genl_ops tcp_metrics_nl_ops[] = {
        {
                .cmd = TCP_METRICS_CMD_GET,
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
                .doit = tcp_metrics_nl_cmd_del,
                .policy = tcp_metrics_nl_policy,
                .flags = GENL_ADMIN_PERM,
        },
};
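
/* The hash table size can be forced on the kernel command line with the
 * "tcpmhash_entries=" boot parameter; when it is 0 (the default),
 * tcp_net_metrics_init() below picks a size based on available memory.
 */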
static unsigned int tcpmhash_entries;
static int __init set_tcpmhash_entries(char *str)
{
        ssize_t ret;

        if (!str)
                return 0;

        ret = kstrtouint(str, 0, &tcpmhash_entries);
        if (ret)
                return 0;

        return 1;
}
__setup("tcpmhash_entries=", set_tcpmhash_entries);
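
/* Per-netns setup: round the slot count up to a power of two (16K slots
 * on machines with at least 128K pages of RAM, i.e. 512MB with 4KB
 * pages, else 8K, unless overridden above), and fall back from
 * kzalloc() to vzalloc() for large tables.
 */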
static int __net_init tcp_net_metrics_init(struct net *net)
{
        size_t size;
        unsigned int slots;

        slots = tcpmhash_entries;
        if (!slots) {
                if (totalram_pages >= 128 * 1024)
                        slots = 16 * 1024;
                else
                        slots = 8 * 1024;
        }

        net->ipv4.tcp_metrics_hash_log = order_base_2(slots);
        size = sizeof(struct tcpm_hash_bucket) << net->ipv4.tcp_metrics_hash_log;

        net->ipv4.tcp_metrics_hash = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!net->ipv4.tcp_metrics_hash)
                net->ipv4.tcp_metrics_hash = vzalloc(size);

        if (!net->ipv4.tcp_metrics_hash)
                return -ENOMEM;

        return 0;
}

static void __net_exit tcp_net_metrics_exit(struct net *net)
{
        unsigned int i;

        for (i = 0; i < (1U << net->ipv4.tcp_metrics_hash_log); i++) {
                struct tcp_metrics_block *tm, *next;

                tm = rcu_dereference_protected(net->ipv4.tcp_metrics_hash[i].chain, 1);
                while (tm) {
                        next = rcu_dereference_protected(tm->tcpm_next, 1);
                        kfree(tm);
                        tm = next;
                }
        }
        if (is_vmalloc_addr(net->ipv4.tcp_metrics_hash))
                vfree(net->ipv4.tcp_metrics_hash);
        else
                kfree(net->ipv4.tcp_metrics_hash);
}

static __net_initdata struct pernet_operations tcp_net_metrics_ops = {
        .init   =       tcp_net_metrics_init,
        .exit   =       tcp_net_metrics_exit,
};

void __init tcp_metrics_init(void)
{
        int ret;

        ret = register_pernet_subsys(&tcp_net_metrics_ops);
        if (ret < 0)
                goto cleanup;
        ret = genl_register_family_with_ops(&tcp_metrics_nl_family,
                                            tcp_metrics_nl_ops,
                                            ARRAY_SIZE(tcp_metrics_nl_ops));
        if (ret < 0)
                goto cleanup_subsys;
        return;

cleanup_subsys:
        unregister_pernet_subsys(&tcp_net_metrics_ops);

cleanup:
        return;
}