unsigned int idx;
struct dst_entry *dst = csk->dst;
- csk->advmss = dst_metric(dst, RTAX_ADVMSS);
+ csk->advmss = dst_metric_advmss(dst);
if (csk->advmss > pmtu - 40)
csk->advmss = pmtu - 40;
static inline u32
dst_metric(const struct dst_entry *dst, const int metric)
{
- WARN_ON_ONCE(metric == RTAX_HOPLIMIT);
+ WARN_ON_ONCE(metric == RTAX_HOPLIMIT ||
+ metric == RTAX_ADVMSS);
return dst_metric_raw(dst, metric);
}
+static inline u32
+dst_metric_advmss(const struct dst_entry *dst)
+{
+ u32 advmss = dst_metric_raw(dst, RTAX_ADVMSS);
+
+ if (!advmss)
+ advmss = dst->ops->default_advmss(dst);
+
+ return advmss;
+}
+
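The pairing above is the heart of the change: dst_metric() now warns when asked for RTAX_ADVMSS, and dst_metric_advmss() resolves an unset (zero) metric through the new dst_ops->default_advmss() hook instead. A minimal sketch of the caller pattern this enables, modeled on the cxgbi hunk at the top (example_mss() and its pmtu parameter are illustrative, not part of the patch):

/* Sketch: reading the advertised MSS after this patch.  A stored
 * metric wins when non-zero; otherwise the address family computes
 * a default on demand from the current dst state. */
static u32 example_mss(const struct dst_entry *dst, u32 pmtu)
{
	u32 advmss = dst_metric_advmss(dst);

	/* Cap at the path MTU minus 40 bytes of IP + TCP headers,
	 * as the cxgbi caller above does. */
	if (advmss > pmtu - 40)
		advmss = pmtu - 40;
	return advmss;
}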
static inline void dst_metric_set(struct dst_entry *dst, int metric, u32 val)
{
dst->_metrics[metric-1] = val;
int (*gc)(struct dst_ops *ops);
struct dst_entry * (*check)(struct dst_entry *, __u32 cookie);
+ unsigned int (*default_advmss)(const struct dst_entry *);
void (*destroy)(struct dst_entry *);
void (*ifdown)(struct dst_entry *,
struct net_device *dev, int how);
return -EINVAL;
scp->state = DN_CC;
- scp->segsize_loc = dst_metric(__sk_dst_get(sk), RTAX_ADVMSS);
+ scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
dn_send_conn_conf(sk, allocation);
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
sk->sk_route_caps = sk->sk_dst_cache->dev->features;
sock->state = SS_CONNECTING;
scp->state = DN_CI;
- scp->segsize_loc = dst_metric(sk->sk_dst_cache, RTAX_ADVMSS);
+ scp->segsize_loc = dst_metric_advmss(sk->sk_dst_cache);
dn_nsp_send_conninit(sk, NSP_CI);
err = -EINPROGRESS;
static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
+static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
.gc_thresh = 128,
.gc = dn_dst_gc,
.check = dn_dst_check,
+ .default_advmss = dn_dst_default_advmss,
.negative_advice = dn_dst_negative_advice,
.link_failure = dn_dst_link_failure,
.update_pmtu = dn_dst_update_pmtu,
}
if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
- if (dst_metric(dst, RTAX_ADVMSS) > mss)
+ u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
+ if (!existing_mss || existing_mss > mss)
dst_metric_set(dst, RTAX_ADVMSS, mss);
}
}
return NET_RX_DROP;
}
+static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
+{
+ return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
+}
+
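With ADVMSS no longer readable through dst_metric(), an unset DECnet metric now resolves lazily to the MSS implied by the current path MTU. A hedged trace of the lookup for a DECnet route with no stored metric (names as in this patch, flow reconstructed for illustration):

/* Resolution for a DECnet dst whose RTAX_ADVMSS slot is zero:
 *
 *   dst_metric_advmss(dst)
 *     -> dst_metric_raw(dst, RTAX_ADVMSS) == 0
 *     -> dst->ops->default_advmss(dst)
 *        == dn_dst_default_advmss(dst)
 *        -> dn_mss_from_pmtu(dst->dev, dst_mtu(dst))
 *
 * This is why dn_rt_set_next_hop() below only has to lower an
 * explicitly configured metric: "unset" no longer reads as 0.
 */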
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
struct dn_fib_info *fi = res->fi;
struct net_device *dev = rt->dst.dev;
struct neighbour *n;
- unsigned mss;
+ unsigned int metric;
if (fi) {
if (DN_FIB_RES_GW(*res) &&
if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
- mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
- if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
- dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
- dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
+ metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
+ if (metric) {
+ unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
+
+ if (metric > mss)
+ dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
+ }
return 0;
}
*/
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
+static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static void ipv4_dst_destroy(struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
.protocol = cpu_to_be16(ETH_P_IP),
.gc = rt_garbage_collect,
.check = ipv4_dst_check,
+ .default_advmss = ipv4_default_advmss,
.destroy = ipv4_dst_destroy,
.ifdown = ipv4_dst_ifdown,
.negative_advice = ipv4_negative_advice,
(__force u32)r->rt_gateway,
r->rt_flags, atomic_read(&r->dst.__refcnt),
r->dst.__use, 0, (__force u32)r->rt_src,
- (dst_metric(&r->dst, RTAX_ADVMSS) ?
- (int)dst_metric(&r->dst, RTAX_ADVMSS) + 40 : 0),
+ dst_metric_advmss(&r->dst) + 40,
dst_metric(&r->dst, RTAX_WINDOW),
(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
dst_metric(&r->dst, RTAX_RTTVAR)),
}
#endif
+static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
+{
+ unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
+
+ if (advmss == 0) {
+ advmss = max_t(unsigned int, dst->dev->mtu - 40,
+ ip_rt_min_advmss);
+ if (advmss > 65535 - 40)
+ advmss = 65535 - 40;
+ }
+ return advmss;
+}
+
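ipv4_default_advmss() folds in the floor and ceiling that rt_set_nexthop() used to apply eagerly; only the 16-bit ceiling remains inline below, for metrics that were set explicitly. A hedged standalone sketch of the arithmetic (the helper name and the 1500/256 values are assumptions for illustration; 256 is the usual ip_rt_min_advmss default):

/* Same computation as ipv4_default_advmss() above, isolated for
 * clarity.  40 = 20-byte IPv4 header + 20-byte TCP header. */
static unsigned int example_ipv4_advmss(unsigned int mtu)
{
	unsigned int advmss = mtu - 40;		/* strip IPv4 + TCP headers */

	if (advmss < 256)			/* floor: ip_rt_min_advmss */
		advmss = 256;
	if (advmss > 65535 - 40)		/* ceiling: MSS is a 16-bit field */
		advmss = 65535 - 40;
	return advmss;				/* 1500 -> 1460, 90000 -> 65495 */
}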
static void rt_set_nexthop(struct rtable *rt, struct fib_result *res, u32 itag)
{
struct dst_entry *dst = &rt->dst;
if (dst_mtu(dst) > IP_MAX_MTU)
dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
- if (dst_metric(dst, RTAX_ADVMSS) == 0)
- dst_metric_set(dst, RTAX_ADVMSS,
- max_t(unsigned int, dst->dev->mtu - 40,
- ip_rt_min_advmss));
- if (dst_metric(dst, RTAX_ADVMSS) > 65535 - 40)
+ if (dst_metric_raw(dst, RTAX_ADVMSS) > 65535 - 40)
dst_metric_set(dst, RTAX_ADVMSS, 65535 - 40);
#ifdef CONFIG_NET_CLS_ROUTE
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
- newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+ newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
struct dst_entry *dst = __sk_dst_get(sk);
int mss = tp->advmss;
- if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
- mss = dst_metric(dst, RTAX_ADVMSS);
- tp->advmss = mss;
+ if (dst) {
+ unsigned int metric = dst_metric_advmss(dst);
+
+ if (metric < mss) {
+ mss = metric;
+ tp->advmss = mss;
+ }
}
return (__u16)mss;
skb_dst_set(skb, dst_clone(dst));
- mss = dst_metric(dst, RTAX_ADVMSS);
+ mss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
mss = tp->rx_opt.user_mss;
if (!tp->window_clamp)
tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
- tp->advmss = dst_metric(dst, RTAX_ADVMSS);
+ tp->advmss = dst_metric_advmss(dst);
if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
tp->advmss = tp->rx_opt.user_mss;
static struct rt6_info * ip6_rt_copy(struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
+static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
.gc = ip6_dst_gc,
.gc_thresh = 1024,
.check = ip6_dst_check,
+ .default_advmss = ip6_default_advmss,
.destroy = ip6_dst_destroy,
.ifdown = ip6_dst_ifdown,
.negative_advice = ip6_negative_advice,
static int ipv6_get_mtu(struct net_device *dev);
-static inline unsigned int ipv6_advmss(struct net *net, unsigned int mtu)
+static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
+ struct net_device *dev = dst->dev;
+ unsigned int mtu = dst_mtu(dst);
+ struct net *net = dev_net(dev);
+
mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
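Deriving dev, mtu, and net from the dst itself is what allows every precomputed ipv6_advmss() call site below to be deleted: an unset metric is now resolved on demand through the accessor. A worked example under an assumed 1500-byte Ethernet MTU:

/* sizeof(struct ipv6hdr) == 40, sizeof(struct tcphdr) == 20, so:
 *   1500 - 40 - 20 = 1440, the standard TCP-over-IPv6 MSS.
 * If the result falls below ip6_rt_min_advmss, the sysctl floor
 * applies instead (the clamp continues past this hunk).
 */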
atomic_set(&rt->dst.__refcnt, 1);
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 255);
dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
- dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
rt->dst.output = ip6_output;
#if 0 /* there's no chance to use these for ndisc */
if (!dst_mtu(&rt->dst))
dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(dev));
- if (!dst_metric(&rt->dst, RTAX_ADVMSS))
- dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
rt->dst.dev = dev;
rt->rt6i_idev = idev;
rt->rt6i_table = table;
nrt->rt6i_nexthop = neigh_clone(neigh);
/* Reset pmtu, it may be better */
dst_metric_set(&nrt->dst, RTAX_MTU, ipv6_get_mtu(neigh->dev));
- dst_metric_set(&nrt->dst, RTAX_ADVMSS, ipv6_advmss(dev_net(neigh->dev),
- dst_mtu(&nrt->dst)));
if (ip6_ins_rt(nrt))
goto out;
rt->rt6i_dev = net->loopback_dev;
rt->rt6i_idev = idev;
dst_metric_set(&rt->dst, RTAX_MTU, ipv6_get_mtu(rt->rt6i_dev));
- dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, dst_mtu(&rt->dst)));
dst_metric_set(&rt->dst, RTAX_HOPLIMIT, -1);
rt->dst.obsolete = -1;
{
struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
struct inet6_dev *idev;
- struct net *net = dev_net(arg->dev);
/* In IPv6 pmtu discovery is not optional,
so that RTAX_MTU lock cannot disable it.
(dst_mtu(&rt->dst) < arg->mtu &&
dst_mtu(&rt->dst) == idev->cnf.mtu6))) {
dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
- dst_metric_set(&rt->dst, RTAX_ADVMSS, ipv6_advmss(net, arg->mtu));
}
return 0;
}
tcp_mtup_init(newsk);
tcp_sync_mss(newsk, dst_mtu(dst));
- newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+ newtp->advmss = dst_metric_advmss(dst);
tcp_initialize_rcv_mss(newsk);
newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
return 1;
}
+static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
+{
+ return dst_metric_advmss(dst->path);
+}
+
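An xfrm bundle has no ADVMSS of its own; the default delegates to the innermost (path) route, so a socket running over a transform advertises the same MSS it would have advertised without it. A hedged trace of the resulting resolution order:

/* Resolution for a dst that is an xfrm bundle:
 *
 *   dst_metric_advmss(xfrm_dst)
 *     -> raw RTAX_ADVMSS on the bundle (normally unset)
 *     -> xfrm_default_advmss(xfrm_dst)
 *        -> dst_metric_advmss(xfrm_dst->path)
 *           -> the inner route's stored metric, or that
 *              family's default_advmss() (ipv4/ipv6/decnet).
 */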
int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
struct net *net;
dst_ops->kmem_cachep = xfrm_dst_cache;
if (likely(dst_ops->check == NULL))
dst_ops->check = xfrm_dst_check;
+ if (likely(dst_ops->default_advmss == NULL))
+ dst_ops->default_advmss = xfrm_default_advmss;
if (likely(dst_ops->negative_advice == NULL))
dst_ops->negative_advice = xfrm_negative_advice;
if (likely(dst_ops->link_failure == NULL))