X-Git-Url: https://git.karo-electronics.de/?a=blobdiff_plain;f=net%2Fipv4%2Froute.c;h=23bbe29b3bba6ef7e484ff7a455e1ca146ed8b11;hb=b587ee3ba21f58b7770a132e6bca5c6658ac5095;hp=8d62d85e68dc1ffa883a9189a068e4de57506552;hpb=7586eceb0abc0ea1c2b023e3e5d4dfd4ff40930a;p=karo-tx-linux.git

diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 8d62d85e68dc..23bbe29b3bba 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -149,6 +149,7 @@ static void ipv4_dst_destroy(struct dst_entry *dst);
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
 static void ipv4_link_failure(struct sk_buff *skb);
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu);
+static void ip_do_redirect(struct dst_entry *dst, struct sk_buff *skb);
 static int rt_garbage_collect(struct dst_ops *ops);
 
 static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
@@ -158,37 +159,13 @@ static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
 
 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
 {
-	struct rtable *rt = (struct rtable *) dst;
-	struct inet_peer *peer;
-	u32 *p = NULL;
-
-	peer = rt_get_peer_create(rt, rt->rt_dst);
-	if (peer) {
-		u32 *old_p = __DST_METRICS_PTR(old);
-		unsigned long prev, new;
-
-		p = peer->metrics;
-		if (inet_metrics_new(peer))
-			memcpy(p, old_p, sizeof(u32) * RTAX_MAX);
-
-		new = (unsigned long) p;
-		prev = cmpxchg(&dst->_metrics, old, new);
-
-		if (prev != old) {
-			p = __DST_METRICS_PTR(prev);
-			if (prev & DST_METRICS_READ_ONLY)
-				p = NULL;
-		} else {
-			if (rt->fi) {
-				fib_info_put(rt->fi);
-				rt->fi = NULL;
-			}
-		}
-	}
-	return p;
+	WARN_ON(1);
+	return NULL;
 }
 
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr);
+static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+					   struct sk_buff *skb,
+					   const void *daddr);
 
 static struct dst_ops ipv4_dst_ops = {
 	.family = AF_INET,
@@ -203,6 +180,7 @@ static struct dst_ops ipv4_dst_ops = {
 	.negative_advice = ipv4_negative_advice,
 	.link_failure = ipv4_link_failure,
 	.update_pmtu = ip_rt_update_pmtu,
+	.redirect = ip_do_redirect,
 	.local_out = __ip_local_out,
 	.neigh_lookup = ipv4_neigh_lookup,
 };
@@ -418,29 +396,19 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
 			   "HHUptod\tSpecDst");
 	else {
 		struct rtable *r = v;
-		struct neighbour *n;
-		int len, HHUptod;
-
-		rcu_read_lock();
-		n = dst_get_neighbour_noref(&r->dst);
-		HHUptod = (n && (n->nud_state & NUD_CONNECTED)) ? 1 : 0;
-		rcu_read_unlock();
+		int len;
 
 		seq_printf(seq, "%s\t%08X\t%08X\t%8X\t%d\t%u\t%d\t"
-			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
-			r->dst.dev ? r->dst.dev->name : "*",
-			(__force u32)r->rt_dst,
-			(__force u32)r->rt_gateway,
-			r->rt_flags, atomic_read(&r->dst.__refcnt),
-			r->dst.__use, 0, (__force u32)r->rt_src,
-			dst_metric_advmss(&r->dst) + 40,
-			dst_metric(&r->dst, RTAX_WINDOW),
-			(int)((dst_metric(&r->dst, RTAX_RTT) >> 3) +
-			      dst_metric(&r->dst, RTAX_RTTVAR)),
-			r->rt_key_tos,
-			-1,
-			HHUptod,
-			r->rt_spec_dst, &len);
+			      "%08X\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
+			   r->dst.dev ? r->dst.dev->name : "*",
+			   (__force u32)r->rt_dst,
+			   (__force u32)r->rt_gateway,
+			   r->rt_flags, atomic_read(&r->dst.__refcnt),
+			   r->dst.__use, 0, (__force u32)r->rt_src,
+			   dst_metric_advmss(&r->dst) + 40,
+			   dst_metric(&r->dst, RTAX_WINDOW), 0,
+			   r->rt_key_tos,
+			   -1, 0, 0, &len);
 
 		seq_printf(seq, "%*s\n", 127 - len, "");
 	}
 
@@ -677,7 +645,7 @@ static inline int rt_fast_clean(struct rtable *rth)
 static inline int rt_valuable(struct rtable *rth)
 {
 	return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
-		(rt_has_peer(rth) && rt_peer_ptr(rth)->pmtu_expires);
+		rth->dst.expires;
 }
 
 static int rt_may_expire(struct rtable *rth, unsigned long tmo1, unsigned long tmo2)
@@ -870,34 +838,22 @@ static void rt_check_expire(void)
 		while ((rth = rcu_dereference_protected(*rthp,
 					lockdep_is_held(rt_hash_lock_addr(i)))) != NULL) {
 			prefetch(rth->dst.rt_next);
-			if (rt_is_expired(rth)) {
+			if (rt_is_expired(rth) ||
+			    rt_may_expire(rth, tmo, ip_rt_gc_timeout)) {
 				*rthp = rth->dst.rt_next;
 				rt_free(rth);
 				continue;
 			}
-			if (rth->dst.expires) {
-				/* Entry is expired even if it is in use */
-				if (time_before_eq(jiffies, rth->dst.expires)) {
-nofree:
-					tmo >>= 1;
-					rthp = &rth->dst.rt_next;
-					/*
-					 * We only count entries on
-					 * a chain with equal hash inputs once
-					 * so that entries for different QOS
-					 * levels, and other non-hash input
-					 * attributes don't unfairly skew
-					 * the length computation
-					 */
-					length += has_noalias(rt_hash_table[i].chain, rth);
-					continue;
-				}
-			} else if (!rt_may_expire(rth, tmo, ip_rt_gc_timeout))
-				goto nofree;
 
-			/* Cleanup aged off entries. */
-			*rthp = rth->dst.rt_next;
-			rt_free(rth);
+			/* We only count entries on a chain with equal
+			 * hash inputs once so that entries for
+			 * different QOS levels, and other non-hash
+			 * input attributes don't unfairly skew the
+			 * length computation
+			 */
+			tmo >>= 1;
+			rthp = &rth->dst.rt_next;
+			length += has_noalias(rt_hash_table[i].chain, rth);
 		}
 		spin_unlock_bh(rt_hash_lock_addr(i));
 		sum += length;
@@ -935,7 +891,6 @@ static void rt_cache_invalidate(struct net *net)
 
 	get_random_bytes(&shuffle, sizeof(shuffle));
 	atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
-	inetpeer_invalidate_family(AF_INET);
 }
 
 /*
@@ -1108,20 +1063,20 @@ static int slow_chain_length(const struct rtable *head)
 	return length >> FRACT_BITS;
 }
 
-static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const void *daddr)
+static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
+					   struct sk_buff *skb,
+					   const void *daddr)
 {
-	static const __be32 inaddr_any = 0;
 	struct net_device *dev = dst->dev;
 	const __be32 *pkey = daddr;
 	const struct rtable *rt;
 	struct neighbour *n;
 
 	rt = (const struct rtable *) dst;
-
-	if (dev->flags & (IFF_LOOPBACK | IFF_POINTOPOINT))
-		pkey = &inaddr_any;
-	else if (rt->rt_gateway)
+	if (rt->rt_gateway)
 		pkey = (const __be32 *) &rt->rt_gateway;
+	else if (skb)
+		pkey = &ip_hdr(skb)->daddr;
 
 	n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
 	if (n)
@@ -1129,16 +1084,6 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, const vo
 	return neigh_create(&arp_tbl, pkey, dev);
 }
 
-static int rt_bind_neighbour(struct rtable *rt)
-{
-	struct neighbour *n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-	if (IS_ERR(n))
-		return PTR_ERR(n);
-	dst_set_neighbour(&rt->dst, n);
-
-	return 0;
-}
-
 static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
 				     struct sk_buff *skb, int ifindex)
 {
@@ -1147,7 +1092,6 @@ static struct rtable *rt_intern_hash(unsigned int hash, struct rtable *rt,
 	unsigned long now;
 	u32 min_score;
 	int chain_length;
-	int attempts = !in_softirq();
 
 restart:
 	chain_length = 0;
@@ -1174,15 +1118,6 @@ restart:
 		 */
 		rt->dst.flags |= DST_NOCACHE;
 
-		if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
-			int err = rt_bind_neighbour(rt);
-			if (err) {
-				net_warn_ratelimited("Neighbour table failure & not caching routes\n");
-				ip_rt_put(rt);
-				return ERR_PTR(err);
-			}
-		}
-
 		goto skip_hashing;
 	}
 
@@ -1265,40 +1200,6 @@ restart:
 			}
 		}
 
-	/* Try to bind route to arp only if it is output
-	   route or unicast forwarding path.
-	 */
-	if (rt->rt_type == RTN_UNICAST || rt_is_output_route(rt)) {
-		int err = rt_bind_neighbour(rt);
-		if (err) {
-			spin_unlock_bh(rt_hash_lock_addr(hash));
-
-			if (err != -ENOBUFS) {
-				rt_drop(rt);
-				return ERR_PTR(err);
-			}
-
-			/* Neighbour tables are full and nothing
-			   can be released. Try to shrink route cache,
-			   it is most likely it holds some neighbour records.
-			 */
-			if (attempts-- > 0) {
-				int saved_elasticity = ip_rt_gc_elasticity;
-				int saved_int = ip_rt_gc_min_interval;
-				ip_rt_gc_elasticity = 1;
-				ip_rt_gc_min_interval = 0;
-				rt_garbage_collect(&ipv4_dst_ops);
-				ip_rt_gc_min_interval = saved_int;
-				ip_rt_gc_elasticity = saved_elasticity;
-				goto restart;
-			}
-
-			net_warn_ratelimited("Neighbour table overflow\n");
-			rt_drop(rt);
-			return ERR_PTR(-ENOBUFS);
-		}
-	}
-
 	rt->dst.rt_next = rt_hash_table[hash].chain;
 
 	/*
@@ -1316,31 +1217,6 @@ skip_hashing:
 	return rt;
 }
 
-static atomic_t __rt_peer_genid = ATOMIC_INIT(0);
-
-static u32 rt_peer_genid(void)
-{
-	return atomic_read(&__rt_peer_genid);
-}
-
-void rt_bind_peer(struct rtable *rt, __be32 daddr, int create)
-{
-	struct inet_peer_base *base;
-	struct inet_peer *peer;
-
-	base = inetpeer_base_ptr(rt->_peer);
-	if (!base)
-		return;
-
-	peer = inet_getpeer_v4(base, daddr, create);
-	if (peer) {
-		if (!rt_set_peer(rt, peer))
-			inet_putpeer(peer);
-		else
-			rt->rt_peer_genid = rt_peer_genid();
-	}
-}
-
 /*
  * Peer allocation may fail only in serious out-of-memory conditions. However
  * we still can generate some output.
@@ -1363,20 +1239,15 @@ static void ip_select_fb_ident(struct iphdr *iph)
 
 void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
 {
-	struct rtable *rt = (struct rtable *) dst;
-
-	if (rt && !(rt->dst.flags & DST_NOPEER)) {
-		struct inet_peer *peer = rt_get_peer_create(rt, rt->rt_dst);
+	struct net *net = dev_net(dst->dev);
+	struct inet_peer *peer;
 
-		/* If peer is attached to destination, it is never detached,
-		   so that we need not to grab a lock to dereference it.
-		 */
-		if (peer) {
-			iph->id = htons(inet_getid(peer, more));
-			return;
-		}
-	} else if (!rt)
-		pr_debug("rt_bind_peer(0) @%p\n", __builtin_return_address(0));
+	peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
+	if (peer) {
+		iph->id = htons(inet_getid(peer, more));
+		inet_putpeer(peer);
+		return;
+	}
 
 	ip_select_fb_ident(iph);
 }
@@ -1402,43 +1273,35 @@ static void rt_del(unsigned int hash, struct rtable *rt)
 	spin_unlock_bh(rt_hash_lock_addr(hash));
 }
 
-static void check_peer_redir(struct dst_entry *dst, struct inet_peer *peer)
+static void ip_do_redirect(struct dst_entry *dst, struct sk_buff *skb)
 {
-	struct rtable *rt = (struct rtable *) dst;
-	__be32 orig_gw = rt->rt_gateway;
-	struct neighbour *n, *old_n;
-
-	dst_confirm(&rt->dst);
+	const struct iphdr *iph = (const struct iphdr *) skb->data;
+	__be32 new_gw = icmp_hdr(skb)->un.gateway;
+	__be32 old_gw = ip_hdr(skb)->saddr;
+	struct net_device *dev = skb->dev;
+	__be32 daddr = iph->daddr;
+	__be32 saddr = iph->saddr;
+	struct in_device *in_dev;
+	struct neighbour *n;
+	struct rtable *rt;
+	struct net *net;
 
-	rt->rt_gateway = peer->redirect_learned.a4;
+	switch (icmp_hdr(skb)->code & 7) {
+	case ICMP_REDIR_NET:
+	case ICMP_REDIR_NETTOS:
+	case ICMP_REDIR_HOST:
+	case ICMP_REDIR_HOSTTOS:
+		break;
 
-	n = ipv4_neigh_lookup(&rt->dst, &rt->rt_gateway);
-	if (IS_ERR(n)) {
-		rt->rt_gateway = orig_gw;
+	default:
 		return;
 	}
 
-	old_n = xchg(&rt->dst._neighbour, n);
-	if (old_n)
-		neigh_release(old_n);
-	if (!(n->nud_state & NUD_VALID)) {
-		neigh_event_send(n, NULL);
-	} else {
-		rt->rt_flags |= RTCF_REDIRECTED;
-		call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
-	}
-}
-
-/* called in rcu_read_lock() section */
-void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
-		    __be32 saddr, struct net_device *dev)
-{
-	int s, i;
-	struct in_device *in_dev = __in_dev_get_rcu(dev);
-	__be32 skeys[2] = { saddr, 0 };
-	int ikeys[2] = { dev->ifindex, 0 };
-	struct inet_peer *peer;
-	struct net *net;
+	rt = (struct rtable *) dst;
+	if (rt->rt_gateway != old_gw)
+		return;
 
+	in_dev = __in_dev_get_rcu(dev);
 	if (!in_dev)
 		return;
@@ -1458,40 +1321,16 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
 		goto reject_redirect;
 	}
 
-	for (s = 0; s < 2; s++) {
-		for (i = 0; i < 2; i++) {
-			unsigned int hash;
-			struct rtable __rcu **rthp;
-			struct rtable *rt;
-
-			hash = rt_hash(daddr, skeys[s], ikeys[i], rt_genid(net));
-
-			rthp = &rt_hash_table[hash].chain;
-
-			while ((rt = rcu_dereference(*rthp)) != NULL) {
-				rthp = &rt->dst.rt_next;
-
-				if (rt->rt_key_dst != daddr ||
-				    rt->rt_key_src != skeys[s] ||
-				    rt->rt_oif != ikeys[i] ||
-				    rt_is_input_route(rt) ||
-				    rt_is_expired(rt) ||
-				    !net_eq(dev_net(rt->dst.dev), net) ||
-				    rt->dst.error ||
-				    rt->dst.dev != dev ||
-				    rt->rt_gateway != old_gw)
-					continue;
-
-				peer = rt_get_peer_create(rt, rt->rt_dst);
-				if (peer) {
-					if (peer->redirect_learned.a4 != new_gw) {
-						peer->redirect_learned.a4 = new_gw;
-						atomic_inc(&__rt_peer_genid);
-					}
-					check_peer_redir(&rt->dst, peer);
-				}
-			}
+	n = ipv4_neigh_lookup(dst, NULL, &new_gw);
+	if (n) {
+		if (!(n->nud_state & NUD_VALID)) {
+			neigh_event_send(n, NULL);
+		} else {
+			rt->rt_gateway = new_gw;
+			rt->rt_flags |= RTCF_REDIRECTED;
+			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
 		}
+		neigh_release(n);
 	}
 	return;
 
@@ -1506,23 +1345,6 @@ reject_redirect:
 		;
 }
 
-static bool peer_pmtu_expired(struct inet_peer *peer)
-{
-	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
-	return orig &&
-	       time_after_eq(jiffies, orig) &&
-	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
-
-static bool peer_pmtu_cleaned(struct inet_peer *peer)
-{
-	unsigned long orig = ACCESS_ONCE(peer->pmtu_expires);
-
-	return orig &&
-	       cmpxchg(&peer->pmtu_expires, orig, 0) == orig;
-}
-
 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 {
 	struct rtable *rt = (struct rtable *)dst;
@@ -1532,16 +1354,13 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
 		if (dst->obsolete > 0) {
 			ip_rt_put(rt);
 			ret = NULL;
-		} else if (rt->rt_flags & RTCF_REDIRECTED) {
+		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
+			   rt->dst.expires) {
 			unsigned int hash = rt_hash(rt->rt_key_dst, rt->rt_key_src,
 						rt->rt_oif,
 						rt_genid(dev_net(dst->dev)));
 			rt_del(hash, rt);
 			ret = NULL;
-		} else if (rt_has_peer(rt)) {
-			struct inet_peer *peer = rt_peer_ptr(rt);
-			if (peer_pmtu_expired(peer))
-				dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
 		}
 	}
 	return ret;
@@ -1568,6 +1387,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	struct rtable *rt = skb_rtable(skb);
 	struct in_device *in_dev;
 	struct inet_peer *peer;
+	struct net *net;
 	int log_martians;
 
 	rcu_read_lock();
@@ -1579,7 +1399,8 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
 	rcu_read_unlock();
 
-	peer = rt_get_peer_create(rt, rt->rt_dst);
+	net = dev_net(rt->dst.dev);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
 	if (!peer) {
 		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
 		return;
@@ -1596,7 +1417,7 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 	 */
 	if (peer->rate_tokens >= ip_rt_redirect_number) {
 		peer->rate_last = jiffies;
-		return;
+		goto out_put_peer;
 	}
 
 	/* Check for load limit; set rate_last to the latest sent
@@ -1617,16 +1438,34 @@ void ip_rt_send_redirect(struct sk_buff *skb)
 				     &rt->rt_dst, &rt->rt_gateway);
 #endif
 	}
+out_put_peer:
+	inet_putpeer(peer);
 }
 
 static int ip_error(struct sk_buff *skb)
 {
+	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
 	struct rtable *rt = skb_rtable(skb);
 	struct inet_peer *peer;
 	unsigned long now;
+	struct net *net;
 	bool send;
 	int code;
 
+	net = dev_net(rt->dst.dev);
+	if (!IN_DEV_FORWARD(in_dev)) {
+		switch (rt->dst.error) {
+		case EHOSTUNREACH:
+			IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
+			break;
+
+		case ENETUNREACH:
+			IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
+			break;
+		}
+		goto out;
+	}
+
 	switch (rt->dst.error) {
 	case EINVAL:
 	default:
@@ -1636,15 +1475,14 @@ static int ip_error(struct sk_buff *skb)
 		break;
 	case ENETUNREACH:
 		code = ICMP_NET_UNREACH;
-		IP_INC_STATS_BH(dev_net(rt->dst.dev),
-				IPSTATS_MIB_INNOROUTES);
+		IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
 		break;
 	case EACCES:
 		code = ICMP_PKT_FILTERED;
 		break;
 	}
 
-	peer = rt_get_peer_create(rt, rt->rt_dst);
+	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
 
 	send = true;
 	if (peer) {
@@ -1657,6 +1495,7 @@ static int ip_error(struct sk_buff *skb)
 			peer->rate_tokens -= ip_rt_error_cost;
 		else
 			send = false;
+		inet_putpeer(peer);
 	}
 	if (send)
 		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
@@ -1665,50 +1504,17 @@ out:	kfree_skb(skb);
 	return 0;
 }
 
-static void check_peer_pmtu(struct dst_entry *dst, struct inet_peer *peer)
-{
-	unsigned long expires = ACCESS_ONCE(peer->pmtu_expires);
-
-	if (!expires)
-		return;
-	if (time_before(jiffies, expires)) {
-		u32 orig_dst_mtu = dst_mtu(dst);
-		if (peer->pmtu_learned < orig_dst_mtu) {
-			if (!peer->pmtu_orig)
-				peer->pmtu_orig = dst_metric_raw(dst, RTAX_MTU);
-			dst_metric_set(dst, RTAX_MTU, peer->pmtu_learned);
-		}
-	} else if (cmpxchg(&peer->pmtu_expires, expires, 0) == expires)
-		dst_metric_set(dst, RTAX_MTU, peer->pmtu_orig);
-}
-
 static void ip_rt_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 	struct rtable *rt = (struct rtable *) dst;
-	struct inet_peer *peer;
 
 	dst_confirm(dst);
 
-	peer = rt_get_peer_create(rt, rt->rt_dst);
-	if (peer) {
-		unsigned long pmtu_expires = ACCESS_ONCE(peer->pmtu_expires);
-
-		if (mtu < ip_rt_min_pmtu)
-			mtu = ip_rt_min_pmtu;
-		if (!pmtu_expires || mtu < peer->pmtu_learned) {
-
-			pmtu_expires = jiffies + ip_rt_mtu_expires;
-			if (!pmtu_expires)
-				pmtu_expires = 1UL;
-
-			peer->pmtu_learned = mtu;
-			peer->pmtu_expires = pmtu_expires;
+	if (mtu < ip_rt_min_pmtu)
+		mtu = ip_rt_min_pmtu;
 
-			atomic_inc(&__rt_peer_genid);
-			rt->rt_peer_genid = rt_peer_genid();
-		}
-		check_peer_pmtu(dst, peer);
-	}
+	rt->rt_pmtu = mtu;
+	dst_set_expires(&rt->dst, ip_rt_mtu_expires);
 }
 
 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
@@ -1719,7 +1525,7 @@ void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
 	struct rtable *rt;
 
 	flowi4_init_output(&fl4, oif, mark, RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
-			   protocol, flow_flags | FLOWI_FLAG_PRECOW_METRICS,
+			   protocol, flow_flags,
 			   iph->daddr, iph->saddr, 0, 0);
 	rt = __ip_route_output_key(net, &fl4);
 	if (!IS_ERR(rt)) {
@@ -1740,22 +1546,33 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
-static void ipv4_validate_peer(struct rtable *rt)
+void ipv4_redirect(struct sk_buff *skb, struct net *net,
+		   int oif, u32 mark, u8 protocol, int flow_flags)
 {
-	if (rt->rt_peer_genid != rt_peer_genid()) {
-		struct inet_peer *peer = rt_get_peer(rt, rt->rt_dst);
+	const struct iphdr *iph = (const struct iphdr *)skb->data;
+	struct flowi4 fl4;
+	struct rtable *rt;
 
-		if (peer) {
-			check_peer_pmtu(&rt->dst, peer);
+	flowi4_init_output(&fl4, oif, mark, RT_TOS(iph->tos), RT_SCOPE_UNIVERSE,
+			   protocol, flow_flags, iph->daddr, iph->saddr, 0, 0);
+	rt = __ip_route_output_key(net, &fl4);
+	if (!IS_ERR(rt)) {
+		ip_do_redirect(&rt->dst, skb);
+		ip_rt_put(rt);
+	}
+}
+EXPORT_SYMBOL_GPL(ipv4_redirect);
 
-			if (peer->redirect_learned.a4 &&
-			    peer->redirect_learned.a4 != rt->rt_gateway)
-				check_peer_redir(&rt->dst, peer);
-		}
+void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
+{
+	const struct inet_sock *inet = inet_sk(sk);
 
-		rt->rt_peer_genid = rt_peer_genid();
-	}
+	return ipv4_redirect(skb, sock_net(sk), sk->sk_bound_dev_if,
+			     sk->sk_mark,
+			     inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
+			     inet_sk_flowi_flags(sk));
 }
+EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
 
 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 {
@@ -1763,7 +1580,6 @@ static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
 
 	if (rt_is_expired(rt))
 		return NULL;
-	ipv4_validate_peer(rt);
 	return dst;
 }
 
@@ -1775,10 +1591,6 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 		fib_info_put(rt->fi);
 		rt->fi = NULL;
 	}
-	if (rt_has_peer(rt)) {
-		struct inet_peer *peer = rt_peer_ptr(rt);
-		inet_putpeer(peer);
-	}
 }
 
@@ -1789,11 +1601,8 @@ static void ipv4_link_failure(struct sk_buff *skb)
 	icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
 
 	rt = skb_rtable(skb);
-	if (rt && rt_has_peer(rt)) {
-		struct inet_peer *peer = rt_peer_ptr(rt);
-		if (peer_pmtu_cleaned(peer))
-			dst_metric_set(&rt->dst, RTAX_MTU, peer->pmtu_orig);
-	}
+	if (rt)
+		dst_set_expires(&rt->dst, 0);
 }
 
 static int ip_rt_bug(struct sk_buff *skb)
@@ -1873,7 +1682,13 @@ static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
 static unsigned int ipv4_mtu(const struct dst_entry *dst)
 {
 	const struct rtable *rt = (const struct rtable *) dst;
-	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
+	unsigned int mtu = rt->rt_pmtu;
+
+	if (mtu && time_after_eq(jiffies, rt->dst.expires))
+		mtu = 0;
+
+	if (!mtu)
+		mtu = dst_metric_raw(dst, RTAX_MTU);
 
 	if (mtu && rt_is_output_route(rt))
 		return mtu;
@@ -1895,63 +1710,27 @@ static unsigned int ipv4_mtu(const struct dst_entry *dst)
 static void rt_init_metrics(struct rtable *rt, const struct flowi4 *fl4,
 			    struct fib_info *fi)
 {
-	struct inet_peer_base *base;
-	struct inet_peer *peer;
-	int create = 0;
-
-	/* If a peer entry exists for this destination, we must hook
-	 * it up in order to get at cached metrics.
-	 */
-	if (fl4 && (fl4->flowi4_flags & FLOWI_FLAG_PRECOW_METRICS))
-		create = 1;
-
-	base = inetpeer_base_ptr(rt->_peer);
-	BUG_ON(!base);
-
-	peer = inet_getpeer_v4(base, rt->rt_dst, create);
-	if (peer) {
-		__rt_set_peer(rt, peer);
-		rt->rt_peer_genid = rt_peer_genid();
-		if (inet_metrics_new(peer))
-			memcpy(peer->metrics, fi->fib_metrics,
-			       sizeof(u32) * RTAX_MAX);
-		dst_init_metrics(&rt->dst, peer->metrics, false);
-
-		check_peer_pmtu(&rt->dst, peer);
-
-		if (peer->redirect_learned.a4 &&
-		    peer->redirect_learned.a4 != rt->rt_gateway) {
-			rt->rt_gateway = peer->redirect_learned.a4;
-			rt->rt_flags |= RTCF_REDIRECTED;
-		}
-	} else {
-		if (fi->fib_metrics != (u32 *) dst_default_metrics) {
-			rt->fi = fi;
-			atomic_inc(&fi->fib_clntref);
-		}
-		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
+	if (fi->fib_metrics != (u32 *) dst_default_metrics) {
+		rt->fi = fi;
+		atomic_inc(&fi->fib_clntref);
 	}
+	dst_init_metrics(&rt->dst, fi->fib_metrics, true);
 }
 
 static void rt_set_nexthop(struct rtable *rt, const struct flowi4 *fl4,
 			   const struct fib_result *res,
 			   struct fib_info *fi, u16 type, u32 itag)
 {
-	struct dst_entry *dst = &rt->dst;
-
 	if (fi) {
 		if (FIB_RES_GW(*res) &&
 		    FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
 			rt->rt_gateway = FIB_RES_GW(*res);
 		rt_init_metrics(rt, fl4, fi);
 #ifdef CONFIG_IP_ROUTE_CLASSID
-		dst->tclassid = FIB_RES_NH(*res).nh_tclassid;
+		rt->dst.tclassid = FIB_RES_NH(*res).nh_tclassid;
 #endif
 	}
 
-	if (dst_mtu(dst) > IP_MAX_MTU)
-		dst_metric_set(dst, RTAX_MTU, IP_MAX_MTU);
-
 #ifdef CONFIG_IP_ROUTE_CLASSID
 #ifdef CONFIG_IP_MULTIPLE_TABLES
 	set_class_tag(rt, fib_rules_tclass(res));
@@ -1975,7 +1754,6 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 {
 	unsigned int hash;
 	struct rtable *rth;
-	__be32 spec_dst;
 	struct in_device *in_dev = __in_dev_get_rcu(dev);
 	u32 itag = 0;
 	int err;
@@ -1996,10 +1774,9 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (ipv4_is_zeronet(saddr)) {
 		if (!ipv4_is_local_multicast(daddr))
 			goto e_inval;
-		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
 	} else {
-		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
-					  &itag);
+		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+					  in_dev, &itag);
 		if (err < 0)
 			goto e_err;
 	}
@@ -2025,10 +1802,8 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	rth->rt_iif = dev->ifindex;
 	rth->rt_oif = 0;
 	rth->rt_mark = skb->mark;
+	rth->rt_pmtu = 0;
 	rth->rt_gateway = daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_peer_genid = 0;
-	rt_init_peer(rth, dev_net(dev)->ipv4.peers);
 	rth->fi = NULL;
 	if (our) {
 		rth->dst.input= ip_local_deliver;
@@ -2090,7 +1865,6 @@ static int __mkroute_input(struct sk_buff *skb,
 	int err;
 	struct in_device *out_dev;
 	unsigned int flags = 0;
-	__be32 spec_dst;
 	u32 itag;
 
 	/* get a working reference to the output device */
@@ -2102,7 +1876,7 @@ static int __mkroute_input(struct sk_buff *skb,
 
 
 	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
-				  in_dev->dev, &spec_dst, &itag);
+				  in_dev->dev, in_dev, &itag);
 	if (err < 0) {
 		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
 					 saddr);
@@ -2153,10 +1927,8 @@ static int __mkroute_input(struct sk_buff *skb,
 	rth->rt_iif = in_dev->dev->ifindex;
 	rth->rt_oif = 0;
 	rth->rt_mark = skb->mark;
+	rth->rt_pmtu = 0;
 	rth->rt_gateway = daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_peer_genid = 0;
-	rt_init_peer(rth, &res->table->tb_peers);
 	rth->fi = NULL;
 
 	rth->dst.input = ip_forward;
@@ -2220,7 +1992,6 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	u32 itag = 0;
 	struct rtable *rth;
 	unsigned int hash;
-	__be32 spec_dst;
 	int err = -EINVAL;
 	struct net *net = dev_net(dev);
 
@@ -2267,11 +2038,8 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	fl4.daddr = daddr;
 	fl4.saddr = saddr;
 	err = fib_lookup(net, &fl4, &res);
-	if (err != 0) {
-		if (!IN_DEV_FORWARD(in_dev))
-			goto e_hostunreach;
+	if (err != 0)
 		goto no_route;
-	}
 
 	RT_CACHE_STAT_INC(in_slow_tot);
 
@@ -2281,17 +2049,16 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 	if (res.type == RTN_LOCAL) {
 		err = fib_validate_source(skb, saddr, daddr, tos,
 					  net->loopback_dev->ifindex,
-					  dev, &spec_dst, &itag);
+					  dev, in_dev, &itag);
 		if (err < 0)
 			goto martian_source_keep_err;
 		if (err)
 			flags |= RTCF_DIRECTSRC;
-		spec_dst = daddr;
 		goto local_input;
 	}
 
 	if (!IN_DEV_FORWARD(in_dev))
-		goto e_hostunreach;
+		goto no_route;
 	if (res.type != RTN_UNICAST)
 		goto martian_destination;
 
@@ -2302,11 +2069,9 @@ brd_input:
 	if (skb->protocol != htons(ETH_P_IP))
 		goto e_inval;
 
-	if (ipv4_is_zeronet(saddr))
-		spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
-	else {
-		err = fib_validate_source(skb, saddr, 0, tos, 0, dev, &spec_dst,
-					  &itag);
+	if (!ipv4_is_zeronet(saddr)) {
+		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
+					  in_dev, &itag);
 		if (err < 0)
 			goto martian_source_keep_err;
 		if (err)
@@ -2336,17 +2101,12 @@ local_input:
 	rth->rt_key_tos = tos;
 	rth->rt_dst = daddr;
 	rth->rt_src = saddr;
-#ifdef CONFIG_IP_ROUTE_CLASSID
-	rth->dst.tclassid = itag;
-#endif
 	rth->rt_route_iif = dev->ifindex;
 	rth->rt_iif = dev->ifindex;
 	rth->rt_oif = 0;
 	rth->rt_mark = skb->mark;
+	rth->rt_pmtu = 0;
 	rth->rt_gateway = daddr;
-	rth->rt_spec_dst= spec_dst;
-	rth->rt_peer_genid = 0;
-	rt_init_peer(rth, net->ipv4.peers);
 	rth->fi = NULL;
 	if (res.type == RTN_UNREACHABLE) {
 		rth->dst.input= ip_error;
@@ -2362,7 +2122,6 @@ local_input:
 
 no_route:
 	RT_CACHE_STAT_INC(in_no_route);
-	spec_dst = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE);
 	res.type = RTN_UNREACHABLE;
 	if (err == -ESRCH)
 		err = -ENETUNREACH;
@@ -2379,10 +2138,6 @@ martian_destination:
 			     &daddr, &saddr, dev->name);
 #endif
 
-e_hostunreach:
-	err = -EHOSTUNREACH;
-	goto out;
-
 e_inval:
 	err = -EINVAL;
 	goto out;
@@ -2426,7 +2181,6 @@ int ip_route_input_common(struct sk_buff *skb, __be32 daddr, __be32 saddr,
 		    rth->rt_mark == skb->mark &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
-			ipv4_validate_peer(rth);
 			if (noref) {
 				dst_use_noref(&rth->dst, jiffies);
 				skb_dst_set_noref(skb, &rth->dst);
@@ -2548,22 +2302,15 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 	rth->rt_iif = orig_oif ? : dev_out->ifindex;
 	rth->rt_oif = orig_oif;
 	rth->rt_mark = fl4->flowi4_mark;
+	rth->rt_pmtu = 0;
 	rth->rt_gateway = fl4->daddr;
-	rth->rt_spec_dst= fl4->saddr;
-	rth->rt_peer_genid = 0;
-	rt_init_peer(rth, (res->table ?
-			   &res->table->tb_peers :
-			   dev_net(dev_out)->ipv4.peers));
 	rth->fi = NULL;
 
 	RT_CACHE_STAT_INC(out_slow_tot);
 
-	if (flags & RTCF_LOCAL) {
+	if (flags & RTCF_LOCAL)
 		rth->dst.input = ip_local_deliver;
-		rth->rt_spec_dst = fl4->daddr;
-	}
 	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
-		rth->rt_spec_dst = fl4->saddr;
 		if (flags & RTCF_LOCAL &&
 		    !(dev_out->flags & IFF_LOOPBACK)) {
 			rth->dst.output = ip_mc_output;
@@ -2810,7 +2557,6 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *flp4)
 		    (IPTOS_RT_MASK | RTO_ONLINK)) &&
 		    net_eq(dev_net(rth->dst.dev), net) &&
 		    !rt_is_expired(rth)) {
-			ipv4_validate_peer(rth);
 			dst_use(&rth->dst, jiffies);
 			RT_CACHE_STAT_INC(out_hit);
 			rcu_read_unlock_bh();
@@ -2845,6 +2591,10 @@ static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, u32 mtu)
 {
 }
 
+static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sk_buff *skb)
+{
+}
+
 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
 					  unsigned long old)
 {
@@ -2859,6 +2609,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = {
 	.mtu = ipv4_blackhole_mtu,
 	.default_advmss = ipv4_default_advmss,
 	.update_pmtu = ipv4_rt_blackhole_update_pmtu,
+	.redirect = ipv4_rt_blackhole_redirect,
 	.cow_metrics = ipv4_rt_blackhole_cow_metrics,
 	.neigh_lookup = ipv4_neigh_lookup,
 };
@@ -2874,7 +2625,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 		new->__use = 1;
 		new->input = dst_discard;
 		new->output = dst_discard;
-		dst_copy_metrics(new, &ort->dst);
 
 		new->dev = ort->dst.dev;
 		if (new->dev)
@@ -2887,6 +2637,7 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 		rt->rt_iif = ort->rt_iif;
 		rt->rt_oif = ort->rt_oif;
 		rt->rt_mark = ort->rt_mark;
+		rt->rt_pmtu = ort->rt_pmtu;
 
 		rt->rt_genid = rt_genid(net);
 		rt->rt_flags = ort->rt_flags;
@@ -2894,8 +2645,6 @@ struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_or
 		rt->rt_dst = ort->rt_dst;
 		rt->rt_src = ort->rt_src;
 		rt->rt_gateway = ort->rt_gateway;
-		rt->rt_spec_dst = ort->rt_spec_dst;
-		rt_transfer_peer(rt, ort);
 		rt->fi = ort->fi;
 		if (rt->fi)
 			atomic_inc(&rt->fi->fib_clntref);
@@ -2933,7 +2682,7 @@ static int rt_fill_info(struct net *net,
 	struct rtmsg *r;
 	struct nlmsghdr *nlh;
 	unsigned long expires = 0;
-	u32 id = 0, ts = 0, tsage = 0, error;
+	u32 error;
 
 	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
 	if (nlh == NULL)
@@ -2969,10 +2718,8 @@ static int rt_fill_info(struct net *net,
 	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
 		goto nla_put_failure;
 #endif
-	if (rt_is_input_route(rt)) {
-		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_spec_dst))
-			goto nla_put_failure;
-	} else if (rt->rt_src != rt->rt_key_src) {
+	if (!rt_is_input_route(rt) &&
+	    rt->rt_src != rt->rt_key_src) {
 		if (nla_put_be32(skb, RTA_PREFSRC, rt->rt_src))
 			goto nla_put_failure;
 	}
@@ -2988,21 +2735,12 @@ static int rt_fill_info(struct net *net,
 			goto nla_put_failure;
 
 	error = rt->dst.error;
-	if (rt_has_peer(rt)) {
-		const struct inet_peer *peer = rt_peer_ptr(rt);
-		inet_peer_refcheck(peer);
-		id = atomic_read(&peer->ip_id_count) & 0xffff;
-		if (peer->tcp_ts_stamp) {
-			ts = peer->tcp_ts;
-			tsage = get_seconds() - peer->tcp_ts_stamp;
-		}
-		expires = ACCESS_ONCE(peer->pmtu_expires);
-		if (expires) {
-			if (time_before(jiffies, expires))
-				expires -= jiffies;
-			else
-				expires = 0;
-		}
+	expires = rt->dst.expires;
+	if (expires) {
+		if (time_before(jiffies, expires))
+			expires -= jiffies;
+		else
+			expires = 0;
 	}
 
 	if (rt_is_input_route(rt)) {
@@ -3031,8 +2769,7 @@ static int rt_fill_info(struct net *net,
 			goto nla_put_failure;
 	}
 
-	if (rtnl_put_cacheinfo(skb, &rt->dst, id, ts, tsage,
-			       expires, error) < 0)
+	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);