& rt_hash_mask);
}
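+/*
+ * Fold the interface index into the source-address half of the key and
+ * hand both words to rt_hash_code(), so callers stop open-coding
+ * "saddr ^ (idx << 5)" at every hash site.
+ */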
+#define rt_hash(daddr, saddr, idx) \
+ rt_hash_code((__force u32)(__be32)(daddr),\
+ (__force u32)(__be32)(saddr) ^ ((idx) << 5))
+
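The recurring change in this patch is annotating addresses as __be32 so sparse can flag endianness mix-ups, with __force used where a reinterpretation is deliberate (as in the rt_hash() casts above). A minimal standalone sketch of that mechanism, not part of the patch, assuming a userspace build where the simplified names be32/u32 stand in for the kernel's __be32/__u32 and the attributes collapse to no-ops when sparse is not checking:

/* Sketch of the sparse endianness annotations used throughout this patch. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int u32;
typedef u32 __bitwise be32;	/* stand-in for the kernel's __be32 */

static be32 make_be32(u32 host)
{
	/* Without __force, sparse warns about converting a plain integer
	 * to a bitwise type; __force marks the reinterpretation as intended. */
	return (__force be32)host;
}

int main(void)
{
	be32 daddr = make_be32(0x0a000001);
	(void)daddr;
	return 0;
}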
#ifdef CONFIG_PROC_FS
struct rt_cache_iter_state {
int bucket;
static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
- return memcmp(&fl1->nl_u.ip4_u, &fl2->nl_u.ip4_u, sizeof(fl1->nl_u.ip4_u)) == 0 &&
- fl1->oif == fl2->oif &&
- fl1->iif == fl2->iif;
+ return ((__force u32)((fl1->nl_u.ip4_u.daddr ^ fl2->nl_u.ip4_u.daddr) |
+ (fl1->nl_u.ip4_u.saddr ^ fl2->nl_u.ip4_u.saddr)) |
+ (fl1->mark ^ fl2->mark) |
+ (*(u16 *)&fl1->nl_u.ip4_u.tos ^
+ *(u16 *)&fl2->nl_u.ip4_u.tos) |
+ (fl1->oif ^ fl2->oif) |
+ (fl1->iif ^ fl2->iif)) == 0;
}
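The rewritten compare_keys() XORs each pair of fields, ORs the results together, and tests once against zero, so a full key comparison compiles to straight-line code with a single branch. A small userspace sketch of the same idiom, using an illustrative struct rather than the kernel's struct flowi:

/* OR-of-XORs comparison: the OR is zero only if every field matched. */
#include <stdint.h>
#include <stdio.h>

struct key {
	uint32_t daddr;
	uint32_t saddr;
	uint32_t mark;
	int	 oif;
	int	 iif;
};

static int keys_equal(const struct key *a, const struct key *b)
{
	return ((a->daddr ^ b->daddr) |
		(a->saddr ^ b->saddr) |
		(a->mark  ^ b->mark)  |
		(uint32_t)(a->oif ^ b->oif) |
		(uint32_t)(a->iif ^ b->iif)) == 0;
}

int main(void)
{
	struct key k1 = { 1, 2, 0, 3, 0 };
	struct key k2 = k1;
	struct key k3 = { 9, 2, 0, 3, 0 };

	printf("%d %d\n", keys_equal(&k1, &k2), keys_equal(&k1, &k3));
	return 0;
}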
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
u32 salt;
spin_lock_bh(&ip_fb_id_lock);
- salt = secure_ip_id(ip_fallback_id ^ iph->daddr);
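+ /* ip_fallback_id stays a host-order u32; the __force cast makes the
+ * mixed-endianness XOR explicit for sparse before passing the result
+ * to secure_ip_id(). */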
+ salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
iph->id = htons(salt & 0xFFFF);
ip_fallback_id = salt;
spin_unlock_bh(&ip_fb_id_lock);
spin_unlock_bh(rt_hash_lock_addr(hash));
}
-void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
- u32 saddr, struct net_device *dev)
+void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
+ __be32 saddr, struct net_device *dev)
{
int i, k;
struct in_device *in_dev = in_dev_get(dev);
struct rtable *rth, **rthp;
- u32 skeys[2] = { saddr, 0 };
+ __be32 skeys[2] = { saddr, 0 };
int ikeys[2] = { dev->ifindex, 0 };
struct netevent_redirect netevent;
for (i = 0; i < 2; i++) {
for (k = 0; k < 2; k++) {
- unsigned hash = rt_hash_code(daddr,
- skeys[i] ^ (ikeys[k] << 5));
+ unsigned hash = rt_hash(daddr, skeys[i], ikeys[k]);
rthp=&rt_hash_table[hash].chain;
ret = NULL;
} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
rt->u.dst.expires) {
- unsigned hash = rt_hash_code(rt->fl.fl4_dst,
- rt->fl.fl4_src ^
- (rt->fl.oif << 5));
+ unsigned hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+ rt->fl.oif);
#if RT_CACHE_DEBUG >= 1
printk(KERN_DEBUG "ip_rt_advice: redirect to "
"%u.%u.%u.%u/%02x dropped\n",
int i;
unsigned short old_mtu = ntohs(iph->tot_len);
struct rtable *rth;
- u32 skeys[2] = { iph->saddr, 0, };
- u32 daddr = iph->daddr;
+ __be32 skeys[2] = { iph->saddr, 0, };
+ __be32 daddr = iph->daddr;
unsigned short est_mtu = 0;
if (ipv4_config.no_pmtu_disc)
return 0;
for (i = 0; i < 2; i++) {
- unsigned hash = rt_hash_code(daddr, skeys[i]);
+ unsigned hash = rt_hash(daddr, skeys[i], 0);
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
void ip_rt_get_source(u8 *addr, struct rtable *rt)
{
- u32 src;
+ __be32 src;
struct fib_result res;
if (rt->fl.iif == 0)
rt->rt_type = res->type;
}
-static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
+static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev, int our)
{
unsigned hash;
struct rtable *rth;
- u32 spec_dst;
+ __be32 spec_dst;
struct in_device *in_dev = in_dev_get(dev);
u32 itag = 0;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= skb->nfmark;
-#endif
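+ /* skb->mark replaces the CONFIG_IP_ROUTE_FWMARK-only nfmark, so the
+ * flow mark is recorded unconditionally from here on. */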
+ rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
RT_CACHE_STAT_INC(in_slow_mc);
in_dev_put(in_dev);
- hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
+ hash = rt_hash(daddr, saddr, dev->ifindex);
return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
e_nobufs:
static void ip_handle_martian_source(struct net_device *dev,
struct in_device *in_dev,
struct sk_buff *skb,
- u32 daddr,
- u32 saddr)
+ __be32 daddr,
+ __be32 saddr)
{
RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
static inline int __mkroute_input(struct sk_buff *skb,
struct fib_result* res,
struct in_device *in_dev,
- u32 daddr, u32 saddr, u32 tos,
+ __be32 daddr, __be32 saddr, u32 tos,
struct rtable **result)
{
int err;
struct in_device *out_dev;
unsigned flags = 0;
- u32 spec_dst, itag;
+ __be32 spec_dst;
+ u32 itag;
/* get a working reference to the output device */
out_dev = in_dev_get(FIB_RES_DEV(*res));
#endif
if (in_dev->cnf.no_policy)
rth->u.dst.flags |= DST_NOPOLICY;
- if (in_dev->cnf.no_xfrm)
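+ /* DST_NOXFRM is governed by the device the packet will leave through,
+ * so consult out_dev here rather than in_dev. */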
+ if (out_dev->cnf.no_xfrm)
rth->u.dst.flags |= DST_NOXFRM;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+ rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
rth->rt_gateway = daddr;
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
- u32 daddr, u32 saddr, u32 tos)
+ __be32 daddr, __be32 saddr, u32 tos)
{
struct rtable* rth = NULL;
int err;
return err;
/* put it into the cache */
- hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
+ hash = rt_hash(daddr, saddr, fl->iif);
return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
}
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
- u32 daddr, u32 saddr, u32 tos)
+ __be32 daddr, __be32 saddr, u32 tos)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
struct rtable* rth = NULL, *rtres;
return err;
/* put it into the cache */
- hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
+ hash = rt_hash(daddr, saddr, fl->iif);
err = rt_intern_hash(hash, rth, &rtres);
if (err)
return err;
* 2. IP spoofing attempts are filtered with 100% of guarantee.
*/
-static int ip_route_input_slow(struct sk_buff *skb, u32 daddr, u32 saddr,
+static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct fib_result res;
.saddr = saddr,
.tos = tos,
.scope = RT_SCOPE_UNIVERSE,
-#ifdef CONFIG_IP_ROUTE_FWMARK
- .fwmark = skb->nfmark
-#endif
} },
+ .mark = skb->mark,
.iif = dev->ifindex };
unsigned flags = 0;
u32 itag = 0;
struct rtable * rth;
unsigned hash;
- u32 spec_dst;
+ __be32 spec_dst;
int err = -EINVAL;
int free_res = 0;
if (MULTICAST(saddr) || BADCLASS(saddr) || LOOPBACK(saddr))
goto martian_source;
- if (daddr == 0xFFFFFFFF || (saddr == 0 && daddr == 0))
+ if (daddr == htonl(0xFFFFFFFF) || (saddr == 0 && daddr == 0))
goto brd_input;
/* Accept zero addresses only to limited broadcast;
rth->fl.fl4_dst = daddr;
rth->rt_dst = daddr;
rth->fl.fl4_tos = tos;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= skb->nfmark;
-#endif
+ rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
rth->rt_flags &= ~RTCF_LOCAL;
}
rth->rt_type = res.type;
- hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
+ hash = rt_hash(daddr, saddr, fl.iif);
err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
goto done;
goto e_inval;
}
-int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
+int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
u8 tos, struct net_device *dev)
{
struct rtable * rth;
int iif = dev->ifindex;
tos &= IPTOS_RT_MASK;
- hash = rt_hash_code(daddr, saddr ^ (iif << 5));
+ hash = rt_hash(daddr, saddr, iif);
rcu_read_lock();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
rth->fl.fl4_src == saddr &&
rth->fl.iif == iif &&
rth->fl.oif == 0 &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark == skb->nfmark &&
-#endif
+ rth->fl.mark == skb->mark &&
rth->fl.fl4_tos == tos) {
rth->u.dst.lastuse = jiffies;
dst_hold(&rth->u.dst);
if (LOOPBACK(fl->fl4_src) && !(dev_out->flags&IFF_LOOPBACK))
return -EINVAL;
- if (fl->fl4_dst == 0xFFFFFFFF)
+ if (fl->fl4_dst == htonl(0xFFFFFFFF))
res->type = RTN_BROADCAST;
else if (MULTICAST(fl->fl4_dst))
res->type = RTN_MULTICAST;
rth->fl.fl4_tos = tos;
rth->fl.fl4_src = oldflp->fl4_src;
rth->fl.oif = oldflp->oif;
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark= oldflp->fl4_fwmark;
-#endif
+ rth->fl.mark = oldflp->mark;
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
rth->rt_iif = oldflp->oif ? : dev_out->ifindex;
int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
unsigned hash;
if (err == 0) {
- hash = rt_hash_code(oldflp->fl4_dst,
- oldflp->fl4_src ^ (oldflp->oif << 5));
+ hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif);
err = rt_intern_hash(hash, rth, rp);
}
if (err != 0)
goto cleanup;
- hash = rt_hash_code(oldflp->fl4_dst,
- oldflp->fl4_src ^
- (oldflp->oif << 5));
+ hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src,
+ oldflp->oif);
err = rt_intern_hash(hash, rth, rp);
/* forward hop information to multipath impl. */
.scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK :
RT_SCOPE_UNIVERSE),
-#ifdef CONFIG_IP_ROUTE_FWMARK
- .fwmark = oldflp->fl4_fwmark
-#endif
} },
+ .mark = oldflp->mark,
.iif = loopback_dev.ifindex,
.oif = oldflp->oif };
struct fib_result res;
*/
if (oldflp->oif == 0
- && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF)) {
+ && (MULTICAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF))) {
/* Special hack: user can direct multicasts
and limited broadcast via necessary interface
without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
goto out; /* Wrong error code */
}
- if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == 0xFFFFFFFF) {
+ if (LOCAL_MCAST(oldflp->fl4_dst) || oldflp->fl4_dst == htonl(0xFFFFFFFF)) {
if (!fl.fl4_src)
fl.fl4_src = inet_select_addr(dev_out, 0,
RT_SCOPE_LINK);
unsigned hash;
struct rtable *rth;
- hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
+ hash = rt_hash(flp->fl4_dst, flp->fl4_src, flp->oif);
rcu_read_lock_bh();
for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
rth->fl.oif == flp->oif &&
-#ifdef CONFIG_IP_ROUTE_FWMARK
- rth->fl.fl4_fwmark == flp->fl4_fwmark &&
-#endif
+ rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK))) {
struct rtable *rt = (struct rtable*)skb->dst;
struct rtmsg *r;
struct nlmsghdr *nlh;
- struct rta_cacheinfo ci;
+ long expires;
+ u32 id = 0, ts = 0, tsage = 0, error;
nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
if (nlh == NULL)
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
- NLA_PUT_U32(skb, RTA_DST, rt->rt_dst);
+ NLA_PUT_BE32(skb, RTA_DST, rt->rt_dst);
if (rt->fl.fl4_src) {
r->rtm_src_len = 32;
- NLA_PUT_U32(skb, RTA_SRC, rt->fl.fl4_src);
+ NLA_PUT_BE32(skb, RTA_SRC, rt->fl.fl4_src);
}
if (rt->u.dst.dev)
NLA_PUT_U32(skb, RTA_OIF, rt->u.dst.dev->ifindex);
NLA_PUT_U32(skb, RTA_MP_ALGO, rt->rt_multipath_alg);
#endif
if (rt->fl.iif)
- NLA_PUT_U32(skb, RTA_PREFSRC, rt->rt_spec_dst);
+ NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_spec_dst);
else if (rt->rt_src != rt->fl.fl4_src)
- NLA_PUT_U32(skb, RTA_PREFSRC, rt->rt_src);
+ NLA_PUT_BE32(skb, RTA_PREFSRC, rt->rt_src);
if (rt->rt_dst != rt->rt_gateway)
- NLA_PUT_U32(skb, RTA_GATEWAY, rt->rt_gateway);
+ NLA_PUT_BE32(skb, RTA_GATEWAY, rt->rt_gateway);
if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
goto nla_put_failure;
- ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
- ci.rta_used = rt->u.dst.__use;
- ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
- if (rt->u.dst.expires)
- ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
- else
- ci.rta_expires = 0;
- ci.rta_error = rt->u.dst.error;
- ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
+ error = rt->u.dst.error;
+ expires = rt->u.dst.expires ? rt->u.dst.expires - jiffies : 0;
if (rt->peer) {
- ci.rta_id = rt->peer->ip_id_count;
+ id = rt->peer->ip_id_count;
if (rt->peer->tcp_ts_stamp) {
- ci.rta_ts = rt->peer->tcp_ts;
- ci.rta_tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
+ ts = rt->peer->tcp_ts;
+ tsage = xtime.tv_sec - rt->peer->tcp_ts_stamp;
}
}
if (rt->fl.iif) {
#ifdef CONFIG_IP_MROUTE
- u32 dst = rt->rt_dst;
+ __be32 dst = rt->rt_dst;
if (MULTICAST(dst) && !LOCAL_MCAST(dst) &&
ipv4_devconf.mc_forwarding) {
} else {
if (err == -EMSGSIZE)
goto nla_put_failure;
- ci.rta_error = err;
+ error = err;
}
}
} else
NLA_PUT_U32(skb, RTA_IIF, rt->fl.iif);
}
- NLA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
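+ /* rtnl_put_cacheinfo() builds the RTA_CACHEINFO attribute from the dst
+ * plus the id/ts/tsage/expires/error values collected above. */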
+ if (rtnl_put_cacheinfo(skb, &rt->u.dst, id, ts, tsage,
+ expires, error) < 0)
+ goto nla_put_failure;
return nlmsg_end(skb, nlh);
struct rtmsg *rtm;
struct nlattr *tb[RTA_MAX+1];
struct rtable *rt = NULL;
- u32 dst, src, iif;
+ __be32 dst = 0;
+ __be32 src = 0;
+ u32 iif;
int err;
struct sk_buff *skb;
skb->nh.iph->protocol = IPPROTO_ICMP;
skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
- src = tb[RTA_SRC] ? nla_get_u32(tb[RTA_SRC]) : 0;
- dst = tb[RTA_DST] ? nla_get_u32(tb[RTA_DST]) : 0;
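+ /* Addresses are carried as __be32 end to end; the _be32 attribute
+ * helpers avoid a byte-order conversion (and a sparse warning) here. */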
+ src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
+ dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
if (iif) {