diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7768d718e19989d0d498e56c6e08c49fb20c6c58..139799f8a8a125f9113e7fed020c87ac0947f4e8 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -259,16 +259,14 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
 #define RT_CACHE_STAT_INC(field) \
        (__raw_get_cpu_var(rt_cache_stat).field++)
 
-static unsigned int rt_hash_code(u32 daddr, u32 saddr)
+static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx)
 {
-       return jhash_2words(daddr, saddr, atomic_read(&rt_genid))
+       return jhash_3words((__force u32)(__be32)(daddr),
+                           (__force u32)(__be32)(saddr),
+                           idx, atomic_read(&rt_genid))
                & rt_hash_mask;
 }
 
-#define rt_hash(daddr, saddr, idx) \
-       rt_hash_code((__force u32)(__be32)(daddr),\
-                    (__force u32)(__be32)(saddr) ^ ((idx) << 5))
-
 #ifdef CONFIG_PROC_FS
 struct rt_cache_iter_state {
        struct seq_net_private p;
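The hunk above replaces the rt_hash_code() helper and its wrapper macro with a single typed inline, feeding the bucket index to the hash as a third word instead of XOR-shifting it into the source address. A minimal userspace sketch of that shape (the be32 typedef and toy_hash3() mixer are illustrative assumptions, not the kernel's jhash_3words()):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t be32;		/* stand-in for the kernel's __be32 */

/* toy mixer, only so the sketch runs; the kernel uses jhash_3words() */
static unsigned int toy_hash3(uint32_t a, uint32_t b, uint32_t c,
			      uint32_t initval)
{
	return (a * 2654435761u) ^ (b * 40503u) ^ (c * 9973u) ^ initval;
}

/* same shape as the patched rt_hash(): daddr, saddr and the index are
 * three separate hash inputs, and the generation id seeds the hash */
static inline unsigned int rt_hash_sketch(be32 daddr, be32 saddr, int idx,
					  uint32_t genid, unsigned int mask)
{
	return toy_hash3((uint32_t)daddr, (uint32_t)saddr, (uint32_t)idx,
			 genid) & mask;
}

int main(void)
{
	printf("bucket %u\n",
	       rt_hash_sketch(0x0a000001, 0x0a000002, 3, 42, 0xff));
	return 0;
}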
@@ -276,15 +274,16 @@ struct rt_cache_iter_state {
        int genid;
 };
 
-static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
+static struct rtable *rt_cache_get_first(struct seq_file *seq)
 {
+       struct rt_cache_iter_state *st = seq->private;
        struct rtable *r = NULL;
 
        for (st->bucket = rt_hash_mask; st->bucket >= 0; --st->bucket) {
                rcu_read_lock_bh();
                r = rcu_dereference(rt_hash_table[st->bucket].chain);
                while (r) {
-                       if (dev_net(r->u.dst.dev) == st->p.net &&
+                       if (dev_net(r->u.dst.dev) == seq_file_net(seq) &&
                            r->rt_genid == st->genid)
                                return r;
                        r = rcu_dereference(r->u.dst.rt_next);
@@ -294,9 +293,10 @@ static struct rtable *rt_cache_get_first(struct rt_cache_iter_state *st)
        return r;
 }
 
-static struct rtable *__rt_cache_get_next(struct rt_cache_iter_state *st,
+static struct rtable *__rt_cache_get_next(struct seq_file *seq,
                                          struct rtable *r)
 {
+       struct rt_cache_iter_state *st = seq->private;
        r = r->u.dst.rt_next;
        while (!r) {
                rcu_read_unlock_bh();
@@ -308,11 +308,12 @@ static struct rtable *__rt_cache_get_next(struct rt_cache_iter_state *st,
        return rcu_dereference(r);
 }
 
-static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st,
+static struct rtable *rt_cache_get_next(struct seq_file *seq,
                                        struct rtable *r)
 {
-       while ((r = __rt_cache_get_next(st, r)) != NULL) {
-               if (dev_net(r->u.dst.dev) != st->p.net)
+       struct rt_cache_iter_state *st = seq->private;
+       while ((r = __rt_cache_get_next(seq, r)) != NULL) {
+               if (dev_net(r->u.dst.dev) != seq_file_net(seq))
                        continue;
                if (r->rt_genid == st->genid)
                        break;
@@ -320,12 +321,12 @@ static struct rtable *rt_cache_get_next(struct rt_cache_iter_state *st,
        return r;
 }
 
-static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t pos)
+static struct rtable *rt_cache_get_idx(struct seq_file *seq, loff_t pos)
 {
-       struct rtable *r = rt_cache_get_first(st);
+       struct rtable *r = rt_cache_get_first(seq);
 
        if (r)
-               while (pos && (r = rt_cache_get_next(st, r)))
+               while (pos && (r = rt_cache_get_next(seq, r)))
                        --pos;
        return pos ? NULL : r;
 }
@@ -333,9 +334,8 @@ static struct rtable *rt_cache_get_idx(struct rt_cache_iter_state *st, loff_t po
 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 {
        struct rt_cache_iter_state *st = seq->private;
-
        if (*pos)
-               return rt_cache_get_idx(st, *pos - 1);
+               return rt_cache_get_idx(seq, *pos - 1);
        st->genid = atomic_read(&rt_genid);
        return SEQ_START_TOKEN;
 }
@@ -343,12 +343,11 @@ static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
        struct rtable *r;
-       struct rt_cache_iter_state *st = seq->private;
 
        if (v == SEQ_START_TOKEN)
-               r = rt_cache_get_first(st);
+               r = rt_cache_get_first(seq);
        else
-               r = rt_cache_get_next(st, v);
+               r = rt_cache_get_next(seq, v);
        ++*pos;
        return r;
 }
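The seq_file hunks above change the iterator helpers to take the struct seq_file itself; the per-walk state is still reached through seq->private and the owning namespace through seq_file_net(seq), so the iterator no longer needs its own cached net pointer. A small userspace sketch of that layering (the struct definitions and seq_file_net_sketch() below are stand-ins, not the real <linux/seq_file.h> API):

#include <stdio.h>

/* stand-in structures; the real ones live in <linux/seq_file.h> and
 * <net/net_namespace.h> */
struct net { int id; };
struct seq_file { void *private; struct net *net; };
struct iter_state { int bucket; int genid; };

/* analogue of seq_file_net(): recover the namespace from the seq_file */
static struct net *seq_file_net_sketch(struct seq_file *seq)
{
	return seq->net;
}

/* analogue of the patched helpers: take the seq_file, derive the rest */
static void show_bucket(struct seq_file *seq)
{
	struct iter_state *st = seq->private;

	printf("net %d, bucket %d\n",
	       seq_file_net_sketch(seq)->id, st->bucket);
}

int main(void)
{
	struct net n = { .id = 1 };
	struct iter_state st = { .bucket = 7, .genid = 0 };
	struct seq_file seq = { .private = &st, .net = &n };

	show_bucket(&seq);
	return 0;
}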
@@ -599,18 +598,18 @@ static inline int ip_rt_proc_init(void)
 }
 #endif /* CONFIG_PROC_FS */
 
-static __inline__ void rt_free(struct rtable *rt)
+static inline void rt_free(struct rtable *rt)
 {
        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
-static __inline__ void rt_drop(struct rtable *rt)
+static inline void rt_drop(struct rtable *rt)
 {
        ip_rt_put(rt);
        call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
 }
 
-static __inline__ int rt_fast_clean(struct rtable *rth)
+static inline int rt_fast_clean(struct rtable *rth)
 {
        /* Kill broadcast/multicast entries very aggresively, if they
           collide in hash table with more useful entries */
@@ -618,7 +617,7 @@ static __inline__ int rt_fast_clean(struct rtable *rth)
                rth->fl.iif && rth->u.dst.rt_next;
 }
 
-static __inline__ int rt_valuable(struct rtable *rth)
+static inline int rt_valuable(struct rtable *rth)
 {
        return (rth->rt_flags & (RTCF_REDIRECTED | RTCF_NOTIFY)) ||
                rth->u.dst.expires;
@@ -1195,7 +1194,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
                                    rth->fl.oif != ikeys[k] ||
                                    rth->fl.iif != 0 ||
                                    rth->rt_genid != atomic_read(&rt_genid) ||
-                                   dev_net(rth->u.dst.dev) != net) {
+                                   !net_eq(dev_net(rth->u.dst.dev), net)) {
                                        rthp = &rth->u.dst.rt_next;
                                        continue;
                                }
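Here and in the later hunks, dev_net(rth->u.dst.dev) != net becomes !net_eq(...). The point of the helper is that a kernel built without network namespace support can make it constant-true and drop the comparison entirely; only a multi-namespace build pays for the pointer compare. A compile-time sketch of that pattern (CONFIG_NET_NS_SKETCH and net_eq_sketch() are illustrative stand-ins for the real CONFIG_NET_NS and net_eq()):

#include <stdio.h>

struct net { int id; };

#ifdef CONFIG_NET_NS_SKETCH
/* namespaces compiled in: a real pointer comparison */
static inline int net_eq_sketch(const struct net *a, const struct net *b)
{
	return a == b;
}
#else
/* namespaces compiled out: constant-true, so the caller's test vanishes */
static inline int net_eq_sketch(const struct net *a, const struct net *b)
{
	(void)a; (void)b;
	return 1;
}
#endif

int main(void)
{
	struct net init_net = { .id = 0 }, other = { .id = 1 };

	printf("%d %d\n", net_eq_sketch(&init_net, &init_net),
	       net_eq_sketch(&init_net, &other));
	return 0;
}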
@@ -1419,7 +1418,7 @@ out:      kfree_skb(skb);
 static const unsigned short mtu_plateau[] =
 {32000, 17914, 8166, 4352, 2002, 1492, 576, 296, 216, 128 };
 
-static __inline__ unsigned short guess_mtu(unsigned short old_mtu)
+static inline unsigned short guess_mtu(unsigned short old_mtu)
 {
        int i;
 
@@ -1454,7 +1453,7 @@ unsigned short ip_rt_frag_needed(struct net *net, struct iphdr *iph,
                            rth->rt_src  == iph->saddr &&
                            rth->fl.iif == 0 &&
                            !(dst_metric_locked(&rth->u.dst, RTAX_MTU)) &&
-                           dev_net(rth->u.dst.dev) == net &&
+                           net_eq(dev_net(rth->u.dst.dev), net) &&
                            rth->rt_genid == atomic_read(&rt_genid)) {
                                unsigned short mtu = new_mtu;
 
@@ -1749,11 +1748,11 @@ static void ip_handle_martian_source(struct net_device *dev,
 #endif
 }
 
-static inline int __mkroute_input(struct sk_buff *skb,
-                                 struct fib_result* res,
-                                 struct in_device *in_dev,
-                                 __be32 daddr, __be32 saddr, u32 tos,
-                                 struct rtable **result)
+static int __mkroute_input(struct sk_buff *skb,
+                          struct fib_result *res,
+                          struct in_device *in_dev,
+                          __be32 daddr, __be32 saddr, u32 tos,
+                          struct rtable **result)
 {
 
        struct rtable *rth;
@@ -1845,11 +1844,11 @@ static inline int __mkroute_input(struct sk_buff *skb,
        return err;
 }
 
-static inline int ip_mkroute_input(struct sk_buff *skb,
-                                  struct fib_result* res,
-                                  const struct flowi *fl,
-                                  struct in_device *in_dev,
-                                  __be32 daddr, __be32 saddr, u32 tos)
+static int ip_mkroute_input(struct sk_buff *skb,
+                           struct fib_result *res,
+                           const struct flowi *fl,
+                           struct in_device *in_dev,
+                           __be32 daddr, __be32 saddr, u32 tos)
 {
        struct rtable* rth = NULL;
        int err;
@@ -2078,13 +2077,13 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        rcu_read_lock();
        for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
             rth = rcu_dereference(rth->u.dst.rt_next)) {
-               if (rth->fl.fl4_dst == daddr &&
-                   rth->fl.fl4_src == saddr &&
-                   rth->fl.iif == iif &&
-                   rth->fl.oif == 0 &&
+               if (((rth->fl.fl4_dst ^ daddr) |
+                    (rth->fl.fl4_src ^ saddr) |
+                    (rth->fl.iif ^ iif) |
+                    rth->fl.oif |
+                    (rth->fl.fl4_tos ^ tos)) == 0 &&
                    rth->fl.mark == skb->mark &&
-                   rth->fl.fl4_tos == tos &&
-                   dev_net(rth->u.dst.dev) == net &&
+                   net_eq(dev_net(rth->u.dst.dev), net) &&
                    rth->rt_genid == atomic_read(&rt_genid)) {
                        dst_use(&rth->u.dst, jiffies);
                        RT_CACHE_STAT_INC(in_hit);
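The lookup above folds most of the per-field equality tests into one expression: each XOR term is zero exactly when a field matches (and the bare oif term is zero only when oif itself is zero), so OR-ing them yields zero only on a full match and the compiler can test the lot with a single branch. A standalone sketch of the idiom with a made-up key structure:

#include <stdint.h>
#include <stdio.h>

/* hypothetical key with roughly the shape of the fields compared above */
struct key {
	uint32_t dst, src;
	int iif, oif;
	uint8_t tos;
};

/* zero iff every field matches: each XOR is zero on equality, and OR-ing
 * them accumulates any mismatch into one value to test */
static uint32_t key_mismatch(const struct key *a, const struct key *b)
{
	return (a->dst ^ b->dst) |
	       (a->src ^ b->src) |
	       (uint32_t)(a->iif ^ b->iif) |
	       (uint32_t)(a->oif ^ b->oif) |
	       (uint32_t)(a->tos ^ b->tos);
}

int main(void)
{
	struct key k1 = { 0x0a000001, 0x0a000002, 3, 0, 4 };
	struct key k2 = k1;

	printf("match=%d\n", key_mismatch(&k1, &k2) == 0);	/* 1 */
	k2.tos = 5;
	printf("match=%d\n", key_mismatch(&k1, &k2) == 0);	/* 0 */
	return 0;
}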
@@ -2131,12 +2130,12 @@ int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
        return ip_route_input_slow(skb, daddr, saddr, tos, dev);
 }
 
-static inline int __mkroute_output(struct rtable **result,
-                                  struct fib_result* res,
-                                  const struct flowi *fl,
-                                  const struct flowi *oldflp,
-                                  struct net_device *dev_out,
-                                  unsigned flags)
+static int __mkroute_output(struct rtable **result,
+                           struct fib_result *res,
+                           const struct flowi *fl,
+                           const struct flowi *oldflp,
+                           struct net_device *dev_out,
+                           unsigned flags)
 {
        struct rtable *rth;
        struct in_device *in_dev;
@@ -2251,12 +2250,12 @@ static inline int __mkroute_output(struct rtable **result,
        return err;
 }
 
-static inline int ip_mkroute_output(struct rtable **rp,
-                                   struct fib_result* res,
-                                   const struct flowi *fl,
-                                   const struct flowi *oldflp,
-                                   struct net_device *dev_out,
-                                   unsigned flags)
+static int ip_mkroute_output(struct rtable **rp,
+                            struct fib_result *res,
+                            const struct flowi *fl,
+                            const struct flowi *oldflp,
+                            struct net_device *dev_out,
+                            unsigned flags)
 {
        struct rtable *rth = NULL;
        int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
@@ -2486,7 +2485,7 @@ int __ip_route_output_key(struct net *net, struct rtable **rp,
                    rth->fl.mark == flp->mark &&
                    !((rth->fl.fl4_tos ^ flp->fl4_tos) &
                            (IPTOS_RT_MASK | RTO_ONLINK)) &&
-                   dev_net(rth->u.dst.dev) == net &&
+                   net_eq(dev_net(rth->u.dst.dev), net) &&
                    rth->rt_genid == atomic_read(&rt_genid)) {
                        dst_use(&rth->u.dst, jiffies);
                        RT_CACHE_STAT_INC(out_hit);
@@ -2689,7 +2688,7 @@ nla_put_failure:
 
 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr* nlh, void *arg)
 {
-       struct net *net = in_skb->sk->sk_net;
+       struct net *net = sock_net(in_skb->sk);
        struct rtmsg *rtm;
        struct nlattr *tb[RTA_MAX+1];
        struct rtable *rt = NULL;
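The raw in_skb->sk->sk_net dereference gives way to the sock_net() accessor, so callers stop depending on how the socket stores its namespace back-pointer. A minimal sketch of that accessor style (the struct layouts and sock_net_sketch() are stand-ins, not the real <net/sock.h> definitions):

#include <stdio.h>

struct net { int id; };

/* stand-in for struct sock; only the namespace back-pointer matters here */
struct sock { struct net *sk_net; };

/* accessor in the style of sock_net(): callers no longer poke the field
 * directly, so its representation can change behind this helper */
static inline struct net *sock_net_sketch(const struct sock *sk)
{
	return sk->sk_net;
}

int main(void)
{
	struct net n = { .id = 0 };
	struct sock sk = { .sk_net = &n };

	printf("net id %d\n", sock_net_sketch(&sk)->id);
	return 0;
}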
@@ -2785,7 +2784,7 @@ int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
        int idx, s_idx;
        struct net *net;
 
-       net = skb->sk->sk_net;
+       net = sock_net(skb->sk);
 
        s_h = cb->args[0];
        if (s_h < 0)
@@ -2795,7 +2794,7 @@ int ip_rt_dump(struct sk_buff *skb,  struct netlink_callback *cb)
                rcu_read_lock_bh();
                for (rt = rcu_dereference(rt_hash_table[h].chain), idx = 0; rt;
                     rt = rcu_dereference(rt->u.dst.rt_next), idx++) {
-                       if (dev_net(rt->u.dst.dev) != net || idx < s_idx)
+                       if (!net_eq(dev_net(rt->u.dst.dev), net) || idx < s_idx)
                                continue;
                        if (rt->rt_genid != atomic_read(&rt_genid))
                                continue;
@@ -3059,7 +3058,9 @@ int __init ip_rt_init(void)
        devinet_init();
        ip_fib_init();
 
-       setup_timer(&rt_secret_timer, rt_secret_rebuild, 0);
+       rt_secret_timer.function = rt_secret_rebuild;
+       rt_secret_timer.data = 0;
+       init_timer_deferrable(&rt_secret_timer);
 
        /* All the timers, started at system startup tend
           to synchronize. Perturb it a bit.
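Finally, the secret-rebuild timer is initialised with init_timer_deferrable() instead of setup_timer(), with the handler and data assigned by hand first; a deferrable timer lets an idle CPU stay asleep until it wakes for some other reason, which is acceptable for a periodic cache rehash. A toy sketch of that assign-then-init ordering (sketch_timer and sketch_init_timer_deferrable() are illustrative stand-ins, not the <linux/timer.h> API):

#include <stdio.h>

/* stand-in timer, just to show the field assignments done before init */
struct sketch_timer {
	void (*function)(unsigned long);
	unsigned long data;
	int deferrable;
};

static void sketch_init_timer_deferrable(struct sketch_timer *t)
{
	t->deferrable = 1;	/* expiry may wait until the CPU is busy anyway */
}

static void rebuild(unsigned long data)
{
	printf("rebuild(%lu)\n", data);
}

int main(void)
{
	struct sketch_timer rt_secret_timer;

	rt_secret_timer.function = rebuild;
	rt_secret_timer.data = 0;
	sketch_init_timer_deferrable(&rt_secret_timer);

	/* fire the handler once, standing in for the timer expiring */
	rt_secret_timer.function(rt_secret_timer.data);
	return 0;
}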