/* Expectation handling for nf_conntrack. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (c) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <net/net_namespace.h>
#include <net/netns/hash.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>

unsigned int nf_ct_expect_hsize __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hsize);

struct hlist_head *nf_ct_expect_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_expect_hash);

unsigned int nf_ct_expect_max __read_mostly;

static struct kmem_cache *nf_ct_expect_cachep __read_mostly;
static unsigned int nf_ct_expect_hashrnd __read_mostly;
/* nf_conntrack_expect helper functions */
void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp,
                                u32 portid, int report)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct net *net = nf_ct_exp_net(exp);

        NF_CT_ASSERT(master_help);
        NF_CT_ASSERT(!timer_pending(&exp->timeout));

        hlist_del_rcu(&exp->hnode);
        net->ct.expect_count--;

        hlist_del_rcu(&exp->lnode);
        master_help->expecting[exp->class]--;

        nf_ct_expect_event_report(IPEXP_DESTROY, exp, portid, report);
        nf_ct_expect_put(exp);

        NF_CT_STAT_INC(net, expect_delete);
}
EXPORT_SYMBOL_GPL(nf_ct_unlink_expect_report);

static void nf_ct_expectation_timed_out(unsigned long ul_expect)
{
        struct nf_conntrack_expect *exp = (void *)ul_expect;

        spin_lock_bh(&nf_conntrack_expect_lock);
        nf_ct_unlink_expect(exp);
        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_put(exp);
}

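/* Expectations are hashed by destination tuple: jhash2 over the
 * destination address, keyed with the l3/l4 protocol numbers and the
 * destination port (or equivalent), and seeded per boot and per netns
 * so bucket positions are not predictable across namespaces.
 * reciprocal_scale() maps the 32-bit hash into [0, nf_ct_expect_hsize).
 */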
static unsigned int nf_ct_expect_dst_hash(const struct net *n,
                                          const struct nf_conntrack_tuple *tuple)
{
        unsigned int hash, seed;

        get_random_once(&nf_ct_expect_hashrnd, sizeof(nf_ct_expect_hashrnd));

        seed = nf_ct_expect_hashrnd ^ net_hash_mix(n);

        hash = jhash2(tuple->dst.u3.all, ARRAY_SIZE(tuple->dst.u3.all),
                      (((tuple->dst.protonum ^ tuple->src.l3num) << 16) |
                       (__force __u16)tuple->dst.u.all) ^ seed);

        return reciprocal_scale(hash, nf_ct_expect_hsize);
}

static bool
nf_ct_exp_equal(const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_expect *i,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) &&
               net_eq(net, nf_ct_net(i->master)) &&
               nf_ct_zone_equal_any(i->master, zone);
}

struct nf_conntrack_expect *
__nf_ct_expect_find(struct net *net,
                    const struct nf_conntrack_zone *zone,
                    const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry_rcu(i, &nf_ct_expect_hash[h], hnode) {
                if (nf_ct_exp_equal(tuple, i, zone, net))
                        return i;
        }
        return NULL;
}
EXPORT_SYMBOL_GPL(__nf_ct_expect_find);

/* Just find an expectation corresponding to a tuple. */
struct nf_conntrack_expect *
nf_ct_expect_find_get(struct net *net,
                      const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i;

        rcu_read_lock();
        i = __nf_ct_expect_find(net, zone, tuple);
        if (i && !atomic_inc_not_zero(&i->use))
                i = NULL;
        rcu_read_unlock();

        return i;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_find_get);

/* If an expectation for this connection is found, it is deleted from
 * the global list and then returned.
 */
struct nf_conntrack_expect *
nf_ct_find_expectation(struct net *net,
                       const struct nf_conntrack_zone *zone,
                       const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_expect *i, *exp = NULL;
        unsigned int h;

        if (!net->ct.expect_count)
                return NULL;

        h = nf_ct_expect_dst_hash(net, tuple);
        hlist_for_each_entry(i, &nf_ct_expect_hash[h], hnode) {
                if (!(i->flags & NF_CT_EXPECT_INACTIVE) &&
                    nf_ct_exp_equal(tuple, i, zone, net)) {
                        exp = i;
                        break;
                }
        }
        if (!exp)
                return NULL;

        /* If master is not in hash table yet (ie. packet hasn't left
         * this machine yet), how can other end know about expected?
         * Hence these are not the droids you are looking for (if
         * master ct never got confirmed, we'd hold a reference to it
         * and weird things would happen to future packets).
         */
        if (!nf_ct_is_confirmed(exp->master))
                return NULL;

        /* Avoid racing with other CPUs that, for the exp->master ct,
         * are about to invoke ->destroy(), or nf_ct_delete() via
         * timeout or early_drop().
         *
         * If atomic_inc_not_zero() fails, we know the ct is being
         * destroyed; if it succeeds, we can be sure the ct cannot
         * disappear underneath us.
         */
        if (unlikely(nf_ct_is_dying(exp->master) ||
                     !atomic_inc_not_zero(&exp->master->ct_general.use)))
                return NULL;

        if (exp->flags & NF_CT_EXPECT_PERMANENT) {
                atomic_inc(&exp->use);
                return exp;
        } else if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                return exp;
        }
        /* Undo exp->master refcnt increase, if del_timer() failed */
        nf_ct_put(exp->master);

        return NULL;
}

/* Delete all expectations for this conntrack. */
void nf_ct_remove_expectations(struct nf_conn *ct)
{
        struct nf_conn_help *help = nfct_help(ct);
        struct nf_conntrack_expect *exp;
        struct hlist_node *next;

        /* Optimization: most connections never expect any others. */
        if (!help)
                return;

        spin_lock_bh(&nf_conntrack_expect_lock);
        hlist_for_each_entry_safe(exp, next, &help->expectations, lnode) {
                if (del_timer(&exp->timeout)) {
                        nf_ct_unlink_expect(exp);
                        nf_ct_expect_put(exp);
                }
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_remove_expectations);

/* Would two expected things clash? */
static inline int expect_clash(const struct nf_conntrack_expect *a,
                               const struct nf_conntrack_expect *b)
{
        /* Part covered by intersection of masks must be unequal,
         * otherwise they clash.
         */
        struct nf_conntrack_tuple_mask intersect_mask;
        int count;

        intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;

        for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++) {
                intersect_mask.src.u3.all[count] =
                        a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
        }

        return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

static inline int expect_matches(const struct nf_conntrack_expect *a,
                                 const struct nf_conntrack_expect *b)
{
        return a->master == b->master && a->class == b->class &&
               nf_ct_tuple_equal(&a->tuple, &b->tuple) &&
               nf_ct_tuple_mask_equal(&a->mask, &b->mask) &&
               net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) &&
               nf_ct_zone_equal_any(a->master, nf_ct_zone(b->master));
}

/* Generally a bad idea to call this: could have matched already. */
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp)
{
        spin_lock_bh(&nf_conntrack_expect_lock);
        if (del_timer(&exp->timeout)) {
                nf_ct_unlink_expect(exp);
                nf_ct_expect_put(exp);
        }
        spin_unlock_bh(&nf_conntrack_expect_lock);
}
EXPORT_SYMBOL_GPL(nf_ct_unexpect_related);

/* We don't increase the master conntrack refcount for unfulfilled
 * expectations. During conntrack destruction, the expectations are
 * always killed before the conntrack itself.
 */
struct nf_conntrack_expect *nf_ct_expect_alloc(struct nf_conn *me)
{
        struct nf_conntrack_expect *new;

        new = kmem_cache_alloc(nf_ct_expect_cachep, GFP_ATOMIC);
        if (!new)
                return NULL;

        new->master = me;
        atomic_set(&new->use, 1);
        return new;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_alloc);

void nf_ct_expect_init(struct nf_conntrack_expect *exp, unsigned int class,
                       u_int8_t family,
                       const union nf_inet_addr *saddr,
                       const union nf_inet_addr *daddr,
                       u_int8_t proto, const __be16 *src, const __be16 *dst)
{
        int len;

        if (family == AF_INET)
                len = 4;
        else
                len = 16;

        exp->flags = 0;
        exp->class = class;
        exp->expectfn = NULL;
        exp->helper = NULL;
        exp->tuple.src.l3num = family;
        exp->tuple.dst.protonum = proto;

        if (saddr) {
                memcpy(&exp->tuple.src.u3, saddr, len);
                if (sizeof(exp->tuple.src.u3) > len)
                        /* address needs to be cleared for nf_ct_tuple_equal */
                        memset((void *)&exp->tuple.src.u3 + len, 0x00,
                               sizeof(exp->tuple.src.u3) - len);
                memset(&exp->mask.src.u3, 0xFF, len);
                if (sizeof(exp->mask.src.u3) > len)
                        memset((void *)&exp->mask.src.u3 + len, 0x00,
                               sizeof(exp->mask.src.u3) - len);
        } else {
                memset(&exp->tuple.src.u3, 0x00, sizeof(exp->tuple.src.u3));
                memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3));
        }

        if (src) {
                exp->tuple.src.u.all = *src;
                exp->mask.src.u.all = htons(0xFFFF);
        } else {
                exp->tuple.src.u.all = 0;
                exp->mask.src.u.all = 0;
        }

        memcpy(&exp->tuple.dst.u3, daddr, len);
        if (sizeof(exp->tuple.dst.u3) > len)
                /* address needs to be cleared for nf_ct_tuple_equal */
                memset((void *)&exp->tuple.dst.u3 + len, 0x00,
                       sizeof(exp->tuple.dst.u3) - len);

        exp->tuple.dst.u.all = *dst;

#ifdef CONFIG_NF_NAT_NEEDED
        memset(&exp->saved_addr, 0, sizeof(exp->saved_addr));
        memset(&exp->saved_proto, 0, sizeof(exp->saved_proto));
#endif
}
EXPORT_SYMBOL_GPL(nf_ct_expect_init);
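
/* Typical helper usage, as in e.g. the FTP helper (an illustrative
 * sketch only; "ct", "dir", "daddr" and "port" are hypothetical
 * locals and error handling is trimmed):
 *
 *	struct nf_conntrack_expect *exp;
 *
 *	exp = nf_ct_expect_alloc(ct);
 *	if (exp == NULL)
 *		return NF_DROP;
 *	nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT, nf_ct_l3num(ct),
 *			  &ct->tuplehash[!dir].tuple.src.u3, &daddr,
 *			  IPPROTO_TCP, NULL, &port);
 *	if (nf_ct_expect_related(exp) != 0)
 *		ret = NF_DROP;
 *	nf_ct_expect_put(exp);
 *
 * The final nf_ct_expect_put() drops the reference taken by
 * nf_ct_expect_alloc(); on success the table holds its own references
 * (see nf_ct_expect_insert() below).
 */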

static void nf_ct_expect_free_rcu(struct rcu_head *head)
{
        struct nf_conntrack_expect *exp;

        exp = container_of(head, struct nf_conntrack_expect, rcu);
        kmem_cache_free(nf_ct_expect_cachep, exp);
}

void nf_ct_expect_put(struct nf_conntrack_expect *exp)
{
        if (atomic_dec_and_test(&exp->use))
                call_rcu(&exp->rcu, nf_ct_expect_free_rcu);
}
EXPORT_SYMBOL_GPL(nf_ct_expect_put);

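/* Link the expectation into the master's helper list and the global
 * hash, and arm its timeout; the caller must hold
 * nf_conntrack_expect_lock. Two references are taken on top of the
 * allocation reference: one owned by the hash tables, one by the
 * timer. On expiry, nf_ct_expectation_timed_out() unlinks the
 * expectation and drops both again.
 */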
static void nf_ct_expect_insert(struct nf_conntrack_expect *exp)
{
        struct nf_conn_help *master_help = nfct_help(exp->master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(exp);
        unsigned int h = nf_ct_expect_dst_hash(net, &exp->tuple);

        /* two references: one for hash insert, one for the timer */
        atomic_add(2, &exp->use);

        hlist_add_head_rcu(&exp->lnode, &master_help->expectations);
        master_help->expecting[exp->class]++;

        hlist_add_head_rcu(&exp->hnode, &nf_ct_expect_hash[h]);
        net->ct.expect_count++;

        setup_timer(&exp->timeout, nf_ct_expectation_timed_out,
                    (unsigned long)exp);
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                exp->timeout.expires = jiffies +
                        helper->expect_policy[exp->class].timeout * HZ;
        }
        add_timer(&exp->timeout);

        NF_CT_STAT_INC(net, expect_create);
}

/* Race with expectations being used means we could have none to find; OK. */
static void evict_oldest_expect(struct nf_conn *master,
                                struct nf_conntrack_expect *new)
{
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_expect *exp, *last = NULL;

        hlist_for_each_entry(exp, &master_help->expectations, lnode) {
                if (exp->class == new->class)
                        last = exp;
        }

        if (last && del_timer(&last->timeout)) {
                nf_ct_unlink_expect(last);
                nf_ct_expect_put(last);
        }
}

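/* Validate a new expectation against the current table; called with
 * nf_conntrack_expect_lock held. Returns 0 if the expectation may be
 * inserted, -ESHUTDOWN if the master conntrack has no helper
 * extension, -EBUSY if it clashes with an existing expectation, and
 * -EMFILE if the per-helper or global expectation limit would be
 * exceeded. An exact match (expect_matches()) is not an error: the
 * old entry is unlinked so the new one can replace it.
 */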
static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect)
{
        const struct nf_conntrack_expect_policy *p;
        struct nf_conntrack_expect *i;
        struct nf_conn *master = expect->master;
        struct nf_conn_help *master_help = nfct_help(master);
        struct nf_conntrack_helper *helper;
        struct net *net = nf_ct_exp_net(expect);
        struct hlist_node *next;
        unsigned int h;
        int ret = 0;

        if (!master_help) {
                ret = -ESHUTDOWN;
                goto out;
        }
        h = nf_ct_expect_dst_hash(net, &expect->tuple);
        hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) {
                if (expect_matches(i, expect)) {
                        if (del_timer(&i->timeout)) {
                                nf_ct_unlink_expect(i);
                                nf_ct_expect_put(i);
                                break;
                        }
                } else if (expect_clash(i, expect)) {
                        ret = -EBUSY;
                        goto out;
                }
        }
        /* Will be over limit? */
        helper = rcu_dereference_protected(master_help->helper,
                                           lockdep_is_held(&nf_conntrack_expect_lock));
        if (helper) {
                p = &helper->expect_policy[expect->class];
                if (p->max_expected &&
                    master_help->expecting[expect->class] >= p->max_expected) {
                        evict_oldest_expect(master, expect);
                        if (master_help->expecting[expect->class]
                                                >= p->max_expected) {
                                ret = -EMFILE;
                                goto out;
                        }
                }
        }

        if (net->ct.expect_count >= nf_ct_expect_max) {
                net_warn_ratelimited("nf_conntrack: expectation table full\n");
                ret = -EMFILE;
        }
out:
        return ret;
}

int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
                                u32 portid, int report)
{
        int ret;

        spin_lock_bh(&nf_conntrack_expect_lock);
        ret = __nf_ct_expect_check(expect);
        if (ret < 0)
                goto out;

        nf_ct_expect_insert(expect);

        spin_unlock_bh(&nf_conntrack_expect_lock);
        nf_ct_expect_event_report(IPEXP_NEW, expect, portid, report);
        return 0;
out:
        spin_unlock_bh(&nf_conntrack_expect_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_expect_related_report);

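/* /proc/net/nf_conntrack_expect: a seq_file walking the expectation
 * hash under rcu_read_lock(). Each line shows the remaining timeout,
 * the l3/l4 protocol numbers, the expected tuple, any flags
 * (PERMANENT, INACTIVE, USERSPACE) and the helper/policy name.
 */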
#ifdef CONFIG_NF_CONNTRACK_PROCFS
struct ct_expect_iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *ct_expect_get_first(struct seq_file *seq)
{
        struct ct_expect_iter_state *st = seq->private;
        struct hlist_node *n;

        for (st->bucket = 0; st->bucket < nf_ct_expect_hsize; st->bucket++) {
                n = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
                if (n)
                        return n;
        }
        return NULL;
}

static struct hlist_node *ct_expect_get_next(struct seq_file *seq,
                                             struct hlist_node *head)
{
        struct ct_expect_iter_state *st = seq->private;

        head = rcu_dereference(hlist_next_rcu(head));
        while (head == NULL) {
                if (++st->bucket >= nf_ct_expect_hsize)
                        return NULL;
                head = rcu_dereference(hlist_first_rcu(&nf_ct_expect_hash[st->bucket]));
        }
        return head;
}

static struct hlist_node *ct_expect_get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head = ct_expect_get_first(seq);

        if (head)
                while (pos && (head = ct_expect_get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *exp_seq_start(struct seq_file *seq, loff_t *pos)
        __acquires(RCU)
{
        rcu_read_lock();
        return ct_expect_get_idx(seq, *pos);
}

static void *exp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        (*pos)++;
        return ct_expect_get_next(seq, v);
}

static void exp_seq_stop(struct seq_file *seq, void *v)
        __releases(RCU)
{
        rcu_read_unlock();
}

static int exp_seq_show(struct seq_file *s, void *v)
{
        struct nf_conntrack_expect *expect;
        struct nf_conntrack_helper *helper;
        struct hlist_node *n = v;
        char *delim = "";

        expect = hlist_entry(n, struct nf_conntrack_expect, hnode);

        if (expect->timeout.function)
                seq_printf(s, "%ld ", timer_pending(&expect->timeout)
                           ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
        else
                seq_printf(s, "- ");
        seq_printf(s, "l3proto = %u proto=%u ",
                   expect->tuple.src.l3num,
                   expect->tuple.dst.protonum);
        print_tuple(s, &expect->tuple,
                    __nf_ct_l3proto_find(expect->tuple.src.l3num),
                    __nf_ct_l4proto_find(expect->tuple.src.l3num,
                                         expect->tuple.dst.protonum));

        if (expect->flags & NF_CT_EXPECT_PERMANENT) {
                seq_printf(s, "PERMANENT");
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_INACTIVE) {
                seq_printf(s, "%sINACTIVE", delim);
                delim = ",";
        }
        if (expect->flags & NF_CT_EXPECT_USERSPACE)
                seq_printf(s, "%sUSERSPACE", delim);

        helper = rcu_dereference(nfct_help(expect->master)->helper);
        if (helper) {
                seq_printf(s, "%s%s", expect->flags ? " " : "", helper->name);
                if (helper->expect_policy[expect->class].name[0])
                        seq_printf(s, "/%s",
                                   helper->expect_policy[expect->class].name);
        }

        seq_putc(s, '\n');

        return 0;
}

static const struct seq_operations exp_seq_ops = {
        .start = exp_seq_start,
        .next = exp_seq_next,
        .stop = exp_seq_stop,
        .show = exp_seq_show
};

static int exp_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &exp_seq_ops,
                            sizeof(struct ct_expect_iter_state));
}

static const struct file_operations exp_file_ops = {
        .owner   = THIS_MODULE,
        .open    = exp_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};
#endif /* CONFIG_NF_CONNTRACK_PROCFS */

static int exp_proc_init(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        struct proc_dir_entry *proc;
        kuid_t root_uid;
        kgid_t root_gid;

        proc = proc_create("nf_conntrack_expect", 0440, net->proc_net,
                           &exp_file_ops);
        if (!proc)
                return -ENOMEM;

        root_uid = make_kuid(net->user_ns, 0);
        root_gid = make_kgid(net->user_ns, 0);
        if (uid_valid(root_uid) && gid_valid(root_gid))
                proc_set_user(proc, root_uid, root_gid);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
        return 0;
}

static void exp_proc_remove(struct net *net)
{
#ifdef CONFIG_NF_CONNTRACK_PROCFS
        remove_proc_entry("nf_conntrack_expect", net->proc_net);
#endif /* CONFIG_NF_CONNTRACK_PROCFS */
}

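/* expect_hashsize is settable only at load time (mode 0400 makes it
 * read-only through sysfs): for example "modprobe nf_conntrack
 * expect_hashsize=2048", or nf_conntrack.expect_hashsize=2048 on the
 * kernel command line when conntrack is built in, following the usual
 * module parameter conventions.
 */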
module_param_named(expect_hashsize, nf_ct_expect_hsize, uint, 0400);

int nf_conntrack_expect_pernet_init(struct net *net)
{
        net->ct.expect_count = 0;
        return exp_proc_init(net);
}

void nf_conntrack_expect_pernet_fini(struct net *net)
{
        exp_proc_remove(net);
}

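/* Unless overridden via the expect_hashsize parameter above, the
 * expectation hash is sized at 1/256th of the main conntrack hash
 * (minimum one bucket), with nf_ct_expect_max allowing four entries
 * per bucket. For example, nf_conntrack_htable_size = 16384 gives
 * nf_ct_expect_hsize = 64 and nf_ct_expect_max = 256.
 */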
int nf_conntrack_expect_init(void)
{
        if (!nf_ct_expect_hsize) {
                nf_ct_expect_hsize = nf_conntrack_htable_size / 256;
                if (!nf_ct_expect_hsize)
                        nf_ct_expect_hsize = 1;
        }
        nf_ct_expect_max = nf_ct_expect_hsize * 4;
        nf_ct_expect_cachep = kmem_cache_create("nf_conntrack_expect",
                                sizeof(struct nf_conntrack_expect),
                                0, 0, NULL);
        if (!nf_ct_expect_cachep)
                return -ENOMEM;

        nf_ct_expect_hash = nf_ct_alloc_hashtable(&nf_ct_expect_hsize, 0);
        if (!nf_ct_expect_hash) {
                kmem_cache_destroy(nf_ct_expect_cachep);
                return -ENOMEM;
        }

        return 0;
}

void nf_conntrack_expect_fini(void)
{
        rcu_barrier(); /* Wait for call_rcu() before destroy */
        kmem_cache_destroy(nf_ct_expect_cachep);
        nf_ct_free_hashtable(nf_ct_expect_hash, nf_ct_expect_hsize);
}