/*
 * This is a module which is used for queueing packets and communicating with
 * userspace via nfnetlink.
 *
 * (C) 2005 by Harald Welte <laforge@netfilter.org>
 * (C) 2007 by Patrick McHardy <kaber@trash.net>
 *
 * Based on the old ipv4-only ip_queue.c:
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_queue.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/netfilter/nf_queue.h>
#include <net/netns/generic.h>
#include <net/netfilter/nfnetlink_queue.h>

#include <linux/atomic.h>

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
#include "../bridge/br_private.h"
#endif

#define NFQNL_QMAX_DEFAULT 1024

/* We're using struct nlattr which has 16bit nla_len. Note that nla_len
 * includes the header length. Thus, the maximum packet length that we
 * support is 65531 bytes. We send truncated packets if the specified length
 * is larger than that.  Userspace can check for presence of NFQA_CAP_LEN
 * attribute to detect truncation.
 */
#define NFQNL_MAX_COPY_RANGE (0xffff - NLA_HDRLEN)

struct nfqnl_instance {
        struct hlist_node hlist;                /* global list of queues */
        struct rcu_head rcu;

        u32 peer_portid;
        unsigned int queue_maxlen;
        unsigned int copy_range;
        unsigned int queue_dropped;
        unsigned int queue_user_dropped;

        u_int16_t queue_num;                    /* number of this queue */
        u_int8_t copy_mode;
        u_int32_t flags;                        /* Set using NFQA_CFG_FLAGS */
/*
 * Following fields are dirtied for each queued packet,
 * keep them in same cache line if possible.
 */
        spinlock_t      lock;
        unsigned int    queue_total;
        unsigned int    id_sequence;            /* 'sequence' of pkt ids */
        struct list_head queue_list;            /* packets in queue */
};

typedef int (*nfqnl_cmpfn)(struct nf_queue_entry *, unsigned long);

static int nfnl_queue_net_id __read_mostly;

#define INSTANCE_BUCKETS        16
struct nfnl_queue_net {
        spinlock_t instances_lock;
        struct hlist_head instance_table[INSTANCE_BUCKETS];
};

static struct nfnl_queue_net *nfnl_queue_pernet(struct net *net)
{
        return net_generic(net, nfnl_queue_net_id);
}

static inline u_int8_t instance_hashfn(u_int16_t queue_num)
{
        return ((queue_num >> 8) ^ queue_num) % INSTANCE_BUCKETS;
}

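/* Find the queue instance bound to @queue_num; the caller must be in an
 * RCU read-side critical section (or hold instances_lock).
 */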
static struct nfqnl_instance *
instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num)
{
        struct hlist_head *head;
        struct nfqnl_instance *inst;

        head = &q->instance_table[instance_hashfn(queue_num)];
        hlist_for_each_entry_rcu(inst, head, hlist) {
                if (inst->queue_num == queue_num)
                        return inst;
        }
        return NULL;
}

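/* Allocate a new queue instance for @queue_num, owned by the netlink socket
 * identified by @portid, and link it into the per-net hash table. Returns
 * ERR_PTR(-EEXIST) if the queue number is already bound.
 */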
static struct nfqnl_instance *
instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid)
{
        struct nfqnl_instance *inst;
        unsigned int h;
        int err;

        spin_lock(&q->instances_lock);
        if (instance_lookup(q, queue_num)) {
                err = -EEXIST;
                goto out_unlock;
        }

        inst = kzalloc(sizeof(*inst), GFP_ATOMIC);
        if (!inst) {
                err = -ENOMEM;
                goto out_unlock;
        }

        inst->queue_num = queue_num;
        inst->peer_portid = portid;
        inst->queue_maxlen = NFQNL_QMAX_DEFAULT;
        inst->copy_range = NFQNL_MAX_COPY_RANGE;
        inst->copy_mode = NFQNL_COPY_NONE;
        spin_lock_init(&inst->lock);
        INIT_LIST_HEAD(&inst->queue_list);

        if (!try_module_get(THIS_MODULE)) {
                err = -EAGAIN;
                goto out_free;
        }

        h = instance_hashfn(queue_num);
        hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]);

        spin_unlock(&q->instances_lock);

        return inst;

out_free:
        kfree(inst);
out_unlock:
        spin_unlock(&q->instances_lock);
        return ERR_PTR(err);
}

static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
                        unsigned long data);

static void
instance_destroy_rcu(struct rcu_head *head)
{
        struct nfqnl_instance *inst = container_of(head, struct nfqnl_instance,
                                                   rcu);

        nfqnl_flush(inst, NULL, 0);
        kfree(inst);
        module_put(THIS_MODULE);
}

static void
__instance_destroy(struct nfqnl_instance *inst)
{
        hlist_del_rcu(&inst->hlist);
        call_rcu(&inst->rcu, instance_destroy_rcu);
}

static void
instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst)
{
        spin_lock(&q->instances_lock);
        __instance_destroy(inst);
        spin_unlock(&q->instances_lock);
}

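/* Queue list helpers; the caller must hold queue->lock. */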
static inline void
__enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue->queue_list);
        queue->queue_total++;
}

static void
__dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)
{
        list_del(&entry->list);
        queue->queue_total--;
}

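/* Look up a queued entry by packet id and, if found, unlink it from the
 * queue under queue->lock. Returns NULL if no entry matches @id.
 */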
static struct nf_queue_entry *
find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)
{
        struct nf_queue_entry *entry = NULL, *i;

        spin_lock_bh(&queue->lock);

        list_for_each_entry(i, &queue->queue_list, list) {
                if (i->id == id) {
                        entry = i;
                        break;
                }
        }

        if (entry)
                __dequeue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);

        return entry;
}

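/* Reinject (NF_DROP) every queued entry for which @cmpfn returns true, or
 * all entries when @cmpfn is NULL.
 */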
static void
nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        spin_lock_bh(&queue->lock);
        list_for_each_entry_safe(entry, next, &queue->queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue->queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
        spin_unlock_bh(&queue->lock);
}

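/* Emit an NFQA_SKB_INFO attribute describing the checksum and GSO state of
 * the queued packet, but only if at least one flag is set.
 */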
static int
nfqnl_put_packet_info(struct sk_buff *nlskb, struct sk_buff *packet,
                      bool csum_verify)
{
        __u32 flags = 0;

        if (packet->ip_summed == CHECKSUM_PARTIAL)
                flags = NFQA_SKB_CSUMNOTREADY;
        else if (csum_verify)
                flags = NFQA_SKB_CSUM_NOTVERIFIED;

        if (skb_is_gso(packet))
                flags |= NFQA_SKB_GSO;

        return flags ? nla_put_be32(nlskb, NFQA_SKB_INFO, htonl(flags)) : 0;
}

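/* Attach NFQA_UID/NFQA_GID attributes taken from the file credentials of
 * the owning socket, if there is one; sk_callback_lock guards the
 * sk->sk_socket dereference.
 */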
static int nfqnl_put_sk_uidgid(struct sk_buff *skb, struct sock *sk)
{
        const struct cred *cred;

        if (!sk_fullsock(sk))
                return 0;

        read_lock_bh(&sk->sk_callback_lock);
        if (sk->sk_socket && sk->sk_socket->file) {
                cred = sk->sk_socket->file->f_cred;
                if (nla_put_be32(skb, NFQA_UID,
                    htonl(from_kuid_munged(&init_user_ns, cred->fsuid))))
                        goto nla_put_failure;
                if (nla_put_be32(skb, NFQA_GID,
                    htonl(from_kgid_munged(&init_user_ns, cred->fsgid))))
                        goto nla_put_failure;
        }
        read_unlock_bh(&sk->sk_callback_lock);
        return 0;

nla_put_failure:
        read_unlock_bh(&sk->sk_callback_lock);
        return -1;
}

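/* Translate the packet's secmark into a security context string via the
 * LSM. Returns the context length, or 0 if no context is available (or
 * CONFIG_NETWORK_SECMARK is disabled).
 */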
static u32 nfqnl_get_sk_secctx(struct sk_buff *skb, char **secdata)
{
        u32 seclen = 0;
#if IS_ENABLED(CONFIG_NETWORK_SECMARK)
        if (!skb || !sk_fullsock(skb->sk))
                return 0;

        read_lock_bh(&skb->sk->sk_callback_lock);

        if (skb->secmark)
                security_secid_to_secctx(skb->secmark, secdata, &seclen);

        read_unlock_bh(&skb->sk->sk_callback_lock);
#endif
        return seclen;
}

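/* Build the NFQNL_MSG_PACKET netlink message for one queued packet: compute
 * the attribute sizes up front, allocate the skb, then emit the packet
 * header, interface indexes, metadata and (depending on copy_mode) the
 * payload. On success, *packet_id_ptr points at the packet-id field so the
 * caller can fill it in under queue->lock.
 */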
static struct sk_buff *
nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
                           struct nf_queue_entry *entry,
                           __be32 **packet_id_ptr)
{
        size_t size;
        size_t data_len = 0, cap_len = 0;
        unsigned int hlen = 0;
        struct sk_buff *skb;
        struct nlattr *nla;
        struct nfqnl_msg_packet_hdr *pmsg;
        struct nlmsghdr *nlh;
        struct nfgenmsg *nfmsg;
        struct sk_buff *entskb = entry->skb;
        struct net_device *indev;
        struct net_device *outdev;
        struct nf_conn *ct = NULL;
        enum ip_conntrack_info uninitialized_var(ctinfo);
        bool csum_verify;
        char *secdata = NULL;
        u32 seclen = 0;

        size =    nlmsg_total_size(sizeof(struct nfgenmsg))
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hdr))
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
                + nla_total_size(sizeof(u_int32_t))     /* ifindex */
#endif
                + nla_total_size(sizeof(u_int32_t))     /* mark */
                + nla_total_size(sizeof(struct nfqnl_msg_packet_hw))
                + nla_total_size(sizeof(u_int32_t))     /* skbinfo */
                + nla_total_size(sizeof(u_int32_t));    /* cap_len */

        if (entskb->tstamp.tv64)
                size += nla_total_size(sizeof(struct nfqnl_msg_packet_timestamp));

        if (entry->state.hook <= NF_INET_FORWARD ||
           (entry->state.hook == NF_INET_POST_ROUTING && entskb->sk == NULL))
                csum_verify = !skb_csum_unnecessary(entskb);
        else
                csum_verify = false;

        outdev = entry->state.out;

        switch ((enum nfqnl_config_mode)ACCESS_ONCE(queue->copy_mode)) {
        case NFQNL_COPY_META:
        case NFQNL_COPY_NONE:
                break;

        case NFQNL_COPY_PACKET:
                if (!(queue->flags & NFQA_CFG_F_GSO) &&
                    entskb->ip_summed == CHECKSUM_PARTIAL &&
                    skb_checksum_help(entskb))
                        return NULL;

                data_len = ACCESS_ONCE(queue->copy_range);
                if (data_len > entskb->len)
                        data_len = entskb->len;

                hlen = skb_zerocopy_headlen(entskb);
                hlen = min_t(unsigned int, hlen, data_len);
                size += sizeof(struct nlattr) + hlen;
                cap_len = entskb->len;
                break;
        }

        if (queue->flags & NFQA_CFG_F_CONNTRACK)
                ct = nfqnl_ct_get(entskb, &size, &ctinfo);

        if (queue->flags & NFQA_CFG_F_UID_GID) {
                size +=  (nla_total_size(sizeof(u_int32_t))     /* uid */
                        + nla_total_size(sizeof(u_int32_t)));   /* gid */
        }

        if ((queue->flags & NFQA_CFG_F_SECCTX) && entskb->sk) {
                seclen = nfqnl_get_sk_secctx(entskb, &secdata);
                if (seclen)
                        size += nla_total_size(seclen);
        }

        skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
                                  GFP_ATOMIC);
        if (!skb) {
                skb_tx_error(entskb);
                return NULL;
        }

        nlh = nlmsg_put(skb, 0, 0,
                        NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
                        sizeof(struct nfgenmsg), 0);
        if (!nlh) {
                skb_tx_error(entskb);
                kfree_skb(skb);
                return NULL;
        }
        nfmsg = nlmsg_data(nlh);
        nfmsg->nfgen_family = entry->state.pf;
        nfmsg->version = NFNETLINK_V0;
        nfmsg->res_id = htons(queue->queue_num);

        nla = __nla_reserve(skb, NFQA_PACKET_HDR, sizeof(*pmsg));
        pmsg = nla_data(nla);
        pmsg->hw_protocol       = entskb->protocol;
        pmsg->hook              = entry->state.hook;
        *packet_id_ptr          = &pmsg->packet_id;

        indev = entry->state.in;
        if (indev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                if (nla_put_be32(skb, NFQA_IFINDEX_INDEV, htonl(indev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: indev is physical input device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(indev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        int physinif;

                        /* Case 2: indev is bridge group, we need to look for
                         * physical device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_INDEV,
                                         htonl(indev->ifindex)))
                                goto nla_put_failure;

                        physinif = nf_bridge_get_physinif(entskb);
                        if (physinif &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSINDEV,
                                         htonl(physinif)))
                                goto nla_put_failure;
                }
#endif
        }

        if (outdev) {
#if !IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
                if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV, htonl(outdev->ifindex)))
                        goto nla_put_failure;
#else
                if (entry->state.pf == PF_BRIDGE) {
                        /* Case 1: outdev is physical output device, we need to
                         * look for bridge group (when called from
                         * netfilter_bridge) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(outdev->ifindex)) ||
                        /* this is the bridge group "brX" */
                        /* rcu_read_lock()ed by __nf_queue */
                            nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
                                goto nla_put_failure;
                } else {
                        int physoutif;

                        /* Case 2: outdev is bridge group, we need to look for
                         * physical output device (when called from ipv4) */
                        if (nla_put_be32(skb, NFQA_IFINDEX_OUTDEV,
                                         htonl(outdev->ifindex)))
                                goto nla_put_failure;

                        physoutif = nf_bridge_get_physoutif(entskb);
                        if (physoutif &&
                            nla_put_be32(skb, NFQA_IFINDEX_PHYSOUTDEV,
                                         htonl(physoutif)))
                                goto nla_put_failure;
                }
#endif
        }

        if (entskb->mark &&
            nla_put_be32(skb, NFQA_MARK, htonl(entskb->mark)))
                goto nla_put_failure;

        if (indev && entskb->dev &&
            entskb->mac_header != entskb->network_header) {
                struct nfqnl_msg_packet_hw phw;
                int len;

                memset(&phw, 0, sizeof(phw));
                len = dev_parse_header(entskb, phw.hw_addr);
                if (len) {
                        phw.hw_addrlen = htons(len);
                        if (nla_put(skb, NFQA_HWADDR, sizeof(phw), &phw))
                                goto nla_put_failure;
                }
        }

        if (entskb->tstamp.tv64) {
                struct nfqnl_msg_packet_timestamp ts;
                struct timeval tv = ktime_to_timeval(entskb->tstamp);
                ts.sec = cpu_to_be64(tv.tv_sec);
                ts.usec = cpu_to_be64(tv.tv_usec);

                if (nla_put(skb, NFQA_TIMESTAMP, sizeof(ts), &ts))
                        goto nla_put_failure;
        }

        if ((queue->flags & NFQA_CFG_F_UID_GID) && entskb->sk &&
            nfqnl_put_sk_uidgid(skb, entskb->sk) < 0)
                goto nla_put_failure;

        if (seclen && nla_put(skb, NFQA_SECCTX, seclen, secdata))
                goto nla_put_failure;

        if (ct && nfqnl_ct_put(skb, ct, ctinfo) < 0)
                goto nla_put_failure;

        if (cap_len > data_len &&
            nla_put_be32(skb, NFQA_CAP_LEN, htonl(cap_len)))
                goto nla_put_failure;

        if (nfqnl_put_packet_info(skb, entskb, csum_verify))
                goto nla_put_failure;

        if (data_len) {
                struct nlattr *nla;

                if (skb_tailroom(skb) < sizeof(*nla) + hlen)
                        goto nla_put_failure;

                nla = (struct nlattr *)skb_put(skb, sizeof(*nla));
                nla->nla_type = NFQA_PAYLOAD;
                nla->nla_len = nla_attr_size(data_len);

                if (skb_zerocopy(skb, entskb, data_len, hlen))
                        goto nla_put_failure;
        }

        nlh->nlmsg_len = skb->len;
        return skb;

nla_put_failure:
        skb_tx_error(entskb);
        kfree_skb(skb);
        net_err_ratelimited("nf_queue: error creating packet message\n");
        return NULL;
}

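/* Build the netlink message for @entry, assign it the next packet id and
 * unicast it to the peer. If the queue is full the packet is dropped,
 * unless NFQA_CFG_F_FAIL_OPEN is set, in which case it is accepted instead.
 */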
static int
__nfqnl_enqueue_packet(struct net *net, struct nfqnl_instance *queue,
                        struct nf_queue_entry *entry)
{
        struct sk_buff *nskb;
        int err = -ENOBUFS;
        __be32 *packet_id_ptr;
        int failopen = 0;

        nskb = nfqnl_build_packet_message(net, queue, entry, &packet_id_ptr);
        if (nskb == NULL) {
                err = -ENOMEM;
                goto err_out;
        }
        spin_lock_bh(&queue->lock);

        if (queue->queue_total >= queue->queue_maxlen) {
                if (queue->flags & NFQA_CFG_F_FAIL_OPEN) {
                        failopen = 1;
                        err = 0;
                } else {
                        queue->queue_dropped++;
                        net_warn_ratelimited("nf_queue: full at %d entries, dropping packet(s)\n",
                                             queue->queue_total);
                }
                goto err_out_free_nskb;
        }
        entry->id = ++queue->id_sequence;
        *packet_id_ptr = htonl(entry->id);

        /* nfnetlink_unicast will either free the nskb or add it to a socket */
        err = nfnetlink_unicast(nskb, net, queue->peer_portid, MSG_DONTWAIT);
        if (err < 0) {
                queue->queue_user_dropped++;
                goto err_out_unlock;
        }

        __enqueue_entry(queue, entry);

        spin_unlock_bh(&queue->lock);
        return 0;

err_out_free_nskb:
        kfree_skb(nskb);
err_out_unlock:
        spin_unlock_bh(&queue->lock);
        if (failopen)
                nf_reinject(entry, NF_ACCEPT);
err_out:
        return err;
}

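/* Duplicate a queue entry for a GSO segment, taking its own references on
 * the objects the entry points to. Returns NULL on allocation or refcount
 * failure.
 */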
static struct nf_queue_entry *
nf_queue_entry_dup(struct nf_queue_entry *e)
{
        struct nf_queue_entry *entry = kmemdup(e, e->size, GFP_ATOMIC);
        if (entry) {
                if (nf_queue_entry_get_refs(entry))
                        return entry;
                kfree(entry);
        }
        return NULL;
}

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
/* When called from bridge netfilter, skb->data must point to MAC header
 * before calling skb_gso_segment(). Else, original MAC header is lost
 * and segmented skbs will be sent to wrong destination.
 */
static void nf_bridge_adjust_skb_data(struct sk_buff *skb)
{
        if (skb->nf_bridge)
                __skb_push(skb, skb->network_header - skb->mac_header);
}

static void nf_bridge_adjust_segmented_data(struct sk_buff *skb)
{
        if (skb->nf_bridge)
                __skb_pull(skb, skb->network_header - skb->mac_header);
}
#else
#define nf_bridge_adjust_skb_data(s) do {} while (0)
#define nf_bridge_adjust_segmented_data(s) do {} while (0)
#endif

static void free_entry(struct nf_queue_entry *entry)
{
        nf_queue_entry_release_refs(entry);
        kfree(entry);
}

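/* Enqueue one GSO segment. The last segment reuses @entry directly; every
 * earlier segment gets its own duplicated entry so each can be verdicted
 * independently.
 */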
static int
__nfqnl_enqueue_packet_gso(struct net *net, struct nfqnl_instance *queue,
                           struct sk_buff *skb, struct nf_queue_entry *entry)
{
        int ret = -ENOMEM;
        struct nf_queue_entry *entry_seg;

        nf_bridge_adjust_segmented_data(skb);

        if (skb->next == NULL) { /* last packet, no need to copy entry */
                struct sk_buff *gso_skb = entry->skb;
                entry->skb = skb;
                ret = __nfqnl_enqueue_packet(net, queue, entry);
                if (ret)
                        entry->skb = gso_skb;
                return ret;
        }

        skb->next = NULL;

        entry_seg = nf_queue_entry_dup(entry);
        if (entry_seg) {
                entry_seg->skb = skb;
                ret = __nfqnl_enqueue_packet(net, queue, entry_seg);
                if (ret)
                        free_entry(entry_seg);
        }
        return ret;
}

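/* Entry point from the nf_queue core. Looks up the target queue instance
 * and either enqueues the packet as-is or, if userspace has not requested
 * NFQA_CFG_F_GSO, software-segments GSO packets and enqueues each segment
 * separately.
 */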
static int
nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        unsigned int queued;
        struct nfqnl_instance *queue;
        struct sk_buff *skb, *segs;
        int err = -ENOBUFS;
        struct net *net = dev_net(entry->state.in ?
                                  entry->state.in : entry->state.out);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        /* rcu_read_lock()ed by nf_hook_slow() */
        queue = instance_lookup(q, queuenum);
        if (!queue)
                return -ESRCH;

        if (queue->copy_mode == NFQNL_COPY_NONE)
                return -EINVAL;

        skb = entry->skb;

        switch (entry->state.pf) {
        case NFPROTO_IPV4:
                skb->protocol = htons(ETH_P_IP);
                break;
        case NFPROTO_IPV6:
                skb->protocol = htons(ETH_P_IPV6);
                break;
        }

        if ((queue->flags & NFQA_CFG_F_GSO) || !skb_is_gso(skb))
                return __nfqnl_enqueue_packet(net, queue, entry);

        nf_bridge_adjust_skb_data(skb);
        segs = skb_gso_segment(skb, 0);
        /* Does not use PTR_ERR to limit the number of error codes that can be
         * returned by nf_queue.  For instance, callers rely on -ECANCELED to
         * mean 'ignore this hook'.
         */
        if (IS_ERR_OR_NULL(segs))
                goto out_err;
        queued = 0;
        err = 0;
        do {
                struct sk_buff *nskb = segs->next;
                if (err == 0)
                        err = __nfqnl_enqueue_packet_gso(net, queue,
                                                        segs, entry);
                if (err == 0)
                        queued++;
                else
                        kfree_skb(segs);
                segs = nskb;
        } while (segs);

        if (queued) {
                if (err) /* some segments are already queued */
                        free_entry(entry);
                kfree_skb(skb);
                return 0;
        }
 out_err:
        nf_bridge_adjust_segmented_data(skb);
        return err;
}

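/* Replace the payload of a queued packet with @data_len bytes from @data,
 * growing or trimming the skb as needed. @diff is the signed length change
 * relative to the current packet.
 */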
static int
nfqnl_mangle(void *data, int data_len, struct nf_queue_entry *e, int diff)
{
        struct sk_buff *nskb;

        if (diff < 0) {
                if (pskb_trim(e->skb, data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "nf_queue: OOM "
                                      "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, data, data_len);
        e->skb->ip_summed = CHECKSUM_NONE;
        return 0;
}

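/* Apply a copy mode / copy range requested via NFQA_CFG_PARAMS. A range of
 * 0 (or anything above NFQNL_MAX_COPY_RANGE) selects the maximum.
 */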
static int
nfqnl_set_mode(struct nfqnl_instance *queue,
               unsigned char mode, unsigned int range)
{
        int status = 0;

        spin_lock_bh(&queue->lock);
        switch (mode) {
        case NFQNL_COPY_NONE:
        case NFQNL_COPY_META:
                queue->copy_mode = mode;
                queue->copy_range = 0;
                break;

        case NFQNL_COPY_PACKET:
                queue->copy_mode = mode;
                if (range == 0 || range > NFQNL_MAX_COPY_RANGE)
                        queue->copy_range = NFQNL_MAX_COPY_RANGE;
                else
                        queue->copy_range = range;
                break;

        default:
                status = -EINVAL;

        }
        spin_unlock_bh(&queue->lock);

        return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->state.in)
                if (entry->state.in->ifindex == ifindex)
                        return 1;
        if (entry->state.out)
                if (entry->state.out->ifindex == ifindex)
                        return 1;
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                int physinif, physoutif;

                physinif = nf_bridge_get_physinif(entry->skb);
                physoutif = nf_bridge_get_physoutif(entry->skb);

                if (physinif == ifindex || physoutif == ifindex)
                        return 1;
        }
#endif
        return 0;
}

/* drop all packets with either indev or outdev == ifindex from all queue
 * instances */
static void
nfqnl_dev_drop(struct net *net, int ifindex)
{
        int i;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        rcu_read_lock();

        for (i = 0; i < INSTANCE_BUCKETS; i++) {
                struct nfqnl_instance *inst;
                struct hlist_head *head = &q->instance_table[i];

                hlist_for_each_entry_rcu(inst, head, hlist)
                        nfqnl_flush(inst, dev_cmp, ifindex);
        }

        rcu_read_unlock();
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

static int
nfqnl_rcv_dev_event(struct notifier_block *this,
                    unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                nfqnl_dev_drop(dev_net(dev), dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_dev_notifier = {
        .notifier_call  = nfqnl_rcv_dev_event,
};

static int
nfqnl_rcv_nl_event(struct notifier_block *this,
                   unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;
        struct nfnl_queue_net *q = nfnl_queue_pernet(n->net);

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_NETFILTER) {
                int i;

                /* destroy all instances for this portid */
                spin_lock(&q->instances_lock);
                for (i = 0; i < INSTANCE_BUCKETS; i++) {
                        struct hlist_node *t2;
                        struct nfqnl_instance *inst;
                        struct hlist_head *head = &q->instance_table[i];

                        hlist_for_each_entry_safe(inst, t2, head, hlist) {
                                if (n->portid == inst->peer_portid)
                                        __instance_destroy(inst);
                        }
                }
                spin_unlock(&q->instances_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block nfqnl_rtnl_notifier = {
        .notifier_call  = nfqnl_rcv_nl_event,
};

static const struct nla_policy nfqa_verdict_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
        [NFQA_PAYLOAD]          = { .type = NLA_UNSPEC },
        [NFQA_CT]               = { .type = NLA_UNSPEC },
        [NFQA_EXP]              = { .type = NLA_UNSPEC },
};

static const struct nla_policy nfqa_verdict_batch_policy[NFQA_MAX+1] = {
        [NFQA_VERDICT_HDR]      = { .len = sizeof(struct nfqnl_msg_verdict_hdr) },
        [NFQA_MARK]             = { .type = NLA_U32 },
};

static struct nfqnl_instance *
verdict_instance_lookup(struct nfnl_queue_net *q, u16 queue_num, u32 nlportid)
{
        struct nfqnl_instance *queue;

        queue = instance_lookup(q, queue_num);
        if (!queue)
                return ERR_PTR(-ENODEV);

        if (queue->peer_portid != nlportid)
                return ERR_PTR(-EPERM);

        return queue;
}

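/* Extract and sanity-check the verdict header from a verdict message.
 * Returns NULL if the attribute is missing or the verdict is invalid.
 */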
static struct nfqnl_msg_verdict_hdr*
verdicthdr_get(const struct nlattr * const nfqa[])
{
        struct nfqnl_msg_verdict_hdr *vhdr;
        unsigned int verdict;

        if (!nfqa[NFQA_VERDICT_HDR])
                return NULL;

        vhdr = nla_data(nfqa[NFQA_VERDICT_HDR]);
        verdict = ntohl(vhdr->verdict) & NF_VERDICT_MASK;
        if (verdict > NF_MAX_VERDICT || verdict == NF_STOLEN)
                return NULL;
        return vhdr;
}

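/* True if @id comes after @max in sequence order; the signed subtraction
 * keeps the comparison correct across 32-bit id wraparound.
 */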
static int nfq_id_after(unsigned int id, unsigned int max)
{
        return (int)(id - max) > 0;
}

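/* NFQNL_MSG_VERDICT_BATCH handler: apply one verdict to every queued entry
 * with an id up to and including the id in the verdict header.
 */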
static int
nfqnl_recv_verdict_batch(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        struct nf_queue_entry *entry, *tmp;
        unsigned int verdict, maxid;
        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        LIST_HEAD(batch_list);
        u16 queue_num = ntohs(nfmsg->res_id);

        struct net *net = sock_net(ctnl);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        queue = verdict_instance_lookup(q, queue_num,
                                        NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);
        maxid = ntohl(vhdr->id);

        spin_lock_bh(&queue->lock);

        list_for_each_entry_safe(entry, tmp, &queue->queue_list, list) {
                if (nfq_id_after(entry->id, maxid))
                        break;
                __dequeue_entry(queue, entry);
                list_add_tail(&entry->list, &batch_list);
        }

        spin_unlock_bh(&queue->lock);

        if (list_empty(&batch_list))
                return -ENOENT;

        list_for_each_entry_safe(entry, tmp, &batch_list, list) {
                if (nfqa[NFQA_MARK])
                        entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));
                nf_reinject(entry, verdict);
        }
        return 0;
}

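/* NFQNL_MSG_VERDICT handler: apply a verdict to a single queued entry,
 * optionally updating conntrack state, packet payload and mark first.
 */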
static int
nfqnl_recv_verdict(struct sock *ctnl, struct sk_buff *skb,
                   const struct nlmsghdr *nlh,
                   const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);

        struct nfqnl_msg_verdict_hdr *vhdr;
        struct nfqnl_instance *queue;
        unsigned int verdict;
        struct nf_queue_entry *entry;
        enum ip_conntrack_info uninitialized_var(ctinfo);
        struct nf_conn *ct = NULL;

        struct net *net = sock_net(ctnl);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        queue = instance_lookup(q, queue_num);
        if (!queue)
                queue = verdict_instance_lookup(q, queue_num,
                                                NETLINK_CB(skb).portid);
        if (IS_ERR(queue))
                return PTR_ERR(queue);

        vhdr = verdicthdr_get(nfqa);
        if (!vhdr)
                return -EINVAL;

        verdict = ntohl(vhdr->verdict);

        entry = find_dequeue_entry(queue, ntohl(vhdr->id));
        if (entry == NULL)
                return -ENOENT;

        if (nfqa[NFQA_CT]) {
                ct = nfqnl_ct_parse(entry->skb, nfqa[NFQA_CT], &ctinfo);
                if (ct && nfqa[NFQA_EXP]) {
                        nfqnl_attach_expect(ct, nfqa[NFQA_EXP],
                                            NETLINK_CB(skb).portid,
                                            nlmsg_report(nlh));
                }
        }

        if (nfqa[NFQA_PAYLOAD]) {
                u16 payload_len = nla_len(nfqa[NFQA_PAYLOAD]);
                int diff = payload_len - entry->skb->len;

                if (nfqnl_mangle(nla_data(nfqa[NFQA_PAYLOAD]),
                                 payload_len, entry, diff) < 0)
                        verdict = NF_DROP;

                if (ct)
                        nfqnl_ct_seq_adjust(entry->skb, ct, ctinfo, diff);
        }

        if (nfqa[NFQA_MARK])
                entry->skb->mark = ntohl(nla_get_be32(nfqa[NFQA_MARK]));

        nf_reinject(entry, verdict);
        return 0;
}

static int
nfqnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        return -ENOTSUPP;
}

static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
        [NFQA_CFG_CMD]          = { .len = sizeof(struct nfqnl_msg_config_cmd) },
        [NFQA_CFG_PARAMS]       = { .len = sizeof(struct nfqnl_msg_config_params) },
};

static const struct nf_queue_handler nfqh = {
        .outfn  = &nfqnl_enqueue_packet,
};

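/* NFQNL_MSG_CONFIG handler: bind/unbind queue instances and apply copy
 * mode, queue length and flag settings. Configuration changes are only
 * accepted from the netlink portid that owns the queue.
 */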
static int
nfqnl_recv_config(struct sock *ctnl, struct sk_buff *skb,
                  const struct nlmsghdr *nlh,
                  const struct nlattr * const nfqa[])
{
        struct nfgenmsg *nfmsg = nlmsg_data(nlh);
        u_int16_t queue_num = ntohs(nfmsg->res_id);
        struct nfqnl_instance *queue;
        struct nfqnl_msg_config_cmd *cmd = NULL;
        struct net *net = sock_net(ctnl);
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);
        int ret = 0;

        if (nfqa[NFQA_CFG_CMD]) {
                cmd = nla_data(nfqa[NFQA_CFG_CMD]);

                /* Obsolete commands without queue context */
                switch (cmd->command) {
                case NFQNL_CFG_CMD_PF_BIND: return 0;
                case NFQNL_CFG_CMD_PF_UNBIND: return 0;
                }
        }

        rcu_read_lock();
        queue = instance_lookup(q, queue_num);
        if (queue && queue->peer_portid != NETLINK_CB(skb).portid) {
                ret = -EPERM;
                goto err_out_unlock;
        }

        if (cmd != NULL) {
                switch (cmd->command) {
                case NFQNL_CFG_CMD_BIND:
                        if (queue) {
                                ret = -EBUSY;
                                goto err_out_unlock;
                        }
                        queue = instance_create(q, queue_num,
                                                NETLINK_CB(skb).portid);
                        if (IS_ERR(queue)) {
                                ret = PTR_ERR(queue);
                                goto err_out_unlock;
                        }
                        break;
                case NFQNL_CFG_CMD_UNBIND:
                        if (!queue) {
                                ret = -ENODEV;
                                goto err_out_unlock;
                        }
                        instance_destroy(q, queue);
                        break;
                case NFQNL_CFG_CMD_PF_BIND:
                case NFQNL_CFG_CMD_PF_UNBIND:
                        break;
                default:
                        ret = -ENOTSUPP;
                        break;
                }
        }

        if (nfqa[NFQA_CFG_PARAMS]) {
                struct nfqnl_msg_config_params *params;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                params = nla_data(nfqa[NFQA_CFG_PARAMS]);
                nfqnl_set_mode(queue, params->copy_mode,
                                ntohl(params->copy_range));
        }

        if (nfqa[NFQA_CFG_QUEUE_MAXLEN]) {
                __be32 *queue_maxlen;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }
                queue_maxlen = nla_data(nfqa[NFQA_CFG_QUEUE_MAXLEN]);
                spin_lock_bh(&queue->lock);
                queue->queue_maxlen = ntohl(*queue_maxlen);
                spin_unlock_bh(&queue->lock);
        }

        if (nfqa[NFQA_CFG_FLAGS]) {
                __u32 flags, mask;

                if (!queue) {
                        ret = -ENODEV;
                        goto err_out_unlock;
                }

                if (!nfqa[NFQA_CFG_MASK]) {
                        /* A mask is needed to specify which flags are being
                         * changed.
                         */
                        ret = -EINVAL;
                        goto err_out_unlock;
                }

                flags = ntohl(nla_get_be32(nfqa[NFQA_CFG_FLAGS]));
                mask = ntohl(nla_get_be32(nfqa[NFQA_CFG_MASK]));

                if (flags >= NFQA_CFG_F_MAX) {
                        ret = -EOPNOTSUPP;
                        goto err_out_unlock;
                }
#if !IS_ENABLED(CONFIG_NETWORK_SECMARK)
                if (flags & mask & NFQA_CFG_F_SECCTX) {
                        ret = -EOPNOTSUPP;
                        goto err_out_unlock;
                }
#endif
                spin_lock_bh(&queue->lock);
                queue->flags &= ~mask;
                queue->flags |= flags & mask;
                spin_unlock_bh(&queue->lock);
        }

err_out_unlock:
        rcu_read_unlock();
        return ret;
}

static const struct nfnl_callback nfqnl_cb[NFQNL_MSG_MAX] = {
        [NFQNL_MSG_PACKET]      = { .call_rcu = nfqnl_recv_unsupp,
                                    .attr_count = NFQA_MAX, },
        [NFQNL_MSG_VERDICT]     = { .call_rcu = nfqnl_recv_verdict,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_policy },
        [NFQNL_MSG_CONFIG]      = { .call = nfqnl_recv_config,
                                    .attr_count = NFQA_CFG_MAX,
                                    .policy = nfqa_cfg_policy },
        [NFQNL_MSG_VERDICT_BATCH]={ .call_rcu = nfqnl_recv_verdict_batch,
                                    .attr_count = NFQA_MAX,
                                    .policy = nfqa_verdict_batch_policy },
};

static const struct nfnetlink_subsystem nfqnl_subsys = {
        .name           = "nf_queue",
        .subsys_id      = NFNL_SUBSYS_QUEUE,
        .cb_count       = NFQNL_MSG_MAX,
        .cb             = nfqnl_cb,
};

#ifdef CONFIG_PROC_FS
struct iter_state {
        struct seq_net_private p;
        unsigned int bucket;
};

static struct hlist_node *get_first(struct seq_file *seq)
{
        struct iter_state *st = seq->private;
        struct net *net;
        struct nfnl_queue_net *q;

        if (!st)
                return NULL;

        net = seq_file_net(seq);
        q = nfnl_queue_pernet(net);
        for (st->bucket = 0; st->bucket < INSTANCE_BUCKETS; st->bucket++) {
                if (!hlist_empty(&q->instance_table[st->bucket]))
                        return q->instance_table[st->bucket].first;
        }
        return NULL;
}

static struct hlist_node *get_next(struct seq_file *seq, struct hlist_node *h)
{
        struct iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        h = h->next;
        while (!h) {
                struct nfnl_queue_net *q;

                if (++st->bucket >= INSTANCE_BUCKETS)
                        return NULL;

                q = nfnl_queue_pernet(net);
                h = q->instance_table[st->bucket].first;
        }
        return h;
}

static struct hlist_node *get_idx(struct seq_file *seq, loff_t pos)
{
        struct hlist_node *head;
        head = get_first(seq);

        if (head)
                while (pos && (head = get_next(seq, head)))
                        pos--;
        return pos ? NULL : head;
}

static void *seq_start(struct seq_file *s, loff_t *pos)
        __acquires(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
        spin_lock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
        return get_idx(s, *pos);
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
        (*pos)++;
        return get_next(s, v);
}

static void seq_stop(struct seq_file *s, void *v)
        __releases(nfnl_queue_pernet(seq_file_net(s))->instances_lock)
{
        spin_unlock(&nfnl_queue_pernet(seq_file_net(s))->instances_lock);
}

static int seq_show(struct seq_file *s, void *v)
{
        const struct nfqnl_instance *inst = v;

        seq_printf(s, "%5u %6u %5u %1u %5u %5u %5u %8u %2d\n",
                   inst->queue_num,
                   inst->peer_portid, inst->queue_total,
                   inst->copy_mode, inst->copy_range,
                   inst->queue_dropped, inst->queue_user_dropped,
                   inst->id_sequence, 1);
        return 0;
}

static const struct seq_operations nfqnl_seq_ops = {
        .start  = seq_start,
        .next   = seq_next,
        .stop   = seq_stop,
        .show   = seq_show,
};

static int nfqnl_open(struct inode *inode, struct file *file)
{
        return seq_open_net(inode, file, &nfqnl_seq_ops,
                        sizeof(struct iter_state));
}

static const struct file_operations nfqnl_file_ops = {
        .owner   = THIS_MODULE,
        .open    = nfqnl_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net,
};

#endif /* PROC_FS */

static int __net_init nfnl_queue_net_init(struct net *net)
{
        unsigned int i;
        struct nfnl_queue_net *q = nfnl_queue_pernet(net);

        for (i = 0; i < INSTANCE_BUCKETS; i++)
                INIT_HLIST_HEAD(&q->instance_table[i]);

        spin_lock_init(&q->instances_lock);

#ifdef CONFIG_PROC_FS
        if (!proc_create("nfnetlink_queue", 0440,
                         net->nf.proc_netfilter, &nfqnl_file_ops))
                return -ENOMEM;
#endif
        return 0;
}

static void __net_exit nfnl_queue_net_exit(struct net *net)
{
#ifdef CONFIG_PROC_FS
        remove_proc_entry("nfnetlink_queue", net->nf.proc_netfilter);
#endif
}

static struct pernet_operations nfnl_queue_net_ops = {
        .init   = nfnl_queue_net_init,
        .exit   = nfnl_queue_net_exit,
        .id     = &nfnl_queue_net_id,
        .size   = sizeof(struct nfnl_queue_net),
};

static int __init nfnetlink_queue_init(void)
{
        int status;

        status = register_pernet_subsys(&nfnl_queue_net_ops);
        if (status < 0) {
                pr_err("nf_queue: failed to register pernet ops\n");
                goto out;
        }

        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
        if (status < 0) {
                pr_err("nf_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

        register_netdevice_notifier(&nfqnl_dev_notifier);
        nf_register_queue_handler(&nfqh);
        return status;

cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
out:
        return status;
}

static void __exit nfnetlink_queue_fini(void)
{
        nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
        unregister_pernet_subsys(&nfnl_queue_net_ops);

        rcu_barrier(); /* Wait for completion of call_rcu()'s */
}

MODULE_DESCRIPTION("netfilter packet queue handler");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_QUEUE);

module_init(nfnetlink_queue_init);
module_exit(nfnetlink_queue_fini);