#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>

#include "nf_internals.h"

/*
 * A queue handler may be registered for each protocol.  Each is protected by
 * a long term mutex.  The handler must provide an outfn() to accept packets
 * for queueing and must reinject all packets it receives, no matter what.
 */
static const struct nf_queue_handler *queue_handler[NFPROTO_NUMPROTO] __read_mostly;

static DEFINE_MUTEX(queue_handler_mutex);

/* return EBUSY when somebody else is registered, return EEXIST if the
 * same handler is registered, return 0 in case of success. */
int nf_register_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	int ret;

	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] == qh)
		ret = -EEXIST;
	else if (queue_handler[pf])
		ret = -EBUSY;
	else {
		/* Publish the handler; __nf_queue() reads it under RCU. */
		rcu_assign_pointer(queue_handler[pf], qh);
		ret = 0;
	}
	mutex_unlock(&queue_handler_mutex);

	return ret;
}
EXPORT_SYMBOL(nf_register_queue_handler);
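
/*
 * Illustrative sketch only (not part of this file): a minimal handler
 * registration in the style of nfnetlink_queue.  The names my_outfn and
 * my_qh are hypothetical.
 *
 *	static int my_outfn(struct nf_queue_entry *entry,
 *			    unsigned int queuenum)
 *	{
 *		... stash entry and hand the packet to userspace; every
 *		    entry must eventually go back via nf_reinject() ...
 *		return 0;
 *	}
 *
 *	static const struct nf_queue_handler my_qh = {
 *		.name	= "my_queue",
 *		.outfn	= my_outfn,
 *	};
 *
 *	err = nf_register_queue_handler(NFPROTO_IPV4, &my_qh);
 */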

/* The caller must flush their queue before this */
int nf_unregister_queue_handler(u_int8_t pf, const struct nf_queue_handler *qh)
{
	if (pf >= ARRAY_SIZE(queue_handler))
		return -EINVAL;

	mutex_lock(&queue_handler_mutex);
	if (queue_handler[pf] && queue_handler[pf] != qh) {
		mutex_unlock(&queue_handler_mutex);
		return -EINVAL;
	}

	rcu_assign_pointer(queue_handler[pf], NULL);
	mutex_unlock(&queue_handler_mutex);

	/* Wait for in-flight __nf_queue() readers before returning. */
	synchronize_rcu();

	return 0;
}
EXPORT_SYMBOL(nf_unregister_queue_handler);

void nf_unregister_queue_handlers(const struct nf_queue_handler *qh)
{
	u_int8_t pf;

	mutex_lock(&queue_handler_mutex);
	for (pf = 0; pf < ARRAY_SIZE(queue_handler); pf++) {
		if (queue_handler[pf] == qh)
			rcu_assign_pointer(queue_handler[pf], NULL);
	}
	mutex_unlock(&queue_handler_mutex);

	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_unregister_queue_handlers);
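
/*
 * Illustrative sketch only: a handler module's exit path would pair the
 * registration above with a sweep over all protocol families (my_qh is
 * hypothetical):
 *
 *	static void __exit my_queue_exit(void)
 *	{
 *		nf_unregister_queue_handlers(&my_qh);
 *	}
 */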

static void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
	/* Release those devices we held, or Alexey will kill me. */
	if (entry->indev)
		dev_put(entry->indev);
	if (entry->outdev)
		dev_put(entry->outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (entry->skb->nf_bridge) {
		struct nf_bridge_info *nf_bridge = entry->skb->nf_bridge;

		if (nf_bridge->physindev)
			dev_put(nf_bridge->physindev);
		if (nf_bridge->physoutdev)
			dev_put(nf_bridge->physoutdev);
	}
#endif
	/* Drop reference to owner of hook which queued us. */
	module_put(entry->elem->owner);
}

/*
 * Any packet that leaves via this function must come back
 * through nf_reinject().
 */
static int __nf_queue(struct sk_buff *skb,
		      struct list_head *elem,
		      u_int8_t pf, unsigned int hook,
		      struct net_device *indev,
		      struct net_device *outdev,
		      int (*okfn)(struct sk_buff *),
		      unsigned int queuenum)
{
	int status;
	struct nf_queue_entry *entry = NULL;
#ifdef CONFIG_BRIDGE_NETFILTER
	struct net_device *physindev;
	struct net_device *physoutdev;
#endif
	const struct nf_afinfo *afinfo;
	const struct nf_queue_handler *qh;

	/* QUEUE == DROP if no one is waiting, to be safe. */
	rcu_read_lock();

	qh = rcu_dereference(queue_handler[pf]);
	if (!qh)
		goto err_unlock;

	afinfo = nf_get_afinfo(pf);
	if (!afinfo)
		goto err_unlock;

	entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
	if (!entry)
		goto err_unlock;

	*entry = (struct nf_queue_entry) {
		.skb	= skb,
		.elem	= list_entry(elem, struct nf_hook_ops, list),
		.pf	= pf,
		.hook	= hook,
		.indev	= indev,
		.outdev	= outdev,
		.okfn	= okfn,
	};

	/* If it's going away, ignore hook. */
	if (!try_module_get(entry->elem->owner)) {
		rcu_read_unlock();
		kfree(entry);
		return 0;
	}

	/* Bump dev refs so they don't vanish while packet is out */
	if (indev)
		dev_hold(indev);
	if (outdev)
		dev_hold(outdev);
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge) {
		physindev = skb->nf_bridge->physindev;
		if (physindev)
			dev_hold(physindev);
		physoutdev = skb->nf_bridge->physoutdev;
		if (physoutdev)
			dev_hold(physoutdev);
	}
#endif
	afinfo->saveroute(skb, entry);
	status = qh->outfn(entry, queuenum);

	rcu_read_unlock();

	if (status < 0) {
		nf_queue_entry_release_refs(entry);
		goto err;
	}

	return 1;

err_unlock:
	rcu_read_unlock();
err:
	kfree_skb(skb);
	kfree(entry);
	return 1;
}

int nf_queue(struct sk_buff *skb,
	     struct list_head *elem,
	     u_int8_t pf, unsigned int hook,
	     struct net_device *indev,
	     struct net_device *outdev,
	     int (*okfn)(struct sk_buff *),
	     unsigned int queuenum)
{
	struct sk_buff *segs;

	if (!skb_is_gso(skb))
		return __nf_queue(skb, elem, pf, hook, indev, outdev, okfn,
				  queuenum);

	/* GSO packets are segmented and each segment queued separately. */
	switch (pf) {
	case NFPROTO_IPV4:
		skb->protocol = htons(ETH_P_IP);
		break;
	case NFPROTO_IPV6:
		skb->protocol = htons(ETH_P_IPV6);
		break;
	}

	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return 1;

	do {
		struct sk_buff *nskb = segs->next;

		segs->next = NULL;
		if (!__nf_queue(segs, elem, pf, hook, indev, outdev, okfn,
				queuenum))
			kfree_skb(segs);
		segs = nskb;
	} while (segs);
	return 1;
}

void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
	struct sk_buff *skb = entry->skb;
	struct list_head *elem = &entry->elem->list;
	const struct nf_afinfo *afinfo;

	rcu_read_lock();

	nf_queue_entry_release_refs(entry);

	/* Continue traversal iff userspace said ok... */
	if (verdict == NF_REPEAT) {
		elem = elem->prev;
		verdict = NF_ACCEPT;
	}

	if (verdict == NF_ACCEPT) {
		afinfo = nf_get_afinfo(entry->pf);
		if (!afinfo || afinfo->reroute(skb, entry) < 0)
			verdict = NF_DROP;
	}

	if (verdict == NF_ACCEPT) {
	next_hook:
		verdict = nf_iterate(&nf_hooks[entry->pf][entry->hook],
				     skb, entry->hook,
				     entry->indev, entry->outdev, &elem,
				     entry->okfn, INT_MIN);
	}

	switch (verdict & NF_VERDICT_MASK) {
	case NF_ACCEPT:
	case NF_STOP:
		local_bh_disable();
		entry->okfn(skb);
		local_bh_enable();
		break;
	case NF_QUEUE:
		if (!__nf_queue(skb, elem, entry->pf, entry->hook,
				entry->indev, entry->outdev, entry->okfn,
				verdict >> NF_VERDICT_BITS))
			goto next_hook;
		break;
	case NF_STOLEN:
		break;
	default:
		kfree_skb(skb);
	}
	rcu_read_unlock();
	kfree(entry);
}
EXPORT_SYMBOL(nf_reinject);
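
/*
 * Illustrative sketch only: a handler must eventually hand every queued
 * entry back through nf_reinject(), passing the userspace verdict
 * (NF_ACCEPT, NF_DROP, NF_REPEAT, ...).  The name my_verdict_rcv is
 * hypothetical:
 *
 *	static void my_verdict_rcv(struct nf_queue_entry *entry,
 *				   unsigned int verdict)
 *	{
 *		nf_reinject(entry, verdict);
 *	}
 */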

#ifdef CONFIG_PROC_FS
static void *seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	(*pos)++;

	if (*pos >= ARRAY_SIZE(queue_handler))
		return NULL;

	return pos;
}

static void seq_stop(struct seq_file *s, void *v)
{
}

static int seq_show(struct seq_file *s, void *v)
{
	int ret;
	loff_t *pos = v;
	const struct nf_queue_handler *qh;

	rcu_read_lock();
	qh = rcu_dereference(queue_handler[*pos]);
	if (!qh)
		ret = seq_printf(s, "%2lld NONE\n", *pos);
	else
		ret = seq_printf(s, "%2lld %s\n", *pos, qh->name);
	rcu_read_unlock();

	return ret;
}

static const struct seq_operations nfqueue_seq_ops = {
	.start	= seq_start,
	.next	= seq_next,
	.stop	= seq_stop,
	.show	= seq_show,
};

static int nfqueue_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &nfqueue_seq_ops);
}

static const struct file_operations nfqueue_file_ops = {
	.owner	 = THIS_MODULE,
	.open	 = nfqueue_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#endif /* PROC_FS */

int __init netfilter_queue_init(void)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("nf_queue", S_IRUGO,
			 proc_net_netfilter, &nfqueue_file_ops))
		return -1;
#endif
	return 0;
}
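
/*
 * Illustrative sketch only: reading the proc file created above yields
 * one line per protocol family, formatted by seq_show().  The output
 * below is hypothetical (a handler registered for pf 2):
 *
 *	$ cat /proc/net/netfilter/nf_queue
 *	 0 NONE
 *	 1 NONE
 *	 2 nf_queue
 *	 3 NONE
 *	...
 */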