/*
 * This is a module which is used for queueing IPv4 packets and
 * communicating with userspace via netlink.
 *
 * (C) 2000-2002 James Morris <jmorris@intercode.com.au>
 * (C) 2003-2005 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

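/*
 * Userspace talks to this module over a NETLINK_FIREWALL socket: it
 * first sends an IPQM_MODE request selecting a copy mode, then reads
 * IPQM_PACKET messages and answers each one with an IPQM_VERDICT
 * carrying the packet_id it was given.  A minimal sketch of the peer
 * side (assumed raw-netlink usage, error handling omitted):
 *
 *      fd = socket(PF_NETLINK, SOCK_RAW, NETLINK_FIREWALL);
 *      send IPQM_MODE { mode = IPQ_COPY_PACKET, range = 0xFFFF };
 *      for (;;) { recv IPQM_PACKET; send IPQM_VERDICT { id, verdict }; }
 */
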
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/sysctl.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/security.h>
#include <linux/net.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/route.h>
#include <net/netfilter/nf_queue.h>
#include <net/ip.h>

#define IPQ_QMAX_DEFAULT 1024
#define IPQ_PROC_FS_NAME "ip_queue"
#define NET_IPQ_QMAX 2088
#define NET_IPQ_QMAX_NAME "ip_queue_maxlen"

typedef int (*ipq_cmpfn)(struct nf_queue_entry *, unsigned long);

static unsigned char copy_mode __read_mostly = IPQ_COPY_NONE;
static unsigned int queue_maxlen __read_mostly = IPQ_QMAX_DEFAULT;
static DEFINE_RWLOCK(queue_lock);
static int peer_pid __read_mostly;
static unsigned int copy_range __read_mostly;
static unsigned int queue_total;
static unsigned int queue_dropped = 0;
static unsigned int queue_user_dropped = 0;
static struct sock *ipqnl __read_mostly;
static LIST_HEAD(queue_list);
static DEFINE_MUTEX(ipqnl_mutex);

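/*
 * Queued packets live on queue_list; the kernel address of each
 * nf_queue_entry doubles as the packet ID reported to userspace.
 * queue_lock guards the list and the mode/peer state, ipqnl_mutex
 * serialises netlink receive processing.
 */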
static inline void
__ipq_enqueue_entry(struct nf_queue_entry *entry)
{
        list_add_tail(&entry->list, &queue_list);
        queue_total++;
}

static inline int
__ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status = 0;

        switch (mode) {
        case IPQ_COPY_NONE:
        case IPQ_COPY_META:
                copy_mode = mode;
                copy_range = 0;
                break;

        case IPQ_COPY_PACKET:
                copy_mode = mode;
                copy_range = range;
                if (copy_range > 0xFFFF)
                        copy_range = 0xFFFF;
                break;

        default:
                status = -EINVAL;
        }
        return status;
}

static void __ipq_flush(ipq_cmpfn cmpfn, unsigned long data);

static inline void
__ipq_reset(void)
{
        peer_pid = 0;
        net_disable_timestamp();
        __ipq_set_mode(IPQ_COPY_NONE, 0);
        __ipq_flush(NULL, 0);
}

static struct nf_queue_entry *
ipq_find_dequeue_entry(unsigned long id)
{
        struct nf_queue_entry *entry = NULL, *i;

        write_lock_bh(&queue_lock);

        list_for_each_entry(i, &queue_list, list) {
                if ((unsigned long)i == id) {
                        entry = i;
                        break;
                }
        }

        if (entry) {
                list_del(&entry->list);
                queue_total--;
        }

        write_unlock_bh(&queue_lock);
        return entry;
}

static void
__ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
        struct nf_queue_entry *entry, *next;

        list_for_each_entry_safe(entry, next, &queue_list, list) {
                if (!cmpfn || cmpfn(entry, data)) {
                        list_del(&entry->list);
                        queue_total--;
                        nf_reinject(entry, NF_DROP);
                }
        }
}

static void
ipq_flush(ipq_cmpfn cmpfn, unsigned long data)
{
        write_lock_bh(&queue_lock);
        __ipq_flush(cmpfn, data);
        write_unlock_bh(&queue_lock);
}

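/*
 * Build an IPQM_PACKET netlink message for the peer: metadata always,
 * plus up to copy_range bytes of payload in IPQ_COPY_PACKET mode.
 */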
static struct sk_buff *
ipq_build_packet_message(struct nf_queue_entry *entry, int *errp)
{
        sk_buff_data_t old_tail;
        size_t size = 0;
        size_t data_len = 0;
        struct sk_buff *skb;
        struct ipq_packet_msg *pmsg;
        struct nlmsghdr *nlh;
        struct timeval tv;

        read_lock_bh(&queue_lock);

        switch (copy_mode) {
        case IPQ_COPY_META:
        case IPQ_COPY_NONE:
                size = NLMSG_SPACE(sizeof(*pmsg));
                break;

        case IPQ_COPY_PACKET:
                if ((entry->skb->ip_summed == CHECKSUM_PARTIAL ||
                     entry->skb->ip_summed == CHECKSUM_COMPLETE) &&
                    (*errp = skb_checksum_help(entry->skb))) {
                        read_unlock_bh(&queue_lock);
                        return NULL;
                }
                if (copy_range == 0 || copy_range > entry->skb->len)
                        data_len = entry->skb->len;
                else
                        data_len = copy_range;

                size = NLMSG_SPACE(sizeof(*pmsg) + data_len);
                break;

        default:
                *errp = -EINVAL;
                read_unlock_bh(&queue_lock);
                return NULL;
        }

        read_unlock_bh(&queue_lock);

        skb = alloc_skb(size, GFP_ATOMIC);
        if (!skb)
                goto nlmsg_failure;

        old_tail = skb->tail;
        nlh = NLMSG_PUT(skb, 0, 0, IPQM_PACKET, size - sizeof(*nlh));
        pmsg = NLMSG_DATA(nlh);
        memset(pmsg, 0, sizeof(*pmsg));

        pmsg->packet_id      = (unsigned long)entry;
        pmsg->data_len       = data_len;
        tv = ktime_to_timeval(entry->skb->tstamp);
        pmsg->timestamp_sec  = tv.tv_sec;
        pmsg->timestamp_usec = tv.tv_usec;
        pmsg->mark           = entry->skb->mark;
        pmsg->hook           = entry->hook;
        pmsg->hw_protocol    = entry->skb->protocol;

        if (entry->indev)
                strcpy(pmsg->indev_name, entry->indev->name);
        else
                pmsg->indev_name[0] = '\0';

        if (entry->outdev)
                strcpy(pmsg->outdev_name, entry->outdev->name);
        else
                pmsg->outdev_name[0] = '\0';

        if (entry->indev && entry->skb->dev) {
                pmsg->hw_type = entry->skb->dev->type;
                pmsg->hw_addrlen = dev_parse_header(entry->skb,
                                                    pmsg->hw_addr);
        }

        if (data_len)
                if (skb_copy_bits(entry->skb, 0, pmsg->payload, data_len))
                        BUG();

        nlh->nlmsg_len = skb->tail - old_tail;
        return skb;

nlmsg_failure:
        *errp = -EINVAL;
        printk(KERN_ERR "ip_queue: error creating packet message\n");
        return NULL;
}

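/*
 * nf_queue handler callback: convert the packet to a netlink message,
 * unicast it to the peer and keep the entry queued until a verdict
 * arrives.
 */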
static int
ipq_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
{
        int status = -EINVAL;
        struct sk_buff *nskb;

        if (copy_mode == IPQ_COPY_NONE)
                return -EAGAIN;

        nskb = ipq_build_packet_message(entry, &status);
        if (nskb == NULL)
                return status;

        write_lock_bh(&queue_lock);

        if (!peer_pid)
                goto err_out_free_nskb;

        if (queue_total >= queue_maxlen) {
                queue_dropped++;
                status = -ENOSPC;
                if (net_ratelimit())
                        printk(KERN_WARNING "ip_queue: full at %d entries, "
                               "dropping packet(s). Dropped: %d\n", queue_total,
                               queue_dropped);
                goto err_out_free_nskb;
        }

        /* netlink_unicast will either free the nskb or attach it to a socket */
        status = netlink_unicast(ipqnl, nskb, peer_pid, MSG_DONTWAIT);
        if (status < 0) {
                queue_user_dropped++;
                goto err_out_unlock;
        }

        __ipq_enqueue_entry(entry);

        write_unlock_bh(&queue_lock);
        return status;

err_out_free_nskb:
        kfree_skb(nskb);

err_out_unlock:
        write_unlock_bh(&queue_lock);
        return status;
}

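/*
 * Replace the queued skb's contents with the payload userspace sent
 * back with its verdict, growing or trimming the skb as needed.
 */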
static int
ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
{
        int diff;
        struct iphdr *user_iph = (struct iphdr *)v->payload;
        struct sk_buff *nskb;

        if (v->data_len < sizeof(*user_iph))
                return 0;
        diff = v->data_len - e->skb->len;
        if (diff < 0) {
                if (pskb_trim(e->skb, v->data_len))
                        return -ENOMEM;
        } else if (diff > 0) {
                if (v->data_len > 0xFFFF)
                        return -EINVAL;
                if (diff > skb_tailroom(e->skb)) {
                        nskb = skb_copy_expand(e->skb, skb_headroom(e->skb),
                                               diff, GFP_ATOMIC);
                        if (!nskb) {
                                printk(KERN_WARNING "ip_queue: error "
                                       "in mangle, dropping packet\n");
                                return -ENOMEM;
                        }
                        kfree_skb(e->skb);
                        e->skb = nskb;
                }
                skb_put(e->skb, diff);
        }
        if (!skb_make_writable(e->skb, v->data_len))
                return -ENOMEM;
        skb_copy_to_linear_data(e->skb, v->payload, v->data_len);
        e->skb->ip_summed = CHECKSUM_NONE;

        return 0;
}

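/*
 * Apply a verdict from userspace: find and dequeue the entry by ID,
 * optionally mangle its payload, then reinject it into netfilter.
 */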
static int
ipq_set_verdict(struct ipq_verdict_msg *vmsg, unsigned int len)
{
        struct nf_queue_entry *entry;

        if (vmsg->value > NF_MAX_VERDICT)
                return -EINVAL;

        entry = ipq_find_dequeue_entry(vmsg->id);
        if (entry == NULL)
                return -ENOENT;
        else {
                int verdict = vmsg->value;

                if (vmsg->data_len && vmsg->data_len == len)
                        if (ipq_mangle_ipv4(vmsg, entry) < 0)
                                verdict = NF_DROP;

                nf_reinject(entry, verdict);
                return 0;
        }
}

static int
ipq_set_mode(unsigned char mode, unsigned int range)
{
        int status;

        write_lock_bh(&queue_lock);
        status = __ipq_set_mode(mode, range);
        write_unlock_bh(&queue_lock);
        return status;
}

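/* Dispatch an IPQM_MODE or IPQM_VERDICT request from the peer. */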
static int
ipq_receive_peer(struct ipq_peer_msg *pmsg,
                 unsigned char type, unsigned int len)
{
        int status = 0;

        if (len < sizeof(*pmsg))
                return -EINVAL;

        switch (type) {
        case IPQM_MODE:
                status = ipq_set_mode(pmsg->msg.mode.value,
                                      pmsg->msg.mode.range);
                break;

        case IPQM_VERDICT:
                if (pmsg->msg.verdict.value > NF_MAX_VERDICT)
                        status = -EINVAL;
                else
                        status = ipq_set_verdict(&pmsg->msg.verdict,
                                                 len - sizeof(*pmsg));
                break;

        default:
                status = -EINVAL;
        }
        return status;
}

static int
dev_cmp(struct nf_queue_entry *entry, unsigned long ifindex)
{
        if (entry->indev)
                if (entry->indev->ifindex == ifindex)
                        return 1;
        if (entry->outdev)
                if (entry->outdev->ifindex == ifindex)
                        return 1;
#ifdef CONFIG_BRIDGE_NETFILTER
        if (entry->skb->nf_bridge) {
                if (entry->skb->nf_bridge->physindev &&
                    entry->skb->nf_bridge->physindev->ifindex == ifindex)
                        return 1;
                if (entry->skb->nf_bridge->physoutdev &&
                    entry->skb->nf_bridge->physoutdev->ifindex == ifindex)
                        return 1;
        }
#endif
        return 0;
}

static void
ipq_dev_drop(int ifindex)
{
        ipq_flush(dev_cmp, ifindex);
}

#define RCV_SKB_FAIL(err) do { netlink_ack(skb, nlh, (err)); return; } while (0)

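/*
 * Validate an incoming netlink request; the first requesting PID is
 * bound as the single peer and later requests must match it.
 */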
static inline void
__ipq_rcv_skb(struct sk_buff *skb)
{
        int status, type, pid, flags, nlmsglen, skblen;
        struct nlmsghdr *nlh;

        skblen = skb->len;
        if (skblen < sizeof(*nlh))
                return;

        nlh = nlmsg_hdr(skb);
        nlmsglen = nlh->nlmsg_len;
        if (nlmsglen < sizeof(*nlh) || skblen < nlmsglen)
                return;

        pid = nlh->nlmsg_pid;
        flags = nlh->nlmsg_flags;

        if (pid <= 0 || !(flags & NLM_F_REQUEST) || flags & NLM_F_MULTI)
                RCV_SKB_FAIL(-EINVAL);

        if (flags & MSG_TRUNC)
                RCV_SKB_FAIL(-ECOMM);

        type = nlh->nlmsg_type;
        if (type < NLMSG_NOOP || type >= IPQM_MAX)
                RCV_SKB_FAIL(-EINVAL);

        if (type <= IPQM_BASE)
                return;

        if (security_netlink_recv(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);

        write_lock_bh(&queue_lock);

        if (peer_pid) {
                if (peer_pid != pid) {
                        write_unlock_bh(&queue_lock);
                        RCV_SKB_FAIL(-EBUSY);
                }
        } else {
                net_enable_timestamp();
                peer_pid = pid;
        }

        write_unlock_bh(&queue_lock);

        status = ipq_receive_peer(NLMSG_DATA(nlh), type,
                                  nlmsglen - NLMSG_LENGTH(0));
        if (status < 0)
                RCV_SKB_FAIL(status);

        if (flags & NLM_F_ACK)
                netlink_ack(skb, nlh, 0);
}

static void
ipq_rcv_skb(struct sk_buff *skb)
{
        mutex_lock(&ipqnl_mutex);
        __ipq_rcv_skb(skb);
        mutex_unlock(&ipqnl_mutex);
}

static int
ipq_rcv_dev_event(struct notifier_block *this,
                  unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        /* Drop any packets associated with the downed device */
        if (event == NETDEV_DOWN)
                ipq_dev_drop(dev->ifindex);
        return NOTIFY_DONE;
}

static struct notifier_block ipq_dev_notifier = {
        .notifier_call  = ipq_rcv_dev_event,
};

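/*
 * If the peer's netlink socket goes away, reset queue state and drop
 * everything still pending.
 */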
static int
ipq_rcv_nl_event(struct notifier_block *this,
                 unsigned long event, void *ptr)
{
        struct netlink_notify *n = ptr;

        if (event == NETLINK_URELEASE && n->protocol == NETLINK_FIREWALL) {
                write_lock_bh(&queue_lock);
                if (net_eq(n->net, &init_net) && n->pid == peer_pid)
                        __ipq_reset();
                write_unlock_bh(&queue_lock);
        }
        return NOTIFY_DONE;
}

static struct notifier_block ipq_nl_notifier = {
        .notifier_call  = ipq_rcv_nl_event,
};

#ifdef CONFIG_SYSCTL
static struct ctl_table_header *ipq_sysctl_header;

static ctl_table ipq_table[] = {
        {
                .procname       = NET_IPQ_QMAX_NAME,
                .data           = &queue_maxlen,
                .maxlen         = sizeof(queue_maxlen),
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
        { }
};
#endif

#ifdef CONFIG_PROC_FS
static int ip_queue_show(struct seq_file *m, void *v)
{
        read_lock_bh(&queue_lock);

        seq_printf(m,
                   "Peer PID          : %d\n"
                   "Copy mode         : %hu\n"
                   "Copy range        : %u\n"
                   "Queue length      : %u\n"
                   "Queue max. length : %u\n"
                   "Queue dropped     : %u\n"
                   "Netlink dropped   : %u\n",
                   peer_pid,
                   copy_mode,
                   copy_range,
                   queue_total,
                   queue_maxlen,
                   queue_dropped,
                   queue_user_dropped);

        read_unlock_bh(&queue_lock);
        return 0;
}

static int ip_queue_open(struct inode *inode, struct file *file)
{
        return single_open(file, ip_queue_show, NULL);
}

static const struct file_operations ip_queue_proc_fops = {
        .open           = ip_queue_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
        .owner          = THIS_MODULE,
};
#endif

static const struct nf_queue_handler nfqh = {
        .name   = "ip_queue",
        .outfn  = &ipq_enqueue_packet,
};

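/*
 * Register notifiers, create the NETLINK_FIREWALL kernel socket, the
 * proc entry and the sysctl, then hook into nf_queue for IPv4.
 */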
static int __init ip_queue_init(void)
{
        int status = -ENOMEM;
        struct proc_dir_entry *proc __maybe_unused;

        netlink_register_notifier(&ipq_nl_notifier);
        ipqnl = netlink_kernel_create(&init_net, NETLINK_FIREWALL, 0,
                                      ipq_rcv_skb, NULL, THIS_MODULE);
        if (ipqnl == NULL) {
                printk(KERN_ERR "ip_queue: failed to create netlink socket\n");
                goto cleanup_netlink_notifier;
        }

#ifdef CONFIG_PROC_FS
        proc = proc_create(IPQ_PROC_FS_NAME, 0, init_net.proc_net,
                           &ip_queue_proc_fops);
        if (!proc) {
                printk(KERN_ERR "ip_queue: failed to create proc entry\n");
                goto cleanup_ipqnl;
        }
#endif
        register_netdevice_notifier(&ipq_dev_notifier);
#ifdef CONFIG_SYSCTL
        ipq_sysctl_header = register_sysctl_paths(net_ipv4_ctl_path, ipq_table);
#endif
        status = nf_register_queue_handler(NFPROTO_IPV4, &nfqh);
        if (status < 0) {
                printk(KERN_ERR "ip_queue: failed to register queue handler\n");
                goto cleanup_sysctl;
        }
        return status;

cleanup_sysctl:
#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(ipq_sysctl_header);
#endif
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

cleanup_ipqnl: __maybe_unused
        netlink_kernel_release(ipqnl);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

cleanup_netlink_notifier:
        netlink_unregister_notifier(&ipq_nl_notifier);
        return status;
}

static void __exit ip_queue_fini(void)
{
        nf_unregister_queue_handlers(&nfqh);

        synchronize_net();
        ipq_flush(NULL, 0);

#ifdef CONFIG_SYSCTL
        unregister_sysctl_table(ipq_sysctl_header);
#endif
        unregister_netdevice_notifier(&ipq_dev_notifier);
        proc_net_remove(&init_net, IPQ_PROC_FS_NAME);

        netlink_kernel_release(ipqnl);
        mutex_lock(&ipqnl_mutex);
        mutex_unlock(&ipqnl_mutex);

        netlink_unregister_notifier(&ipq_nl_notifier);
}

MODULE_DESCRIPTION("IPv4 packet queue handler");
MODULE_AUTHOR("James Morris <jmorris@intercode.com.au>");
MODULE_LICENSE("GPL");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_FIREWALL);

module_init(ip_queue_init);
module_exit(ip_queue_fini);