/*
 * netfilter module for userspace packet logging daemons
 *
 * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
 * (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This module accepts two parameters:
 *
 * nlbufsiz:
 *   The parameter specifies how big the buffer for each netlink multicast
 * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
 * get accumulated in the kernel until they are sent to userspace. It is
 * NOT possible to allocate more than 128kB, and it is strongly discouraged,
 * because atomically allocating 128kB inside the network rx softirq is not
 * reliable. Please also keep in mind that this buffer size is allocated for
 * each nlgroup you are using, so the total kernel memory usage increases
 * by that factor.
 *
 * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
 * nlbufsiz is used with alloc_skb, which adds another
 * sizeof(struct skb_shared_info). Use NLMSG_GOODSIZE instead.
 *
 * flushtimeout:
 *   Specify, after how many hundredths of a second the queue should be
 * flushed even if it is not full yet.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <net/netlink.h>
#include <linux/netdevice.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ipt_ULOG.h>
#include <net/netfilter/nf_log.h>
#include <net/netns/generic.h>
#include <net/sock.h>
#include <linux/bitops.h>
#include <asm/unaligned.h>
53 MODULE_LICENSE("GPL");
54 MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
55 MODULE_DESCRIPTION("Xtables: packet logging to netlink using ULOG");
56 MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
58 #define ULOG_NL_EVENT 111 /* Harald's favorite number */
59 #define ULOG_MAXNLGROUPS 32 /* numer of nlgroups */
61 static unsigned int nlbufsiz = NLMSG_GOODSIZE;
62 module_param(nlbufsiz, uint, 0400);
63 MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
65 static unsigned int flushtimeout = 10;
66 module_param(flushtimeout, uint, 0600);
67 MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
69 static bool nflog = true;
70 module_param(nflog, bool, 0400);
71 MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
73 /* global data structures */
76 unsigned int qlen; /* number of nlmsgs' in the skb */
77 struct nlmsghdr *lastnlh; /* netlink header of last msg in skb */
78 struct sk_buff *skb; /* the pre-allocated skb */
79 struct timer_list timer; /* the timer function */
82 static int ulog_net_id __read_mostly;
84 unsigned int nlgroup[ULOG_MAXNLGROUPS];
85 ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS];
90 static struct ulog_net *ulog_pernet(struct net *net)
92 return net_generic(net, ulog_net_id);
95 /* send one ulog_buff_t to userspace */
96 static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
98 ulog_buff_t *ub = &ulog->ulog_buffers[nlgroupnum];
100 pr_debug("ulog_send: timer is deleting\n");
101 del_timer(&ub->timer);
104 pr_debug("ulog_send: nothing to send\n");
108 /* last nlmsg needs NLMSG_DONE */
110 ub->lastnlh->nlmsg_type = NLMSG_DONE;
112 NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
113 pr_debug("throwing %d packets to netlink group %u\n",
114 ub->qlen, nlgroupnum + 1);
115 netlink_broadcast(ulog->nflognl, ub->skb, 0, nlgroupnum + 1,
124 /* timer function to flush queue in flushtimeout time */
125 static void ulog_timer(unsigned long data)
127 struct ulog_net *ulog = container_of((void *)data,
129 nlgroup[*(unsigned int *)data]);
130 pr_debug("timer function called, calling ulog_send\n");
132 /* lock to protect against somebody modifying our structure
133 * from ipt_ulog_target at the same time */
134 spin_lock_bh(&ulog->lock);
135 ulog_send(ulog, data);
136 spin_unlock_bh(&ulog->lock);
139 static struct sk_buff *ulog_alloc_skb(unsigned int size)
144 /* alloc skb which should be big enough for a whole
145 * multipart message. WARNING: has to be <= 131000
146 * due to slab allocator restrictions */
148 n = max(size, nlbufsiz);
149 skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
152 /* try to allocate only as much as we need for
155 skb = alloc_skb(size, GFP_ATOMIC);
157 pr_debug("cannot even allocate %ub\n", size);
164 static void ipt_ulog_packet(unsigned int hooknum,
165 const struct sk_buff *skb,
166 const struct net_device *in,
167 const struct net_device *out,
168 const struct ipt_ulog_info *loginfo,
172 ulog_packet_msg_t *pm;
173 size_t size, copy_len;
174 struct nlmsghdr *nlh;
176 struct net *net = dev_net(in ? in : out);
177 struct ulog_net *ulog = ulog_pernet(net);
179 /* ffs == find first bit set, necessary because userspace
180 * is already shifting groupnumber, but we need unshifted.
181 * ffs() returns [1..32], we need [0..31] */
182 unsigned int groupnum = ffs(loginfo->nl_group) - 1;
184 /* calculate the size of the skb needed */
185 if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
188 copy_len = loginfo->copy_range;
190 size = nlmsg_total_size(sizeof(*pm) + copy_len);
192 ub = &ulog->ulog_buffers[groupnum];
194 spin_lock_bh(&ulog->lock);
197 if (!(ub->skb = ulog_alloc_skb(size)))
199 } else if (ub->qlen >= loginfo->qthreshold ||
200 size > skb_tailroom(ub->skb)) {
201 /* either the queue len is too high or we don't have
202 * enough room in nlskb left. send it to userspace. */
204 ulog_send(ulog, groupnum);
206 if (!(ub->skb = ulog_alloc_skb(size)))
210 pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
212 nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
213 sizeof(*pm)+copy_len, 0);
215 pr_debug("error during nlmsg_put\n");
220 pm = nlmsg_data(nlh);
222 /* We might not have a timestamp, get one */
223 if (skb->tstamp.tv64 == 0)
224 __net_timestamp((struct sk_buff *)skb);
226 /* copy hook, prefix, timestamp, payload, etc. */
227 pm->data_len = copy_len;
228 tv = ktime_to_timeval(skb->tstamp);
229 put_unaligned(tv.tv_sec, &pm->timestamp_sec);
230 put_unaligned(tv.tv_usec, &pm->timestamp_usec);
231 put_unaligned(skb->mark, &pm->mark);
234 strncpy(pm->prefix, prefix, sizeof(pm->prefix));
235 else if (loginfo->prefix[0] != '\0')
236 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
238 *(pm->prefix) = '\0';
240 if (in && in->hard_header_len > 0 &&
241 skb->mac_header != skb->network_header &&
242 in->hard_header_len <= ULOG_MAC_LEN) {
243 memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
244 pm->mac_len = in->hard_header_len;
249 strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
251 pm->indev_name[0] = '\0';
254 strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
256 pm->outdev_name[0] = '\0';
258 /* copy_len <= skb->len, so can't fail. */
259 if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
262 /* check if we are building multi-part messages */
264 ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
268 /* if timer isn't already running, start it */
269 if (!timer_pending(&ub->timer)) {
270 ub->timer.expires = jiffies + flushtimeout * HZ / 100;
271 add_timer(&ub->timer);
274 /* if threshold is reached, send message to userspace */
275 if (ub->qlen >= loginfo->qthreshold) {
276 if (loginfo->qthreshold > 1)
277 nlh->nlmsg_type = NLMSG_DONE;
278 ulog_send(ulog, groupnum);
281 spin_unlock_bh(&ulog->lock);
286 pr_debug("Error building netlink message\n");
287 spin_unlock_bh(&ulog->lock);
291 ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
293 ipt_ulog_packet(par->hooknum, skb, par->in, par->out,
294 par->targinfo, NULL);
298 static void ipt_logfn(u_int8_t pf,
299 unsigned int hooknum,
300 const struct sk_buff *skb,
301 const struct net_device *in,
302 const struct net_device *out,
303 const struct nf_loginfo *li,
306 struct ipt_ulog_info loginfo;
308 if (!li || li->type != NF_LOG_TYPE_ULOG) {
309 loginfo.nl_group = ULOG_DEFAULT_NLGROUP;
310 loginfo.copy_range = 0;
311 loginfo.qthreshold = ULOG_DEFAULT_QTHRESHOLD;
312 loginfo.prefix[0] = '\0';
314 loginfo.nl_group = li->u.ulog.group;
315 loginfo.copy_range = li->u.ulog.copy_len;
316 loginfo.qthreshold = li->u.ulog.qthreshold;
317 strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
320 ipt_ulog_packet(hooknum, skb, in, out, &loginfo, prefix);
323 static int ulog_tg_check(const struct xt_tgchk_param *par)
325 const struct ipt_ulog_info *loginfo = par->targinfo;
327 if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
328 pr_debug("prefix not null-terminated\n");
331 if (loginfo->qthreshold > ULOG_MAX_QLEN) {
332 pr_debug("queue threshold %Zu > MAX_QLEN\n",
333 loginfo->qthreshold);
340 struct compat_ipt_ulog_info {
341 compat_uint_t nl_group;
342 compat_size_t copy_range;
343 compat_size_t qthreshold;
344 char prefix[ULOG_PREFIX_LEN];
347 static void ulog_tg_compat_from_user(void *dst, const void *src)
349 const struct compat_ipt_ulog_info *cl = src;
350 struct ipt_ulog_info l = {
351 .nl_group = cl->nl_group,
352 .copy_range = cl->copy_range,
353 .qthreshold = cl->qthreshold,
356 memcpy(l.prefix, cl->prefix, sizeof(l.prefix));
357 memcpy(dst, &l, sizeof(l));
360 static int ulog_tg_compat_to_user(void __user *dst, const void *src)
362 const struct ipt_ulog_info *l = src;
363 struct compat_ipt_ulog_info cl = {
364 .nl_group = l->nl_group,
365 .copy_range = l->copy_range,
366 .qthreshold = l->qthreshold,
369 memcpy(cl.prefix, l->prefix, sizeof(cl.prefix));
370 return copy_to_user(dst, &cl, sizeof(cl)) ? -EFAULT : 0;
372 #endif /* CONFIG_COMPAT */
374 static struct xt_target ulog_tg_reg __read_mostly = {
376 .family = NFPROTO_IPV4,
378 .targetsize = sizeof(struct ipt_ulog_info),
379 .checkentry = ulog_tg_check,
381 .compatsize = sizeof(struct compat_ipt_ulog_info),
382 .compat_from_user = ulog_tg_compat_from_user,
383 .compat_to_user = ulog_tg_compat_to_user,
388 static struct nf_logger ipt_ulog_logger __read_mostly = {
394 static int __net_init ulog_tg_net_init(struct net *net)
397 struct ulog_net *ulog = ulog_pernet(net);
398 struct netlink_kernel_cfg cfg = {
399 .groups = ULOG_MAXNLGROUPS,
402 spin_lock_init(&ulog->lock);
403 /* initialize ulog_buffers */
404 for (i = 0; i < ULOG_MAXNLGROUPS; i++)
405 setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer, i);
407 ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
412 nf_log_set(net, NFPROTO_IPV4, &ipt_ulog_logger);
417 static void __net_exit ulog_tg_net_exit(struct net *net)
421 struct ulog_net *ulog = ulog_pernet(net);
424 nf_log_unset(net, &ipt_ulog_logger);
426 netlink_kernel_release(ulog->nflognl);
428 /* remove pending timers and free allocated skb's */
429 for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
430 ub = &ulog->ulog_buffers[i];
431 pr_debug("timer is deleting\n");
432 del_timer(&ub->timer);
441 static struct pernet_operations ulog_tg_net_ops = {
442 .init = ulog_tg_net_init,
443 .exit = ulog_tg_net_exit,
445 .size = sizeof(struct ulog_net),
448 static int __init ulog_tg_init(void)
451 pr_debug("init module\n");
453 if (nlbufsiz > 128*1024) {
454 pr_warn("Netlink buffer has to be <= 128kB\n");
458 ret = register_pernet_subsys(&ulog_tg_net_ops);
462 ret = xt_register_target(&ulog_tg_reg);
467 nf_log_register(NFPROTO_IPV4, &ipt_ulog_logger);
472 unregister_pernet_subsys(&ulog_tg_net_ops);
477 static void __exit ulog_tg_exit(void)
479 pr_debug("cleanup_module\n");
481 nf_log_unregister(&ipt_ulog_logger);
482 xt_unregister_target(&ulog_tg_reg);
483 unregister_pernet_subsys(&ulog_tg_net_ops);
module_init(ulog_tg_init);
module_exit(ulog_tg_exit);