/* ip_nat_helper.c - generic support functions for NAT helpers
 *
 * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
 * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
10 #include <linux/module.h>
11 #include <linux/kmod.h>
12 #include <linux/types.h>
13 #include <linux/timer.h>
14 #include <linux/skbuff.h>
15 #include <linux/tcp.h>
16 #include <linux/udp.h>
17 #include <net/checksum.h>
19 #include <net/route.h>
21 #include <linux/netfilter_ipv4.h>
22 #include <net/netfilter/nf_conntrack.h>
23 #include <net/netfilter/nf_conntrack_helper.h>
24 #include <net/netfilter/nf_conntrack_ecache.h>
25 #include <net/netfilter/nf_conntrack_expect.h>
26 #include <net/netfilter/nf_nat.h>
27 #include <net/netfilter/nf_nat_protocol.h>
28 #include <net/netfilter/nf_nat_core.h>
29 #include <net/netfilter/nf_nat_helper.h>
31 #define DUMP_OFFSET(x) \
32 pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
33 x->offset_before, x->offset_after, x->correction_pos);
35 static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
37 /* Setup TCP sequence correction given this change at this sequence */
39 adjust_tcp_sequence(u32 seq,
42 enum ip_conntrack_info ctinfo)
44 enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
45 struct nf_conn_nat *nat = nfct_nat(ct);
46 struct nf_nat_seq *this_way = &nat->seq[dir];
48 pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
51 pr_debug("adjust_tcp_sequence: Seq_offset before: ");
52 DUMP_OFFSET(this_way);
54 spin_lock_bh(&nf_nat_seqofs_lock);
56 /* SYN adjust. If it's uninitialized, or this is after last
57 * correction, record it: we don't handle more than one
58 * adjustment in the window, but do deal with common case of a
60 if (this_way->offset_before == this_way->offset_after ||
61 before(this_way->correction_pos, seq)) {
62 this_way->correction_pos = seq;
63 this_way->offset_before = this_way->offset_after;
64 this_way->offset_after += sizediff;
66 spin_unlock_bh(&nf_nat_seqofs_lock);
68 pr_debug("adjust_tcp_sequence: Seq_offset after: ");
69 DUMP_OFFSET(this_way);
72 /* Get the offset value, for conntrack */
73 s16 nf_nat_get_offset(const struct nf_conn *ct,
74 enum ip_conntrack_dir dir,
77 struct nf_conn_nat *nat = nfct_nat(ct);
78 struct nf_nat_seq *this_way;
84 this_way = &nat->seq[dir];
85 spin_lock_bh(&nf_nat_seqofs_lock);
86 offset = after(seq, this_way->correction_pos)
87 ? this_way->offset_after : this_way->offset_before;
88 spin_unlock_bh(&nf_nat_seqofs_lock);
92 EXPORT_SYMBOL_GPL(nf_nat_get_offset);
94 /* Frobs data inside this packet, which is linear. */
95 static void mangle_contents(struct sk_buff *skb,
97 unsigned int match_offset,
98 unsigned int match_len,
99 const char *rep_buffer,
100 unsigned int rep_len)
104 BUG_ON(skb_is_nonlinear(skb));
105 data = skb_network_header(skb) + dataoff;
107 /* move post-replacement */
108 memmove(data + match_offset + rep_len,
109 data + match_offset + match_len,
110 skb->tail - (skb->network_header + dataoff +
111 match_offset + match_len));
113 /* insert data from buffer */
114 memcpy(data + match_offset, rep_buffer, rep_len);
116 /* update skb info */
117 if (rep_len > match_len) {
118 pr_debug("nf_nat_mangle_packet: Extending packet by "
119 "%u from %u bytes\n", rep_len - match_len, skb->len);
120 skb_put(skb, rep_len - match_len);
122 pr_debug("nf_nat_mangle_packet: Shrinking packet from "
123 "%u from %u bytes\n", match_len - rep_len, skb->len);
124 __skb_trim(skb, skb->len + rep_len - match_len);
127 /* fix IP hdr checksum information */
128 ip_hdr(skb)->tot_len = htons(skb->len);
129 ip_send_check(ip_hdr(skb));
132 /* Unusual, but possible case. */
133 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
135 if (skb->len + extra > 65535)
138 if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
144 void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
149 set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
150 adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
151 nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
153 EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155 /* Generic function for mangling variable-length address changes inside
156 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
159 * Takes care about all the nasty sequence number changes, checksumming,
160 * skb enlargement, ...
163 int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
165 enum ip_conntrack_info ctinfo,
166 unsigned int match_offset,
167 unsigned int match_len,
168 const char *rep_buffer,
169 unsigned int rep_len, bool adjust)
171 struct rtable *rt = skb_rtable(skb);
176 if (!skb_make_writable(skb, skb->len))
179 if (rep_len > match_len &&
180 rep_len - match_len > skb_tailroom(skb) &&
181 !enlarge_skb(skb, rep_len - match_len))
184 SKB_LINEAR_ASSERT(skb);
187 tcph = (void *)iph + iph->ihl*4;
189 oldlen = skb->len - iph->ihl*4;
190 mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
191 match_offset, match_len, rep_buffer, rep_len);
193 datalen = skb->len - iph->ihl*4;
194 if (skb->ip_summed != CHECKSUM_PARTIAL) {
195 if (!(rt->rt_flags & RTCF_LOCAL) &&
196 skb->dev->features & NETIF_F_V4_CSUM) {
197 skb->ip_summed = CHECKSUM_PARTIAL;
198 skb->csum_start = skb_headroom(skb) +
199 skb_network_offset(skb) +
201 skb->csum_offset = offsetof(struct tcphdr, check);
202 tcph->check = ~tcp_v4_check(datalen,
203 iph->saddr, iph->daddr, 0);
206 tcph->check = tcp_v4_check(datalen,
207 iph->saddr, iph->daddr,
212 inet_proto_csum_replace2(&tcph->check, skb,
213 htons(oldlen), htons(datalen), 1);
215 if (adjust && rep_len != match_len)
216 nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
217 (int)rep_len - (int)match_len);
221 EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
223 /* Generic function for mangling variable-length address changes inside
224 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
225 * command in the Amanda protocol)
227 * Takes care about all the nasty sequence number changes, checksumming,
228 * skb enlargement, ...
230 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
231 * should be fairly easy to do.
234 nf_nat_mangle_udp_packet(struct sk_buff *skb,
236 enum ip_conntrack_info ctinfo,
237 unsigned int match_offset,
238 unsigned int match_len,
239 const char *rep_buffer,
240 unsigned int rep_len)
242 struct rtable *rt = skb_rtable(skb);
247 /* UDP helpers might accidentally mangle the wrong packet */
249 if (skb->len < iph->ihl*4 + sizeof(*udph) +
250 match_offset + match_len)
253 if (!skb_make_writable(skb, skb->len))
256 if (rep_len > match_len &&
257 rep_len - match_len > skb_tailroom(skb) &&
258 !enlarge_skb(skb, rep_len - match_len))
262 udph = (void *)iph + iph->ihl*4;
264 oldlen = skb->len - iph->ihl*4;
265 mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
266 match_offset, match_len, rep_buffer, rep_len);
268 /* update the length of the UDP packet */
269 datalen = skb->len - iph->ihl*4;
270 udph->len = htons(datalen);
272 /* fix udp checksum if udp checksum was previously calculated */
273 if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
276 if (skb->ip_summed != CHECKSUM_PARTIAL) {
277 if (!(rt->rt_flags & RTCF_LOCAL) &&
278 skb->dev->features & NETIF_F_V4_CSUM) {
279 skb->ip_summed = CHECKSUM_PARTIAL;
280 skb->csum_start = skb_headroom(skb) +
281 skb_network_offset(skb) +
283 skb->csum_offset = offsetof(struct udphdr, check);
284 udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
285 datalen, IPPROTO_UDP,
289 udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
290 datalen, IPPROTO_UDP,
294 udph->check = CSUM_MANGLED_0;
297 inet_proto_csum_replace2(&udph->check, skb,
298 htons(oldlen), htons(datalen), 1);
302 EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
304 /* Adjust one found SACK option including checksum correction */
306 sack_adjust(struct sk_buff *skb,
308 unsigned int sackoff,
309 unsigned int sackend,
310 struct nf_nat_seq *natseq)
312 while (sackoff < sackend) {
313 struct tcp_sack_block_wire *sack;
314 __be32 new_start_seq, new_end_seq;
316 sack = (void *)skb->data + sackoff;
317 if (after(ntohl(sack->start_seq) - natseq->offset_before,
318 natseq->correction_pos))
319 new_start_seq = htonl(ntohl(sack->start_seq)
320 - natseq->offset_after);
322 new_start_seq = htonl(ntohl(sack->start_seq)
323 - natseq->offset_before);
325 if (after(ntohl(sack->end_seq) - natseq->offset_before,
326 natseq->correction_pos))
327 new_end_seq = htonl(ntohl(sack->end_seq)
328 - natseq->offset_after);
330 new_end_seq = htonl(ntohl(sack->end_seq)
331 - natseq->offset_before);
333 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
334 ntohl(sack->start_seq), new_start_seq,
335 ntohl(sack->end_seq), new_end_seq);
337 inet_proto_csum_replace4(&tcph->check, skb,
338 sack->start_seq, new_start_seq, 0);
339 inet_proto_csum_replace4(&tcph->check, skb,
340 sack->end_seq, new_end_seq, 0);
341 sack->start_seq = new_start_seq;
342 sack->end_seq = new_end_seq;
343 sackoff += sizeof(*sack);
347 /* TCP SACK sequence number adjustment */
348 static inline unsigned int
349 nf_nat_sack_adjust(struct sk_buff *skb,
352 enum ip_conntrack_info ctinfo)
354 unsigned int dir, optoff, optend;
355 struct nf_conn_nat *nat = nfct_nat(ct);
357 optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
358 optend = ip_hdrlen(skb) + tcph->doff * 4;
360 if (!skb_make_writable(skb, optend))
363 dir = CTINFO2DIR(ctinfo);
365 while (optoff < optend) {
366 /* Usually: option, length. */
367 unsigned char *op = skb->data + optoff;
376 /* no partial options */
377 if (optoff + 1 == optend ||
378 optoff + op[1] > optend ||
381 if (op[0] == TCPOPT_SACK &&
382 op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
383 ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
384 sack_adjust(skb, tcph, optoff+2,
385 optoff+op[1], &nat->seq[!dir]);
392 /* TCP sequence number adjustment. Returns 1 on success, 0 on failure */
394 nf_nat_seq_adjust(struct sk_buff *skb,
396 enum ip_conntrack_info ctinfo)
400 __be32 newseq, newack;
402 struct nf_conn_nat *nat = nfct_nat(ct);
403 struct nf_nat_seq *this_way, *other_way;
405 dir = CTINFO2DIR(ctinfo);
407 this_way = &nat->seq[dir];
408 other_way = &nat->seq[!dir];
410 if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
413 tcph = (void *)skb->data + ip_hdrlen(skb);
414 if (after(ntohl(tcph->seq), this_way->correction_pos))
415 seqoff = this_way->offset_after;
417 seqoff = this_way->offset_before;
419 if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
420 other_way->correction_pos))
421 ackoff = other_way->offset_after;
423 ackoff = other_way->offset_before;
425 newseq = htonl(ntohl(tcph->seq) + seqoff);
426 newack = htonl(ntohl(tcph->ack_seq) - ackoff);
428 inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
429 inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);
431 pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
432 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
436 tcph->ack_seq = newack;
438 return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
441 /* Setup NAT on this expected conntrack so it follows master. */
442 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
443 void nf_nat_follow_master(struct nf_conn *ct,
444 struct nf_conntrack_expect *exp)
446 struct nf_nat_range range;
448 /* This must be a fresh one. */
449 BUG_ON(ct->status & IPS_NAT_DONE_MASK);
451 /* Change src to where master sends to */
452 range.flags = IP_NAT_RANGE_MAP_IPS;
453 range.min_ip = range.max_ip
454 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
455 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
457 /* For DST manip, map port here to where it's expected. */
458 range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
459 range.min = range.max = exp->saved_proto;
460 range.min_ip = range.max_ip
461 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
462 nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
464 EXPORT_SYMBOL(nf_nat_follow_master);