]> git.karo-electronics.de Git - mv-sheeva.git/blob - net/ipv4/netfilter/nf_nat_helper.c
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[mv-sheeva.git] / net / ipv4 / netfilter / nf_nat_helper.c
1 /* ip_nat_helper.c - generic support functions for NAT helpers
2  *
3  * (C) 2000-2002 Harald Welte <laforge@netfilter.org>
4  * (C) 2003-2006 Netfilter Core Team <coreteam@netfilter.org>
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  */
10 #include <linux/module.h>
11 #include <linux/gfp.h>
12 #include <linux/kmod.h>
13 #include <linux/types.h>
14 #include <linux/timer.h>
15 #include <linux/skbuff.h>
16 #include <linux/tcp.h>
17 #include <linux/udp.h>
18 #include <net/checksum.h>
19 #include <net/tcp.h>
20 #include <net/route.h>
21
22 #include <linux/netfilter_ipv4.h>
23 #include <net/netfilter/nf_conntrack.h>
24 #include <net/netfilter/nf_conntrack_helper.h>
25 #include <net/netfilter/nf_conntrack_ecache.h>
26 #include <net/netfilter/nf_conntrack_expect.h>
27 #include <net/netfilter/nf_nat.h>
28 #include <net/netfilter/nf_nat_protocol.h>
29 #include <net/netfilter/nf_nat_core.h>
30 #include <net/netfilter/nf_nat_helper.h>
31
/* Dump one direction's TCP sequence-offset bookkeeping (debug builds only).
 * Wrapped in do { } while (0) so it expands to exactly one statement (the
 * original ended in ';', leaving a stray empty statement at call sites),
 * and the argument is parenthesized against precedence surprises. */
#define DUMP_OFFSET(x) \
	do { \
		pr_debug("offset_before=%d, offset_after=%d, correction_pos=%u\n", \
			 (x)->offset_before, (x)->offset_after, \
			 (x)->correction_pos); \
	} while (0)

/* Serializes all reads/updates of the per-conntrack nf_nat_seq records */
static DEFINE_SPINLOCK(nf_nat_seqofs_lock);
37
38 /* Setup TCP sequence correction given this change at this sequence */
39 static inline void
40 adjust_tcp_sequence(u32 seq,
41                     int sizediff,
42                     struct nf_conn *ct,
43                     enum ip_conntrack_info ctinfo)
44 {
45         enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);
46         struct nf_conn_nat *nat = nfct_nat(ct);
47         struct nf_nat_seq *this_way = &nat->seq[dir];
48
49         pr_debug("adjust_tcp_sequence: seq = %u, sizediff = %d\n",
50                  seq, sizediff);
51
52         pr_debug("adjust_tcp_sequence: Seq_offset before: ");
53         DUMP_OFFSET(this_way);
54
55         spin_lock_bh(&nf_nat_seqofs_lock);
56
57         /* SYN adjust. If it's uninitialized, or this is after last
58          * correction, record it: we don't handle more than one
59          * adjustment in the window, but do deal with common case of a
60          * retransmit */
61         if (this_way->offset_before == this_way->offset_after ||
62             before(this_way->correction_pos, seq)) {
63                 this_way->correction_pos = seq;
64                 this_way->offset_before = this_way->offset_after;
65                 this_way->offset_after += sizediff;
66         }
67         spin_unlock_bh(&nf_nat_seqofs_lock);
68
69         pr_debug("adjust_tcp_sequence: Seq_offset after: ");
70         DUMP_OFFSET(this_way);
71 }
72
73 /* Get the offset value, for conntrack */
74 s16 nf_nat_get_offset(const struct nf_conn *ct,
75                       enum ip_conntrack_dir dir,
76                       u32 seq)
77 {
78         struct nf_conn_nat *nat = nfct_nat(ct);
79         struct nf_nat_seq *this_way;
80         s16 offset;
81
82         if (!nat)
83                 return 0;
84
85         this_way = &nat->seq[dir];
86         spin_lock_bh(&nf_nat_seqofs_lock);
87         offset = after(seq, this_way->correction_pos)
88                  ? this_way->offset_after : this_way->offset_before;
89         spin_unlock_bh(&nf_nat_seqofs_lock);
90
91         return offset;
92 }
93 EXPORT_SYMBOL_GPL(nf_nat_get_offset);
94
/* Frobs data inside this packet, which is linear.
 *
 * Replaces @match_len bytes at @match_offset (relative to @dataoff past
 * the network header) with @rep_len bytes from @rep_buffer, shifting any
 * trailing payload, resizing the skb, and re-finalizing the IP header
 * length and checksum.  The caller must have made the skb writable. */
static void mangle_contents(struct sk_buff *skb,
			    unsigned int dataoff,
			    unsigned int match_offset,
			    unsigned int match_len,
			    const char *rep_buffer,
			    unsigned int rep_len)
{
	unsigned char *data;

	BUG_ON(skb_is_nonlinear(skb));
	data = skb_network_header(skb) + dataoff;

	/* move post-replacement */
	/* Length of the tail being moved = bytes between the end of the
	 * matched region and skb->tail. */
	memmove(data + match_offset + rep_len,
		data + match_offset + match_len,
		skb->tail - (skb->network_header + dataoff +
			     match_offset + match_len));

	/* insert data from buffer */
	memcpy(data + match_offset, rep_buffer, rep_len);

	/* update skb info */
	if (rep_len > match_len) {
		pr_debug("nf_nat_mangle_packet: Extending packet by "
			 "%u from %u bytes\n", rep_len - match_len, skb->len);
		skb_put(skb, rep_len - match_len);
	} else {
		pr_debug("nf_nat_mangle_packet: Shrinking packet from "
			 "%u from %u bytes\n", match_len - rep_len, skb->len);
		__skb_trim(skb, skb->len + rep_len - match_len);
	}

	/* fix IP hdr checksum information */
	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));
}
132
133 /* Unusual, but possible case. */
134 static int enlarge_skb(struct sk_buff *skb, unsigned int extra)
135 {
136         if (skb->len + extra > 65535)
137                 return 0;
138
139         if (pskb_expand_head(skb, 0, extra - skb_tailroom(skb), GFP_ATOMIC))
140                 return 0;
141
142         return 1;
143 }
144
/* Record a sequence-number adjustment of @off bytes at TCP sequence
 * number @seq (network byte order) on this connection, flag the
 * conntrack for sequence fixups, and emit a NATSEQADJ event.
 * A zero offset requires no correction and is ignored. */
void nf_nat_set_seq_adjust(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
			   __be32 seq, s16 off)
{
	if (!off)
		return;
	/* Tell the conntrack core that this flow needs seq adjustment */
	set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
	adjust_tcp_sequence(ntohl(seq), off, ct, ctinfo);
	nf_conntrack_event_cache(IPCT_NATSEQADJ, ct);
}
EXPORT_SYMBOL_GPL(nf_nat_set_seq_adjust);
155
/* Generic function for mangling variable-length address changes inside
 * NATed TCP connections (like the PORT XXX,XXX,XXX,XXX,XXX,XXX
 * command in FTP).
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 on failure (skb not writable / too large).
 * When @adjust is true and the payload length changed, a sequence
 * adjustment is recorded via nf_nat_set_seq_adjust().
 * */
int __nf_nat_mangle_tcp_packet(struct sk_buff *skb,
			       struct nf_conn *ct,
			       enum ip_conntrack_info ctinfo,
			       unsigned int match_offset,
			       unsigned int match_len,
			       const char *rep_buffer,
			       unsigned int rep_len, bool adjust)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct tcphdr *tcph;
	int oldlen, datalen;

	/* The whole packet must be writable (and linear) before mangling */
	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing the payload: make sure there is enough tailroom */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	SKB_LINEAR_ASSERT(skb);

	/* Re-read headers: the skb data may have moved above */
	iph = ip_hdr(skb);
	tcph = (void *)iph + iph->ihl*4;

	/* TCP segment length (header + payload) before the change */
	oldlen = skb->len - iph->ihl*4;
	/* match_offset is relative to the start of the TCP payload */
	mangle_contents(skb, iph->ihl*4 + tcph->doff*4,
			match_offset, match_len, rep_buffer, rep_len);

	datalen = skb->len - iph->ihl*4;
	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Not locally destined and the device can checksum:
		 * leave only the pseudo-header sum and let hw finish it */
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct tcphdr, check);
			tcph->check = ~tcp_v4_check(datalen,
						    iph->saddr, iph->daddr, 0);
		} else {
			/* Recompute the full TCP checksum in software */
			tcph->check = 0;
			tcph->check = tcp_v4_check(datalen,
						   iph->saddr, iph->daddr,
						   csum_partial(tcph,
								datalen, 0));
		}
	} else
		/* Partial checksum in flight: only patch in the length delta */
		inet_proto_csum_replace2(&tcph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	/* Payload size changed: remember the sequence-number shift */
	if (adjust && rep_len != match_len)
		nf_nat_set_seq_adjust(ct, ctinfo, tcph->seq,
				      (int)rep_len - (int)match_len);

	return 1;
}
EXPORT_SYMBOL(__nf_nat_mangle_tcp_packet);
223
/* Generic function for mangling variable-length address changes inside
 * NATed UDP connections (like the CONNECT DATA XXXXX MESG XXXXX INDEX XXXXX
 * command in the Amanda protocol)
 *
 * Takes care about all the nasty sequence number changes, checksumming,
 * skb enlargement, ...
 *
 * Returns 1 on success, 0 on failure (packet too short, not writable,
 * or could not be enlarged).
 *
 * XXX - This function could be merged with nf_nat_mangle_tcp_packet which
 *       should be fairly easy to do.
 */
int
nf_nat_mangle_udp_packet(struct sk_buff *skb,
			 struct nf_conn *ct,
			 enum ip_conntrack_info ctinfo,
			 unsigned int match_offset,
			 unsigned int match_len,
			 const char *rep_buffer,
			 unsigned int rep_len)
{
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;
	struct udphdr *udph;
	int datalen, oldlen;

	/* UDP helpers might accidentally mangle the wrong packet */
	iph = ip_hdr(skb);
	if (skb->len < iph->ihl*4 + sizeof(*udph) +
			       match_offset + match_len)
		return 0;

	if (!skb_make_writable(skb, skb->len))
		return 0;

	/* Growing the payload: make sure there is enough tailroom */
	if (rep_len > match_len &&
	    rep_len - match_len > skb_tailroom(skb) &&
	    !enlarge_skb(skb, rep_len - match_len))
		return 0;

	/* Re-read headers: the skb data may have moved above */
	iph = ip_hdr(skb);
	udph = (void *)iph + iph->ihl*4;

	/* UDP datagram length (header + payload) before the change */
	oldlen = skb->len - iph->ihl*4;
	/* match_offset is relative to the start of the UDP payload */
	mangle_contents(skb, iph->ihl*4 + sizeof(*udph),
			match_offset, match_len, rep_buffer, rep_len);

	/* update the length of the UDP packet */
	datalen = skb->len - iph->ihl*4;
	udph->len = htons(datalen);

	/* fix udp checksum if udp checksum was previously calculated */
	/* (a zero UDP checksum on the wire means "none computed") */
	if (!udph->check && skb->ip_summed != CHECKSUM_PARTIAL)
		return 1;

	if (skb->ip_summed != CHECKSUM_PARTIAL) {
		/* Not locally destined and the device can checksum:
		 * leave only the pseudo-header sum and let hw finish it */
		if (!(rt->rt_flags & RTCF_LOCAL) &&
		    skb->dev->features & NETIF_F_V4_CSUM) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_start = skb_headroom(skb) +
					  skb_network_offset(skb) +
					  iph->ihl * 4;
			skb->csum_offset = offsetof(struct udphdr, check);
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 datalen, IPPROTO_UDP,
							 0);
		} else {
			/* Recompute the full UDP checksum in software */
			udph->check = 0;
			udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
							datalen, IPPROTO_UDP,
							csum_partial(udph,
								     datalen, 0));
			/* 0 means "no checksum"; substitute the mangled-zero */
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
	} else
		/* Partial checksum in flight: only patch in the length delta */
		inet_proto_csum_replace2(&udph->check, skb,
					 htons(oldlen), htons(datalen), 1);

	return 1;
}
EXPORT_SYMBOL(nf_nat_mangle_udp_packet);
304
305 /* Adjust one found SACK option including checksum correction */
306 static void
307 sack_adjust(struct sk_buff *skb,
308             struct tcphdr *tcph,
309             unsigned int sackoff,
310             unsigned int sackend,
311             struct nf_nat_seq *natseq)
312 {
313         while (sackoff < sackend) {
314                 struct tcp_sack_block_wire *sack;
315                 __be32 new_start_seq, new_end_seq;
316
317                 sack = (void *)skb->data + sackoff;
318                 if (after(ntohl(sack->start_seq) - natseq->offset_before,
319                           natseq->correction_pos))
320                         new_start_seq = htonl(ntohl(sack->start_seq)
321                                         - natseq->offset_after);
322                 else
323                         new_start_seq = htonl(ntohl(sack->start_seq)
324                                         - natseq->offset_before);
325
326                 if (after(ntohl(sack->end_seq) - natseq->offset_before,
327                           natseq->correction_pos))
328                         new_end_seq = htonl(ntohl(sack->end_seq)
329                                       - natseq->offset_after);
330                 else
331                         new_end_seq = htonl(ntohl(sack->end_seq)
332                                       - natseq->offset_before);
333
334                 pr_debug("sack_adjust: start_seq: %d->%d, end_seq: %d->%d\n",
335                          ntohl(sack->start_seq), new_start_seq,
336                          ntohl(sack->end_seq), new_end_seq);
337
338                 inet_proto_csum_replace4(&tcph->check, skb,
339                                          sack->start_seq, new_start_seq, 0);
340                 inet_proto_csum_replace4(&tcph->check, skb,
341                                          sack->end_seq, new_end_seq, 0);
342                 sack->start_seq = new_start_seq;
343                 sack->end_seq = new_end_seq;
344                 sackoff += sizeof(*sack);
345         }
346 }
347
/* TCP SACK sequence number adjustment.
 *
 * Scans the TCP options of this packet for SACK blocks and rewrites
 * their sequence numbers via sack_adjust().  Returns 1 on success (or
 * nothing to do), 0 if the options area is not writable or malformed. */
static inline unsigned int
nf_nat_sack_adjust(struct sk_buff *skb,
		   struct tcphdr *tcph,
		   struct nf_conn *ct,
		   enum ip_conntrack_info ctinfo)
{
	unsigned int dir, optoff, optend;
	struct nf_conn_nat *nat = nfct_nat(ct);

	/* Option area: from the end of the fixed header to doff*4 */
	optoff = ip_hdrlen(skb) + sizeof(struct tcphdr);
	optend = ip_hdrlen(skb) + tcph->doff * 4;

	if (!skb_make_writable(skb, optend))
		return 0;

	dir = CTINFO2DIR(ctinfo);

	while (optoff < optend) {
		/* Usually: option, length. */
		unsigned char *op = skb->data + optoff;

		switch (op[0]) {
		case TCPOPT_EOL:
			return 1;
		case TCPOPT_NOP:
			optoff++;
			continue;
		default:
			/* no partial options */
			/* op[1] is only read once optoff+1 < optend holds */
			if (optoff + 1 == optend ||
			    optoff + op[1] > optend ||
			    op[1] < 2)
				return 0;
			/* SACK blocks acknowledge the peer's data, so the
			 * reverse direction's offsets (!dir) apply here. */
			if (op[0] == TCPOPT_SACK &&
			    op[1] >= 2+TCPOLEN_SACK_PERBLOCK &&
			    ((op[1] - 2) % TCPOLEN_SACK_PERBLOCK) == 0)
				sack_adjust(skb, tcph, optoff+2,
					    optoff+op[1], &nat->seq[!dir]);
			optoff += op[1];
		}
	}
	return 1;
}
392
/* TCP sequence number adjustment.  Returns 1 on success, 0 on failure.
 *
 * Rewrites this packet's seq (own direction's offsets) and ack_seq
 * (reverse direction's offsets) with incremental checksum fixups, then
 * adjusts any SACK options.  Assumes the conntrack has a NAT extension. */
int
nf_nat_seq_adjust(struct sk_buff *skb,
		  struct nf_conn *ct,
		  enum ip_conntrack_info ctinfo)
{
	struct tcphdr *tcph;
	int dir;
	__be32 newseq, newack;
	s16 seqoff, ackoff;
	struct nf_conn_nat *nat = nfct_nat(ct);
	struct nf_nat_seq *this_way, *other_way;

	dir = CTINFO2DIR(ctinfo);

	this_way = &nat->seq[dir];
	other_way = &nat->seq[!dir];

	if (!skb_make_writable(skb, ip_hdrlen(skb) + sizeof(*tcph)))
		return 0;

	tcph = (void *)skb->data + ip_hdrlen(skb);
	/* NOTE(review): the offset records are read here without taking
	 * nf_nat_seqofs_lock, unlike nf_nat_get_offset() -- presumably
	 * safe in this call context; confirm against callers. */
	if (after(ntohl(tcph->seq), this_way->correction_pos))
		seqoff = this_way->offset_after;
	else
		seqoff = this_way->offset_before;

	/* ack_seq covers the peer's data: undo offset_before to compare
	 * in the peer's original sequence space */
	if (after(ntohl(tcph->ack_seq) - other_way->offset_before,
		  other_way->correction_pos))
		ackoff = other_way->offset_after;
	else
		ackoff = other_way->offset_before;

	/* seq moves forward by our offset; ack moves back by the peer's */
	newseq = htonl(ntohl(tcph->seq) + seqoff);
	newack = htonl(ntohl(tcph->ack_seq) - ackoff);

	inet_proto_csum_replace4(&tcph->check, skb, tcph->seq, newseq, 0);
	inet_proto_csum_replace4(&tcph->check, skb, tcph->ack_seq, newack, 0);

	pr_debug("Adjusting sequence number from %u->%u, ack from %u->%u\n",
		 ntohl(tcph->seq), ntohl(newseq), ntohl(tcph->ack_seq),
		 ntohl(newack));

	tcph->seq = newseq;
	tcph->ack_seq = newack;

	return nf_nat_sack_adjust(skb, tcph, ct, ctinfo);
}
441
442 /* Setup NAT on this expected conntrack so it follows master. */
443 /* If we fail to get a free NAT slot, we'll get dropped on confirm */
444 void nf_nat_follow_master(struct nf_conn *ct,
445                           struct nf_conntrack_expect *exp)
446 {
447         struct nf_nat_range range;
448
449         /* This must be a fresh one. */
450         BUG_ON(ct->status & IPS_NAT_DONE_MASK);
451
452         /* Change src to where master sends to */
453         range.flags = IP_NAT_RANGE_MAP_IPS;
454         range.min_ip = range.max_ip
455                 = ct->master->tuplehash[!exp->dir].tuple.dst.u3.ip;
456         nf_nat_setup_info(ct, &range, IP_NAT_MANIP_SRC);
457
458         /* For DST manip, map port here to where it's expected. */
459         range.flags = (IP_NAT_RANGE_MAP_IPS | IP_NAT_RANGE_PROTO_SPECIFIED);
460         range.min = range.max = exp->saved_proto;
461         range.min_ip = range.max_ip
462                 = ct->master->tuplehash[!exp->dir].tuple.src.u3.ip;
463         nf_nat_setup_info(ct, &range, IP_NAT_MANIP_DST);
464 }
465 EXPORT_SYMBOL(nf_nat_follow_master);