[DCCP]: Rewrite dccp_sendmsg to be more like UDP
1 /*
2  *  net/dccp/proto.c
3  *
4  *  An implementation of the DCCP protocol
5  *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
6  *
7  *      This program is free software; you can redistribute it and/or modify it
8  *      under the terms of the GNU General Public License version 2 as
9  *      published by the Free Software Foundation.
10  */
11
12 #include <linux/config.h>
13 #include <linux/dccp.h>
14 #include <linux/module.h>
15 #include <linux/types.h>
16 #include <linux/sched.h>
17 #include <linux/kernel.h>
18 #include <linux/skbuff.h>
19 #include <linux/netdevice.h>
20 #include <linux/in.h>
21 #include <linux/if_arp.h>
22 #include <linux/init.h>
23 #include <linux/random.h>
24 #include <net/checksum.h>
25
26 #include <net/inet_common.h>
27 #include <net/ip.h>
28 #include <net/protocol.h>
29 #include <net/sock.h>
30 #include <net/xfrm.h>
31
32 #include <asm/semaphore.h>
33 #include <linux/spinlock.h>
34 #include <linux/timer.h>
35 #include <linux/delay.h>
36 #include <linux/poll.h>
37 #include <linux/dccp.h>
38
39 #include "ccid.h"
40 #include "dccp.h"
41
42 DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics);
43
44 atomic_t dccp_orphan_count = ATOMIC_INIT(0);
45
46 static struct net_protocol dccp_protocol = {
47         .handler        = dccp_v4_rcv,
48         .err_handler    = dccp_v4_err,
49 };
50
51 const char *dccp_packet_name(const int type)
52 {
53         static const char *dccp_packet_names[] = {
54                 [DCCP_PKT_REQUEST]  = "REQUEST",
55                 [DCCP_PKT_RESPONSE] = "RESPONSE",
56                 [DCCP_PKT_DATA]     = "DATA",
57                 [DCCP_PKT_ACK]      = "ACK",
58                 [DCCP_PKT_DATAACK]  = "DATAACK",
59                 [DCCP_PKT_CLOSEREQ] = "CLOSEREQ",
60                 [DCCP_PKT_CLOSE]    = "CLOSE",
61                 [DCCP_PKT_RESET]    = "RESET",
62                 [DCCP_PKT_SYNC]     = "SYNC",
63                 [DCCP_PKT_SYNCACK]  = "SYNCACK",
64         };
65
66         if (type >= DCCP_NR_PKT_TYPES)
67                 return "INVALID";
68         else
69                 return dccp_packet_names[type];
70 }
71
72 EXPORT_SYMBOL_GPL(dccp_packet_name);
73
74 const char *dccp_state_name(const int state)
75 {
76         static char *dccp_state_names[] = {
77         [DCCP_OPEN]       = "OPEN",
78         [DCCP_REQUESTING] = "REQUESTING",
79         [DCCP_PARTOPEN]   = "PARTOPEN",
80         [DCCP_LISTEN]     = "LISTEN",
81         [DCCP_RESPOND]    = "RESPOND",
82         [DCCP_CLOSING]    = "CLOSING",
83         [DCCP_TIME_WAIT]  = "TIME_WAIT",
84         [DCCP_CLOSED]     = "CLOSED",
85         };
86
87         if (state >= DCCP_MAX_STATES)
88                 return "INVALID STATE!";
89         else
90                 return dccp_state_names[state];
91 }
92
93 EXPORT_SYMBOL_GPL(dccp_state_name);
94
95 static inline int dccp_listen_start(struct sock *sk)
96 {
97         dccp_sk(sk)->dccps_role = DCCP_ROLE_LISTEN;
98         return inet_csk_listen_start(sk, TCP_SYNQ_HSIZE);
99 }
100
101 int dccp_disconnect(struct sock *sk, int flags)
102 {
103         struct inet_connection_sock *icsk = inet_csk(sk);
104         struct inet_sock *inet = inet_sk(sk);
105         int err = 0;
106         const int old_state = sk->sk_state;
107
108         if (old_state != DCCP_CLOSED)
109                 dccp_set_state(sk, DCCP_CLOSED);
110
111         /* ABORT function of RFC793 */
112         if (old_state == DCCP_LISTEN) {
113                 inet_csk_listen_stop(sk);
114         /* FIXME: do the active reset thing */
115         } else if (old_state == DCCP_REQUESTING)
116                 sk->sk_err = ECONNRESET;
117
118         dccp_clear_xmit_timers(sk);
119         __skb_queue_purge(&sk->sk_receive_queue);
120         if (sk->sk_send_head != NULL) {
121                 __kfree_skb(sk->sk_send_head);
122                 sk->sk_send_head = NULL;
123         }
124
125         inet->dport = 0;
126
127         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
128                 inet_reset_saddr(sk);
129
130         sk->sk_shutdown = 0;
131         sock_reset_flag(sk, SOCK_DONE);
132
133         icsk->icsk_backoff = 0;
134         inet_csk_delack_init(sk);
135         __sk_dst_reset(sk);
136
137         BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
138
139         sk->sk_error_report(sk);
140         return err;
141 }
142
143 int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
144 {
145         dccp_pr_debug("entry\n");
146         return -ENOIOCTLCMD;
147 }
148
149 int dccp_setsockopt(struct sock *sk, int level, int optname,
150                     char *optval, int optlen)
151 {
152         dccp_pr_debug("entry\n");
153
154         if (level != SOL_DCCP)
155                 return ip_setsockopt(sk, level, optname, optval, optlen);
156
157         return -EOPNOTSUPP;
158 }
159
160 int dccp_getsockopt(struct sock *sk, int level, int optname,
161                     char *optval, int *optlen)
162 {
163         dccp_pr_debug("entry\n");
164
165         if (level != SOL_DCCP)
166                 return ip_getsockopt(sk, level, optname, optval, optlen);
167
168         return -EOPNOTSUPP;
169 }
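
/*
 * Editor's note: a minimal userspace sketch (not part of this file) showing
 * the effect of the two stubs above: any option at level SOL_DCCP is refused
 * with EOPNOTSUPP at this stage, while other levels fall through to
 * ip_setsockopt()/ip_getsockopt().  The helper name and the option number
 * are illustrative only; SOL_DCCP (269) may be missing from older userspace
 * headers, hence the fallback define.
 */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

#ifndef SOL_DCCP
#define SOL_DCCP 269
#endif

static void probe_dccp_sockopt(int fd)
{
        int one = 1;

        /* Expected to fail with EOPNOTSUPP in this revision. */
        if (setsockopt(fd, SOL_DCCP, 1 /* hypothetical option */, &one,
                       sizeof(one)) < 0)
                perror("setsockopt(SOL_DCCP)");

        /* IP-level options are passed through to ip_setsockopt(). */
        if (setsockopt(fd, IPPROTO_IP, IP_TTL, &one, sizeof(one)) < 0)
                perror("setsockopt(IPPROTO_IP, IP_TTL)");
}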
170
171 int dccp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
172                  size_t len)
173 {
174         const struct dccp_sock *dp = dccp_sk(sk);
175         const int flags = msg->msg_flags;
176         const int noblock = flags & MSG_DONTWAIT;
177         struct sk_buff *skb;
178         int rc, size;
179         long timeo;
180
181         if (len > dp->dccps_mss_cache)
182                 return -EMSGSIZE;
183
184         lock_sock(sk);
185         timeo = sock_sndtimeo(sk, noblock);
186
187         /*
188          * We have to use sk_stream_wait_connect here to set sk_write_pending,
189          * so that the trick in dccp_rcv_request_sent_state_process works.
190          */
191         /* Wait for a connection to finish. */
192         if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN | DCCPF_CLOSING))
193                 if ((rc = sk_stream_wait_connect(sk, &timeo)) != 0)
194                         goto out_release;
195
196         size = sk->sk_prot->max_header + len;
197         release_sock(sk);
198         skb = sock_alloc_send_skb(sk, size, noblock, &rc);
199         lock_sock(sk);
200         if (skb == NULL)
201                 goto out_release;
202
203         skb_reserve(skb, sk->sk_prot->max_header);
204         rc = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
205         if (rc != 0)
206                 goto out_discard;
207
208         rc = dccp_write_xmit(sk, skb, len);
209 out_release:
210         release_sock(sk);
211         return rc ? : len;
212 out_discard:
213         kfree_skb(skb);
214         goto out_release;
215 }
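
/*
 * Editor's note: a hypothetical userspace sketch (not part of this file) of
 * the UDP-like semantics dccp_sendmsg() implements above: each send() maps
 * to a single DCCP-DATA packet, and a payload larger than the cached MSS is
 * rejected with EMSGSIZE.  The helper name is illustrative; SOCK_DCCP (6)
 * and IPPROTO_DCCP (33) may be missing from older userspace headers, hence
 * the fallback defines.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif

static int dccp_send_one(const char *ip, unsigned short port,
                         const void *buf, size_t len)
{
        struct sockaddr_in sa;
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

        if (fd < 0)
                return -1;

        memset(&sa, 0, sizeof(sa));
        sa.sin_family = AF_INET;
        sa.sin_port = htons(port);
        inet_pton(AF_INET, ip, &sa.sin_addr);

        if (connect(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            send(fd, buf, len, 0) < 0)          /* EMSGSIZE if len > MSS */
                perror("dccp send");

        close(fd);
        return 0;
}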
216
217 int dccp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
218                  size_t len, int nonblock, int flags, int *addr_len)
219 {
220         const struct dccp_hdr *dh;
221         long timeo;
222
223         lock_sock(sk);
224
225         if (sk->sk_state == DCCP_LISTEN) {
226                 len = -ENOTCONN;
227                 goto out;
228         }
229
230         timeo = sock_rcvtimeo(sk, nonblock);
231
232         do {
233                 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
234
235                 if (skb == NULL)
236                         goto verify_sock_status;
237
238                 dh = dccp_hdr(skb);
239
240                 if (dh->dccph_type == DCCP_PKT_DATA ||
241                     dh->dccph_type == DCCP_PKT_DATAACK)
242                         goto found_ok_skb;
243
244                 if (dh->dccph_type == DCCP_PKT_RESET ||
245                     dh->dccph_type == DCCP_PKT_CLOSE) {
246                         dccp_pr_debug("found fin ok!\n");
247                         len = 0;
248                         goto found_fin_ok;
249                 }
250                 dccp_pr_debug("packet_type=%s\n",
251                               dccp_packet_name(dh->dccph_type));
252                 sk_eat_skb(sk, skb);
253 verify_sock_status:
254                 if (sock_flag(sk, SOCK_DONE)) {
255                         len = 0;
256                         break;
257                 }
258
259                 if (sk->sk_err) {
260                         len = sock_error(sk);
261                         break;
262                 }
263
264                 if (sk->sk_shutdown & RCV_SHUTDOWN) {
265                         len = 0;
266                         break;
267                 }
268
269                 if (sk->sk_state == DCCP_CLOSED) {
270                         if (!sock_flag(sk, SOCK_DONE)) {
271                                 /* This occurs when user tries to read
272                                  * from never connected socket.
273                                  */
274                                 len = -ENOTCONN;
275                                 break;
276                         }
277                         len = 0;
278                         break;
279                 }
280
281                 if (!timeo) {
282                         len = -EAGAIN;
283                         break;
284                 }
285
286                 if (signal_pending(current)) {
287                         len = sock_intr_errno(timeo);
288                         break;
289                 }
290
291                 sk_wait_data(sk, &timeo);
292                 continue;
293         found_ok_skb:
294                 if (len > skb->len)
295                         len = skb->len;
296                 else if (len < skb->len)
297                         msg->msg_flags |= MSG_TRUNC;
298
299                 if (skb_copy_datagram_iovec(skb, 0, msg->msg_iov, len)) {
300                         /* Exception. Bailout! */
301                         len = -EFAULT;
302                         break;
303                 }
304         found_fin_ok:
305                 if (!(flags & MSG_PEEK))
306                         sk_eat_skb(sk, skb);
307                 break;
308         } while (1);
309 out:
310         release_sock(sk);
311         return len;
312 }
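
/*
 * Editor's note: a hypothetical receive-side counterpart (not part of this
 * file).  dccp_recvmsg() above hands back at most one data packet per call,
 * sets MSG_TRUNC when the user buffer is smaller than the packet, and
 * returns 0 once a CLOSE or RESET has been seen, so a userspace reader
 * loops per record much as it would with UDP.  The helper name is
 * illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/uio.h>

static void dccp_read_records(int fd)
{
        char buf[1500];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        struct msghdr msg;
        ssize_t n;

        for (;;) {
                memset(&msg, 0, sizeof(msg));
                msg.msg_iov = &iov;
                msg.msg_iovlen = 1;

                n = recvmsg(fd, &msg, 0);
                if (n <= 0)             /* 0 on CLOSE/RESET, <0 on error */
                        break;
                if (msg.msg_flags & MSG_TRUNC)
                        fprintf(stderr, "record truncated\n");
                printf("got %zd bytes\n", n);
        }
}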
313
314 static int inet_dccp_listen(struct socket *sock, int backlog)
315 {
316         struct sock *sk = sock->sk;
317         unsigned char old_state;
318         int err;
319
320         lock_sock(sk);
321
322         err = -EINVAL;
323         if (sock->state != SS_UNCONNECTED || sock->type != SOCK_DCCP)
324                 goto out;
325
326         old_state = sk->sk_state;
327         if (!((1 << old_state) & (DCCPF_CLOSED | DCCPF_LISTEN)))
328                 goto out;
329
330         /* Really, if the socket is already in listen state
331          * we can only allow the backlog to be adjusted.
332          */
333         if (old_state != DCCP_LISTEN) {
334                 /*
335                  * FIXME: here it probably should be sk->sk_prot->listen_start
336                  * see tcp_listen_start
337                  */
338                 err = dccp_listen_start(sk);
339                 if (err)
340                         goto out;
341         }
342         sk->sk_max_ack_backlog = backlog;
343         err = 0;
344
345 out:
346         release_sock(sk);
347         return err;
348 }
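
/*
 * Editor's note: a hypothetical userspace server sketch (not part of this
 * file) exercising inet_dccp_listen() above: the socket must be unconnected
 * and of type SOCK_DCCP, and calling listen() again on an already-listening
 * socket only adjusts the backlog.  The helper name is illustrative, and
 * the SOCK_DCCP/IPPROTO_DCCP fallbacks are as in the earlier sketches.
 */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP 6
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP 33
#endif

static int dccp_listen_on(unsigned short port, int backlog)
{
        struct sockaddr_in sa;
        int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);

        if (fd < 0)
                return -1;

        memset(&sa, 0, sizeof(sa));
        sa.sin_family = AF_INET;
        sa.sin_addr.s_addr = htonl(INADDR_ANY);
        sa.sin_port = htons(port);

        if (bind(fd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            listen(fd, backlog) < 0)
                return -1;

        /* Incoming connections are handed up via inet_accept(). */
        return accept(fd, NULL, NULL);
}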
349
350 static const unsigned char dccp_new_state[] = {
351         /* current state:   new state:      action:     */
352         [0]               = DCCP_CLOSED,
353         [DCCP_OPEN]       = DCCP_CLOSING | DCCP_ACTION_FIN,
354         [DCCP_REQUESTING] = DCCP_CLOSED,
355         [DCCP_PARTOPEN]   = DCCP_CLOSING | DCCP_ACTION_FIN,
356         [DCCP_LISTEN]     = DCCP_CLOSED,
357         [DCCP_RESPOND]    = DCCP_CLOSED,
358         [DCCP_CLOSING]    = DCCP_CLOSED,
359         [DCCP_TIME_WAIT]  = DCCP_CLOSED,
360         [DCCP_CLOSED]     = DCCP_CLOSED,
361 };
362
363 static int dccp_close_state(struct sock *sk)
364 {
365         const int next = dccp_new_state[sk->sk_state];
366         const int ns = next & DCCP_STATE_MASK;
367
368         if (ns != sk->sk_state)
369                 dccp_set_state(sk, ns);
370
371         return next & DCCP_ACTION_FIN;
372 }
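
/*
 * Editor's note: for example, closing a socket in DCCP_OPEN looks up
 * DCCP_CLOSING | DCCP_ACTION_FIN in dccp_new_state[], so dccp_close_state()
 * moves it to DCCP_CLOSING and returns non-zero, which makes dccp_close()
 * below emit a DCCP-Close via dccp_send_close().  States such as
 * DCCP_LISTEN or DCCP_REQUESTING map straight to DCCP_CLOSED with nothing
 * to send.
 */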
373
374 void dccp_close(struct sock *sk, long timeout)
375 {
376         struct sk_buff *skb;
377
378         lock_sock(sk);
379
380         sk->sk_shutdown = SHUTDOWN_MASK;
381
382         if (sk->sk_state == DCCP_LISTEN) {
383                 dccp_set_state(sk, DCCP_CLOSED);
384
385                 /* Special case. */
386                 inet_csk_listen_stop(sk);
387
388                 goto adjudge_to_death;
389         }
390
391         /*
392          * We need to flush the recv. buffs.  We do this only on the
393          * descriptor close, not protocol-sourced closes, because the
394          * reader process may not have drained the data yet!
395          */
396         /* FIXME: check for unread data */
397         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
398                 __kfree_skb(skb);
399         }
400
401         if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
402                 /* Check zero linger _after_ checking for unread data. */
403                 sk->sk_prot->disconnect(sk, 0);
404         } else if (dccp_close_state(sk)) {
405                 dccp_send_close(sk);
406         }
407
408         sk_stream_wait_close(sk, timeout);
409
410 adjudge_to_death:
411         release_sock(sk);
412         /*
413          * Now socket is owned by kernel and we acquire BH lock
414          * to finish close. No need to check for user refs.
415          */
416         local_bh_disable();
417         bh_lock_sock(sk);
418         BUG_TRAP(!sock_owned_by_user(sk));
419
420         sock_hold(sk);
421         sock_orphan(sk);
422
423         if (sk->sk_state != DCCP_CLOSED)
424                 dccp_set_state(sk, DCCP_CLOSED);
425
426         atomic_inc(&dccp_orphan_count);
427         if (sk->sk_state == DCCP_CLOSED)
428                 inet_csk_destroy_sock(sk);
429
430         /* Otherwise, socket is reprieved until protocol close. */
431
432         bh_unlock_sock(sk);
433         local_bh_enable();
434         sock_put(sk);
435 }
436
437 void dccp_shutdown(struct sock *sk, int how)
438 {
439         dccp_pr_debug("entry\n");
440 }
441
442 struct proto_ops inet_dccp_ops = {
443         .family         = PF_INET,
444         .owner          = THIS_MODULE,
445         .release        = inet_release,
446         .bind           = inet_bind,
447         .connect        = inet_stream_connect,
448         .socketpair     = sock_no_socketpair,
449         .accept         = inet_accept,
450         .getname        = inet_getname,
451         .poll           = sock_no_poll,
452         .ioctl          = inet_ioctl,
453         /* FIXME: work on inet_listen to rename it to sock_common_listen */
454         .listen         = inet_dccp_listen,
455         .shutdown       = inet_shutdown,
456         .setsockopt     = sock_common_setsockopt,
457         .getsockopt     = sock_common_getsockopt,
458         .sendmsg        = inet_sendmsg,
459         .recvmsg        = sock_common_recvmsg,
460         .mmap           = sock_no_mmap,
461         .sendpage       = sock_no_sendpage,
462 };
463
464 extern struct net_proto_family inet_family_ops;
465
466 static struct inet_protosw dccp_v4_protosw = {
467         .type           = SOCK_DCCP,
468         .protocol       = IPPROTO_DCCP,
469         .prot           = &dccp_v4_prot,
470         .ops            = &inet_dccp_ops,
471         .capability     = -1,
472         .no_check       = 0,
473         .flags          = 0,
474 };
475
476 /*
477  * This is the global socket data structure used for responding to
478  * the Out-of-the-blue (OOTB) packets. A control sock will be created
479  * for this socket at the initialization time.
480  */
481 struct socket *dccp_ctl_socket;
482
483 static char dccp_ctl_socket_err_msg[] __initdata =
484         KERN_ERR "DCCP: Failed to create the control socket.\n";
485
486 static int __init dccp_ctl_sock_init(void)
487 {
488         int rc = sock_create_kern(PF_INET, SOCK_DCCP, IPPROTO_DCCP,
489                                   &dccp_ctl_socket);
490         if (rc < 0)
491                 printk(dccp_ctl_socket_err_msg);
492         else {
493                 dccp_ctl_socket->sk->sk_allocation = GFP_ATOMIC;
494                 inet_sk(dccp_ctl_socket->sk)->uc_ttl = -1;
495
496                 /* Unhash it so that IP input processing does not even
497                  * see it, we do not wish this socket to see incoming
498                  * packets.
499                  */
500                 dccp_ctl_socket->sk->sk_prot->unhash(dccp_ctl_socket->sk);
501         }
502
503         return rc;
504 }
505
506 static void __exit dccp_ctl_sock_exit(void)
507 {
508         if (dccp_ctl_socket != NULL)
509                 sock_release(dccp_ctl_socket);
510 }
511
512 static int __init init_dccp_v4_mibs(void)
513 {
514         int rc = -ENOMEM;
515
516         dccp_statistics[0] = alloc_percpu(struct dccp_mib);
517         if (dccp_statistics[0] == NULL)
518                 goto out;
519
520         dccp_statistics[1] = alloc_percpu(struct dccp_mib);
521         if (dccp_statistics[1] == NULL)
522                 goto out_free_one;
523
524         rc = 0;
525 out:
526         return rc;
527 out_free_one:
528         free_percpu(dccp_statistics[0]);
529         dccp_statistics[0] = NULL;
530         goto out;
531
532 }
533
534 static int thash_entries;
535 module_param(thash_entries, int, 0444);
536 MODULE_PARM_DESC(thash_entries, "Number of ehash buckets");
537
538 int dccp_debug;
539 module_param(dccp_debug, int, 0444);
540 MODULE_PARM_DESC(dccp_debug, "Enable debug messages");
541
542 static int __init dccp_init(void)
543 {
544         unsigned long goal;
545         int ehash_order, bhash_order, i;
546         int rc = proto_register(&dccp_v4_prot, 1);
547
548         if (rc)
549                 goto out;
550
551         dccp_hashinfo.bind_bucket_cachep =
552                 kmem_cache_create("dccp_bind_bucket",
553                                   sizeof(struct inet_bind_bucket), 0,
554                                   SLAB_HWCACHE_ALIGN, NULL, NULL);
555         if (!dccp_hashinfo.bind_bucket_cachep)
556                 goto out_proto_unregister;
557
558         /*
559          * Size and allocate the main established and bind bucket
560          * hash tables.
561          *
562          * The methodology is similar to that of the buffer cache.
563          */
564         if (num_physpages >= (128 * 1024))
565                 goal = num_physpages >> (21 - PAGE_SHIFT);
566         else
567                 goal = num_physpages >> (23 - PAGE_SHIFT);
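
        /*
         * Editor's note: a worked example, assuming 4 KiB pages
         * (PAGE_SHIFT = 12).  With 512 MiB of RAM, num_physpages is
         * 131072 >= 128 * 1024, so goal = 131072 >> (21 - 12) = 256 pages;
         * with 64 MiB, num_physpages is 16384 and
         * goal = 16384 >> (23 - 12) = 8 pages.  The loops below then round
         * this up to a power-of-two allocation order.
         */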
568
569         if (thash_entries)
570                 goal = (thash_entries *
571                         sizeof(struct inet_ehash_bucket)) >> PAGE_SHIFT;
572         for (ehash_order = 0; (1UL << ehash_order) < goal; ehash_order++)
573                 ;
574         do {
575                 dccp_hashinfo.ehash_size = (1UL << ehash_order) * PAGE_SIZE /
576                                         sizeof(struct inet_ehash_bucket);
577                 dccp_hashinfo.ehash_size >>= 1;
578                 while (dccp_hashinfo.ehash_size &
579                        (dccp_hashinfo.ehash_size - 1))
580                         dccp_hashinfo.ehash_size--;
581                 dccp_hashinfo.ehash = (struct inet_ehash_bucket *)
582                         __get_free_pages(GFP_ATOMIC, ehash_order);
583         } while (!dccp_hashinfo.ehash && --ehash_order > 0);
584
585         if (!dccp_hashinfo.ehash) {
586                 printk(KERN_CRIT "Failed to allocate DCCP "
587                                  "established hash table\n");
588                 goto out_free_bind_bucket_cachep;
589         }
590
591         for (i = 0; i < (dccp_hashinfo.ehash_size << 1); i++) {
592                 rwlock_init(&dccp_hashinfo.ehash[i].lock);
593                 INIT_HLIST_HEAD(&dccp_hashinfo.ehash[i].chain);
594         }
595
596         bhash_order = ehash_order;
597
598         do {
599                 dccp_hashinfo.bhash_size = (1UL << bhash_order) * PAGE_SIZE /
600                                         sizeof(struct inet_bind_hashbucket);
601                 if ((dccp_hashinfo.bhash_size > (64 * 1024)) &&
602                     bhash_order > 0)
603                         continue;
604                 dccp_hashinfo.bhash = (struct inet_bind_hashbucket *)
605                         __get_free_pages(GFP_ATOMIC, bhash_order);
606         } while (!dccp_hashinfo.bhash && --bhash_order >= 0);
607
608         if (!dccp_hashinfo.bhash) {
609                 printk(KERN_CRIT "Failed to allocate DCCP bind hash table\n");
610                 goto out_free_dccp_ehash;
611         }
612
613         for (i = 0; i < dccp_hashinfo.bhash_size; i++) {
614                 spin_lock_init(&dccp_hashinfo.bhash[i].lock);
615                 INIT_HLIST_HEAD(&dccp_hashinfo.bhash[i].chain);
616         }
617
618         if (init_dccp_v4_mibs())
619                 goto out_free_dccp_bhash;
620
621         rc = -EAGAIN;
622         if (inet_add_protocol(&dccp_protocol, IPPROTO_DCCP))
623                 goto out_free_dccp_v4_mibs;
624
625         inet_register_protosw(&dccp_v4_protosw);
626
627         rc = dccp_ctl_sock_init();
628         if (rc)
629                 goto out_unregister_protosw;
630 out:
631         return rc;
632 out_unregister_protosw:
633         inet_unregister_protosw(&dccp_v4_protosw);
634         inet_del_protocol(&dccp_protocol, IPPROTO_DCCP);
635 out_free_dccp_v4_mibs:
636         free_percpu(dccp_statistics[0]);
637         free_percpu(dccp_statistics[1]);
638         dccp_statistics[0] = dccp_statistics[1] = NULL;
639 out_free_dccp_bhash:
640         free_pages((unsigned long)dccp_hashinfo.bhash, bhash_order);
641         dccp_hashinfo.bhash = NULL;
642 out_free_dccp_ehash:
643         free_pages((unsigned long)dccp_hashinfo.ehash, ehash_order);
644         dccp_hashinfo.ehash = NULL;
645 out_free_bind_bucket_cachep:
646         kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
647         dccp_hashinfo.bind_bucket_cachep = NULL;
648 out_proto_unregister:
649         proto_unregister(&dccp_v4_prot);
650         goto out;
651 }
652
653 static const char dccp_del_proto_err_msg[] __exitdata =
654         KERN_ERR "can't remove dccp net_protocol\n";
655
656 static void __exit dccp_fini(void)
657 {
658         dccp_ctl_sock_exit();
659
660         inet_unregister_protosw(&dccp_v4_protosw);
661
662         if (inet_del_protocol(&dccp_protocol, IPPROTO_DCCP) < 0)
663                 printk(dccp_del_proto_err_msg);
664
665         /* Free the control endpoint.  */
666         sock_release(dccp_ctl_socket);
667
668         proto_unregister(&dccp_v4_prot);
669
670         kmem_cache_destroy(dccp_hashinfo.bind_bucket_cachep);
671 }
672
673 module_init(dccp_init);
674 module_exit(dccp_fini);
675
676 /*
677  * __stringify doesn't like enums, so use the SOCK_DCCP (6) and IPPROTO_DCCP (33)
678  * values directly.  Also cover the case where the protocol is not specified,
679  * i.e. net-pf-PF_INET-proto-0-type-SOCK_DCCP
680  */
681 MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-33-type-6");
682 MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-0-type-6");
683 MODULE_LICENSE("GPL");
684 MODULE_AUTHOR("Arnaldo Carvalho de Melo <acme@conectiva.com.br>");
685 MODULE_DESCRIPTION("DCCP - Datagram Congestion Controlled Protocol");