2 * Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
4 * Changes to meet Linux coding standards, and DCCP infrastructure fixes.
6 * Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * This implementation should follow RFC 4341
26 #include <linux/slab.h>
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
/*
 * Runtime debug switch, exported as the "ccid2_debug" module parameter
 * below.  Must have type 'bool': module_param(..., bool, ...) requires a
 * bool-typed backing variable (see linux/moduleparam.h); 'int' here breaks
 * the build on kernels where bool parameters are enforced.
 */
static bool ccid2_debug;
#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
#else
#define ccid2_pr_debug(format, a...)
#endif
/*
 * ccid2_hc_tx_alloc_seq  -  extend the circular packet-history list
 * @hc: TX socket whose history ring is grown
 *
 * Allocates one more buffer of CCID2_SEQBUF_LEN ccid2_seq entries, links it
 * into the existing circular list between head (tx_seqh) and tail (tx_seqt),
 * and records the allocation in hc->tx_seqbuf[] for later freeing in
 * ccid2_hc_tx_exit().
 * NOTE(review): some lines (opening brace, error returns) appear elided in
 * this excerpt.
 */
static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
	struct ccid2_seq *seqp;

	/* check if we have space to preserve the pointer to the buffer */
	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
			       sizeof(struct ccid2_seq *)))

	/* allocate buffer and initialize linked list */
	seqp = kmalloc(CCID2_SEQBUF_LEN * sizeof(struct ccid2_seq), gfp_any());

	/* chain consecutive entries into a doubly-linked list ... */
	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
		seqp[i].ccid2s_next = &seqp[i + 1];
		seqp[i + 1].ccid2s_prev = &seqp[i];
	/* ... and close the ring: last entry points back to the first */
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];

	/* This is the first allocation.  Initiate the head and tail. */
	if (hc->tx_seqbufc == 0)
		hc->tx_seqh = hc->tx_seqt = seqp;

	/* link the existing list with the one we just created */
	hc->tx_seqh->ccid2s_next = seqp;
	seqp->ccid2s_prev = hc->tx_seqh;

	hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;

	/* store the original pointer to the buffer so we can free it */
	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
/*
 * ccid2_hc_tx_send_packet  -  check whether the congestion window permits
 *                             sending a new packet
 * Sending is allowed only while fewer than cwnd packets are in flight
 * (tx_pipe < tx_cwnd); otherwise the caller must back off and poll again.
 */
static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	/* window has room: packet may be sent */
	if (hc->tx_pipe < hc->tx_cwnd)

	return 1; /* XXX CCID should dequeue when ready instead of polling */
/*
 * ccid2_change_l_ack_ratio  -  update the local (sender-side) Ack Ratio
 * Limits @val so that it is non-zero, does not exceed ceil(cwnd/2), and
 * stays within the protocol maximum, then stores it in dccps_l_ack_ratio
 * (no-op when unchanged).
 */
static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
	struct dccp_sock *dp = dccp_sk(sk);
	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);

	/*
	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
	 * The same problem arises when Ack Ratio is 0 (ie. Ack Ratio disabled).
	 */
	if (val == 0 || val > max_ratio) {
		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);

	/* never exceed the maximum of the Ack Ratio feature (RFC 4340, 11.3) */
	if (val > DCCPF_ACK_RATIO_MAX)
		val = DCCPF_ACK_RATIO_MAX;

	/* nothing to do when the ratio is already at the requested value */
	if (val == dp->dccps_l_ack_ratio)

	ccid2_pr_debug("changing local ack ratio to %u\n", val);
	dp->dccps_l_ack_ratio = val;
114 static void ccid2_start_rto_timer(struct sock *sk);
/*
 * ccid2_hc_tx_rto_expire  -  RTO timer callback: react to a timeout
 * @data: the socket, cast to unsigned long (pre-timer_setup() timer API)
 *
 * Caps the (backed-off) RTO at DCCP_RTO_MAX, halves ssthresh, discards the
 * outstanding packet history and resets the Ack Ratio to 1 — analogous to
 * TCP's RTO handling, except that DCCP never retransmits data.
 */
static void ccid2_hc_tx_rto_expire(unsigned long data)
	struct sock *sk = (struct sock *)data;
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	/* socket is in use by user context: retry 200ms later */
	if (sock_owned_by_user(sk)) {
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);

	ccid2_pr_debug("RTO_EXPIRE\n");

	/* bound the (backed-off) timeout by DCCP_RTO_MAX */
	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;

	ccid2_start_rto_timer(sk);

	/* adjust pipe, cwnd etc */
	hc->tx_ssthresh = hc->tx_cwnd / 2;
	if (hc->tx_ssthresh < 2)

	/* clear state about stuff we sent */
	hc->tx_seqt = hc->tx_seqh;
	hc->tx_packets_acked = 0;

	/* clear ack ratio state. */
	hc->tx_rpdupack = -1;
	ccid2_change_l_ack_ratio(sk, 1);
/*
 * ccid2_start_rto_timer  -  arm the retransmission-timeout timer
 * Must not be called while the timer is pending (BUG_ON below); callers
 * are expected to check timer_pending() first.
 */
static void ccid2_start_rto_timer(struct sock *sk)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	ccid2_pr_debug("setting RTO timeout=%u\n", hc->tx_rto);

	BUG_ON(timer_pending(&hc->tx_rtotimer));
	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
/*
 * ccid2_hc_tx_packet_sent  -  register a freshly sent packet in the history
 * Records seqno and send time of the new packet at the list head, grows the
 * history ring when head would run into tail, performs (known-broken, see
 * FIXME below) Ack-Ratio bookkeeping, and arms the RTO timer if idle.
 */
static void ccid2_hc_tx_packet_sent(struct sock *sk, int more, unsigned int len)
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	struct ccid2_seq *next;

	/* record seqno/ack-state/time stamp of this packet at the head */
	hc->tx_seqh->ccid2s_seq = dp->dccps_gss;
	hc->tx_seqh->ccid2s_acked = 0;
	hc->tx_seqh->ccid2s_sent = ccid2_time_stamp;

	next = hc->tx_seqh->ccid2s_next;
	/* check if we need to alloc more space */
	if (next == hc->tx_seqt) {
		if (ccid2_hc_tx_alloc_seq(hc)) {
			DCCP_CRIT("packet history - out of memory!");
			/* FIXME: find a more graceful way to bail out */
		next = hc->tx_seqh->ccid2s_next;
		BUG_ON(next == hc->tx_seqt);

	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);

	/*
	 * FIXME: The code below is broken and the variables have been removed
	 * from the socket struct. The `ackloss' variable was always set to 0,
	 * and with arsent there are several problems:
	 *  (i) it doesn't just count the number of Acks, but all sent packets;
	 *  (ii) it is expressed in # of packets, not # of windows, so the
	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
	 *  of data with no lost or marked Ack packets. If arsent were the # of
	 *  consecutive Acks received without loss, then Ack Ratio needs to be
	 *  decreased by 1 when
	 *	arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
	 *  where cwnd / R is the number of Acks received per window of data
	 *  (cf. RFC 4341, App. A). The problems are that
	 *  - arsent counts other packets as well;
	 *  - the comparison uses a formula different from RFC 4341;
	 *  - computing a cubic/quadratic equation each time is too complicated.
	 *  Hence a different algorithm is needed.
	 */

	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */

	/* We had an ack loss in this window... */
	if (hc->tx_ackloss) {
		if (hc->tx_arsent >= hc->tx_cwnd) {

	/* No acks lost up to now... */
	/* decrease ack ratio if enough packets were sent */
	if (dp->dccps_l_ack_ratio > 1) {
		/* XXX don't calculate denominator each time */
		int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
			    dp->dccps_l_ack_ratio;

		denom = hc->tx_cwnd * hc->tx_cwnd / denom;

		if (hc->tx_arsent >= denom) {
			ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);

	/* we can't increase ack ratio further [1] */
	hc->tx_arsent = 0; /* or maybe set it to cwnd*/

	/* setup RTO timer */
	if (!timer_pending(&hc->tx_rtotimer))
		ccid2_start_rto_timer(sk);

#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
	/* debug builds: dump the not-yet-acknowledged part of the history */
	struct ccid2_seq *seqp = hc->tx_seqt;

	while (seqp != hc->tx_seqh) {
		ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
			       (unsigned long long)seqp->ccid2s_seq,
			       seqp->ccid2s_acked, seqp->ccid2s_sent);
		seqp = seqp->ccid2s_next;

	ccid2_pr_debug("=========\n");
/* XXX Lame code duplication!
 * returns -1 if none was found.
 * else returns the next offset to use in the function call.
 *
 * (Duplicates the option-walking logic of the generic DCCP option parser:
 * scans the header options of @skb starting at @offset for the next
 * Ack Vector option and reports its value/length through @vec/@veclen.)
 */
static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
			   unsigned char **vec, unsigned char *veclen)
	const struct dccp_hdr *dh = dccp_hdr(skb);
	unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
	unsigned char *opt_ptr;
	const unsigned char *opt_end = (unsigned char *)dh +
				       (dh->dccph_doff * 4);
	unsigned char opt, len;
	unsigned char *value;

	/* nothing to parse when the offset is at/past the end of options */
	if (opt_ptr >= opt_end)

	while (opt_ptr != opt_end) {

		/* Check if this isn't a single byte option */
		if (opt > DCCPO_MAX_RESERVED) {
			/* multi-byte option needs at least a length octet */
			if (opt_ptr == opt_end)
				goto out_invalid_option;

			goto out_invalid_option;
			/*
			 * Remove the type and len fields, leaving
			 * just the value size
			 */

			/* value must not extend past the options area */
			if (opt_ptr > opt_end)
				goto out_invalid_option;

		case DCCPO_ACK_VECTOR_0:
		case DCCPO_ACK_VECTOR_1:
			/* found one: report it; resume after it on next call */
			return offset + (opt_ptr - options);

	DCCP_BUG("Invalid option - this should not happen (previous parsing)!");
/*
 * ccid2_hc_tx_kill_rto_timer  -  stop the RTO timer (e.g. on socket exit)
 */
static void ccid2_hc_tx_kill_rto_timer(struct sock *sk)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	sk_stop_timer(sk, &hc->tx_rtotimer);
	ccid2_pr_debug("deleted RTO timer\n");
/**
 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
 * @mrtt: measured RTT sample (unscaled; SRTT/mdev are kept scaled below)
 *
 * This code is almost identical with TCP's tcp_rtt_estimator(), since
 * - it has a higher sampling frequency (recommended by RFC 1323),
 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
 * - it is simple (cf. more complex proposals such as Eifel timer or research
 *   which suggests that the gain should be set according to window size),
 * - in tests it was found to work well with CCID2 [gerrit].
 */
static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_srtt == 0) {
		/* First measurement m */
		hc->tx_srtt = m << 3;	/* SRTT is kept scaled by 8 */
		hc->tx_mdev = m << 1;	/* mdev scaled by 4, seeded as m/2<<2 */

		hc->tx_mdev_max = max(TCP_RTO_MIN, hc->tx_mdev);
		hc->tx_rttvar = hc->tx_mdev_max;
		hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;

	/* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
	m -= (hc->tx_srtt >> 3);

	/* Similarly, update scaled mdev with regard to |m| */
	m -= (hc->tx_mdev >> 2);
	/*
	 * This neutralises RTO increase when RTT < SRTT - mdev
	 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
	 * in Linux TCP", USENIX 2002, pp. 49-62).
	 */
	m -= (hc->tx_mdev >> 2);

	/* raise RTTVAR immediately when mdev grows past its recent maximum */
	if (hc->tx_mdev > hc->tx_mdev_max) {
		hc->tx_mdev_max = hc->tx_mdev;
		if (hc->tx_mdev_max > hc->tx_rttvar)
			hc->tx_rttvar = hc->tx_mdev_max;

	/*
	 * Decay RTTVAR at most once per flight, exploiting that
	 *  1) pipe <= cwnd <= Sequence_Window = W  (RFC 4340, 7.5.2)
	 *  2) AWL = GSS-W+1 <= GAR <= GSS          (RFC 4340, 7.5.1)
	 * GAR is a useful bound for FlightSize = pipe.
	 * AWL is probably too low here, as it over-estimates pipe.
	 */
	if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
		if (hc->tx_mdev_max < hc->tx_rttvar)
			hc->tx_rttvar -= (hc->tx_rttvar -
					  hc->tx_mdev_max) >> 2;
		hc->tx_rtt_seq = dccp_sk(sk)->dccps_gss;
		hc->tx_mdev_max = TCP_RTO_MIN;

	/*
	 * Set RTO from SRTT and RTTVAR
	 * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
	 * This agrees with RFC 4341, 5:
	 * "Because DCCP does not retransmit data, DCCP does not require
	 *  TCP's recommended minimum timeout of one second".
	 */
	hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;

	if (hc->tx_rto > DCCP_RTO_MAX)
		hc->tx_rto = DCCP_RTO_MAX;
/*
 * ccid2_new_ack  -  process one newly acknowledged packet
 * Opens the congestion window: in slow start, at most *maxincr increments
 * per acknowledgement (one per two acked packets); in congestion avoidance,
 * one increment per cwnd acked packets.  Also feeds the RTT estimator with
 * the age of @seqp (see FIXME on over-sampling below).
 */
static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
			  unsigned int *maxincr)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	if (hc->tx_cwnd < hc->tx_ssthresh) {
		/* slow start: +1 cwnd per two newly acked packets */
		if (*maxincr > 0 && ++hc->tx_packets_acked == 2) {
			hc->tx_packets_acked = 0;
	/* congestion avoidance: +1 cwnd per window of acked packets */
	} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
		hc->tx_packets_acked = 0;
	/*
	 * FIXME: RTT is sampled several times per acknowledgment (for each
	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
	 * This causes the RTT to be over-estimated, since the older entries
	 * in the Ack Vector have earlier sending times.
	 * The cleanest solution is to not use the ccid2s_sent field at all
	 * and instead use DCCP timestamps: requires changes in other places.
	 */
	ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
/*
 * ccid2_congestion_event  -  react to loss / ECN-mark of packet @seqp
 * Halves cwnd (never below 1) and raises ssthresh accordingly, but at most
 * once per RTT: multiple losses within one RTT count as a single event.
 */
static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	/* signed wrap-safe comparison: packet predates last congestion event */
	if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");

	hc->tx_last_cong = ccid2_time_stamp;

	/* multiplicative decrease: halve cwnd, keep at least 1 packet */
	hc->tx_cwnd = hc->tx_cwnd / 2 ? : 1U;
	hc->tx_ssthresh = max(hc->tx_cwnd, 2U);

	/* Avoid spurious timeouts resulting from Ack Ratio > cwnd */
	if (dccp_sk(sk)->dccps_l_ack_ratio > hc->tx_cwnd)
		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd);
/*
 * ccid2_hc_tx_packet_recv  -  process an incoming (Data)Ack
 * Per received acknowledgement:
 *  (1) heuristically detect reverse-path congestion via duplicate acks and
 *      double the Ack Ratio when NUMDUPACK dupacks were seen;
 *  (2) walk the packet's Ack Vector(s), marking acked history entries and
 *      raising congestion events for ECN-marked ones;
 *  (3) apply a NUMDUPACK rule: with >= 3 acks above it, anything still
 *      unacked below the last acked seqno is treated as lost;
 *  (4) trim the acked tail of the history and re-arm/stop the RTO timer.
 */
static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
	struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
	struct ccid2_seq *seqp;
	unsigned char *vector;
	unsigned char veclen;
	unsigned int maxincr = 0;

	/* check reverse path congestion */
	seqno = DCCP_SKB_CB(skb)->dccpd_seq;

	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
	 */
	/* need to bootstrap */
	if (hc->tx_rpdupack == -1) {
		hc->tx_rpseq = seqno;
		/* check if packet is consecutive */
		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
			hc->tx_rpseq = seqno;
		/* it's a later packet */
		else if (after48(seqno, hc->tx_rpseq)) {

			/* check if we got enough dupacks */
			if (hc->tx_rpdupack >= NUMDUPACK) {
				hc->tx_rpdupack = -1; /* XXX lame */

				/* reverse path congested: halve the ack rate */
				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);

	/* check forward path congestion */
	/* still didn't send out new data packets */
	if (hc->tx_seqh == hc->tx_seqt)

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_DATAACK:

	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
	if (after48(ackno, hc->tx_high_ack))
		hc->tx_high_ack = ackno;

	/* advance the cursor to the first history entry not below ackno */
	while (before48(seqp->ccid2s_seq, ackno)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;

	/*
	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
	 * packets per acknowledgement. Rounding up avoids that cwnd is not
	 * advanced when Ack Ratio is 1 and gives a slight edge otherwise.
	 */
	if (hc->tx_cwnd < hc->tx_ssthresh)
		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);

	/* go through all ack vectors */
	while ((offset = ccid2_ackvector(sk, skb, offset,
					 &vector, &veclen)) != -1) {
		/* go through this ack vector */
		const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
		u64 ackno_end_rl = SUB48(ackno, rl);

		ccid2_pr_debug("ackvec start:%llu end:%llu\n",
			       (unsigned long long)ackno,
			       (unsigned long long)ackno_end_rl);
		/* if the seqno we are analyzing is larger than the
		 * current ackno, then move towards the tail of our
		 * seqnos
		 */
		while (after48(seqp->ccid2s_seq, ackno)) {
			if (seqp == hc->tx_seqt) {
			seqp = seqp->ccid2s_prev;

		/* check all seqnos in the range of the vector
		 * run length
		 */
		while (between48(seqp->ccid2s_seq,ackno_end_rl,ackno)) {
			const u8 state = *vector &
					 DCCP_ACKVEC_STATE_MASK;

			/* new packet received or marked */
			if (state != DCCP_ACKVEC_STATE_NOT_RECEIVED &&
			    !seqp->ccid2s_acked) {
				/* ECN-marked => congestion; else a new ack */
				    DCCP_ACKVEC_STATE_ECN_MARKED) {
					ccid2_congestion_event(sk,
					ccid2_new_ack(sk, seqp,

				seqp->ccid2s_acked = 1;
				ccid2_pr_debug("Got ack for %llu\n",
					       (unsigned long long)seqp->ccid2s_seq);

			if (seqp == hc->tx_seqt) {
			seqp = seqp->ccid2s_prev;

		/* advance past this run length to the next vector cell */
		ackno = SUB48(ackno_end_rl, 1);

	/* The state about what is acked should be correct now
	 * Check for NUMDUPACK
	 */
	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
		seqp = seqp->ccid2s_next;
		if (seqp == hc->tx_seqh) {
			seqp = hc->tx_seqh->ccid2s_prev;

	/* count acked packets (towards the tail) until NUMDUPACK is reached */
	if (seqp->ccid2s_acked) {
		if (done == NUMDUPACK)

		if (seqp == hc->tx_seqt)
		seqp = seqp->ccid2s_prev;

	/* If there are at least 3 acknowledgements, anything unacknowledged
	 * below the last sequence number is considered lost
	 */
	if (done == NUMDUPACK) {
		struct ccid2_seq *last_acked = seqp;

		/* check for lost packets */
		if (!seqp->ccid2s_acked) {
			ccid2_pr_debug("Packet lost: %llu\n",
				       (unsigned long long)seqp->ccid2s_seq);
			/* XXX need to traverse from tail -> head in
			 * order to detect multiple congestion events in
			 * one ack vector
			 */
			ccid2_congestion_event(sk, seqp);

		if (seqp == hc->tx_seqt)
		seqp = seqp->ccid2s_prev;

		hc->tx_seqt = last_acked;

	/* trim acked packets in tail */
	while (hc->tx_seqt != hc->tx_seqh) {
		if (!hc->tx_seqt->ccid2s_acked)

		hc->tx_seqt = hc->tx_seqt->ccid2s_next;

	/* restart RTO timer if not all outstanding data has been acked */
	if (hc->tx_pipe == 0)
		sk_stop_timer(sk, &hc->tx_rtotimer);
		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
/*
 * ccid2_hc_tx_init  -  initialise the TX half-connection state
 * Sets the initial ssthresh (unbounded) and cwnd (2..4 packets, RFC 3390),
 * bounds the Ack Ratio, allocates the first packet-history buffer, and
 * initialises RTO state and the RTO timer.
 */
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
	struct dccp_sock *dp = dccp_sk(sk);

	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
	hc->tx_ssthresh = ~0U;

	/*
	 * RFC 4341, 5: "The cwnd parameter is initialized to at most four
	 * packets for new connections, following the rules from [RFC3390]".
	 * We need to convert the bytes of RFC3390 into the packets of RFC 4341.
	 */
	hc->tx_cwnd = clamp(4380U / dp->dccps_mss_cache, 2U, 4U);

	/* Make sure that Ack Ratio is enabled and within bounds. */
	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
		dp->dccps_l_ack_ratio = max_ratio;

	/* XXX init ~ to window size... */
	if (ccid2_hc_tx_alloc_seq(hc))

	hc->tx_rto = DCCP_TIMEOUT_INIT;
	hc->tx_rpdupack = -1;
	hc->tx_last_cong = ccid2_time_stamp;
	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
/*
 * ccid2_hc_tx_exit  -  release TX-side resources
 * Stops the RTO timer and frees every packet-history buffer recorded in
 * tx_seqbuf[] by ccid2_hc_tx_alloc_seq().
 */
static void ccid2_hc_tx_exit(struct sock *sk)
	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);

	ccid2_hc_tx_kill_rto_timer(sk);

	for (i = 0; i < hc->tx_seqbufc; i++)
		kfree(hc->tx_seqbuf[i]);
/*
 * ccid2_hc_rx_packet_recv  -  RX side: acknowledge received data packets
 * Counts incoming data packets; once the peer's Ack Ratio worth of data
 * has arrived an acknowledgement is due (the action taken inside the 'if'
 * is elided in this excerpt — presumably dccp_send_ack(); confirm against
 * the full source).
 */
static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
	const struct dccp_sock *dp = dccp_sk(sk);
	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);

	switch (DCCP_SKB_CB(skb)->dccpd_type) {
	case DCCP_PKT_DATAACK:
		/* every dccps_r_ack_ratio data packets, an Ack is due */
		if (hc->rx_data >= dp->dccps_r_ack_ratio) {
/* CCID-2 ("TCP-like", RFC 4341) operations table, registered with DCCP core */
struct ccid_operations ccid2_ops = {
	.ccid_id		  = DCCPC_CCID2,
	.ccid_name		  = "TCP-like",
	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
/* debug switch, writable at runtime via /sys/module/.../parameters (0644) */
module_param(ccid2_debug, bool, 0644);
MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");