1 /*
2  * net/tipc/link.c: TIPC link code
3  *
4  * Copyright (c) 1996-2007, 2012, Ericsson AB
5  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include "core.h"
38 #include "link.h"
39 #include "port.h"
40 #include "name_distr.h"
41 #include "discover.h"
42 #include "config.h"
43
44 #include <linux/pkt_sched.h>
45
46 /*
47  * Error message prefixes
48  */
49 static const char *link_co_err = "Link changeover error, ";
50 static const char *link_rst_msg = "Resetting link ";
51 static const char *link_unk_evt = "Unknown link event ";
52
53 /*
54  * Out-of-range value for link session numbers
55  */
56 #define INVALID_SESSION 0x10000
57
58 /*
59  * Link state events:
60  */
61 #define  STARTING_EVT    856384768      /* link processing trigger */
62 #define  TRAFFIC_MSG_EVT 560815u        /* rx'd traffic message */
63 #define  TIMEOUT_EVT     560817u        /* link timer expired */
64
65 /*
66  * The following two 'message types' are really just implementation
67  * data conveniently stored in the message header.
68  * They must not be considered part of the protocol.
69  */
70 #define OPEN_MSG   0
71 #define CLOSED_MSG 1
72
73 /*
74  * State value stored in 'exp_msg_count'
75  */
76 #define START_CHANGEOVER 100000u
77
78 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
79                                        struct sk_buff *buf);
80 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
81 static int  link_recv_changeover_msg(struct tipc_link **l_ptr,
82                                      struct sk_buff **buf);
83 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
84 static int  link_send_sections_long(struct tipc_port *sender,
85                                     struct iovec const *msg_sect,
86                                     unsigned int len, u32 destnode);
87 static void link_state_event(struct tipc_link *l_ptr, u32 event);
88 static void link_reset_statistics(struct tipc_link *l_ptr);
89 static void link_print(struct tipc_link *l_ptr, const char *str);
90 static void link_start(struct tipc_link *l_ptr);
91 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
92 static void tipc_link_send_sync(struct tipc_link *l);
93 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
94
95 /*
96  *  Simple link routines
97  */
98 static unsigned int align(unsigned int i)
99 {
100         return (i + 3) & ~3u;
101 }
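
/*
 * Worked example (values follow directly from the expression above,
 * illustration only): align() rounds a length up to the next 4-byte
 * boundary, the padding unit used when bundling messages:
 *
 *	align(1)  == 4
 *	align(4)  == 4
 *	align(61) == 64
 */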
102
103 static void link_init_max_pkt(struct tipc_link *l_ptr)
104 {
105         u32 max_pkt;
106
107         max_pkt = (l_ptr->b_ptr->mtu & ~3);
108         if (max_pkt > MAX_MSG_SIZE)
109                 max_pkt = MAX_MSG_SIZE;
110
111         l_ptr->max_pkt_target = max_pkt;
112         if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
113                 l_ptr->max_pkt = l_ptr->max_pkt_target;
114         else
115                 l_ptr->max_pkt = MAX_PKT_DEFAULT;
116
117         l_ptr->max_pkt_probes = 0;
118 }
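
/*
 * Numeric sketch of the starting point chosen above, assuming a bearer
 * MTU of 1500 (the real cap and default come from MAX_MSG_SIZE and
 * MAX_PKT_DEFAULT in msg.h):
 *
 *	max_pkt_target = 1500 & ~3 = 1500
 *	max_pkt        = min(max_pkt_target, MAX_PKT_DEFAULT)
 *
 * max_pkt is then raised towards max_pkt_target only by the max_pkt
 * probing the link protocol performs once the link is up.
 */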
119
120 static u32 link_next_sent(struct tipc_link *l_ptr)
121 {
122         if (l_ptr->next_out)
123                 return buf_seqno(l_ptr->next_out);
124         return mod(l_ptr->next_out_no);
125 }
126
127 static u32 link_last_sent(struct tipc_link *l_ptr)
128 {
129         return mod(link_next_sent(l_ptr) - 1);
130 }
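
/*
 * Sequence number sketch, assuming mod() masks values into the 16-bit
 * link sequence number space (with no next_out pending):
 *
 *	next_out_no == 0x10000  ->  link_next_sent() == 0
 *	next_out_no == 0        ->  link_last_sent() == 0xffff
 *
 * i.e. link sequence numbers wrap at 65536, so "last sent" is always
 * the 16-bit predecessor of "next to send".
 */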
131
132 /*
133  *  Simple non-static link routines (i.e. referenced outside this file)
134  */
135 int tipc_link_is_up(struct tipc_link *l_ptr)
136 {
137         if (!l_ptr)
138                 return 0;
139         return link_working_working(l_ptr) || link_working_unknown(l_ptr);
140 }
141
142 int tipc_link_is_active(struct tipc_link *l_ptr)
143 {
144         return  (l_ptr->owner->active_links[0] == l_ptr) ||
145                 (l_ptr->owner->active_links[1] == l_ptr);
146 }
147
148 /**
149  * link_timeout - handle expiration of link timer
150  * @l_ptr: pointer to link
151  *
152  * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
153  * with tipc_link_delete().  (There is no risk that the node will be deleted by
154  * another thread because tipc_link_delete() always cancels the link timer before
155  * tipc_node_delete() is called.)
156  */
157 static void link_timeout(struct tipc_link *l_ptr)
158 {
159         tipc_node_lock(l_ptr->owner);
160
161         /* update counters used in statistical profiling of send traffic */
162         l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
163         l_ptr->stats.queue_sz_counts++;
164
165         if (l_ptr->first_out) {
166                 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
167                 u32 length = msg_size(msg);
168
169                 if ((msg_user(msg) == MSG_FRAGMENTER) &&
170                     (msg_type(msg) == FIRST_FRAGMENT)) {
171                         length = msg_size(msg_get_wrapped(msg));
172                 }
173                 if (length) {
174                         l_ptr->stats.msg_lengths_total += length;
175                         l_ptr->stats.msg_length_counts++;
176                         if (length <= 64)
177                                 l_ptr->stats.msg_length_profile[0]++;
178                         else if (length <= 256)
179                                 l_ptr->stats.msg_length_profile[1]++;
180                         else if (length <= 1024)
181                                 l_ptr->stats.msg_length_profile[2]++;
182                         else if (length <= 4096)
183                                 l_ptr->stats.msg_length_profile[3]++;
184                         else if (length <= 16384)
185                                 l_ptr->stats.msg_length_profile[4]++;
186                         else if (length <= 32768)
187                                 l_ptr->stats.msg_length_profile[5]++;
188                         else
189                                 l_ptr->stats.msg_length_profile[6]++;
190                 }
191         }
192
193         /* do all other link processing performed on a periodic basis */
194
195         link_state_event(l_ptr, TIMEOUT_EVT);
196
197         if (l_ptr->next_out)
198                 tipc_link_push_queue(l_ptr);
199
200         tipc_node_unlock(l_ptr->owner);
201 }
202
203 static void link_set_timer(struct tipc_link *l_ptr, u32 time)
204 {
205         k_start_timer(&l_ptr->timer, time);
206 }
207
208 /**
209  * tipc_link_create - create a new link
210  * @n_ptr: pointer to associated node
211  * @b_ptr: pointer to associated bearer
212  * @media_addr: media address to use when sending messages over link
213  *
214  * Returns pointer to link.
215  */
216 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
217                               struct tipc_bearer *b_ptr,
218                               const struct tipc_media_addr *media_addr)
219 {
220         struct tipc_link *l_ptr;
221         struct tipc_msg *msg;
222         char *if_name;
223         char addr_string[16];
224         u32 peer = n_ptr->addr;
225
226         if (n_ptr->link_cnt >= 2) {
227                 tipc_addr_string_fill(addr_string, n_ptr->addr);
228                 pr_err("Attempt to establish third link to %s\n", addr_string);
229                 return NULL;
230         }
231
232         if (n_ptr->links[b_ptr->identity]) {
233                 tipc_addr_string_fill(addr_string, n_ptr->addr);
234                 pr_err("Attempt to establish second link on <%s> to %s\n",
235                        b_ptr->name, addr_string);
236                 return NULL;
237         }
238
239         l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
240         if (!l_ptr) {
241                 pr_warn("Link creation failed, no memory\n");
242                 return NULL;
243         }
244
245         l_ptr->addr = peer;
246         if_name = strchr(b_ptr->name, ':') + 1;
247         sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
248                 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
249                 tipc_node(tipc_own_addr),
250                 if_name,
251                 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
252                 /* note: peer i/f name is updated by reset/activate message */
253         memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
254         l_ptr->owner = n_ptr;
255         l_ptr->checkpoint = 1;
256         l_ptr->peer_session = INVALID_SESSION;
257         l_ptr->b_ptr = b_ptr;
258         link_set_supervision_props(l_ptr, b_ptr->tolerance);
259         l_ptr->state = RESET_UNKNOWN;
260
261         l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
262         msg = l_ptr->pmsg;
263         tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
264         msg_set_size(msg, sizeof(l_ptr->proto_msg));
265         msg_set_session(msg, (tipc_random & 0xffff));
266         msg_set_bearer_id(msg, b_ptr->identity);
267         strcpy((char *)msg_data(msg), if_name);
268
269         l_ptr->priority = b_ptr->priority;
270         tipc_link_set_queue_limits(l_ptr, b_ptr->window);
271
272         link_init_max_pkt(l_ptr);
273
274         l_ptr->next_out_no = 1;
275         INIT_LIST_HEAD(&l_ptr->waiting_ports);
276
277         link_reset_statistics(l_ptr);
278
279         tipc_node_attach_link(n_ptr, l_ptr);
280
281         k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
282         list_add_tail(&l_ptr->link_list, &b_ptr->links);
283         tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);
284
285         return l_ptr;
286 }
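
/*
 * A minimal, hypothetical caller sketch; setup_peer_link() is invented
 * for illustration (in the real code the neighbour discovery logic
 * plays this role) and error handling is reduced to the bare minimum:
 *
 *	static void setup_peer_link(struct tipc_node *n_ptr,
 *				    struct tipc_bearer *b_ptr,
 *				    const struct tipc_media_addr *maddr)
 *	{
 *		struct tipc_link *l_ptr;
 *
 *		l_ptr = tipc_link_create(n_ptr, b_ptr, maddr);
 *		if (!l_ptr)
 *			return;
 *	}
 *
 * No explicit start call is needed: tipc_link_create() queues
 * link_start() via tipc_k_signal(), which feeds STARTING_EVT into the
 * link state machine.
 */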
287
288 /**
289  * tipc_link_delete - delete a link
290  * @l_ptr: pointer to link
291  *
292  * Note: 'tipc_net_lock' is write_locked, bearer is locked.
293  * This routine must not grab the node lock until after link timer cancellation
294  * to avoid a potential deadlock situation.
295  */
296 void tipc_link_delete(struct tipc_link *l_ptr)
297 {
298         if (!l_ptr) {
299                 pr_err("Attempt to delete non-existent link\n");
300                 return;
301         }
302
303         k_cancel_timer(&l_ptr->timer);
304
305         tipc_node_lock(l_ptr->owner);
306         tipc_link_reset(l_ptr);
307         tipc_node_detach_link(l_ptr->owner, l_ptr);
308         tipc_link_stop(l_ptr);
309         list_del_init(&l_ptr->link_list);
310         tipc_node_unlock(l_ptr->owner);
311         k_term_timer(&l_ptr->timer);
312         kfree(l_ptr);
313 }
314
315 static void link_start(struct tipc_link *l_ptr)
316 {
317         tipc_node_lock(l_ptr->owner);
318         link_state_event(l_ptr, STARTING_EVT);
319         tipc_node_unlock(l_ptr->owner);
320 }
321
322 /**
323  * link_schedule_port - schedule port for deferred sending
324  * @l_ptr: pointer to link
325  * @origport: reference to sending port
326  * @sz: amount of data to be sent
327  *
328  * Schedules port for renewed sending of messages after link congestion
329  * has abated.
330  */
331 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
332 {
333         struct tipc_port *p_ptr;
334
335         spin_lock_bh(&tipc_port_list_lock);
336         p_ptr = tipc_port_lock(origport);
337         if (p_ptr) {
338                 if (!p_ptr->wakeup)
339                         goto exit;
340                 if (!list_empty(&p_ptr->wait_list))
341                         goto exit;
342                 p_ptr->congested = 1;
343                 p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
344                 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
345                 l_ptr->stats.link_congs++;
346 exit:
347                 tipc_port_unlock(p_ptr);
348         }
349         spin_unlock_bh(&tipc_port_list_lock);
350         return -ELINKCONG;
351 }
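
/*
 * The waiting_pkts calculation above is a ceiling division: it records
 * how many link-level packets the blocked send would have needed, so
 * the wakeup loop in tipc_link_wakeup_ports() can budget the freed
 * window. Illustrative values for a max_pkt of 1500:
 *
 *	sz = 1500  ->  waiting_pkts = 1
 *	sz = 1501  ->  waiting_pkts = 2
 *	sz = 3001  ->  waiting_pkts = 3
 */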
352
353 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
354 {
355         struct tipc_port *p_ptr;
356         struct tipc_port *temp_p_ptr;
357         int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
358
359         if (all)
360                 win = 100000;
361         if (win <= 0)
362                 return;
363         if (!spin_trylock_bh(&tipc_port_list_lock))
364                 return;
365         if (link_congested(l_ptr))
366                 goto exit;
367         list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
368                                  wait_list) {
369                 if (win <= 0)
370                         break;
371                 list_del_init(&p_ptr->wait_list);
372                 spin_lock_bh(p_ptr->lock);
373                 p_ptr->congested = 0;
374                 p_ptr->wakeup(p_ptr);
375                 win -= p_ptr->waiting_pkts;
376                 spin_unlock_bh(p_ptr->lock);
377         }
378
379 exit:
380         spin_unlock_bh(&tipc_port_list_lock);
381 }
382
383 /**
384  * link_release_outqueue - purge link's outbound message queue
385  * @l_ptr: pointer to link
386  */
387 static void link_release_outqueue(struct tipc_link *l_ptr)
388 {
389         struct sk_buff *buf = l_ptr->first_out;
390         struct sk_buff *next;
391
392         while (buf) {
393                 next = buf->next;
394                 kfree_skb(buf);
395                 buf = next;
396         }
397         l_ptr->first_out = NULL;
398         l_ptr->out_queue_size = 0;
399 }
400
401 /**
402  * tipc_link_reset_fragments - purge link's inbound message fragments queue
403  * @l_ptr: pointer to link
404  */
405 void tipc_link_reset_fragments(struct tipc_link *l_ptr)
406 {
407         struct sk_buff *buf = l_ptr->defragm_buf;
408         struct sk_buff *next;
409
410         while (buf) {
411                 next = buf->next;
412                 kfree_skb(buf);
413                 buf = next;
414         }
415         l_ptr->defragm_buf = NULL;
416 }
417
418 /**
419  * tipc_link_stop - purge all inbound and outbound messages associated with link
420  * @l_ptr: pointer to link
421  */
422 void tipc_link_stop(struct tipc_link *l_ptr)
423 {
424         struct sk_buff *buf;
425         struct sk_buff *next;
426
427         buf = l_ptr->oldest_deferred_in;
428         while (buf) {
429                 next = buf->next;
430                 kfree_skb(buf);
431                 buf = next;
432         }
433
434         buf = l_ptr->first_out;
435         while (buf) {
436                 next = buf->next;
437                 kfree_skb(buf);
438                 buf = next;
439         }
440
441         tipc_link_reset_fragments(l_ptr);
442
443         kfree_skb(l_ptr->proto_msg_queue);
444         l_ptr->proto_msg_queue = NULL;
445 }
446
447 void tipc_link_reset(struct tipc_link *l_ptr)
448 {
449         struct sk_buff *buf;
450         u32 prev_state = l_ptr->state;
451         u32 checkpoint = l_ptr->next_in_no;
452         int was_active_link = tipc_link_is_active(l_ptr);
453
454         msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
455
456         /* Link is down, accept any session */
457         l_ptr->peer_session = INVALID_SESSION;
458
459         /* Prepare for max packet size negotiation */
460         link_init_max_pkt(l_ptr);
461
462         l_ptr->state = RESET_UNKNOWN;
463
464         if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
465                 return;
466
467         tipc_node_link_down(l_ptr->owner, l_ptr);
468         tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
469
470         if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
471             l_ptr->owner->permit_changeover) {
472                 l_ptr->reset_checkpoint = checkpoint;
473                 l_ptr->exp_msg_count = START_CHANGEOVER;
474         }
475
476         /* Clean up all queues: */
477         link_release_outqueue(l_ptr);
478         kfree_skb(l_ptr->proto_msg_queue);
479         l_ptr->proto_msg_queue = NULL;
480         buf = l_ptr->oldest_deferred_in;
481         while (buf) {
482                 struct sk_buff *next = buf->next;
483                 kfree_skb(buf);
484                 buf = next;
485         }
486         if (!list_empty(&l_ptr->waiting_ports))
487                 tipc_link_wakeup_ports(l_ptr, 1);
488
489         l_ptr->retransm_queue_head = 0;
490         l_ptr->retransm_queue_size = 0;
491         l_ptr->last_out = NULL;
492         l_ptr->first_out = NULL;
493         l_ptr->next_out = NULL;
494         l_ptr->unacked_window = 0;
495         l_ptr->checkpoint = 1;
496         l_ptr->next_out_no = 1;
497         l_ptr->deferred_inqueue_sz = 0;
498         l_ptr->oldest_deferred_in = NULL;
499         l_ptr->newest_deferred_in = NULL;
500         l_ptr->fsm_msg_cnt = 0;
501         l_ptr->stale_count = 0;
502         link_reset_statistics(l_ptr);
503 }
504
505
506 static void link_activate(struct tipc_link *l_ptr)
507 {
508         l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
509         tipc_node_link_up(l_ptr->owner, l_ptr);
510         tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
511 }
512
513 /**
514  * link_state_event - link finite state machine
515  * @l_ptr: pointer to link
516  * @event: state machine event to process
517  */
518 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
519 {
520         struct tipc_link *other;
521         u32 cont_intv = l_ptr->continuity_interval;
522
523         if (!l_ptr->started && (event != STARTING_EVT))
524                 return;         /* Not yet. */
525
526         if (link_blocked(l_ptr)) {
527                 if (event == TIMEOUT_EVT)
528                         link_set_timer(l_ptr, cont_intv);
529                 return;   /* Changeover going on */
530         }
531
532         switch (l_ptr->state) {
533         case WORKING_WORKING:
534                 switch (event) {
535                 case TRAFFIC_MSG_EVT:
536                 case ACTIVATE_MSG:
537                         break;
538                 case TIMEOUT_EVT:
539                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
540                                 l_ptr->checkpoint = l_ptr->next_in_no;
541                                 if (tipc_bclink_acks_missing(l_ptr->owner)) {
542                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
543                                                                  0, 0, 0, 0, 0);
544                                         l_ptr->fsm_msg_cnt++;
545                                 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
546                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
547                                                                  1, 0, 0, 0, 0);
548                                         l_ptr->fsm_msg_cnt++;
549                                 }
550                                 link_set_timer(l_ptr, cont_intv);
551                                 break;
552                         }
553                         l_ptr->state = WORKING_UNKNOWN;
554                         l_ptr->fsm_msg_cnt = 0;
555                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
556                         l_ptr->fsm_msg_cnt++;
557                         link_set_timer(l_ptr, cont_intv / 4);
558                         break;
559                 case RESET_MSG:
560                         pr_info("%s<%s>, requested by peer\n", link_rst_msg,
561                                 l_ptr->name);
562                         tipc_link_reset(l_ptr);
563                         l_ptr->state = RESET_RESET;
564                         l_ptr->fsm_msg_cnt = 0;
565                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
566                         l_ptr->fsm_msg_cnt++;
567                         link_set_timer(l_ptr, cont_intv);
568                         break;
569                 default:
570                         pr_err("%s%u in WW state\n", link_unk_evt, event);
571                 }
572                 break;
573         case WORKING_UNKNOWN:
574                 switch (event) {
575                 case TRAFFIC_MSG_EVT:
576                 case ACTIVATE_MSG:
577                         l_ptr->state = WORKING_WORKING;
578                         l_ptr->fsm_msg_cnt = 0;
579                         link_set_timer(l_ptr, cont_intv);
580                         break;
581                 case RESET_MSG:
582                         pr_info("%s<%s>, requested by peer while probing\n",
583                                 link_rst_msg, l_ptr->name);
584                         tipc_link_reset(l_ptr);
585                         l_ptr->state = RESET_RESET;
586                         l_ptr->fsm_msg_cnt = 0;
587                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
588                         l_ptr->fsm_msg_cnt++;
589                         link_set_timer(l_ptr, cont_intv);
590                         break;
591                 case TIMEOUT_EVT:
592                         if (l_ptr->next_in_no != l_ptr->checkpoint) {
593                                 l_ptr->state = WORKING_WORKING;
594                                 l_ptr->fsm_msg_cnt = 0;
595                                 l_ptr->checkpoint = l_ptr->next_in_no;
596                                 if (tipc_bclink_acks_missing(l_ptr->owner)) {
597                                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
598                                                                  0, 0, 0, 0, 0);
599                                         l_ptr->fsm_msg_cnt++;
600                                 }
601                                 link_set_timer(l_ptr, cont_intv);
602                         } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
603                                 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
604                                                          1, 0, 0, 0, 0);
605                                 l_ptr->fsm_msg_cnt++;
606                                 link_set_timer(l_ptr, cont_intv / 4);
607                         } else {        /* Link has failed */
608                                 pr_warn("%s<%s>, peer not responding\n",
609                                         link_rst_msg, l_ptr->name);
610                                 tipc_link_reset(l_ptr);
611                                 l_ptr->state = RESET_UNKNOWN;
612                                 l_ptr->fsm_msg_cnt = 0;
613                                 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
614                                                          0, 0, 0, 0, 0);
615                                 l_ptr->fsm_msg_cnt++;
616                                 link_set_timer(l_ptr, cont_intv);
617                         }
618                         break;
619                 default:
620                         pr_err("%s%u in WU state\n", link_unk_evt, event);
621                 }
622                 break;
623         case RESET_UNKNOWN:
624                 switch (event) {
625                 case TRAFFIC_MSG_EVT:
626                         break;
627                 case ACTIVATE_MSG:
628                         other = l_ptr->owner->active_links[0];
629                         if (other && link_working_unknown(other))
630                                 break;
631                         l_ptr->state = WORKING_WORKING;
632                         l_ptr->fsm_msg_cnt = 0;
633                         link_activate(l_ptr);
634                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
635                         l_ptr->fsm_msg_cnt++;
636                         if (l_ptr->owner->working_links == 1)
637                                 tipc_link_send_sync(l_ptr);
638                         link_set_timer(l_ptr, cont_intv);
639                         break;
640                 case RESET_MSG:
641                         l_ptr->state = RESET_RESET;
642                         l_ptr->fsm_msg_cnt = 0;
643                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
644                         l_ptr->fsm_msg_cnt++;
645                         link_set_timer(l_ptr, cont_intv);
646                         break;
647                 case STARTING_EVT:
648                         l_ptr->started = 1;
649                         /* fall through */
650                 case TIMEOUT_EVT:
651                         tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
652                         l_ptr->fsm_msg_cnt++;
653                         link_set_timer(l_ptr, cont_intv);
654                         break;
655                 default:
656                         pr_err("%s%u in RU state\n", link_unk_evt, event);
657                 }
658                 break;
659         case RESET_RESET:
660                 switch (event) {
661                 case TRAFFIC_MSG_EVT:
662                 case ACTIVATE_MSG:
663                         other = l_ptr->owner->active_links[0];
664                         if (other && link_working_unknown(other))
665                                 break;
666                         l_ptr->state = WORKING_WORKING;
667                         l_ptr->fsm_msg_cnt = 0;
668                         link_activate(l_ptr);
669                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
670                         l_ptr->fsm_msg_cnt++;
671                         if (l_ptr->owner->working_links == 1)
672                                 tipc_link_send_sync(l_ptr);
673                         link_set_timer(l_ptr, cont_intv);
674                         break;
675                 case RESET_MSG:
676                         break;
677                 case TIMEOUT_EVT:
678                         tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
679                         l_ptr->fsm_msg_cnt++;
680                         link_set_timer(l_ptr, cont_intv);
681                         break;
682                 default:
683                         pr_err("%s%u in RR state\n", link_unk_evt, event);
684                 }
685                 break;
686         default:
687                 pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
688         }
689 }
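
/*
 * Condensed view of the transitions implemented above (probe and ack
 * details omitted; derived directly from the switch statement):
 *
 *	WORKING_WORKING --timeout, no new traffic-->          WORKING_UNKNOWN
 *	WORKING_UNKNOWN --traffic/ACTIVATE/timely timeout-->  WORKING_WORKING
 *	WORKING_UNKNOWN --abort_limit probes unanswered-->    RESET_UNKNOWN (reset)
 *	WORKING_*       --RESET_MSG from peer-->               RESET_RESET (reset)
 *	RESET_UNKNOWN   --ACTIVATE_MSG-->                      WORKING_WORKING
 *	RESET_UNKNOWN   --RESET_MSG-->                         RESET_RESET
 *	RESET_RESET     --traffic/ACTIVATE_MSG-->              WORKING_WORKING
 *	RESET_*         --STARTING_EVT/timeout-->              resend RESET/ACTIVATE, no change
 */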
690
691 /*
692  * link_bundle_buf(): Append contents of a buffer to
693  * the tail of an existing one.
694  */
695 static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
696                            struct sk_buff *buf)
697 {
698         struct tipc_msg *bundler_msg = buf_msg(bundler);
699         struct tipc_msg *msg = buf_msg(buf);
700         u32 size = msg_size(msg);
701         u32 bundle_size = msg_size(bundler_msg);
702         u32 to_pos = align(bundle_size);
703         u32 pad = to_pos - bundle_size;
704
705         if (msg_user(bundler_msg) != MSG_BUNDLER)
706                 return 0;
707         if (msg_type(bundler_msg) != OPEN_MSG)
708                 return 0;
709         if (skb_tailroom(bundler) < (pad + size))
710                 return 0;
711         if (l_ptr->max_pkt < (to_pos + size))
712                 return 0;
713
714         skb_put(bundler, pad + size);
715         skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
716         msg_set_size(bundler_msg, to_pos + size);
717         msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
718         kfree_skb(buf);
719         l_ptr->stats.sent_bundled++;
720         return 1;
721 }
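
/*
 * Worked example of the bundling arithmetic above (sizes arbitrary):
 * with an open bundle of msg_size 45, to_pos = align(45) = 48 and
 * pad = 3, so a 100-byte message is copied in at offset 48 and the
 * bundle grows to 148 bytes, provided the tailroom and max_pkt checks
 * both pass.
 */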
722
723 static void link_add_to_outqueue(struct tipc_link *l_ptr,
724                                  struct sk_buff *buf,
725                                  struct tipc_msg *msg)
726 {
727         u32 ack = mod(l_ptr->next_in_no - 1);
728         u32 seqno = mod(l_ptr->next_out_no++);
729
730         msg_set_word(msg, 2, ((ack << 16) | seqno));
731         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
732         buf->next = NULL;
733         if (l_ptr->first_out) {
734                 l_ptr->last_out->next = buf;
735                 l_ptr->last_out = buf;
736         } else
737                 l_ptr->first_out = l_ptr->last_out = buf;
738
739         l_ptr->out_queue_size++;
740         if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
741                 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
742 }
743
744 static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
745                                        struct sk_buff *buf_chain,
746                                        u32 long_msgno)
747 {
748         struct sk_buff *buf;
749         struct tipc_msg *msg;
750
751         if (!l_ptr->next_out)
752                 l_ptr->next_out = buf_chain;
753         while (buf_chain) {
754                 buf = buf_chain;
755                 buf_chain = buf_chain->next;
756
757                 msg = buf_msg(buf);
758                 msg_set_long_msgno(msg, long_msgno);
759                 link_add_to_outqueue(l_ptr, buf, msg);
760         }
761 }
762
763 /*
764  * tipc_link_send_buf() is the 'full path' for messages, called from
765  * inside TIPC when the 'fast path' in tipc_send_buf
766  * has failed, and from link_send()
767  */
768 int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
769 {
770         struct tipc_msg *msg = buf_msg(buf);
771         u32 size = msg_size(msg);
772         u32 dsz = msg_data_sz(msg);
773         u32 queue_size = l_ptr->out_queue_size;
774         u32 imp = tipc_msg_tot_importance(msg);
775         u32 queue_limit = l_ptr->queue_limit[imp];
776         u32 max_packet = l_ptr->max_pkt;
777
778         /* Match msg importance against queue limits: */
779         if (unlikely(queue_size >= queue_limit)) {
780                 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
781                         link_schedule_port(l_ptr, msg_origport(msg), size);
782                         kfree_skb(buf);
783                         return -ELINKCONG;
784                 }
785                 kfree_skb(buf);
786                 if (imp > CONN_MANAGER) {
787                         pr_warn("%s<%s>, send queue full", link_rst_msg,
788                                 l_ptr->name);
789                         tipc_link_reset(l_ptr);
790                 }
791                 return dsz;
792         }
793
794         /* Fragmentation needed ? */
795         if (size > max_packet)
796                 return link_send_long_buf(l_ptr, buf);
797
798         /* Packet can be queued or sent. */
799         if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
800                    !link_congested(l_ptr))) {
801                 link_add_to_outqueue(l_ptr, buf, msg);
802
803                 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
804                 l_ptr->unacked_window = 0;
805                 return dsz;
806         }
807         /* Congestion: can message be bundled ? */
808         if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
809             (msg_user(msg) != MSG_FRAGMENTER)) {
810
811                 /* Try adding message to an existing bundle */
812                 if (l_ptr->next_out &&
813                     link_bundle_buf(l_ptr, l_ptr->last_out, buf))
814                         return dsz;
815
816                 /* Try creating a new bundle */
817                 if (size <= max_packet * 2 / 3) {
818                         struct sk_buff *bundler = tipc_buf_acquire(max_packet);
819                         struct tipc_msg bundler_hdr;
820
821                         if (bundler) {
822                                 tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
823                                          INT_H_SIZE, l_ptr->addr);
824                                 skb_copy_to_linear_data(bundler, &bundler_hdr,
825                                                         INT_H_SIZE);
826                                 skb_trim(bundler, INT_H_SIZE);
827                                 link_bundle_buf(l_ptr, bundler, buf);
828                                 buf = bundler;
829                                 msg = buf_msg(buf);
830                                 l_ptr->stats.sent_bundles++;
831                         }
832                 }
833         }
834         if (!l_ptr->next_out)
835                 l_ptr->next_out = buf;
836         link_add_to_outqueue(l_ptr, buf, msg);
837         return dsz;
838 }
839
840 /*
841  * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
842  * not been selected yet, and the owner node is not locked.
843  * Called by TIPC internal users, e.g. the name distributor.
844  */
845 int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
846 {
847         struct tipc_link *l_ptr;
848         struct tipc_node *n_ptr;
849         int res = -ELINKCONG;
850
851         read_lock_bh(&tipc_net_lock);
852         n_ptr = tipc_node_find(dest);
853         if (n_ptr) {
854                 tipc_node_lock(n_ptr);
855                 l_ptr = n_ptr->active_links[selector & 1];
856                 if (l_ptr)
857                         res = tipc_link_send_buf(l_ptr, buf);
858                 else
859                         kfree_skb(buf);
860                 tipc_node_unlock(n_ptr);
861         } else {
862                 kfree_skb(buf);
863         }
864         read_unlock_bh(&tipc_net_lock);
865         return res;
866 }
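
/*
 * Hypothetical caller sketch for the internal users mentioned above
 * (the wrapper name is invented for illustration). Any stable value
 * works as selector, since only its lowest bit picks between the two
 * active links:
 *
 *	static void send_internal_msg(struct sk_buff *buf, u32 dnode)
 *	{
 *		if (tipc_link_send(buf, dnode, dnode) == -ELINKCONG)
 *			pr_debug("no uncongested link to %x\n", dnode);
 *	}
 */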
867
868 /*
869  * tipc_link_send_sync - synchronize broadcast link endpoints.
870  *
871  * Give a newly added peer node the sequence number where it should
872  * start receiving and acking broadcast packets.
873  *
874  * Called with node locked
875  */
876 static void tipc_link_send_sync(struct tipc_link *l)
877 {
878         struct sk_buff *buf;
879         struct tipc_msg *msg;
880
881         buf = tipc_buf_acquire(INT_H_SIZE);
882         if (!buf)
883                 return;
884
885         msg = buf_msg(buf);
886         tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
887         msg_set_last_bcast(msg, l->owner->bclink.acked);
888         link_add_chain_to_outqueue(l, buf, 0);
889         tipc_link_push_queue(l);
890 }
891
892 /*
893  * tipc_link_recv_sync - synchronize broadcast link endpoints.
894  * Receive the sequence number where we should start receiving and
895  * acking broadcast packets from a newly added peer node, and open
896  * up for reception of such packets.
897  *
898  * Called with node locked
899  */
900 static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
901 {
902         struct tipc_msg *msg = buf_msg(buf);
903
904         n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
905         n->bclink.recv_permitted = true;
906         kfree_skb(buf);
907 }
908
909 /*
910  * tipc_link_send_names - send name table entries to new neighbor
911  *
912  * Send routine for bulk delivery of name table messages when contact
913  * with a new neighbor occurs. No link congestion checking is performed
914  * because name table messages *must* be delivered. The messages must be
915  * small enough not to require fragmentation.
916  * Called without any locks held.
917  */
918 void tipc_link_send_names(struct list_head *message_list, u32 dest)
919 {
920         struct tipc_node *n_ptr;
921         struct tipc_link *l_ptr;
922         struct sk_buff *buf;
923         struct sk_buff *temp_buf;
924
925         if (list_empty(message_list))
926                 return;
927
928         read_lock_bh(&tipc_net_lock);
929         n_ptr = tipc_node_find(dest);
930         if (n_ptr) {
931                 tipc_node_lock(n_ptr);
932                 l_ptr = n_ptr->active_links[0];
933                 if (l_ptr) {
934                         /* convert circular list to linear list */
935                         ((struct sk_buff *)message_list->prev)->next = NULL;
936                         link_add_chain_to_outqueue(l_ptr,
937                                 (struct sk_buff *)message_list->next, 0);
938                         tipc_link_push_queue(l_ptr);
939                         INIT_LIST_HEAD(message_list);
940                 }
941                 tipc_node_unlock(n_ptr);
942         }
943         read_unlock_bh(&tipc_net_lock);
944
945         /* discard the messages if they couldn't be sent */
946         list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
947                 list_del((struct list_head *)buf);
948                 kfree_skb(buf);
949         }
950 }
951
952 /*
953  * link_send_buf_fast: Entry for data messages where the
954  * destination link is known and the header is complete,
956  * including the total message length. Very time critical.
956  * Link is locked. Returns user data length.
957  */
958 static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
959                               u32 *used_max_pkt)
960 {
961         struct tipc_msg *msg = buf_msg(buf);
962         int res = msg_data_sz(msg);
963
964         if (likely(!link_congested(l_ptr))) {
965                 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
966                         if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
967                                 link_add_to_outqueue(l_ptr, buf, msg);
968                                 tipc_bearer_send(l_ptr->b_ptr, buf,
969                                                  &l_ptr->media_addr);
970                                 l_ptr->unacked_window = 0;
971                                 return res;
972                         }
973                 } else
974                         *used_max_pkt = l_ptr->max_pkt;
975         }
976         return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
977 }
978
979 /*
980  * tipc_link_send_sections_fast: Entry for messages where the
981  * destination processor is known and the header is complete,
982  * except for total message length.
983  * Returns user data length or errno.
984  */
985 int tipc_link_send_sections_fast(struct tipc_port *sender,
986                                  struct iovec const *msg_sect,
987                                  unsigned int len, u32 destaddr)
988 {
989         struct tipc_msg *hdr = &sender->phdr;
990         struct tipc_link *l_ptr;
991         struct sk_buff *buf;
992         struct tipc_node *node;
993         int res;
994         u32 selector = msg_origport(hdr) & 1;
995
996 again:
997         /*
998          * Try building message using port's max_pkt hint.
999          * (Must not hold any locks while building message.)
1000          */
1001         res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
1002         /* Exit if build request was invalid */
1003         if (unlikely(res < 0))
1004                 return res;
1005
1006         read_lock_bh(&tipc_net_lock);
1007         node = tipc_node_find(destaddr);
1008         if (likely(node)) {
1009                 tipc_node_lock(node);
1010                 l_ptr = node->active_links[selector];
1011                 if (likely(l_ptr)) {
1012                         if (likely(buf)) {
1013                                 res = link_send_buf_fast(l_ptr, buf,
1014                                                          &sender->max_pkt);
1015 exit:
1016                                 tipc_node_unlock(node);
1017                                 read_unlock_bh(&tipc_net_lock);
1018                                 return res;
1019                         }
1020
1021                         /* Exit if link (or bearer) is congested */
1022                         if (link_congested(l_ptr) ||
1023                             tipc_bearer_blocked(l_ptr->b_ptr)) {
1024                                 res = link_schedule_port(l_ptr,
1025                                                          sender->ref, res);
1026                                 goto exit;
1027                         }
1028
1029                         /*
1030                          * Message size exceeds max_pkt hint; update hint,
1031                          * then re-try fast path or fragment the message
1032                          */
1033                         sender->max_pkt = l_ptr->max_pkt;
1034                         tipc_node_unlock(node);
1035                         read_unlock_bh(&tipc_net_lock);
1036
1037
1038                         if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1039                                 goto again;
1040
1041                         return link_send_sections_long(sender, msg_sect, len,
1042                                                        destaddr);
1043                 }
1044                 tipc_node_unlock(node);
1045         }
1046         read_unlock_bh(&tipc_net_lock);
1047
1048         /* Couldn't find a link to the destination node */
1049         if (buf)
1050                 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1051         if (res >= 0)
1052                 return tipc_port_reject_sections(sender, hdr, msg_sect,
1053                                                  len, TIPC_ERR_NO_NODE);
1054         return res;
1055 }
1056
1057 /*
1058  * link_send_sections_long(): Entry for long messages where the
1059  * destination node is known and the header is complete,
1060  * including the total message length.
1061  * Link and bearer congestion status have been checked to be ok,
1062  * and are ignored if they change.
1063  *
1064  * Note that fragments do not use the full link MTU so that they won't have
1065  * to undergo refragmentation if link changeover causes them to be sent
1066  * over another link with an additional tunnel header added as prefix.
1067  * (Refragmentation will still occur if the other link has a smaller MTU.)
1068  *
1069  * Returns user data length or errno.
1070  */
1071 static int link_send_sections_long(struct tipc_port *sender,
1072                                    struct iovec const *msg_sect,
1073                                    unsigned int len, u32 destaddr)
1074 {
1075         struct tipc_link *l_ptr;
1076         struct tipc_node *node;
1077         struct tipc_msg *hdr = &sender->phdr;
1078         u32 dsz = len;
1079         u32 max_pkt, fragm_sz, rest;
1080         struct tipc_msg fragm_hdr;
1081         struct sk_buff *buf, *buf_chain, *prev;
1082         u32 fragm_crs, fragm_rest, hsz, sect_rest;
1083         const unchar __user *sect_crs;
1084         int curr_sect;
1085         u32 fragm_no;
1086         int res = 0;
1087
1088 again:
1089         fragm_no = 1;
1090         max_pkt = sender->max_pkt - INT_H_SIZE;
1091                 /* leave room for tunnel header in case of link changeover */
1092         fragm_sz = max_pkt - INT_H_SIZE;
1093                 /* leave room for fragmentation header in each fragment */
1094         rest = dsz;
1095         fragm_crs = 0;
1096         fragm_rest = 0;
1097         sect_rest = 0;
1098         sect_crs = NULL;
1099         curr_sect = -1;
1100
1101         /* Prepare reusable fragment header */
1102         tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1103                  INT_H_SIZE, msg_destnode(hdr));
1104         msg_set_size(&fragm_hdr, max_pkt);
1105         msg_set_fragm_no(&fragm_hdr, 1);
1106
1107         /* Prepare header of first fragment */
1108         buf_chain = buf = tipc_buf_acquire(max_pkt);
1109         if (!buf)
1110                 return -ENOMEM;
1111         buf->next = NULL;
1112         skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1113         hsz = msg_hdr_sz(hdr);
1114         skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
1115
1116         /* Chop up message */
1117         fragm_crs = INT_H_SIZE + hsz;
1118         fragm_rest = fragm_sz - hsz;
1119
1120         do {            /* For all sections */
1121                 u32 sz;
1122
1123                 if (!sect_rest) {
1124                         sect_rest = msg_sect[++curr_sect].iov_len;
1125                         sect_crs = msg_sect[curr_sect].iov_base;
1126                 }
1127
1128                 if (sect_rest < fragm_rest)
1129                         sz = sect_rest;
1130                 else
1131                         sz = fragm_rest;
1132
1133                 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1134                         res = -EFAULT;
1135 error:
1136                         for (; buf_chain; buf_chain = buf) {
1137                                 buf = buf_chain->next;
1138                                 kfree_skb(buf_chain);
1139                         }
1140                         return res;
1141                 }
1142                 sect_crs += sz;
1143                 sect_rest -= sz;
1144                 fragm_crs += sz;
1145                 fragm_rest -= sz;
1146                 rest -= sz;
1147
1148                 if (!fragm_rest && rest) {
1149
1150                         /* Initiate new fragment: */
1151                         if (rest <= fragm_sz) {
1152                                 fragm_sz = rest;
1153                                 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
1154                         } else {
1155                                 msg_set_type(&fragm_hdr, FRAGMENT);
1156                         }
1157                         msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1158                         msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1159                         prev = buf;
1160                         buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
1161                         if (!buf) {
1162                                 res = -ENOMEM;
1163                                 goto error;
1164                         }
1165
1166                         buf->next = NULL;
1167                         prev->next = buf;
1168                         skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
1169                         fragm_crs = INT_H_SIZE;
1170                         fragm_rest = fragm_sz;
1171                 }
1172         } while (rest > 0);
1173
1174         /*
1175          * Now we have a buffer chain. Select a link and check
1176          * that packet size is still OK
1177          */
1178         node = tipc_node_find(destaddr);
1179         if (likely(node)) {
1180                 tipc_node_lock(node);
1181                 l_ptr = node->active_links[sender->ref & 1];
1182                 if (!l_ptr) {
1183                         tipc_node_unlock(node);
1184                         goto reject;
1185                 }
1186                 if (l_ptr->max_pkt < max_pkt) {
1187                         sender->max_pkt = l_ptr->max_pkt;
1188                         tipc_node_unlock(node);
1189                         for (; buf_chain; buf_chain = buf) {
1190                                 buf = buf_chain->next;
1191                                 kfree_skb(buf_chain);
1192                         }
1193                         goto again;
1194                 }
1195         } else {
1196 reject:
1197                 for (; buf_chain; buf_chain = buf) {
1198                         buf = buf_chain->next;
1199                         kfree_skb(buf_chain);
1200                 }
1201                 return tipc_port_reject_sections(sender, hdr, msg_sect,
1202                                                  len, TIPC_ERR_NO_NODE);
1203         }
1204
1205         /* Append chain of fragments to send queue & send them */
1206         l_ptr->long_msg_seq_no++;
1207         link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
1208         l_ptr->stats.sent_fragments += fragm_no;
1209         l_ptr->stats.sent_fragmented++;
1210         tipc_link_push_queue(l_ptr);
1211         tipc_node_unlock(node);
1212         return dsz;
1213 }
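
/*
 * Sizing sketch for the fragmentation above, assuming the conventional
 * 40-byte INT_H_SIZE and a sender max_pkt hint of 1500:
 *
 *	max_pkt  = 1500 - 40 = 1460	(room left for a changeover tunnel header)
 *	fragm_sz = 1460 - 40 = 1420	(data carried per fragment)
 *
 * A message with 10000 bytes of section data then needs on the order
 * of ceil(10000 / 1420) = 8 fragments; the exact count also depends on
 * the original message header, which rides in the first fragment.
 */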
1214
1215 /*
1216  * tipc_link_push_packet: Push one unsent packet to the media
1217  */
1218 u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1219 {
1220         struct sk_buff *buf = l_ptr->first_out;
1221         u32 r_q_size = l_ptr->retransm_queue_size;
1222         u32 r_q_head = l_ptr->retransm_queue_head;
1223
1224         /* Step to position where retransmission failed, if any,    */
1225         /* consider that buffers may have been released in meantime */
1226         if (r_q_size && buf) {
1227                 u32 last = lesser(mod(r_q_head + r_q_size),
1228                                   link_last_sent(l_ptr));
1229                 u32 first = buf_seqno(buf);
1230
1231                 while (buf && less(first, r_q_head)) {
1232                         first = mod(first + 1);
1233                         buf = buf->next;
1234                 }
1235                 l_ptr->retransm_queue_head = r_q_head = first;
1236                 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1237         }
1238
1239         /* Continue retransmission now, if there is anything: */
1240         if (r_q_size && buf) {
1241                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1242                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1243                 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1244                 l_ptr->retransm_queue_head = mod(++r_q_head);
1245                 l_ptr->retransm_queue_size = --r_q_size;
1246                 l_ptr->stats.retransmitted++;
1247                 return 0;
1248         }
1249
1250         /* Send deferred protocol message, if any: */
1251         buf = l_ptr->proto_msg_queue;
1252         if (buf) {
1253                 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1254                 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1255                 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1256                 l_ptr->unacked_window = 0;
1257                 kfree_skb(buf);
1258                 l_ptr->proto_msg_queue = NULL;
1259                 return 0;
1260         }
1261
1262         /* Send one deferred data message, if send window not full: */
1263         buf = l_ptr->next_out;
1264         if (buf) {
1265                 struct tipc_msg *msg = buf_msg(buf);
1266                 u32 next = msg_seqno(msg);
1267                 u32 first = buf_seqno(l_ptr->first_out);
1268
1269                 if (mod(next - first) < l_ptr->queue_limit[0]) {
1270                         msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1271                         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1272                         tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1273                         if (msg_user(msg) == MSG_BUNDLER)
1274                                 msg_set_type(msg, CLOSED_MSG);
1275                         l_ptr->next_out = buf->next;
1276                         return 0;
1277                 }
1278         }
1279         return 1;
1280 }
1281
1282 /*
1283  * push_queue(): push out the unsent messages of a link where
1284  *               congestion has abated. Node is locked
1285  */
1286 void tipc_link_push_queue(struct tipc_link *l_ptr)
1287 {
1288         u32 res;
1289
1290         if (tipc_bearer_blocked(l_ptr->b_ptr))
1291                 return;
1292
1293         do {
1294                 res = tipc_link_push_packet(l_ptr);
1295         } while (!res);
1296 }
1297
1298 static void link_reset_all(unsigned long addr)
1299 {
1300         struct tipc_node *n_ptr;
1301         char addr_string[16];
1302         u32 i;
1303
1304         read_lock_bh(&tipc_net_lock);
1305         n_ptr = tipc_node_find((u32)addr);
1306         if (!n_ptr) {
1307                 read_unlock_bh(&tipc_net_lock);
1308                 return; /* node no longer exists */
1309         }
1310
1311         tipc_node_lock(n_ptr);
1312
1313         pr_warn("Resetting all links to %s\n",
1314                 tipc_addr_string_fill(addr_string, n_ptr->addr));
1315
1316         for (i = 0; i < MAX_BEARERS; i++) {
1317                 if (n_ptr->links[i]) {
1318                         link_print(n_ptr->links[i], "Resetting link\n");
1319                         tipc_link_reset(n_ptr->links[i]);
1320                 }
1321         }
1322
1323         tipc_node_unlock(n_ptr);
1324         read_unlock_bh(&tipc_net_lock);
1325 }
1326
1327 static void link_retransmit_failure(struct tipc_link *l_ptr,
1328                                     struct sk_buff *buf)
1329 {
1330         struct tipc_msg *msg = buf_msg(buf);
1331
1332         pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
1333
1334         if (l_ptr->addr) {
1335                 /* Handle failure on standard link */
1336                 link_print(l_ptr, "Resetting link\n");
1337                 tipc_link_reset(l_ptr);
1338
1339         } else {
1340                 /* Handle failure on broadcast link */
1341                 struct tipc_node *n_ptr;
1342                 char addr_string[16];
1343
1344                 pr_info("Msg seq number: %u,  ", msg_seqno(msg));
1345                 pr_cont("Outstanding acks: %lu\n",
1346                         (unsigned long) TIPC_SKB_CB(buf)->handle);
1347
1348                 n_ptr = tipc_bclink_retransmit_to();
1349                 tipc_node_lock(n_ptr);
1350
1351                 tipc_addr_string_fill(addr_string, n_ptr->addr);
1352                 pr_info("Broadcast link info for %s\n", addr_string);
1353                 pr_info("Reception permitted: %d,  Acked: %u\n",
1354                         n_ptr->bclink.recv_permitted,
1355                         n_ptr->bclink.acked);
1356                 pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
1357                         n_ptr->bclink.last_in,
1358                         n_ptr->bclink.oos_state,
1359                         n_ptr->bclink.last_sent);
1360
1361                 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1362
1363                 tipc_node_unlock(n_ptr);
1364
1365                 l_ptr->stale_count = 0;
1366         }
1367 }
1368
1369 void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1370                           u32 retransmits)
1371 {
1372         struct tipc_msg *msg;
1373
1374         if (!buf)
1375                 return;
1376
1377         msg = buf_msg(buf);
1378
1379         if (tipc_bearer_blocked(l_ptr->b_ptr)) {
1380                 if (l_ptr->retransm_queue_size == 0) {
1381                         l_ptr->retransm_queue_head = msg_seqno(msg);
1382                         l_ptr->retransm_queue_size = retransmits;
1383                 } else {
1384                         pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
1385                                l_ptr->name, l_ptr->retransm_queue_size);
1386                 }
1387                 return;
1388         } else {
1389                 /* Detect repeated retransmit failures on unblocked bearer */
1390                 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1391                         if (++l_ptr->stale_count > 100) {
1392                                 link_retransmit_failure(l_ptr, buf);
1393                                 return;
1394                         }
1395                 } else {
1396                         l_ptr->last_retransmitted = msg_seqno(msg);
1397                         l_ptr->stale_count = 1;
1398                 }
1399         }
1400
1401         while (retransmits && (buf != l_ptr->next_out) && buf) {
1402                 msg = buf_msg(buf);
1403                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1404                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1405                 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1406                 buf = buf->next;
1407                 retransmits--;
1408                 l_ptr->stats.retransmitted++;
1409         }
1410
1411         l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1412 }
1413
1414 /**
1415  * link_insert_deferred_queue - insert deferred messages back into receive chain
1416  */
1417 static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1418                                                   struct sk_buff *buf)
1419 {
1420         u32 seq_no;
1421
1422         if (l_ptr->oldest_deferred_in == NULL)
1423                 return buf;
1424
1425         seq_no = buf_seqno(l_ptr->oldest_deferred_in);
1426         if (seq_no == mod(l_ptr->next_in_no)) {
1427                 l_ptr->newest_deferred_in->next = buf;
1428                 buf = l_ptr->oldest_deferred_in;
1429                 l_ptr->oldest_deferred_in = NULL;
1430                 l_ptr->deferred_inqueue_sz = 0;
1431         }
1432         return buf;
1433 }
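
/*
 * Example (hypothetical sequence numbers): if packets 7-9 sit in the deferred
 * queue and the link now expects packet 7, the whole deferred chain is
 * spliced in ahead of the remaining receive chain, so 7, 8 and 9 are
 * processed before any newer arrivals.
 */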
1434
1435 /**
1436  * link_recv_buf_validate - validate basic format of received message
1437  *
1438  * This routine ensures a TIPC message has an acceptable header, and at least
1439  * as much data as the header indicates it should.  The routine also ensures
1440  * that the entire message header is stored in the main fragment of the message
1441  * buffer, to simplify future access to message header fields.
1442  *
1443  * Note: Having extra info present in the message header or data areas is OK.
1444  * TIPC will ignore the excess, under the assumption that it is optional info
1445  * introduced by a later release of the protocol.
1446  */
1447 static int link_recv_buf_validate(struct sk_buff *buf)
1448 {
1449         static u32 min_data_hdr_size[8] = {
1450                 SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
1451                 MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
1452                 };
1453
1454         struct tipc_msg *msg;
1455         u32 tipc_hdr[2];
1456         u32 size;
1457         u32 hdr_size;
1458         u32 min_hdr_size;
1459
1460         if (unlikely(buf->len < MIN_H_SIZE))
1461                 return 0;
1462
1463         msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
1464         if (msg == NULL)
1465                 return 0;
1466
1467         if (unlikely(msg_version(msg) != TIPC_VERSION))
1468                 return 0;
1469
1470         size = msg_size(msg);
1471         hdr_size = msg_hdr_sz(msg);
1472         min_hdr_size = msg_isdata(msg) ?
1473                 min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
1474
1475         if (unlikely((hdr_size < min_hdr_size) ||
1476                      (size < hdr_size) ||
1477                      (buf->len < size) ||
1478                      (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
1479                 return 0;
1480
1481         return pskb_may_pull(buf, hdr_size);
1482 }
1483
1484 /**
1485  * tipc_recv_msg - process TIPC messages arriving from off-node
1486  * @head: pointer to message buffer chain
1487  * @tb_ptr: pointer to bearer message arrived on
1488  *
1489  * Invoked with no locks held.  Bearer pointer must point to a valid bearer
1490  * structure (i.e. cannot be NULL), but bearer can be inactive.
1491  */
1492 void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
1493 {
1494         read_lock_bh(&tipc_net_lock);
1495         while (head) {
1496                 struct tipc_node *n_ptr;
1497                 struct tipc_link *l_ptr;
1498                 struct sk_buff *crs;
1499                 struct sk_buff *buf = head;
1500                 struct tipc_msg *msg;
1501                 u32 seq_no;
1502                 u32 ackd;
1503                 u32 released = 0;
1504                 int type;
1505
1506                 head = head->next;
1507
1508                 /* Ensure bearer is still enabled */
1509                 if (unlikely(!b_ptr->active))
1510                         goto cont;
1511
1512                 /* Ensure message is well-formed */
1513                 if (unlikely(!link_recv_buf_validate(buf)))
1514                         goto cont;
1515
1516                 /* Ensure message data is a single contiguous unit */
1517                 if (unlikely(skb_linearize(buf)))
1518                         goto cont;
1519
1520                 /* Handle arrival of a non-unicast link message */
1521                 msg = buf_msg(buf);
1522
1523                 if (unlikely(msg_non_seq(msg))) {
1524                         if (msg_user(msg) ==  LINK_CONFIG)
1525                                 tipc_disc_recv_msg(buf, b_ptr);
1526                         else
1527                                 tipc_bclink_recv_pkt(buf);
1528                         continue;
1529                 }
1530
1531                 /* Discard unicast link messages destined for another node */
1532                 if (unlikely(!msg_short(msg) &&
1533                              (msg_destnode(msg) != tipc_own_addr)))
1534                         goto cont;
1535
1536                 /* Locate neighboring node that sent message */
1537                 n_ptr = tipc_node_find(msg_prevnode(msg));
1538                 if (unlikely(!n_ptr))
1539                         goto cont;
1540                 tipc_node_lock(n_ptr);
1541
1542                 /* Locate unicast link endpoint that should handle message */
1543                 l_ptr = n_ptr->links[b_ptr->identity];
1544                 if (unlikely(!l_ptr)) {
1545                         tipc_node_unlock(n_ptr);
1546                         goto cont;
1547                 }
1548
1549                 /* Verify that communication with node is currently allowed */
1550                 if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
1551                         msg_user(msg) == LINK_PROTOCOL &&
1552                         (msg_type(msg) == RESET_MSG ||
1553                                         msg_type(msg) == ACTIVATE_MSG) &&
1554                         !msg_redundant_link(msg))
1555                         n_ptr->block_setup &= ~WAIT_PEER_DOWN;
1556
1557                 if (n_ptr->block_setup) {
1558                         tipc_node_unlock(n_ptr);
1559                         goto cont;
1560                 }
1561
1562                 /* Validate message sequence number info */
1563                 seq_no = msg_seqno(msg);
1564                 ackd = msg_ack(msg);
1565
1566                 /* Release acked messages */
1567                 if (n_ptr->bclink.recv_permitted)
1568                         tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1569
1570                 crs = l_ptr->first_out;
1571                 while ((crs != l_ptr->next_out) &&
1572                        less_eq(buf_seqno(crs), ackd)) {
1573                         struct sk_buff *next = crs->next;
1574
1575                         kfree_skb(crs);
1576                         crs = next;
1577                         released++;
1578                 }
1579                 if (released) {
1580                         l_ptr->first_out = crs;
1581                         l_ptr->out_queue_size -= released;
1582                 }
1583
1584                 /* Try sending any messages link endpoint has pending */
1585                 if (unlikely(l_ptr->next_out))
1586                         tipc_link_push_queue(l_ptr);
1587                 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1588                         tipc_link_wakeup_ports(l_ptr, 0);
1589                 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1590                         l_ptr->stats.sent_acks++;
1591                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1592                 }
1593
1594                 /* Now (finally!) process the incoming message */
1595 protocol_check:
1596                 if (likely(link_working_working(l_ptr))) {
1597                         if (likely(seq_no == mod(l_ptr->next_in_no))) {
1598                                 l_ptr->next_in_no++;
1599                                 if (unlikely(l_ptr->oldest_deferred_in))
1600                                         head = link_insert_deferred_queue(l_ptr,
1601                                                                           head);
1602 deliver:
1603                                 if (likely(msg_isdata(msg))) {
1604                                         tipc_node_unlock(n_ptr);
1605                                         tipc_port_recv_msg(buf);
1606                                         continue;
1607                                 }
1608                                 switch (msg_user(msg)) {
1609                                         int ret;
1610                                 case MSG_BUNDLER:
1611                                         l_ptr->stats.recv_bundles++;
1612                                         l_ptr->stats.recv_bundled +=
1613                                                 msg_msgcnt(msg);
1614                                         tipc_node_unlock(n_ptr);
1615                                         tipc_link_recv_bundle(buf);
1616                                         continue;
1617                                 case NAME_DISTRIBUTOR:
1618                                         n_ptr->bclink.recv_permitted = true;
1619                                         tipc_node_unlock(n_ptr);
1620                                         tipc_named_recv(buf);
1621                                         continue;
1622                                 case BCAST_PROTOCOL:
1623                                         tipc_link_recv_sync(n_ptr, buf);
1624                                         tipc_node_unlock(n_ptr);
1625                                         continue;
1626                                 case CONN_MANAGER:
1627                                         tipc_node_unlock(n_ptr);
1628                                         tipc_port_recv_proto_msg(buf);
1629                                         continue;
1630                                 case MSG_FRAGMENTER:
1631                                         l_ptr->stats.recv_fragments++;
1632                                         ret = tipc_link_recv_fragment(
1633                                                 &l_ptr->defragm_buf,
1634                                                 &buf, &msg);
1635                                         if (ret == 1) {
1636                                                 l_ptr->stats.recv_fragmented++;
1637                                                 goto deliver;
1638                                         }
1639                                         if (ret == -1)
1640                                                 l_ptr->next_in_no--;
1641                                         break;
1642                                 case CHANGEOVER_PROTOCOL:
1643                                         type = msg_type(msg);
1644                                         if (link_recv_changeover_msg(&l_ptr,
1645                                                                      &buf)) {
1646                                                 msg = buf_msg(buf);
1647                                                 seq_no = msg_seqno(msg);
1648                                                 if (type == ORIGINAL_MSG)
1649                                                         goto deliver;
1650                                                 goto protocol_check;
1651                                         }
1652                                         break;
1653                                 default:
1654                                         kfree_skb(buf);
1655                                         buf = NULL;
1656                                         break;
1657                                 }
1658                                 tipc_node_unlock(n_ptr);
1659                                 tipc_net_route_msg(buf);
1660                                 continue;
1661                         }
1662                         link_handle_out_of_seq_msg(l_ptr, buf);
1663                         head = link_insert_deferred_queue(l_ptr, head);
1664                         tipc_node_unlock(n_ptr);
1665                         continue;
1666                 }
1667
1668                 /* Link is not in state WORKING_WORKING */
1669                 if (msg_user(msg) == LINK_PROTOCOL) {
1670                         link_recv_proto_msg(l_ptr, buf);
1671                         head = link_insert_deferred_queue(l_ptr, head);
1672                         tipc_node_unlock(n_ptr);
1673                         continue;
1674                 }
1675
1676                 /* Traffic message. Conditionally activate link */
1677                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1678
1679                 if (link_working_working(l_ptr)) {
1680                         /* Re-insert buffer in front of queue */
1681                         buf->next = head;
1682                         head = buf;
1683                         tipc_node_unlock(n_ptr);
1684                         continue;
1685                 }
1686                 tipc_node_unlock(n_ptr);
1687 cont:
1688                 kfree_skb(buf);
1689         }
1690         read_unlock_bh(&tipc_net_lock);
1691 }
1692
1693 /**
1694  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1695  *
1696  * Returns increase in queue length (i.e. 0 or 1)
1697  */
1698 u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
1699                         struct sk_buff *buf)
1700 {
1701         struct sk_buff *queue_buf;
1702         struct sk_buff **prev;
1703         u32 seq_no = buf_seqno(buf);
1704
1705         buf->next = NULL;
1706
1707         /* Empty queue? */
1708         if (*head == NULL) {
1709                 *head = *tail = buf;
1710                 return 1;
1711         }
1712
1713         /* Last? */
1714         if (less(buf_seqno(*tail), seq_no)) {
1715                 (*tail)->next = buf;
1716                 *tail = buf;
1717                 return 1;
1718         }
1719
1720         /* Locate insertion point in queue, then insert; discard if duplicate */
1721         prev = head;
1722         queue_buf = *head;
1723         for (;;) {
1724                 u32 curr_seqno = buf_seqno(queue_buf);
1725
1726                 if (seq_no == curr_seqno) {
1727                         kfree_skb(buf);
1728                         return 0;
1729                 }
1730
1731                 if (less(seq_no, curr_seqno))
1732                         break;
1733
1734                 prev = &queue_buf->next;
1735                 queue_buf = queue_buf->next;
1736         }
1737
1738         buf->next = queue_buf;
1739         *prev = buf;
1740         return 1;
1741 }
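
/*
 * Example (hypothetical sequence numbers): with a deferred queue holding
 * packets 5, 6 and 8, an arriving packet 7 is linked in between 6 and 8 and
 * the function returns 1; a second copy of packet 6 is freed as a duplicate
 * and the function returns 0, leaving the queue length unchanged.
 */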
1742
1743 /*
1744  * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1745  */
1746 static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1747                                        struct sk_buff *buf)
1748 {
1749         u32 seq_no = buf_seqno(buf);
1750
1751         if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1752                 link_recv_proto_msg(l_ptr, buf);
1753                 return;
1754         }
1755
1756         /* Record OOS packet arrival (force mismatch on next timeout) */
1757         l_ptr->checkpoint--;
1758
1759         /*
1760          * Discard packet if a duplicate; otherwise add it to deferred queue
1761          * and notify peer of gap as per protocol specification
1762          */
1763         if (less(seq_no, mod(l_ptr->next_in_no))) {
1764                 l_ptr->stats.duplicates++;
1765                 kfree_skb(buf);
1766                 return;
1767         }
1768
1769         if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1770                                 &l_ptr->newest_deferred_in, buf)) {
1771                 l_ptr->deferred_inqueue_sz++;
1772                 l_ptr->stats.deferred_recv++;
1773                 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1774                         tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1775         } else
1776                 l_ptr->stats.duplicates++;
1777 }
1778
1779 /*
1780  * Send protocol message to the other endpoint.
1781  */
1782 void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
1783                               int probe_msg, u32 gap, u32 tolerance,
1784                               u32 priority, u32 ack_mtu)
1785 {
1786         struct sk_buff *buf = NULL;
1787         struct tipc_msg *msg = l_ptr->pmsg;
1788         u32 msg_size = sizeof(l_ptr->proto_msg);
1789         int r_flag;
1790
1791         /* Discard any previous message that was deferred due to congestion */
1792         if (l_ptr->proto_msg_queue) {
1793                 kfree_skb(l_ptr->proto_msg_queue);
1794                 l_ptr->proto_msg_queue = NULL;
1795         }
1796
1797         if (link_blocked(l_ptr))
1798                 return;
1799
1800         /* Abort non-RESET send if communication with node is prohibited */
1801         if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
1802                 return;
1803
1804         /* Create protocol message with "out-of-sequence" sequence number */
1805         msg_set_type(msg, msg_typ);
1806         msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1807         msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1808         msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1809
1810         if (msg_typ == STATE_MSG) {
1811                 u32 next_sent = mod(l_ptr->next_out_no);
1812
1813                 if (!tipc_link_is_up(l_ptr))
1814                         return;
1815                 if (l_ptr->next_out)
1816                         next_sent = buf_seqno(l_ptr->next_out);
1817                 msg_set_next_sent(msg, next_sent);
1818                 if (l_ptr->oldest_deferred_in) {
1819                         u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
1820                         gap = mod(rec - mod(l_ptr->next_in_no));
1821                 }
1822                 msg_set_seq_gap(msg, gap);
1823                 if (gap)
1824                         l_ptr->stats.sent_nacks++;
1825                 msg_set_link_tolerance(msg, tolerance);
1826                 msg_set_linkprio(msg, priority);
1827                 msg_set_max_pkt(msg, ack_mtu);
1828                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1829                 msg_set_probe(msg, probe_msg != 0);
1830                 if (probe_msg) {
1831                         u32 mtu = l_ptr->max_pkt;
1832
1833                         if ((mtu < l_ptr->max_pkt_target) &&
1834                             link_working_working(l_ptr) &&
1835                             l_ptr->fsm_msg_cnt) {
1836                                 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1837                                 if (l_ptr->max_pkt_probes == 10) {
1838                                         l_ptr->max_pkt_target = (msg_size - 4);
1839                                         l_ptr->max_pkt_probes = 0;
1840                                         msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1841                                 }
1842                                 l_ptr->max_pkt_probes++;
1843                         }
1844
1845                         l_ptr->stats.sent_probes++;
1846                 }
1847                 l_ptr->stats.sent_states++;
1848         } else {                /* RESET_MSG or ACTIVATE_MSG */
1849                 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
1850                 msg_set_seq_gap(msg, 0);
1851                 msg_set_next_sent(msg, 1);
1852                 msg_set_probe(msg, 0);
1853                 msg_set_link_tolerance(msg, l_ptr->tolerance);
1854                 msg_set_linkprio(msg, l_ptr->priority);
1855                 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
1856         }
1857
1858         r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
1859         msg_set_redundant_link(msg, r_flag);
1860         msg_set_linkprio(msg, l_ptr->priority);
1861         msg_set_size(msg, msg_size);
1862
1863         msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
1864
1865         buf = tipc_buf_acquire(msg_size);
1866         if (!buf)
1867                 return;
1868
1869         skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1870         buf->priority = TC_PRIO_CONTROL;
1871
1872         /* Defer message if bearer is already blocked */
1873         if (tipc_bearer_blocked(l_ptr->b_ptr)) {
1874                 l_ptr->proto_msg_queue = buf;
1875                 return;
1876         }
1877
1878         tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
1879         l_ptr->unacked_window = 0;
1880         kfree_skb(buf);
1881 }
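
/*
 * Probe size illustration (hypothetical values): with max_pkt currently 1500
 * and max_pkt_target 2000, a probe STATE_MSG is padded to
 * (1500 + (2000 - 1500)/2 + 2) & ~3 = 1752 octets, i.e. roughly halfway
 * between the two, rounded down to a multiple of four. If ten such probes go
 * unconfirmed, max_pkt_target is lowered to 1752 - 4 and the size is
 * recomputed, so the search converges on the largest packet the bearer
 * actually carries.
 */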
1882
1883 /*
1884  * Receive protocol message:
1885  * Note that the network plane id propagates through the network, and may
1886  * change at any time. The node with the lowest address rules.
1887  */
1888 static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
1889 {
1890         u32 rec_gap = 0;
1891         u32 max_pkt_info;
1892         u32 max_pkt_ack;
1893         u32 msg_tol;
1894         struct tipc_msg *msg = buf_msg(buf);
1895
1896         if (link_blocked(l_ptr))
1897                 goto exit;
1898
1899         /* record unnumbered packet arrival (force mismatch on next timeout) */
1900         l_ptr->checkpoint--;
1901
1902         if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
1903                 if (tipc_own_addr > msg_prevnode(msg))
1904                         l_ptr->b_ptr->net_plane = msg_net_plane(msg);
1905
1906         l_ptr->owner->permit_changeover = msg_redundant_link(msg);
1907
1908         switch (msg_type(msg)) {
1909
1910         case RESET_MSG:
1911                 if (!link_working_unknown(l_ptr) &&
1912                     (l_ptr->peer_session != INVALID_SESSION)) {
1913                         if (less_eq(msg_session(msg), l_ptr->peer_session))
1914                                 break; /* duplicate or old reset: ignore */
1915                 }
1916
1917                 if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
1918                                 link_working_unknown(l_ptr))) {
1919                         /*
1920                          * peer has lost contact -- don't allow peer's links
1921                          * to reactivate before we recognize loss & clean up
1922                          */
1923                         l_ptr->owner->block_setup = WAIT_NODE_DOWN;
1924                 }
1925
1926                 link_state_event(l_ptr, RESET_MSG);
1927
1928                 /* fall thru' */
1929         case ACTIVATE_MSG:
1930                 /* Update link settings according to the other endpoint's values */
1931                 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
1932
1933                 msg_tol = msg_link_tolerance(msg);
1934                 if (msg_tol > l_ptr->tolerance)
1935                         link_set_supervision_props(l_ptr, msg_tol);
1936
1937                 if (msg_linkprio(msg) > l_ptr->priority)
1938                         l_ptr->priority = msg_linkprio(msg);
1939
1940                 max_pkt_info = msg_max_pkt(msg);
1941                 if (max_pkt_info) {
1942                         if (max_pkt_info < l_ptr->max_pkt_target)
1943                                 l_ptr->max_pkt_target = max_pkt_info;
1944                         if (l_ptr->max_pkt > l_ptr->max_pkt_target)
1945                                 l_ptr->max_pkt = l_ptr->max_pkt_target;
1946                 } else {
1947                         l_ptr->max_pkt = l_ptr->max_pkt_target;
1948                 }
1949
1950                 /* Synchronize broadcast link info, if not done previously */
1951                 if (!tipc_node_is_up(l_ptr->owner)) {
1952                         l_ptr->owner->bclink.last_sent =
1953                                 l_ptr->owner->bclink.last_in =
1954                                 msg_last_bcast(msg);
1955                         l_ptr->owner->bclink.oos_state = 0;
1956                 }
1957
1958                 l_ptr->peer_session = msg_session(msg);
1959                 l_ptr->peer_bearer_id = msg_bearer_id(msg);
1960
1961                 if (msg_type(msg) == ACTIVATE_MSG)
1962                         link_state_event(l_ptr, ACTIVATE_MSG);
1963                 break;
1964         case STATE_MSG:
1965
1966                 msg_tol = msg_link_tolerance(msg);
1967                 if (msg_tol)
1968                         link_set_supervision_props(l_ptr, msg_tol);
1969
1970                 if (msg_linkprio(msg) &&
1971                     (msg_linkprio(msg) != l_ptr->priority)) {
1972                         pr_warn("%s<%s>, priority change %u->%u\n",
1973                                 link_rst_msg, l_ptr->name, l_ptr->priority,
1974                                 msg_linkprio(msg));
1975                         l_ptr->priority = msg_linkprio(msg);
1976                         tipc_link_reset(l_ptr); /* Enforce change to take effect */
1977                         break;
1978                 }
1979                 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1980                 l_ptr->stats.recv_states++;
1981                 if (link_reset_unknown(l_ptr))
1982                         break;
1983
1984                 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
1985                         rec_gap = mod(msg_next_sent(msg) -
1986                                       mod(l_ptr->next_in_no));
1987                 }
1988
1989                 max_pkt_ack = msg_max_pkt(msg);
1990                 if (max_pkt_ack > l_ptr->max_pkt) {
1991                         l_ptr->max_pkt = max_pkt_ack;
1992                         l_ptr->max_pkt_probes = 0;
1993                 }
1994
1995                 max_pkt_ack = 0;
1996                 if (msg_probe(msg)) {
1997                         l_ptr->stats.recv_probes++;
1998                         if (msg_size(msg) > sizeof(l_ptr->proto_msg))
1999                                 max_pkt_ack = msg_size(msg);
2000                 }
2001
2002                 /* Protocol message before retransmits, reduce loss risk */
2003                 if (l_ptr->owner->bclink.recv_permitted)
2004                         tipc_bclink_update_link_state(l_ptr->owner,
2005                                                       msg_last_bcast(msg));
2006
2007                 if (rec_gap || (msg_probe(msg))) {
2008                         tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2009                                                  0, rec_gap, 0, 0, max_pkt_ack);
2010                 }
2011                 if (msg_seq_gap(msg)) {
2012                         l_ptr->stats.recv_nacks++;
2013                         tipc_link_retransmit(l_ptr, l_ptr->first_out,
2014                                              msg_seq_gap(msg));
2015                 }
2016                 break;
2017         }
2018 exit:
2019         kfree_skb(buf);
2020 }
2021
2022
2023 /*
2024  * tipc_link_tunnel(): Send one message via a link belonging to
2025  * another bearer. Owner node is locked.
2026  */
2027 static void tipc_link_tunnel(struct tipc_link *l_ptr,
2028                              struct tipc_msg *tunnel_hdr, struct tipc_msg *msg,
2029                              u32 selector)
2030 {
2031         struct tipc_link *tunnel;
2032         struct sk_buff *buf;
2033         u32 length = msg_size(msg);
2034
2035         tunnel = l_ptr->owner->active_links[selector & 1];
2036         if (!tipc_link_is_up(tunnel)) {
2037                 pr_warn("%stunnel link no longer available\n", link_co_err);
2038                 return;
2039         }
2040         msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2041         buf = tipc_buf_acquire(length + INT_H_SIZE);
2042         if (!buf) {
2043                 pr_warn("%sunable to send tunnel msg\n", link_co_err);
2044                 return;
2045         }
2046         skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
2047         skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
2048         tipc_link_send_buf(tunnel, buf);
2049 }
2050
2051
2052
2053 /*
2054  * tipc_link_changeover(): Send whole message queue via the remaining link
2055  *               Owner node is locked.
2056  */
2057 void tipc_link_changeover(struct tipc_link *l_ptr)
2058 {
2059         u32 msgcount = l_ptr->out_queue_size;
2060         struct sk_buff *crs = l_ptr->first_out;
2061         struct tipc_link *tunnel = l_ptr->owner->active_links[0];
2062         struct tipc_msg tunnel_hdr;
2063         int split_bundles;
2064
2065         if (!tunnel)
2066                 return;
2067
2068         if (!l_ptr->owner->permit_changeover) {
2069                 pr_warn("%speer did not permit changeover\n", link_co_err);
2070                 return;
2071         }
2072
2073         tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2074                  ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
2075         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2076         msg_set_msgcnt(&tunnel_hdr, msgcount);
2077
2078         if (!l_ptr->first_out) {
2079                 struct sk_buff *buf;
2080
2081                 buf = tipc_buf_acquire(INT_H_SIZE);
2082                 if (buf) {
2083                         skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
2084                         msg_set_size(&tunnel_hdr, INT_H_SIZE);
2085                         tipc_link_send_buf(tunnel, buf);
2086                 } else {
2087                         pr_warn("%sunable to send changeover msg\n",
2088                                 link_co_err);
2089                 }
2090                 return;
2091         }
2092
2093         split_bundles = (l_ptr->owner->active_links[0] !=
2094                          l_ptr->owner->active_links[1]);
2095
2096         while (crs) {
2097                 struct tipc_msg *msg = buf_msg(crs);
2098
2099                 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2100                         struct tipc_msg *m = msg_get_wrapped(msg);
2101                         unchar *pos = (unchar *)m;
2102
2103                         msgcount = msg_msgcnt(msg);
2104                         while (msgcount--) {
2105                                 msg_set_seqno(m, msg_seqno(msg));
2106                                 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2107                                                  msg_link_selector(m));
2108                                 pos += align(msg_size(m));
2109                                 m = (struct tipc_msg *)pos;
2110                         }
2111                 } else {
2112                         tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2113                                          msg_link_selector(msg));
2114                 }
2115                 crs = crs->next;
2116         }
2117 }
2118
2119 void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
2120 {
2121         struct sk_buff *iter;
2122         struct tipc_msg tunnel_hdr;
2123
2124         tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2125                  DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
2126         msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2127         msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2128         iter = l_ptr->first_out;
2129         while (iter) {
2130                 struct sk_buff *outbuf;
2131                 struct tipc_msg *msg = buf_msg(iter);
2132                 u32 length = msg_size(msg);
2133
2134                 if (msg_user(msg) == MSG_BUNDLER)
2135                         msg_set_type(msg, CLOSED_MSG);
2136                 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));   /* Update */
2137                 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2138                 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2139                 outbuf = tipc_buf_acquire(length + INT_H_SIZE);
2140                 if (outbuf == NULL) {
2141                         pr_warn("%sunable to send duplicate msg\n",
2142                                 link_co_err);
2143                         return;
2144                 }
2145                 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
2146                 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
2147                                                length);
2148                 tipc_link_send_buf(tunnel, outbuf);
2149                 if (!tipc_link_is_up(l_ptr))
2150                         return;
2151                 iter = iter->next;
2152         }
2153 }
2154
2155 /**
2156  * buf_extract - extracts embedded TIPC message from another message
2157  * @skb: encapsulating message buffer
2158  * @from_pos: offset to extract from
2159  *
2160  * Returns a new message buffer containing an embedded message.  The
2161  * encapsulating message itself is left unchanged.
2162  */
2163 static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2164 {
2165         struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2166         u32 size = msg_size(msg);
2167         struct sk_buff *eb;
2168
2169         eb = tipc_buf_acquire(size);
2170         if (eb)
2171                 skb_copy_to_linear_data(eb, msg, size);
2172         return eb;
2173 }
2174
2175 /*
2176  *  link_recv_changeover_msg(): Receive tunneled packet sent via another link.
2177  *  Node is locked. Returns 1 with *buf set to the extracted buffer, else 0.
2178  */
2179 static int link_recv_changeover_msg(struct tipc_link **l_ptr,
2180                                     struct sk_buff **buf)
2181 {
2182         struct sk_buff *tunnel_buf = *buf;
2183         struct tipc_link *dest_link;
2184         struct tipc_msg *msg;
2185         struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2186         u32 msg_typ = msg_type(tunnel_msg);
2187         u32 msg_count = msg_msgcnt(tunnel_msg);
2188         u32 bearer_id = msg_bearer_id(tunnel_msg);
2189
2190         if (bearer_id >= MAX_BEARERS)
2191                 goto exit;
2192         dest_link = (*l_ptr)->owner->links[bearer_id];
2193         if (!dest_link)
2194                 goto exit;
2195         if (dest_link == *l_ptr) {
2196                 pr_err("Unexpected changeover message on link <%s>\n",
2197                        (*l_ptr)->name);
2198                 goto exit;
2199         }
2200         *l_ptr = dest_link;
2201         msg = msg_get_wrapped(tunnel_msg);
2202
2203         if (msg_typ == DUPLICATE_MSG) {
2204                 if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
2205                         goto exit;
2206                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2207                 if (*buf == NULL) {
2208                         pr_warn("%sduplicate msg dropped\n", link_co_err);
2209                         goto exit;
2210                 }
2211                 kfree_skb(tunnel_buf);
2212                 return 1;
2213         }
2214
2215         /* First original message? */
2216         if (tipc_link_is_up(dest_link)) {
2217                 pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
2218                         dest_link->name);
2219                 tipc_link_reset(dest_link);
2220                 dest_link->exp_msg_count = msg_count;
2221                 if (!msg_count)
2222                         goto exit;
2223         } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2224                 dest_link->exp_msg_count = msg_count;
2225                 if (!msg_count)
2226                         goto exit;
2227         }
2228
2229         /* Receive original message */
2230         if (dest_link->exp_msg_count == 0) {
2231                 pr_warn("%sgot too many tunnelled messages\n", link_co_err);
2232                 goto exit;
2233         }
2234         dest_link->exp_msg_count--;
2235         if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2236                 goto exit;
2237         } else {
2238                 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2239                 if (*buf != NULL) {
2240                         kfree_skb(tunnel_buf);
2241                         return 1;
2242                 } else {
2243                         pr_warn("%soriginal msg dropped\n", link_co_err);
2244                 }
2245         }
2246 exit:
2247         *buf = NULL;
2248         kfree_skb(tunnel_buf);
2249         return 0;
2250 }
2251
2252 /*
2253  *  Bundler functionality:
2254  */
2255 void tipc_link_recv_bundle(struct sk_buff *buf)
2256 {
2257         u32 msgcount = msg_msgcnt(buf_msg(buf));
2258         u32 pos = INT_H_SIZE;
2259         struct sk_buff *obuf;
2260
2261         while (msgcount--) {
2262                 obuf = buf_extract(buf, pos);
2263                 if (obuf == NULL) {
2264                         pr_warn("Link unable to unbundle message(s)\n");
2265                         break;
2266                 }
2267                 pos += align(msg_size(buf_msg(obuf)));
2268                 tipc_net_route_msg(obuf);
2269         }
2270         kfree_skb(buf);
2271 }
2272
2273 /*
2274  *  Fragmentation/defragmentation:
2275  */
2276
2277 /*
2278  * link_send_long_buf: Entry for buffers needing fragmentation.
2279  * The buffer is complete, including the total message length.
2280  * Returns user data length.
2281  */
2282 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2283 {
2284         struct sk_buff *buf_chain = NULL;
2285         struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2286         struct tipc_msg *inmsg = buf_msg(buf);
2287         struct tipc_msg fragm_hdr;
2288         u32 insize = msg_size(inmsg);
2289         u32 dsz = msg_data_sz(inmsg);
2290         unchar *crs = buf->data;
2291         u32 rest = insize;
2292         u32 pack_sz = l_ptr->max_pkt;
2293         u32 fragm_sz = pack_sz - INT_H_SIZE;
2294         u32 fragm_no = 0;
2295         u32 destaddr;
2296
2297         if (msg_short(inmsg))
2298                 destaddr = l_ptr->addr;
2299         else
2300                 destaddr = msg_destnode(inmsg);
2301
2302         /* Prepare reusable fragment header: */
2303         tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2304                  INT_H_SIZE, destaddr);
2305
2306         /* Chop up message: */
2307         while (rest > 0) {
2308                 struct sk_buff *fragm;
2309
2310                 if (rest <= fragm_sz) {
2311                         fragm_sz = rest;
2312                         msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2313                 }
2314                 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2315                 if (fragm == NULL) {
2316                         kfree_skb(buf);
2317                         while (buf_chain) {
2318                                 buf = buf_chain;
2319                                 buf_chain = buf_chain->next;
2320                                 kfree_skb(buf);
2321                         }
2322                         return -ENOMEM;
2323                 }
2324                 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2325                 fragm_no++;
2326                 msg_set_fragm_no(&fragm_hdr, fragm_no);
2327                 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2328                 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2329                                                fragm_sz);
2330                 buf_chain_tail->next = fragm;
2331                 buf_chain_tail = fragm;
2332
2333                 rest -= fragm_sz;
2334                 crs += fragm_sz;
2335                 msg_set_type(&fragm_hdr, FRAGMENT);
2336         }
2337         kfree_skb(buf);
2338
2339         /* Append chain of fragments to send queue & send them */
2340         l_ptr->long_msg_seq_no++;
2341         link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2342         l_ptr->stats.sent_fragments += fragm_no;
2343         l_ptr->stats.sent_fragmented++;
2344         tipc_link_push_queue(l_ptr);
2345
2346         return dsz;
2347 }
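
/*
 * Fragmentation example (hypothetical sizes, assuming an INT_H_SIZE of 40
 * octets): with max_pkt = 1500 the fragment payload is 1460 octets, so a
 * 4000 octet message is chopped into three fragments carrying 1460, 1460 and
 * 1080 octets of the original buffer; each fragment buffer is
 * fragm_sz + INT_H_SIZE octets long and the final one is marked LAST_FRAGMENT.
 */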
2348
2349 /*
2350  * A pending message being re-assembled must store certain values
2351  * to handle subsequent fragments correctly. The following functions
2352  * help store these values in unused, available fields in the
2353  * pending message. This makes dynamic memory allocation unnecessary.
2354  */
2355 static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2356 {
2357         msg_set_seqno(buf_msg(buf), seqno);
2358 }
2359
2360 static u32 get_fragm_size(struct sk_buff *buf)
2361 {
2362         return msg_ack(buf_msg(buf));
2363 }
2364
2365 static void set_fragm_size(struct sk_buff *buf, u32 sz)
2366 {
2367         msg_set_ack(buf_msg(buf), sz);
2368 }
2369
2370 static u32 get_expected_frags(struct sk_buff *buf)
2371 {
2372         return msg_bcast_ack(buf_msg(buf));
2373 }
2374
2375 static void set_expected_frags(struct sk_buff *buf, u32 exp)
2376 {
2377         msg_set_bcast_ack(buf_msg(buf), exp);
2378 }
2379
2380 /*
2381  * tipc_link_recv_fragment(): Called with node lock on. Returns 1 and hands
2382  * back the reassembled buffer via *fb when complete, 0 otherwise, -1 on error.
2383  */
2384 int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2385                             struct tipc_msg **m)
2386 {
2387         struct sk_buff *prev = NULL;
2388         struct sk_buff *fbuf = *fb;
2389         struct tipc_msg *fragm = buf_msg(fbuf);
2390         struct sk_buff *pbuf = *pending;
2391         u32 long_msg_seq_no = msg_long_msgno(fragm);
2392
2393         *fb = NULL;
2394
2395         /* Is there an incomplete message waiting for this fragment? */
2396         while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
2397                         (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2398                 prev = pbuf;
2399                 pbuf = pbuf->next;
2400         }
2401
2402         if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2403                 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2404                 u32 msg_sz = msg_size(imsg);
2405                 u32 fragm_sz = msg_data_sz(fragm);
2406                 u32 exp_fragm_cnt;
2407                 u32 max =  TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;
2408
2409                 if (msg_type(imsg) == TIPC_MCAST_MSG)
2410                         max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2411                 if (fragm_sz == 0 || msg_size(imsg) > max) {
2412                         kfree_skb(fbuf);
2413                         return 0;
2414                 }
2415                 exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
2416                 pbuf = tipc_buf_acquire(msg_size(imsg));
2417                 if (pbuf != NULL) {
2418                         pbuf->next = *pending;
2419                         *pending = pbuf;
2420                         skb_copy_to_linear_data(pbuf, imsg,
2421                                                 msg_data_sz(fragm));
2422                         /*  Prepare buffer for subsequent fragments. */
2423                         set_long_msg_seqno(pbuf, long_msg_seq_no);
2424                         set_fragm_size(pbuf, fragm_sz);
2425                         set_expected_frags(pbuf, exp_fragm_cnt - 1);
2426                 } else {
2427                         pr_debug("Link unable to reassemble fragmented message\n");
2428                         kfree_skb(fbuf);
2429                         return -1;
2430                 }
2431                 kfree_skb(fbuf);
2432                 return 0;
2433         } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2434                 u32 dsz = msg_data_sz(fragm);
2435                 u32 fsz = get_fragm_size(pbuf);
2436                 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2437                 u32 exp_frags = get_expected_frags(pbuf) - 1;
2438                 skb_copy_to_linear_data_offset(pbuf, crs,
2439                                                msg_data(fragm), dsz);
2440                 kfree_skb(fbuf);
2441
2442                 /* Is message complete? */
2443                 if (exp_frags == 0) {
2444                         if (prev)
2445                                 prev->next = pbuf->next;
2446                         else
2447                                 *pending = pbuf->next;
2448                         msg_reset_reroute_cnt(buf_msg(pbuf));
2449                         *fb = pbuf;
2450                         *m = buf_msg(pbuf);
2451                         return 1;
2452                 }
2453                 set_expected_frags(pbuf, exp_frags);
2454                 return 0;
2455         }
2456         kfree_skb(fbuf);
2457         return 0;
2458 }
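
/*
 * Reassembly example (hypothetical sizes): for a 4000 octet original message
 * arriving in 1460 octet fragments, exp_fragm_cnt = 4000/1460 + !!(4000 % 1460)
 * = 3, and fragment number n is copied to offset (n - 1) * 1460 in the pending
 * buffer; once the expected-fragments counter reaches zero the pending buffer
 * is unlinked from the pending list and handed back as the complete message.
 */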
2459
2460 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2461 {
2462         if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
2463                 return;
2464
2465         l_ptr->tolerance = tolerance;
2466         l_ptr->continuity_interval =
2467                 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2468         l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2469 }
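
/*
 * Example (hypothetical tolerance values): a tolerance of 1500 ms gives a
 * continuity interval of 375 ms and an abort limit of 1500 / (375 / 4) = 16
 * missed supervision checks, while a tolerance of 3000 ms is capped at a
 * 500 ms interval and gives an abort limit of 3000 / 125 = 24.
 */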
2470
2471 void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2472 {
2473         /* Data messages from this node, including FIRST_FRAGM */
2474         l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
2475         l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
2476         l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
2477         l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
2478         /* Transiting data messages, including FIRST_FRAGM */
2479         l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
2480         l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
2481         l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
2482         l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
2483         l_ptr->queue_limit[CONN_MANAGER] = 1200;
2484         l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2485         l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2486         /* FRAGMENT and LAST_FRAGMENT packets */
2487         l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2488 }
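
/*
 * Example (hypothetical window): with a window of 50 packets the outbound
 * queue limits become 50 for TIPC_LOW_IMPORTANCE, 64 for MEDIUM, 80 for HIGH
 * and 96 for CRITICAL traffic ((50 / 3) * 4, * 5 and * 6 with integer
 * arithmetic), while the transit and control message limits keep the fixed
 * values assigned above.
 */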
2489
2490 /**
2491  * link_find_link - locate link by name
2492  * @name: ptr to link name string
2493  * @node: ptr to area to be filled with ptr to associated node
2494  *
2495  * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2496  * this also prevents link deletion.
2497  *
2498  * Returns pointer to link (or NULL if invalid link name).
2499  */
2500 static struct tipc_link *link_find_link(const char *name,
2501                                         struct tipc_node **node)
2502 {
2503         struct tipc_link *l_ptr;
2504         struct tipc_node *n_ptr;
2505         int i;
2506
2507         list_for_each_entry(n_ptr, &tipc_node_list, list) {
2508                 for (i = 0; i < MAX_BEARERS; i++) {
2509                         l_ptr = n_ptr->links[i];
2510                         if (l_ptr && !strcmp(l_ptr->name, name))
2511                                 goto found;
2512                 }
2513         }
2514         l_ptr = NULL;
2515         n_ptr = NULL;
2516 found:
2517         *node = n_ptr;
2518         return l_ptr;
2519 }
2520
2521 /**
2522  * link_value_is_valid -- validate proposed link tolerance/priority/window
2523  *
2524  * @cmd: value type (TIPC_CMD_SET_LINK_*)
2525  * @new_value: the new value
2526  *
2527  * Returns 1 if value is within range, 0 if not.
2528  */
2529 static int link_value_is_valid(u16 cmd, u32 new_value)
2530 {
2531         switch (cmd) {
2532         case TIPC_CMD_SET_LINK_TOL:
2533                 return (new_value >= TIPC_MIN_LINK_TOL) &&
2534                         (new_value <= TIPC_MAX_LINK_TOL);
2535         case TIPC_CMD_SET_LINK_PRI:
2536                 return (new_value <= TIPC_MAX_LINK_PRI);
2537         case TIPC_CMD_SET_LINK_WINDOW:
2538                 return (new_value >= TIPC_MIN_LINK_WIN) &&
2539                         (new_value <= TIPC_MAX_LINK_WIN);
2540         }
2541         return 0;
2542 }
2543
2544 /**
2545  * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
2546  * @name: ptr to link, bearer, or media name
2547  * @new_value: new value of link, bearer, or media setting
2548  * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2549  *
2550  * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
2551  *
2552  * Returns 0 if value updated and negative value on error.
2553  */
2554 static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
2555 {
2556         struct tipc_node *node;
2557         struct tipc_link *l_ptr;
2558         struct tipc_bearer *b_ptr;
2559         struct tipc_media *m_ptr;
2560         int res = 0;
2561
2562         l_ptr = link_find_link(name, &node);
2563         if (l_ptr) {
2564                 /*
2565                  * acquire node lock for tipc_link_send_proto_msg().
2566                  * see "TIPC locking policy" in net.c.
2567                  */
2568                 tipc_node_lock(node);
2569                 switch (cmd) {
2570                 case TIPC_CMD_SET_LINK_TOL:
2571                         link_set_supervision_props(l_ptr, new_value);
2572                         tipc_link_send_proto_msg(l_ptr,
2573                                 STATE_MSG, 0, 0, new_value, 0, 0);
2574                         break;
2575                 case TIPC_CMD_SET_LINK_PRI:
2576                         l_ptr->priority = new_value;
2577                         tipc_link_send_proto_msg(l_ptr,
2578                                 STATE_MSG, 0, 0, 0, new_value, 0);
2579                         break;
2580                 case TIPC_CMD_SET_LINK_WINDOW:
2581                         tipc_link_set_queue_limits(l_ptr, new_value);
2582                         break;
2583                 default:
2584                         res = -EINVAL;
2585                         break;
2586                 }
2587                 tipc_node_unlock(node);
2588                 return res;
2589         }
2590
2591         b_ptr = tipc_bearer_find(name);
2592         if (b_ptr) {
2593                 switch (cmd) {
2594                 case TIPC_CMD_SET_LINK_TOL:
2595                         b_ptr->tolerance = new_value;
2596                         break;
2597                 case TIPC_CMD_SET_LINK_PRI:
2598                         b_ptr->priority = new_value;
2599                         break;
2600                 case TIPC_CMD_SET_LINK_WINDOW:
2601                         b_ptr->window = new_value;
2602                         break;
2603                 default:
2604                         res = -EINVAL;
2605                         break;
2606                 }
2607                 return res;
2608         }
2609
2610         m_ptr = tipc_media_find(name);
2611         if (!m_ptr)
2612                 return -ENODEV;
2613         switch (cmd) {
2614         case TIPC_CMD_SET_LINK_TOL:
2615                 m_ptr->tolerance = new_value;
2616                 break;
2617         case TIPC_CMD_SET_LINK_PRI:
2618                 m_ptr->priority = new_value;
2619                 break;
2620         case TIPC_CMD_SET_LINK_WINDOW:
2621                 m_ptr->window = new_value;
2622                 break;
2623         default:
2624                 res = -EINVAL;
2625                 break;
2626         }
2627         return res;
2628 }
2629
2630 struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2631                                      u16 cmd)
2632 {
2633         struct tipc_link_config *args;
2634         u32 new_value;
2635         int res;
2636
2637         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2638                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2639
2640         args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2641         new_value = ntohl(args->value);
2642
2643         if (!link_value_is_valid(cmd, new_value))
2644                 return tipc_cfg_reply_error_string(
2645                         "cannot change, value invalid");
2646
2647         if (!strcmp(args->name, tipc_bclink_name)) {
2648                 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2649                     (tipc_bclink_set_queue_limits(new_value) == 0))
2650                         return tipc_cfg_reply_none();
2651                 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2652                                                    " (cannot change setting on broadcast link)");
2653         }
2654
2655         read_lock_bh(&tipc_net_lock);
2656         res = link_cmd_set_value(args->name, new_value, cmd);
2657         read_unlock_bh(&tipc_net_lock);
2658         if (res)
2659                 return tipc_cfg_reply_error_string("cannot change link setting");
2660
2661         return tipc_cfg_reply_none();
2662 }
2663
2664 /**
2665  * link_reset_statistics - reset link statistics
2666  * @l_ptr: pointer to link
2667  */
2668 static void link_reset_statistics(struct tipc_link *l_ptr)
2669 {
2670         memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2671         l_ptr->stats.sent_info = l_ptr->next_out_no;
2672         l_ptr->stats.recv_info = l_ptr->next_in_no;
2673 }
2674
2675 struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2676 {
2677         char *link_name;
2678         struct tipc_link *l_ptr;
2679         struct tipc_node *node;
2680
2681         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2682                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2683
2684         link_name = (char *)TLV_DATA(req_tlv_area);
2685         if (!strcmp(link_name, tipc_bclink_name)) {
2686                 if (tipc_bclink_reset_stats())
2687                         return tipc_cfg_reply_error_string("link not found");
2688                 return tipc_cfg_reply_none();
2689         }
2690
2691         read_lock_bh(&tipc_net_lock);
2692         l_ptr = link_find_link(link_name, &node);
2693         if (!l_ptr) {
2694                 read_unlock_bh(&tipc_net_lock);
2695                 return tipc_cfg_reply_error_string("link not found");
2696         }
2697
2698         tipc_node_lock(node);
2699         link_reset_statistics(l_ptr);
2700         tipc_node_unlock(node);
2701         read_unlock_bh(&tipc_net_lock);
2702         return tipc_cfg_reply_none();
2703 }
2704
2705 /**
2706  * percent - convert count to a percentage of total (rounding up or down)
2707  */
2708 static u32 percent(u32 count, u32 total)
2709 {
2710         return (count * 100 + (total / 2)) / total;
2711 }
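
/*
 * Example: percent(2, 3) evaluates to (200 + 1) / 3 = 67, i.e. the result is
 * rounded to the nearest whole percent rather than truncated.
 */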
2712
2713 /**
2714  * tipc_link_stats - print link statistics
2715  * @name: link name
2716  * @buf: print buffer area
2717  * @buf_size: size of print buffer area
2718  *
2719  * Returns length of print buffer data string (or 0 if error)
2720  */
2721 static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2722 {
2723         struct tipc_link *l;
2724         struct tipc_stats *s;
2725         struct tipc_node *node;
2726         char *status;
2727         u32 profile_total = 0;
2728         int ret;
2729
2730         if (!strcmp(name, tipc_bclink_name))
2731                 return tipc_bclink_stats(buf, buf_size);
2732
2733         read_lock_bh(&tipc_net_lock);
2734         l = link_find_link(name, &node);
2735         if (!l) {
2736                 read_unlock_bh(&tipc_net_lock);
2737                 return 0;
2738         }
2739         tipc_node_lock(node);
2740         s = &l->stats;
2741
2742         if (tipc_link_is_active(l))
2743                 status = "ACTIVE";
2744         else if (tipc_link_is_up(l))
2745                 status = "STANDBY";
2746         else
2747                 status = "DEFUNCT";
2748
2749         ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
2750                             "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
2751                             "  Window:%u packets\n",
2752                             l->name, status, l->max_pkt, l->priority,
2753                             l->tolerance, l->queue_limit[0]);
2754
2755         ret += tipc_snprintf(buf + ret, buf_size - ret,
2756                              "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2757                              l->next_in_no - s->recv_info, s->recv_fragments,
2758                              s->recv_fragmented, s->recv_bundles,
2759                              s->recv_bundled);
2760
2761         ret += tipc_snprintf(buf + ret, buf_size - ret,
2762                              "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2763                              l->next_out_no - s->sent_info, s->sent_fragments,
2764                              s->sent_fragmented, s->sent_bundles,
2765                              s->sent_bundled);
2766
2767         profile_total = s->msg_length_counts;
2768         if (!profile_total)
2769                 profile_total = 1;
2770
2771         ret += tipc_snprintf(buf + ret, buf_size - ret,
2772                              "  TX profile sample:%u packets  average:%u octets\n"
2773                              "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2774                              "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2775                              s->msg_length_counts,
2776                              s->msg_lengths_total / profile_total,
2777                              percent(s->msg_length_profile[0], profile_total),
2778                              percent(s->msg_length_profile[1], profile_total),
2779                              percent(s->msg_length_profile[2], profile_total),
2780                              percent(s->msg_length_profile[3], profile_total),
2781                              percent(s->msg_length_profile[4], profile_total),
2782                              percent(s->msg_length_profile[5], profile_total),
2783                              percent(s->msg_length_profile[6], profile_total));
2784
2785         ret += tipc_snprintf(buf + ret, buf_size - ret,
2786                              "  RX states:%u probes:%u naks:%u defs:%u"
2787                              " dups:%u\n", s->recv_states, s->recv_probes,
2788                              s->recv_nacks, s->deferred_recv, s->duplicates);
2789
2790         ret += tipc_snprintf(buf + ret, buf_size - ret,
2791                              "  TX states:%u probes:%u naks:%u acks:%u"
2792                              " dups:%u\n", s->sent_states, s->sent_probes,
2793                              s->sent_nacks, s->sent_acks, s->retransmitted);
2794
2795         ret += tipc_snprintf(buf + ret, buf_size - ret,
2796                              "  Congestion link:%u  Send queue"
2797                              " max:%u avg:%u\n", s->link_congs,
2798                              s->max_queue_sz, s->queue_sz_counts ?
2799                              (s->accu_queue_sz / s->queue_sz_counts) : 0);
2800
2801         tipc_node_unlock(node);
2802         read_unlock_bh(&tipc_net_lock);
2803         return ret;
2804 }
2805
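     /**
      * tipc_link_cmd_show_stats - handle "show link statistics" config request
      * @req_tlv_area: request message TLV area
      * @req_tlv_space: size of request message TLV area
      *
      * Returns a reply buffer containing the formatted statistics string,
      * an error string reply if the link is not found, or NULL if the
      * reply buffer cannot be allocated.
      */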
2806 struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2807 {
2808         struct sk_buff *buf;
2809         struct tlv_desc *rep_tlv;
2810         int str_len;
2811         int pb_len;
2812         char *pb;
2813
2814         if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2815                 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2816
2817         buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2818         if (!buf)
2819                 return NULL;
2820
2821         rep_tlv = (struct tlv_desc *)buf->data;
2822         pb = TLV_DATA(rep_tlv);
2823         pb_len = ULTRA_STRING_MAX_LEN;
2824         str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2825                                   pb, pb_len);
2826         if (!str_len) {
2827                 kfree_skb(buf);
2828                 return tipc_cfg_reply_error_string("link not found");
2829         }
2830         str_len += 1;   /* for "\0" */
2831         skb_put(buf, TLV_SPACE(str_len));
2832         TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2833
2834         return buf;
2835 }
2836
2837 /**
2838  * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2839  * @dest: network address of destination node
2840  * @selector: used to select from set of active links
2841  *
2842  * If no active link can be found, uses default maximum packet size.
2843  */
2844 u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2845 {
2846         struct tipc_node *n_ptr;
2847         struct tipc_link *l_ptr;
2848         u32 res = MAX_PKT_DEFAULT;
2849
2850         if (dest == tipc_own_addr)
2851                 return MAX_MSG_SIZE;
2852
2853         read_lock_bh(&tipc_net_lock);
2854         n_ptr = tipc_node_find(dest);
2855         if (n_ptr) {
2856                 tipc_node_lock(n_ptr);
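                     /* Low bit of selector picks one of the node's two active link slots */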
2857                 l_ptr = n_ptr->active_links[selector & 1];
2858                 if (l_ptr)
2859                         res = l_ptr->max_pkt;
2860                 tipc_node_unlock(n_ptr);
2861         }
2862         read_unlock_bh(&tipc_net_lock);
2863         return res;
2864 }
2865
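     /**
      * link_print - print link name and state abbreviation for debugging
      * @l_ptr: pointer to link
      * @str: message prefix supplied by the caller
      */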
2866 static void link_print(struct tipc_link *l_ptr, const char *str)
2867 {
2868         pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);
2869
2870         if (link_working_unknown(l_ptr))
2871                 pr_cont(":WU\n");
2872         else if (link_reset_reset(l_ptr))
2873                 pr_cont(":RR\n");
2874         else if (link_reset_unknown(l_ptr))
2875                 pr_cont(":RU\n");
2876         else if (link_working_working(l_ptr))
2877                 pr_cont(":WW\n");
2878         else
2879                 pr_cont("\n");
2880 }