/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "port.h"
#include "name_distr.h"
#include "discover.h"
#include "config.h"

#include <linux/pkt_sched.h>
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link changeover error, ";
static const char *link_rst_msg = "Resetting link ";
static const char *link_unk_evt = "Unknown link event ";
/*
 * Out-of-range value for link session numbers
 */
#define INVALID_SESSION 0x10000

/*
 * Link state events:
 */
#define  STARTING_EVT    856384768	/* link processing trigger */
#define  TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
#define  TIMEOUT_EVT     560817u	/* link timer expired */
/*
 * The following two 'message types' are really just implementation
 * data conveniently stored in the message header.
 * They must not be considered part of the protocol
 */
#define ORIGINAL_MSG	1
#define DUPLICATE_MSG	2
/*
 * State value stored in 'exp_msg_count'
 */
#define START_CHANGEOVER 100000u
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf);
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
static int link_recv_changeover_msg(struct tipc_link **l_ptr,
				    struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void link_start(struct tipc_link *l_ptr);
static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
static void tipc_link_send_sync(struct tipc_link *l);
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
/*
 * Simple link routines
 */
static unsigned int align(unsigned int i)
{
	return (i + 3) & ~3u;
}
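/*
 * Worked example: align() rounds up to the next 4-byte boundary, so
 * align(1) == 4, align(4) == 4 and align(53) == 56. The bundling code
 * below relies on this to keep each embedded message word-aligned.
 */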
static void link_init_max_pkt(struct tipc_link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}
static u32 link_next_sent(struct tipc_link *l_ptr)
{
	if (l_ptr->next_out)
		return buf_seqno(l_ptr->next_out);
	return mod(l_ptr->next_out_no);
}

static u32 link_last_sent(struct tipc_link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}
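/*
 * Note: sequence number arithmetic in this file is modulo 2^16; mod()
 * masks to the low 16 bits, so e.g. with next_out_no == 0 the "last
 * sent" value computed above wraps around to 0xffff. The less()/less_eq()
 * helpers compare within that same wrap-around space.
 */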
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
int tipc_link_is_up(struct tipc_link *l_ptr)
{
	if (!l_ptr)
		return 0;
	return link_working_working(l_ptr) || link_working_unknown(l_ptr);
}

int tipc_link_is_active(struct tipc_link *l_ptr)
{
	return	(l_ptr->owner->active_links[0] == l_ptr) ||
		(l_ptr->owner->active_links[1] == l_ptr);
}
/**
 * link_timeout - handle expiration of link timer
 * @l_ptr: pointer to link
 *
 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
 * with tipc_link_delete().  (There is no risk that the node will be deleted by
 * another thread because tipc_link_delete() always cancels the link timer before
 * tipc_node_delete() is called.)
 */
static void link_timeout(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
	l_ptr->stats.queue_sz_counts++;

	if (l_ptr->first_out) {
		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
		u32 length = msg_size(msg);

		if ((msg_user(msg) == MSG_FRAGMENTER) &&
		    (msg_type(msg) == FIRST_FRAGMENT)) {
			length = msg_size(msg_get_wrapped(msg));
		}
		if (length) {
			l_ptr->stats.msg_lengths_total += length;
			l_ptr->stats.msg_length_counts++;
			if (length <= 64)
				l_ptr->stats.msg_length_profile[0]++;
			else if (length <= 256)
				l_ptr->stats.msg_length_profile[1]++;
			else if (length <= 1024)
				l_ptr->stats.msg_length_profile[2]++;
			else if (length <= 4096)
				l_ptr->stats.msg_length_profile[3]++;
			else if (length <= 16384)
				l_ptr->stats.msg_length_profile[4]++;
			else if (length <= 32768)
				l_ptr->stats.msg_length_profile[5]++;
			else
				l_ptr->stats.msg_length_profile[6]++;
		}
	}

	/* do all other link processing performed on a periodic basis */

	link_state_event(l_ptr, TIMEOUT_EVT);

	if (l_ptr->next_out)
		tipc_link_push_queue(l_ptr);

	tipc_node_unlock(l_ptr->owner);
}
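/*
 * The counters above bucket sent message lengths as follows:
 *   profile[0] <= 64, [1] <= 256, [2] <= 1024, [3] <= 4096,
 *   [4] <= 16384, [5] <= 32768, [6] anything larger.
 * E.g. a 300-byte message increments msg_length_profile[2].
 */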
static void link_set_timer(struct tipc_link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}
/**
 * tipc_link_create - create a new link
 * @n_ptr: pointer to associated node
 * @b_ptr: pointer to associated bearer
 * @media_addr: media address to use when sending messages over link
 *
 * Returns pointer to link.
 */
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
				   struct tipc_bearer *b_ptr,
				   const struct tipc_media_addr *media_addr)
{
	struct tipc_link *l_ptr;
	struct tipc_msg *msg;
	char *if_name;
	char addr_string[16];
	u32 peer = n_ptr->addr;

	if (n_ptr->link_cnt >= 2) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish third link to %s\n", addr_string);
		return NULL;
	}

	if (n_ptr->links[b_ptr->identity]) {
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_err("Attempt to establish second link on <%s> to %s\n",
		       b_ptr->name, addr_string);
		return NULL;
	}

	l_ptr = kzalloc(sizeof(*l_ptr), GFP_ATOMIC);
	if (!l_ptr) {
		pr_warn("Link creation failed, no memory\n");
		return NULL;
	}

	l_ptr->addr = peer;
	if_name = strchr(b_ptr->name, ':') + 1;
	sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
		tipc_node(tipc_own_addr),
		if_name,
		tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
		/* note: peer i/f name is updated by reset/activate message */
	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
	l_ptr->owner = n_ptr;
	l_ptr->checkpoint = 1;
	l_ptr->peer_session = INVALID_SESSION;
	l_ptr->b_ptr = b_ptr;
	link_set_supervision_props(l_ptr, b_ptr->tolerance);
	l_ptr->state = RESET_UNKNOWN;

	l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
	msg = l_ptr->pmsg;
	tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_size(msg, sizeof(l_ptr->proto_msg));
	msg_set_session(msg, (tipc_random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);

	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);

	link_init_max_pkt(l_ptr);

	l_ptr->next_out_no = 1;
	INIT_LIST_HEAD(&l_ptr->waiting_ports);

	link_reset_statistics(l_ptr);

	tipc_node_attach_link(n_ptr, l_ptr);

	k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
	list_add_tail(&l_ptr->link_list, &b_ptr->links);
	tipc_k_signal((Handler)link_start, (unsigned long)l_ptr);

	return l_ptr;
}
/**
 * tipc_link_delete - delete a link
 * @l_ptr: pointer to link
 *
 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
 * This routine must not grab the node lock until after link timer cancellation
 * to avoid a potential deadlock situation.
 */
void tipc_link_delete(struct tipc_link *l_ptr)
{
	if (!l_ptr) {
		pr_err("Attempt to delete non-existent link\n");
		return;
	}

	k_cancel_timer(&l_ptr->timer);

	tipc_node_lock(l_ptr->owner);
	tipc_link_reset(l_ptr);
	tipc_node_detach_link(l_ptr->owner, l_ptr);
	tipc_link_stop(l_ptr);
	list_del_init(&l_ptr->link_list);
	tipc_node_unlock(l_ptr->owner);
	k_term_timer(&l_ptr->timer);
	kfree(l_ptr);
}
static void link_start(struct tipc_link *l_ptr)
{
	tipc_node_lock(l_ptr->owner);
	link_state_event(l_ptr, STARTING_EVT);
	tipc_node_unlock(l_ptr->owner);
}
/**
 * link_schedule_port - schedule port for deferred sending
 * @l_ptr: pointer to link
 * @origport: reference to sending port
 * @sz: amount of data to be sent
 *
 * Schedules port for renewed sending of messages after link congestion
 * has abated.
 */
static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
{
	struct tipc_port *p_ptr;

	spin_lock_bh(&tipc_port_list_lock);
	p_ptr = tipc_port_lock(origport);
	if (p_ptr) {
		if (!p_ptr->wakeup)
			goto exit;
		if (!list_empty(&p_ptr->wait_list))
			goto exit;
		p_ptr->congested = 1;
		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
		l_ptr->stats.link_congs++;
exit:
		tipc_port_unlock(p_ptr);
	}
	spin_unlock_bh(&tipc_port_list_lock);
	return -ELINKCONG;
}
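/*
 * waiting_pkts above is a ceiling division: for sz == 5000 and
 * max_pkt == 1500 it yields 1 + (4999 / 1500) == 4, i.e. the number of
 * packets the port will need once the congestion has abated.
 */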
void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
{
	struct tipc_port *p_ptr;
	struct tipc_port *temp_p_ptr;
	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;

	if (all)
		win = 100000;
	if (win <= 0)
		return;
	if (!spin_trylock_bh(&tipc_port_list_lock))
		return;
	if (link_congested(l_ptr))
		goto exit;
	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
				 wait_list) {
		if (win <= 0)
			break;
		list_del_init(&p_ptr->wait_list);
		spin_lock_bh(p_ptr->lock);
		p_ptr->congested = 0;
		p_ptr->wakeup(p_ptr);
		win -= p_ptr->waiting_pkts;
		spin_unlock_bh(p_ptr->lock);
	}

exit:
	spin_unlock_bh(&tipc_port_list_lock);
}
/**
 * link_release_outqueue - purge link's outbound message queue
 * @l_ptr: pointer to link
 */
static void link_release_outqueue(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	l_ptr->first_out = NULL;
	l_ptr->out_queue_size = 0;
}
/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->defragm_buf;
	struct sk_buff *next;

	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	l_ptr->defragm_buf = NULL;
}
/**
 * tipc_link_stop - purge all inbound and outbound messages associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_stop(struct tipc_link *l_ptr)
{
	struct sk_buff *buf;
	struct sk_buff *next;

	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}

	buf = l_ptr->first_out;
	while (buf) {
		next = buf->next;
		kfree_skb(buf);
		buf = next;
	}

	tipc_link_reset_fragments(l_ptr);

	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
}
void tipc_link_reset(struct tipc_link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;

	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);

	if (was_active_link && tipc_node_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */
	link_release_outqueue(l_ptr);
	kfree_skb(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		kfree_skb(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		tipc_link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);
}
static void link_activate(struct tipc_link *l_ptr)
{
	l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
	tipc_node_link_up(l_ptr->owner, l_ptr);
	tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
}
/**
 * link_state_event - link finite state machine
 * @l_ptr: pointer to link
 * @event: state machine event to process
 */
static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
{
	struct tipc_link *other;
	u32 cont_intv = l_ptr->continuity_interval;

	if (!l_ptr->started && (event != STARTING_EVT))
		return;		/* Not yet. */

	if (link_blocked(l_ptr)) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;	  /* Changeover going on */
	}

	switch (l_ptr->state) {
	case WORKING_WORKING:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 1, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
				break;
			}
			l_ptr->state = WORKING_UNKNOWN;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv / 4);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer\n", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in WW state\n", link_unk_evt, event);
		}
		break;
	case WORKING_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			pr_info("%s<%s>, requested by peer while probing\n",
				link_rst_msg, l_ptr->name);
			tipc_link_reset(l_ptr);
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
			if (l_ptr->next_in_no != l_ptr->checkpoint) {
				l_ptr->state = WORKING_WORKING;
				l_ptr->fsm_msg_cnt = 0;
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_send_proto_msg(l_ptr, STATE_MSG,
								 0, 0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_send_proto_msg(l_ptr, STATE_MSG,
							 1, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
				pr_warn("%s<%s>, peer not responding\n",
					link_rst_msg, l_ptr->name);
				tipc_link_reset(l_ptr);
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_send_proto_msg(l_ptr, RESET_MSG,
							 0, 0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
			break;
		default:
			pr_err("%s%u in WU state\n", link_unk_evt, event);
		}
		break;
	case RESET_UNKNOWN:
		switch (event) {
		case TRAFFIC_MSG_EVT:
			break;
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		case STARTING_EVT:
			l_ptr->started = 1;
			/* fall through */
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RU state\n", link_unk_evt, event);
		}
		break;
	case RESET_RESET:
		switch (event) {
		case TRAFFIC_MSG_EVT:
		case ACTIVATE_MSG:
			other = l_ptr->owner->active_links[0];
			if (other && link_working_unknown(other))
				break;
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_send_sync(l_ptr);
			link_set_timer(l_ptr, cont_intv);
			break;
		case RESET_MSG:
			break;
		case TIMEOUT_EVT:
			tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
		default:
			pr_err("%s%u in RR state\n", link_unk_evt, event);
		}
		break;
	default:
		pr_err("Unknown link state %u/%u\n", l_ptr->state, event);
	}
}
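/*
 * Rough transition summary for the state machine above (WW/WU/RU/RR =
 * WORKING_WORKING/WORKING_UNKNOWN/RESET_UNKNOWN/RESET_RESET):
 *
 *   WW: a silent timeout moves to WU and probes at cont_intv / 4;
 *       a peer RESET_MSG resets the link and moves to RR.
 *   WU: traffic or ACTIVATE_MSG moves back to WW; repeated silent
 *       timeouts beyond abort_limit reset the link and move to RU.
 *   RU: ACTIVATE_MSG activates the link (-> WW); RESET_MSG moves to RR;
 *       STARTING_EVT/TIMEOUT_EVT keep sending RESET_MSG.
 *   RR: traffic or ACTIVATE_MSG activates the link (-> WW);
 *       TIMEOUT_EVT keeps sending ACTIVATE_MSG.
 */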
/*
 * link_bundle_buf(): Append contents of a buffer to
 * the tail of an existing one.
 */
static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
			   struct sk_buff *buf)
{
	struct tipc_msg *bundler_msg = buf_msg(bundler);
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 bundle_size = msg_size(bundler_msg);
	u32 to_pos = align(bundle_size);
	u32 pad = to_pos - bundle_size;

	if (msg_user(bundler_msg) != MSG_BUNDLER)
		return 0;
	if (msg_type(bundler_msg) != OPEN_MSG)
		return 0;
	if (skb_tailroom(bundler) < (pad + size))
		return 0;
	if (l_ptr->max_pkt < (to_pos + size))
		return 0;

	skb_put(bundler, pad + size);
	skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
	msg_set_size(bundler_msg, to_pos + size);
	msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
	kfree_skb(buf);
	l_ptr->stats.sent_bundled++;
	return 1;
}
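/*
 * Example: appending a 60-byte message to a bundle currently 101 bytes
 * long gives to_pos = align(101) = 104 and pad = 3, so the bundle grows
 * by 63 bytes and the appended message starts word-aligned at offset 104.
 */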
static void link_add_to_outqueue(struct tipc_link *l_ptr,
				 struct sk_buff *buf,
				 struct tipc_msg *msg)
{
	u32 ack = mod(l_ptr->next_in_no - 1);
	u32 seqno = mod(l_ptr->next_out_no++);

	msg_set_word(msg, 2, ((ack << 16) | seqno));
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	buf->next = NULL;
	if (l_ptr->first_out) {
		l_ptr->last_out->next = buf;
		l_ptr->last_out = buf;
	} else
		l_ptr->first_out = l_ptr->last_out = buf;

	l_ptr->out_queue_size++;
	if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
}
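/*
 * Word 2 of each queued header thus carries the acknowledged and the
 * outgoing sequence number packed as (ack << 16) | seqno; e.g. ack == 7
 * and seqno == 42 encode as 0x0007002a.
 */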
static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
				       struct sk_buff *buf_chain,
				       u32 long_msgno)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	if (!l_ptr->next_out)
		l_ptr->next_out = buf_chain;
	while (buf_chain) {
		buf = buf_chain;
		buf_chain = buf_chain->next;

		msg = buf_msg(buf);
		msg_set_long_msgno(msg, long_msgno);
		link_add_to_outqueue(l_ptr, buf, msg);
	}
}
/*
 * tipc_link_send_buf() is the 'full path' for messages, called from
 * inside TIPC when the 'fast path' in tipc_send_buf
 * has failed, and from link_send()
 */
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = tipc_msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = l_ptr->max_pkt;

	/* Match msg importance against queue limits: */
	if (unlikely(queue_size >= queue_limit)) {
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			link_schedule_port(l_ptr, msg_origport(msg), size);
			kfree_skb(buf);
			return -ELINKCONG;
		}
		kfree_skb(buf);
		if (imp > CONN_MANAGER) {
			pr_warn("%s<%s>, send queue full", link_rst_msg,
				l_ptr->name);
			tipc_link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */
	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent. */
	if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
		   !link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		return dsz;
	}
	/* Congestion: can message be bundled ? */
	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */
		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
			return dsz;

		/* Try creating a new bundle */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = tipc_buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					 INT_H_SIZE, l_ptr->addr);
				skb_copy_to_linear_data(bundler, &bundler_hdr,
							INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	return dsz;
}
/*
 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
 * not been selected yet, and the owner node is not locked
 * Called by TIPC internal users, e.g. the name distributor
 */
int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int res = -ELINKCONG;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = tipc_link_send_buf(l_ptr, buf);
		else
			kfree_skb(buf);
		tipc_node_unlock(n_ptr);
	} else {
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}
/*
 * tipc_link_send_sync - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 *
 * Called with node locked
 */
static void tipc_link_send_sync(struct tipc_link *l)
{
	struct sk_buff *buf;
	struct tipc_msg *msg;

	buf = tipc_buf_acquire(INT_H_SIZE);
	if (!buf)
		return;

	msg = buf_msg(buf);
	tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
	msg_set_last_bcast(msg, l->owner->bclink.acked);
	link_add_chain_to_outqueue(l, buf, 0);
	tipc_link_push_queue(l);
}
/*
 * tipc_link_recv_sync - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
	n->bclink.recv_permitted = true;
	kfree_skb(buf);
}
/*
 * tipc_link_send_names - send name table entries to new neighbor
 *
 * Send routine for bulk delivery of name table messages when contact
 * with a new neighbor occurs. No link congestion checking is performed
 * because name table messages *must* be delivered. The messages must be
 * small enough not to require fragmentation.
 * Called without any locks held.
 */
void tipc_link_send_names(struct list_head *message_list, u32 dest)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct sk_buff *temp_buf;

	if (list_empty(message_list))
		return;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[0];
		if (l_ptr) {
			/* convert circular list to linear list */
			((struct sk_buff *)message_list->prev)->next = NULL;
			link_add_chain_to_outqueue(l_ptr,
				(struct sk_buff *)message_list->next, 0);
			tipc_link_push_queue(l_ptr);
			INIT_LIST_HEAD(message_list);
		}
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);

	/* discard the messages if they couldn't be sent */
	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
		list_del((struct list_head *)buf);
		kfree_skb(buf);
	}
}
/*
 * link_send_buf_fast: Entry for data messages where the
 * destination link is known and the header is complete,
 * inclusive total message length. Very time critical.
 * Link is locked. Returns user data length.
 */
static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
			      u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
			if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
				link_add_to_outqueue(l_ptr, buf, msg);
				tipc_bearer_send(l_ptr->b_ptr, buf,
						 &l_ptr->media_addr);
				l_ptr->unacked_window = 0;
				return res;
			}
		} else
			*used_max_pkt = l_ptr->max_pkt;
	}
	return tipc_link_send_buf(l_ptr, buf);  /* All other cases */
}
/*
 * tipc_link_send_sections_fast: Entry for messages where the
 * destination processor is known and the header is complete,
 * except for total message length.
 * Returns user data length or errno.
 */
int tipc_link_send_sections_fast(struct tipc_port *sender,
				 struct iovec const *msg_sect,
				 unsigned int len, u32 destaddr)
{
	struct tipc_msg *hdr = &sender->phdr;
	struct tipc_link *l_ptr;
	struct sk_buff *buf;
	struct tipc_node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */
	res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
	/* Exit if build request was invalid */
	if (unlikely(res < 0))
		return res;

	read_lock_bh(&tipc_net_lock);
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
exit:
				tipc_node_unlock(node);
				read_unlock_bh(&tipc_net_lock);
				return res;
			}

			/* Exit if link (or bearer) is congested */
			if (link_congested(l_ptr) ||
			    tipc_bearer_blocked(l_ptr->b_ptr)) {
				res = link_schedule_port(l_ptr,
							 sender->ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			read_unlock_bh(&tipc_net_lock);

			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect, len,
						       destaddr);
		}
		tipc_node_unlock(node);
	}
	read_unlock_bh(&tipc_net_lock);

	/* Couldn't find a link to the destination node */
	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	return res;
}
/*
 * link_send_sections_long(): Entry for long messages where the
 * destination node is known and the header is complete,
 * inclusive total message length.
 * Link and bearer congestion status have been checked to be ok,
 * and are ignored if they change.
 *
 * Note that fragments do not use the full link MTU so that they won't have
 * to undergo refragmentation if link changeover causes them to be sent
 * over another link with an additional tunnel header added as prefix.
 * (Refragmentation will still occur if the other link has a smaller MTU.)
 *
 * Returns user data length or errno.
 */
static int link_send_sections_long(struct tipc_port *sender,
				   struct iovec const *msg_sect,
				   unsigned int len, u32 destaddr)
{
	struct tipc_link *l_ptr;
	struct tipc_node *node;
	struct tipc_msg *hdr = &sender->phdr;
	u32 dsz = len;
	u32 max_pkt, fragm_sz, rest;
	struct tipc_msg fragm_hdr;
	struct sk_buff *buf, *buf_chain, *prev;
	u32 fragm_crs, fragm_rest, hsz, sect_rest;
	const unchar __user *sect_crs;
	int curr_sect;
	u32 fragm_no;
	int res = 0;

again:
	fragm_no = 1;
	max_pkt = sender->max_pkt - INT_H_SIZE;
		/* leave room for tunnel header in case of link changeover */
	fragm_sz = max_pkt - INT_H_SIZE;
		/* leave room for fragmentation header in each fragment */
	rest = dsz;
	fragm_crs = 0;
	fragm_rest = 0;
	sect_rest = 0;
	sect_crs = NULL;
	curr_sect = -1;

	/* Prepare reusable fragment header */
	tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		 INT_H_SIZE, msg_destnode(hdr));
	msg_set_size(&fragm_hdr, max_pkt);
	msg_set_fragm_no(&fragm_hdr, 1);

	/* Prepare header of first fragment */
	buf_chain = buf = tipc_buf_acquire(max_pkt);
	if (!buf)
		return -ENOMEM;
	buf->next = NULL;
	skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
	hsz = msg_hdr_sz(hdr);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);

	/* Chop up message */
	fragm_crs = INT_H_SIZE + hsz;
	fragm_rest = fragm_sz - hsz;

	do {		/* For all sections */
		u32 sz;

		if (!sect_rest) {
			sect_rest = msg_sect[++curr_sect].iov_len;
			sect_crs = msg_sect[curr_sect].iov_base;
		}

		if (sect_rest < fragm_rest)
			sz = sect_rest;
		else
			sz = fragm_rest;

		if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
			res = -EFAULT;
error:
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				kfree_skb(buf_chain);
			}
			return res;
		}
		sect_crs += sz;
		sect_rest -= sz;
		fragm_crs += sz;
		fragm_rest -= sz;
		rest -= sz;

		if (!fragm_rest && rest) {

			/* Initiate new fragment: */
			if (rest <= fragm_sz) {
				fragm_sz = rest;
				msg_set_type(&fragm_hdr, LAST_FRAGMENT);
			} else {
				msg_set_type(&fragm_hdr, FRAGMENT);
			}
			msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
			msg_set_fragm_no(&fragm_hdr, ++fragm_no);
			prev = buf;
			buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
			if (!buf) {
				res = -ENOMEM;
				goto error;
			}

			buf->next = NULL;
			prev->next = buf;
			skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
			fragm_crs = INT_H_SIZE;
			fragm_rest = fragm_sz;
		}
	} while (rest > 0);

	/*
	 * Now we have a buffer chain. Select a link and check
	 * that packet size is still OK
	 */
	node = tipc_node_find(destaddr);
	if (likely(node)) {
		tipc_node_lock(node);
		l_ptr = node->active_links[sender->ref & 1];
		if (!l_ptr) {
			tipc_node_unlock(node);
			goto reject;
		}
		if (l_ptr->max_pkt < max_pkt) {
			sender->max_pkt = l_ptr->max_pkt;
			tipc_node_unlock(node);
			for (; buf_chain; buf_chain = buf) {
				buf = buf_chain->next;
				kfree_skb(buf_chain);
			}
			goto again;
		}
	} else {
reject:
		for (; buf_chain; buf_chain = buf) {
			buf = buf_chain->next;
			kfree_skb(buf_chain);
		}
		return tipc_port_reject_sections(sender, hdr, msg_sect,
						 len, TIPC_ERR_NO_NODE);
	}

	/* Append chain of fragments to send queue & send them */
	l_ptr->long_msg_seq_no++;
	link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
	l_ptr->stats.sent_fragments += fragm_no;
	l_ptr->stats.sent_fragmented++;
	tipc_link_push_queue(l_ptr);
	tipc_node_unlock(node);
	return dsz;
}
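/*
 * Sizing example (assuming the usual 40-byte INT_H_SIZE): with a
 * 1500-byte max_pkt hint, max_pkt becomes 1460 (room for a changeover
 * tunnel header) and fragm_sz 1420 (room for the per-fragment header),
 * so a 10000-byte message is sent as ceil(10000 / 1420) == 8 fragments.
 */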
/*
 * tipc_link_push_packet: Push one unsent packet to the media
 */
u32 tipc_link_push_packet(struct tipc_link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any, */
	/* consider that buffers may have been released in meantime */
	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = buf_seqno(buf);

		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */
	if (r_q_size && buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->retransm_queue_head = mod(++r_q_head);
		l_ptr->retransm_queue_size = --r_q_size;
		l_ptr->stats.retransmitted++;
		return 0;
	}

	/* Send deferred protocol message, if any: */
	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		l_ptr->unacked_window = 0;
		kfree_skb(buf);
		l_ptr->proto_msg_queue = NULL;
		return 0;
	}

	/* Send one deferred data message, if send window not full: */
	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = buf_seqno(l_ptr->first_out);

		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
			if (msg_user(msg) == MSG_BUNDLER)
				msg_set_type(msg, CLOSED_MSG);
			l_ptr->next_out = buf->next;
			return 0;
		}
	}
	return 1;
}
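/*
 * The send-window test above compares the deferred packet's distance from
 * first_out against queue_limit[0] (the link window): with first == 100
 * and a window of 50, packet 149 is still sent but packet 150 stays
 * deferred until an acknowledgment advances first_out.
 */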
/*
 * push_queue(): push out the unsent messages of a link where
 *               congestion has abated. Node is locked
 */
void tipc_link_push_queue(struct tipc_link *l_ptr)
{
	u32 res;

	if (tipc_bearer_blocked(l_ptr->b_ptr))
		return;

	do {
		res = tipc_link_push_packet(l_ptr);
	} while (!res);
}
static void link_reset_all(unsigned long addr)
{
	struct tipc_node *n_ptr;
	char addr_string[16];
	u32 i;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find((u32)addr);
	if (!n_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return;	/* node no longer exists */
	}

	tipc_node_lock(n_ptr);

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n_ptr->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		if (n_ptr->links[i]) {
			link_print(n_ptr->links[i], "Resetting link\n");
			tipc_link_reset(n_ptr->links[i]);
		}
	}

	tipc_node_unlock(n_ptr);
	read_unlock_bh(&tipc_net_lock);
}
static void link_retransmit_failure(struct tipc_link *l_ptr,
				    struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);

	pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

	if (l_ptr->addr) {
		/* Handle failure on standard link */
		link_print(l_ptr, "Resetting link\n");
		tipc_link_reset(l_ptr);
	} else {
		/* Handle failure on broadcast link */
		struct tipc_node *n_ptr;
		char addr_string[16];

		pr_info("Msg seq number: %u, ", msg_seqno(msg));
		pr_cont("Outstanding acks: %lu\n",
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to();
		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
		pr_info("Reception permitted: %d,  Acked: %u\n",
			n_ptr->bclink.recv_permitted,
			n_ptr->bclink.acked);
		pr_info("Last in: %u,  Oos state: %u,  Last sent: %u\n",
			n_ptr->bclink.last_in,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

		tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);

		tipc_node_unlock(n_ptr);

		l_ptr->stale_count = 0;
	}
}
void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
			  u32 retransmits)
{
	struct tipc_msg *msg;

	if (!buf)
		return;

	msg = buf_msg(buf);

	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
		if (l_ptr->retransm_queue_size == 0) {
			l_ptr->retransm_queue_head = msg_seqno(msg);
			l_ptr->retransm_queue_size = retransmits;
		} else {
			pr_err("Unexpected retransmit on link %s (qsize=%d)\n",
			       l_ptr->name, l_ptr->retransm_queue_size);
		}
		return;
	} else {
		/* Detect repeated retransmit failures on unblocked bearer */
		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
			if (++l_ptr->stale_count > 100) {
				link_retransmit_failure(l_ptr, buf);
				return;
			}
		} else {
			l_ptr->last_retransmitted = msg_seqno(msg);
			l_ptr->stale_count = 1;
		}
	}

	while (retransmits && (buf != l_ptr->next_out) && buf) {
		msg = buf_msg(buf);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
		buf = buf->next;
		retransmits--;
		l_ptr->stats.retransmitted++;
	}

	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
}
/**
 * link_insert_deferred_queue - insert deferred messages back into receive chain
 */
static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
						  struct sk_buff *buf)
{
	u32 seq_no;

	if (l_ptr->oldest_deferred_in == NULL)
		return buf;

	seq_no = buf_seqno(l_ptr->oldest_deferred_in);
	if (seq_no == mod(l_ptr->next_in_no)) {
		l_ptr->newest_deferred_in->next = buf;
		buf = l_ptr->oldest_deferred_in;
		l_ptr->oldest_deferred_in = NULL;
		l_ptr->deferred_inqueue_sz = 0;
	}
	return buf;
}
/**
 * link_recv_buf_validate - validate basic format of received message
 *
 * This routine ensures a TIPC message has an acceptable header, and at least
 * as much data as the header indicates it should. The routine also ensures
 * that the entire message header is stored in the main fragment of the message
 * buffer, to simplify future access to message header fields.
 *
 * Note: Having extra info present in the message header or data areas is OK.
 * TIPC will ignore the excess, under the assumption that it is optional info
 * introduced by a later release of the protocol.
 */
static int link_recv_buf_validate(struct sk_buff *buf)
{
	static u32 min_data_hdr_size[8] = {
		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
		};

	struct tipc_msg *msg;
	u32 tipc_hdr[2];
	u32 size;
	u32 hdr_size;
	u32 min_hdr_size;

	if (unlikely(buf->len < MIN_H_SIZE))
		return 0;

	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
	if (msg == NULL)
		return 0;

	if (unlikely(msg_version(msg) != TIPC_VERSION))
		return 0;

	size = msg_size(msg);
	hdr_size = msg_hdr_sz(msg);
	min_hdr_size = msg_isdata(msg) ?
		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;

	if (unlikely((hdr_size < min_hdr_size) ||
		     (size < hdr_size) ||
		     (buf->len < size) ||
		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
		return 0;

	return pskb_may_pull(buf, hdr_size);
}
/**
 * tipc_recv_msg - process TIPC messages arriving from off-node
 * @head: pointer to message buffer chain
 * @tb_ptr: pointer to bearer message arrived on
 *
 * Invoked with no locks held. Bearer pointer must point to a valid bearer
 * structure (i.e. cannot be NULL), but bearer can be inactive.
 */
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *b_ptr)
{
	read_lock_bh(&tipc_net_lock);
	while (head) {
		struct tipc_node *n_ptr;
		struct tipc_link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg;
		u32 seq_no;
		u32 ackd;
		u32 released = 0;
		int type;

		head = head->next;

		/* Ensure bearer is still enabled */
		if (unlikely(!b_ptr->active))
			goto discard;

		/* Ensure message is well-formed */
		if (unlikely(!link_recv_buf_validate(buf)))
			goto discard;

		/* Ensure message data is a single contiguous unit */
		if (unlikely(skb_linearize(buf)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(buf);

		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_recv_msg(buf, b_ptr);
			else
				tipc_bclink_recv_pkt(buf);
			continue;
		}

		/* Discard unicast link messages destined for another node */
		if (unlikely(!msg_short(msg) &&
			     (msg_destnode(msg) != tipc_own_addr)))
			goto discard;

		/* Locate neighboring node that sent message */
		n_ptr = tipc_node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
		tipc_node_lock(n_ptr);

		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr)) {
			tipc_node_unlock(n_ptr);
			goto discard;
		}

		/* Verify that communication with node is currently allowed */
		if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
			msg_user(msg) == LINK_PROTOCOL &&
			(msg_type(msg) == RESET_MSG ||
			 msg_type(msg) == ACTIVATE_MSG) &&
			!msg_redundant_link(msg))
			n_ptr->block_setup &= ~WAIT_PEER_DOWN;

		if (n_ptr->block_setup) {
			tipc_node_unlock(n_ptr);
			goto discard;
		}

		/* Validate message sequence number info */
		seq_no = msg_seqno(msg);
		ackd = msg_ack(msg);

		/* Release acked messages */
		if (n_ptr->bclink.recv_permitted)
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(buf_seqno(crs), ackd)) {
			struct sk_buff *next = crs->next;

			kfree_skb(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}

		/* Try sending any messages link endpoint has pending */
		if (unlikely(l_ptr->next_out))
			tipc_link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			tipc_link_wakeup_ports(l_ptr, 0);
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

		/* Now (finally!) process the incoming message */
protocol_check:
		if (likely(link_working_working(l_ptr))) {
			if (likely(seq_no == mod(l_ptr->next_in_no))) {
				l_ptr->next_in_no++;
				if (unlikely(l_ptr->oldest_deferred_in))
					head = link_insert_deferred_queue(l_ptr,
									  head);
deliver:
				if (likely(msg_isdata(msg))) {
					tipc_node_unlock(n_ptr);
					tipc_port_recv_msg(buf);
					continue;
				}
				switch (msg_user(msg)) {
					int ret;
				case MSG_BUNDLER:
					l_ptr->stats.recv_bundles++;
					l_ptr->stats.recv_bundled +=
						msg_msgcnt(msg);
					tipc_node_unlock(n_ptr);
					tipc_link_recv_bundle(buf);
					continue;
				case NAME_DISTRIBUTOR:
					n_ptr->bclink.recv_permitted = true;
					tipc_node_unlock(n_ptr);
					tipc_named_recv(buf);
					continue;
				case BCAST_PROTOCOL:
					tipc_link_recv_sync(n_ptr, buf);
					tipc_node_unlock(n_ptr);
					continue;
				case CONN_MANAGER:
					tipc_node_unlock(n_ptr);
					tipc_port_recv_proto_msg(buf);
					continue;
				case MSG_FRAGMENTER:
					l_ptr->stats.recv_fragments++;
					ret = tipc_link_recv_fragment(
						&l_ptr->defragm_buf,
						&buf, &msg);
					if (ret == 1) {
						l_ptr->stats.recv_fragmented++;
						goto deliver;
					}
					if (ret == -1)
						l_ptr->next_in_no--;
					break;
				case CHANGEOVER_PROTOCOL:
					type = msg_type(msg);
					if (link_recv_changeover_msg(&l_ptr,
								     &buf)) {
						msg = buf_msg(buf);
						seq_no = msg_seqno(msg);
						if (type == ORIGINAL_MSG)
							goto deliver;
						goto protocol_check;
					}
					break;
				default:
					kfree_skb(buf);
					buf = NULL;
					break;
				}
				tipc_node_unlock(n_ptr);
				tipc_net_route_msg(buf);
				continue;
			}
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}

		/* Link is not in state WORKING_WORKING */
		if (msg_user(msg) == LINK_PROTOCOL) {
			link_recv_proto_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			tipc_node_unlock(n_ptr);
			continue;
		}

		/* Traffic message. Conditionally activate link */
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);

		if (link_working_working(l_ptr)) {
			/* Re-insert buffer in front of queue */
			buf->next = head;
			head = buf;
			tipc_node_unlock(n_ptr);
			continue;
		}
		tipc_node_unlock(n_ptr);
discard:
		kfree_skb(buf);
	}
	read_unlock_bh(&tipc_net_lock);
}
/*
 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
 *
 * Returns increase in queue length (i.e. 0 or 1)
 */
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
			struct sk_buff *buf)
{
	struct sk_buff *queue_buf;
	struct sk_buff **prev;
	u32 seq_no = buf_seqno(buf);

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	if (less(buf_seqno(*tail), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Locate insertion point in queue, then insert; discard if duplicate */
	prev = head;
	queue_buf = *head;
	for (;;) {
		u32 curr_seqno = buf_seqno(queue_buf);

		if (seq_no == curr_seqno) {
			kfree_skb(buf);
			return 0;
		}

		if (less(seq_no, curr_seqno))
			break;

		prev = &queue_buf->next;
		queue_buf = queue_buf->next;
	}

	buf->next = queue_buf;
	*prev = buf;
	return 1;
}
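/*
 * Example: with a deferred queue holding seqnos {5, 7, 8}, an arriving 6
 * is linked in between 5 and 7 (returning 1), while a second copy of 7
 * is freed as a duplicate (returning 0).
 */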
/*
 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
 */
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = buf_seqno(buf);

	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	/* Record OOS packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */
	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		kfree_skb(buf);
		return;
	}

	if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
				&l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}
/*
 * Send protocol message to the other endpoint.
 */
void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
			      int probe_msg, u32 gap, u32 tolerance,
			      u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;

	/* Discard any previous message that was deferred due to congestion */
	if (l_ptr->proto_msg_queue) {
		kfree_skb(l_ptr->proto_msg_queue);
		l_ptr->proto_msg_queue = NULL;
	}

	if (link_blocked(l_ptr))
		return;

	/* Abort non-RESET send if communication with node is prohibited */
	if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG))
		return;

	/* Create protocol message with "out-of-sequence" sequence number */
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
	msg_set_last_bcast(msg, tipc_bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		if (!tipc_link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = buf_seqno(l_ptr->next_out);
		msg_set_next_sent(msg, next_sent);
		if (l_ptr->oldest_deferred_in) {
			u32 rec = buf_seqno(l_ptr->oldest_deferred_in);
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
	msg_set_redundant_link(msg, r_flag);
	msg_set_linkprio(msg, l_ptr->priority);
	msg_set_size(msg, msg_size);

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	buf = tipc_buf_acquire(msg_size);
	if (!buf)
		return;

	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;

	/* Defer message if bearer is already blocked */
	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
		l_ptr->proto_msg_queue = buf;
		return;
	}

	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
	l_ptr->unacked_window = 0;
	kfree_skb(buf);
}
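/*
 * The probe sizing above bisects toward max_pkt_target: e.g. with
 * mtu == 1000 and max_pkt_target == 2000, a probe is sent with
 * msg_size == (1000 + 500 + 2) & ~3 == 1500. After 10 unanswered probes
 * of one size, max_pkt_target itself is pulled down to msg_size - 4.
 */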
/*
 * Receive protocol message.
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest address rules.
 */
static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	if (link_blocked(l_ptr))
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */
	l_ptr->checkpoint--;

	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		if (!link_working_unknown(l_ptr) &&
		    (l_ptr->peer_session != INVALID_SESSION)) {
			if (less_eq(msg_session(msg), l_ptr->peer_session))
				break; /* duplicate or old reset: ignore */
		}

		if (!msg_redundant_link(msg) && (link_working_working(l_ptr) ||
				link_working_unknown(l_ptr))) {
			/*
			 * peer has lost contact -- don't allow peer's links
			 * to reactivate before we recognize loss & clean up
			 */
			l_ptr->owner->block_setup = WAIT_NODE_DOWN;
		}

		link_state_event(l_ptr, RESET_MSG);

		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol > l_ptr->tolerance)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}

		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
			l_ptr->owner->bclink.last_sent =
				l_ptr->owner->bclink.last_in =
				msg_last_bcast(msg);
			l_ptr->owner->bclink.oos_state = 0;
		}

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		if (msg_type(msg) == ACTIVATE_MSG)
			link_state_event(l_ptr, ACTIVATE_MSG);
		break;
	case STATE_MSG:

		msg_tol = msg_link_tolerance(msg);
		if (msg_tol)
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			pr_warn("%s<%s>, priority change %u->%u\n",
				link_rst_msg, l_ptr->name, l_ptr->priority,
				msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			tipc_link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg))
				max_pkt_ack = msg_size(msg);
		}

		/* Protocol message before retransmits, reduce loss risk */
		if (l_ptr->owner->bclink.recv_permitted)
			tipc_bclink_update_link_state(l_ptr->owner,
						      msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			tipc_link_send_proto_msg(l_ptr, STATE_MSG,
						 0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			l_ptr->stats.recv_nacks++;
			tipc_link_retransmit(l_ptr, l_ptr->first_out,
					     msg_seq_gap(msg));
		}
		break;
	}
exit:
	kfree_skb(buf);
}
/*
 * tipc_link_tunnel(): Send one message via a link belonging to
 * another bearer. Owner node is locked.
 */
static void tipc_link_tunnel(struct tipc_link *l_ptr,
			     struct tipc_msg *tunnel_hdr, struct tipc_msg *msg,
			     u32 selector)
{
	struct tipc_link *tunnel;
	struct sk_buff *buf;
	u32 length = msg_size(msg);

	tunnel = l_ptr->owner->active_links[selector & 1];
	if (!tipc_link_is_up(tunnel)) {
		pr_warn("%stunnel link no longer available\n", link_co_err);
		return;
	}
	msg_set_size(tunnel_hdr, length + INT_H_SIZE);
	buf = tipc_buf_acquire(length + INT_H_SIZE);
	if (!buf) {
		pr_warn("%sunable to send tunnel msg\n", link_co_err);
		return;
	}
	skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
	skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
	tipc_link_send_buf(tunnel, buf);
}
/*
 * changeover(): Send whole message queue via the remaining link
 *               Owner node is locked.
 */
void tipc_link_changeover(struct tipc_link *l_ptr)
{
	u32 msgcount = l_ptr->out_queue_size;
	struct sk_buff *crs = l_ptr->first_out;
	struct tipc_link *tunnel = l_ptr->owner->active_links[0];
	struct tipc_msg tunnel_hdr;
	int split_bundles;

	if (!tunnel)
		return;

	if (!l_ptr->owner->permit_changeover) {
		pr_warn("%speer did not permit changeover\n", link_co_err);
		return;
	}

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	msg_set_msgcnt(&tunnel_hdr, msgcount);

	if (!l_ptr->first_out) {
		struct sk_buff *buf;

		buf = tipc_buf_acquire(INT_H_SIZE);
		if (buf) {
			skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
			msg_set_size(&tunnel_hdr, INT_H_SIZE);
			tipc_link_send_buf(tunnel, buf);
		} else {
			pr_warn("%sunable to send changeover msg\n",
				link_co_err);
		}
		return;
	}

	split_bundles = (l_ptr->owner->active_links[0] !=
			 l_ptr->owner->active_links[1]);

	while (crs) {
		struct tipc_msg *msg = buf_msg(crs);

		if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
			struct tipc_msg *m = msg_get_wrapped(msg);
			unchar *pos = (unchar *)m;

			msgcount = msg_msgcnt(msg);
			while (msgcount--) {
				msg_set_seqno(m, msg_seqno(msg));
				tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
						 msg_link_selector(m));
				pos += align(msg_size(m));
				m = (struct tipc_msg *)pos;
			}
		} else {
			tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
					 msg_link_selector(msg));
		}
		crs = crs->next;
	}
}
void tipc_link_send_duplicate(struct tipc_link *l_ptr, struct tipc_link *tunnel)
{
	struct sk_buff *iter;
	struct tipc_msg tunnel_hdr;

	tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
		 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
	msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
	iter = l_ptr->first_out;
	while (iter) {
		struct sk_buff *outbuf;
		struct tipc_msg *msg = buf_msg(iter);
		u32 length = msg_size(msg);

		if (msg_user(msg) == MSG_BUNDLER)
			msg_set_type(msg, CLOSED_MSG);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));	/* Update */
		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
		msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
		outbuf = tipc_buf_acquire(length + INT_H_SIZE);
		if (outbuf == NULL) {
			pr_warn("%sunable to send duplicate msg\n",
				link_co_err);
			return;
		}
		skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
					       length);
		tipc_link_send_buf(tunnel, outbuf);
		if (!tipc_link_is_up(l_ptr))
			return;
		iter = iter->next;
	}
}
/**
 * buf_extract - extracts embedded TIPC message from another message
 * @skb: encapsulating message buffer
 * @from_pos: offset to extract from
 *
 * Returns a new message buffer containing an embedded message. The
 * encapsulating message itself is left unchanged.
 */
static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
{
	struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
	u32 size = msg_size(msg);
	struct sk_buff *eb;

	eb = tipc_buf_acquire(size);
	if (eb)
		skb_copy_to_linear_data(eb, msg, size);
	return eb;
}
/*
 * link_recv_changeover_msg(): Receive tunneled packet sent
 * via other link. Node is locked. Return extracted buffer.
 */
static int link_recv_changeover_msg(struct tipc_link **l_ptr,
				    struct sk_buff **buf)
{
	struct sk_buff *tunnel_buf = *buf;
	struct tipc_link *dest_link;
	struct tipc_msg *msg;
	struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
	u32 msg_typ = msg_type(tunnel_msg);
	u32 msg_count = msg_msgcnt(tunnel_msg);
	u32 bearer_id = msg_bearer_id(tunnel_msg);

	if (bearer_id >= MAX_BEARERS)
		goto exit;
	dest_link = (*l_ptr)->owner->links[bearer_id];
	if (!dest_link)
		goto exit;
	if (dest_link == *l_ptr) {
		pr_err("Unexpected changeover message on link <%s>\n",
		       (*l_ptr)->name);
		goto exit;
	}
	*l_ptr = dest_link;
	msg = msg_get_wrapped(tunnel_msg);

	if (msg_typ == DUPLICATE_MSG) {
		if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
			goto exit;
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf == NULL) {
			pr_warn("%sduplicate msg dropped\n", link_co_err);
			goto exit;
		}
		kfree_skb(tunnel_buf);
		return 1;
	}

	/* First original message ?: */
	if (tipc_link_is_up(dest_link)) {
		pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
			dest_link->name);
		tipc_link_reset(dest_link);
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	} else if (dest_link->exp_msg_count == START_CHANGEOVER) {
		dest_link->exp_msg_count = msg_count;
		if (!msg_count)
			goto exit;
	}

	/* Receive original message */
	if (dest_link->exp_msg_count == 0) {
		pr_warn("%sgot too many tunnelled messages\n", link_co_err);
		goto exit;
	}
	dest_link->exp_msg_count--;
	if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
		goto exit;
	} else {
		*buf = buf_extract(tunnel_buf, INT_H_SIZE);
		if (*buf != NULL) {
			kfree_skb(tunnel_buf);
			return 1;
		} else {
			pr_warn("%soriginal msg dropped\n", link_co_err);
		}
	}
exit:
	*buf = NULL;
	kfree_skb(tunnel_buf);
	return 0;
}
/*
 * Bundler functionality:
 */
void tipc_link_recv_bundle(struct sk_buff *buf)
{
	u32 msgcount = msg_msgcnt(buf_msg(buf));
	u32 pos = INT_H_SIZE;
	struct sk_buff *obuf;

	while (msgcount--) {
		obuf = buf_extract(buf, pos);
		if (obuf == NULL) {
			pr_warn("Link unable to unbundle message(s)\n");
			break;
		}
		pos += align(msg_size(buf_msg(obuf)));
		tipc_net_route_msg(obuf);
	}
	kfree_skb(buf);
}
/*
 *  Fragmentation/defragmentation:
 */

/*
 * link_send_long_buf: Entry for buffers needing fragmentation.
 * The buffer is complete, inclusive total message length.
 * Returns user data length.
 */
2282 static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
2284 struct sk_buff *buf_chain = NULL;
2285 struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
2286 struct tipc_msg *inmsg = buf_msg(buf);
2287 struct tipc_msg fragm_hdr;
2288 u32 insize = msg_size(inmsg);
2289 u32 dsz = msg_data_sz(inmsg);
2290 unchar *crs = buf->data;
2292 u32 pack_sz = l_ptr->max_pkt;
2293 u32 fragm_sz = pack_sz - INT_H_SIZE;
2297 if (msg_short(inmsg))
2298 destaddr = l_ptr->addr;
2300 destaddr = msg_destnode(inmsg);
2302 /* Prepare reusable fragment header: */
2303 tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2304 INT_H_SIZE, destaddr);
2306 /* Chop up message: */
2308 struct sk_buff *fragm;
2310 if (rest <= fragm_sz) {
2312 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2314 fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
2315 if (fragm == NULL) {
2319 buf_chain = buf_chain->next;
2324 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2326 msg_set_fragm_no(&fragm_hdr, fragm_no);
2327 skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
2328 skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
2330 buf_chain_tail->next = fragm;
2331 buf_chain_tail = fragm;
2335 msg_set_type(&fragm_hdr, FRAGMENT);
2339 /* Append chain of fragments to send queue & send them */
2340 l_ptr->long_msg_seq_no++;
2341 link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
2342 l_ptr->stats.sent_fragments += fragm_no;
2343 l_ptr->stats.sent_fragmented++;
2344 tipc_link_push_queue(l_ptr);
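
/*
 * Worked example, illustrative only: with l_ptr->max_pkt == 1500 the
 * payload room per fragment is 1500 - INT_H_SIZE == 1460 bytes, so a
 * 4000-byte message is chopped into three fragments carrying 1460, 1460
 * and 1080 bytes. The hypothetical helper below mirrors the fragment
 * count arithmetic (the same ceiling division produces exp_fragm_cnt in
 * the receive path).
 */
static __maybe_unused u32 example_fragm_cnt(u32 insize, u32 pack_sz)
{
	u32 fragm_sz = pack_sz - INT_H_SIZE;

	return insize / fragm_sz + !!(insize % fragm_sz);
}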
/*
 * A pending message being re-assembled must store certain values
 * to handle subsequent fragments correctly. The following functions
 * help storing these values in unused, available fields in the
 * pending message. This makes dynamic memory allocation unnecessary.
 */
static void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
{
	msg_set_seqno(buf_msg(buf), seqno);
}

static u32 get_fragm_size(struct sk_buff *buf)
{
	return msg_ack(buf_msg(buf));
}

static void set_fragm_size(struct sk_buff *buf, u32 sz)
{
	msg_set_ack(buf_msg(buf), sz);
}

static u32 get_expected_frags(struct sk_buff *buf)
{
	return msg_bcast_ack(buf_msg(buf));
}

static void set_expected_frags(struct sk_buff *buf, u32 exp)
{
	msg_set_bcast_ack(buf_msg(buf), exp);
}
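
/*
 * Sketch, a hypothetical helper mirroring the FIRST_FRAGMENT branch
 * below: a fresh pending buffer is stamped with the values that later
 * fragments are matched and accounted against. The seqno, ack and
 * bcast_ack fields of the pending buffer's own header carry no live
 * protocol state while it sits on the reassembly list, which is what
 * makes this field reuse safe.
 */
static __maybe_unused void example_init_pending(struct sk_buff *pbuf, u32 seqno,
						u32 fragm_sz, u32 exp_fragm_cnt)
{
	set_long_msg_seqno(pbuf, seqno);
	set_fragm_size(pbuf, fragm_sz);
	set_expected_frags(pbuf, exp_fragm_cnt - 1);
}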
/*
 * tipc_link_recv_fragment(): Called with node lock on. Returns
 * the reassembled buffer if message is complete.
 */
int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
			    struct tipc_msg **m)
{
	struct sk_buff *prev = NULL;
	struct sk_buff *fbuf = *fb;
	struct tipc_msg *fragm = buf_msg(fbuf);
	struct sk_buff *pbuf = *pending;
	u32 long_msg_seq_no = msg_long_msgno(fragm);

	*fb = NULL;

	/* Is there an incomplete message waiting for this fragment? */
	while (pbuf && ((buf_seqno(pbuf) != long_msg_seq_no) ||
			(msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
		prev = pbuf;
		pbuf = pbuf->next;
	}

	if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
		struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
		u32 msg_sz = msg_size(imsg);
		u32 fragm_sz = msg_data_sz(fragm);
		u32 exp_fragm_cnt;
		u32 max = TIPC_MAX_USER_MSG_SIZE + NAMED_H_SIZE;

		if (msg_type(imsg) == TIPC_MCAST_MSG)
			max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
		if (fragm_sz == 0 || msg_size(imsg) > max) {
			kfree_skb(fbuf);
			return 0;
		}
		exp_fragm_cnt = msg_sz / fragm_sz + !!(msg_sz % fragm_sz);
		pbuf = tipc_buf_acquire(msg_size(imsg));
		if (pbuf != NULL) {
			pbuf->next = *pending;
			*pending = pbuf;
			skb_copy_to_linear_data(pbuf, imsg,
						msg_data_sz(fragm));
			/*  Prepare buffer for subsequent fragments. */
			set_long_msg_seqno(pbuf, long_msg_seq_no);
			set_fragm_size(pbuf, fragm_sz);
			set_expected_frags(pbuf, exp_fragm_cnt - 1);
		} else {
			pr_debug("Link unable to reassemble fragmented message\n");
			kfree_skb(fbuf);
			return -1;
		}
		kfree_skb(fbuf);
		return 0;
	} else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
		u32 dsz = msg_data_sz(fragm);
		u32 fsz = get_fragm_size(pbuf);
		u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
		u32 exp_frags = get_expected_frags(pbuf) - 1;

		skb_copy_to_linear_data_offset(pbuf, crs,
					       msg_data(fragm), dsz);
		kfree_skb(fbuf);

		/* Is message complete? */
		if (exp_frags == 0) {
			if (prev)
				prev->next = pbuf->next;
			else
				*pending = pbuf->next;
			msg_reset_reroute_cnt(buf_msg(pbuf));
			*fb = pbuf;
			*m = buf_msg(pbuf);
			return 1;
		}
		set_expected_frags(pbuf, exp_frags);
		return 0;
	}
	kfree_skb(fbuf);
	return 0;
}
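
/*
 * Worked example, illustrative only: a non-first fragment with
 * msg_fragm_no() == 3 of a message fragmented at 1460 bytes is copied to
 * offset (3 - 1) * 1460 == 2920 in the pending buffer. Placement is thus
 * position-based rather than append-based, and the expected-fragments
 * counter simply decrements once per fragment received.
 */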
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
{
	if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
		return;

	l_ptr->tolerance = tolerance;
	l_ptr->continuity_interval =
		((tolerance / 4) > 500) ? 500 : tolerance / 4;
	l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
}
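
/*
 * Worked example, illustrative only: with a tolerance of 1500 ms (the
 * TIPC default), continuity_interval = min(1500 / 4, 500) = 375 ms and
 * abort_limit = 1500 / (375 / 4) = 1500 / 93 = 16 by integer division,
 * i.e. roughly 16 silent supervision intervals are tolerated before the
 * link gives up on its peer.
 */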
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
{
	/* Data messages from this node, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6;
	/* Transiting data messages, inclusive FIRST_FRAGM */
	l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300;
	l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600;
	l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900;
	l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200;
	l_ptr->queue_limit[CONN_MANAGER] = 1200;
	l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
	l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
	/* FRAGMENT and LAST_FRAGMENT packets */
	l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
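
/*
 * Worked example, illustrative only: tipc_link_set_queue_limits(l, 50)
 * yields per-importance limits of 50, 64, 80 and 96 packets for locally
 * originated data (50 / 3 == 16 by integer division, then * 4, * 5 and
 * * 6), while the limits for transiting and protocol messages stay at
 * the fixed values set above.
 */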
/**
 * link_find_link - locate link by name
 * @name: ptr to link name string
 * @node: ptr to area to be filled with ptr to associated node
 *
 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
 * this also prevents link deletion.
 *
 * Returns pointer to link (or NULL if invalid link name).
 */
static struct tipc_link *link_find_link(const char *name,
					struct tipc_node **node)
{
	struct tipc_link *l_ptr;
	struct tipc_node *n_ptr;
	int i;

	list_for_each_entry(n_ptr, &tipc_node_list, list) {
		for (i = 0; i < MAX_BEARERS; i++) {
			l_ptr = n_ptr->links[i];
			if (l_ptr && !strcmp(l_ptr->name, name))
				goto found;
		}
	}
	l_ptr = NULL;
	n_ptr = NULL;
found:
	*node = n_ptr;
	return l_ptr;
}
/**
 * link_value_is_valid - validate proposed link tolerance/priority/window
 * @cmd: value type (TIPC_CMD_SET_LINK_*)
 * @new_value: the new value
 *
 * Returns 1 if value is within range, 0 if not.
 */
static int link_value_is_valid(u16 cmd, u32 new_value)
{
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		return (new_value >= TIPC_MIN_LINK_TOL) &&
			(new_value <= TIPC_MAX_LINK_TOL);
	case TIPC_CMD_SET_LINK_PRI:
		return (new_value <= TIPC_MAX_LINK_PRI);
	case TIPC_CMD_SET_LINK_WINDOW:
		return (new_value >= TIPC_MIN_LINK_WIN) &&
			(new_value <= TIPC_MAX_LINK_WIN);
	}
	return 0;
}
/**
 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
 * @name: ptr to link, bearer, or media name
 * @new_value: new value of link, bearer, or media setting
 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
 *
 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
 *
 * Returns 0 if value updated and negative value on error.
 */
static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
{
	struct tipc_node *node;
	struct tipc_link *l_ptr;
	struct tipc_bearer *b_ptr;
	struct tipc_media *m_ptr;
	int res = 0;

	l_ptr = link_find_link(name, &node);
	if (l_ptr) {
		/*
		 * acquire node lock for tipc_link_send_proto_msg().
		 * see "TIPC locking policy" in net.c.
		 */
		tipc_node_lock(node);
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			link_set_supervision_props(l_ptr, new_value);
			tipc_link_send_proto_msg(l_ptr,
				STATE_MSG, 0, 0, new_value, 0, 0);
			break;
		case TIPC_CMD_SET_LINK_PRI:
			l_ptr->priority = new_value;
			tipc_link_send_proto_msg(l_ptr,
				STATE_MSG, 0, 0, 0, new_value, 0);
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			tipc_link_set_queue_limits(l_ptr, new_value);
			break;
		default:
			res = -EINVAL;
			break;
		}
		tipc_node_unlock(node);
		return res;
	}

	b_ptr = tipc_bearer_find(name);
	if (b_ptr) {
		switch (cmd) {
		case TIPC_CMD_SET_LINK_TOL:
			b_ptr->tolerance = new_value;
			break;
		case TIPC_CMD_SET_LINK_PRI:
			b_ptr->priority = new_value;
			break;
		case TIPC_CMD_SET_LINK_WINDOW:
			b_ptr->window = new_value;
			break;
		default:
			res = -EINVAL;
			break;
		}
		return res;
	}

	m_ptr = tipc_media_find(name);
	if (!m_ptr)
		return -ENODEV;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		m_ptr->tolerance = new_value;
		break;
	case TIPC_CMD_SET_LINK_PRI:
		m_ptr->priority = new_value;
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		m_ptr->window = new_value;
		break;
	default:
		res = -EINVAL;
		break;
	}
	return res;
}
struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				     u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	if (!link_value_is_valid(cmd, new_value))
		return tipc_cfg_reply_error_string(
			"cannot change, value invalid");

	if (!strcmp(args->name, tipc_bclink_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (tipc_bclink_set_queue_limits(new_value) == 0))
			return tipc_cfg_reply_none();
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change setting on broadcast link)");
	}

	read_lock_bh(&tipc_net_lock);
	res = link_cmd_set_value(args->name, new_value, cmd);
	read_unlock_bh(&tipc_net_lock);
	if (res)
		return tipc_cfg_reply_error_string("cannot change link setting");

	return tipc_cfg_reply_none();
}
/**
 * link_reset_statistics - reset link statistics
 * @l_ptr: pointer to link
 */
static void link_reset_statistics(struct tipc_link *l_ptr)
{
	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
	l_ptr->stats.sent_info = l_ptr->next_out_no;
	l_ptr->stats.recv_info = l_ptr->next_in_no;
}

struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
{
	char *link_name;
	struct tipc_link *l_ptr;
	struct tipc_node *node;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	link_name = (char *)TLV_DATA(req_tlv_area);
	if (!strcmp(link_name, tipc_bclink_name)) {
		if (tipc_bclink_reset_stats())
			return tipc_cfg_reply_error_string("link not found");
		return tipc_cfg_reply_none();
	}

	read_lock_bh(&tipc_net_lock);
	l_ptr = link_find_link(link_name, &node);
	if (!l_ptr) {
		read_unlock_bh(&tipc_net_lock);
		return tipc_cfg_reply_error_string("link not found");
	}

	tipc_node_lock(node);
	link_reset_statistics(l_ptr);
	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return tipc_cfg_reply_none();
}

/**
 * percent - convert count to a percentage of total (rounding up or down)
 */
static u32 percent(u32 count, u32 total)
{
	return (count * 100 + (total / 2)) / total;
}
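
/*
 * Worked example, illustrative only: adding total / 2 before dividing
 * rounds to nearest instead of truncating: percent(1, 3) == (100 + 1) / 3
 * == 33, while percent(2, 3) == (200 + 1) / 3 == 67. Note that a zero
 * total would divide by zero; the caller below substitutes 1 when the
 * profile sample is empty.
 */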
/**
 * tipc_link_stats - print link statistics
 * @name: link name
 * @buf: print buffer area
 * @buf_size: size of print buffer area
 *
 * Returns length of print buffer data string (or 0 if error)
 */
static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct tipc_link *l;
	struct tipc_stats *s;
	struct tipc_node *node;
	char *status;
	u32 profile_total = 0;
	int ret;

	if (!strcmp(name, tipc_bclink_name))
		return tipc_bclink_stats(buf, buf_size);

	read_lock_bh(&tipc_net_lock);
	l = link_find_link(name, &node);
	if (!l) {
		read_unlock_bh(&tipc_net_lock);
		return 0;
	}
	tipc_node_lock(node);
	s = &l->stats;

	if (tipc_link_is_active(l))
		status = "ACTIVE";
	else if (tipc_link_is_up(l))
		status = "STANDBY";
	else
		status = "DEFUNCT";

	ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
			    "  %s  MTU:%u  Priority:%u  Tolerance:%u ms"
			    "  Window:%u packets\n",
			    l->name, status, l->max_pkt, l->priority,
			    l->tolerance, l->queue_limit[0]);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_in_no - s->recv_info, s->recv_fragments,
			     s->recv_fragmented, s->recv_bundles,
			     s->recv_bundled);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX packets:%u fragments:%u/%u bundles:%u/%u\n",
			     l->next_out_no - s->sent_info, s->sent_fragments,
			     s->sent_fragmented, s->sent_bundles,
			     s->sent_bundled);

	profile_total = s->msg_length_counts;
	if (!profile_total)
		profile_total = 1;

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX profile sample:%u packets  average:%u octets\n"
			     "  0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
			     "-16384:%u%% -32768:%u%% -66000:%u%%\n",
			     s->msg_length_counts,
			     s->msg_lengths_total / profile_total,
			     percent(s->msg_length_profile[0], profile_total),
			     percent(s->msg_length_profile[1], profile_total),
			     percent(s->msg_length_profile[2], profile_total),
			     percent(s->msg_length_profile[3], profile_total),
			     percent(s->msg_length_profile[4], profile_total),
			     percent(s->msg_length_profile[5], profile_total),
			     percent(s->msg_length_profile[6], profile_total));

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  RX states:%u probes:%u naks:%u defs:%u"
			     " dups:%u\n", s->recv_states, s->recv_probes,
			     s->recv_nacks, s->deferred_recv, s->duplicates);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  TX states:%u probes:%u naks:%u acks:%u"
			     " dups:%u\n", s->sent_states, s->sent_probes,
			     s->sent_nacks, s->sent_acks, s->retransmitted);

	ret += tipc_snprintf(buf + ret, buf_size - ret,
			     "  Congestion link:%u  Send queue"
			     " max:%u avg:%u\n", s->link_congs,
			     s->max_queue_sz, s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);

	tipc_node_unlock(node);
	read_unlock_bh(&tipc_net_lock);
	return ret;
}
struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	int str_len;
	int pb_len;
	char *pb;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
	if (!buf)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	pb = TLV_DATA(rep_tlv);
	pb_len = ULTRA_STRING_MAX_LEN;
	str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
				  pb, pb_len);
	if (!str_len) {
		kfree_skb(buf);
		return tipc_cfg_reply_error_string("link not found");
	}
	str_len += 1;	/* for "\0" */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
/**
 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
 * @dest: network address of destination node
 * @selector: used to select from set of active links
 *
 * If no active link can be found, uses default maximum packet size.
 */
u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
{
	struct tipc_node *n_ptr;
	struct tipc_link *l_ptr;
	u32 res = MAX_PKT_DEFAULT;

	if (dest == tipc_own_addr)
		return MAX_MSG_SIZE;

	read_lock_bh(&tipc_net_lock);
	n_ptr = tipc_node_find(dest);
	if (n_ptr) {
		tipc_node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector & 1];
		if (l_ptr)
			res = l_ptr->max_pkt;
		tipc_node_unlock(n_ptr);
	}
	read_unlock_bh(&tipc_net_lock);
	return res;
}
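
/*
 * Worked example, illustrative only: the low bit of 'selector' picks one
 * of the node's two active links, so selector 7 consults
 * active_links[1] while selector 8 consults active_links[0]. Callers
 * that derive the selector from a stable value thus consistently see
 * the MTU of the link their traffic will actually use.
 */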
static void link_print(struct tipc_link *l_ptr, const char *str)
{
	pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name);

	if (link_working_unknown(l_ptr))
		pr_cont(":WU\n");
	else if (link_reset_reset(l_ptr))
		pr_cont(":RR\n");
	else if (link_reset_unknown(l_ptr))
		pr_cont(":RU\n");
	else if (link_working_working(l_ptr))
		pr_cont(":WW\n");
	else
		pr_cont("\n");
}