2 * net/tipc/cluster.c: TIPC cluster management routines
4 * Copyright (c) 2003-2005, Ericsson Research Canada
5 * Copyright (c) 2005, Wind River Systems
6 * Copyright (c) 2005-2006, Ericsson AB
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
12 * Redistributions of source code must retain the above copyright notice, this
13 * list of conditions and the following disclaimer.
14 * Redistributions in binary form must reproduce the above copyright notice,
15 * this list of conditions and the following disclaimer in the documentation
16 * and/or other materials provided with the distribution.
17 * Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from this
19 * software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
37 #include "node_subscr.h"
/* Forward declarations for helpers defined later in this file */
44 void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
45 u32 lower, u32 upper);
46 struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
/* Shortcut to the node table of the cluster this node belongs to;
 * aliased to c_ptr->nodes when the own cluster is created */
48 struct node **local_nodes = 0;
/* Map of nodes currently eligible for cluster broadcast */
49 struct node_map cluster_bcast_nodes = {0,{0,}};
/* Highest valid slave-node table index; set once in cluster_init() */
50 u32 highest_allowed_slave = 0;
/*
 * cluster_create - create the cluster object for the cluster containing <addr>
 *
 * Allocates and zeroes the cluster structure and its node-pointer table,
 * then attaches the cluster to its zone (creating the zone if necessary).
 * Uses GFP_ATOMIC since this may run in non-sleepable context.
 * NOTE(review): allocation-failure error paths, some local declarations
 * (z_ptr, max_nodes, alloc) and the return statement are not visible in
 * this extract.
 */
52 struct cluster *cluster_create(u32 addr)
55 struct cluster *c_ptr;
59 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
62 memset(c_ptr, 0, sizeof(*c_ptr));
/* Cluster address is <zone.cluster.0>, i.e. node field cleared */
64 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
/* Own cluster must also have room for slave-node entries */
65 if (in_own_cluster(addr))
66 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
68 max_nodes = tipc_max_nodes + 1;
69 alloc = sizeof(void *) * (max_nodes + 1);
70 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
71 if (c_ptr->nodes == NULL) {
75 memset(c_ptr->nodes, 0, alloc);
/* Publish the own cluster's node table for fast local lookup */
76 if (in_own_cluster(addr))
77 local_nodes = c_ptr->nodes;
/* No slaves or standard nodes attached yet */
78 c_ptr->highest_slave = LOWEST_SLAVE - 1;
79 c_ptr->highest_node = 0;
/* Link the new cluster into its zone, creating the zone on demand */
81 z_ptr = zone_find(tipc_zone(addr));
83 z_ptr = zone_create(addr);
86 zone_attach_cluster(z_ptr, c_ptr);
/*
 * cluster_delete - delete a cluster and every node attached to it
 * NOTE(review): the guard against a NULL c_ptr and the final kfree of
 * the node table / cluster struct are not visible in this extract.
 */
97 void cluster_delete(struct cluster *c_ptr)
/* Delete standard nodes (table slots 1..highest_node) */
103 for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
104 node_delete(c_ptr->nodes[n_num]);
/* Delete slave nodes (table slots LOWEST_SLAVE..highest_slave) */
106 for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
107 node_delete(c_ptr->nodes[n_num]);
/*
 * cluster_next_node - find next reachable node after <addr> in this cluster
 *
 * Scans upward from the node following <addr>, then wraps around to the
 * start of the table, returning the first node with an active link.
 * NOTE(review): the returns inside the loops and the "not found" return
 * are not visible in this extract.
 */
113 u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
116 u32 n_num = tipc_node(addr) + 1;
/* Search upward from the successor of <addr> */
120 for (; n_num <= c_ptr->highest_node; n_num++) {
121 n_ptr = c_ptr->nodes[n_num];
122 if (n_ptr && node_has_active_links(n_ptr))
/* Wrap around: search from the table start up to <addr> itself */
125 for (n_num = 1; n_num < tipc_node(addr); n_num++) {
126 n_ptr = c_ptr->nodes[n_num];
127 if (n_ptr && node_has_active_links(n_ptr))
/*
 * cluster_attach_node - insert a node object into its cluster's node table
 *
 * The table slot must be empty; highest_node tracks the largest slot in use.
 */
133 void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
135 u32 n_num = tipc_node(n_ptr->addr);
136 u32 max_n_num = tipc_max_nodes;
/* Own cluster may also contain slave nodes, which use higher slot numbers */
138 if (in_own_cluster(n_ptr->addr))
139 max_n_num = highest_allowed_slave;
141 assert(n_num <= max_n_num);
/* Slot must not already be occupied */
142 assert(c_ptr->nodes[n_num] == 0);
143 c_ptr->nodes[n_num] = n_ptr;
144 if (n_num > c_ptr->highest_node)
145 c_ptr->highest_node = n_num;
/*
 * cluster_select_router - select router to a cluster
 *
 * Uses deterministic and fair algorithm: hashes <ref> into a start slot,
 * then scans upward with wrap-around for a node that is up.
 * Only meaningful for remote clusters (asserted below).
 * NOTE(review): the computation of tstart/n_num from mask and ref, and
 * the "no router found" path, are not visible in this extract.
 */
154 u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
157 u32 ulim = c_ptr->highest_node;
161 assert(!in_own_cluster(c_ptr->addr));
165 /* Start entry must be random */
166 mask = tipc_max_nodes;
172 /* Lookup upwards with wrap-around */
174 if (node_is_up(c_ptr->nodes[n_num]))
176 } while (++n_num <= ulim);
/* Wrap around: scan from table start up to the chosen start entry */
180 if (node_is_up(c_ptr->nodes[n_num]))
182 } while (++n_num < tstart);
186 assert(n_num <= ulim);
/* Delegate final router choice to the selected node */
187 return node_select_router(c_ptr->nodes[n_num], ref);
/*
 * cluster_select_node - select destination node within a remote cluster
 *
 * Uses deterministic and fair algorithm: reduces <selector> modulo a
 * power-of-two mask to a start slot, then scans upward with wrap-around
 * for a node with an active link. Returns NULL-equivalent when the
 * cluster is empty (return statements not visible in this extract).
 */
196 struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
199 u32 mask = tipc_max_nodes;
/* Only valid for remote clusters */
202 assert(!in_own_cluster(c_ptr->addr));
203 if (!c_ptr->highest_node)
206 /* Start entry must be random */
/* Shrink mask until it fits within the populated part of the table */
207 while (mask > c_ptr->highest_node) {
/* Slot 0 is unused, so force a minimum start entry of 1 */
210 start_entry = (selector & mask) ? selector & mask : 1u;
211 assert(start_entry <= c_ptr->highest_node);
213 /* Lookup upwards with wrap-around */
214 for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
215 if (node_has_active_links(c_ptr->nodes[n_num]))
216 return c_ptr->nodes[n_num];
218 for (n_num = 1; n_num < start_entry; n_num++) {
219 if (node_has_active_links(c_ptr->nodes[n_num]))
220 return c_ptr->nodes[n_num];
/*
 * Routing table management: See description in node.c
 */

/*
 * cluster_prepare_routing_msg - allocate and initialize a routing message
 *
 * Builds an sk_buff sized for an internal header plus <data_size> bytes of
 * routing payload, zeroes it, and initializes a ROUTE_DISTRIBUTOR header
 * addressed to <dest>. NOTE(review): the NULL check on buf_acquire() and
 * the return of buf are not visible in this extract.
 */
229 struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
231 u32 size = INT_H_SIZE + data_size;
232 struct sk_buff *buf = buf_acquire(size);
233 struct tipc_msg *msg;
/* Zero the whole message so unused payload slots read as "no node" */
237 memset((char *)msg, 0, size);
238 msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
/*
 * cluster_bcast_new_route - tell nodes in range [lower,upper] about a
 * newly reachable destination <dest>
 *
 * Sends a ROUTE_ADDITION message via cluster_multicast(); on allocation
 * failure only a warning is logged (best-effort distribution).
 */
243 void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
244 u32 lower, u32 upper)
246 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
247 struct tipc_msg *msg;
251 msg_set_remote_node(msg, dest);
252 msg_set_type(msg, ROUTE_ADDITION);
253 cluster_multicast(c_ptr, buf, lower, upper);
/* Allocation failed: route change is dropped, not retried */
255 warn("Memory squeeze: broadcast of new route failed\n");
/*
 * cluster_bcast_lost_route - tell nodes in range [lower,upper] that
 * destination <dest> is no longer reachable
 *
 * Mirror image of cluster_bcast_new_route(), using ROUTE_REMOVAL.
 */
259 void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
260 u32 lower, u32 upper)
262 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
263 struct tipc_msg *msg;
267 msg_set_remote_node(msg, dest);
268 msg_set_type(msg, ROUTE_REMOVAL);
269 cluster_multicast(c_ptr, buf, lower, upper);
/* Allocation failed: loss notification is dropped, not retried */
271 warn("Memory squeeze: broadcast of lost route failed\n");
/*
 * cluster_send_slave_routes - send SLAVE_ROUTING_TABLE to node <dest>
 *
 * Builds a bitmap-style table with one byte slot per slave node that has
 * an active link, and unicasts it to <dest> (a standard node in the own
 * cluster). Does nothing if no slaves exist.
 */
275 void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
278 struct tipc_msg *msg;
279 u32 highest = c_ptr->highest_slave;
/* Only valid own-cluster, standard-node destinations get slave tables */
283 assert(!is_slave(dest));
284 assert(in_own_cluster(dest));
285 assert(in_own_cluster(c_ptr->addr));
/* No slave nodes attached: nothing to report */
286 if (highest <= LOWEST_SLAVE)
/* Payload holds one octet per slave slot LOWEST_SLAVE..highest */
288 buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
292 msg_set_remote_node(msg, c_ptr->addr);
293 msg_set_type(msg, SLAVE_ROUTING_TABLE);
294 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
295 if (c_ptr->nodes[n_num] &&
296 node_has_active_links(c_ptr->nodes[n_num])) {
/* Mark this slave slot as reachable in the payload */
298 msg_set_dataoctet(msg, n_num);
302 link_send(buf, dest, dest);
/* NOTE(review): message text says "lost route" but this is the
 * slave-routing-table path; looks like a copy-paste from
 * cluster_bcast_lost_route() -- confirm before changing */
306 warn("Memory squeeze: broadcast of lost route failed\n");
/*
 * cluster_send_ext_routes - send EXT_ROUTING_TABLE for a remote cluster
 * to node <dest> in the own cluster
 *
 * Reports which standard nodes of remote cluster c_ptr currently have
 * active links, one octet slot per node number.
 */
310 void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
313 struct tipc_msg *msg;
314 u32 highest = c_ptr->highest_node;
/* External tables describe remote clusters only; bail out for own */
318 if (in_own_cluster(c_ptr->addr))
320 assert(!is_slave(dest));
321 assert(in_own_cluster(dest));
322 highest = c_ptr->highest_node;
/* Payload: one octet per node slot 0..highest */
323 buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
326 msg_set_remote_node(msg, c_ptr->addr);
327 msg_set_type(msg, EXT_ROUTING_TABLE);
328 for (n_num = 1; n_num <= highest; n_num++) {
329 if (c_ptr->nodes[n_num] &&
330 node_has_active_links(c_ptr->nodes[n_num])) {
/* Mark this remote node as reachable in the payload */
332 msg_set_dataoctet(msg, n_num);
336 link_send(buf, dest, dest);
340 warn("Memory squeeze: broadcast of external route failed\n");
/*
 * cluster_send_local_routes - send LOCAL_ROUTING_TABLE to slave node <dest>
 *
 * Reports which standard nodes in the own cluster currently have active
 * links, so the slave learns which routers it can use.
 */
344 void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
347 struct tipc_msg *msg;
348 u32 highest = c_ptr->highest_node;
/* Local tables go only to slave nodes of the own cluster */
352 assert(is_slave(dest));
353 assert(in_own_cluster(c_ptr->addr));
354 buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
357 msg_set_remote_node(msg, c_ptr->addr);
358 msg_set_type(msg, LOCAL_ROUTING_TABLE);
359 for (n_num = 1; n_num <= highest; n_num++) {
360 if (c_ptr->nodes[n_num] &&
361 node_has_active_links(c_ptr->nodes[n_num])) {
/* Mark this standard node as reachable in the payload */
363 msg_set_dataoctet(msg, n_num);
367 link_send(buf, dest, dest);
371 warn("Memory squeeze: broadcast of local route failed\n");
/*
 * cluster_recv_routing_table - process an incoming ROUTE_DISTRIBUTOR message
 *
 * Dispatches on message type: full routing tables (local/external/slave)
 * register the sending node as a router toward every node flagged in the
 * payload; ROUTE_ADDITION/ROUTE_REMOVAL update a single route. Creates
 * cluster and node objects on demand as new addresses are learned.
 * NOTE(review): several case labels, break statements, and NULL checks
 * after node_create() are not visible in this extract.
 */
375 void cluster_recv_routing_table(struct sk_buff *buf)
377 struct tipc_msg *msg = buf_msg(buf);
378 struct cluster *c_ptr;
383 u32 rem_node = msg_remote_node(msg);
/* Learn about previously unknown clusters on the fly */
388 c_ptr = cluster_find(rem_node);
390 c_ptr = cluster_create(rem_node);
/* Payload starts after the message header; one octet per node slot */
397 node_table = buf->data + msg_hdr_sz(msg);
398 table_size = msg_size(msg) - msg_hdr_sz(msg);
/* The node that forwarded this table acts as router to its entries */
399 router = msg_prevnode(msg);
400 z_num = tipc_zone(rem_node);
401 c_num = tipc_cluster(rem_node);
403 switch (msg_type(msg)) {
404 case LOCAL_ROUTING_TABLE:
/* Only slaves receive local tables; then handled exactly like
 * external tables -- intentional fallthrough */
405 assert(is_slave(tipc_own_addr));
/* fall through */
406 case EXT_ROUTING_TABLE:
407 for (n_num = 1; n_num < table_size; n_num++) {
/* Non-zero octet => node n_num reachable via <router> */
408 if (node_table[n_num]) {
409 u32 addr = tipc_addr(z_num, c_num, n_num);
410 n_ptr = c_ptr->nodes[n_num];
412 n_ptr = node_create(addr);
415 node_add_router(n_ptr, router);
419 case SLAVE_ROUTING_TABLE:
/* Slave tables are only exchanged between standard nodes of
 * the own cluster */
420 assert(!is_slave(tipc_own_addr));
421 assert(in_own_cluster(c_ptr->addr));
422 for (n_num = 1; n_num < table_size; n_num++) {
423 if (node_table[n_num]) {
/* Payload index is relative to LOWEST_SLAVE */
424 u32 slave_num = n_num + LOWEST_SLAVE;
425 u32 addr = tipc_addr(z_num, c_num, slave_num);
426 n_ptr = c_ptr->nodes[slave_num];
428 n_ptr = node_create(addr);
431 node_add_router(n_ptr, router);
/* ROUTE_ADDITION: single new route to rem_node via router */
436 if (!is_slave(tipc_own_addr)) {
437 assert(!in_own_cluster(c_ptr->addr)
438 || is_slave(rem_node));
440 assert(in_own_cluster(c_ptr->addr)
441 && !is_slave(rem_node));
443 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
445 n_ptr = node_create(rem_node);
447 node_add_router(n_ptr, router);
/* ROUTE_REMOVAL: single lost route to rem_node via router */
450 if (!is_slave(tipc_own_addr)) {
451 assert(!in_own_cluster(c_ptr->addr)
452 || is_slave(rem_node));
454 assert(in_own_cluster(c_ptr->addr)
455 && !is_slave(rem_node));
457 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
459 node_remove_router(n_ptr, router);
/* default: unknown message type is a protocol violation */
462 assert(!"Illegal routing manager message received\n");
/*
 * cluster_remove_as_router - forget <router> as a route to this cluster
 *
 * Walks every node in the relevant part of the node table and removes
 * <router> from its router set. For the own cluster only slave slots are
 * scanned (standard nodes there are reached directly, not via routers).
 */
467 void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
473 if (is_slave(router))
474 return; /* Slave nodes can not be routers */
476 if (in_own_cluster(c_ptr->addr)) {
/* Own cluster: only slave nodes depend on routers */
477 start_entry = LOWEST_SLAVE;
478 tstop = c_ptr->highest_slave;
/* Remote cluster: all standard nodes may use this router */
481 tstop = c_ptr->highest_node;
484 for (n_num = start_entry; n_num <= tstop; n_num++) {
485 if (c_ptr->nodes[n_num]) {
486 node_remove_router(c_ptr->nodes[n_num], router);
/*
 * cluster_multicast - multicast message to local nodes
 *
 * Sends a copy of <buf> to every node in table range [lower,upper] that
 * has an active link. The range must lie entirely within either the
 * standard-node slots or the slave slots (asserted below).
 * NOTE(review): the final disposition of the original <buf> (send or
 * free) is not visible in this extract.
 */
495 void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
496 u32 lower, u32 upper)
498 struct sk_buff *buf_copy;
503 assert(lower <= upper);
/* Range must be wholly standard nodes or wholly slaves */
504 assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
505 ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
506 assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
507 ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
508 assert(in_own_cluster(c_ptr->addr));
/* Clamp the scan to the populated part of the chosen sub-table */
510 tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
513 for (n_num = lower; n_num <= tstop; n_num++) {
514 n_ptr = c_ptr->nodes[n_num];
515 if (n_ptr && node_has_active_links(n_ptr)) {
/* Each recipient gets its own copy with its own destination */
516 buf_copy = skb_copy(buf, GFP_ATOMIC);
517 if (buf_copy == NULL)
519 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
520 link_send(buf_copy, n_ptr->addr, n_ptr->addr);
/*
 * cluster_broadcast - broadcast message to all nodes within cluster
 *
 * Runs the send loop twice: first over standard-node slots, then (after
 * switching tstart/tstop) over slave slots. Each reachable node gets its
 * own copy of <buf>. Only active in TIPC_NET_MODE.
 * NOTE(review): initialization of tstart and the final disposition of
 * the original <buf> are not visible in this extract.
 */
530 void cluster_broadcast(struct sk_buff *buf)
532 struct sk_buff *buf_copy;
533 struct cluster *c_ptr;
540 if (tipc_mode == TIPC_NET_MODE) {
541 c_ptr = cluster_find(tipc_own_addr);
542 assert(in_own_cluster(c_ptr->addr)); /* For now */
544 /* Send to standard nodes, then repeat loop sending to slaves */
546 tstop = c_ptr->highest_node;
547 for (node_type = 1; node_type <= 2; node_type++) {
548 for (n_num = tstart; n_num <= tstop; n_num++) {
549 n_ptr = c_ptr->nodes[n_num];
550 if (n_ptr && node_has_active_links(n_ptr)) {
551 buf_copy = skb_copy(buf, GFP_ATOMIC);
552 if (buf_copy == NULL)
554 msg_set_destnode(buf_msg(buf_copy),
556 link_send(buf_copy, n_ptr->addr,
/* Second pass: switch the range to the slave slots */
560 tstart = LOWEST_SLAVE;
561 tstop = c_ptr->highest_slave;
/*
 * cluster_init - initialize cluster support and create the own cluster
 *
 * Fixes the upper bound for slave-node table slots, then creates the
 * cluster object for this node's own address.
 * Returns TIPC_OK on success, -ENOMEM if cluster creation failed.
 */
568 int cluster_init(void)
570 highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
571 return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;