/*
 * net/tipc/name_distr.c: TIPC name distribution code
 *
 * Copyright (c) 2000-2006, Ericsson AB
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "core.h"
#include "link.h"
#include "name_distr.h"

#define ITEM_SIZE sizeof(struct distr_item)
42 * struct publ_list - list of publications made by this node
43 * @list: circular list of publications
44 * @list_size: number of entries in list
47 struct list_head list;
51 static struct publ_list publ_zone = {
52 .list = LIST_HEAD_INIT(publ_zone.list),
56 static struct publ_list publ_cluster = {
57 .list = LIST_HEAD_INIT(publ_cluster.list),
61 static struct publ_list publ_node = {
62 .list = LIST_HEAD_INIT(publ_node.list),
66 static struct publ_list *publ_lists[] = {
68 &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
69 &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
70 &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
75 * publ_to_item - add publication info to a publication message
77 static void publ_to_item(struct distr_item *i, struct publication *p)
79 i->type = htonl(p->type);
80 i->lower = htonl(p->lower);
81 i->upper = htonl(p->upper);
82 i->ref = htonl(p->ref);
83 i->key = htonl(p->key);
87 * named_prepare_buf - allocate & initialize a publication message
89 static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
91 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
96 tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest);
97 msg_set_size(msg, INT_H_SIZE + size);
102 void named_cluster_distribute(struct sk_buff *buf)
104 struct sk_buff *obuf;
105 struct tipc_node *node;
109 list_for_each_entry_rcu(node, &tipc_node_list, list) {
111 if (in_own_node(dnode))
113 if (!tipc_node_active_links(node))
115 obuf = skb_copy(buf, GFP_ATOMIC);
118 msg_set_destnode(buf_msg(obuf), dnode);
119 tipc_link_xmit(obuf, dnode, dnode);
127 * tipc_named_publish - tell other nodes about a new publication by this node
129 struct sk_buff *tipc_named_publish(struct publication *publ)
132 struct distr_item *item;
134 list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list);
135 publ_lists[publ->scope]->size++;
137 if (publ->scope == TIPC_NODE_SCOPE)
140 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
142 pr_warn("Publication distribution failure\n");
146 item = (struct distr_item *)msg_data(buf_msg(buf));
147 publ_to_item(item, publ);
152 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
154 struct sk_buff *tipc_named_withdraw(struct publication *publ)
157 struct distr_item *item;
159 list_del(&publ->local_list);
160 publ_lists[publ->scope]->size--;
162 if (publ->scope == TIPC_NODE_SCOPE)
165 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
167 pr_warn("Withdrawal distribution failure\n");
171 item = (struct distr_item *)msg_data(buf_msg(buf));
172 publ_to_item(item, publ);
177 * named_distribute - prepare name info for bulk distribution to another node
178 * @msg_list: list of messages (buffers) to be returned from this function
179 * @dnode: node to be updated
180 * @pls: linked list of publication items to be packed into buffer chain
182 static void named_distribute(struct list_head *msg_list, u32 dnode,
183 struct publ_list *pls)
185 struct publication *publ;
186 struct sk_buff *buf = NULL;
187 struct distr_item *item = NULL;
188 uint dsz = pls->size * ITEM_SIZE;
189 uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
193 list_for_each_entry(publ, &pls->list, local_list) {
194 /* Prepare next buffer: */
196 msg_rem = min_t(uint, rem, msg_dsz);
198 buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
200 pr_warn("Bulk publication failure\n");
203 item = (struct distr_item *)msg_data(buf_msg(buf));
206 /* Pack publication into message: */
207 publ_to_item(item, publ);
209 msg_rem -= ITEM_SIZE;
211 /* Append full buffer to list: */
213 list_add_tail((struct list_head *)buf, msg_list);
220 * tipc_named_node_up - tell specified node about all publications by this node
222 void tipc_named_node_up(u32 dnode)
225 struct sk_buff *buf_chain;
227 read_lock_bh(&tipc_nametbl_lock);
228 named_distribute(&msg_list, dnode, &publ_cluster);
229 named_distribute(&msg_list, dnode, &publ_zone);
230 read_unlock_bh(&tipc_nametbl_lock);
232 /* Convert circular list to linear list and send: */
233 buf_chain = (struct sk_buff *)msg_list.next;
234 ((struct sk_buff *)msg_list.prev)->next = NULL;
235 tipc_link_xmit(buf_chain, dnode, dnode);
239 * named_purge_publ - remove publication associated with a failed node
241 * Invoked for each publication issued by a newly failed node.
242 * Removes publication structure from name table & deletes it.
244 static void named_purge_publ(struct publication *publ)
246 struct publication *p;
248 write_lock_bh(&tipc_nametbl_lock);
249 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
250 publ->node, publ->ref, publ->key);
252 tipc_nodesub_unsubscribe(&p->subscr);
253 write_unlock_bh(&tipc_nametbl_lock);
256 pr_err("Unable to remove publication from failed node\n"
257 " (type=%u, lower=%u, node=0x%x, ref=%u, key=%u)\n",
258 publ->type, publ->lower, publ->node, publ->ref,
266 * tipc_named_rcv - process name table update message sent by another node
268 void tipc_named_rcv(struct sk_buff *buf)
270 struct publication *publ;
271 struct tipc_msg *msg = buf_msg(buf);
272 struct distr_item *item = (struct distr_item *)msg_data(msg);
273 u32 count = msg_data_sz(msg) / ITEM_SIZE;
275 write_lock_bh(&tipc_nametbl_lock);
277 if (msg_type(msg) == PUBLICATION) {
278 publ = tipc_nametbl_insert_publ(ntohl(item->type),
286 tipc_nodesub_subscribe(&publ->subscr,
292 } else if (msg_type(msg) == WITHDRAWAL) {
293 publ = tipc_nametbl_remove_publ(ntohl(item->type),
300 tipc_nodesub_unsubscribe(&publ->subscr);
303 pr_err("Unable to remove publication by node 0x%x\n"
304 " (type=%u, lower=%u, ref=%u, key=%u)\n",
305 msg_orignode(msg), ntohl(item->type),
306 ntohl(item->lower), ntohl(item->ref),
310 pr_warn("Unrecognized name table message received\n");
314 write_unlock_bh(&tipc_nametbl_lock);
319 * tipc_named_reinit - re-initialize local publications
321 * This routine is called whenever TIPC networking is enabled.
322 * All name table entries published by this node are updated to reflect
323 * the node's new network address.
325 void tipc_named_reinit(void)
327 struct publication *publ;
330 write_lock_bh(&tipc_nametbl_lock);
332 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
333 list_for_each_entry(publ, &publ_lists[scope]->list, local_list)
334 publ->node = tipc_own_addr;
336 write_unlock_bh(&tipc_nametbl_lock);