*
* RCU and node lock set
*/
-void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
+void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
u32 last_sent)
{
struct sk_buff *buf;
+ struct net *net = n_ptr->net;
struct tipc_net *tn = net_generic(net, tipc_net_id);
/* Ignore "stale" link state info */
struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
- tipc_msg_init(net, msg, BCAST_PROTOCOL, STATE_MSG,
+ tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, n_ptr->addr);
msg_set_non_seq(msg, 1);
msg_set_mc_netid(msg, tn->net_id);
bcl->bearer_id = MAX_BEARERS;
rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
bcl->state = WORKING_WORKING;
+ bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
+ msg_set_prevnode(bcl->pmsg, tn->own_addr);
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
tn->bcbearer = bcbearer;
tn->bclink = bclink;
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
u32 tipc_bclink_get_last_sent(struct net *net);
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
-void tipc_bclink_update_link_state(struct net *net, struct tipc_node *n_ptr,
+void tipc_bclink_update_link_state(struct tipc_node *node,
u32 last_sent);
int tipc_bclink_stats(struct net *net, char *stats_buf, const u32 buf_size);
int tipc_bclink_reset_stats(struct net *net);
u32 dest_domain = b_ptr->domain;
msg = buf_msg(buf);
- tipc_msg_init(net, msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
+ tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
+ INT_H_SIZE, dest_domain);
msg_set_non_seq(msg, 1);
msg_set_node_sig(msg, tn->random);
msg_set_dest_domain(msg, dest_domain);
*/
#define START_CHANGEOVER 100000u
-static void link_handle_out_of_seq_msg(struct net *net,
- struct tipc_link *l_ptr,
- struct sk_buff *buf);
-static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
- struct sk_buff *buf);
-static int tipc_link_tunnel_rcv(struct net *net, struct tipc_node *n_ptr,
- struct sk_buff **buf);
+static void link_handle_out_of_seq_msg(struct tipc_link *link,
+ struct sk_buff *skb);
+static void tipc_link_proto_rcv(struct tipc_link *link,
+ struct sk_buff *skb);
+static int tipc_link_tunnel_rcv(struct tipc_node *node,
+ struct sk_buff **skb);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
msg = l_ptr->pmsg;
- tipc_msg_init(n_ptr->net, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
+ tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
l_ptr->addr);
msg_set_size(msg, sizeof(l_ptr->proto_msg));
msg_set_session(msg, (tn->random & 0xffff));
static bool link_schedule_user(struct tipc_link *link, u32 oport,
uint chain_sz, uint imp)
{
- struct net *net = link->owner->net;
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *buf;
- buf = tipc_msg_create(net, SOCK_WAKEUP, 0, INT_H_SIZE, 0, tn->own_addr,
- tn->own_addr, oport, 0, 0);
+ buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+ link_own_addr(link), link_own_addr(link),
+ oport, 0, 0);
if (!buf)
return false;
TIPC_SKB_CB(buf)->chain_sz = chain_sz;
} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
link->stats.sent_bundled++;
continue;
- } else if (tipc_msg_make_bundle(net, outqueue, skb, mtu,
+ } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
link->addr)) {
link->stats.sent_bundled++;
link->stats.sent_bundles++;
return;
msg = buf_msg(skb);
- tipc_msg_init(link->owner->net, msg, BCAST_PROTOCOL, STATE_MSG,
+ tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
INT_H_SIZE, link->addr);
msg_set_last_bcast(msg, link->owner->bclink.acked);
__tipc_link_xmit_skb(link, skb);
/* Process the incoming packet */
if (unlikely(!link_working_working(l_ptr))) {
if (msg_user(msg) == LINK_PROTOCOL) {
- tipc_link_proto_rcv(net, l_ptr, skb);
+ tipc_link_proto_rcv(l_ptr, skb);
link_retrieve_defq(l_ptr, &head);
tipc_node_unlock(n_ptr);
continue;
/* Link is now in state WORKING_WORKING */
if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
- link_handle_out_of_seq_msg(net, l_ptr, skb);
+ link_handle_out_of_seq_msg(l_ptr, skb);
link_retrieve_defq(l_ptr, &head);
tipc_node_unlock(n_ptr);
continue;
msg = buf_msg(*buf);
switch (msg_user(msg)) {
case CHANGEOVER_PROTOCOL:
- if (tipc_link_tunnel_rcv(net, n, buf))
+ if (tipc_link_tunnel_rcv(n, buf))
res = 0;
break;
case MSG_FRAGMENTER:
/*
* link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
*/
-static void link_handle_out_of_seq_msg(struct net *net,
- struct tipc_link *l_ptr,
+static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
u32 seq_no = buf_seqno(buf);
if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
- tipc_link_proto_rcv(net, l_ptr, buf);
+ tipc_link_proto_rcv(l_ptr, buf);
return;
}
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest address rules
*/
-static void tipc_link_proto_rcv(struct net *net, struct tipc_link *l_ptr,
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
struct sk_buff *buf)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
u32 rec_gap = 0;
u32 max_pkt_info;
u32 max_pkt_ack;
goto exit;
if (l_ptr->net_plane != msg_net_plane(msg))
- if (tn->own_addr > msg_prevnode(msg))
+ if (link_own_addr(l_ptr) > msg_prevnode(msg))
l_ptr->net_plane = msg_net_plane(msg);
switch (msg_type(msg)) {
/* Protocol message before retransmits, reduce loss risk */
if (l_ptr->owner->bclink.recv_permitted)
- tipc_bclink_update_link_state(net, l_ptr->owner,
+ tipc_bclink_update_link_state(l_ptr->owner,
msg_last_bcast(msg));
if (rec_gap || (msg_probe(msg))) {
if (!tunnel)
return;
- tipc_msg_init(l_ptr->owner->net, &tunnel_hdr, CHANGEOVER_PROTOCOL,
+ tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
msg_set_msgcnt(&tunnel_hdr, msgcount);
struct sk_buff *skb;
struct tipc_msg tunnel_hdr;
- tipc_msg_init(l_ptr->owner->net, &tunnel_hdr, CHANGEOVER_PROTOCOL,
+ tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
* Owner node is locked.
*/
-static void tipc_link_dup_rcv(struct net *net, struct tipc_link *l_ptr,
+static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
struct sk_buff *t_buf)
{
struct sk_buff *buf;
}
/* Add buffer to deferred queue, if applicable: */
- link_handle_out_of_seq_msg(net, l_ptr, buf);
+ link_handle_out_of_seq_msg(l_ptr, buf);
}
/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
* returned to the active link for delivery upwards.
* Owner node is locked.
*/
-static int tipc_link_tunnel_rcv(struct net *net, struct tipc_node *n_ptr,
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
struct sk_buff **buf)
{
struct sk_buff *t_buf = *buf;
goto exit;
if (msg_type(t_msg) == DUPLICATE_MSG)
- tipc_link_dup_rcv(net, l_ptr, t_buf);
+ tipc_link_dup_rcv(l_ptr, t_buf);
else if (msg_type(t_msg) == ORIGINAL_MSG)
*buf = tipc_link_failover_rcv(l_ptr, t_buf);
else
return less_eq(left, right) ? left : right;
}
+static inline u32 link_own_addr(struct tipc_link *l)
+{
+ return msg_prevnode(l->pmsg);
+}
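
The new link_own_addr() helper is the heart of this change: the own node address is stamped into the link's pre-built protocol header once, at creation time (see the bcl->pmsg / msg_set_prevnode() and l_ptr->pmsg / tipc_msg_init() lines above), and later read back from that header instead of being fetched from the per-namespace tipc_net context. A minimal user-space sketch of the same pattern, with hypothetical hdr/link types standing in for struct tipc_msg and struct tipc_link:

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for struct tipc_msg / struct tipc_link. */
struct hdr  { uint32_t prevnode; };
struct link { struct hdr proto_msg; struct hdr *pmsg; };

/* Stamp the node address once, when the link is created. */
static void link_init(struct link *l, uint32_t own_addr)
{
	l->pmsg = &l->proto_msg;
	l->pmsg->prevnode = own_addr;
}

/* Later callers recover it from the header; no net pointer needed. */
static uint32_t link_own_addr(struct link *l)
{
	return l->pmsg->prevnode;
}

int main(void)
{
	struct link l;

	link_init(&l, 0x01001001u);	/* illustrative TIPC address <1.1.1> */
	printf("own addr: 0x%08x\n", (unsigned int)link_own_addr(&l));
	return 0;
}
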
/*
* Link status checking routines
return skb;
}
-void tipc_msg_init(struct net *net, struct tipc_msg *m, u32 user, u32 type,
- u32 hsize, u32 destnode)
+void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
+ u32 hsize, u32 dnode)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
-
memset(m, 0, hsize);
msg_set_version(m);
msg_set_user(m, user);
msg_set_hdr_sz(m, hsize);
msg_set_size(m, hsize);
- msg_set_prevnode(m, tn->own_addr);
+ msg_set_prevnode(m, own_node);
msg_set_type(m, type);
if (hsize > SHORT_H_SIZE) {
- msg_set_orignode(m, tn->own_addr);
- msg_set_destnode(m, destnode);
+ msg_set_orignode(m, own_node);
+ msg_set_destnode(m, dnode);
}
}
-struct sk_buff *tipc_msg_create(struct net *net, uint user, uint type,
+struct sk_buff *tipc_msg_create(uint user, uint type,
uint hdr_sz, uint data_sz, u32 dnode,
u32 onode, u32 dport, u32 oport, int errcode)
{
return NULL;
msg = buf_msg(buf);
- tipc_msg_init(net, msg, user, type, hdr_sz, dnode);
+ tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
msg_set_size(msg, hdr_sz + data_sz);
- msg_set_prevnode(msg, onode);
msg_set_origport(msg, oport);
msg_set_destport(msg, dport);
msg_set_errcode(msg, errcode);
*
* Returns message data size or errno: -ENOMEM, -EFAULT
*/
-int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int pktmax, struct sk_buff_head *list)
{
int mhsz = msg_hdr_sz(mhdr);
}
/* Prepare reusable fragment header */
- tipc_msg_init(net, &pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, INT_H_SIZE,
- msg_destnode(mhdr));
+ tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
+ FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
msg_set_size(&pkthdr, pktmax);
msg_set_fragm_no(&pkthdr, pktno);
* Replaces buffer if successful
* Returns true if success, otherwise false
*/
-bool tipc_msg_make_bundle(struct net *net, struct sk_buff_head *list,
+bool tipc_msg_make_bundle(struct sk_buff_head *list,
struct sk_buff *skb, u32 mtu, u32 dnode)
{
struct sk_buff *bskb;
skb_trim(bskb, INT_H_SIZE);
bmsg = buf_msg(bskb);
- tipc_msg_init(net, bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
+ tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
+ INT_H_SIZE, dnode);
msg_set_seqno(bmsg, msg_seqno(msg));
msg_set_ack(bmsg, msg_ack(msg));
msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
* Consumes buffer if failure
* Returns true if success, otherwise false
*/
-bool tipc_msg_reverse(struct net *net, struct sk_buff *buf, u32 *dnode,
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
int err)
{
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_msg *msg = buf_msg(buf);
uint imp = msg_importance(msg);
struct tipc_msg ohdr;
msg_set_errcode(msg, err);
msg_set_origport(msg, msg_destport(&ohdr));
msg_set_destport(msg, msg_origport(&ohdr));
- msg_set_prevnode(msg, tn->own_addr);
+ msg_set_prevnode(msg, own_addr);
if (!msg_short(msg)) {
msg_set_orignode(msg, msg_destnode(&ohdr));
msg_set_destnode(msg, msg_orignode(&ohdr));
}
struct sk_buff *tipc_buf_acquire(u32 size);
-bool tipc_msg_reverse(struct net *net, struct sk_buff *buf, u32 *dnode,
+bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
int err);
int tipc_msg_eval(struct net *net, struct sk_buff *buf, u32 *dnode);
-void tipc_msg_init(struct net *net, struct tipc_msg *m, u32 user, u32 type,
+void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
u32 hsize, u32 destnode);
-struct sk_buff *tipc_msg_create(struct net *net, uint user, uint type,
+struct sk_buff *tipc_msg_create(uint user, uint type,
uint hdr_sz, uint data_sz, u32 dnode,
u32 onode, u32 dport, u32 oport, int errcode);
int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
-bool tipc_msg_make_bundle(struct net *net, struct sk_buff_head *list,
+bool tipc_msg_make_bundle(struct sk_buff_head *list,
struct sk_buff *skb, u32 mtu, u32 dnode);
-int tipc_msg_build(struct net *net, struct tipc_msg *mhdr, struct msghdr *m,
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
+ struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
struct tipc_msg *msg;
if (buf != NULL) {
msg = buf_msg(buf);
- tipc_msg_init(net, msg, NAME_DISTRIBUTOR, type, INT_H_SIZE,
- dest);
+ tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type,
+ INT_H_SIZE, dest);
msg_set_size(msg, INT_H_SIZE + size);
}
return buf;
struct sk_buff *buf;
list_for_each_entry_safe(conn, safe, conns, list) {
- buf = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+ buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0,
tn->own_addr, conn->peer_node,
conn->port, conn->peer_port,
* - port reference
*/
+static u32 tsk_own_node(struct tipc_sock *tsk)
+{
+ return msg_prevnode(&tsk->phdr);
+}
+
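
The socket side applies the same idea: the per-socket header template tsk->phdr already carries the own node address (it is written there by tipc_msg_init() in tipc_sk_create()), so the reject, probe and shutdown paths read it back through tsk_own_node() instead of dereferencing the network namespace. A small, self-contained sketch of the reverse-for-rejection step under that assumption; the struct layout and field names below are illustrative, not the real struct tipc_msg:

#include <stdio.h>
#include <stdint.h>

/* Illustrative message header, not the on-wire TIPC layout. */
struct hdr {
	uint32_t prevnode;
	uint32_t orignode, destnode;
	unsigned int origport, destport;
	int errcode;
};

/* Stands in for struct tipc_sock: the header template built at creation. */
struct sock_ctx { struct hdr phdr; };

static uint32_t sk_own_node(struct sock_ctx *s)
{
	return s->phdr.prevnode;
}

/* Reverse a message for rejection: swap endpoints, stamp our own node. */
static void msg_reverse(uint32_t own_addr, struct hdr *m, int err)
{
	struct hdr o = *m;

	m->errcode  = err;
	m->origport = o.destport;
	m->destport = o.origport;
	m->orignode = o.destnode;
	m->destnode = o.orignode;
	m->prevnode = own_addr;
}

int main(void)
{
	struct sock_ctx s = { .phdr = { .prevnode = 0x01001001u } };
	struct hdr m = { .orignode = 0x01001002u, .destnode = 0x01001001u,
			 .origport = 42, .destport = 99 };

	msg_reverse(sk_own_node(&s), &m, 1 /* illustrative error code */);
	printf("reject goes back to node 0x%08x, port %u\n",
	       (unsigned int)m.destnode, m.destport);
	return 0;
}
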
static u32 tsk_peer_node(struct tipc_sock *tsk)
{
return msg_destnode(&tsk->phdr);
{
struct sk_buff *skb;
u32 dnode;
- struct net *net = sock_net(sk);
+ u32 own_node = tsk_own_node(tipc_sk(sk));
while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
- if (tipc_msg_reverse(net, skb, &dnode, TIPC_ERR_NO_PORT))
- tipc_link_xmit_skb(net, skb, dnode, 0);
+ if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
+ tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
}
}
static int tipc_sk_create(struct net *net, struct socket *sock,
int protocol, int kern)
{
+ struct tipc_net *tn;
const struct proto_ops *ops;
socket_state state;
struct sock *sk;
tsk->max_pkt = MAX_PKT_DEFAULT;
INIT_LIST_HEAD(&tsk->publications);
msg = &tsk->phdr;
- tipc_msg_init(net, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
+ tn = net_generic(sock_net(sk), tipc_net_id);
+ tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
NAMED_H_SIZE, 0);
/* Finish initializing socket data structures */
{
struct sock *sk = sock->sk;
struct net *net;
- struct tipc_net *tn;
struct tipc_sock *tsk;
struct sk_buff *skb;
u32 dnode, probing_state;
return 0;
net = sock_net(sk);
- tn = net_generic(net, tipc_net_id);
-
tsk = tipc_sk(sk);
lock_sock(sk);
tsk->connected = 0;
tipc_node_remove_conn(net, dnode, tsk->portid);
}
- if (tipc_msg_reverse(net, skb, &dnode,
+ if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
TIPC_ERR_NO_PORT))
tipc_link_xmit_skb(net, skb, dnode, 0);
}
sock_put(sk);
tipc_sk_remove(tsk);
if (tsk->connected) {
- skb = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
- tn->own_addr, tsk_peer_port(tsk),
+ tsk_own_node(tsk), tsk_peer_port(tsk),
tsk->portid, TIPC_ERR_NO_PORT);
if (skb)
tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
struct msghdr *msg, size_t dsz, long timeo)
{
struct sock *sk = sock->sk;
+ struct tipc_sock *tsk = tipc_sk(sk);
struct net *net = sock_net(sk);
- struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
+ struct tipc_msg *mhdr = &tsk->phdr;
struct sk_buff_head head;
struct iov_iter save = msg->msg_iter;
uint mtu;
new_mtu:
mtu = tipc_bclink_get_mtu();
__skb_queue_head_init(&head);
- rc = tipc_msg_build(net, mhdr, msg, 0, dsz, mtu, &head);
+ rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
if (unlikely(rc < 0))
return rc;
if (conn_cong)
tsk->sk.sk_write_space(&tsk->sk);
} else if (msg_type(msg) == CONN_PROBE) {
- if (!tipc_msg_reverse(sock_net(&tsk->sk), buf, dnode, TIPC_OK))
+ if (!tipc_msg_reverse(tsk_own_node(tsk), buf, dnode, TIPC_OK))
return TIPC_OK;
msg_set_type(msg, CONN_PROBE_REPLY);
return TIPC_FWD_MSG;
new_mtu:
mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
__skb_queue_head_init(&head);
- rc = tipc_msg_build(net, mhdr, m, 0, dsz, mtu, &head);
+ rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
if (rc < 0)
goto exit;
mtu = tsk->max_pkt;
send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
__skb_queue_head_init(&head);
- rc = tipc_msg_build(net, mhdr, m, sent, send, mtu, &head);
+ rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
if (unlikely(rc < 0))
goto exit;
do {
static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
{
struct net *net = sock_net(&tsk->sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *skb = NULL;
struct tipc_msg *msg;
u32 peer_port = tsk_peer_port(tsk);
if (!tsk->connected)
return;
- skb = tipc_msg_create(net, CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
- dnode, tn->own_addr, peer_port, tsk->portid,
- TIPC_OK);
+ skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
+ dnode, tsk_own_node(tsk), peer_port,
+ tsk->portid, TIPC_OK);
if (!skb)
return;
msg = buf_msg(skb);
return 0;
}
- if ((rc < 0) && !tipc_msg_reverse(net, skb, &onode, -rc))
+ if ((rc < 0) && !tipc_msg_reverse(tsk_own_node(tsk), skb, &onode, -rc))
return 0;
tipc_link_xmit_skb(net, skb, onode, 0);
int tipc_sk_rcv(struct net *net, struct sk_buff *skb)
{
struct tipc_sock *tsk;
+ struct tipc_net *tn;
struct sock *sk;
u32 dport = msg_destport(buf_msg(skb));
int rc = TIPC_OK;
if (likely(!rc))
return 0;
exit:
- if ((rc < 0) && !tipc_msg_reverse(net, skb, &dnode, -rc))
+ tn = net_generic(net, tipc_net_id);
+ if ((rc < 0) && !tipc_msg_reverse(tn->own_addr, skb, &dnode, -rc))
return -EHOSTUNREACH;
tipc_link_xmit_skb(net, skb, dnode, 0);
{
struct sock *sk = sock->sk;
struct net *net = sock_net(sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_sock *tsk = tipc_sk(sk);
struct sk_buff *skb;
u32 dnode;
kfree_skb(skb);
goto restart;
}
- if (tipc_msg_reverse(net, skb, &dnode,
+ if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
TIPC_CONN_SHUTDOWN))
tipc_link_xmit_skb(net, skb, dnode,
tsk->portid);
tipc_node_remove_conn(net, dnode, tsk->portid);
} else {
dnode = tsk_peer_node(tsk);
- skb = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE,
- 0, dnode, tn->own_addr,
+ 0, dnode, tsk_own_node(tsk),
tsk_peer_port(tsk),
tsk->portid, TIPC_CONN_SHUTDOWN);
tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
{
struct tipc_sock *tsk = (struct tipc_sock *)data;
struct sock *sk = &tsk->sk;
- struct net *net = sock_net(sk);
- struct tipc_net *tn = net_generic(net, tipc_net_id);
struct sk_buff *skb = NULL;
u32 peer_port, peer_node;
+ u32 own_node = tsk_own_node(tsk);
bh_lock_sock(sk);
if (!tsk->connected) {
if (tsk->probing_state == TIPC_CONN_PROBING) {
/* Previous probe not answered -> self abort */
- skb = tipc_msg_create(net, TIPC_CRITICAL_IMPORTANCE,
+ skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
TIPC_CONN_MSG, SHORT_H_SIZE, 0,
- tn->own_addr, peer_node, tsk->portid,
+ own_node, peer_node, tsk->portid,
peer_port, TIPC_ERR_NO_PORT);
} else {
- skb = tipc_msg_create(net, CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
- 0, peer_node, tn->own_addr,
+ skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
+ INT_H_SIZE, 0, peer_node, own_node,
peer_port, tsk->portid, TIPC_OK);
tsk->probing_state = TIPC_CONN_PROBING;
sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);