}
static int
-ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni, lnet_process_id_t id)
+ksocknal_create_peer(struct ksock_peer **peerp, lnet_ni_t *ni,
+ lnet_process_id_t id)
{
int cpt = lnet_cpt_of_nid(id.nid);
struct ksock_net *net = ni->ni_data;
}
static void
-ksocknal_associate_route_conn_locked(struct ksock_route *route, struct ksock_conn *conn)
+ksocknal_associate_route_conn_locked(struct ksock_route *route,
+ struct ksock_conn *conn)
{
struct ksock_peer *peer = route->ksnr_peer;
int type = conn->ksnc_type;
if (k < peer->ksnp_n_passive_ips) /* using it already */
continue;
- k = ksocknal_match_peerip(iface, peerips, n_peerips);
+ k = ksocknal_match_peerip(iface, peerips,
+ n_peerips);
xor = ip ^ peerips[k];
this_netmatch = !(xor & iface->ksni_netmask) ? 1 : 0;
/* Take packets blocking for this connection. */
list_for_each_entry_safe(tx, txtmp, &peer->ksnp_tx_queue, tx_list) {
- if (conn->ksnc_proto->pro_match_tx(conn, tx, tx->tx_nonblk) == SOCKNAL_MATCH_NO)
- continue;
+ int match = conn->ksnc_proto->pro_match_tx(conn, tx,
+ tx->tx_nonblk);
+
+ if (match == SOCKNAL_MATCH_NO)
+ continue;
list_del(&tx->tx_list);
ksocknal_queue_tx_locked(tx, conn);
spin_unlock_bh(&conn->ksnc_scheduler->kss_lock);
}
- peer->ksnp_proto = NULL; /* renegotiate protocol version */
- peer->ksnp_error = error; /* stash last conn close reason */
+ peer->ksnp_proto = NULL; /* renegotiate protocol version */
+ peer->ksnp_error = error; /* stash last conn close reason */
if (list_empty(&peer->ksnp_routes)) {
/*
(id.pid == LNET_PID_ANY || id.pid == peer->ksnp_id.pid)))
continue;
- count += ksocknal_close_peer_conns_locked(peer, ipaddr, 0);
+ count += ksocknal_close_peer_conns_locked(peer, ipaddr,
+ 0);
}
}
}
rc = 0;
- /* NB only new connections will pay attention to the new interface! */
+ /*
+ * NB only new connections will pay attention to the
+ * new interface!
+ */
}
write_unlock_bh(&ksocknal_data.ksnd_global_lock);
int txmem;
int rxmem;
int nagle;
- struct ksock_conn *conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
+ struct ksock_conn *conn;
+
+ conn = ksocknal_get_conn_by_idx(ni, data->ioc_count);
if (!conn)
return -ENOENT;
}
struct ksock_conn *
-ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx, int nonblk)
+ksocknal_find_conn_locked(struct ksock_peer *peer, struct ksock_tx *tx,
+ int nonblk)
{
struct list_head *tmp;
struct ksock_conn *conn;
int fnob = 0;
list_for_each(tmp, &peer->ksnp_conns) {
- struct ksock_conn *c = list_entry(tmp, struct ksock_conn, ksnc_list);
- int nob = atomic_read(&c->ksnc_tx_nob) +
- c->ksnc_sock->sk->sk_wmem_queued;
- int rc;
+ struct ksock_conn *c;
+ int nob, rc;
+
+ c = list_entry(tmp, struct ksock_conn, ksnc_list);
+ nob = atomic_read(&c->ksnc_tx_nob) +
+ c->ksnc_sock->sk->sk_wmem_queued;
LASSERT(!c->ksnc_closing);
LASSERT(c->ksnc_proto &&
LASSERT(msg->ksm_zc_cookies[1]);
LASSERT(conn->ksnc_proto->pro_queue_tx_zcack);
+ /* ZC ACK piggybacked on ztx release tx later */
if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0))
- ztx = tx; /* ZC ACK piggybacked on ztx release tx later */
-
+ ztx = tx;
} else {
/*
* It's a normal packet - can it piggyback a noop zc-ack that
LASSERT(!route->ksnr_connecting || route->ksnr_scheduled);
- if (route->ksnr_scheduled) /* connections being established */
+ /* connections being established */
+ if (route->ksnr_scheduled)
continue;
/* all route types connected ? */
rc = ksocknal_process_transmit(conn, tx);
if (rc == -ENOMEM || rc == -EAGAIN) {
- /* Incomplete send: replace tx on HEAD of tx_queue */
+ /*
+ * Incomplete send: replace tx on HEAD of
+ * tx_queue
+ */
spin_lock_bh(&sched->kss_lock);
list_add(&tx->tx_list, &conn->ksnc_tx_queue);
} else {
timeout = active ? *ksocknal_tunables.ksnd_timeout :
lnet_acceptor_timeout();
- rc = lnet_sock_read(sock, &hello->kshm_magic, sizeof(hello->kshm_magic), timeout);
+ rc = lnet_sock_read(sock, &hello->kshm_magic,
+ sizeof(hello->kshm_magic), timeout);
if (rc) {
CERROR("Error %d reading HELLO from %pI4h\n",
rc, &conn->ksnc_ipaddr);
conn->ksnc_port > LNET_ACCEPTOR_MAX_RESERVED_PORT) {
/* Userspace NAL assigns peer process ID from socket */
recv_id.pid = conn->ksnc_port | LNET_PID_USERFLAG;
- recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid), conn->ksnc_ipaddr);
+ recv_id.nid = LNET_MKNID(LNET_NIDNET(ni->ni_nid),
+ conn->ksnc_ipaddr);
} else {
recv_id.nid = hello->kshm_src_nid;
recv_id.pid = hello->kshm_src_pid;
if (peer->ksnp_accepting > 0) {
CDEBUG(D_NET,
"peer %s(%d) already connecting to me, retry later.\n",
- libcfs_nid2str(peer->ksnp_id.nid), peer->ksnp_accepting);
+ libcfs_nid2str(peer->ksnp_id.nid),
+ peer->ksnp_accepting);
retry_later = 1;
}
/* Nothing to do for 'timeout' */
set_current_state(TASK_INTERRUPTIBLE);
- add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq, &wait);
+ add_wait_queue_exclusive(&ksocknal_data.ksnd_connd_waitq,
+ &wait);
spin_unlock_bh(connd_lock);
nloops = 0;
struct ksock_conn *conn;
struct ksock_tx *tx;
- if (list_empty(&peer->ksnp_conns)) /* last_alive will be updated by create_conn */
+ /* last_alive will be updated by create_conn */
+ if (list_empty(&peer->ksnp_conns))
return 0;
if (peer->ksnp_proto != &ksocknal_protocol_v3x)
fragnob = sum;
conn->ksnc_rx_csum = ksocknal_csum(conn->ksnc_rx_csum,
- iov[i].iov_base, fragnob);
+ iov[i].iov_base,
+ fragnob);
}
conn->ksnc_msg.ksm_csum = saved_csum;
}
}
int
-ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem, int *rxmem, int *nagle)
+ksocknal_lib_get_conn_tunables(struct ksock_conn *conn, int *txmem,
+ int *rxmem, int *nagle)
{
struct socket *sock = conn->ksnc_sock;
int len;
}
if (!tx->tx_msg.ksm_zc_cookies[0]) {
- /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */
+ /*
+ * NOOP tx has only one ZC-ACK cookie, so it
+ * can carry at least one more
+ */
if (tx->tx_msg.ksm_zc_cookies[1] > cookie) {
tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1];
tx->tx_msg.ksm_zc_cookies[1] = cookie;
}
if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) {
- /* not likely to carry more ACKs, skip it to simplify logic */
+ /*
+ * not likely to carry more ACKs, skip it
+ * to simplify logic
+ */
ksocknal_next_tx_carrier(conn);
}
}
} else {
- /* ksm_zc_cookies[0] < ksm_zc_cookies[1], it is range of cookies */
+ /*
+ * ksm_zc_cookies[0] < ksm_zc_cookies[1],
+ * it is a range of cookies
+ */
if (cookie >= tx->tx_msg.ksm_zc_cookies[0] &&
cookie <= tx->tx_msg.ksm_zc_cookies[1]) {
CWARN("%s: duplicated ZC cookie: %llu\n",
tx_zc_list) {
__u64 c = tx->tx_msg.ksm_zc_cookies[0];
- if (c == cookie1 || c == cookie2 || (cookie1 < c && c < cookie2)) {
+ if (c == cookie1 || c == cookie2 ||
+ (cookie1 < c && c < cookie2)) {
tx->tx_msg.ksm_zc_cookies[0] = 0;
list_del(&tx->tx_zc_list);
list_add(&tx->tx_zc_list, &zlist);
}
static int
-ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello, int timeout)
+ksocknal_recv_hello_v2(struct ksock_conn *conn, ksock_hello_msg_t *hello,
+ int timeout)
{
struct socket *sock = conn->ksnc_sock;
int rc;
tx->tx_nob = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
tx->tx_resid = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr);
}
- /* Don't checksum before start sending, because packet can be piggybacked with ACK */
+ /*
+ * Don't checksum before we start sending, because the packet
+ * can be piggybacked with an ACK
+ */
}
static void
static int
lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned int feats,
- int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
+ int bulk_npg, int bulk_len, int embedded,
+ struct lstcon_rpc *crpc)
{
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
}
static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param,
+ struct srpc_test_reqst *req)
{
struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
lstcon_rpc_readent_func_t readent);
void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
-void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans,
+ struct lstcon_rpc *req);
int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
int lstcon_rpc_pinger_start(void);
void lstcon_rpc_pinger_stop(void);
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+ LIBCFS_ALLOC(*ndpp, sizeof(**ndpp) + sizeof(*ndl));
if (!*ndpp)
return -ENOMEM;
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
+ LIBCFS_FREE(nd, sizeof(*nd) + sizeof(*ndl));
}
static int
-lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
+lstcon_ndlink_find(struct list_head *hash, lnet_process_id_t id,
+ struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
struct lstcon_ndlink *ndl;
grp->grp_ref++;
}
-static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
+static void lstcon_group_ndlink_release(struct lstcon_group *,
+ struct lstcon_ndlink *);
static void
lstcon_group_drain(struct lstcon_group *grp, int keep)
}
static int
-lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test,
+ struct list_head __user *result_up)
{
struct lstcon_rpc_trans *trans;
struct lstcon_group *grp;
}
static int
-lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx,
+ struct lstcon_test **testpp)
{
struct lstcon_test *test;
}
static int
-sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
+sfw_register_test(struct srpc_service *service,
+ struct sfw_test_client_ops *cliops)
{
struct sfw_test_case *tsc;
}
static int
-sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request,
+ struct srpc_rmsn_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
}
static int
-sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
+sfw_debug_session(struct srpc_debug_reqst *request,
+ struct srpc_debug_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
}
static int
-sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx,
+ struct srpc_batch_reply *reply)
{
struct sfw_test_instance *tsi;
}
static int
-sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
+sfw_control_batch(struct srpc_batch_reqst *request,
+ struct srpc_batch_reply *reply)
{
struct sfw_session *sn = sfw_data.fw_session;
int rc = 0;
/* called with sv->sv_lock held */
static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd,
+ struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {