struct list_head *paa_reqs_array; /** array to hold requests */
__u32 paa_size; /** the size of array */
__u32 paa_count; /** the total count of reqs */
- time_t paa_deadline; /** the earliest deadline of reqs */
+ time64_t paa_deadline; /** the earliest deadline of reqs */
__u32 *paa_reqs_count; /** the count of reqs in each entry */
};
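Throughout these hunks the conversion is the same: time_t fields read with get_seconds(), an unsigned long that wraps in January 2038 on 32-bit kernels, become time64_t fields read with ktime_get_real_seconds(). A minimal sketch of the new pattern, using a hypothetical helper name:
        /* Sketch only (hypothetical helper): deadline arithmetic after the conversion. */
        #include <linux/ktime.h>

        static time64_t example_deadline(unsigned int timeout_secs)
        {
                /* 64-bit seconds since the epoch on every architecture */
                return ktime_get_real_seconds() + timeout_secs;
        }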
/* server-side... */
/** request arrival time */
- struct timeval rq_arrival_time;
+ struct timespec64 rq_arrival_time;
/** separated reply state */
struct ptlrpc_reply_state *rq_reply_state;
/** incoming request buffer */
/**
* when request/reply sent (secs), or time when request should be sent
*/
- time_t rq_sent;
+ time64_t rq_sent;
/** time for request really sent out */
- time_t rq_real_sent;
+ time64_t rq_real_sent;
/** when request must finish. volatile
* so that servers' early reply updates to the deadline aren't
* kept in per-cpu cache */
- volatile time_t rq_deadline;
+ volatile time64_t rq_deadline;
/** when req reply unlink must finish. */
- time_t rq_reply_deadline;
+ time64_t rq_reply_deadline;
/** when req bulk unlink must finish. */
- time_t rq_bulk_deadline;
+ time64_t rq_bulk_deadline;
/**
* service time estimate (secs)
* If the request is not served by this time, it is marked as timed out.
desc = req->rq_bulk;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
- req->rq_bulk_deadline > get_seconds())
+ req->rq_bulk_deadline > ktime_get_real_seconds())
return 1;
if (!desc)
ptlrpc_client_early(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > get_seconds())
+ req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_early;
}
ptlrpc_client_replied(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > get_seconds())
+ req->rq_reply_deadline > ktime_get_real_seconds())
return 0;
return req->rq_replied;
}
ptlrpc_client_recv(struct ptlrpc_request *req)
{
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > get_seconds())
+ req->rq_reply_deadline > ktime_get_real_seconds())
return 1;
return req->rq_receiving_reply;
}
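The three client-side helpers above repeat one fail-loc guard; a hypothetical helper, shown only to illustrate that both sides of the comparison are now 64-bit, could read:
        /* Illustrative only: the guard shared by ptlrpc_client_early(),
         * ptlrpc_client_replied() and ptlrpc_client_recv(). rq_reply_deadline
         * and ktime_get_real_seconds() are both time64_t, so the comparison
         * no longer truncates on 32-bit kernels. */
        static inline bool reply_unlink_deadline_held(struct ptlrpc_request *req)
        {
                return OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
                       req->rq_reply_deadline > ktime_get_real_seconds();
        }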
spin_lock(&req->rq_lock);
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
- req->rq_reply_deadline > get_seconds()) {
+ req->rq_reply_deadline > ktime_get_real_seconds()) {
spin_unlock(&req->rq_lock);
return 1;
}
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = get_seconds() + resends;
+ req->rq_sent = ktime_get_real_seconds() + resends;
}
/* It is important to obtain rpc_lock first (if applicable), so that
if (resends) {
req->rq_generation_set = 1;
req->rq_import_generation = generation;
- req->rq_sent = get_seconds() + resends;
+ req->rq_sent = ktime_get_real_seconds() + resends;
}
level = LUSTRE_IMP_FULL;
resend:
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply()) */
if (aa->aa_resends > new_req->rq_timeout)
- new_req->rq_sent = get_seconds() + new_req->rq_timeout;
+ new_req->rq_sent = ktime_get_real_seconds() + new_req->rq_timeout;
else
- new_req->rq_sent = get_seconds() + aa->aa_resends;
+ new_req->rq_sent = ktime_get_real_seconds() + aa->aa_resends;
new_req->rq_generation_set = 1;
new_req->rq_import_generation = request->rq_import_generation;
{
unsigned int nl, oldnl;
struct imp_at *at;
- time_t now = get_seconds();
+ time64_t now = ktime_get_real_seconds();
LASSERT(req->rq_import);
D_ADAPTTO : D_WARNING,
"Reported service time %u > total measured time "
CFS_DURATION_T"\n", service_time,
- cfs_time_sub(now, req->rq_sent));
+ (long)(now - req->rq_sent));
return;
}
static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req)
{
struct ptlrpc_request *early_req;
- time_t olddl;
+ time64_t olddl;
int rc;
req->rq_early = 0;
olddl = req->rq_deadline;
/* server assumes it now has rq_timeout from when it sent the
* early reply, so client should give it at least that long. */
- req->rq_deadline = get_seconds() + req->rq_timeout +
+ req->rq_deadline = ktime_get_real_seconds() + req->rq_timeout +
ptlrpc_at_get_net_latency(req);
DEBUG_REQ(D_ADAPTTO, req,
- "Early reply #%d, new deadline in " CFS_DURATION_T "s (" CFS_DURATION_T "s)",
+ "Early reply #%d, new deadline in %lds (%lds)",
req->rq_early_count,
- cfs_time_sub(req->rq_deadline, get_seconds()),
- cfs_time_sub(req->rq_deadline, olddl));
+ (long)(req->rq_deadline - ktime_get_real_seconds()),
+ (long)(req->rq_deadline - olddl));
return rc;
}
struct obd_import *imp = req->rq_import;
struct obd_device *obd = req->rq_import->imp_obd;
int rc;
- struct timeval work_start;
+ struct timespec64 work_start;
long timediff;
LASSERT(obd != NULL);
/* retry indefinitely on EINPROGRESS */
if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS &&
ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) {
- time_t now = get_seconds();
+ time64_t now = ktime_get_real_seconds();
DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS");
spin_lock(&req->rq_lock);
return 0;
}
- do_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL);
+ ktime_get_real_ts64(&work_start);
+ timediff = (work_start.tv_sec - req->rq_arrival_time.tv_sec) * USEC_PER_SEC +
+ (work_start.tv_nsec - req->rq_arrival_time.tv_nsec) / NSEC_PER_USEC;
if (obd->obd_svc_stats != NULL) {
lprocfs_counter_add(obd->obd_svc_stats, PTLRPC_REQWAIT_CNTR,
timediff);
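The microsecond delta that cfs_timeval_sub() used to return is now computed directly from two timespec64 readings; a minimal sketch of that arithmetic with hypothetical names:
        /* Sketch only: elapsed wall-clock time in microseconds since *start,
         * mirroring the rq_arrival_time computation above. */
        #include <linux/time64.h>
        #include <linux/timekeeping.h>

        static long elapsed_usecs(const struct timespec64 *start)
        {
                struct timespec64 now;

                ktime_get_real_ts64(&now);
                return (now.tv_sec - start->tv_sec) * USEC_PER_SEC +
                       (now.tv_nsec - start->tv_nsec) / NSEC_PER_USEC;
        }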
int rc;
LASSERT(req->rq_phase == RQ_PHASE_NEW);
- if (req->rq_sent && (req->rq_sent > get_seconds()) &&
+ if (req->rq_sent && (req->rq_sent > ktime_get_real_seconds()) &&
(!req->rq_generation_set ||
req->rq_import_generation == imp->imp_generation))
return 0;
/* delayed resend - skip */
if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend &&
- req->rq_sent > get_seconds())
+ req->rq_sent > ktime_get_real_seconds())
continue;
if (!(req->rq_phase == RQ_PHASE_RPC ||
req->rq_timedout = 1;
spin_unlock(&req->rq_lock);
- DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T
- "/real "CFS_DURATION_T"]",
+ DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent %lld/real %lld]",
req->rq_net_err ? "failed due to network error" :
((req->rq_real_sent == 0 ||
- time_before((unsigned long)req->rq_real_sent, (unsigned long)req->rq_sent) ||
- cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ?
+ req->rq_real_sent < req->rq_sent ||
+ req->rq_real_sent >= req->rq_deadline) ?
"timed out for sent delay" : "timed out for slow reply"),
- req->rq_sent, req->rq_real_sent);
+ (s64)req->rq_sent, (s64)req->rq_real_sent);
if (imp != NULL && obd_debug_peer_on_timeout)
LNetCtl(IOC_LIBCFS_DEBUG_PEER, &imp->imp_connection->c_peer);
{
struct ptlrpc_request_set *set = data;
struct list_head *tmp;
- time_t now = get_seconds();
+ time64_t now = ktime_get_real_seconds();
LASSERT(set != NULL);
int ptlrpc_set_next_timeout(struct ptlrpc_request_set *set)
{
struct list_head *tmp;
- time_t now = get_seconds();
+ time64_t now = ktime_get_real_seconds();
int timeout = 0;
struct ptlrpc_request *req;
- int deadline;
+ time64_t deadline;
list_for_each(tmp, &set->set_requests) {
req = list_entry(tmp, struct ptlrpc_request, rq_set_chain);
*/
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_REPL_UNLINK) &&
async && request->rq_reply_deadline == 0)
- request->rq_reply_deadline = get_seconds()+LONG_UNLINK;
+ request->rq_reply_deadline = ktime_get_real_seconds() + LONG_UNLINK;
/*
* Nothing left to do.
#define YEAR_2004 (1ULL << 30)
void ptlrpc_init_xid(void)
{
- time_t now = get_seconds();
+ time64_t now = ktime_get_real_seconds();
spin_lock_init(&ptlrpc_last_xid_lock);
if (now < YEAR_2004) {
{
/* re-initialize the req */
req->rq_timeout = obd_timeout;
- req->rq_sent = get_seconds();
+ req->rq_sent = ktime_get_real_seconds();
req->rq_deadline = req->rq_sent + req->rq_timeout;
req->rq_reply_deadline = req->rq_deadline;
req->rq_phase = RQ_PHASE_INTERPRET;
sptlrpc_request_out_callback(req);
spin_lock(&req->rq_lock);
- req->rq_real_sent = get_seconds();
+ req->rq_real_sent = ktime_get_real_seconds();
if (ev->unlinked)
req->rq_req_unlink = 0;
struct ptlrpc_request *req)
{
__u64 sec = req->rq_arrival_time.tv_sec;
- __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */
+ __u32 usec = req->rq_arrival_time.tv_nsec / NSEC_PER_USEC / 16; /* usec / 16 */
__u64 new_seq;
/* set sequence ID for request and add it to history list,
req->rq_reqbuf = ev->md.start + ev->offset;
if (ev->type == LNET_EVENT_PUT && ev->status == 0)
req->rq_reqdata_len = ev->mlength;
- do_gettimeofday(&req->rq_arrival_time);
+ ktime_get_real_ts64(&req->rq_arrival_time);
req->rq_peer = ev->initiator;
req->rq_self = ev->target.nid;
req->rq_rqbd = rqbd;
EXPORT_SYMBOL(ptlrpc_deactivate_import);
static unsigned int
-ptlrpc_inflight_deadline(struct ptlrpc_request *req, time_t now)
+ptlrpc_inflight_deadline(struct ptlrpc_request *req, time64_t now)
{
- long dl;
+ time64_t dl;
static unsigned int ptlrpc_inflight_timeout(struct obd_import *imp)
{
- time_t now = get_seconds();
+ time64_t now = ktime_get_real_seconds();
struct list_head *tmp, *n;
struct ptlrpc_request *req;
unsigned int timeout = 0;
* must be just as careful as the service's request
* parser. Currently I only print stuff here I know is OK
* to look at coz it was set up in request_in_callback()!!! */
- seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%ld:%lds(%+lds) ",
+ seq_printf(s, "%lld:%s:%s:x%llu:%d:%s:%lld:%lds(%+lds) ",
req->rq_history_seq, libcfs_nid2str(req->rq_self),
libcfs_id2str(req->rq_peer), req->rq_xid,
req->rq_reqlen, ptlrpc_rqphase2str(req),
- req->rq_arrival_time.tv_sec,
- req->rq_sent - req->rq_arrival_time.tv_sec,
- req->rq_sent - req->rq_deadline);
+ (s64)req->rq_arrival_time.tv_sec,
+ (long)(req->rq_sent - req->rq_arrival_time.tv_sec),
+ (long)(req->rq_sent - req->rq_deadline));
if (svc->srv_ops.so_req_printer == NULL)
seq_printf(s, "\n");
else
/* Let's setup deadline for reply unlink. */
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_LONG_BULK_UNLINK) &&
async && req->rq_bulk_deadline == 0)
- req->rq_bulk_deadline = get_seconds() + LONG_UNLINK;
+ req->rq_bulk_deadline = ktime_get_real_seconds() + LONG_UNLINK;
if (ptlrpc_client_bulk_active(req) == 0) /* completed or */
return 1; /* never registered */
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int service_time = max_t(int, get_seconds() -
+ int service_time = max_t(int, ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec, 1);
if (!(flags & PTLRPC_REPLY_EARLY) &&
if (unlikely(rc))
goto out;
- req->rq_sent = get_seconds();
+ req->rq_sent = ktime_get_real_seconds();
rc = ptl_send_buf(&rs->rs_md_h, rs->rs_repbuf, rs->rs_repdata_len,
(rs->rs_difficult && !rs->rs_no_ack) ?
OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_DELAY_SEND, request->rq_timeout + 5);
- do_gettimeofday(&request->rq_arrival_time);
- request->rq_sent = get_seconds();
+ ktime_get_real_ts64(&request->rq_arrival_time);
+ request->rq_sent = ktime_get_real_seconds();
/* We give the server rq_timeout secs to process the req, and
add the network latency for our local timeout. */
request->rq_deadline = request->rq_sent + request->rq_timeout +
va_start(args, fmt);
libcfs_debug_vmsg2(msgdata, fmt, args,
- " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %d dl " CFS_TIME_T " ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
+ " req@%p x%llu/t%lld(%lld) o%d->%s@%s:%d/%d lens %d/%d e %d to %lld dl %lld ref %d fl " REQ_FLAGS_FMT "/%x/%x rc %d/%d\n",
req, req->rq_xid, req->rq_transno,
req_ok ? lustre_msg_get_transno(req->rq_reqmsg) : 0,
req_ok ? lustre_msg_get_opc(req->rq_reqmsg) : -1,
libcfs_nid2str(nid),
req->rq_request_portal, req->rq_reply_portal,
req->rq_reqlen, req->rq_replen,
req->rq_early_count, req->rq_timedout,
- req->rq_deadline,
+ (s64)req->rq_deadline,
atomic_read(&req->rq_refcount),
DEBUG_REQ_FLAGS(req),
req_ok ? lustre_msg_get_flags(req->rq_reqmsg) : -1,
}
/* Set timer for closest deadline */
- next = (__s32)(array->paa_deadline - get_seconds() -
+ next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
ptlrpc_at_timer((unsigned long)svcpt);
spin_lock(&svcpt->scp_at_lock);
LASSERT(list_empty(&req->rq_timed_list));
- index = (unsigned long)req->rq_deadline % array->paa_size;
+ div_u64_rem(req->rq_deadline, array->paa_size, &index);
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
* so search backward. */
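Replacing the open-coded modulo with div_u64_rem() avoids a 64-bit division (and the __umoddi3 libgcc helper it would need on 32-bit) now that the deadline is 64 bits wide; a sketch with hypothetical names:
        /* Sketch only: slot = deadline % array_size without a 64-bit '%',
         * using div_u64_rem() from <linux/math64.h>. */
        #include <linux/math64.h>

        static u32 at_array_slot(u64 deadline, u32 array_size)
        {
                u32 slot;

                div_u64_rem(deadline, array_size, &slot);
                return slot;
        }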
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
struct ptlrpc_request *reqcopy;
struct lustre_msg *reqmsg;
- long olddl = req->rq_deadline - get_seconds();
- time_t newdl;
+ long olddl = req->rq_deadline - ktime_get_real_seconds();
+ time64_t newdl;
int rc;
/* deadline is when the client expects us to reply, margin is the
/* Fake our processing time into the future to ask the clients
* for some extra amount of time */
at_measured(&svcpt->scp_at_estimate, at_extra +
- get_seconds() -
+ ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec);
/* Check to see if we've actually increased the deadline -
* we may be past adaptive_max */
if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate)) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%ld), not sending early reply\n",
+ DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n",
olddl, req->rq_arrival_time.tv_sec +
at_get(&svcpt->scp_at_estimate) -
- get_seconds());
+ ktime_get_real_seconds());
return -ETIMEDOUT;
}
}
- newdl = get_seconds() + at_get(&svcpt->scp_at_estimate);
+ newdl = ktime_get_real_seconds() + at_get(&svcpt->scp_at_estimate);
reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
if (reqcopy == NULL)
struct ptlrpc_request *rq, *n;
struct list_head work_list;
__u32 index, count;
- time_t deadline;
- time_t now = get_seconds();
+ time64_t deadline;
+ time64_t now = ktime_get_real_seconds();
long delay;
int first, counter = 0;
server will take. Send early replies to everyone expiring soon. */
INIT_LIST_HEAD(&work_list);
deadline = -1;
- index = (unsigned long)array->paa_deadline % array->paa_size;
+ div_u64_rem(array->paa_deadline, array->paa_size, &index);
count = array->paa_count;
while (count > 0) {
count -= array->paa_reqs_count[index];
chance to send early replies */
LCONSOLE_WARN("%s: This server is not able to keep up with request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=" CFS_DURATION_T "(jiff)\n",
+ CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
counter, svcpt->scp_nreqs_incoming,
svcpt->scp_nreqs_active,
at_get(&svcpt->scp_at_estimate), delay);
}
/* req_in handling should/must be fast */
- if (get_seconds() - req->rq_arrival_time.tv_sec > 5)
+ if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
- cfs_time_sub(get_seconds(),
- req->rq_arrival_time.tv_sec));
+ (long)(ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec));
/* Set rpc server deadline and add it to the timed list */
deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
{
struct ptlrpc_service *svc = svcpt->scp_service;
struct ptlrpc_request *request;
- struct timeval work_start;
- struct timeval work_end;
- long timediff;
+ struct timespec64 work_start;
+ struct timespec64 work_end;
+ struct timespec64 timediff;
+ struct timespec64 arrived;
+ unsigned long timediff_usecs;
+ unsigned long arrived_usecs;
int rc;
int fail_opc = 0;
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
- do_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,
- NULL);
+ ktime_get_real_ts64(&work_start);
+ timediff = timespec64_sub(work_start, request->rq_arrival_time);
+ timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
+ timediff.tv_nsec / NSEC_PER_USEC;
if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
+ timediff_usecs);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
svcpt->scp_nreqs_incoming);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
- ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
+ ptlrpc_update_export_timer(request->rq_export,
+ timediff_usecs >> 19);
}
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
- if (get_seconds() > request->rq_deadline) {
+ if (ktime_get_real_seconds() > request->rq_deadline) {
DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline " CFS_DURATION_T ":" CFS_DURATION_T "s ago\n",
libcfs_id2str(request->rq_peer),
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(get_seconds(),
- request->rq_deadline));
+ (long)(request->rq_deadline -
+ request->rq_arrival_time.tv_sec),
+ (long)(ktime_get_real_seconds() -
+ request->rq_deadline));
goto put_conn;
}
lu_context_exit(&request->rq_session);
lu_context_fini(&request->rq_session);
- if (unlikely(get_seconds() > request->rq_deadline)) {
+ if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request,
- "Request took longer than estimated ("
- CFS_DURATION_T":"CFS_DURATION_T
- "s); client may timeout.",
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(get_seconds(),
- request->rq_deadline));
- }
-
- do_gettimeofday(&work_end);
- timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
+ "Request took longer than estimated (%lld:%llds); "
+ "client may timeout.",
+ (s64)request->rq_deadline -
+ request->rq_arrival_time.tv_sec,
+ (s64)ktime_get_real_seconds() - request->rq_deadline);
+ }
+
+ ktime_get_real_ts64(&work_end);
+ timediff = timespec64_sub(work_end, work_start);
+ timediff_usecs = timediff.tv_sec * USEC_PER_SEC +
+ timediff.tv_nsec / NSEC_PER_USEC;
+ arrived = timespec64_sub(work_end, request->rq_arrival_time);
+ arrived_usecs = arrived.tv_sec * USEC_PER_SEC +
+ arrived.tv_nsec / NSEC_PER_USEC;
CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %ldus (%ldus total) trans %llu rc %d/%d\n",
current_comm(),
(request->rq_export ?
request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg),
- timediff,
- cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ timediff_usecs,
+ arrived_usecs,
(request->rq_repmsg ?
lustre_msg_get_transno(request->rq_repmsg) :
request->rq_transno),
LASSERT(opc < LUSTRE_MAX_OPCODES);
lprocfs_counter_add(svc->srv_stats,
opc + EXTRA_MAX_OPCODES,
- timediff);
+ timediff_usecs);
}
}
if (unlikely(request->rq_early_count)) {
DEBUG_REQ(D_ADAPTTO, request,
- "sent %d early replies before finishing in "
- CFS_DURATION_T"s",
+ "sent %d early replies before finishing in %llds",
request->rq_early_count,
- cfs_time_sub(work_end.tv_sec,
- request->rq_arrival_time.tv_sec));
+ (s64)work_end.tv_sec -
+ request->rq_arrival_time.tv_sec);
}
out_req:
static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_request *request = NULL;
- struct timeval right_now;
- long timediff;
+ struct timespec64 right_now;
+ struct timespec64 timediff;
- do_gettimeofday(&right_now);
+ ktime_get_real_ts64(&right_now);
spin_lock(&svcpt->scp_req_lock);
/* How long has the next entry been waiting? */
return 0;
}
- timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
+ timediff = timespec64_sub(right_now, request->rq_arrival_time);
spin_unlock(&svcpt->scp_req_lock);
- if ((timediff / ONE_MILLION) >
+ if ((timediff.tv_sec) >
(AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
- CERROR("%s: unhealthy - request has been waiting %lds\n",
- svcpt->scp_service->srv_name, timediff / ONE_MILLION);
+ CERROR("%s: unhealthy - request has been waiting %llds\n",
+ svcpt->scp_service->srv_name, (s64)timediff.tv_sec);
return -1;
}
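Because timespec64_sub() returns a normalized result, comparing tv_sec directly replaces the old microseconds / ONE_MILLION conversion; a sketch of the check under that assumption, with hypothetical names:
        /* Sketch only: has a queued request been waiting longer than
         * limit_secs? The tv_sec field alone carries the whole seconds. */
        #include <linux/time64.h>
        #include <linux/timekeeping.h>

        static bool waited_too_long(const struct timespec64 *arrival, time64_t limit_secs)
        {
                struct timespec64 now, waited;

                ktime_get_real_ts64(&now);
                waited = timespec64_sub(now, *arrival);
                return waited.tv_sec > limit_secs;
        }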