Cleanup in general headers:
* use size_t in cfs_size_round*()
* make index and len unsigned in lustre_cfg_*()
* make iteration variables the same type as the values they are compared against
* make page counters unsigned
Signed-off-by: Dmitry Eremin <dmitry.eremin@intel.com>
Intel-bug-id: https://jira.hpdd.intel.com/browse/LU-5417
Reviewed-on: http://review.whamcloud.com/11327
Reviewed-by: John L. Hammond <john.hammond@intel.com>
Reviewed-by: Fan Yong <fan.yong@intel.com>
Reviewed-by: Oleg Drokin <oleg.drokin@intel.com>
Signed-off-by: James Simmons <jsimmons@infradead.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
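For context on why the signed prototypes were worth fixing: cfs_size_round() took and returned int, so a negative length (say, an unchecked error code reused as a size) rounded to another negative int that could then flow into memory-size arithmetic. A minimal standalone sketch of the before/after behavior (the harness below is illustrative, not part of the patch):

  #include <stdio.h>
  #include <stddef.h>

  /* Old shape: signed in, signed out. */
  static inline int size_round_old(int val)
  {
          return (val + 7) & (~0x7);
  }

  /* New shape from this patch: the return type becomes size_t, so
   * downstream size arithmetic stays in unsigned territory. */
  static inline size_t size_round_new(int val)
  {
          return (val + 7) & (~0x7);
  }

  int main(void)
  {
          int bad = -9;   /* e.g. an error code mistakenly used as a length */

          printf("old: %d\n", size_round_old(bad));   /* prints -8: a "negative size" */
          printf("new: %zu\n", size_round_new(bad));  /* wraps to a huge size_t */
          return 0;
  }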
#define MKSTR(ptr) ((ptr) ? (ptr) : "")
-static inline int cfs_size_round4(int val)
+static inline size_t cfs_size_round4(int val)
{
return (val + 3) & (~0x3);
}
#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round(int val)
+static inline size_t cfs_size_round(int val)
{
return (val + 7) & (~0x7);
}
#define HAVE_CFS_SIZE_ROUND
#endif
-static inline int cfs_size_round16(int val)
+static inline size_t cfs_size_round16(int val)
{
return (val + 0xf) & (~0xf);
}
-static inline int cfs_size_round32(int val)
+static inline size_t cfs_size_round32(int val)
{
return (val + 0x1f) & (~0x1f);
}
-static inline int cfs_size_round0(int val)
+static inline size_t cfs_size_round0(int val)
{
if (!val)
return 0;
static inline size_t cfs_round_strlen(char *fset)
{
- return (size_t)cfs_size_round((int)strlen(fset) + 1);
+ return cfs_size_round((int)strlen(fset) + 1);
}
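All of the cfs_size_round*() helpers use the same power-of-two round-up idiom: add n - 1, then mask off the low bits to round val up to the next multiple of n. A quick standalone check of the idiom (not from the patch):

  #include <assert.h>

  /* Round val up to the next multiple of 8, as cfs_size_round() does. */
  static inline unsigned long round_up8(unsigned long val)
  {
          return (val + 7) & ~0x7UL;
  }

  int main(void)
  {
          assert(round_up8(0) == 0);
          assert(round_up8(1) == 8);
          assert(round_up8(8) == 8);      /* already aligned: unchanged */
          assert(round_up8(9) == 16);
          return 0;
  }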
#define LOGL(var, len, ptr) \
int idx,
enum lprocfs_fields_flags field)
{
- int i;
+ unsigned int i;
unsigned int num_cpu;
unsigned long flags = 0;
__u64 ret = 0;
lustre_cfg_bufs_set_string(bufs, 0, name);
}
-static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
+static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, __u32 index)
{
- int i;
- int offset;
- int bufcount;
-
- LASSERT(index >= 0);
+ __u32 i;
+ size_t offset;
+ __u32 bufcount;
bufcount = lcfg->lcfg_bufcount;
if (index >= bufcount)
static inline void lustre_cfg_bufs_init(struct lustre_cfg_bufs *bufs,
struct lustre_cfg *lcfg)
{
- int i;
+ __u32 i;
bufs->lcfg_bufcount = lcfg->lcfg_bufcount;
for (i = 0; i < bufs->lcfg_bufcount; i++) {
}
}
-static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, int index)
+static inline char *lustre_cfg_string(struct lustre_cfg *lcfg, __u32 index)
{
char *s;
* of data. Try to use the padding first though.
*/
if (s[lcfg->lcfg_buflens[index] - 1] != '\0') {
- int last = min((int)lcfg->lcfg_buflens[index],
- cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
+ size_t last = min((size_t)lcfg->lcfg_buflens[index],
+ cfs_size_round(lcfg->lcfg_buflens[index]) - 1);
char lost = s[last];
s[last] = '\0';
return s;
}
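The termination logic above leans on the message layout: buffers inside a lustre_cfg are packed at cfs_size_round()-aligned offsets, so any buflen that is not a multiple of 8 leaves padding bytes right after the payload. Modeled in isolation (simplified; min() spelled out, helper names hypothetical):

  #include <stddef.h>

  static inline size_t round_up8(size_t val)
  {
          return (val + 7) & ~(size_t)0x7;
  }

  /* NUL-terminate a possibly-unterminated string buffer, preferring a
   * padding byte; only a buflen already aligned to 8 forces the final
   * payload byte to be overwritten (the "lost" character above). */
  static char *cfg_string_terminate(char *s, size_t buflen)
  {
          if (s[buflen - 1] != '\0') {
                  size_t pad_end = round_up8(buflen) - 1;
                  size_t last = buflen < pad_end ? buflen : pad_end;

                  s[last] = '\0';
          }
          return s;
  }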
-static inline int lustre_cfg_len(__u32 bufcount, __u32 *buflens)
+static inline __u32 lustre_cfg_len(__u32 bufcount, __u32 *buflens)
{
- int i;
- int len;
+ __u32 i;
+ __u32 len;
len = LCFG_HDR_SIZE(bufcount);
for (i = 0; i < bufcount; i++)
return;
}
-static inline int lustre_cfg_sanity_check(void *buf, int len)
+static inline int lustre_cfg_sanity_check(void *buf, size_t len)
{
struct lustre_cfg *lcfg = (struct lustre_cfg *)buf;
static inline bool
lsm_md_eq(const struct lmv_stripe_md *lsm1, const struct lmv_stripe_md *lsm2)
{
- int idx;
+ __u32 idx;
if (lsm1->lsm_md_magic != lsm2->lsm_md_magic ||
lsm1->lsm_md_stripe_count != lsm2->lsm_md_stripe_count ||
static inline void lmv1_le_to_cpu(struct lmv_mds_md_v1 *lmv_dst,
const struct lmv_mds_md_v1 *lmv_src)
{
- int i;
+ __u32 i;
lmv_dst->lmv_magic = le32_to_cpu(lmv_src->lmv_magic);
lmv_dst->lmv_stripe_count = le32_to_cpu(lmv_src->lmv_stripe_count);
/**
* Returns 1 if request buffer at offset \a index was already swabbed
*/
-static inline int lustre_req_swabbed(struct ptlrpc_request *req, int index)
+static inline int lustre_req_swabbed(struct ptlrpc_request *req, size_t index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
return req->rq_req_swab_mask & (1 << index);
/**
* Returns 1 if request reply buffer at offset \a index was already swabbed
*/
-static inline int lustre_rep_swabbed(struct ptlrpc_request *req, int index)
+static inline int lustre_rep_swabbed(struct ptlrpc_request *req, size_t index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
return req->rq_rep_swab_mask & (1 << index);
/**
* Mark request buffer at offset \a index as already swabbed
*/
-static inline void lustre_set_req_swabbed(struct ptlrpc_request *req, int index)
+static inline void lustre_set_req_swabbed(struct ptlrpc_request *req,
+ size_t index)
{
LASSERT(index < sizeof(req->rq_req_swab_mask) * 8);
LASSERT((req->rq_req_swab_mask & (1 << index)) == 0);
/**
* Mark request reply buffer at offset \a index as already swabbed
*/
-static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req, int index)
+static inline void lustre_set_rep_swabbed(struct ptlrpc_request *req,
+ size_t index)
{
LASSERT(index < sizeof(req->rq_rep_swab_mask) * 8);
LASSERT((req->rq_rep_swab_mask & (1 << index)) == 0);
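All four helpers above keep per-buffer swab state in a fixed-width bitmask, one bit per message buffer, which is why each asserts index < sizeof(mask) * 8 before shifting. The scheme in miniature (standalone model, not the ptlrpc API):

  #include <assert.h>
  #include <stdint.h>
  #include <stddef.h>

  struct msg_state {
          uint32_t swab_mask;     /* bit N set: buffer N already swabbed */
  };

  static int buf_swabbed(const struct msg_state *m, size_t index)
  {
          assert(index < sizeof(m->swab_mask) * 8);       /* 0..31 only */
          return (m->swab_mask & (1U << index)) != 0;
  }

  static void set_buf_swabbed(struct msg_state *m, size_t index)
  {
          assert(index < sizeof(m->swab_mask) * 8);
          assert(!buf_swabbed(m, index)); /* swab each buffer at most once */
          m->swab_mask |= 1U << index;
  }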
struct sptlrpc_flavor cl_flvr_mgc; /* fixed flavor of mgc->mgs */
/* the grant values are protected by loi_list_lock below */
- long cl_dirty_pages; /* all _dirty_ in pages */
- long cl_dirty_max_pages;/* allowed w/o rpc */
- long cl_dirty_transit; /* dirty synchronous */
- long cl_avail_grant; /* bytes of credit for ost */
- long cl_lost_grant; /* lost credits (trunc) */
+ unsigned long cl_dirty_pages; /* all _dirty_ in pages */
+ unsigned long cl_dirty_max_pages; /* allowed w/o rpc */
+ unsigned long cl_dirty_transit; /* dirty synchronous */
+ unsigned long cl_avail_grant; /* bytes of credit for ost */
+ unsigned long cl_lost_grant; /* lost credits (trunc) */
/* since we allocate grant by blocks, we don't know how many grant will
* be used to add a page into cache. As a solution, we reserve maximum
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
struct list_head cl_loi_read_list;
- int cl_r_in_flight;
- int cl_w_in_flight;
+ __u32 cl_r_in_flight;
+ __u32 cl_w_in_flight;
/* just a sum of the loi/lop pending numbers to be exported by sysfs */
atomic_t cl_pending_w_pages;
atomic_t cl_pending_r_pages;
__u32 cl_max_pages_per_rpc;
- int cl_max_rpcs_in_flight;
+ __u32 cl_max_rpcs_in_flight;
struct obd_histogram cl_read_rpc_hist;
struct obd_histogram cl_write_rpc_hist;
struct obd_histogram cl_read_page_hist;
cli->cl_dirty_max_pages =
(OSC_MAX_DIRTY_DEFAULT * 1024 * 1024) >> PAGE_SHIFT;
else {
- long dirty_max = cli->cl_max_rpcs_in_flight *
- cli->cl_max_pages_per_rpc;
+ unsigned long dirty_max = cli->cl_max_rpcs_in_flight *
+ cli->cl_max_pages_per_rpc;
if (dirty_max > cli->cl_dirty_max_pages)
cli->cl_dirty_max_pages = dirty_max;
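For scale: OSC_MAX_DIRTY_DEFAULT is a megabyte count, so with 4 KiB pages (PAGE_SHIFT = 12) a hypothetical 32 MB default gives (32 * 1024 * 1024) >> 12 = 8192 pages. The else branch only raises cl_dirty_max_pages above that default when cl_max_rpcs_in_flight * cl_max_pages_per_rpc is larger, e.g. 8 RPCs * 1024 pages/RPC = 8192 pages again.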
cli->cl_dirty_pages, cli->cl_dirty_max_pages);
oa->o_undirty = 0;
} else {
- long max_in_flight = (cli->cl_max_pages_per_rpc <<
- PAGE_SHIFT) *
- (cli->cl_max_rpcs_in_flight + 1);
+ unsigned long max_in_flight;
+
+ max_in_flight = (cli->cl_max_pages_per_rpc << PAGE_SHIFT) *
+ (cli->cl_max_rpcs_in_flight + 1);
oa->o_undirty = max(cli->cl_dirty_max_pages << PAGE_SHIFT,
max_in_flight);
}
}
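Worked through in bytes: max_in_flight = (cl_max_pages_per_rpc << PAGE_SHIFT) * (cl_max_rpcs_in_flight + 1), so for e.g. 256 pages per RPC, 4 KiB pages, and 8 RPCs in flight that is (256 << 12) * 9 = 9 MiB. o_undirty then reports the larger of that and the dirty cap converted to bytes (cl_dirty_max_pages << PAGE_SHIFT). With both factors now unsigned, the product can no longer appear as a negative long before the max().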
spin_unlock(&cli->cl_loi_list_lock);
- DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
+ DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %ur/%uw in flight",
page_count, aa, cli->cl_r_in_flight,
cli->cl_w_in_flight);
void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *entry)
{
- int i;
+ __u8 i;
__swab64s(&entry->mne_version);
__swab32s(&entry->mne_instance);
void lustre_swab_fiemap(struct ll_user_fiemap *fiemap)
{
- int i;
+ __u32 i;
__swab64s(&fiemap->fm_start);
__swab64s(&fiemap->fm_length);