From: Masanari Iida
Date: Fri, 13 Dec 2013 17:24:04 +0000 (+0900)
Subject: staging: lustre: Fix typo in lustre/lustre/osc
X-Git-Url: https://git.karo-electronics.de/?a=commitdiff_plain;h=11d66e89264024ca52f6128d38650c93dbda5c78;p=linux-beck.git

staging: lustre: Fix typo in lustre/lustre/osc

Correct spelling typo in lustre/lustre/osc

Signed-off-by: Masanari Iida
Signed-off-by: Greg Kroah-Hartman
---
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 00295da4ab3d..be4511e78c04 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -1703,7 +1703,7 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
 	return is_ready;
 }
 
-/* this is trying to propogate async writeback errors back up to the
+/* this is trying to propagate async writeback errors back up to the
  * application. As an async write fails we record the error code for later if
  * the app does an fsync. As long as errors persist we force future rpcs to be
  * sync so that the app can get a sync error and break the cycle of queueing
@@ -2006,7 +2006,7 @@ static struct osc_object *osc_next_obj(struct client_obd *cli)
 	/* then if we have cache waiters, return all objects with queued
 	 * writes. This is especially important when many small files
 	 * have filled up the cache and not been fired into rpcs because
-	 * they don't pass the nr_pending/object threshhold */
+	 * they don't pass the nr_pending/object threshold */
 	if (!list_empty(&cli->cl_cache_waiters) &&
 	    !list_empty(&cli->cl_loi_write_list))
 		return list_to_obj(&cli->cl_loi_write_list, write_item);
@@ -2226,7 +2226,7 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
 	/* Add this page into extent by the following steps:
 	 * 1. if there exists an active extent for this IO, mostly this page
 	 *    can be added to the active extent and sometimes we need to
-	 *    expand extent to accomodate this page;
+	 *    expand extent to accommodate this page;
 	 * 2. otherwise, a new extent will be allocated. */
 
 	ext = oio->oi_active;
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index a3aa9b6596ef..9e7899fa4cc4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -299,7 +299,7 @@ struct osc_lock {
 				 ols_flush:1,
 	/**
 	 * if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
-	 * the EVAVAIL error as torerable, this will make upper logic happy
+	 * the EVAVAIL error as tolerable, this will make upper logic happy
 	 * to wait all glimpse locks to each OSTs to be completed.
 	 * Glimpse lock converts to normal lock if the server lock is
 	 * granted.
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index c90abfbb1d7a..ef7b9c2b208e 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -929,7 +929,7 @@ static void osc_lock_build_einfo(const struct lu_env *env,
  * Determine if the lock should be converted into a lockless lock.
  *
  * Steps to check:
- * - if the lock has an explicite requirment for a non-lockless lock;
+ * - if the lock has an explicit requirement for a non-lockless lock;
  * - if the io lock request type ci_lockreq;
  * - send the enqueue rpc to ost to make the further decision;
  * - special treat to truncate lockless lock
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index 6c20b8ecfb82..4909e486dc5c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -587,7 +587,7 @@ static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
 /* LRU pages are freed in batch mode. OSC should at least free this
  * number of pages to avoid running out of LRU budget, and.. */
 static const int lru_shrink_min = 2 << (20 - PAGE_CACHE_SHIFT); /* 2M */
-/* free this number at most otherwise it will take too long time to finsih. */
+/* free this number at most otherwise it will take too long time to finish. */
 static const int lru_shrink_max = 32 << (20 - PAGE_CACHE_SHIFT); /* 32M */
 
 /* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
@@ -606,7 +606,7 @@ static int osc_cache_too_much(struct client_obd *cli)
 		return min(pages, lru_shrink_max);
 
 	/* if it's going to run out LRU slots, we should free some, but not
-	 * too much to maintain faireness among OSCs. */
+	 * too much to maintain fairness among OSCs. */
 	if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
 		unsigned long tmp;
 
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index c837a0d71e0f..c0d9ac6b1bb6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -773,7 +773,7 @@ static int osc_destroy(const struct lu_env *env, struct obd_export *exp,
 	osc_pack_capa(req, body, (struct obd_capa *)capa);
 	ptlrpc_request_set_replen(req);
 
-	/* If osc_destory is for destroying the unlink orphan,
+	/* If osc_destroy is for destroying the unlink orphan,
 	 * sent from MDT to OST, which should not be blocked here,
 	 * because the process might be triggered by ptlrpcd, and
 	 * it is not good to block ptlrpcd thread (b=16006)*/