/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/obdclass/lu_object.c
 *
 * Lustre Object.
 * These are the only exported functions, they provide some generic
 * infrastructure for managing object devices.
 *
 *   Author: Nikita Danilov <nikita.danilov@sun.com>
 */
#define DEBUG_SUBSYSTEM S_CLASS

#include <linux/libcfs/libcfs.h>

#include <linux/module.h>

#include <linux/libcfs/libcfs_hash.h> /* hash_long() */
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
#include <lu_object.h>
#include <lu_ref.h>
#include <linux/list.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);

/**
 * Decrease reference counter on object. If last reference is freed, return
 * object to the cache, unless lu_object_is_dying(o) holds. In the latter
 * case, free object immediately.
 */
void lu_object_put(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *top;
	struct lu_site *site;
	struct lu_object *orig;
	cfs_hash_bd_t bd;
	const struct lu_fid *fid;

	top  = o->lo_header;
	site = o->lo_dev->ld_site;
	orig = o;

	/*
	 * Till FIDs-on-OST is fully implemented, anonymous objects
	 * are possible in OSP. Such an object isn't listed in the site,
	 * so we should not remove it from the site.
	 */
	fid = lu_object_fid(o);
	if (fid_is_zero(fid)) {
		LASSERT(top->loh_hash.next == NULL
			&& top->loh_hash.pprev == NULL);
		LASSERT(list_empty(&top->loh_lru));
		if (!atomic_dec_and_test(&top->loh_ref))
			return;
		list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
			if (o->lo_ops->loo_object_release != NULL)
				o->lo_ops->loo_object_release(env, o);
		}
		lu_object_free(env, orig);
		return;
	}

	cfs_hash_bd_get(site->ls_obj_hash, &top->loh_fid, &bd);
	bkt = cfs_hash_bd_extra_get(site->ls_obj_hash, &bd);

	if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
		if (lu_object_is_dying(top)) {
			/*
			 * somebody may be waiting for this, currently only
			 * used for cl_object, see cl_object_put_last().
			 */
			wake_up_all(&bkt->lsb_marche_funebre);
		}
		return;
	}

	LASSERT(bkt->lsb_busy > 0);
	bkt->lsb_busy--;
	/*
	 * When last reference is released, iterate over object
	 * layers, and notify them that object is no longer busy.
	 */
	list_for_each_entry_reverse(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_release != NULL)
			o->lo_ops->loo_object_release(env, o);
	}

	if (!lu_object_is_dying(top)) {
		LASSERT(list_empty(&top->loh_lru));
		list_add_tail(&top->loh_lru, &bkt->lsb_lru);
		cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
		return;
	}

	/*
	 * If object is dying (will not be cached), remove it
	 * from hash table and LRU.
	 *
	 * This is done with hash table and LRU lists locked. As the only
	 * way to acquire first reference to previously unreferenced
	 * object is through hash-table lookup (lu_object_find()),
	 * or LRU scanning (lu_site_purge()), that are done under hash-table
	 * and LRU lock, no race with concurrent object lookup is possible
	 * and we can safely destroy object below.
	 */
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags))
		cfs_hash_bd_del_locked(site->ls_obj_hash, &bd, &top->loh_hash);
	cfs_hash_bd_unlock(site->ls_obj_hash, &bd, 1);
	/*
	 * Object was already removed from hash and LRU above, can
	 * kill it.
	 */
	lu_object_free(env, orig);
}
EXPORT_SYMBOL(lu_object_put);
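/*
 * Editorial usage sketch (not part of the original file): a reference
 * obtained from lu_object_find() is expected to be dropped through
 * lu_object_put(). "env", "dev" and "fid" below stand for caller-provided
 * values:
 *
 *	struct lu_object *o = lu_object_find(env, dev, fid, NULL);
 *
 *	if (!IS_ERR(o)) {
 *		... use the object ...
 *		lu_object_put(env, o);
 *	}
 */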
/**
 * Put object and don't keep it in cache. This is a temporary solution for
 * multi-site objects whose layering is not constant.
 */
void lu_object_put_nocache(const struct lu_env *env, struct lu_object *o)
{
	set_bit(LU_OBJECT_HEARD_BANSHEE, &o->lo_header->loh_flags);
	lu_object_put(env, o);
}
EXPORT_SYMBOL(lu_object_put_nocache);
/**
 * Kill the object and take it out of LRU cache.
 * Currently used by client code for layout change.
 */
void lu_object_unhash(const struct lu_env *env, struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	set_bit(LU_OBJECT_HEARD_BANSHEE, &top->loh_flags);
	if (!test_and_set_bit(LU_OBJECT_UNHASHED, &top->loh_flags)) {
		cfs_hash_t *obj_hash = o->lo_dev->ld_site->ls_obj_hash;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get_and_lock(obj_hash, &top->loh_fid, &bd, 1);
		list_del_init(&top->loh_lru);
		cfs_hash_bd_del_locked(obj_hash, &bd, &top->loh_hash);
		cfs_hash_bd_unlock(obj_hash, &bd, 1);
	}
}
EXPORT_SYMBOL(lu_object_unhash);
/**
 * Allocate new object.
 *
 * This follows object creation protocol, described in the comment within
 * struct lu_device_operations definition.
 */
static struct lu_object *lu_object_alloc(const struct lu_env *env,
					 struct lu_device *dev,
					 const struct lu_fid *f,
					 const struct lu_object_conf *conf)
{
	struct lu_object *scan;
	struct lu_object *top;
	struct list_head *layers;
	int clean;
	int result;

	/*
	 * Create top-level object slice. This will also create
	 * lu_object_header.
	 */
	top = dev->ld_ops->ldo_object_alloc(env, NULL, dev);
	if (top == NULL)
		return ERR_PTR(-ENOMEM);
	if (IS_ERR(top))
		return top;
	/*
	 * This is the only place where object fid is assigned. It's constant
	 * after this point.
	 */
	top->lo_header->loh_fid = *f;
	layers = &top->lo_header->loh_layers;
	do {
		/*
		 * Call ->loo_object_init() repeatedly, until no more new
		 * object slices are created.
		 */
		clean = 1;
		list_for_each_entry(scan, layers, lo_linkage) {
			if (scan->lo_flags & LU_OBJECT_ALLOCATED)
				continue;
			clean = 0;
			scan->lo_header = top->lo_header;
			result = scan->lo_ops->loo_object_init(env, scan, conf);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
			scan->lo_flags |= LU_OBJECT_ALLOCATED;
		}
	} while (!clean);

	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_start != NULL) {
			result = scan->lo_ops->loo_object_start(env, scan);
			if (result != 0) {
				lu_object_free(env, top);
				return ERR_PTR(result);
			}
		}
	}

	lprocfs_counter_incr(dev->ld_site->ls_stats, LU_SS_CREATED);
	return top;
}
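/*
 * Editorial note on the protocol above: each ->loo_object_init() call may
 * append further slices to loh_layers, so the do/while loop re-scans the
 * list until a pass completes with every slice already marked
 * LU_OBJECT_ALLOCATED. For a typical server-side stack this is how, e.g.,
 * an mdt slice ends up followed by mdd, lod and osd slices.
 */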
/**
 * Free an object.
 */
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
	struct lu_site_bkt_data *bkt;
	struct lu_site *site;
	struct lu_object *scan;
	struct list_head *layers;
	struct list_head splice;

	site   = o->lo_dev->ld_site;
	layers = &o->lo_header->loh_layers;
	bkt    = lu_site_bkt_from_fid(site, &o->lo_header->loh_fid);
	/*
	 * First call ->loo_object_delete() method to release all resources.
	 */
	list_for_each_entry_reverse(scan, layers, lo_linkage) {
		if (scan->lo_ops->loo_object_delete != NULL)
			scan->lo_ops->loo_object_delete(env, scan);
	}

	/*
	 * Then, splice object layers into stand-alone list, and call
	 * ->loo_object_free() on all layers to free memory. Splice is
	 * necessary, because lu_object_header is freed together with the
	 * top-level slice.
	 */
	INIT_LIST_HEAD(&splice);
	list_splice_init(layers, &splice);
	while (!list_empty(&splice)) {
		/*
		 * Free layers in bottom-to-top order, so that object header
		 * lives as long as possible and ->loo_object_free() methods
		 * can look at its contents.
		 */
		o = container_of0(splice.prev, struct lu_object, lo_linkage);
		list_del_init(&o->lo_linkage);
		LASSERT(o->lo_ops->loo_object_free != NULL);
		o->lo_ops->loo_object_free(env, o);
	}

	if (waitqueue_active(&bkt->lsb_marche_funebre))
		wake_up_all(&bkt->lsb_marche_funebre);
}
/**
 * Free \a nr objects from the cold end of the site LRU list.
 */
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
	struct lu_object_header *h;
	struct lu_object_header *temp;
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	cfs_hash_bd_t bd2;
	struct list_head dispose;
	int did_sth;
	int start;
	int count;
	int bnr;
	int i;

	if (OBD_FAIL_CHECK(OBD_FAIL_OBD_NO_LRU))
		return 0;

	INIT_LIST_HEAD(&dispose);
	/*
	 * Under LRU list lock, scan LRU list and move unreferenced objects to
	 * the dispose list, removing them from LRU and hash table.
	 */
	start = s->ls_purge_start;
	bnr = (nr == ~0) ? -1 : nr / CFS_HASH_NBKT(s->ls_obj_hash) + 1;
 again:
	did_sth = 0;
	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		if (i < start)
			continue;
		count = bnr;
		cfs_hash_bd_lock(s->ls_obj_hash, &bd, 1);
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);

		list_for_each_entry_safe(h, temp, &bkt->lsb_lru, loh_lru) {
			LASSERT(atomic_read(&h->loh_ref) == 0);

			cfs_hash_bd_get(s->ls_obj_hash, &h->loh_fid, &bd2);
			LASSERT(bd.bd_bucket == bd2.bd_bucket);

			cfs_hash_bd_del_locked(s->ls_obj_hash,
					       &bd2, &h->loh_hash);
			list_move(&h->loh_lru, &dispose);
			did_sth = 1;
			if (nr != ~0 && --nr == 0)
				break;

			if (count > 0 && --count == 0)
				break;
		}
		cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
		/*
		 * Free everything on the dispose list. This is safe against
		 * races due to the reasons described in lu_object_put().
		 */
		while (!list_empty(&dispose)) {
			h = container_of0(dispose.next,
					  struct lu_object_header, loh_lru);
			list_del_init(&h->loh_lru);
			lu_object_free(env, lu_object_top(h));
			lprocfs_counter_incr(s->ls_stats, LU_SS_LRU_PURGED);
		}

		if (nr == 0)
			break;
	}

	if (nr != 0 && did_sth && start != 0) {
		start = 0; /* restart from the first bucket */
		goto again;
	}
	/* race on s->ls_purge_start, but nobody cares */
	s->ls_purge_start = i % CFS_HASH_NBKT(s->ls_obj_hash);

	return nr;
}
EXPORT_SYMBOL(lu_site_purge);
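/*
 * Usage note (editorial): passing ~0 as \a nr drains the whole site, which
 * is exactly what lu_stack_fini() below does:
 *
 *	lu_site_purge(env, site, ~0);
 *
 * A finite \a nr frees at most that many unreferenced objects from the
 * cold end of the LRU.
 */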
/**
 * Code below has to jump through certain loops to output object description
 * into libcfs_debug_msg-based log. The problem is that lu_object_print()
 * composes object description from strings that are parts of _lines_ of
 * output (i.e., strings that are not terminated by newline). This doesn't fit
 * very well into libcfs_debug_msg() interface that assumes that each message
 * supplied to it is a self-contained output line.
 *
 * To work around this, strings are collected in a temporary buffer
 * (implemented as a value of lu_cdebug_key key), until terminating newline
 * character is detected.
 */

enum {
	/**
	 * Maximal line size.
	 *
	 * XXX overflow is not handled correctly.
	 */
	LU_CDEBUG_LINE = 512
};

struct lu_cdebug_data {
	/**
	 * Temporary buffer.
	 */
	char lck_area[LU_CDEBUG_LINE];
};
/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);

/**
 * Key, holding temporary buffer. This key is registered very early by
 * lu_global_init().
 */
struct lu_context_key lu_global_key = {
	.lct_tags = LCT_MD_THREAD | LCT_DT_THREAD |
		    LCT_MG_THREAD | LCT_CL_THREAD,
	.lct_init = lu_global_key_init,
	.lct_fini = lu_global_key_fini
};
/**
 * Printer function emitting messages through libcfs_debug_msg().
 */
int lu_cdebug_printer(const struct lu_env *env,
		      void *cookie, const char *format, ...)
{
	struct libcfs_debug_msg_data *msgdata = cookie;
	struct lu_cdebug_data *key;
	int used;
	int complete;
	va_list args;

	va_start(args, format);

	key = lu_context_key_get(&env->le_ctx, &lu_global_key);
	LASSERT(key != NULL);

	used = strlen(key->lck_area);
	complete = format[strlen(format) - 1] == '\n';
	/*
	 * Append new chunk to the buffer.
	 */
	vsnprintf(key->lck_area + used,
		  ARRAY_SIZE(key->lck_area) - used, format, args);
	if (complete) {
		if (cfs_cdebug_show(msgdata->msg_mask, msgdata->msg_subsys))
			libcfs_debug_msg(msgdata, "%s", key->lck_area);
		key->lck_area[0] = 0;
	}
	va_end(args);
	return 0;
}
EXPORT_SYMBOL(lu_cdebug_printer);
/**
 * Print object header.
 */
void lu_object_header_print(const struct lu_env *env, void *cookie,
			    lu_printer_t printer,
			    const struct lu_object_header *hdr)
{
	(*printer)(env, cookie, "header@%p[%#lx, %d, "DFID"%s%s%s]",
		   hdr, hdr->loh_flags, atomic_read(&hdr->loh_ref),
		   PFID(&hdr->loh_fid),
		   hlist_unhashed(&hdr->loh_hash) ? "" : " hash",
		   list_empty((struct list_head *)&hdr->loh_lru) ?
		   "" : " lru",
		   hdr->loh_attr & LOHA_EXISTS ? " exist" : "");
}
EXPORT_SYMBOL(lu_object_header_print);
/**
 * Print human readable representation of the \a o to the \a printer.
 */
void lu_object_print(const struct lu_env *env, void *cookie,
		     lu_printer_t printer, const struct lu_object *o)
{
	static const char ruler[] = "........................................";
	struct lu_object_header *top;
	int depth;

	top = o->lo_header;
	lu_object_header_print(env, cookie, printer, top);
	(*printer)(env, cookie, "{ \n");
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		depth = o->lo_depth + 4;

		/*
		 * print `.' \a depth times followed by type name and address
		 */
		(*printer)(env, cookie, "%*.*s%s@%p", depth, depth, ruler,
			   o->lo_dev->ld_type->ldt_name, o);
		if (o->lo_ops->loo_object_print != NULL)
			o->lo_ops->loo_object_print(env, cookie, printer, o);
		(*printer)(env, cookie, "\n");
	}
	(*printer)(env, cookie, "} header@%p\n", top);
}
EXPORT_SYMBOL(lu_object_print);
/**
 * Check object consistency.
 */
int lu_object_invariant(const struct lu_object *o)
{
	struct lu_object_header *top;

	top = o->lo_header;
	list_for_each_entry(o, &top->loh_layers, lo_linkage) {
		if (o->lo_ops->loo_object_invariant != NULL &&
		    !o->lo_ops->loo_object_invariant(o))
			return 0;
	}
	return 1;
}
EXPORT_SYMBOL(lu_object_invariant);
static struct lu_object *htable_lookup(struct lu_site *s,
				       cfs_hash_bd_t *bd,
				       const struct lu_fid *f,
				       wait_queue_t *waiter,
				       __u64 *version)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object_header *h;
	struct hlist_node *hnode;
	__u64 ver = cfs_hash_bd_version_get(bd);

	if (*version == ver)
		return ERR_PTR(-ENOENT);

	*version = ver;
	bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, bd);
	/* cfs_hash_bd_peek_locked is a somewhat "internal" function
	 * of cfs_hash; it doesn't add a refcount on the object. */
	hnode = cfs_hash_bd_peek_locked(s->ls_obj_hash, bd, (void *)f);
	if (hnode == NULL) {
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_MISS);
		return ERR_PTR(-ENOENT);
	}

	h = container_of0(hnode, struct lu_object_header, loh_hash);
	if (likely(!lu_object_is_dying(h))) {
		cfs_hash_get(s->ls_obj_hash, hnode);
		lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_HIT);
		list_del_init(&h->loh_lru);
		return lu_object_top(h);
	}

	/*
	 * Lookup found an object being destroyed; this object cannot be
	 * returned (to assure that references to dying objects are eventually
	 * drained), and moreover, lookup has to wait until object is freed.
	 */
	init_waitqueue_entry_current(waiter);
	add_wait_queue(&bkt->lsb_marche_funebre, waiter);
	set_current_state(TASK_UNINTERRUPTIBLE);
	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_DEATH_RACE);
	return ERR_PTR(-EAGAIN);
}
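/*
 * Note (editorial): on the -EAGAIN path above the calling thread has been
 * put on the bucket's lsb_marche_funebre wait-queue in TASK_UNINTERRUPTIBLE
 * state. The caller must therefore sleep and remove itself from the queue
 * before retrying; see the loop in lu_object_find_at() below for the
 * canonical pattern.
 */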
/**
 * Search cache for an object with the fid \a f. If such object is found,
 * return it. Otherwise, create new object, insert it into cache and return
 * it. In any case, additional reference is acquired on the returned object.
 */
struct lu_object *lu_object_find(const struct lu_env *env,
				 struct lu_device *dev, const struct lu_fid *f,
				 const struct lu_object_conf *conf)
{
	return lu_object_find_at(env, dev->ld_site->ls_top_dev, f, conf);
}
EXPORT_SYMBOL(lu_object_find);
static struct lu_object *lu_object_new(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *o;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	struct lu_site_bkt_data *bkt;

	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	hs = dev->ld_site->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
	return o;
}
/**
 * Core logic of lu_object_find*() functions.
 */
static struct lu_object *lu_object_find_try(const struct lu_env *env,
					    struct lu_device *dev,
					    const struct lu_fid *f,
					    const struct lu_object_conf *conf,
					    wait_queue_t *waiter)
{
	struct lu_object *o;
	struct lu_object *shadow;
	struct lu_site *s;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	/*
	 * This uses standard index maintenance protocol:
	 *
	 *     - search index under lock, and return object if found;
	 *     - otherwise, unlock index, allocate new object;
	 *     - lock index and search again;
	 *     - if nothing is found (usual case), insert newly created
	 *       object into index;
	 *     - otherwise (race: other thread inserted object), free
	 *       object just allocated.
	 *     - unlock index;
	 *     - return object.
	 *
	 * For the "LOC_F_NEW" case, we are sure the object is newly created.
	 * It is unnecessary to perform lookup-alloc-lookup-insert; instead,
	 * just alloc and insert directly.
	 *
	 * If dying object is found during index search, add @waiter to the
	 * site wait-queue and return ERR_PTR(-EAGAIN).
	 */
	if (conf != NULL && conf->loc_flags & LOC_F_NEW)
		return lu_object_new(env, dev, f, conf);

	s  = dev->ld_site;
	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)f, &bd, 1);
	o = htable_lookup(s, &bd, f, waiter, &version);
	cfs_hash_bd_unlock(hs, &bd, 1);
	if (!IS_ERR(o) || PTR_ERR(o) != -ENOENT)
		return o;

	/*
	 * Allocate new object. This may result in rather complicated
	 * operations, including fld queries, inode loading, etc.
	 */
	o = lu_object_alloc(env, dev, f, conf);
	if (unlikely(IS_ERR(o)))
		return o;

	LASSERT(lu_fid_eq(lu_object_fid(o), f));

	cfs_hash_bd_lock(hs, &bd, 1);

	shadow = htable_lookup(s, &bd, f, waiter, &version);
	if (likely(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT)) {
		struct lu_site_bkt_data *bkt;

		bkt = cfs_hash_bd_extra_get(hs, &bd);
		cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
		bkt->lsb_busy++;
		cfs_hash_bd_unlock(hs, &bd, 1);
		return o;
	}

	lprocfs_counter_incr(s->ls_stats, LU_SS_CACHE_RACE);
	cfs_hash_bd_unlock(hs, &bd, 1);
	lu_object_free(env, o);
	return shadow;
}
/**
 * Much like lu_object_find(), but top level device of object is specifically
 * \a dev rather than top level device of the site. This interface allows
 * objects of different "stacking" to be created within the same site.
 */
struct lu_object *lu_object_find_at(const struct lu_env *env,
				    struct lu_device *dev,
				    const struct lu_fid *f,
				    const struct lu_object_conf *conf)
{
	struct lu_site_bkt_data *bkt;
	struct lu_object *obj;
	wait_queue_t wait;

	while (1) {
		obj = lu_object_find_try(env, dev, f, conf, &wait);
		if (obj != ERR_PTR(-EAGAIN))
			return obj;
		/*
		 * lu_object_find_try() already added waiter into the
		 * wait queue.
		 */
		waitq_wait(&wait, TASK_UNINTERRUPTIBLE);
		bkt = lu_site_bkt_from_fid(dev->ld_site, (void *)f);
		remove_wait_queue(&bkt->lsb_marche_funebre, &wait);
	}
}
EXPORT_SYMBOL(lu_object_find_at);
/**
 * Find object with given fid, and return its slice belonging to given device.
 */
struct lu_object *lu_object_find_slice(const struct lu_env *env,
				       struct lu_device *dev,
				       const struct lu_fid *f,
				       const struct lu_object_conf *conf)
{
	struct lu_object *top;
	struct lu_object *obj;

	top = lu_object_find(env, dev, f, conf);
	if (!IS_ERR(top)) {
		obj = lu_object_locate(top->lo_header, dev->ld_type);
		if (obj == NULL)
			lu_object_put(env, top);
	} else
		obj = top;
	return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
/**
 * Global list of all device types.
 */
static LIST_HEAD(lu_device_types);

int lu_device_type_init(struct lu_device_type *ldt)
{
	int result = 0;

	INIT_LIST_HEAD(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_init)
		result = ldt->ldt_ops->ldto_init(ldt);
	if (result == 0)
		list_add(&ldt->ldt_linkage, &lu_device_types);
	return result;
}
EXPORT_SYMBOL(lu_device_type_init);

void lu_device_type_fini(struct lu_device_type *ldt)
{
	list_del_init(&ldt->ldt_linkage);
	if (ldt->ldt_ops->ldto_fini)
		ldt->ldt_ops->ldto_fini(ldt);
}
EXPORT_SYMBOL(lu_device_type_fini);

void lu_types_stop(void)
{
	struct lu_device_type *ldt;

	list_for_each_entry(ldt, &lu_device_types, ldt_linkage) {
		if (ldt->ldt_device_nr == 0 && ldt->ldt_ops->ldto_stop)
			ldt->ldt_ops->ldto_stop(ldt);
	}
}
EXPORT_SYMBOL(lu_types_stop);
/**
 * Global list of all sites on this node
 */
static LIST_HEAD(lu_sites);
static DEFINE_MUTEX(lu_sites_guard);

/**
 * Global environment used by site shrinker.
 */
static struct lu_env lu_shrink_env;

struct lu_site_print_arg {
	struct lu_env *lsp_env;
	void *lsp_cookie;
	lu_printer_t lsp_printer;
};

static int
lu_site_obj_print(cfs_hash_t *hs, cfs_hash_bd_t *bd,
		  struct hlist_node *hnode, void *data)
{
	struct lu_site_print_arg *arg = (struct lu_site_print_arg *)data;
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (!list_empty(&h->loh_layers)) {
		const struct lu_object *o;

		o = lu_object_top(h);
		lu_object_print(arg->lsp_env, arg->lsp_cookie,
				arg->lsp_printer, o);
	} else {
		lu_object_header_print(arg->lsp_env, arg->lsp_cookie,
				       arg->lsp_printer, h);
	}
	return 0;
}
/**
 * Print all objects in \a s.
 */
void lu_site_print(const struct lu_env *env, struct lu_site *s, void *cookie,
		   lu_printer_t printer)
{
	struct lu_site_print_arg arg = {
		.lsp_env     = (struct lu_env *)env,
		.lsp_cookie  = cookie,
		.lsp_printer = printer,
	};

	cfs_hash_for_each(s->ls_obj_hash, lu_site_obj_print, &arg);
}
EXPORT_SYMBOL(lu_site_print);
enum {
	LU_CACHE_PERCENT_MAX	 = 50,
	LU_CACHE_PERCENT_DEFAULT = 20
};

static unsigned int lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
CFS_MODULE_PARM(lu_cache_percent, "i", int, 0644,
		"Percentage of memory to be used as lu_object cache");
/**
 * Return desired hash table order.
 */
static int lu_htable_order(void)
{
	unsigned long cache_size;
	int bits;

	/*
	 * Calculate hash table size, assuming that we want reasonable
	 * performance when 20% of total memory is occupied by cache of
	 * lu_objects.
	 *
	 * Size of lu_object is (arbitrary) taken as 1K (together with inode).
	 */
	cache_size = totalram_pages;

#if BITS_PER_LONG == 32
	/* limit hashtable size for lowmem systems to low RAM */
	if (cache_size > 1 << (30 - PAGE_CACHE_SHIFT))
		cache_size = 1 << (30 - PAGE_CACHE_SHIFT) * 3 / 4;
#endif

	/* clear off unreasonable cache setting. */
	if (lu_cache_percent == 0 || lu_cache_percent > LU_CACHE_PERCENT_MAX) {
		CWARN("obdclass: invalid lu_cache_percent: %u, it must be in"
		      " the range of (0, %u]. Will use default value: %u.\n",
		      lu_cache_percent, LU_CACHE_PERCENT_MAX,
		      LU_CACHE_PERCENT_DEFAULT);

		lu_cache_percent = LU_CACHE_PERCENT_DEFAULT;
	}
	cache_size = cache_size / 100 * lu_cache_percent *
		(PAGE_CACHE_SIZE / 1024);

	for (bits = 1; (1 << bits) < cache_size; ++bits) {
		;
	}
	return bits;
}
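/*
 * Worked example (editorial, assuming a 64-bit node with 8 GB of RAM and
 * 4 KB pages): totalram_pages is then ~2M; with the default
 * lu_cache_percent of 20 and PAGE_CACHE_SIZE / 1024 == 4, cache_size
 * becomes 2M / 100 * 20 * 4 ~= 1.6M objects, and the loop above returns
 * bits == 21, the first power of two not below that.
 */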
static unsigned lu_obj_hop_hash(cfs_hash_t *hs,
				const void *key, unsigned mask)
{
	struct lu_fid *fid = (struct lu_fid *)key;
	__u32 hash;

	hash = fid_flatten32(fid);
	hash += (hash >> 4) + (hash << 12); /* mixing oid and seq */
	hash = cfs_hash_long(hash, hs->hs_bkt_bits);

	/* give me another random factor */
	hash -= cfs_hash_long((unsigned long)hs, fid_oid(fid) % 11 + 3);

	hash <<= hs->hs_cur_bits - hs->hs_bkt_bits;
	hash |= (fid_seq(fid) + fid_oid(fid)) & (CFS_HASH_NBKT(hs) - 1);

	return hash & mask;
}
static void *lu_obj_hop_object(struct hlist_node *hnode)
{
	return hlist_entry(hnode, struct lu_object_header, loh_hash);
}

static void *lu_obj_hop_key(struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return &h->loh_fid;
}

static int lu_obj_hop_keycmp(const void *key, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	return lu_fid_eq(&h->loh_fid, (struct lu_fid *)key);
}

static void lu_obj_hop_get(cfs_hash_t *hs, struct hlist_node *hnode)
{
	struct lu_object_header *h;

	h = hlist_entry(hnode, struct lu_object_header, loh_hash);
	if (atomic_add_return(1, &h->loh_ref) == 1) {
		struct lu_site_bkt_data *bkt;
		cfs_hash_bd_t bd;

		cfs_hash_bd_get(hs, &h->loh_fid, &bd);
		bkt = cfs_hash_bd_extra_get(hs, &bd);
		bkt->lsb_busy++;
	}
}

static void lu_obj_hop_put_locked(cfs_hash_t *hs, struct hlist_node *hnode)
{
	LBUG(); /* we should never be called here */
}

cfs_hash_ops_t lu_site_hash_ops = {
	.hs_hash	= lu_obj_hop_hash,
	.hs_key		= lu_obj_hop_key,
	.hs_keycmp	= lu_obj_hop_keycmp,
	.hs_object	= lu_obj_hop_object,
	.hs_get		= lu_obj_hop_get,
	.hs_put_locked	= lu_obj_hop_put_locked,
};
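/*
 * Note (editorial): .hs_put_locked is wired to an LBUG() stub on purpose.
 * The site hash is created with CFS_HASH_NO_ITEMREF (see lu_site_init()
 * below), so references are never dropped through the cfs_hash put path;
 * they are released via lu_object_put() instead.
 */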
void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	if (list_empty(&d->ld_linkage))
		list_add(&d->ld_linkage, &s->ls_ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_add_linkage);

void lu_dev_del_linkage(struct lu_site *s, struct lu_device *d)
{
	spin_lock(&s->ls_ld_lock);
	list_del_init(&d->ld_linkage);
	spin_unlock(&s->ls_ld_lock);
}
EXPORT_SYMBOL(lu_dev_del_linkage);
/**
 * Initialize site \a s, with \a d as the top level device.
 */
#define LU_SITE_BITS_MIN	12
#define LU_SITE_BITS_MAX	24
/**
 * 256 buckets in total; we don't want too many buckets because:
 * - they consume too much memory
 * - with too many buckets the per-bucket LRU lists become unbalanced
 */
#define LU_SITE_BKT_BITS	8
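/*
 * Editorial note: with cfs_hash_create() invoked as in lu_site_init() below
 * (cur_bits == max_bits == bits, bkt_bits == bits - LU_SITE_BKT_BITS), the
 * bucket count is 2^(bits - bkt_bits) = 2^LU_SITE_BKT_BITS = 256,
 * independent of the hash order chosen, which is what "256 buckets in
 * total" above refers to.
 */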
int lu_site_init(struct lu_site *s, struct lu_device *top)
{
	struct lu_site_bkt_data *bkt;
	cfs_hash_bd_t bd;
	char name[16];
	int bits;
	int i;

	memset(s, 0, sizeof *s);
	bits = lu_htable_order();
	snprintf(name, 16, "lu_site_%s", top->ld_type->ldt_name);
	for (bits = min(max(LU_SITE_BITS_MIN, bits), LU_SITE_BITS_MAX);
	     bits >= LU_SITE_BITS_MIN; bits--) {
		s->ls_obj_hash = cfs_hash_create(name, bits, bits,
						 bits - LU_SITE_BKT_BITS,
						 sizeof(*bkt), 0, 0,
						 &lu_site_hash_ops,
						 CFS_HASH_SPIN_BKTLOCK |
						 CFS_HASH_NO_ITEMREF |
						 CFS_HASH_DEPTH |
						 CFS_HASH_ASSERT_EMPTY);
		if (s->ls_obj_hash != NULL)
			break;
	}

	if (s->ls_obj_hash == NULL) {
		CERROR("failed to create lu_site hash with bits: %d\n", bits);
		return -ENOMEM;
	}

	cfs_hash_for_each_bucket(s->ls_obj_hash, &bd, i) {
		bkt = cfs_hash_bd_extra_get(s->ls_obj_hash, &bd);
		INIT_LIST_HEAD(&bkt->lsb_lru);
		init_waitqueue_head(&bkt->lsb_marche_funebre);
	}

	s->ls_stats = lprocfs_alloc_stats(LU_SS_LAST_STAT, 0);
	if (s->ls_stats == NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
		return -ENOMEM;
	}

	lprocfs_counter_init(s->ls_stats, LU_SS_CREATED,
			     0, "created", "created");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_HIT,
			     0, "cache_hit", "cache_hit");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_MISS,
			     0, "cache_miss", "cache_miss");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_RACE,
			     0, "cache_race", "cache_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_CACHE_DEATH_RACE,
			     0, "cache_death_race", "cache_death_race");
	lprocfs_counter_init(s->ls_stats, LU_SS_LRU_PURGED,
			     0, "lru_purged", "lru_purged");

	INIT_LIST_HEAD(&s->ls_linkage);
	s->ls_top_dev = top;
	top->ld_site = s;
	lu_device_get(top);
	lu_ref_add(&top->ld_reference, "site-top", s);

	INIT_LIST_HEAD(&s->ls_ld_linkage);
	spin_lock_init(&s->ls_ld_lock);

	lu_dev_add_linkage(s, top);

	return 0;
}
EXPORT_SYMBOL(lu_site_init);
/**
 * Finalize \a s and release its resources.
 */
void lu_site_fini(struct lu_site *s)
{
	mutex_lock(&lu_sites_guard);
	list_del_init(&s->ls_linkage);
	mutex_unlock(&lu_sites_guard);

	if (s->ls_obj_hash != NULL) {
		cfs_hash_putref(s->ls_obj_hash);
		s->ls_obj_hash = NULL;
	}

	if (s->ls_top_dev != NULL) {
		s->ls_top_dev->ld_site = NULL;
		lu_ref_del(&s->ls_top_dev->ld_reference, "site-top", s);
		lu_device_put(s->ls_top_dev);
		s->ls_top_dev = NULL;
	}

	if (s->ls_stats != NULL)
		lprocfs_free_stats(&s->ls_stats);
}
EXPORT_SYMBOL(lu_site_fini);
/**
 * Called when initialization of stack for this site is completed.
 */
int lu_site_init_finish(struct lu_site *s)
{
	int result;

	mutex_lock(&lu_sites_guard);
	result = lu_context_refill(&lu_shrink_env.le_ctx);
	if (result == 0)
		list_add(&s->ls_linkage, &lu_sites);
	mutex_unlock(&lu_sites_guard);
	return result;
}
EXPORT_SYMBOL(lu_site_init_finish);
/**
 * Acquire additional reference on device \a d
 */
void lu_device_get(struct lu_device *d)
{
	atomic_inc(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_get);

/**
 * Release reference on device \a d.
 */
void lu_device_put(struct lu_device *d)
{
	LASSERT(atomic_read(&d->ld_ref) > 0);
	atomic_dec(&d->ld_ref);
}
EXPORT_SYMBOL(lu_device_put);

/**
 * Initialize device \a d of type \a t.
 */
int lu_device_init(struct lu_device *d, struct lu_device_type *t)
{
	if (t->ldt_device_nr++ == 0 && t->ldt_ops->ldto_start != NULL)
		t->ldt_ops->ldto_start(t);
	memset(d, 0, sizeof *d);
	atomic_set(&d->ld_ref, 0);
	d->ld_type = t;
	lu_ref_init(&d->ld_reference);
	INIT_LIST_HEAD(&d->ld_linkage);
	return 0;
}
EXPORT_SYMBOL(lu_device_init);

/**
 * Finalize device \a d.
 */
void lu_device_fini(struct lu_device *d)
{
	struct lu_device_type *t;

	t = d->ld_type;
	if (d->ld_obd != NULL) {
		d->ld_obd->obd_lu_dev = NULL;
		d->ld_obd = NULL;
	}

	lu_ref_fini(&d->ld_reference);
	LASSERTF(atomic_read(&d->ld_ref) == 0,
		 "Refcount is %u\n", atomic_read(&d->ld_ref));
	LASSERT(t->ldt_device_nr > 0);
	if (--t->ldt_device_nr == 0 && t->ldt_ops->ldto_stop != NULL)
		t->ldt_ops->ldto_stop(t);
}
EXPORT_SYMBOL(lu_device_fini);
/**
 * Initialize object \a o that is part of compound object \a h and was created
 * by device \a d.
 */
int lu_object_init(struct lu_object *o, struct lu_object_header *h,
		   struct lu_device *d)
{
	memset(o, 0, sizeof(*o));
	o->lo_header = h;
	o->lo_dev = d;
	lu_device_get(d);
	lu_ref_add_at(&d->ld_reference, &o->lo_dev_ref, "lu_object", o);
	INIT_LIST_HEAD(&o->lo_linkage);

	return 0;
}
EXPORT_SYMBOL(lu_object_init);

/**
 * Finalize object and release its resources.
 */
void lu_object_fini(struct lu_object *o)
{
	struct lu_device *dev = o->lo_dev;

	LASSERT(list_empty(&o->lo_linkage));

	if (dev != NULL) {
		lu_ref_del_at(&dev->ld_reference, &o->lo_dev_ref,
			      "lu_object", o);
		lu_device_put(dev);
		o->lo_dev = NULL;
	}
}
EXPORT_SYMBOL(lu_object_fini);
/**
 * Add object \a o as first layer of compound object \a h
 *
 * This is typically called by the ->ldo_object_alloc() method of top-level
 * device.
 */
void lu_object_add_top(struct lu_object_header *h, struct lu_object *o)
{
	list_move(&o->lo_linkage, &h->loh_layers);
}
EXPORT_SYMBOL(lu_object_add_top);

/**
 * Add object \a o as a layer of compound object, going after \a before.
 *
 * This is typically called by the ->ldo_object_alloc() method of \a
 * before->lo_dev.
 */
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
	list_move(&o->lo_linkage, &before->lo_linkage);
}
EXPORT_SYMBOL(lu_object_add);

/**
 * Initialize compound object.
 */
int lu_object_header_init(struct lu_object_header *h)
{
	memset(h, 0, sizeof *h);
	atomic_set(&h->loh_ref, 1);
	INIT_HLIST_NODE(&h->loh_hash);
	INIT_LIST_HEAD(&h->loh_lru);
	INIT_LIST_HEAD(&h->loh_layers);
	lu_ref_init(&h->loh_reference);
	return 0;
}
EXPORT_SYMBOL(lu_object_header_init);

/**
 * Finalize compound object.
 */
void lu_object_header_fini(struct lu_object_header *h)
{
	LASSERT(list_empty(&h->loh_layers));
	LASSERT(list_empty(&h->loh_lru));
	LASSERT(hlist_unhashed(&h->loh_hash));
	lu_ref_fini(&h->loh_reference);
}
EXPORT_SYMBOL(lu_object_header_fini);
/**
 * Given a compound object, find its slice, corresponding to the device type
 * \a dtype.
 */
struct lu_object *lu_object_locate(struct lu_object_header *h,
				   const struct lu_device_type *dtype)
{
	struct lu_object *o;

	list_for_each_entry(o, &h->loh_layers, lo_linkage) {
		if (o->lo_dev->ld_type == dtype)
			return o;
	}
	return NULL;
}
EXPORT_SYMBOL(lu_object_locate);
/**
 * Finalize and free devices in the device stack.
 *
 * Finalize device stack by purging object cache, and calling
 * lu_device_type_operations::ldto_device_fini() and
 * lu_device_type_operations::ldto_device_free() on all devices in the stack.
 */
void lu_stack_fini(const struct lu_env *env, struct lu_device *top)
{
	struct lu_site *site = top->ld_site;
	struct lu_device *scan;
	struct lu_device *next;

	lu_site_purge(env, site, ~0);
	for (scan = top; scan != NULL; scan = next) {
		next = scan->ld_type->ldt_ops->ldto_device_fini(env, scan);
		lu_ref_del(&scan->ld_reference, "lu-stack", &lu_site_init);
		lu_device_put(scan);
	}

	/* purge again. */
	lu_site_purge(env, site, ~0);

	for (scan = top; scan != NULL; scan = next) {
		const struct lu_device_type *ldt = scan->ld_type;
		struct obd_type *type;

		next = ldt->ldt_ops->ldto_device_free(env, scan);
		type = ldt->ldt_obd_type;
		if (type != NULL) {
			type->typ_refcnt--;
			class_put_type(type);
		}
	}
}
EXPORT_SYMBOL(lu_stack_fini);
enum {
	/**
	 * Maximal number of tld slots.
	 */
	LU_CONTEXT_KEY_NR = 40
};

static struct lu_context_key *lu_keys[LU_CONTEXT_KEY_NR] = { NULL, };

static DEFINE_SPINLOCK(lu_keys_guard);

/**
 * Global counter incremented whenever key is registered, unregistered,
 * revived or quiesced. This is used to avoid unnecessary calls to
 * lu_context_refill(). No locking is provided, as initialization and shutdown
 * are supposed to be externally serialized.
 */
static unsigned key_set_version = 0;
/**
 * Register new key.
 */
int lu_context_key_register(struct lu_context_key *key)
{
	int result;
	int i;

	LASSERT(key->lct_init != NULL);
	LASSERT(key->lct_fini != NULL);
	LASSERT(key->lct_tags != 0);

	result = -ENFILE;
	spin_lock(&lu_keys_guard);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		if (lu_keys[i] == NULL) {
			key->lct_index = i;
			atomic_set(&key->lct_used, 1);
			lu_keys[i] = key;
			lu_ref_init(&key->lct_reference);
			result = 0;
			++key_set_version;
			break;
		}
	}
	spin_unlock(&lu_keys_guard);
	return result;
}
EXPORT_SYMBOL(lu_context_key_register);
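/*
 * Usage sketch (editorial; "foo" is a hypothetical module): keys are
 * normally generated with the helper macros from lu_object.h and registered
 * once at module load, assuming those helpers:
 *
 *	LU_KEY_INIT_FINI(foo, struct foo_thread_info);
 *	LU_CONTEXT_KEY_DEFINE(foo, LCT_MD_THREAD);
 *	...
 *	LU_CONTEXT_KEY_INIT(&foo_thread_key);
 *	rc = lu_context_key_register(&foo_thread_key);
 */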
static void key_fini(struct lu_context *ctx, int index)
{
	if (ctx->lc_value != NULL && ctx->lc_value[index] != NULL) {
		struct lu_context_key *key;

		key = lu_keys[index];
		LASSERT(key != NULL);
		LASSERT(key->lct_fini != NULL);
		LASSERT(atomic_read(&key->lct_used) > 1);

		key->lct_fini(ctx, key, ctx->lc_value[index]);
		lu_ref_del(&key->lct_reference, "ctx", ctx);
		atomic_dec(&key->lct_used);

		if ((ctx->lc_tags & LCT_NOREF) == 0) {
#ifdef CONFIG_MODULE_UNLOAD
			LINVRNT(module_refcount(key->lct_owner) > 0);
#endif
			module_put(key->lct_owner);
		}
		ctx->lc_value[index] = NULL;
	}
}
/**
 * Deregister key.
 */
void lu_context_key_degister(struct lu_context_key *key)
{
	LASSERT(atomic_read(&key->lct_used) >= 1);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));

	lu_context_key_quiesce(key);

	++key_set_version;
	spin_lock(&lu_keys_guard);
	key_fini(&lu_shrink_env.le_ctx, key->lct_index);
	if (lu_keys[key->lct_index]) {
		lu_keys[key->lct_index] = NULL;
		lu_ref_fini(&key->lct_reference);
	}
	spin_unlock(&lu_keys_guard);

	LASSERTF(atomic_read(&key->lct_used) == 1,
		 "key has instances: %d\n",
		 atomic_read(&key->lct_used));
}
EXPORT_SYMBOL(lu_context_key_degister);
/**
 * Register a number of keys. This has to be called after all keys have been
 * initialized by a call to LU_CONTEXT_KEY_INIT().
 */
int lu_context_key_register_many(struct lu_context_key *k, ...)
{
	struct lu_context_key *key = k;
	va_list args;
	int result;

	va_start(args, k);
	do {
		result = lu_context_key_register(key);
		if (result)
			break;
		key = va_arg(args, struct lu_context_key *);
	} while (key != NULL);
	va_end(args);

	if (result != 0) {
		va_start(args, k);
		while (k != key) {
			lu_context_key_degister(k);
			k = va_arg(args, struct lu_context_key *);
		}
		va_end(args);
	}

	return result;
}
EXPORT_SYMBOL(lu_context_key_register_many);
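/*
 * Note (editorial): the vararg list must be NULL-terminated, e.g. with
 * hypothetical keys k1 and k2:
 *
 *	rc = lu_context_key_register_many(&k1, &k2, NULL);
 *
 * As the rollback loop above shows, the call is all-or-nothing: on failure
 * every key registered so far is degistered again.
 */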
/**
 * De-register a number of keys. This is a dual to
 * lu_context_key_register_many().
 */
void lu_context_key_degister_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_degister(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_degister_many);
/**
 * Revive a number of keys.
 */
void lu_context_key_revive_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_revive(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_revive_many);
/**
 * Quiesce a number of keys.
 */
void lu_context_key_quiesce_many(struct lu_context_key *k, ...)
{
	va_list args;

	va_start(args, k);
	do {
		lu_context_key_quiesce(k);
		k = va_arg(args, struct lu_context_key *);
	} while (k != NULL);
	va_end(args);
}
EXPORT_SYMBOL(lu_context_key_quiesce_many);
/**
 * Return value associated with key \a key in context \a ctx.
 */
void *lu_context_key_get(const struct lu_context *ctx,
			 const struct lu_context_key *key)
{
	LINVRNT(ctx->lc_state == LCS_ENTERED);
	LINVRNT(0 <= key->lct_index && key->lct_index < ARRAY_SIZE(lu_keys));
	LASSERT(lu_keys[key->lct_index] == key);
	return ctx->lc_value[key->lct_index];
}
EXPORT_SYMBOL(lu_context_key_get);
/**
 * List of remembered contexts. XXX document me.
 */
static LIST_HEAD(lu_context_remembered);

/**
 * Destroy \a key in all remembered contexts. This is used to destroy key
 * values in "shared" contexts (like service threads), when a module owning
 * the key is about to be unloaded.
 */
void lu_context_key_quiesce(struct lu_context_key *key)
{
	struct lu_context *ctx;

	if (!(key->lct_tags & LCT_QUIESCENT)) {
		/*
		 * XXX layering violation.
		 */
		key->lct_tags |= LCT_QUIESCENT;
		/*
		 * XXX memory barrier has to go here.
		 */
		spin_lock(&lu_keys_guard);
		list_for_each_entry(ctx, &lu_context_remembered,
				    lc_remember)
			key_fini(ctx, key->lct_index);
		spin_unlock(&lu_keys_guard);
		++key_set_version;
	}
}
EXPORT_SYMBOL(lu_context_key_quiesce);
void lu_context_key_revive(struct lu_context_key *key)
{
	key->lct_tags &= ~LCT_QUIESCENT;
	++key_set_version;
}
EXPORT_SYMBOL(lu_context_key_revive);
static void keys_fini(struct lu_context *ctx)
{
	int i;

	if (ctx->lc_value == NULL)
		return;

	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i)
		key_fini(ctx, i);

	OBD_FREE(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	ctx->lc_value = NULL;
}
static int keys_fill(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_value != NULL);
	for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
		struct lu_context_key *key;

		key = lu_keys[i];
		if (ctx->lc_value[i] == NULL && key != NULL &&
		    (key->lct_tags & ctx->lc_tags) &&
		    /*
		     * Don't create values for a LCT_QUIESCENT key, as this
		     * will pin module owning a key.
		     */
		    !(key->lct_tags & LCT_QUIESCENT)) {
			void *value;

			LINVRNT(key->lct_init != NULL);
			LINVRNT(key->lct_index == i);

			value = key->lct_init(ctx, key);
			if (unlikely(IS_ERR(value)))
				return PTR_ERR(value);

			if (!(ctx->lc_tags & LCT_NOREF))
				try_module_get(key->lct_owner);
			lu_ref_add_atomic(&key->lct_reference, "ctx", ctx);
			atomic_inc(&key->lct_used);
			/*
			 * This is the only place in the code, where an
			 * element of ctx->lc_value[] array is set to non-NULL
			 * value.
			 */
			ctx->lc_value[i] = value;
			if (key->lct_exit != NULL)
				ctx->lc_tags |= LCT_HAS_EXIT;
		}
		ctx->lc_version = key_set_version;
	}
	return 0;
}
static int keys_init(struct lu_context *ctx)
{
	OBD_ALLOC(ctx->lc_value, ARRAY_SIZE(lu_keys) * sizeof ctx->lc_value[0]);
	if (likely(ctx->lc_value != NULL))
		return keys_fill(ctx);

	return -ENOMEM;
}
/**
 * Initialize context data-structure. Create values for all keys.
 */
int lu_context_init(struct lu_context *ctx, __u32 tags)
{
	int rc;

	memset(ctx, 0, sizeof *ctx);
	ctx->lc_state = LCS_INITIALIZED;
	ctx->lc_tags = tags;
	if (tags & LCT_REMEMBER) {
		spin_lock(&lu_keys_guard);
		list_add(&ctx->lc_remember, &lu_context_remembered);
		spin_unlock(&lu_keys_guard);
	} else {
		INIT_LIST_HEAD(&ctx->lc_remember);
	}

	rc = keys_init(ctx);
	if (rc != 0)
		lu_context_fini(ctx);

	return rc;
}
EXPORT_SYMBOL(lu_context_init);
/**
 * Finalize context data-structure. Destroy key values.
 */
void lu_context_fini(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_FINALIZED;

	if ((ctx->lc_tags & LCT_REMEMBER) == 0) {
		LASSERT(list_empty(&ctx->lc_remember));
		keys_fini(ctx);

	} else { /* could race with key degister */
		spin_lock(&lu_keys_guard);
		keys_fini(ctx);
		list_del_init(&ctx->lc_remember);
		spin_unlock(&lu_keys_guard);
	}
}
EXPORT_SYMBOL(lu_context_fini);
/**
 * Called before entering context.
 */
void lu_context_enter(struct lu_context *ctx)
{
	LINVRNT(ctx->lc_state == LCS_INITIALIZED || ctx->lc_state == LCS_LEFT);
	ctx->lc_state = LCS_ENTERED;
}
EXPORT_SYMBOL(lu_context_enter);
/**
 * Called after exiting from \a ctx
 */
void lu_context_exit(struct lu_context *ctx)
{
	int i;

	LINVRNT(ctx->lc_state == LCS_ENTERED);
	ctx->lc_state = LCS_LEFT;
	if (ctx->lc_tags & LCT_HAS_EXIT && ctx->lc_value != NULL) {
		for (i = 0; i < ARRAY_SIZE(lu_keys); ++i) {
			if (ctx->lc_value[i] != NULL) {
				struct lu_context_key *key;

				key = lu_keys[i];
				LASSERT(key != NULL);
				if (key->lct_exit != NULL)
					key->lct_exit(ctx,
						      key, ctx->lc_value[i]);
			}
		}
	}
}
EXPORT_SYMBOL(lu_context_exit);
/**
 * Allocate for context all missing keys that were registered after context
 * creation. key_set_version is only changed in rare cases when modules
 * are loaded and removed.
 */
int lu_context_refill(struct lu_context *ctx)
{
	return likely(ctx->lc_version == key_set_version) ? 0 : keys_fill(ctx);
}
EXPORT_SYMBOL(lu_context_refill);
/**
 * lu_ctx_tags/lu_ses_tags will be updated if there are new types of
 * obd being added. Currently, this is only used on the client side,
 * specifically for the echo device client. For other stacks (like ptlrpc
 * threads), contexts are predefined when the lu_device type is registered,
 * during the module probe phase.
 */
__u32 lu_context_tags_default = 0;
__u32 lu_session_tags_default = 0;

void lu_context_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_update);

void lu_context_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_context_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_context_tags_clear);

void lu_session_tags_update(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default |= tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_update);

void lu_session_tags_clear(__u32 tags)
{
	spin_lock(&lu_keys_guard);
	lu_session_tags_default &= ~tags;
	key_set_version++;
	spin_unlock(&lu_keys_guard);
}
EXPORT_SYMBOL(lu_session_tags_clear);
int lu_env_init(struct lu_env *env, __u32 tags)
{
	int result;

	env->le_ses = NULL;
	result = lu_context_init(&env->le_ctx, tags);
	if (likely(result == 0))
		lu_context_enter(&env->le_ctx);
	return result;
}
EXPORT_SYMBOL(lu_env_init);
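/*
 * Lifecycle sketch (editorial; the tag choice is illustrative):
 *
 *	struct lu_env env;
 *	int rc;
 *
 *	rc = lu_env_init(&env, LCT_LOCAL);
 *	if (rc == 0) {
 *		... pass &env to lu_object_find()/lu_object_put() ...
 *		lu_env_fini(&env);
 *	}
 *
 * lu_env_init() enters the embedded context on success; lu_env_fini()
 * below exits and finalizes it.
 */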
void lu_env_fini(struct lu_env *env)
{
	lu_context_exit(&env->le_ctx);
	lu_context_fini(&env->le_ctx);
	env->le_ses = NULL;
}
EXPORT_SYMBOL(lu_env_fini);
int lu_env_refill(struct lu_env *env)
{
	int result;

	result = lu_context_refill(&env->le_ctx);
	if (result == 0 && env->le_ses != NULL)
		result = lu_context_refill(env->le_ses);
	return result;
}
EXPORT_SYMBOL(lu_env_refill);
/**
 * Currently, this API is only used by the echo client. The echo client and
 * the normal Lustre client share the same cl_env cache, so the echo client
 * needs to refresh the env context after getting one from the cache,
 * especially when the normal client and the echo client co-exist on the
 * same node.
 */
int lu_env_refill_by_tags(struct lu_env *env, __u32 ctags,
			  __u32 stags)
{
	int result;

	if ((env->le_ctx.lc_tags & ctags) != ctags) {
		env->le_ctx.lc_version = 0;
		env->le_ctx.lc_tags |= ctags;
	}

	if (env->le_ses && (env->le_ses->lc_tags & stags) != stags) {
		env->le_ses->lc_version = 0;
		env->le_ses->lc_tags |= stags;
	}

	result = lu_env_refill(env);

	return result;
}
EXPORT_SYMBOL(lu_env_refill_by_tags);
typedef struct lu_site_stats {
	unsigned lss_populated;
	unsigned lss_max_search;
	unsigned lss_total;
	unsigned lss_busy;
} lu_site_stats_t;

static void lu_site_stats_get(cfs_hash_t *hs,
			      lu_site_stats_t *stats, int populated)
{
	cfs_hash_bd_t bd;
	int i;

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct lu_site_bkt_data *bkt = cfs_hash_bd_extra_get(hs, &bd);
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 1);
		stats->lss_busy  += bkt->lsb_busy;
		stats->lss_total += cfs_hash_bd_count_get(&bd);
		stats->lss_max_search = max((int)stats->lss_max_search,
					    cfs_hash_bd_depmax_get(&bd));
		if (!populated) {
			cfs_hash_bd_unlock(hs, &bd, 1);
			continue;
		}

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			if (!hlist_empty(hhead))
				stats->lss_populated++;
		}
		cfs_hash_bd_unlock(hs, &bd, 1);
	}
}
/*
 * There exists a potential lock inversion deadlock scenario when using
 * Lustre on top of ZFS. This occurs between one of ZFS's
 * buf_hash_table.ht_lock's, and Lustre's lu_sites_guard lock. Essentially,
 * thread A will take the lu_sites_guard lock and sleep on the ht_lock,
 * while thread B will take the ht_lock and sleep on the lu_sites_guard
 * lock. Obviously neither thread will wake and drop their respective hold
 * on their lock.
 *
 * To prevent this from happening we must ensure the lu_sites_guard lock is
 * not taken while down this code path. ZFS reliably does not set the
 * __GFP_FS bit in its code paths, so this can be used to determine if it
 * is safe to take the lu_sites_guard lock.
 *
 * Ideally we should accurately return the remaining number of cached
 * objects without taking the lu_sites_guard lock, but this is not
 * possible in the current implementation.
 */
static unsigned long lu_cache_shrink_count(struct shrinker *sk,
					   struct shrink_control *sc)
{
	lu_site_stats_t stats;
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long cached = 0;

	if (!(sc->gfp_mask & __GFP_FS))
		return 0;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		memset(&stats, 0, sizeof(stats));
		lu_site_stats_get(s->ls_obj_hash, &stats, 0);
		cached += stats.lss_total - stats.lss_busy;
	}
	mutex_unlock(&lu_sites_guard);

	cached = (cached / 100) * sysctl_vfs_cache_pressure;
	CDEBUG(D_INODE, "%ld objects cached\n", cached);
	return cached;
}
static unsigned long lu_cache_shrink_scan(struct shrinker *sk,
					  struct shrink_control *sc)
{
	struct lu_site *s;
	struct lu_site *tmp;
	unsigned long remain = sc->nr_to_scan, freed = 0;
	LIST_HEAD(splice);

	if (!(sc->gfp_mask & __GFP_FS))
		/* We must not take the lu_sites_guard lock when
		 * __GFP_FS is *not* set because of the deadlock
		 * possibility detailed above. Additionally,
		 * since we cannot determine the number of
		 * objects in the cache without taking this
		 * lock, we're in a particularly tough spot. As
		 * a result, we'll just lie and say our cache is
		 * empty. This _should_ be ok, as we can't
		 * reclaim objects when __GFP_FS is *not* set
		 * anyways.
		 */
		return SHRINK_STOP;

	mutex_lock(&lu_sites_guard);
	list_for_each_entry_safe(s, tmp, &lu_sites, ls_linkage) {
		freed = lu_site_purge(&lu_shrink_env, s, remain);
		remain -= freed;
		/*
		 * Move just shrunk site to the tail of site list to
		 * assure shrinking fairness.
		 */
		list_move_tail(&s->ls_linkage, &splice);
	}
	list_splice(&splice, lu_sites.prev);
	mutex_unlock(&lu_sites_guard);

	return sc->nr_to_scan - remain;
}
/**
 * Environment to be used in debugger, contains all tags.
 */
struct lu_env lu_debugging_env;

/**
 * Debugging printer function using printk().
 */
int lu_printk_printer(const struct lu_env *env,
		      void *unused, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	vprintk(format, args);
	va_end(args);
	return 0;
}
static struct shrinker lu_site_shrinker = {
	.count_objects	= lu_cache_shrink_count,
	.scan_objects	= lu_cache_shrink_scan,
	.seeks		= DEFAULT_SEEKS,
};
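/*
 * Note (editorial): the count_objects/scan_objects pair follows the split
 * shrinker API introduced in Linux 3.12; lu_cache_shrink_count() only
 * estimates the number of reclaimable objects, while lu_cache_shrink_scan()
 * performs the actual purging through lu_site_purge().
 */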
/**
 * Initialization of global lu_* data.
 */
int lu_global_init(void)
{
	int result;

	CDEBUG(D_INFO, "Lustre LU module (%p).\n", &lu_keys);

	result = lu_ref_global_init();
	if (result != 0)
		return result;

	LU_CONTEXT_KEY_INIT(&lu_global_key);
	result = lu_context_key_register(&lu_global_key);
	if (result != 0)
		return result;

	/*
	 * At this level, we don't know what tags are needed, so allocate them
	 * conservatively. This should not be too bad, because this
	 * environment is global.
	 */
	mutex_lock(&lu_sites_guard);
	result = lu_env_init(&lu_shrink_env, LCT_SHRINKER);
	mutex_unlock(&lu_sites_guard);
	if (result != 0)
		return result;

	/*
	 * seeks estimation: 3 seeks to read a record from oi, one to read
	 * inode, one for ea. Unfortunately setting this high value results in
	 * lu_object/inode cache consuming all the memory.
	 */
	register_shrinker(&lu_site_shrinker);

	return result;
}
/**
 * Dual to lu_global_init().
 */
void lu_global_fini(void)
{
	unregister_shrinker(&lu_site_shrinker);
	lu_context_key_degister(&lu_global_key);

	/*
	 * Tear shrinker environment down _after_ de-registering
	 * lu_global_key, because the latter has a value in the former.
	 */
	mutex_lock(&lu_sites_guard);
	lu_env_fini(&lu_shrink_env);
	mutex_unlock(&lu_sites_guard);

	lu_ref_global_fini();
}
static __u32 ls_stats_read(struct lprocfs_stats *stats, int idx)
{
	struct lprocfs_counter ret;

	lprocfs_stats_collect(stats, idx, &ret);
	return (__u32)ret.lc_count;
}
/**
 * Output site statistical counters into a buffer. Suitable for
 * lprocfs_rd_*()-style functions.
 */
int lu_site_stats_print(const struct lu_site *s, struct seq_file *m)
{
	lu_site_stats_t stats;

	memset(&stats, 0, sizeof(stats));
	lu_site_stats_get(s->ls_obj_hash, &stats, 1);

	return seq_printf(m, "%d/%d %d/%d %d %d %d %d %d %d %d\n",
			  stats.lss_busy,
			  stats.lss_total,
			  stats.lss_populated,
			  CFS_HASH_NHLIST(s->ls_obj_hash),
			  stats.lss_max_search,
			  ls_stats_read(s->ls_stats, LU_SS_CREATED),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_HIT),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_MISS),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_CACHE_DEATH_RACE),
			  ls_stats_read(s->ls_stats, LU_SS_LRU_PURGED));
}
EXPORT_SYMBOL(lu_site_stats_print);
/**
 * Helper function to initialize a number of kmem slab caches at once.
 */
int lu_kmem_init(struct lu_kmem_descr *caches)
{
	int result;
	struct lu_kmem_descr *iter = caches;

	for (result = 0; iter->ckd_cache != NULL; ++iter) {
		*iter->ckd_cache = kmem_cache_create(iter->ckd_name,
						     iter->ckd_size,
						     0, 0, NULL);
		if (*iter->ckd_cache == NULL) {
			result = -ENOMEM;
			/* free all previously allocated caches */
			lu_kmem_fini(caches);
			break;
		}
	}
	return result;
}
EXPORT_SYMBOL(lu_kmem_init);
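/*
 * Usage sketch (editorial; the "foo" names are hypothetical): the
 * descriptor array is walked until an entry whose ckd_cache pointer is
 * NULL, so it must be terminated that way:
 *
 *	static struct kmem_cache *foo_kmem;
 *	static struct lu_kmem_descr foo_caches[] = {
 *		{
 *			.ckd_cache = &foo_kmem,
 *			.ckd_name  = "foo_kmem",
 *			.ckd_size  = sizeof(struct foo_object)
 *		},
 *		{
 *			.ckd_cache = NULL
 *		}
 *	};
 *	...
 *	rc = lu_kmem_init(foo_caches);
 */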
/**
 * Helper function to finalize a number of kmem slab caches at once. Dual to
 * lu_kmem_init().
 */
void lu_kmem_fini(struct lu_kmem_descr *caches)
{
	for (; caches->ckd_cache != NULL; ++caches) {
		if (*caches->ckd_cache != NULL) {
			kmem_cache_destroy(*caches->ckd_cache);
			*caches->ckd_cache = NULL;
		}
	}
}
EXPORT_SYMBOL(lu_kmem_fini);
/**
 * Temporary solution to be able to assign fid in ->do_create()
 * till we have fully-functional OST fids
 */
void lu_object_assign_fid(const struct lu_env *env, struct lu_object *o,
			  const struct lu_fid *fid)
{
	struct lu_site *s = o->lo_dev->ld_site;
	struct lu_fid *old = &o->lo_header->loh_fid;
	struct lu_site_bkt_data *bkt;
	struct lu_object *shadow;
	wait_queue_t waiter;
	cfs_hash_t *hs;
	cfs_hash_bd_t bd;
	__u64 version = 0;

	LASSERT(fid_is_zero(old));

	hs = s->ls_obj_hash;
	cfs_hash_bd_get_and_lock(hs, (void *)fid, &bd, 1);
	shadow = htable_lookup(s, &bd, fid, &waiter, &version);
	/* supposed to be unique */
	LASSERT(IS_ERR(shadow) && PTR_ERR(shadow) == -ENOENT);
	*old = *fid;
	bkt = cfs_hash_bd_extra_get(hs, &bd);
	cfs_hash_bd_add_locked(hs, &bd, &o->lo_header->loh_hash);
	bkt->lsb_busy++;
	cfs_hash_bd_unlock(hs, &bd, 1);
}
EXPORT_SYMBOL(lu_object_assign_fid);
/**
 * allocates object with 0 (non-assigned) fid
 * XXX: temporary solution to be able to assign fid in ->do_create()
 *      till we have fully-functional OST fids
 */
struct lu_object *lu_object_anon(const struct lu_env *env,
				 struct lu_device *dev,
				 const struct lu_object_conf *conf)
{
	struct lu_fid fid;
	struct lu_object *o;

	fid_zero(&fid);
	o = lu_object_alloc(env, dev, &fid, conf);

	return o;
}
EXPORT_SYMBOL(lu_object_anon);
struct lu_buf LU_BUF_NULL = {
	.lb_buf = NULL,
	.lb_len = 0
};
EXPORT_SYMBOL(LU_BUF_NULL);

void lu_buf_free(struct lu_buf *buf)
{
	LASSERT(buf);
	if (buf->lb_buf) {
		LASSERT(buf->lb_len > 0);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
		buf->lb_buf = NULL;
		buf->lb_len = 0;
	}
}
EXPORT_SYMBOL(lu_buf_free);
void lu_buf_alloc(struct lu_buf *buf, int size)
{
	LASSERT(buf);
	LASSERT(buf->lb_buf == NULL);
	LASSERT(buf->lb_len == 0);
	OBD_ALLOC_LARGE(buf->lb_buf, size);
	if (likely(buf->lb_buf))
		buf->lb_len = size;
}
EXPORT_SYMBOL(lu_buf_alloc);
void lu_buf_realloc(struct lu_buf *buf, int size)
{
	lu_buf_free(buf);
	lu_buf_alloc(buf, size);
}
EXPORT_SYMBOL(lu_buf_realloc);
struct lu_buf *lu_buf_check_and_alloc(struct lu_buf *buf, int len)
{
	if (buf->lb_buf == NULL && buf->lb_len == 0)
		lu_buf_alloc(buf, len);

	if ((len > buf->lb_len) && (buf->lb_buf != NULL))
		lu_buf_realloc(buf, len);

	return buf;
}
EXPORT_SYMBOL(lu_buf_check_and_alloc);
/**
 * Increase the size of the \a buf.
 * Preserves old data in the buffer; the old buffer remains unchanged on
 * error.
 * \retval 0 or -ENOMEM
 */
int lu_buf_check_and_grow(struct lu_buf *buf, int len)
{
	char *ptr;

	if (len <= buf->lb_len)
		return 0;

	OBD_ALLOC_LARGE(ptr, len);
	if (ptr == NULL)
		return -ENOMEM;

	/* Free the old buf */
	if (buf->lb_buf != NULL) {
		memcpy(ptr, buf->lb_buf, buf->lb_len);
		OBD_FREE_LARGE(buf->lb_buf, buf->lb_len);
	}

	buf->lb_buf = ptr;
	buf->lb_len = len;
	return 0;
}
EXPORT_SYMBOL(lu_buf_check_and_grow);