/*
 * Ceph - scalable distributed file system
 *
 * Copyright (C) 2015 Intel Corporation All Rights Reserved
 *
 * This is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2.1, as published by the Free Software
 * Foundation.  See file COPYING.
 *
 */
14 # include <linux/string.h>
15 # include <linux/slab.h>
16 # include <linux/bug.h>
17 # include <linux/kernel.h>
18 # include <linux/crush/crush.h>
19 # include <linux/crush/hash.h>
20 # include <linux/crush/mapper.h>
22 # include "crush_compat.h"
27 #include "crush_ln_table.h"
29 #define dprintk(args...) /* printf(args) */
/*
 * Implement the core CRUSH mapping algorithm.
 */
36 * crush_find_rule - find a crush_rule id for a given ruleset, type, and size.
38 * @ruleset: the storage ruleset id (user defined)
39 * @type: storage ruleset type (user defined)
40 * @size: output set size
42 int crush_find_rule(const struct crush_map *map, int ruleset, int type, int size)
46 for (i = 0; i < map->max_rules; i++) {
48 map->rules[i]->mask.ruleset == ruleset &&
49 map->rules[i]->mask.type == type &&
50 map->rules[i]->mask.min_size <= size &&
51 map->rules[i]->mask.max_size >= size)
/*
 * bucket choose methods
 *
 * For each bucket algorithm, we have a "choose" method that, given a
 * crush input @x and replica position (usually, position in output set) @r,
 * will produce an item in the bucket.
 */
67 * Choose based on a random permutation of the bucket.
69 * We used to use some prime number arithmetic to do this, but it
70 * wasn't very random, and had some other bad behaviors. Instead, we
71 * calculate an actual random permutation of the bucket members.
72 * Since this is expensive, we optimize for the r=0 case, which
73 * captures the vast majority of calls.
75 static int bucket_perm_choose(struct crush_bucket *bucket,
78 unsigned int pr = r % bucket->size;
81 /* start a new permutation if @x has changed */
82 if (bucket->perm_x != (__u32)x || bucket->perm_n == 0) {
83 dprintk("bucket %d new x=%d\n", bucket->id, x);
86 /* optimize common r=0 case */
88 s = crush_hash32_3(bucket->hash, x, bucket->id, 0) %
91 bucket->perm_n = 0xffff; /* magic value, see below */
95 for (i = 0; i < bucket->size; i++)
98 } else if (bucket->perm_n == 0xffff) {
99 /* clean up after the r=0 case above */
100 for (i = 1; i < bucket->size; i++)
102 bucket->perm[bucket->perm[0]] = 0;
106 /* calculate permutation up to pr */
107 for (i = 0; i < bucket->perm_n; i++)
108 dprintk(" perm_choose have %d: %d\n", i, bucket->perm[i]);
109 while (bucket->perm_n <= pr) {
110 unsigned int p = bucket->perm_n;
111 /* no point in swapping the final entry */
112 if (p < bucket->size - 1) {
113 i = crush_hash32_3(bucket->hash, x, bucket->id, p) %
116 unsigned int t = bucket->perm[p + i];
117 bucket->perm[p + i] = bucket->perm[p];
120 dprintk(" perm_choose swap %d with %d\n", p, p+i);
124 for (i = 0; i < bucket->size; i++)
125 dprintk(" perm_choose %d: %d\n", i, bucket->perm[i]);
127 s = bucket->perm[pr];
129 dprintk(" perm_choose %d sz=%d x=%d r=%d (%d) s=%d\n", bucket->id,
130 bucket->size, x, r, pr, s);
131 return bucket->items[s];
135 static int bucket_uniform_choose(struct crush_bucket_uniform *bucket,
138 return bucket_perm_choose(&bucket->h, x, r);
142 static int bucket_list_choose(struct crush_bucket_list *bucket,
147 for (i = bucket->h.size-1; i >= 0; i--) {
148 __u64 w = crush_hash32_4(bucket->h.hash, x, bucket->h.items[i],
151 dprintk("list_choose i=%d x=%d r=%d item %d weight %x "
153 i, x, r, bucket->h.items[i], bucket->item_weights[i],
154 bucket->sum_weights[i], w);
155 w *= bucket->sum_weights[i];
157 /*dprintk(" scaled %llx\n", w);*/
158 if (w < bucket->item_weights[i])
159 return bucket->h.items[i];
162 dprintk("bad list sums for bucket %d\n", bucket->h.id);
163 return bucket->h.items[0];
/* (binary) tree
 *
 * Tree nodes are indexed so that a node's height above the leaves
 * equals the number of trailing zero bits in its index.
 * @n must be nonzero (the loop would not terminate for 0).
 */
static int height(int n)
{
	int h = 0;

	while ((n & 1) == 0) {
		h++;
		n = n >> 1;
	}
	return h;
}
/* index of the left child of tree node @x */
static int left(int x)
{
	int h = height(x);

	return x - (1 << (h-1));
}
/* index of the right child of tree node @x */
static int right(int x)
{
	int h = height(x);

	return x + (1 << (h-1));
}
/* true if tree node @x is a leaf (height 0, i.e. odd index) */
static int terminal(int x)
{
	return x & 1;
}
195 static int bucket_tree_choose(struct crush_bucket_tree *bucket,
203 n = bucket->num_nodes >> 1;
205 while (!terminal(n)) {
207 /* pick point in [0, w) */
208 w = bucket->node_weights[n];
209 t = (__u64)crush_hash32_4(bucket->h.hash, x, n, r,
210 bucket->h.id) * (__u64)w;
213 /* descend to the left or right? */
215 if (t < bucket->node_weights[l])
221 return bucket->h.items[n >> 1];
227 static int bucket_straw_choose(struct crush_bucket_straw *bucket,
235 for (i = 0; i < bucket->h.size; i++) {
236 draw = crush_hash32_3(bucket->h.hash, x, bucket->h.items[i], r);
238 draw *= bucket->straws[i];
239 if (i == 0 || draw > high_draw) {
244 return bucket->h.items[high];
247 /* compute 2^44*log2(input+1) */
248 static __u64 crush_ln(unsigned int xin)
250 unsigned int x = xin;
251 int iexpon, index1, index2;
252 __u64 RH, LH, LL, xl64, result;
256 /* normalize input */
260 * figure out number of bits we need to shift and
261 * do it in one step instead of iteratively
263 if (!(x & 0x18000)) {
264 int bits = __builtin_clz(x & 0x1FFFF) - 16;
269 index1 = (x >> 8) << 1;
270 /* RH ~ 2^56/index1 */
271 RH = __RH_LH_tbl[index1 - 256];
272 /* LH ~ 2^48 * log2(index1/256) */
273 LH = __RH_LH_tbl[index1 + 1 - 256];
275 /* RH*x ~ 2^48 * (2^15 + xf), xf<2^8 */
276 xl64 = (__s64)x * RH;
280 result <<= (12 + 32);
282 index2 = xl64 & 0xff;
283 /* LL ~ 2^48*log2(1.0+index2/2^15) */
284 LL = __LL_tbl[index2];
288 LH >>= (48 - 12 - 32);
298 * for reference, see:
300 * http://en.wikipedia.org/wiki/Exponential_distribution#Distribution_of_the_minimum_of_exponential_random_variables
304 static int bucket_straw2_choose(struct crush_bucket_straw2 *bucket,
307 unsigned int i, high = 0;
310 __s64 ln, draw, high_draw = 0;
312 for (i = 0; i < bucket->h.size; i++) {
313 w = bucket->item_weights[i];
315 u = crush_hash32_3(bucket->h.hash, x,
316 bucket->h.items[i], r);
320 * for some reason slightly less than 0x10000 produces
321 * a slightly more accurate distribution... probably a
324 * the natural log lookup table maps [0,0xffff]
325 * (corresponding to real numbers [1/0x10000, 1] to
326 * [0, 0xffffffffffff] (corresponding to real numbers
329 ln = crush_ln(u) - 0x1000000000000ll;
332 * divide by 16.16 fixed-point weight. note
333 * that the ln value is negative, so a larger
334 * weight means a larger (less negative) value
337 draw = div64_s64(ln, w);
342 if (i == 0 || draw > high_draw) {
347 return bucket->h.items[high];
351 static int crush_bucket_choose(struct crush_bucket *in, int x, int r)
353 dprintk(" crush_bucket_choose %d x=%d r=%d\n", in->id, x, r);
354 BUG_ON(in->size == 0);
356 case CRUSH_BUCKET_UNIFORM:
357 return bucket_uniform_choose((struct crush_bucket_uniform *)in,
359 case CRUSH_BUCKET_LIST:
360 return bucket_list_choose((struct crush_bucket_list *)in,
362 case CRUSH_BUCKET_TREE:
363 return bucket_tree_choose((struct crush_bucket_tree *)in,
365 case CRUSH_BUCKET_STRAW:
366 return bucket_straw_choose((struct crush_bucket_straw *)in,
368 case CRUSH_BUCKET_STRAW2:
369 return bucket_straw2_choose((struct crush_bucket_straw2 *)in,
372 dprintk("unknown bucket %d alg %d\n", in->id, in->alg);
379 * true if device is marked "out" (failed, fully offloaded)
382 static int is_out(const struct crush_map *map,
383 const __u32 *weight, int weight_max,
386 if (item >= weight_max)
388 if (weight[item] >= 0x10000)
390 if (weight[item] == 0)
392 if ((crush_hash32_2(CRUSH_HASH_RJENKINS1, x, item) & 0xffff)
399 * crush_choose_firstn - choose numrep distinct items of given type
400 * @map: the crush_map
401 * @bucket: the bucket we are choose an item from
402 * @x: crush input value
403 * @numrep: the number of items to choose
404 * @type: the type of item to choose
405 * @out: pointer to output vector
406 * @outpos: our position in that vector
407 * @out_size: size of the out vector
408 * @tries: number of attempts to make
409 * @recurse_tries: number of attempts to have recursive chooseleaf make
410 * @local_retries: localized retries
411 * @local_fallback_retries: localized fallback retries
412 * @recurse_to_leaf: true if we want one device under each item of given type (chooseleaf instead of choose)
413 * @stable: stable mode starts rep=0 in the recursive call for all replicas
414 * @vary_r: pass r to recursive calls
415 * @out2: second output vector for leaf items (if @recurse_to_leaf)
416 * @parent_r: r value passed from the parent
418 static int crush_choose_firstn(const struct crush_map *map,
419 struct crush_bucket *bucket,
420 const __u32 *weight, int weight_max,
421 int x, int numrep, int type,
422 int *out, int outpos,
425 unsigned int recurse_tries,
426 unsigned int local_retries,
427 unsigned int local_fallback_retries,
435 unsigned int ftotal, flocal;
436 int retry_descent, retry_bucket, skip_rep;
437 struct crush_bucket *in = bucket;
443 int count = out_size;
445 dprintk("CHOOSE%s bucket %d x %d outpos %d numrep %d tries %d recurse_tries %d local_retries %d local_fallback_retries %d parent_r %d stable %d\n",
446 recurse_to_leaf ? "_LEAF" : "",
447 bucket->id, x, outpos, numrep,
448 tries, recurse_tries, local_retries, local_fallback_retries,
451 for (rep = stable ? 0 : outpos; rep < numrep && count > 0 ; rep++) {
452 /* keep trying until we get a non-out, non-colliding item */
457 in = bucket; /* initial bucket */
459 /* choose through intervening buckets */
465 /* r' = r + f_total */
473 if (local_fallback_retries > 0 &&
474 flocal >= (in->size>>1) &&
475 flocal > local_fallback_retries)
476 item = bucket_perm_choose(in, x, r);
478 item = crush_bucket_choose(in, x, r);
479 if (item >= map->max_devices) {
480 dprintk(" bad item %d\n", item);
487 itemtype = map->buckets[-1-item]->type;
490 dprintk(" item %d type %d\n", item, itemtype);
493 if (itemtype != type) {
495 (-1-item) >= map->max_buckets) {
496 dprintk(" bad item type %d\n", type);
500 in = map->buckets[-1-item];
506 for (i = 0; i < outpos; i++) {
507 if (out[i] == item) {
514 if (!collide && recurse_to_leaf) {
518 sub_r = r >> (vary_r-1);
521 if (crush_choose_firstn(map,
522 map->buckets[-1-item],
524 x, stable ? 1 : outpos+1, 0,
528 local_fallback_retries,
534 /* didn't get leaf */
537 /* we already have a leaf! */
545 reject = is_out(map, weight,
553 if (reject || collide) {
557 if (collide && flocal <= local_retries)
558 /* retry locally a few times */
560 else if (local_fallback_retries > 0 &&
561 flocal <= in->size + local_fallback_retries)
562 /* exhaustive bucket search */
564 else if (ftotal < tries)
565 /* then retry descent */
570 dprintk(" reject %d collide %d "
571 "ftotal %u flocal %u\n",
572 reject, collide, ftotal,
575 } while (retry_bucket);
576 } while (retry_descent);
579 dprintk("skip rep\n");
583 dprintk("CHOOSE got %d\n", item);
588 if (map->choose_tries && ftotal <= map->choose_total_tries)
589 map->choose_tries[ftotal]++;
593 dprintk("CHOOSE returns %d\n", outpos);
599 * crush_choose_indep: alternative breadth-first positionally stable mapping
602 static void crush_choose_indep(const struct crush_map *map,
603 struct crush_bucket *bucket,
604 const __u32 *weight, int weight_max,
605 int x, int left, int numrep, int type,
606 int *out, int outpos,
608 unsigned int recurse_tries,
613 struct crush_bucket *in = bucket;
614 int endpos = outpos + left;
623 dprintk("CHOOSE%s INDEP bucket %d x %d outpos %d numrep %d\n", recurse_to_leaf ? "_LEAF" : "",
624 bucket->id, x, outpos, numrep);
626 /* initially my result is undefined */
627 for (rep = outpos; rep < endpos; rep++) {
628 out[rep] = CRUSH_ITEM_UNDEF;
630 out2[rep] = CRUSH_ITEM_UNDEF;
633 for (ftotal = 0; left > 0 && ftotal < tries; ftotal++) {
635 if (out2 && ftotal) {
636 dprintk("%u %d a: ", ftotal, left);
637 for (rep = outpos; rep < endpos; rep++) {
638 dprintk(" %d", out[rep]);
641 dprintk("%u %d b: ", ftotal, left);
642 for (rep = outpos; rep < endpos; rep++) {
643 dprintk(" %d", out2[rep]);
648 for (rep = outpos; rep < endpos; rep++) {
649 if (out[rep] != CRUSH_ITEM_UNDEF)
652 in = bucket; /* initial bucket */
654 /* choose through intervening buckets */
656 /* note: we base the choice on the position
657 * even in the nested call. that means that
658 * if the first layer chooses the same bucket
659 * in a different position, we will tend to
660 * choose a different item in that bucket.
661 * this will involve more devices in data
662 * movement and tend to distribute the load.
667 if (in->alg == CRUSH_BUCKET_UNIFORM &&
668 in->size % numrep == 0)
669 /* r'=r+(n+1)*f_total */
670 r += (numrep+1) * ftotal;
672 /* r' = r + n*f_total */
673 r += numrep * ftotal;
677 dprintk(" empty bucket\n");
681 item = crush_bucket_choose(in, x, r);
682 if (item >= map->max_devices) {
683 dprintk(" bad item %d\n", item);
684 out[rep] = CRUSH_ITEM_NONE;
686 out2[rep] = CRUSH_ITEM_NONE;
693 itemtype = map->buckets[-1-item]->type;
696 dprintk(" item %d type %d\n", item, itemtype);
699 if (itemtype != type) {
701 (-1-item) >= map->max_buckets) {
702 dprintk(" bad item type %d\n", type);
703 out[rep] = CRUSH_ITEM_NONE;
710 in = map->buckets[-1-item];
716 for (i = outpos; i < endpos; i++) {
717 if (out[i] == item) {
725 if (recurse_to_leaf) {
727 crush_choose_indep(map,
728 map->buckets[-1-item],
734 if (out2[rep] == CRUSH_ITEM_NONE) {
735 /* placed nothing; no leaf */
739 /* we already have a leaf! */
746 is_out(map, weight, weight_max, item, x))
756 for (rep = outpos; rep < endpos; rep++) {
757 if (out[rep] == CRUSH_ITEM_UNDEF) {
758 out[rep] = CRUSH_ITEM_NONE;
760 if (out2 && out2[rep] == CRUSH_ITEM_UNDEF) {
761 out2[rep] = CRUSH_ITEM_NONE;
765 if (map->choose_tries && ftotal <= map->choose_total_tries)
766 map->choose_tries[ftotal]++;
770 dprintk("%u %d a: ", ftotal, left);
771 for (rep = outpos; rep < endpos; rep++) {
772 dprintk(" %d", out[rep]);
775 dprintk("%u %d b: ", ftotal, left);
776 for (rep = outpos; rep < endpos; rep++) {
777 dprintk(" %d", out2[rep]);
785 * crush_do_rule - calculate a mapping with the given input and rule
786 * @map: the crush_map
787 * @ruleno: the rule id
789 * @result: pointer to result vector
790 * @result_max: maximum result size
791 * @weight: weight vector (for map leaves)
792 * @weight_max: size of weight vector
793 * @scratch: scratch vector for private use; must be >= 3 * result_max
795 int crush_do_rule(const struct crush_map *map,
796 int ruleno, int x, int *result, int result_max,
797 const __u32 *weight, int weight_max,
802 int *b = scratch + result_max;
803 int *c = scratch + result_max*2;
810 struct crush_rule *rule;
816 * the original choose_total_tries value was off by one (it
817 * counted "retries" and not "tries"). add one.
819 int choose_tries = map->choose_total_tries + 1;
820 int choose_leaf_tries = 0;
822 * the local tries values were counted as "retries", though,
823 * and need no adjustment
825 int choose_local_retries = map->choose_local_tries;
826 int choose_local_fallback_retries = map->choose_local_fallback_tries;
828 int vary_r = map->chooseleaf_vary_r;
829 int stable = map->chooseleaf_stable;
831 if ((__u32)ruleno >= map->max_rules) {
832 dprintk(" bad ruleno %d\n", ruleno);
836 rule = map->rules[ruleno];
841 for (step = 0; step < rule->len; step++) {
843 struct crush_rule_step *curstep = &rule->steps[step];
845 switch (curstep->op) {
846 case CRUSH_RULE_TAKE:
847 if ((curstep->arg1 >= 0 &&
848 curstep->arg1 < map->max_devices) ||
849 (-1-curstep->arg1 >= 0 &&
850 -1-curstep->arg1 < map->max_buckets &&
851 map->buckets[-1-curstep->arg1])) {
852 w[0] = curstep->arg1;
855 dprintk(" bad take value %d\n", curstep->arg1);
859 case CRUSH_RULE_SET_CHOOSE_TRIES:
860 if (curstep->arg1 > 0)
861 choose_tries = curstep->arg1;
864 case CRUSH_RULE_SET_CHOOSELEAF_TRIES:
865 if (curstep->arg1 > 0)
866 choose_leaf_tries = curstep->arg1;
869 case CRUSH_RULE_SET_CHOOSE_LOCAL_TRIES:
870 if (curstep->arg1 >= 0)
871 choose_local_retries = curstep->arg1;
874 case CRUSH_RULE_SET_CHOOSE_LOCAL_FALLBACK_TRIES:
875 if (curstep->arg1 >= 0)
876 choose_local_fallback_retries = curstep->arg1;
879 case CRUSH_RULE_SET_CHOOSELEAF_VARY_R:
880 if (curstep->arg1 >= 0)
881 vary_r = curstep->arg1;
884 case CRUSH_RULE_SET_CHOOSELEAF_STABLE:
885 if (curstep->arg1 >= 0)
886 stable = curstep->arg1;
889 case CRUSH_RULE_CHOOSELEAF_FIRSTN:
890 case CRUSH_RULE_CHOOSE_FIRSTN:
893 case CRUSH_RULE_CHOOSELEAF_INDEP:
894 case CRUSH_RULE_CHOOSE_INDEP:
900 CRUSH_RULE_CHOOSELEAF_FIRSTN ||
902 CRUSH_RULE_CHOOSELEAF_INDEP;
907 for (i = 0; i < wsize; i++) {
910 * see CRUSH_N, CRUSH_N_MINUS macros.
911 * basically, numrep <= 0 means relative to
912 * the provided result_max
914 numrep = curstep->arg1;
916 numrep += result_max;
921 /* make sure bucket id is valid */
923 if (bno < 0 || bno >= map->max_buckets) {
924 /* w[i] is probably CRUSH_ITEM_NONE */
925 dprintk(" bad w[i] %d\n", w[i]);
930 if (choose_leaf_tries)
933 else if (map->chooseleaf_descend_once)
936 recurse_tries = choose_tries;
937 osize += crush_choose_firstn(
947 choose_local_retries,
948 choose_local_fallback_retries,
955 out_size = ((numrep < (result_max-osize)) ?
956 numrep : (result_max-osize));
966 choose_leaf_tries : 1,
975 /* copy final _leaf_ values to output set */
976 memcpy(o, c, osize*sizeof(*o));
978 /* swap o and w arrays */
986 case CRUSH_RULE_EMIT:
987 for (i = 0; i < wsize && result_len < result_max; i++) {
988 result[result_len] = w[i];
995 dprintk(" unknown op %d at step %d\n",