2 * Copyright (C) 2015 Red Hat. All rights reserved.
4 * This file is released under the GPL.
7 #include "dm-cache-background-tracker.h"
8 #include "dm-cache-policy-internal.h"
9 #include "dm-cache-policy.h"
12 #include <linux/hash.h>
13 #include <linux/jiffies.h>
14 #include <linux/module.h>
15 #include <linux/mutex.h>
16 #include <linux/vmalloc.h>
17 #include <linux/math64.h>
19 #define DM_MSG_PREFIX "cache-policy-smq"
21 /*----------------------------------------------------------------*/
24 * Safe division functions that return zero on divide by zero.
26 static unsigned safe_div(unsigned n, unsigned d)
28 return d ? n / d : 0u;
31 static unsigned safe_mod(unsigned n, unsigned d)
33 return d ? n % d : 0u;
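/*
 * A small worked illustration: callers below divide entry counts by level
 * counts that may legitimately be zero, e.g.
 *
 *	safe_div(10, 4) == 2,  safe_mod(10, 4) == 2
 *	safe_div(10, 0) == 0,  safe_mod(10, 0) == 0
 *
 * so no call site needs an explicit zero-divisor guard.
 */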
36 /*----------------------------------------------------------------*/
39 unsigned hash_next:28;
51 /*----------------------------------------------------------------*/
53 #define INDEXER_NULL ((1u << 28u) - 1u)
56 * An entry_space manages a set of entries that we use for the queues.
57 * The clean and dirty queues share entries, so this object is separate
58 * from the queue itself.
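/*
 * The entry_space declaration itself is not visible in this excerpt; judging
 * from the accessors below it is essentially a contiguous array of struct
 * entry delimited by two pointers (a sketch, not the verbatim definition):
 *
 *	struct entry_space {
 *		struct entry *begin;
 *		struct entry *end;
 *	};
 *
 * Entries are then referred to by 28-bit indices into this array rather than
 * by pointer, which keeps struct entry small (see INDEXER_NULL above).
 */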
65 static int space_init(struct entry_space *es, unsigned nr_entries)
68 es->begin = es->end = NULL;
72 es->begin = vzalloc(sizeof(struct entry) * nr_entries);
76 es->end = es->begin + nr_entries;
80 static void space_exit(struct entry_space *es)
85 static struct entry *__get_entry(struct entry_space *es, unsigned block)
89 e = es->begin + block;
95 static unsigned to_index(struct entry_space *es, struct entry *e)
97 BUG_ON(e < es->begin || e >= es->end);
101 static struct entry *to_entry(struct entry_space *es, unsigned block)
103 if (block == INDEXER_NULL)
106 return __get_entry(es, block);
109 /*----------------------------------------------------------------*/
112 unsigned nr_elts; /* excluding sentinel entries */
116 static void l_init(struct ilist *l)
119 l->head = l->tail = INDEXER_NULL;
122 static struct entry *l_head(struct entry_space *es, struct ilist *l)
124 return to_entry(es, l->head);
127 static struct entry *l_tail(struct entry_space *es, struct ilist *l)
129 return to_entry(es, l->tail);
132 static struct entry *l_next(struct entry_space *es, struct entry *e)
134 return to_entry(es, e->next);
137 static struct entry *l_prev(struct entry_space *es, struct entry *e)
139 return to_entry(es, e->prev);
142 static bool l_empty(struct ilist *l)
144 return l->head == INDEXER_NULL;
147 static void l_add_head(struct entry_space *es, struct ilist *l, struct entry *e)
149 struct entry *head = l_head(es, l);
152 e->prev = INDEXER_NULL;
155 head->prev = l->head = to_index(es, e);
157 l->head = l->tail = to_index(es, e);
163 static void l_add_tail(struct entry_space *es, struct ilist *l, struct entry *e)
165 struct entry *tail = l_tail(es, l);
167 e->next = INDEXER_NULL;
171 tail->next = l->tail = to_index(es, e);
173 l->head = l->tail = to_index(es, e);
179 static void l_add_before(struct entry_space *es, struct ilist *l,
180 struct entry *old, struct entry *e)
182 struct entry *prev = l_prev(es, old);
185 l_add_head(es, l, e);
189 e->next = to_index(es, old);
190 prev->next = old->prev = to_index(es, e);
197 static void l_del(struct entry_space *es, struct ilist *l, struct entry *e)
199 struct entry *prev = l_prev(es, e);
200 struct entry *next = l_next(es, e);
203 prev->next = e->next;
208 next->prev = e->prev;
216 static struct entry *l_pop_tail(struct entry_space *es, struct ilist *l)
220 for (e = l_tail(es, l); e; e = l_prev(es, e))
229 /*----------------------------------------------------------------*/
232 * The stochastic-multi-queue is a set of LRU lists stacked into levels.
233 * Entries are moved up levels when they are used, which loosely orders the
234 * most-accessed entries in the top levels and the least-accessed in the
235 * bottom. This structure is *much* better than a single LRU list.
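/*
 * A rough illustration (not actual policy state) of what the levels achieve.
 * Each hit nudges an entry towards the top level, so after a burst of IO the
 * levels loosely sort entries by recent popularity:
 *
 *	level 3 (hottest):  E7  E2
 *	level 2:            E9  E1  E4
 *	level 1:            E5
 *	level 0 (coldest):  E0  E8  E3  E6
 *
 * Eviction candidates come from the lowest populated level (see q_peek()
 * below), and promotion thresholds are expressed as "level >= N".
 */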
237 #define MAX_LEVELS 64u
240 struct entry_space *es;
244 struct ilist qs[MAX_LEVELS];
247 * We maintain a count of the number of entries we would like in each
250 unsigned last_target_nr_elts;
251 unsigned nr_top_levels;
252 unsigned nr_in_top_levels;
253 unsigned target_count[MAX_LEVELS];
256 static void q_init(struct queue *q, struct entry_space *es, unsigned nr_levels)
262 q->nr_levels = nr_levels;
264 for (i = 0; i < q->nr_levels; i++) {
266 q->target_count[i] = 0u;
269 q->last_target_nr_elts = 0u;
270 q->nr_top_levels = 0u;
271 q->nr_in_top_levels = 0u;
274 static unsigned q_size(struct queue *q)
280 * Insert an entry to the back of the given level.
282 static void q_push(struct queue *q, struct entry *e)
284 BUG_ON(e->pending_work);
289 l_add_tail(q->es, q->qs + e->level, e);
292 static void q_push_front(struct queue *q, struct entry *e)
294 BUG_ON(e->pending_work);
299 l_add_head(q->es, q->qs + e->level, e);
302 static void q_push_before(struct queue *q, struct entry *old, struct entry *e)
304 BUG_ON(e->pending_work);
309 l_add_before(q->es, q->qs + e->level, old, e);
312 static void q_del(struct queue *q, struct entry *e)
314 l_del(q->es, q->qs + e->level, e);
320 * Return the oldest entry of the lowest populated level.
322 static struct entry *q_peek(struct queue *q, unsigned max_level, bool can_cross_sentinel)
327 max_level = min(max_level, q->nr_levels);
329 for (level = 0; level < max_level; level++)
330 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e)) {
332 if (can_cross_sentinel)
344 static struct entry *q_pop(struct queue *q)
346 struct entry *e = q_peek(q, q->nr_levels, true);
355 * This function assumes there is a non-sentinel entry to pop. It's only
356 * used by redistribute, so we know this is true. It also doesn't adjust
357 * the q->nr_elts count.
359 static struct entry *__redist_pop_from(struct queue *q, unsigned level)
363 for (; level < q->nr_levels; level++)
364 for (e = l_head(q->es, q->qs + level); e; e = l_next(q->es, e))
366 l_del(q->es, q->qs + e->level, e);
373 static void q_set_targets_subrange_(struct queue *q, unsigned nr_elts, unsigned lbegin, unsigned lend)
375 unsigned level, nr_levels, entries_per_level, remainder;
377 BUG_ON(lbegin > lend);
378 BUG_ON(lend > q->nr_levels);
379 nr_levels = lend - lbegin;
380 entries_per_level = safe_div(nr_elts, nr_levels);
381 remainder = safe_mod(nr_elts, nr_levels);
383 for (level = lbegin; level < lend; level++)
384 q->target_count[level] =
385 (level < (lbegin + remainder)) ? entries_per_level + 1u : entries_per_level;
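/*
 * Worked example: with nr_elts = 10 spread over a subrange [lbegin, lend)
 * covering 4 levels:
 *
 *	entries_per_level = safe_div(10, 4) = 2
 *	remainder         = safe_mod(10, 4) = 2
 *	target_count[]    = 3, 3, 2, 2	(the first 'remainder' levels get one extra)
 */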
389 * Typically we have fewer elements in the top few levels, which allows us
390 * to adjust the promote threshold nicely.
392 static void q_set_targets(struct queue *q)
394 if (q->last_target_nr_elts == q->nr_elts)
397 q->last_target_nr_elts = q->nr_elts;
399 if (q->nr_top_levels > q->nr_levels)
400 q_set_targets_subrange_(q, q->nr_elts, 0, q->nr_levels);
403 q_set_targets_subrange_(q, q->nr_in_top_levels,
404 q->nr_levels - q->nr_top_levels, q->nr_levels);
406 if (q->nr_in_top_levels < q->nr_elts)
407 q_set_targets_subrange_(q, q->nr_elts - q->nr_in_top_levels,
408 0, q->nr_levels - q->nr_top_levels);
410 q_set_targets_subrange_(q, 0, 0, q->nr_levels - q->nr_top_levels);
414 static void q_redistribute(struct queue *q)
416 unsigned target, level;
417 struct ilist *l, *l_above;
422 for (level = 0u; level < q->nr_levels - 1u; level++) {
424 target = q->target_count[level];
427 * Pull down some entries from the level above.
429 while (l->nr_elts < target) {
430 e = __redist_pop_from(q, level + 1u);
437 l_add_tail(q->es, l, e);
441 * Push some entries up.
443 l_above = q->qs + level + 1u;
444 while (l->nr_elts > target) {
445 e = l_pop_tail(q->es, l);
451 e->level = level + 1u;
452 l_add_tail(q->es, l_above, e);
457 static void q_requeue(struct queue *q, struct entry *e, unsigned extra_levels,
458 struct entry *s1, struct entry *s2)
461 unsigned sentinels_passed = 0;
462 unsigned new_level = min(q->nr_levels - 1u, e->level + extra_levels);
464 /* try to find an entry to swap with */
465 if (extra_levels && (e->level < q->nr_levels - 1u)) {
466 for (de = l_head(q->es, q->qs + new_level); de && de->sentinel; de = l_next(q->es, de))
471 de->level = e->level;
473 switch (sentinels_passed) {
475 q_push_before(q, s1, de);
479 q_push_before(q, s2, de);
491 e->level = new_level;
495 /*----------------------------------------------------------------*/
498 #define SIXTEENTH (1u << (FP_SHIFT - 4u))
499 #define EIGHTH (1u << (FP_SHIFT - 3u))
502 unsigned hit_threshold;
513 static void stats_init(struct stats *s, unsigned nr_levels)
515 s->hit_threshold = (nr_levels * 3u) / 4u;
520 static void stats_reset(struct stats *s)
522 s->hits = s->misses = 0u;
525 static void stats_level_accessed(struct stats *s, unsigned level)
527 if (level >= s->hit_threshold)
533 static void stats_miss(struct stats *s)
539 * There are times when we don't have any confidence in the hotspot queue,
540 * such as when a fresh cache is created and the blocks have been spread
541 * out across the levels, or when the IO load changes. We detect this by
542 * seeing how often a lookup hits the top levels of the hotspot queue.
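/*
 * Worked example: confidence is the hit fraction hits / (hits + misses) in
 * fixed point, scaled by 2^FP_SHIFT (the FP_SHIFT definition is not shown in
 * this excerpt).  SIXTEENTH and EIGHTH are 1/16 and 1/8 on the same scale, so
 *
 *	hits = 5,  misses = 95  ->  fraction 0.05 < 1/16	(poor)
 *	hits = 10, misses = 90  ->  1/16 <= 0.10 < 1/8		(middling)
 *	hits = 30, misses = 70  ->  fraction 0.30 >= 1/8	(performing well)
 *
 * The enum values returned for each band are elided above.
 */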
544 static enum performance stats_assess(struct stats *s)
546 unsigned confidence = safe_div(s->hits << FP_SHIFT, s->hits + s->misses);
548 if (confidence < SIXTEENTH)
551 else if (confidence < EIGHTH)
558 /*----------------------------------------------------------------*/
560 struct smq_hash_table {
561 struct entry_space *es;
562 unsigned long long hash_bits;
567 * All cache entries are stored in a chained hash table. To save space we
568 * use indexing again, and only store indexes to the next entry.
570 static int h_init(struct smq_hash_table *ht, struct entry_space *es, unsigned nr_entries)
572 unsigned i, nr_buckets;
575 nr_buckets = roundup_pow_of_two(max(nr_entries / 4u, 16u));
576 ht->hash_bits = __ffs(nr_buckets);
578 ht->buckets = vmalloc(sizeof(*ht->buckets) * nr_buckets);
582 for (i = 0; i < nr_buckets; i++)
583 ht->buckets[i] = INDEXER_NULL;
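/*
 * Worked example: for nr_entries = 1000 cache entries,
 *
 *	nr_buckets = roundup_pow_of_two(max(1000 / 4, 16)) = 256
 *	hash_bits  = __ffs(256) = 8
 *
 * so hash_64() is asked for an 8-bit bucket number and each bucket holds
 * roughly four entries on average, chained through the 28-bit hash_next index.
 */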
588 static void h_exit(struct smq_hash_table *ht)
593 static struct entry *h_head(struct smq_hash_table *ht, unsigned bucket)
595 return to_entry(ht->es, ht->buckets[bucket]);
598 static struct entry *h_next(struct smq_hash_table *ht, struct entry *e)
600 return to_entry(ht->es, e->hash_next);
603 static void __h_insert(struct smq_hash_table *ht, unsigned bucket, struct entry *e)
605 e->hash_next = ht->buckets[bucket];
606 ht->buckets[bucket] = to_index(ht->es, e);
609 static void h_insert(struct smq_hash_table *ht, struct entry *e)
611 unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
612 __h_insert(ht, h, e);
615 static struct entry *__h_lookup(struct smq_hash_table *ht, unsigned h, dm_oblock_t oblock,
621 for (e = h_head(ht, h); e; e = h_next(ht, e)) {
622 if (e->oblock == oblock)
631 static void __h_unlink(struct smq_hash_table *ht, unsigned h,
632 struct entry *e, struct entry *prev)
635 prev->hash_next = e->hash_next;
637 ht->buckets[h] = e->hash_next;
641 * Also moves each entry to the front of the bucket.
643 static struct entry *h_lookup(struct smq_hash_table *ht, dm_oblock_t oblock)
645 struct entry *e, *prev;
646 unsigned h = hash_64(from_oblock(oblock), ht->hash_bits);
648 e = __h_lookup(ht, h, oblock, &prev);
651 * Move to the front because this entry is likely to be hit again.
654 __h_unlink(ht, h, e, prev);
655 __h_insert(ht, h, e);
661 static void h_remove(struct smq_hash_table *ht, struct entry *e)
663 unsigned h = hash_64(from_oblock(e->oblock), ht->hash_bits);
667 * The downside of using a singly linked list is that we have to
668 * iterate the bucket to remove an item.
670 e = __h_lookup(ht, h, e->oblock, &prev);
672 __h_unlink(ht, h, e, prev);
675 /*----------------------------------------------------------------*/
678 struct entry_space *es;
681 unsigned nr_allocated;
685 static void init_allocator(struct entry_alloc *ea, struct entry_space *es,
686 unsigned begin, unsigned end)
691 ea->nr_allocated = 0u;
695 for (i = begin; i != end; i++)
696 l_add_tail(ea->es, &ea->free, __get_entry(ea->es, i));
699 static void init_entry(struct entry *e)
702 * We can't memset because that would clear the hotspot and
703 * sentinel bits which remain constant.
705 e->hash_next = INDEXER_NULL;
706 e->next = INDEXER_NULL;
707 e->prev = INDEXER_NULL;
709 e->dirty = true; /* FIXME: audit */
712 e->pending_work = false;
715 static struct entry *alloc_entry(struct entry_alloc *ea)
719 if (l_empty(&ea->free))
722 e = l_pop_tail(ea->es, &ea->free);
730 * This assumes the cblock hasn't already been allocated.
732 static struct entry *alloc_particular_entry(struct entry_alloc *ea, unsigned i)
734 struct entry *e = __get_entry(ea->es, ea->begin + i);
736 BUG_ON(e->allocated);
738 l_del(ea->es, &ea->free, e);
745 static void free_entry(struct entry_alloc *ea, struct entry *e)
747 BUG_ON(!ea->nr_allocated);
748 BUG_ON(!e->allocated);
751 e->allocated = false;
752 l_add_tail(ea->es, &ea->free, e);
755 static bool allocator_empty(struct entry_alloc *ea)
757 return l_empty(&ea->free);
760 static unsigned get_index(struct entry_alloc *ea, struct entry *e)
762 return to_index(ea->es, e) - ea->begin;
765 static struct entry *get_entry(struct entry_alloc *ea, unsigned index)
767 return __get_entry(ea->es, ea->begin + index);
770 /*----------------------------------------------------------------*/
772 #define NR_HOTSPOT_LEVELS 64u
773 #define NR_CACHE_LEVELS 64u
775 #define WRITEBACK_PERIOD (10ul * HZ)
776 #define DEMOTE_PERIOD (60ul * HZ)
778 #define HOTSPOT_UPDATE_PERIOD (HZ)
779 #define CACHE_UPDATE_PERIOD (60ul * HZ)
782 struct dm_cache_policy policy;
784 /* protects everything */
786 dm_cblock_t cache_size;
787 sector_t cache_block_size;
789 sector_t hotspot_block_size;
790 unsigned nr_hotspot_blocks;
791 unsigned cache_blocks_per_hotspot_block;
792 unsigned hotspot_level_jump;
794 struct entry_space es;
795 struct entry_alloc writeback_sentinel_alloc;
796 struct entry_alloc demote_sentinel_alloc;
797 struct entry_alloc hotspot_alloc;
798 struct entry_alloc cache_alloc;
800 unsigned long *hotspot_hit_bits;
801 unsigned long *cache_hit_bits;
804 * We maintain three queues of entries. The cache proper consists of a
805 * clean queue and a dirty queue holding the currently active mappings.
806 * The hotspot queue uses a larger block size to track blocks that are
807 * being hit frequently and are potential candidates for promotion to
808 * the cache.
810 struct queue hotspot;
814 struct stats hotspot_stats;
815 struct stats cache_stats;
818 * Keeps track of time, incremented by the core. We use this to
819 * avoid attributing multiple hits within the same tick.
824 * The hash tables allow us to quickly find an entry by origin block.
827 struct smq_hash_table table;
828 struct smq_hash_table hotspot_table;
830 bool current_writeback_sentinels;
831 unsigned long next_writeback_period;
833 bool current_demote_sentinels;
834 unsigned long next_demote_period;
836 unsigned write_promote_level;
837 unsigned read_promote_level;
839 unsigned long next_hotspot_period;
840 unsigned long next_cache_period;
842 struct background_tracker *bg_work;
844 bool migrations_allowed;
847 /*----------------------------------------------------------------*/
849 static struct entry *get_sentinel(struct entry_alloc *ea, unsigned level, bool which)
851 return get_entry(ea, which ? level : NR_CACHE_LEVELS + level);
854 static struct entry *writeback_sentinel(struct smq_policy *mq, unsigned level)
856 return get_sentinel(&mq->writeback_sentinel_alloc, level, mq->current_writeback_sentinels);
859 static struct entry *demote_sentinel(struct smq_policy *mq, unsigned level)
861 return get_sentinel(&mq->demote_sentinel_alloc, level, mq->current_demote_sentinels);
864 static void __update_writeback_sentinels(struct smq_policy *mq)
867 struct queue *q = &mq->dirty;
868 struct entry *sentinel;
870 for (level = 0; level < q->nr_levels; level++) {
871 sentinel = writeback_sentinel(mq, level);
877 static void __update_demote_sentinels(struct smq_policy *mq)
880 struct queue *q = &mq->clean;
881 struct entry *sentinel;
883 for (level = 0; level < q->nr_levels; level++) {
884 sentinel = demote_sentinel(mq, level);
890 static void update_sentinels(struct smq_policy *mq)
892 if (time_after(jiffies, mq->next_writeback_period)) {
893 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
894 mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
895 __update_writeback_sentinels(mq);
898 if (time_after(jiffies, mq->next_demote_period)) {
899 mq->next_demote_period = jiffies + DEMOTE_PERIOD;
900 mq->current_demote_sentinels = !mq->current_demote_sentinels;
901 __update_demote_sentinels(mq);
905 static void __sentinels_init(struct smq_policy *mq)
908 struct entry *sentinel;
910 for (level = 0; level < NR_CACHE_LEVELS; level++) {
911 sentinel = writeback_sentinel(mq, level);
912 sentinel->level = level;
913 q_push(&mq->dirty, sentinel);
915 sentinel = demote_sentinel(mq, level);
916 sentinel->level = level;
917 q_push(&mq->clean, sentinel);
921 static void sentinels_init(struct smq_policy *mq)
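	/*
	 * Both generations of sentinels (the set currently selected by the
	 * current_*_sentinels flags and the alternate set) are pushed onto the
	 * queues here, by flipping the flags and calling __sentinels_init()
	 * twice.  From then on update_sentinels() swaps the active generation
	 * once per WRITEBACK_PERIOD / DEMOTE_PERIOD.
	 */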
923 mq->next_writeback_period = jiffies + WRITEBACK_PERIOD;
924 mq->next_demote_period = jiffies + DEMOTE_PERIOD;
926 mq->current_writeback_sentinels = false;
927 mq->current_demote_sentinels = false;
928 __sentinels_init(mq);
930 mq->current_writeback_sentinels = !mq->current_writeback_sentinels;
931 mq->current_demote_sentinels = !mq->current_demote_sentinels;
932 __sentinels_init(mq);
935 /*----------------------------------------------------------------*/
937 static void del_queue(struct smq_policy *mq, struct entry *e)
939 q_del(e->dirty ? &mq->dirty : &mq->clean, e);
942 static void push_queue(struct smq_policy *mq, struct entry *e)
945 q_push(&mq->dirty, e);
947 q_push(&mq->clean, e);
950 // !h, !q, a -> h, q, a
951 static void push(struct smq_policy *mq, struct entry *e)
953 h_insert(&mq->table, e);
954 if (!e->pending_work)
958 static void push_queue_front(struct smq_policy *mq, struct entry *e)
961 q_push_front(&mq->dirty, e);
963 q_push_front(&mq->clean, e);
966 static void push_front(struct smq_policy *mq, struct entry *e)
968 h_insert(&mq->table, e);
969 if (!e->pending_work)
970 push_queue_front(mq, e);
973 static dm_cblock_t infer_cblock(struct smq_policy *mq, struct entry *e)
975 return to_cblock(get_index(&mq->cache_alloc, e));
978 static void requeue(struct smq_policy *mq, struct entry *e)
981 * Pending work has temporarily been taken out of the queues.
986 if (!test_and_set_bit(from_cblock(infer_cblock(mq, e)), mq->cache_hit_bits)) {
988 q_requeue(&mq->clean, e, 1u, NULL, NULL);
992 q_requeue(&mq->dirty, e, 1u,
993 get_sentinel(&mq->writeback_sentinel_alloc, e->level, !mq->current_writeback_sentinels),
994 get_sentinel(&mq->writeback_sentinel_alloc, e->level, mq->current_writeback_sentinels));
998 static unsigned default_promote_level(struct smq_policy *mq)
1001 * The promote level depends on the current performance of the cache.
1004 * If the cache is performing badly, then we can't afford
1005 * to promote much without causing performance to drop below that
1006 * of the origin device.
1008 * If the cache is performing well, then we don't need to promote
1009 * much. If it isn't broken, don't fix it.
1011 * If the cache is middling then we promote more.
1013 * This scheme reminds me of a graph of entropy vs probability of a binary variable.
1016 static unsigned table[] = {1, 1, 1, 2, 4, 6, 7, 8, 7, 6, 4, 4, 3, 3, 2, 2, 1};
1018 unsigned hits = mq->cache_stats.hits;
1019 unsigned misses = mq->cache_stats.misses;
1020 unsigned index = safe_div(hits << 4u, hits + misses);
1021 return table[index];
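/*
 * Worked example: the hit fraction is mapped to a table index in [0, 16].
 * For hits = 300, misses = 700:
 *
 *	index = safe_div(300 << 4, 1000) = 4	->  table[4] = 4
 *
 * A very low or very high hit ratio yields a small value (promote little),
 * while a middling ratio yields a large one (promote more), matching the
 * comment above.
 */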
1024 static void update_promote_levels(struct smq_policy *mq)
1027 * If there are unused cache entries then we want to be really eager to promote.
1030 unsigned threshold_level = allocator_empty(&mq->cache_alloc) ?
1031 default_promote_level(mq) : (NR_HOTSPOT_LEVELS / 2u);
1033 threshold_level = max(threshold_level, NR_HOTSPOT_LEVELS);
1036 * If the hotspot queue is performing badly then we have little
1037 * confidence that we know which blocks to promote, so we cut down
1038 * the number of promotions.
1040 switch (stats_assess(&mq->hotspot_stats)) {
1042 threshold_level /= 4u;
1046 threshold_level /= 2u;
1053 mq->read_promote_level = NR_HOTSPOT_LEVELS - threshold_level;
1054 mq->write_promote_level = (NR_HOTSPOT_LEVELS - threshold_level);
1058 * If the hotspot queue is performing badly, then we try to move entries
1059 * around more quickly.
1061 static void update_level_jump(struct smq_policy *mq)
1063 switch (stats_assess(&mq->hotspot_stats)) {
1065 mq->hotspot_level_jump = 4u;
1069 mq->hotspot_level_jump = 2u;
1073 mq->hotspot_level_jump = 1u;
1078 static void end_hotspot_period(struct smq_policy *mq)
1080 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1081 update_promote_levels(mq);
1083 if (time_after(jiffies, mq->next_hotspot_period)) {
1084 update_level_jump(mq);
1085 q_redistribute(&mq->hotspot);
1086 stats_reset(&mq->hotspot_stats);
1087 mq->next_hotspot_period = jiffies + HOTSPOT_UPDATE_PERIOD;
1091 static void end_cache_period(struct smq_policy *mq)
1093 if (time_after(jiffies, mq->next_cache_period)) {
1094 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1096 q_redistribute(&mq->dirty);
1097 q_redistribute(&mq->clean);
1098 stats_reset(&mq->cache_stats);
1100 mq->next_cache_period = jiffies + CACHE_UPDATE_PERIOD;
1104 /*----------------------------------------------------------------*/
1107 * Targets are given as a percentage.
1109 #define CLEAN_TARGET 25u
1110 #define FREE_TARGET 25u
1112 static unsigned percent_to_target(struct smq_policy *mq, unsigned p)
1114 return from_cblock(mq->cache_size) * p / 100u;
1117 static bool clean_target_met(struct smq_policy *mq, bool idle)
1120 * Not all cache blocks are necessarily populated, so we cannot rely on
1121 * the size of the clean queue alone.
1127 * We'd like to clean everything.
1129 return q_size(&mq->dirty) == 0u;
1132 nr_clean = from_cblock(mq->cache_size) - q_size(&mq->dirty);
1133 return (nr_clean + btracker_nr_writebacks_queued(mq->bg_work)) >=
1134 percent_to_target(mq, CLEAN_TARGET);
1137 static bool free_target_met(struct smq_policy *mq, bool idle)
1144 nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
1145 return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
1146 percent_to_target(mq, FREE_TARGET);
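/*
 * Worked example: for a cache of 10000 blocks both targets come to
 * percent_to_target(mq, 25) = 2500 blocks.  The clean target is met once
 * (clean blocks + queued writebacks) >= 2500, except when idle, where the
 * policy tries to clean everything; the free target is met once
 * (free blocks + queued demotions) >= 2500.
 */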
1149 /*----------------------------------------------------------------*/
1151 static void mark_pending(struct smq_policy *mq, struct entry *e)
1153 BUG_ON(e->sentinel);
1154 BUG_ON(!e->allocated);
1155 BUG_ON(e->pending_work);
1156 e->pending_work = true;
1159 static void clear_pending(struct smq_policy *mq, struct entry *e)
1161 BUG_ON(!e->pending_work);
1162 e->pending_work = false;
1165 static void queue_writeback(struct smq_policy *mq)
1168 struct policy_work work;
1171 e = q_peek(&mq->dirty, mq->dirty.nr_levels, !mq->migrations_allowed);
1173 mark_pending(mq, e);
1174 q_del(&mq->dirty, e);
1176 work.op = POLICY_WRITEBACK;
1177 work.oblock = e->oblock;
1178 work.cblock = infer_cblock(mq, e);
1180 r = btracker_queue(mq->bg_work, &work, NULL);
1181 WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
1185 static void queue_demotion(struct smq_policy *mq)
1187 struct policy_work work;
1190 if (unlikely(WARN_ON_ONCE(!mq->migrations_allowed)))
1193 e = q_peek(&mq->clean, mq->clean.nr_levels, true);
1195 if (!clean_target_met(mq, false))
1196 queue_writeback(mq);
1200 mark_pending(mq, e);
1201 q_del(&mq->clean, e);
1203 work.op = POLICY_DEMOTE;
1204 work.oblock = e->oblock;
1205 work.cblock = infer_cblock(mq, e);
1206 btracker_queue(mq->bg_work, &work, NULL);
1209 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
1210 struct policy_work **workp)
1213 struct policy_work work;
1215 if (!mq->migrations_allowed)
1218 if (allocator_empty(&mq->cache_alloc)) {
1220 * We always claim to be 'idle' to ensure some demotions happen
1221 * with continuous loads.
1223 if (!free_target_met(mq, true))
1228 if (btracker_promotion_already_present(mq->bg_work, oblock))
1232 * We allocate the entry now to reserve the cblock. If the
1233 * background work is aborted we must remember to free it.
1235 e = alloc_entry(&mq->cache_alloc);
1237 e->pending_work = true;
1238 work.op = POLICY_PROMOTE;
1239 work.oblock = oblock;
1240 work.cblock = infer_cblock(mq, e);
1241 btracker_queue(mq->bg_work, &work, workp);
1244 /*----------------------------------------------------------------*/
1246 enum promote_result {
1253 * Converts a boolean into a promote result.
1255 static enum promote_result maybe_promote(bool promote)
1257 return promote ? PROMOTE_PERMANENT : PROMOTE_NOT;
1260 static enum promote_result should_promote(struct smq_policy *mq, struct entry *hs_e,
1261 int data_dir, bool fast_promote)
1263 if (data_dir == WRITE) {
1264 if (!allocator_empty(&mq->cache_alloc) && fast_promote)
1265 return PROMOTE_TEMPORARY;
1267 return maybe_promote(hs_e->level >= mq->write_promote_level);
1269 return maybe_promote(hs_e->level >= mq->read_promote_level);
1272 static dm_oblock_t to_hblock(struct smq_policy *mq, dm_oblock_t b)
1274 sector_t r = from_oblock(b);
1275 (void) sector_div(r, mq->cache_blocks_per_hotspot_block);
1276 return to_oblock(r);
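/*
 * Worked example: with 16 cache blocks per hotspot block (the starting value
 * chosen by calc_hotspot_params() below; it may end up smaller on small
 * origins), origin block 35 maps to hotspot block 35 / 16 = 2, i.e. hits on
 * any of origin blocks 32..47 are accounted to the same hotspot entry.
 */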
1279 static struct entry *update_hotspot_queue(struct smq_policy *mq, dm_oblock_t b)
1282 dm_oblock_t hb = to_hblock(mq, b);
1283 struct entry *e = h_lookup(&mq->hotspot_table, hb);
1286 stats_level_accessed(&mq->hotspot_stats, e->level);
1288 hi = get_index(&mq->hotspot_alloc, e);
1289 q_requeue(&mq->hotspot, e,
1290 test_and_set_bit(hi, mq->hotspot_hit_bits) ?
1291 0u : mq->hotspot_level_jump,
1295 stats_miss(&mq->hotspot_stats);
1297 e = alloc_entry(&mq->hotspot_alloc);
1299 e = q_pop(&mq->hotspot);
1301 h_remove(&mq->hotspot_table, e);
1302 hi = get_index(&mq->hotspot_alloc, e);
1303 clear_bit(hi, mq->hotspot_hit_bits);
1310 q_push(&mq->hotspot, e);
1311 h_insert(&mq->hotspot_table, e);
1318 /*----------------------------------------------------------------*/
1321 * Public interface, via the policy struct. See dm-cache-policy.h for a
1322 * description of these.
1325 static struct smq_policy *to_smq_policy(struct dm_cache_policy *p)
1327 return container_of(p, struct smq_policy, policy);
1330 static void smq_destroy(struct dm_cache_policy *p)
1332 struct smq_policy *mq = to_smq_policy(p);
1334 btracker_destroy(mq->bg_work);
1335 h_exit(&mq->hotspot_table);
1337 free_bitset(mq->hotspot_hit_bits);
1338 free_bitset(mq->cache_hit_bits);
1339 space_exit(&mq->es);
1343 /*----------------------------------------------------------------*/
1345 static int __lookup(struct smq_policy *mq, dm_oblock_t oblock, dm_cblock_t *cblock,
1346 int data_dir, bool fast_copy,
1347 struct policy_work **work, bool *background_work)
1349 struct entry *e, *hs_e;
1350 enum promote_result pr;
1352 *background_work = false;
1354 e = h_lookup(&mq->table, oblock);
1356 stats_level_accessed(&mq->cache_stats, e->level);
1359 *cblock = infer_cblock(mq, e);
1363 stats_miss(&mq->cache_stats);
1366 * The hotspot queue only gets updated with misses.
1368 hs_e = update_hotspot_queue(mq, oblock);
1370 pr = should_promote(mq, hs_e, data_dir, fast_copy);
1371 if (pr != PROMOTE_NOT) {
1372 queue_promotion(mq, oblock, work);
1373 *background_work = true;
1380 static int smq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock,
1381 int data_dir, bool fast_copy,
1382 bool *background_work)
1385 unsigned long flags;
1386 struct smq_policy *mq = to_smq_policy(p);
1388 spin_lock_irqsave(&mq->lock, flags);
1389 r = __lookup(mq, oblock, cblock,
1390 data_dir, fast_copy,
1391 NULL, background_work);
1392 spin_unlock_irqrestore(&mq->lock, flags);
1397 static int smq_lookup_with_work(struct dm_cache_policy *p,
1398 dm_oblock_t oblock, dm_cblock_t *cblock,
1399 int data_dir, bool fast_copy,
1400 struct policy_work **work)
1403 bool background_queued;
1404 unsigned long flags;
1405 struct smq_policy *mq = to_smq_policy(p);
1407 spin_lock_irqsave(&mq->lock, flags);
1408 r = __lookup(mq, oblock, cblock, data_dir, fast_copy, work, &background_queued);
1409 spin_unlock_irqrestore(&mq->lock, flags);
1414 static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
1415 struct policy_work **result)
1418 unsigned long flags;
1419 struct smq_policy *mq = to_smq_policy(p);
1421 spin_lock_irqsave(&mq->lock, flags);
1422 r = btracker_issue(mq->bg_work, result);
1423 if (r == -ENODATA) {
1424 /* find some writeback work to do */
1425 if (mq->migrations_allowed && !free_target_met(mq, idle))
1428 else if (!clean_target_met(mq, idle))
1429 queue_writeback(mq);
1431 r = btracker_issue(mq->bg_work, result);
1433 spin_unlock_irqrestore(&mq->lock, flags);
1439 * We need to clear any pending work flags that have been set, and in the
1440 * case of promotion free the entry for the destination cblock.
1442 static void __complete_background_work(struct smq_policy *mq,
1443 struct policy_work *work,
1446 struct entry *e = get_entry(&mq->cache_alloc,
1447 from_cblock(work->cblock));
1450 case POLICY_PROMOTE:
1452 clear_pending(mq, e);
1454 e->oblock = work->oblock;
1458 free_entry(&mq->cache_alloc, e);
1466 h_remove(&mq->table, e);
1467 free_entry(&mq->cache_alloc, e);
1470 clear_pending(mq, e);
1476 case POLICY_WRITEBACK:
1478 clear_pending(mq, e);
1484 btracker_complete(mq->bg_work, work);
1487 static void smq_complete_background_work(struct dm_cache_policy *p,
1488 struct policy_work *work,
1491 unsigned long flags;
1492 struct smq_policy *mq = to_smq_policy(p);
1494 spin_lock_irqsave(&mq->lock, flags);
1495 __complete_background_work(mq, work, success);
1496 spin_unlock_irqrestore(&mq->lock, flags);
1499 // in_hash(oblock) -> in_hash(oblock)
1500 static void __smq_set_clear_dirty(struct smq_policy *mq, dm_cblock_t cblock, bool set)
1502 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1504 if (e->pending_work)
1513 static void smq_set_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1515 unsigned long flags;
1516 struct smq_policy *mq = to_smq_policy(p);
1518 spin_lock_irqsave(&mq->lock, flags);
1519 __smq_set_clear_dirty(mq, cblock, true);
1520 spin_unlock_irqrestore(&mq->lock, flags);
1523 static void smq_clear_dirty(struct dm_cache_policy *p, dm_cblock_t cblock)
1525 struct smq_policy *mq = to_smq_policy(p);
1526 unsigned long flags;
1528 spin_lock_irqsave(&mq->lock, flags);
1529 __smq_set_clear_dirty(mq, cblock, false);
1530 spin_unlock_irqrestore(&mq->lock, flags);
1533 static unsigned random_level(dm_cblock_t cblock)
1535 return hash_32(from_cblock(cblock), 9) & (NR_CACHE_LEVELS - 1);
1538 static int smq_load_mapping(struct dm_cache_policy *p,
1539 dm_oblock_t oblock, dm_cblock_t cblock,
1540 bool dirty, uint32_t hint, bool hint_valid)
1542 struct smq_policy *mq = to_smq_policy(p);
1545 e = alloc_particular_entry(&mq->cache_alloc, from_cblock(cblock));
1548 e->level = hint_valid ? min(hint, NR_CACHE_LEVELS - 1) : random_level(cblock);
1549 e->pending_work = false;
1552 * When we load mappings we push ahead of both sentinels in order to
1553 * allow demotions and cleaning to occur immediately.
1560 static int smq_invalidate_mapping(struct dm_cache_policy *p, dm_cblock_t cblock)
1562 struct smq_policy *mq = to_smq_policy(p);
1563 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1568 // FIXME: what if this block has pending background work?
1570 h_remove(&mq->table, e);
1571 free_entry(&mq->cache_alloc, e);
1575 static uint32_t smq_get_hint(struct dm_cache_policy *p, dm_cblock_t cblock)
1577 struct smq_policy *mq = to_smq_policy(p);
1578 struct entry *e = get_entry(&mq->cache_alloc, from_cblock(cblock));
1586 static dm_cblock_t smq_residency(struct dm_cache_policy *p)
1589 unsigned long flags;
1590 struct smq_policy *mq = to_smq_policy(p);
1592 spin_lock_irqsave(&mq->lock, flags);
1593 r = to_cblock(mq->cache_alloc.nr_allocated);
1594 spin_unlock_irqrestore(&mq->lock, flags);
1599 static void smq_tick(struct dm_cache_policy *p, bool can_block)
1601 struct smq_policy *mq = to_smq_policy(p);
1602 unsigned long flags;
1604 spin_lock_irqsave(&mq->lock, flags);
1606 update_sentinels(mq);
1607 end_hotspot_period(mq);
1608 end_cache_period(mq);
1609 spin_unlock_irqrestore(&mq->lock, flags);
1612 static void smq_allow_migrations(struct dm_cache_policy *p, bool allow)
1614 struct smq_policy *mq = to_smq_policy(p);
1615 mq->migrations_allowed = allow;
1619 * smq has no config values, but the old mq policy did. To avoid breaking
1620 * software we continue to accept these configurables for the mq policy,
1621 * but they have no effect.
1623 static int mq_set_config_value(struct dm_cache_policy *p,
1624 const char *key, const char *value)
1628 if (kstrtoul(value, 10, &tmp))
1631 if (!strcasecmp(key, "random_threshold") ||
1632 !strcasecmp(key, "sequential_threshold") ||
1633 !strcasecmp(key, "discard_promote_adjustment") ||
1634 !strcasecmp(key, "read_promote_adjustment") ||
1635 !strcasecmp(key, "write_promote_adjustment")) {
1636 DMWARN("tunable '%s' no longer has any effect, mq policy is now an alias for smq", key);
1643 static int mq_emit_config_values(struct dm_cache_policy *p, char *result,
1644 unsigned maxlen, ssize_t *sz_ptr)
1646 ssize_t sz = *sz_ptr;
1648 DMEMIT("10 random_threshold 0 "
1649 "sequential_threshold 0 "
1650 "discard_promote_adjustment 0 "
1651 "read_promote_adjustment 0 "
1652 "write_promote_adjustment 0 ");
1658 /* Init the policy plugin interface function pointers. */
1659 static void init_policy_functions(struct smq_policy *mq, bool mimic_mq)
1661 mq->policy.destroy = smq_destroy;
1662 mq->policy.lookup = smq_lookup;
1663 mq->policy.lookup_with_work = smq_lookup_with_work;
1664 mq->policy.get_background_work = smq_get_background_work;
1665 mq->policy.complete_background_work = smq_complete_background_work;
1666 mq->policy.set_dirty = smq_set_dirty;
1667 mq->policy.clear_dirty = smq_clear_dirty;
1668 mq->policy.load_mapping = smq_load_mapping;
1669 mq->policy.invalidate_mapping = smq_invalidate_mapping;
1670 mq->policy.get_hint = smq_get_hint;
1671 mq->policy.residency = smq_residency;
1672 mq->policy.tick = smq_tick;
1673 mq->policy.allow_migrations = smq_allow_migrations;
1676 mq->policy.set_config_value = mq_set_config_value;
1677 mq->policy.emit_config_values = mq_emit_config_values;
1681 static bool too_many_hotspot_blocks(sector_t origin_size,
1682 sector_t hotspot_block_size,
1683 unsigned nr_hotspot_blocks)
1685 return (hotspot_block_size * nr_hotspot_blocks) > origin_size;
1688 static void calc_hotspot_params(sector_t origin_size,
1689 sector_t cache_block_size,
1690 unsigned nr_cache_blocks,
1691 sector_t *hotspot_block_size,
1692 unsigned *nr_hotspot_blocks)
1694 *hotspot_block_size = cache_block_size * 16u;
1695 *nr_hotspot_blocks = max(nr_cache_blocks / 4u, 1024u);
1697 while ((*hotspot_block_size > cache_block_size) &&
1698 too_many_hotspot_blocks(origin_size, *hotspot_block_size, *nr_hotspot_blocks))
1699 *hotspot_block_size /= 2u;
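/*
 * Worked example: for a 64-sector (32KiB) cache block and 100000 cache
 * blocks, the initial guess is a 1024-sector hotspot block and
 * max(100000 / 4, 1024) = 25000 hotspot blocks.  That covers
 * 1024 * 25000 = 25600000 sectors of origin; if the origin device is smaller
 * than that, the hotspot block size is repeatedly halved (but never below the
 * cache block size) until the coverage fits.
 */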
1702 static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size,
1703 sector_t origin_size,
1704 sector_t cache_block_size,
1706 bool migrations_allowed)
1709 unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS;
1710 unsigned total_sentinels = 2u * nr_sentinels_per_queue;
1711 struct smq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);
1716 init_policy_functions(mq, mimic_mq);
1717 mq->cache_size = cache_size;
1718 mq->cache_block_size = cache_block_size;
1720 calc_hotspot_params(origin_size, cache_block_size, from_cblock(cache_size),
1721 &mq->hotspot_block_size, &mq->nr_hotspot_blocks);
1723 mq->cache_blocks_per_hotspot_block = div64_u64(mq->hotspot_block_size, mq->cache_block_size);
1724 mq->hotspot_level_jump = 1u;
1725 if (space_init(&mq->es, total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size))) {
1726 DMERR("couldn't initialize entry space");
1730 init_allocator(&mq->writeback_sentinel_alloc, &mq->es, 0, nr_sentinels_per_queue);
1731 for (i = 0; i < nr_sentinels_per_queue; i++)
1732 get_entry(&mq->writeback_sentinel_alloc, i)->sentinel = true;
1734 init_allocator(&mq->demote_sentinel_alloc, &mq->es, nr_sentinels_per_queue, total_sentinels);
1735 for (i = 0; i < nr_sentinels_per_queue; i++)
1736 get_entry(&mq->demote_sentinel_alloc, i)->sentinel = true;
1738 init_allocator(&mq->hotspot_alloc, &mq->es, total_sentinels,
1739 total_sentinels + mq->nr_hotspot_blocks);
1741 init_allocator(&mq->cache_alloc, &mq->es,
1742 total_sentinels + mq->nr_hotspot_blocks,
1743 total_sentinels + mq->nr_hotspot_blocks + from_cblock(cache_size));
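/*
 * A sketch of the resulting entry_space layout (indices into mq->es), given
 * NR_CACHE_LEVELS = 64 so nr_sentinels_per_queue = 128 and total_sentinels = 256:
 *
 *	[0, 128)                                        writeback sentinels (2 per level)
 *	[128, 256)                                      demote sentinels    (2 per level)
 *	[256, 256 + nr_hotspot_blocks)                  hotspot entries
 *	[256 + nr_hotspot_blocks, ... + cache_size)     cache entries
 *
 * Each allocator hands out entries only from its own sub-range, which is why
 * get_index()/infer_cblock() can recover a cblock by subtracting ea->begin.
 */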
1745 mq->hotspot_hit_bits = alloc_bitset(mq->nr_hotspot_blocks);
1746 if (!mq->hotspot_hit_bits) {
1747 DMERR("couldn't allocate hotspot hit bitset");
1748 goto bad_hotspot_hit_bits;
1750 clear_bitset(mq->hotspot_hit_bits, mq->nr_hotspot_blocks);
1752 if (from_cblock(cache_size)) {
1753 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1754 if (!mq->cache_hit_bits) {
1755 DMERR("couldn't allocate cache hit bitset");
1756 goto bad_cache_hit_bits;
1758 clear_bitset(mq->cache_hit_bits, from_cblock(mq->cache_size));
1760 mq->cache_hit_bits = NULL;
1763 spin_lock_init(&mq->lock);
1765 q_init(&mq->hotspot, &mq->es, NR_HOTSPOT_LEVELS);
1766 mq->hotspot.nr_top_levels = 8;
1767 mq->hotspot.nr_in_top_levels = min(mq->nr_hotspot_blocks / NR_HOTSPOT_LEVELS,
1768 from_cblock(mq->cache_size) / mq->cache_blocks_per_hotspot_block);
1770 q_init(&mq->clean, &mq->es, NR_CACHE_LEVELS);
1771 q_init(&mq->dirty, &mq->es, NR_CACHE_LEVELS);
1773 stats_init(&mq->hotspot_stats, NR_HOTSPOT_LEVELS);
1774 stats_init(&mq->cache_stats, NR_CACHE_LEVELS);
1776 if (h_init(&mq->table, &mq->es, from_cblock(cache_size)))
1777 goto bad_alloc_table;
1779 if (h_init(&mq->hotspot_table, &mq->es, mq->nr_hotspot_blocks))
1780 goto bad_alloc_hotspot_table;
1783 mq->write_promote_level = mq->read_promote_level = NR_HOTSPOT_LEVELS;
1785 mq->next_hotspot_period = jiffies;
1786 mq->next_cache_period = jiffies;
1788 mq->bg_work = btracker_create(10240); /* FIXME: hard coded value */
1792 mq->migrations_allowed = migrations_allowed;
1797 h_exit(&mq->hotspot_table);
1798 bad_alloc_hotspot_table:
1801 free_bitset(mq->cache_hit_bits);
1803 free_bitset(mq->hotspot_hit_bits);
1804 bad_hotspot_hit_bits:
1805 space_exit(&mq->es);
1812 static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1813 sector_t origin_size,
1814 sector_t cache_block_size)
1816 return __smq_create(cache_size, origin_size, cache_block_size, false, true);
1819 static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
1820 sector_t origin_size,
1821 sector_t cache_block_size)
1823 return __smq_create(cache_size, origin_size, cache_block_size, true, true);
1826 static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size,
1827 sector_t origin_size,
1828 sector_t cache_block_size)
1830 return __smq_create(cache_size, origin_size, cache_block_size, false, false);
1833 /*----------------------------------------------------------------*/
1835 static struct dm_cache_policy_type smq_policy_type = {
1837 .version = {2, 0, 0},
1839 .owner = THIS_MODULE,
1840 .create = smq_create
1843 static struct dm_cache_policy_type mq_policy_type = {
1845 .version = {2, 0, 0},
1847 .owner = THIS_MODULE,
1848 .create = mq_create,
1851 static struct dm_cache_policy_type cleaner_policy_type = {
1853 .version = {2, 0, 0},
1855 .owner = THIS_MODULE,
1856 .create = cleaner_create,
1859 static struct dm_cache_policy_type default_policy_type = {
1861 .version = {2, 0, 0},
1863 .owner = THIS_MODULE,
1864 .create = smq_create,
1865 .real = &smq_policy_type
1868 static int __init smq_init(void)
1872 r = dm_cache_policy_register(&smq_policy_type);
1874 DMERR("register failed %d", r);
1878 r = dm_cache_policy_register(&mq_policy_type);
1880 DMERR("register failed (as mq) %d", r);
1884 r = dm_cache_policy_register(&cleaner_policy_type);
1886 DMERR("register failed (as cleaner) %d", r);
1890 r = dm_cache_policy_register(&default_policy_type);
1892 DMERR("register failed (as default) %d", r);
1899 dm_cache_policy_unregister(&cleaner_policy_type);
1901 dm_cache_policy_unregister(&mq_policy_type);
1903 dm_cache_policy_unregister(&smq_policy_type);
1908 static void __exit smq_exit(void)
1910 dm_cache_policy_unregister(&cleaner_policy_type);
1911 dm_cache_policy_unregister(&smq_policy_type);
1912 dm_cache_policy_unregister(&mq_policy_type);
1913 dm_cache_policy_unregister(&default_policy_type);
1916 module_init(smq_init);
1917 module_exit(smq_exit);
1919 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1920 MODULE_LICENSE("GPL");
1921 MODULE_DESCRIPTION("smq cache policy");
1923 MODULE_ALIAS("dm-cache-default");
1924 MODULE_ALIAS("dm-cache-mq");
1925 MODULE_ALIAS("dm-cache-cleaner");