/*
 * soc-cache.c  --  ASoC register cache helpers
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */
#include <linux/bitmap.h>
#include <linux/export.h>
#include <linux/i2c.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include <sound/soc.h>

#include <trace/events/asoc.h>
/*
 * Store @val at index @idx of a flat cache array @base whose entries are
 * @word_size bytes wide (1 or 2).
 *
 * Returns true if the cache already held @val (no write performed),
 * false if the cache was updated or @word_size is unsupported.
 */
static bool snd_soc_set_cache_val(void *base, unsigned int idx,
				  unsigned int val, unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		uint8_t *cache = base;

		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		uint16_t *cache = base;

		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	default:
		/* unsupported word size; nothing written */
		break;
	}
	return false;
}
/*
 * Read the entry at index @idx of a flat cache array @base whose entries
 * are @word_size bytes wide (1 or 2).
 *
 * Returns the cached value, or (unsigned int)-1 when @base is NULL or
 * @word_size is unsupported.
 */
static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
					  unsigned int word_size)
{
	if (!base)
		return -1;

	switch (word_size) {
	case 1: {
		const uint8_t *cache = base;

		return cache[idx];
	}
	case 2: {
		const uint16_t *cache = base;

		return cache[idx];
	}
	default:
		break;
	}
	return -1;
}
69 struct snd_soc_rbtree_node {
70 struct rb_node node; /* the actual rbtree node holding this block */
71 unsigned int base_reg; /* base register handled by this block */
72 unsigned int word_size; /* number of bytes needed to represent the register index */
73 void *block; /* block of adjacent registers */
74 unsigned int blklen; /* number of registers available in the block */
75 } __attribute__ ((packed));
77 struct snd_soc_rbtree_ctx {
79 struct snd_soc_rbtree_node *cached_rbnode;
82 static inline void snd_soc_rbtree_get_base_top_reg(
83 struct snd_soc_rbtree_node *rbnode,
84 unsigned int *base, unsigned int *top)
86 *base = rbnode->base_reg;
87 *top = rbnode->base_reg + rbnode->blklen - 1;
90 static unsigned int snd_soc_rbtree_get_register(
91 struct snd_soc_rbtree_node *rbnode, unsigned int idx)
95 switch (rbnode->word_size) {
97 u8 *p = rbnode->block;
102 u16 *p = rbnode->block;
113 static void snd_soc_rbtree_set_register(struct snd_soc_rbtree_node *rbnode,
114 unsigned int idx, unsigned int val)
116 switch (rbnode->word_size) {
118 u8 *p = rbnode->block;
123 u16 *p = rbnode->block;
133 static struct snd_soc_rbtree_node *snd_soc_rbtree_lookup(
134 struct rb_root *root, unsigned int reg)
136 struct rb_node *node;
137 struct snd_soc_rbtree_node *rbnode;
138 unsigned int base_reg, top_reg;
140 node = root->rb_node;
142 rbnode = container_of(node, struct snd_soc_rbtree_node, node);
143 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
144 if (reg >= base_reg && reg <= top_reg)
146 else if (reg > top_reg)
147 node = node->rb_right;
148 else if (reg < base_reg)
149 node = node->rb_left;
155 static int snd_soc_rbtree_insert(struct rb_root *root,
156 struct snd_soc_rbtree_node *rbnode)
158 struct rb_node **new, *parent;
159 struct snd_soc_rbtree_node *rbnode_tmp;
160 unsigned int base_reg_tmp, top_reg_tmp;
161 unsigned int base_reg;
164 new = &root->rb_node;
166 rbnode_tmp = container_of(*new, struct snd_soc_rbtree_node,
168 /* base and top registers of the current rbnode */
169 snd_soc_rbtree_get_base_top_reg(rbnode_tmp, &base_reg_tmp,
171 /* base register of the rbnode to be added */
172 base_reg = rbnode->base_reg;
174 /* if this register has already been inserted, just return */
175 if (base_reg >= base_reg_tmp &&
176 base_reg <= top_reg_tmp)
178 else if (base_reg > top_reg_tmp)
179 new = &((*new)->rb_right);
180 else if (base_reg < base_reg_tmp)
181 new = &((*new)->rb_left);
184 /* insert the node into the rbtree */
185 rb_link_node(&rbnode->node, parent, new);
186 rb_insert_color(&rbnode->node, root);
191 static int snd_soc_rbtree_cache_sync(struct snd_soc_codec *codec)
193 struct snd_soc_rbtree_ctx *rbtree_ctx;
194 struct rb_node *node;
195 struct snd_soc_rbtree_node *rbnode;
197 unsigned int val, def;
201 rbtree_ctx = codec->reg_cache;
202 for (node = rb_first(&rbtree_ctx->root); node; node = rb_next(node)) {
203 rbnode = rb_entry(node, struct snd_soc_rbtree_node, node);
204 for (i = 0; i < rbnode->blklen; ++i) {
205 regtmp = rbnode->base_reg + i;
206 val = snd_soc_rbtree_get_register(rbnode, i);
207 def = snd_soc_get_cache_val(codec->reg_def_copy, i,
212 WARN_ON(!snd_soc_codec_writable_register(codec, regtmp));
214 codec->cache_bypass = 1;
215 ret = snd_soc_write(codec, regtmp, val);
216 codec->cache_bypass = 0;
219 dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
227 static int snd_soc_rbtree_insert_to_block(struct snd_soc_rbtree_node *rbnode,
228 unsigned int pos, unsigned int reg,
233 blk = krealloc(rbnode->block,
234 (rbnode->blklen + 1) * rbnode->word_size, GFP_KERNEL);
238 /* insert the register value in the correct place in the rbnode block */
239 memmove(blk + (pos + 1) * rbnode->word_size,
240 blk + pos * rbnode->word_size,
241 (rbnode->blklen - pos) * rbnode->word_size);
243 /* update the rbnode block, its size and the base register */
247 rbnode->base_reg = reg;
249 snd_soc_rbtree_set_register(rbnode, pos, value);
253 static int snd_soc_rbtree_cache_write(struct snd_soc_codec *codec,
254 unsigned int reg, unsigned int value)
256 struct snd_soc_rbtree_ctx *rbtree_ctx;
257 struct snd_soc_rbtree_node *rbnode, *rbnode_tmp;
258 struct rb_node *node;
260 unsigned int reg_tmp;
261 unsigned int base_reg, top_reg;
266 rbtree_ctx = codec->reg_cache;
267 /* look up the required register in the cached rbnode */
268 rbnode = rbtree_ctx->cached_rbnode;
270 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
271 if (reg >= base_reg && reg <= top_reg) {
272 reg_tmp = reg - base_reg;
273 val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
276 snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
280 /* if we can't locate it in the cached rbnode we'll have
281 * to traverse the rbtree looking for it.
283 rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
285 reg_tmp = reg - rbnode->base_reg;
286 val = snd_soc_rbtree_get_register(rbnode, reg_tmp);
289 snd_soc_rbtree_set_register(rbnode, reg_tmp, value);
290 rbtree_ctx->cached_rbnode = rbnode;
292 /* bail out early, no need to create the rbnode yet */
295 /* look for an adjacent register to the one we are about to add */
296 for (node = rb_first(&rbtree_ctx->root); node;
297 node = rb_next(node)) {
298 rbnode_tmp = rb_entry(node, struct snd_soc_rbtree_node, node);
299 for (i = 0; i < rbnode_tmp->blklen; ++i) {
300 reg_tmp = rbnode_tmp->base_reg + i;
301 if (abs(reg_tmp - reg) != 1)
303 /* decide where in the block to place our register */
304 if (reg_tmp + 1 == reg)
308 ret = snd_soc_rbtree_insert_to_block(rbnode_tmp, pos,
312 rbtree_ctx->cached_rbnode = rbnode_tmp;
316 /* we did not manage to find a place to insert it in an existing
317 * block so create a new rbnode with a single register in its block.
318 * This block will get populated further if any other adjacent
319 * registers get modified in the future.
321 rbnode = kzalloc(sizeof *rbnode, GFP_KERNEL);
325 rbnode->base_reg = reg;
326 rbnode->word_size = codec->driver->reg_word_size;
327 rbnode->block = kmalloc(rbnode->blklen * rbnode->word_size,
329 if (!rbnode->block) {
333 snd_soc_rbtree_set_register(rbnode, 0, value);
334 snd_soc_rbtree_insert(&rbtree_ctx->root, rbnode);
335 rbtree_ctx->cached_rbnode = rbnode;
341 static int snd_soc_rbtree_cache_read(struct snd_soc_codec *codec,
342 unsigned int reg, unsigned int *value)
344 struct snd_soc_rbtree_ctx *rbtree_ctx;
345 struct snd_soc_rbtree_node *rbnode;
346 unsigned int base_reg, top_reg;
347 unsigned int reg_tmp;
349 rbtree_ctx = codec->reg_cache;
350 /* look up the required register in the cached rbnode */
351 rbnode = rbtree_ctx->cached_rbnode;
353 snd_soc_rbtree_get_base_top_reg(rbnode, &base_reg, &top_reg);
354 if (reg >= base_reg && reg <= top_reg) {
355 reg_tmp = reg - base_reg;
356 *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
360 /* if we can't locate it in the cached rbnode we'll have
361 * to traverse the rbtree looking for it.
363 rbnode = snd_soc_rbtree_lookup(&rbtree_ctx->root, reg);
365 reg_tmp = reg - rbnode->base_reg;
366 *value = snd_soc_rbtree_get_register(rbnode, reg_tmp);
367 rbtree_ctx->cached_rbnode = rbnode;
369 /* uninitialized registers default to 0 */
376 static int snd_soc_rbtree_cache_exit(struct snd_soc_codec *codec)
378 struct rb_node *next;
379 struct snd_soc_rbtree_ctx *rbtree_ctx;
380 struct snd_soc_rbtree_node *rbtree_node;
382 /* if we've already been called then just return */
383 rbtree_ctx = codec->reg_cache;
387 /* free up the rbtree */
388 next = rb_first(&rbtree_ctx->root);
390 rbtree_node = rb_entry(next, struct snd_soc_rbtree_node, node);
391 next = rb_next(&rbtree_node->node);
392 rb_erase(&rbtree_node->node, &rbtree_ctx->root);
393 kfree(rbtree_node->block);
397 /* release the resources */
398 kfree(codec->reg_cache);
399 codec->reg_cache = NULL;
404 static int snd_soc_rbtree_cache_init(struct snd_soc_codec *codec)
406 struct snd_soc_rbtree_ctx *rbtree_ctx;
407 unsigned int word_size;
412 codec->reg_cache = kmalloc(sizeof *rbtree_ctx, GFP_KERNEL);
413 if (!codec->reg_cache)
416 rbtree_ctx = codec->reg_cache;
417 rbtree_ctx->root = RB_ROOT;
418 rbtree_ctx->cached_rbnode = NULL;
420 if (!codec->reg_def_copy)
423 word_size = codec->driver->reg_word_size;
424 for (i = 0; i < codec->driver->reg_cache_size; ++i) {
425 val = snd_soc_get_cache_val(codec->reg_def_copy, i,
429 ret = snd_soc_rbtree_cache_write(codec, i, val);
437 snd_soc_cache_exit(codec);
441 static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
445 const struct snd_soc_codec_driver *codec_drv;
448 codec_drv = codec->driver;
449 for (i = 0; i < codec_drv->reg_cache_size; ++i) {
450 ret = snd_soc_cache_read(codec, i, &val);
453 if (codec->reg_def_copy)
454 if (snd_soc_get_cache_val(codec->reg_def_copy,
455 i, codec_drv->reg_word_size) == val)
458 WARN_ON(!snd_soc_codec_writable_register(codec, i));
460 ret = snd_soc_write(codec, i, val);
463 dev_dbg(codec->dev, "Synced register %#x, value = %#x\n",
469 static int snd_soc_flat_cache_write(struct snd_soc_codec *codec,
470 unsigned int reg, unsigned int value)
472 snd_soc_set_cache_val(codec->reg_cache, reg, value,
473 codec->driver->reg_word_size);
477 static int snd_soc_flat_cache_read(struct snd_soc_codec *codec,
478 unsigned int reg, unsigned int *value)
480 *value = snd_soc_get_cache_val(codec->reg_cache, reg,
481 codec->driver->reg_word_size);
485 static int snd_soc_flat_cache_exit(struct snd_soc_codec *codec)
487 if (!codec->reg_cache)
489 kfree(codec->reg_cache);
490 codec->reg_cache = NULL;
494 static int snd_soc_flat_cache_init(struct snd_soc_codec *codec)
496 if (codec->reg_def_copy)
497 codec->reg_cache = kmemdup(codec->reg_def_copy,
498 codec->reg_size, GFP_KERNEL);
500 codec->reg_cache = kzalloc(codec->reg_size, GFP_KERNEL);
501 if (!codec->reg_cache)
507 /* an array of all supported compression types */
508 static const struct snd_soc_cache_ops cache_types[] = {
509 /* Flat *must* be the first entry for fallback */
511 .id = SND_SOC_FLAT_COMPRESSION,
513 .init = snd_soc_flat_cache_init,
514 .exit = snd_soc_flat_cache_exit,
515 .read = snd_soc_flat_cache_read,
516 .write = snd_soc_flat_cache_write,
517 .sync = snd_soc_flat_cache_sync
520 .id = SND_SOC_RBTREE_COMPRESSION,
522 .init = snd_soc_rbtree_cache_init,
523 .exit = snd_soc_rbtree_cache_exit,
524 .read = snd_soc_rbtree_cache_read,
525 .write = snd_soc_rbtree_cache_write,
526 .sync = snd_soc_rbtree_cache_sync
530 int snd_soc_cache_init(struct snd_soc_codec *codec)
534 for (i = 0; i < ARRAY_SIZE(cache_types); ++i)
535 if (cache_types[i].id == codec->compress_type)
538 /* Fall back to flat compression */
539 if (i == ARRAY_SIZE(cache_types)) {
540 dev_warn(codec->dev, "Could not match compress type: %d\n",
541 codec->compress_type);
545 mutex_init(&codec->cache_rw_mutex);
546 codec->cache_ops = &cache_types[i];
548 if (codec->cache_ops->init) {
549 if (codec->cache_ops->name)
550 dev_dbg(codec->dev, "Initializing %s cache for %s codec\n",
551 codec->cache_ops->name, codec->name);
552 return codec->cache_ops->init(codec);
558 * NOTE: keep in mind that this function might be called
561 int snd_soc_cache_exit(struct snd_soc_codec *codec)
563 if (codec->cache_ops && codec->cache_ops->exit) {
564 if (codec->cache_ops->name)
565 dev_dbg(codec->dev, "Destroying %s cache for %s codec\n",
566 codec->cache_ops->name, codec->name);
567 return codec->cache_ops->exit(codec);
573 * snd_soc_cache_read: Fetch the value of a given register from the cache.
575 * @codec: CODEC to configure.
576 * @reg: The register index.
577 * @value: The value to be returned.
579 int snd_soc_cache_read(struct snd_soc_codec *codec,
580 unsigned int reg, unsigned int *value)
584 mutex_lock(&codec->cache_rw_mutex);
586 if (value && codec->cache_ops && codec->cache_ops->read) {
587 ret = codec->cache_ops->read(codec, reg, value);
588 mutex_unlock(&codec->cache_rw_mutex);
592 mutex_unlock(&codec->cache_rw_mutex);
595 EXPORT_SYMBOL_GPL(snd_soc_cache_read);
598 * snd_soc_cache_write: Set the value of a given register in the cache.
600 * @codec: CODEC to configure.
601 * @reg: The register index.
602 * @value: The new register value.
604 int snd_soc_cache_write(struct snd_soc_codec *codec,
605 unsigned int reg, unsigned int value)
609 mutex_lock(&codec->cache_rw_mutex);
611 if (codec->cache_ops && codec->cache_ops->write) {
612 ret = codec->cache_ops->write(codec, reg, value);
613 mutex_unlock(&codec->cache_rw_mutex);
617 mutex_unlock(&codec->cache_rw_mutex);
620 EXPORT_SYMBOL_GPL(snd_soc_cache_write);
623 * snd_soc_cache_sync: Sync the register cache with the hardware.
625 * @codec: CODEC to configure.
627 * Any registers that should not be synced should be marked as
628 * volatile. In general drivers can choose not to use the provided
629 * syncing functionality if they so require.
631 int snd_soc_cache_sync(struct snd_soc_codec *codec)
636 if (!codec->cache_sync) {
640 if (!codec->cache_ops || !codec->cache_ops->sync)
643 if (codec->cache_ops->name)
644 name = codec->cache_ops->name;
648 if (codec->cache_ops->name)
649 dev_dbg(codec->dev, "Syncing %s cache for %s codec\n",
650 codec->cache_ops->name, codec->name);
651 trace_snd_soc_cache_sync(codec, name, "start");
652 ret = codec->cache_ops->sync(codec);
654 codec->cache_sync = 0;
655 trace_snd_soc_cache_sync(codec, name, "end");
658 EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
660 static int snd_soc_get_reg_access_index(struct snd_soc_codec *codec,
663 const struct snd_soc_codec_driver *codec_drv;
664 unsigned int min, max, index;
666 codec_drv = codec->driver;
668 max = codec_drv->reg_access_size - 1;
670 index = (min + max) / 2;
671 if (codec_drv->reg_access_default[index].reg == reg)
673 if (codec_drv->reg_access_default[index].reg < reg)
677 } while (min <= max);
681 int snd_soc_default_volatile_register(struct snd_soc_codec *codec,
686 if (reg >= codec->driver->reg_cache_size)
688 index = snd_soc_get_reg_access_index(codec, reg);
691 return codec->driver->reg_access_default[index].vol;
693 EXPORT_SYMBOL_GPL(snd_soc_default_volatile_register);
695 int snd_soc_default_readable_register(struct snd_soc_codec *codec,
700 if (reg >= codec->driver->reg_cache_size)
702 index = snd_soc_get_reg_access_index(codec, reg);
705 return codec->driver->reg_access_default[index].read;
707 EXPORT_SYMBOL_GPL(snd_soc_default_readable_register);
709 int snd_soc_default_writable_register(struct snd_soc_codec *codec,
714 if (reg >= codec->driver->reg_cache_size)
716 index = snd_soc_get_reg_access_index(codec, reg);
719 return codec->driver->reg_access_default[index].write;
721 EXPORT_SYMBOL_GPL(snd_soc_default_writable_register);