unsigned int (*parse_val)(void *buf);
};
+typedef void (*regmap_lock)(struct regmap *map);
+typedef void (*regmap_unlock)(struct regmap *map);
+
struct regmap {
- struct mutex lock;
+ struct mutex mutex;
+ spinlock_t spinlock;
+ regmap_lock lock;
+ regmap_unlock unlock;
struct device *dev; /* Device we do I/O on */
void *work_buf; /* Scratch buffer used to format I/O */
int nodes = 0;
int registers = 0;
- mutex_lock(&map->lock);
+ map->lock(map);
for (node = rb_first(&rbtree_ctx->root); node != NULL;
node = rb_next(node)) {
seq_printf(s, "%d nodes, %d registers, average %d registers\n",
nodes, registers, registers / nodes);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return 0;
}
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- mutex_lock(&map->lock);
+ map->lock(map);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
dev_dbg(map->dev, "Syncing %s cache\n",
trace_regcache_sync(map->dev, name, "stop");
/* Restore the bypass state */
map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
BUG_ON(!map->cache_ops || !map->cache_ops->sync);
- mutex_lock(&map->lock);
+ map->lock(map);
/* Remember the initial bypass state */
bypass = map->cache_bypass;
trace_regcache_sync(map->dev, name, "stop region");
/* Restore the bypass state */
map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
*/
void regcache_cache_only(struct regmap *map, bool enable)
{
- mutex_lock(&map->lock);
+ map->lock(map);
WARN_ON(map->cache_bypass && enable);
map->cache_only = enable;
trace_regmap_cache_only(map->dev, enable);
- mutex_unlock(&map->lock);
+ map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
*/
void regcache_mark_dirty(struct regmap *map)
{
- mutex_lock(&map->lock);
+ map->lock(map);
map->cache_dirty = true;
- mutex_unlock(&map->lock);
+ map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);
*/
void regcache_cache_bypass(struct regmap *map, bool enable)
{
- mutex_lock(&map->lock);
+ map->lock(map);
WARN_ON(map->cache_only && enable);
map->cache_bypass = enable;
trace_regmap_cache_bypass(map->dev, enable);
- mutex_unlock(&map->lock);
+ map->unlock(map);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
return b[0];
}
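+/*
+ * Locking helpers: regmap_init() installs exactly one pair of these,
+ * chosen by bus->fast_io, so every former mutex_lock(&map->lock)
+ * call site can simply go through map->lock()/map->unlock().
+ */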
+static void regmap_lock_mutex(struct regmap *map)
+{
+ mutex_lock(&map->mutex);
+}
+
+static void regmap_unlock_mutex(struct regmap *map)
+{
+ mutex_unlock(&map->mutex);
+}
+
+static void regmap_lock_spinlock(struct regmap *map)
+{
+ spin_lock(&map->spinlock);
+}
+
+static void regmap_unlock_spinlock(struct regmap *map)
+{
+ spin_unlock(&map->spinlock);
+}
+
/**
* regmap_init(): Initialise register map
*
goto err;
}
- mutex_init(&map->lock);
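+	/*
+	 * Buses whose register I/O never sleeps (e.g. memory-mapped
+	 * registers) set fast_io and get spinlock locking; buses that
+	 * may sleep (I2C, SPI, ...) keep using a mutex.
+	 */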
+ if (bus->fast_io) {
+ spin_lock_init(&map->spinlock);
+ map->lock = regmap_lock_spinlock;
+ map->unlock = regmap_unlock_spinlock;
+ } else {
+ mutex_init(&map->mutex);
+ map->lock = regmap_lock_mutex;
+ map->unlock = regmap_unlock_mutex;
+ }
map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
map->format.pad_bytes = config->pad_bits / 8;
{
int ret;
- mutex_lock(&map->lock);
+ map->lock(map);
regcache_exit(map);
regmap_debugfs_exit(map);
ret = regcache_init(map, config);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
{
int ret;
- mutex_lock(&map->lock);
+ map->lock(map);
ret = _regmap_write(map, reg, val);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
{
int ret;
- mutex_lock(&map->lock);
+ map->lock(map);
ret = _regmap_raw_write(map, reg, val, val_len);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
if (!map->format.parse_val)
return -EINVAL;
- mutex_lock(&map->lock);
+ map->lock(map);
/* No formatting is required if val_bytes is 1 */
if (val_bytes == 1) {
kfree(wval);
out:
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
{
int ret;
- mutex_lock(&map->lock);
+ map->lock(map);
ret = _regmap_read(map, reg, val);
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
unsigned int v;
int ret, i;
- mutex_lock(&map->lock);
+ map->lock(map);
if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
map->cache_type == REGCACHE_NONE) {
}
out:
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
int ret;
unsigned int tmp, orig;
- mutex_lock(&map->lock);
+ map->lock(map);
ret = _regmap_read(map, reg, &orig);
if (ret != 0)
}
out:
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
if (map->patch)
return -EBUSY;
- mutex_lock(&map->lock);
+ map->lock(map);
bypass = map->cache_bypass;
out:
map->cache_bypass = bypass;
- mutex_unlock(&map->lock);
+ map->unlock(map);
return ret;
}
/**
* Description of a hardware bus for the register map infrastructure.
*
+ * @fast_io: Register IO is fast. Use a spinlock instead of a mutex
+ * to perform locking.
* @write: Write operation.
* @gather_write: Write operation with split register/value, return -ENOTSUPP
* if not implemented on a given device.
* a read.
*/
struct regmap_bus {
+ bool fast_io;
regmap_hw_write write;
regmap_hw_gather_write gather_write;
regmap_hw_read read;
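
As a usage sketch (not part of the patch above): a bus whose register I/O
never sleeps, such as a memory-mapped one, opts in by setting fast_io. The
my_mmio_* names are hypothetical, and the callback prototypes assume the
struct device * based regmap_hw_write/regmap_hw_read signatures used by
this kernel version.

static int my_mmio_write(struct device *dev, const void *data, size_t count)
{
	/* write 'count' pre-formatted bytes to the device's register window */
	return 0;
}

static int my_mmio_read(struct device *dev, const void *reg_buf,
			size_t reg_size, void *val_buf, size_t val_size)
{
	/* read 'val_size' bytes back for the register in 'reg_buf' */
	return 0;
}

static struct regmap_bus my_mmio_bus = {
	.fast_io = true,	/* regmap_init() selects spinlock locking */
	.write	 = my_mmio_write,
	.read	 = my_mmio_read,
};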