/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
        &regcache_rbtree_ops,
        &regcache_lzo_ops,
};

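/*
 * Build the reg_defaults table for a map registered without one: use the
 * raw defaults supplied by the driver if present, otherwise read the
 * current register values back from the hardware and use those.
 */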
static int regcache_hw_init(struct regmap *map)
{
        int i, j;
        int ret;
        int count;
        unsigned int val;
        void *tmp_buf;

        if (!map->num_reg_defaults_raw)
                return -EINVAL;

        if (!map->reg_defaults_raw) {
                dev_warn(map->dev, "No cache defaults, reading back from HW\n");
                tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
                if (!tmp_buf)
                        return -ENOMEM;
                ret = regmap_bulk_read(map, 0, tmp_buf,
                                       map->num_reg_defaults_raw);
                if (ret < 0) {
                        kfree(tmp_buf);
                        return ret;
                }
                map->reg_defaults_raw = tmp_buf;
                map->cache_free = 1;
        }

        /* calculate the size of reg_defaults */
        for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
                val = regcache_get_val(map->reg_defaults_raw,
                                       i, map->cache_word_size);
                if (regmap_volatile(map, i))
                        continue;
                count++;
        }

        map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
                                    GFP_KERNEL);
        if (!map->reg_defaults) {
                ret = -ENOMEM;
                goto err_free;
        }

        /* fill the reg_defaults */
        map->num_reg_defaults = count;
        for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
                val = regcache_get_val(map->reg_defaults_raw,
                                       i, map->cache_word_size);
                if (regmap_volatile(map, i))
                        continue;
                map->reg_defaults[j].reg = i;
                map->reg_defaults[j].def = val;
                j++;
        }

        return 0;

err_free:
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        return ret;
}

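/*
 * Set up the register cache for a map: select the cache implementation
 * requested by the driver, copy or construct the register defaults and
 * let the cache backend initialise itself.
 */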
int regcache_init(struct regmap *map, const struct regmap_config *config)
{
        int ret;
        int i;
        void *tmp_buf;

        if (map->cache_type == REGCACHE_NONE) {
                map->cache_bypass = true;
                return 0;
        }

        for (i = 0; i < ARRAY_SIZE(cache_types); i++)
                if (cache_types[i]->type == map->cache_type)
                        break;

        if (i == ARRAY_SIZE(cache_types)) {
                dev_err(map->dev, "Could not match cache type: %d\n",
                        map->cache_type);
                return -EINVAL;
        }

        map->num_reg_defaults = config->num_reg_defaults;
        map->num_reg_defaults_raw = config->num_reg_defaults_raw;
        map->reg_defaults_raw = config->reg_defaults_raw;
        map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
        map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

        map->cache = NULL;
        map->cache_ops = cache_types[i];

        if (!map->cache_ops->read ||
            !map->cache_ops->write ||
            !map->cache_ops->name)
                return -EINVAL;

        /* The reg_defaults supplied by the driver could vanish from
         * under us, so take our own copy of them.
         */
        if (config->reg_defaults) {
                if (!map->num_reg_defaults)
                        return -EINVAL;
                tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
                                  sizeof(struct reg_default), GFP_KERNEL);
                if (!tmp_buf)
                        return -ENOMEM;
                map->reg_defaults = tmp_buf;
        } else if (map->num_reg_defaults_raw) {
                /* Some devices such as PMICs don't have cache defaults;
                 * cope with this by reading back the HW registers and
                 * crafting the cache defaults by hand.
                 */
                ret = regcache_hw_init(map);
                if (ret < 0)
                        return ret;
        }

        if (!map->max_register)
                map->max_register = map->num_reg_defaults_raw;

        if (map->cache_ops->init) {
                dev_dbg(map->dev, "Initializing %s cache\n",
                        map->cache_ops->name);
                ret = map->cache_ops->init(map);
                if (ret)
                        goto err_free;
        }
        return 0;

err_free:
        kfree(map->reg_defaults);
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        return ret;
}

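/*
 * Tear down the register cache: free the register defaults and give the
 * cache backend a chance to release its own data.
 */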
void regcache_exit(struct regmap *map)
{
        if (map->cache_type == REGCACHE_NONE)
                return;

        BUG_ON(!map->cache_ops);

        kfree(map->reg_defaults);
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        if (map->cache_ops->exit) {
                dev_dbg(map->dev, "Destroying %s cache\n",
                        map->cache_ops->name);
                map->cache_ops->exit(map);
        }
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to read from.
 * @reg: The register index.
 * @value: Pointer in which to store the cached value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
                  unsigned int reg, unsigned int *value)
{
        int ret;

        if (map->cache_type == REGCACHE_NONE)
                return -ENOSYS;

        BUG_ON(!map->cache_ops);

        if (!regmap_volatile(map, reg)) {
                ret = map->cache_ops->read(map, reg, value);

                if (ret == 0)
                        trace_regmap_reg_read_cache(map->dev, reg, *value);

                return ret;
        }

        return -EINVAL;
}

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to write to.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
                   unsigned int reg, unsigned int value)
{
        if (map->cache_type == REGCACHE_NONE)
                return 0;

        BUG_ON(!map->cache_ops);

        if (!regmap_writeable(map, reg))
                return -EIO;

        if (!regmap_volatile(map, reg))
                return map->cache_ops->write(map, reg, value);

        return 0;
}

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to sync.
 *
 * Any registers that should not be synced should be marked as
 * volatile. In general drivers can choose not to use the provided
 * syncing functionality if they so wish.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
        int ret = 0;
        unsigned int i;
        const char *name;
        unsigned int bypass;

        BUG_ON(!map->cache_ops || !map->cache_ops->sync);

        mutex_lock(&map->lock);
        /* Remember the initial bypass state */
        bypass = map->cache_bypass;
        dev_dbg(map->dev, "Syncing %s cache\n",
                map->cache_ops->name);
        name = map->cache_ops->name;
        trace_regcache_sync(map->dev, name, "start");

        if (!map->cache_dirty)
                goto out;

        /* Apply any patch first */
        map->cache_bypass = 1;
        for (i = 0; i < map->patch_regs; i++) {
                ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to write %x = %x: %d\n",
                                map->patch[i].reg, map->patch[i].def, ret);
                        goto out;
                }
        }
        map->cache_bypass = 0;

        ret = map->cache_ops->sync(map, 0, map->max_register);

        if (ret == 0)
                map->cache_dirty = false;

out:
        trace_regcache_sync(map->dev, name, "stop");
        /* Restore the bypass state */
        map->cache_bypass = bypass;
        mutex_unlock(&map->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
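
/*
 * Typical usage (an illustrative sketch, not code from this file): a driver
 * whose device loses register state across suspend marks the cache dirty
 * before powering down and syncs it again on resume.  The structure and
 * function names below are hypothetical.
 *
 *      static int foo_suspend(struct device *dev)
 *      {
 *              struct foo *foo = dev_get_drvdata(dev);
 *
 *              regcache_cache_only(foo->regmap, true);
 *              regcache_mark_dirty(foo->regmap);
 *              foo_power_off(foo);
 *              return 0;
 *      }
 *
 *      static int foo_resume(struct device *dev)
 *      {
 *              struct foo *foo = dev_get_drvdata(dev);
 *
 *              foo_power_on(foo);
 *              regcache_cache_only(foo->regmap, false);
 *              return regcache_sync(foo->regmap);
 *      }
 */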

/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
                         unsigned int max)
{
        int ret = 0;
        const char *name;
        unsigned int bypass;

        BUG_ON(!map->cache_ops || !map->cache_ops->sync);

        mutex_lock(&map->lock);

        /* Remember the initial bypass state */
        bypass = map->cache_bypass;

        name = map->cache_ops->name;
        dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

        trace_regcache_sync(map->dev, name, "start region");

        if (!map->cache_dirty)
                goto out;

        ret = map->cache_ops->sync(map, min, max);

out:
        trace_regcache_sync(map->dev, name, "stop region");
        /* Restore the bypass state */
        map->cache_bypass = bypass;
        mutex_unlock(&map->lock);

        return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);

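/*
 * Sketch (hypothetical driver code): resynchronise just one block of
 * registers, for instance after a sub-block of the device has been reset
 * while the rest kept its state.  The register range is illustrative.
 *
 *      ret = regcache_sync_region(map, FOO_DSP_BASE, FOO_DSP_BASE + 0x1f);
 */
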
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if changes should only be written to the cache
 *
 * When a register map is marked as cache only, writes through the register
 * map API will only update the register cache; they will not cause any
 * hardware changes. This is useful for allowing portions of drivers to
 * act as though the device were functioning normally when it is disabled
 * for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
        mutex_lock(&map->lock);
        WARN_ON(map->cache_bypass && enable);
        map->cache_only = enable;
        trace_regmap_cache_only(map->dev, enable);
        mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
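
/*
 * Sketch (hypothetical driver code, FOO_GAIN_REG is an illustrative name):
 * with cache-only mode enabled the rest of the driver can keep using the
 * regmap API while the device is powered off; writes are stored in the
 * cache and reads of non-volatile registers are satisfied from it, without
 * touching the bus.
 *
 *      regcache_cache_only(map, true);
 *      regmap_write(map, FOO_GAIN_REG, 0x10);  (cached, not sent to HW)
 *      regcache_cache_only(map, false);
 *      regcache_sync(map);                     (0x10 now reaches the HW)
 */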

/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend. If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
        mutex_lock(&map->lock);
        map->cache_dirty = true;
        mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if changes should not be written to the cache
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache directly. This is useful when syncing the cache back to
 * the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
        mutex_lock(&map->lock);
        WARN_ON(map->cache_only && enable);
        map->cache_bypass = enable;
        trace_regmap_cache_bypass(map->dev, enable);
        mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
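
/*
 * Sketch (hypothetical driver code, FOO_RESET and FOO_RESET_BIT are
 * illustrative names): bypass is handy for one-off writes that must reach
 * the hardware but should not be recorded in the cache, such as poking a
 * self-clearing reset bit.
 *
 *      regcache_cache_bypass(map, true);
 *      regmap_write(map, FOO_RESET, FOO_RESET_BIT);
 *      regcache_cache_bypass(map, false);
 */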
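
/*
 * Store @val at index @idx of a raw register cache block.  Returns true if
 * the cache already held that value, false if it had to be updated.
 */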
bool regcache_set_val(void *base, unsigned int idx,
                      unsigned int val, unsigned int word_size)
{
        switch (word_size) {
        case 1: {
                u8 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        case 2: {
                u16 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        case 4: {
                u32 *cache = base;
                if (cache[idx] == val)
                        return true;
                cache[idx] = val;
                break;
        }
        default:
                BUG();
        }
        return false;
}
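
/*
 * Fetch the value at index @idx from a raw register cache block, widening
 * it from the cache word size to an unsigned int.
 */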
unsigned int regcache_get_val(const void *base, unsigned int idx,
                              unsigned int word_size)
{
        if (!base)
                return -EINVAL;

        switch (word_size) {
        case 1: {
                const u8 *cache = base;
                return cache[idx];
        }
        case 2: {
                const u16 *cache = base;
                return cache[idx];
        }
        case 4: {
                const u32 *cache = base;
                return cache[idx];
        }
        default:
                BUG();
        }
        /* unreachable */
        return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
        const struct reg_default *_a = a;
        const struct reg_default *_b = b;

        return _a->reg - _b->reg;
}
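
/*
 * Binary-search the sorted reg_defaults table for @reg and return its
 * index, or -ENOENT if the register has no default value.
 */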
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
        struct reg_default key;
        struct reg_default *r;

        key.reg = reg;
        key.def = 0;

        r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
                    sizeof(struct reg_default), regcache_default_cmp);

        if (r)
                return r - map->reg_defaults;
        else
                return -ENOENT;
}