blob: 938cb1d2ea2658426d6d9beec86372d7f017086c [file] [log] [blame]
Dimitris Papastamos9fabe242011-09-19 14:34:00 +01001/*
2 * Register cache access API
3 *
4 * Copyright 2011 Wolfson Microelectronics plc
5 *
6 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/slab.h>
Paul Gortmaker1b6bc322011-05-27 07:12:15 -040014#include <linux/export.h>
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010015#include <trace/events/regmap.h>
Mark Brownf094fea2011-10-04 22:05:47 +010016#include <linux/bsearch.h>
Dimitris Papastamosc08604b2011-10-03 10:50:14 +010017#include <linux/sort.h>
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010018
19#include "internal.h"
20
/* Table of all compiled-in cache implementations; regcache_init()
 * matches map->cache_type against each entry's ->type field. */
static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
};
25
26static int regcache_hw_init(struct regmap *map)
27{
28 int i, j;
29 int ret;
30 int count;
31 unsigned int val;
32 void *tmp_buf;
33
34 if (!map->num_reg_defaults_raw)
35 return -EINVAL;
36
37 if (!map->reg_defaults_raw) {
Laxman Dewangandf00c792012-02-17 18:57:26 +053038 u32 cache_bypass = map->cache_bypass;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010039 dev_warn(map->dev, "No cache defaults, reading back from HW\n");
Laxman Dewangandf00c792012-02-17 18:57:26 +053040
41 /* Bypass the cache access till data read from HW*/
42 map->cache_bypass = 1;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010043 tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
44 if (!tmp_buf)
45 return -EINVAL;
46 ret = regmap_bulk_read(map, 0, tmp_buf,
47 map->num_reg_defaults_raw);
Laxman Dewangandf00c792012-02-17 18:57:26 +053048 map->cache_bypass = cache_bypass;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010049 if (ret < 0) {
50 kfree(tmp_buf);
51 return ret;
52 }
53 map->reg_defaults_raw = tmp_buf;
54 map->cache_free = 1;
55 }
56
57 /* calculate the size of reg_defaults */
58 for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
59 val = regcache_get_val(map->reg_defaults_raw,
60 i, map->cache_word_size);
Lars-Peter Clausen61cddc52012-02-15 10:23:25 +010061 if (regmap_volatile(map, i))
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010062 continue;
63 count++;
64 }
65
66 map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
67 GFP_KERNEL);
Lars-Peter Clausen021cd612011-11-14 10:40:16 +010068 if (!map->reg_defaults) {
69 ret = -ENOMEM;
70 goto err_free;
71 }
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010072
73 /* fill the reg_defaults */
74 map->num_reg_defaults = count;
75 for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
76 val = regcache_get_val(map->reg_defaults_raw,
77 i, map->cache_word_size);
Lars-Peter Clausen61cddc52012-02-15 10:23:25 +010078 if (regmap_volatile(map, i))
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010079 continue;
80 map->reg_defaults[j].reg = i;
81 map->reg_defaults[j].def = val;
82 j++;
83 }
84
85 return 0;
Lars-Peter Clausen021cd612011-11-14 10:40:16 +010086
87err_free:
88 if (map->cache_free)
89 kfree(map->reg_defaults_raw);
90
91 return ret;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010092}
93
Lars-Peter Clausene5e3b8a2011-11-16 16:28:16 +010094int regcache_init(struct regmap *map, const struct regmap_config *config)
Dimitris Papastamos9fabe242011-09-19 14:34:00 +010095{
96 int ret;
97 int i;
98 void *tmp_buf;
99
Mark Browne7a6db32011-09-19 16:08:03 +0100100 if (map->cache_type == REGCACHE_NONE) {
101 map->cache_bypass = true;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100102 return 0;
Mark Browne7a6db32011-09-19 16:08:03 +0100103 }
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100104
105 for (i = 0; i < ARRAY_SIZE(cache_types); i++)
106 if (cache_types[i]->type == map->cache_type)
107 break;
108
109 if (i == ARRAY_SIZE(cache_types)) {
110 dev_err(map->dev, "Could not match compress type: %d\n",
111 map->cache_type);
112 return -EINVAL;
113 }
114
Lars-Peter Clausene5e3b8a2011-11-16 16:28:16 +0100115 map->num_reg_defaults = config->num_reg_defaults;
116 map->num_reg_defaults_raw = config->num_reg_defaults_raw;
117 map->reg_defaults_raw = config->reg_defaults_raw;
Lars-Peter Clausen064d4db2011-11-16 20:34:03 +0100118 map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
119 map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;
Lars-Peter Clausene5e3b8a2011-11-16 16:28:16 +0100120
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100121 map->cache = NULL;
122 map->cache_ops = cache_types[i];
123
124 if (!map->cache_ops->read ||
125 !map->cache_ops->write ||
126 !map->cache_ops->name)
127 return -EINVAL;
128
129 /* We still need to ensure that the reg_defaults
130 * won't vanish from under us. We'll need to make
131 * a copy of it.
132 */
Lars-Peter Clausen720e4612011-11-16 16:28:17 +0100133 if (config->reg_defaults) {
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100134 if (!map->num_reg_defaults)
135 return -EINVAL;
Lars-Peter Clausen720e4612011-11-16 16:28:17 +0100136 tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100137 sizeof(struct reg_default), GFP_KERNEL);
138 if (!tmp_buf)
139 return -ENOMEM;
140 map->reg_defaults = tmp_buf;
Mark Brown8528bdd2011-10-09 13:13:58 +0100141 } else if (map->num_reg_defaults_raw) {
Mark Brown5fcd2562011-09-29 15:24:54 +0100142 /* Some devices such as PMICs don't have cache defaults,
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100143 * we cope with this by reading back the HW registers and
144 * crafting the cache defaults by hand.
145 */
146 ret = regcache_hw_init(map);
147 if (ret < 0)
148 return ret;
149 }
150
151 if (!map->max_register)
152 map->max_register = map->num_reg_defaults_raw;
153
154 if (map->cache_ops->init) {
155 dev_dbg(map->dev, "Initializing %s cache\n",
156 map->cache_ops->name);
Lars-Peter Clausenbd061c72011-11-14 10:40:17 +0100157 ret = map->cache_ops->init(map);
158 if (ret)
159 goto err_free;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100160 }
161 return 0;
Lars-Peter Clausenbd061c72011-11-14 10:40:17 +0100162
163err_free:
164 kfree(map->reg_defaults);
165 if (map->cache_free)
166 kfree(map->reg_defaults_raw);
167
168 return ret;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100169}
170
171void regcache_exit(struct regmap *map)
172{
173 if (map->cache_type == REGCACHE_NONE)
174 return;
175
176 BUG_ON(!map->cache_ops);
177
178 kfree(map->reg_defaults);
179 if (map->cache_free)
180 kfree(map->reg_defaults_raw);
181
182 if (map->cache_ops->exit) {
183 dev_dbg(map->dev, "Destroying %s cache\n",
184 map->cache_ops->name);
185 map->cache_ops->exit(map);
186 }
187}
188
189/**
190 * regcache_read: Fetch the value of a given register from the cache.
191 *
192 * @map: map to configure.
193 * @reg: The register index.
194 * @value: The value to be returned.
195 *
196 * Return a negative value on failure, 0 on success.
197 */
198int regcache_read(struct regmap *map,
199 unsigned int reg, unsigned int *value)
200{
Mark Brownbc7ee552011-11-30 14:27:08 +0000201 int ret;
202
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100203 if (map->cache_type == REGCACHE_NONE)
204 return -ENOSYS;
205
206 BUG_ON(!map->cache_ops);
207
Mark Brownbc7ee552011-11-30 14:27:08 +0000208 if (!regmap_volatile(map, reg)) {
209 ret = map->cache_ops->read(map, reg, value);
210
211 if (ret == 0)
212 trace_regmap_reg_read_cache(map->dev, reg, *value);
213
214 return ret;
215 }
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100216
217 return -EINVAL;
218}
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100219
220/**
221 * regcache_write: Set the value of a given register in the cache.
222 *
223 * @map: map to configure.
224 * @reg: The register index.
225 * @value: The new register value.
226 *
227 * Return a negative value on failure, 0 on success.
228 */
229int regcache_write(struct regmap *map,
230 unsigned int reg, unsigned int value)
231{
232 if (map->cache_type == REGCACHE_NONE)
233 return 0;
234
235 BUG_ON(!map->cache_ops);
236
237 if (!regmap_writeable(map, reg))
238 return -EIO;
239
240 if (!regmap_volatile(map, reg))
241 return map->cache_ops->write(map, reg, value);
242
243 return 0;
244}
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100245
246/**
247 * regcache_sync: Sync the register cache with the hardware.
248 *
249 * @map: map to configure.
250 *
251 * Any registers that should not be synced should be marked as
252 * volatile. In general drivers can choose not to use the provided
253 * syncing functionality if they so require.
254 *
255 * Return a negative value on failure, 0 on success.
256 */
257int regcache_sync(struct regmap *map)
258{
Dimitris Papastamos954757d2011-09-27 11:25:06 +0100259 int ret = 0;
Dimitris Papastamos954757d2011-09-27 11:25:06 +0100260 unsigned int i;
Dimitris Papastamos59360082011-09-19 14:34:04 +0100261 const char *name;
Dimitris Papastamosbeb1a102011-09-29 14:36:26 +0100262 unsigned int bypass;
Dimitris Papastamos59360082011-09-19 14:34:04 +0100263
Mark Brownc3ec2322012-02-23 20:48:40 +0000264 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100265
Dimitris Papastamos13753a92011-09-29 14:36:25 +0100266 mutex_lock(&map->lock);
Dimitris Papastamosbeb1a102011-09-29 14:36:26 +0100267 /* Remember the initial bypass state */
268 bypass = map->cache_bypass;
Dimitris Papastamos954757d2011-09-27 11:25:06 +0100269 dev_dbg(map->dev, "Syncing %s cache\n",
270 map->cache_ops->name);
271 name = map->cache_ops->name;
272 trace_regcache_sync(map->dev, name, "start");
Mark Brown22f0d902012-01-21 12:01:14 +0000273
Mark Brown8ae0d7e2011-10-26 10:34:22 +0200274 if (!map->cache_dirty)
275 goto out;
Mark Brownd9db7622012-01-25 21:06:33 +0000276
Mark Brown22f0d902012-01-21 12:01:14 +0000277 /* Apply any patch first */
Mark Brown8a892d62012-01-25 21:05:48 +0000278 map->cache_bypass = 1;
Mark Brown22f0d902012-01-21 12:01:14 +0000279 for (i = 0; i < map->patch_regs; i++) {
280 ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
281 if (ret != 0) {
282 dev_err(map->dev, "Failed to write %x = %x: %d\n",
283 map->patch[i].reg, map->patch[i].def, ret);
284 goto out;
285 }
286 }
Mark Brown8a892d62012-01-25 21:05:48 +0000287 map->cache_bypass = 0;
Mark Brown22f0d902012-01-21 12:01:14 +0000288
Mark Brownac8d91c2012-02-23 19:31:04 +0000289 ret = map->cache_ops->sync(map, 0, map->max_register);
Dimitris Papastamos954757d2011-09-27 11:25:06 +0100290
Mark Brown6ff73732012-02-23 22:05:59 +0000291 if (ret == 0)
292 map->cache_dirty = false;
293
Dimitris Papastamos954757d2011-09-27 11:25:06 +0100294out:
295 trace_regcache_sync(map->dev, name, "stop");
Dimitris Papastamosbeb1a102011-09-29 14:36:26 +0100296 /* Restore the bypass state */
297 map->cache_bypass = bypass;
Dimitris Papastamos13753a92011-09-29 14:36:25 +0100298 mutex_unlock(&map->lock);
Dimitris Papastamos954757d2011-09-27 11:25:06 +0100299
300 return ret;
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100301}
302EXPORT_SYMBOL_GPL(regcache_sync);
303
Mark Brown92afb282011-09-19 18:22:14 +0100304/**
Mark Brown4d4cfd12012-02-23 20:53:37 +0000305 * regcache_sync_region: Sync part of the register cache with the hardware.
306 *
307 * @map: map to sync.
308 * @min: first register to sync
309 * @max: last register to sync
310 *
311 * Write all non-default register values in the specified region to
312 * the hardware.
313 *
314 * Return a negative value on failure, 0 on success.
315 */
316int regcache_sync_region(struct regmap *map, unsigned int min,
317 unsigned int max)
318{
319 int ret = 0;
320 const char *name;
321 unsigned int bypass;
322
323 BUG_ON(!map->cache_ops || !map->cache_ops->sync);
324
325 mutex_lock(&map->lock);
326
327 /* Remember the initial bypass state */
328 bypass = map->cache_bypass;
329
330 name = map->cache_ops->name;
331 dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);
332
333 trace_regcache_sync(map->dev, name, "start region");
334
335 if (!map->cache_dirty)
336 goto out;
337
338 ret = map->cache_ops->sync(map, min, max);
339
340out:
341 trace_regcache_sync(map->dev, name, "stop region");
342 /* Restore the bypass state */
343 map->cache_bypass = bypass;
344 mutex_unlock(&map->lock);
345
346 return ret;
347}
348
349/**
Mark Brown92afb282011-09-19 18:22:14 +0100350 * regcache_cache_only: Put a register map into cache only mode
351 *
352 * @map: map to configure
353 * @cache_only: flag if changes should be written to the hardware
354 *
355 * When a register map is marked as cache only writes to the register
356 * map API will only update the register cache, they will not cause
357 * any hardware changes. This is useful for allowing portions of
358 * drivers to act as though the device were functioning as normal when
359 * it is disabled for power saving reasons.
360 */
361void regcache_cache_only(struct regmap *map, bool enable)
362{
Mark Brown2cd148f2011-09-29 10:40:55 +0100363 mutex_lock(&map->lock);
Dimitris Papastamosac77a762011-09-29 14:36:28 +0100364 WARN_ON(map->cache_bypass && enable);
Mark Brown92afb282011-09-19 18:22:14 +0100365 map->cache_only = enable;
Mark Brown5d5b7d42012-02-23 22:02:57 +0000366 trace_regmap_cache_only(map->dev, enable);
Mark Brown2cd148f2011-09-29 10:40:55 +0100367 mutex_unlock(&map->lock);
Mark Brown92afb282011-09-19 18:22:14 +0100368}
369EXPORT_SYMBOL_GPL(regcache_cache_only);
370
Dimitris Papastamos6eb0f5e2011-09-29 14:36:27 +0100371/**
Mark Brown8ae0d7e2011-10-26 10:34:22 +0200372 * regcache_mark_dirty: Mark the register cache as dirty
373 *
374 * @map: map to mark
375 *
376 * Mark the register cache as dirty, for example due to the device
377 * having been powered down for suspend. If the cache is not marked
378 * as dirty then the cache sync will be suppressed.
379 */
380void regcache_mark_dirty(struct regmap *map)
381{
382 mutex_lock(&map->lock);
383 map->cache_dirty = true;
384 mutex_unlock(&map->lock);
385}
386EXPORT_SYMBOL_GPL(regcache_mark_dirty);
387
388/**
Dimitris Papastamos6eb0f5e2011-09-29 14:36:27 +0100389 * regcache_cache_bypass: Put a register map into cache bypass mode
390 *
391 * @map: map to configure
Dimitris Papastamos0eef6b02011-10-03 06:54:16 +0100392 * @cache_bypass: flag if changes should not be written to the hardware
Dimitris Papastamos6eb0f5e2011-09-29 14:36:27 +0100393 *
394 * When a register map is marked with the cache bypass option, writes
395 * to the register map API will only update the hardware and not the
396 * the cache directly. This is useful when syncing the cache back to
397 * the hardware.
398 */
399void regcache_cache_bypass(struct regmap *map, bool enable)
400{
401 mutex_lock(&map->lock);
Dimitris Papastamosac77a762011-09-29 14:36:28 +0100402 WARN_ON(map->cache_only && enable);
Dimitris Papastamos6eb0f5e2011-09-29 14:36:27 +0100403 map->cache_bypass = enable;
Mark Brown5d5b7d42012-02-23 22:02:57 +0000404 trace_regmap_cache_bypass(map->dev, enable);
Dimitris Papastamos6eb0f5e2011-09-29 14:36:27 +0100405 mutex_unlock(&map->lock);
406}
407EXPORT_SYMBOL_GPL(regcache_cache_bypass);
408
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100409bool regcache_set_val(void *base, unsigned int idx,
410 unsigned int val, unsigned int word_size)
411{
412 switch (word_size) {
413 case 1: {
414 u8 *cache = base;
415 if (cache[idx] == val)
416 return true;
417 cache[idx] = val;
418 break;
419 }
420 case 2: {
421 u16 *cache = base;
422 if (cache[idx] == val)
423 return true;
424 cache[idx] = val;
425 break;
426 }
Mark Brown7d5e5252012-02-17 15:58:25 -0800427 case 4: {
428 u32 *cache = base;
429 if (cache[idx] == val)
430 return true;
431 cache[idx] = val;
432 break;
433 }
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100434 default:
435 BUG();
436 }
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100437 return false;
438}
439
440unsigned int regcache_get_val(const void *base, unsigned int idx,
441 unsigned int word_size)
442{
443 if (!base)
444 return -EINVAL;
445
446 switch (word_size) {
447 case 1: {
448 const u8 *cache = base;
449 return cache[idx];
450 }
451 case 2: {
452 const u16 *cache = base;
453 return cache[idx];
454 }
Mark Brown7d5e5252012-02-17 15:58:25 -0800455 case 4: {
456 const u32 *cache = base;
457 return cache[idx];
458 }
Dimitris Papastamos9fabe242011-09-19 14:34:00 +0100459 default:
460 BUG();
461 }
462 /* unreachable */
463 return -1;
464}
465
Mark Brownf094fea2011-10-04 22:05:47 +0100466static int regcache_default_cmp(const void *a, const void *b)
Dimitris Papastamosc08604b2011-10-03 10:50:14 +0100467{
468 const struct reg_default *_a = a;
469 const struct reg_default *_b = b;
470
471 return _a->reg - _b->reg;
472}
473
Mark Brownf094fea2011-10-04 22:05:47 +0100474int regcache_lookup_reg(struct regmap *map, unsigned int reg)
475{
476 struct reg_default key;
477 struct reg_default *r;
478
479 key.reg = reg;
480 key.def = 0;
481
482 r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
483 sizeof(struct reg_default), regcache_default_cmp);
484
485 if (r)
486 return r - map->reg_defaults;
487 else
Mark Brown6e6ace02011-10-09 13:23:31 +0100488 return -ENOENT;
Mark Brownf094fea2011-10-04 22:05:47 +0100489}