/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
	&regcache_rbtree_ops,
	&regcache_lzo_ops,
};

static int regcache_hw_init(struct regmap *map)
{
	int i, j;
	int ret;
	int count;
	unsigned int val;
	void *tmp_buf;

	if (!map->num_reg_defaults_raw)
		return -EINVAL;

	if (!map->reg_defaults_raw) {
		dev_warn(map->dev, "No cache defaults, reading back from HW\n");
		tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		ret = regmap_bulk_read(map, 0, tmp_buf,
				       map->num_reg_defaults_raw);
		if (ret < 0) {
			kfree(tmp_buf);
			return ret;
		}
		map->reg_defaults_raw = tmp_buf;
		map->cache_free = 1;
	}

	/* calculate the size of reg_defaults */
	for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		if (!val)
			continue;
		count++;
	}

	map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
				    GFP_KERNEL);
	if (!map->reg_defaults) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* fill the reg_defaults */
	map->num_reg_defaults = count;
	for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
		val = regcache_get_val(map->reg_defaults_raw,
				       i, map->cache_word_size);
		if (!val)
			continue;
		map->reg_defaults[j].reg = i;
		map->reg_defaults[j].def = val;
		j++;
	}

	return 0;

err_free:
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
	int ret;
	int i;
	void *tmp_buf;

	if (map->cache_type == REGCACHE_NONE) {
		map->cache_bypass = true;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(cache_types); i++)
		if (cache_types[i]->type == map->cache_type)
			break;

	if (i == ARRAY_SIZE(cache_types)) {
		dev_err(map->dev, "Could not match cache type: %d\n",
			map->cache_type);
		return -EINVAL;
	}

	map->num_reg_defaults = config->num_reg_defaults;
	map->num_reg_defaults_raw = config->num_reg_defaults_raw;
	map->reg_defaults_raw = config->reg_defaults_raw;
	map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
	map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

	map->cache = NULL;
	map->cache_ops = cache_types[i];

	if (!map->cache_ops->read ||
	    !map->cache_ops->write ||
	    !map->cache_ops->name)
		return -EINVAL;

	/* We still need to ensure that the reg_defaults won't vanish
	 * from under us, so make our own copy of it.
	 */
	if (config->reg_defaults) {
		if (!map->num_reg_defaults)
			return -EINVAL;
		tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
				  sizeof(struct reg_default), GFP_KERNEL);
		if (!tmp_buf)
			return -ENOMEM;
		map->reg_defaults = tmp_buf;
	} else if (map->num_reg_defaults_raw) {
		/* Some devices such as PMICs don't have cache defaults;
		 * we cope with this by reading back the HW registers and
		 * crafting the cache defaults by hand.
		 */
		ret = regcache_hw_init(map);
		if (ret < 0)
			return ret;
	}

	if (!map->max_register)
		map->max_register = map->num_reg_defaults_raw;

	if (map->cache_ops->init) {
		dev_dbg(map->dev, "Initializing %s cache\n",
			map->cache_ops->name);
		ret = map->cache_ops->init(map);
		if (ret)
			goto err_free;
	}
	return 0;

err_free:
	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	return ret;
}

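/*
 * Example (illustrative only, not part of this file): a driver opts in to
 * the register cache by setting cache_type and supplying defaults in its
 * regmap_config; regcache_init() then picks the matching cache_ops entry
 * above.  The names foo_defaults and foo_regmap_config are hypothetical.
 *
 *	static const struct reg_default foo_defaults[] = {
 *		{ .reg = 0x00, .def = 0x0000 },
 *		{ .reg = 0x01, .def = 0x1234 },
 *	};
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits = 8,
 *		.val_bits = 16,
 *		.max_register = 0x10,
 *		.cache_type = REGCACHE_RBTREE,
 *		.reg_defaults = foo_defaults,
 *		.num_reg_defaults = ARRAY_SIZE(foo_defaults),
 *	};
 */
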
void regcache_exit(struct regmap *map)
{
	if (map->cache_type == REGCACHE_NONE)
		return;

	BUG_ON(!map->cache_ops);

	kfree(map->reg_defaults);
	if (map->cache_free)
		kfree(map->reg_defaults_raw);

	if (map->cache_ops->exit) {
		dev_dbg(map->dev, "Destroying %s cache\n",
			map->cache_ops->name);
		map->cache_ops->exit(map);
	}
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: Pointer through which the cached value is returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
		  unsigned int reg, unsigned int *value)
{
	if (map->cache_type == REGCACHE_NONE)
		return -ENOSYS;

	BUG_ON(!map->cache_ops);

	/* Volatile registers are never served from the cache */
	if (!regmap_volatile(map, reg))
		return map->cache_ops->read(map, reg, value);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(regcache_read);

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
		   unsigned int reg, unsigned int value)
{
	if (map->cache_type == REGCACHE_NONE)
		return 0;

	BUG_ON(!map->cache_ops);

	if (!regmap_writeable(map, reg))
		return -EIO;

	if (!regmap_volatile(map, reg))
		return map->cache_ops->write(map, reg, value);

	return 0;
}
EXPORT_SYMBOL_GPL(regcache_write);

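/*
 * Drivers do not normally call regcache_read()/regcache_write() directly;
 * they use regmap_read()/regmap_write() and the core keeps the cache
 * coherent for non-volatile registers.  Illustrative driver-side sketch
 * (foo_regmap is a hypothetical struct regmap * obtained from
 * regmap_init_i2c() or similar):
 *
 *	unsigned int val;
 *	int ret;
 *
 *	ret = regmap_write(foo_regmap, 0x02, 0xabcd);
 *	if (ret)
 *		return ret;
 *
 *	ret = regmap_read(foo_regmap, 0x02, &val);
 *	if (ret)
 *		return ret;
 *
 * The write updates both the hardware and the cache; the read of a
 * non-volatile register is typically served straight from the cache.
 */
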
/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
	int ret = 0;
	unsigned int val;
	unsigned int i;
	const char *name;
	unsigned int bypass;

	BUG_ON(!map->cache_ops);

	mutex_lock(&map->lock);
	/* Remember the initial bypass state */
	bypass = map->cache_bypass;
	dev_dbg(map->dev, "Syncing %s cache\n",
		map->cache_ops->name);
	name = map->cache_ops->name;
	trace_regcache_sync(map->dev, name, "start");
	if (!map->cache_dirty)
		goto out;
	if (map->cache_ops->sync) {
		ret = map->cache_ops->sync(map);
	} else {
		/* Write each cached value back out, bypassing the cache
		 * so that the writes actually reach the hardware.
		 */
		for (i = 0; i < map->num_reg_defaults; i++) {
			ret = regcache_read(map, map->reg_defaults[i].reg, &val);
			if (ret < 0)
				goto out;
			map->cache_bypass = 1;
			ret = _regmap_write(map, map->reg_defaults[i].reg, val);
			map->cache_bypass = 0;
			if (ret < 0)
				goto out;
			dev_dbg(map->dev, "Synced register %#x, value %#x\n",
				map->reg_defaults[i].reg, val);
		}
	}
out:
	trace_regcache_sync(map->dev, name, "stop");
	/* Restore the bypass state */
	map->cache_bypass = bypass;
	mutex_unlock(&map->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);

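/*
 * Typical power-management usage (illustrative sketch only; struct foo,
 * foo_suspend()/foo_resume() and foo->regmap are hypothetical driver
 * code, not part of this API):
 *
 *	static int foo_suspend(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(foo->regmap, true);
 *		regcache_mark_dirty(foo->regmap);
 *		return 0;
 *	}
 *
 *	static int foo_resume(struct device *dev)
 *	{
 *		struct foo *foo = dev_get_drvdata(dev);
 *
 *		regcache_cache_only(foo->regmap, false);
 *		return regcache_sync(foo->regmap);
 *	}
 *
 * While cache only mode is enabled the driver can keep writing registers;
 * the values land in the cache and regcache_sync() replays them once the
 * device is powered again.
 */
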
/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if writes should only update the cache and not the hardware
 *
 * When a register map is marked as cache only, writes through the
 * register map API will only update the register cache; they will not
 * cause any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
	mutex_lock(&map->lock);
	WARN_ON(map->cache_bypass && enable);
	map->cache_only = enable;
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);

313
Dimitris Papastamos6eb0f5e2011-09-29 14:36:27 +0100314/**
Mark Brown8ae0d7e2011-10-26 10:34:22 +0200315 * regcache_mark_dirty: Mark the register cache as dirty
316 *
317 * @map: map to mark
318 *
319 * Mark the register cache as dirty, for example due to the device
320 * having been powered down for suspend. If the cache is not marked
321 * as dirty then the cache sync will be suppressed.
322 */
323void regcache_mark_dirty(struct regmap *map)
324{
325 mutex_lock(&map->lock);
326 map->cache_dirty = true;
327 mutex_unlock(&map->lock);
328}
329EXPORT_SYMBOL_GPL(regcache_mark_dirty);
330
/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if writes should bypass the cache and only touch the hardware
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache directly.  This is useful when syncing the cache back to the
 * hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
	mutex_lock(&map->lock);
	WARN_ON(map->cache_only && enable);
	map->cache_bypass = enable;
	mutex_unlock(&map->lock);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);

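/*
 * Bypass is typically wrapped around a sequence that must reach the
 * hardware directly without disturbing the cache contents.  Illustrative
 * sketch (FOO_DSP_DATA, coeff and foo->regmap are hypothetical):
 *
 *	regcache_cache_bypass(foo->regmap, true);
 *	ret = regmap_write(foo->regmap, FOO_DSP_DATA, coeff);
 *	regcache_cache_bypass(foo->regmap, false);
 */
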
bool regcache_set_val(void *base, unsigned int idx,
		      unsigned int val, unsigned int word_size)
{
	switch (word_size) {
	case 1: {
		u8 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	case 2: {
		u16 *cache = base;
		if (cache[idx] == val)
			return true;
		cache[idx] = val;
		break;
	}
	default:
		BUG();
	}
	/* unreachable */
	return false;
}

unsigned int regcache_get_val(const void *base, unsigned int idx,
			      unsigned int word_size)
{
	if (!base)
		return -EINVAL;

	switch (word_size) {
	case 1: {
		const u8 *cache = base;
		return cache[idx];
	}
	case 2: {
		const u16 *cache = base;
		return cache[idx];
	}
	default:
		BUG();
	}
	/* unreachable */
	return -1;
}

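/*
 * The raw default table is addressed in units of cache_word_size, which
 * regcache_init() derives as DIV_ROUND_UP(val_bits, 8).  For example, a
 * device with 10-bit register values uses a word size of 2, so entry i
 * of reg_defaults_raw lives at byte offset i * 2 and is fetched through
 * the "case 2" branch above, as in regcache_hw_init():
 *
 *	val = regcache_get_val(map->reg_defaults_raw, i, map->cache_word_size);
 */
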
static int regcache_default_cmp(const void *a, const void *b)
{
	const struct reg_default *_a = a;
	const struct reg_default *_b = b;

	return _a->reg - _b->reg;
}

int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
	struct reg_default key;
	struct reg_default *r;

	key.reg = reg;
	key.def = 0;

	r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
		    sizeof(struct reg_default), regcache_default_cmp);

	if (r)
		return r - map->reg_defaults;
	else
		return -ENOENT;
}
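
/*
 * Illustrative use of regcache_lookup_reg() (a sketch, not code from this
 * file): a cache implementation can binary search the reg_defaults table,
 * which is expected to be sorted by register number, to recover the
 * power-on default for a register:
 *
 *	int idx = regcache_lookup_reg(map, reg);
 *	unsigned int def = (idx >= 0) ? map->reg_defaults[idx].def : 0;
 */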