/*
 * Register map access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/rbtree.h>

#define CREATE_TRACE_POINTS
#include <trace/events/regmap.h>

#include "internal.h"

/*
 * Sometimes for failures during very early init the trace
 * infrastructure isn't available early enough to be used.  For this
 * sort of problem defining LOG_DEVICE will add printks for basic
 * register I/O on a specific device.
 */
#undef LOG_DEVICE

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change);

bool regmap_writeable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->writeable_reg)
		return map->writeable_reg(map->dev, reg);

	return true;
}

bool regmap_readable(struct regmap *map, unsigned int reg)
{
	if (map->max_register && reg > map->max_register)
		return false;

	if (map->format.format_write)
		return false;

	if (map->readable_reg)
		return map->readable_reg(map->dev, reg);

	return true;
}

bool regmap_volatile(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->volatile_reg)
		return map->volatile_reg(map->dev, reg);

	return true;
}

bool regmap_precious(struct regmap *map, unsigned int reg)
{
	if (!regmap_readable(map, reg))
		return false;

	if (map->precious_reg)
		return map->precious_reg(map->dev, reg);

	return false;
}

static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
				  unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++)
		if (!regmap_volatile(map, reg + i))
			return false;

	return true;
}

static void regmap_format_2_6_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	*out = (reg << 6) | val;
}

static void regmap_format_4_12_write(struct regmap *map,
				     unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 12) | val);
}

static void regmap_format_7_9_write(struct regmap *map,
				    unsigned int reg, unsigned int val)
{
	__be16 *out = map->work_buf;
	*out = cpu_to_be16((reg << 9) | val);
}

static void regmap_format_10_14_write(struct regmap *map,
				      unsigned int reg, unsigned int val)
{
	u8 *out = map->work_buf;

	out[2] = val;
	out[1] = (val >> 8) | (reg << 6);
	out[0] = reg >> 2;
}

static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	b[0] = val << shift;
}

static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
{
	__be16 *b = buf;

	b[0] = cpu_to_be16(val << shift);
}

static void regmap_format_16_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u16 *)buf = val << shift;
}

static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
{
	u8 *b = buf;

	val <<= shift;

	b[0] = val >> 16;
	b[1] = val >> 8;
	b[2] = val;
}

static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
{
	__be32 *b = buf;

	b[0] = cpu_to_be32(val << shift);
}

static void regmap_format_32_native(void *buf, unsigned int val,
				    unsigned int shift)
{
	*(u32 *)buf = val << shift;
}

static unsigned int regmap_parse_8(void *buf)
{
	u8 *b = buf;

	return b[0];
}

static unsigned int regmap_parse_16_be(void *buf)
{
	__be16 *b = buf;

	b[0] = be16_to_cpu(b[0]);

	return b[0];
}

static unsigned int regmap_parse_16_native(void *buf)
{
	return *(u16 *)buf;
}

static unsigned int regmap_parse_24(void *buf)
{
	u8 *b = buf;
	unsigned int ret = b[2];
	ret |= ((unsigned int)b[1]) << 8;
	ret |= ((unsigned int)b[0]) << 16;

	return ret;
}

static unsigned int regmap_parse_32_be(void *buf)
{
	__be32 *b = buf;

	b[0] = be32_to_cpu(b[0]);

	return b[0];
}

static unsigned int regmap_parse_32_native(void *buf)
{
	return *(u32 *)buf;
}
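
/*
 * Illustrative (non-normative) example of the packed write formats
 * above: for a device with 7 bit registers and 9 bit values, such as
 * a number of audio CODECs, regmap_format_7_9_write() builds a single
 * big-endian 16 bit word.  Writing val 0x1ff to reg 0x40 gives
 * (0x40 << 9) | 0x1ff = 0x81ff, transmitted as the bytes 0x81 0xff.
 */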

static void regmap_lock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_lock(&map->mutex);
}

static void regmap_unlock_mutex(void *__map)
{
	struct regmap *map = __map;
	mutex_unlock(&map->mutex);
}

static void regmap_lock_spinlock(void *__map)
{
	struct regmap *map = __map;
	spin_lock(&map->spinlock);
}

static void regmap_unlock_spinlock(void *__map)
{
	struct regmap *map = __map;
	spin_unlock(&map->spinlock);
}

static void dev_get_regmap_release(struct device *dev, void *res)
{
	/*
	 * We don't actually have anything to do here; the goal here
	 * is not to manage the regmap but to provide a simple way to
	 * get the regmap back given a struct device.
	 */
}

static bool _regmap_range_add(struct regmap *map,
			      struct regmap_range_node *data)
{
	struct rb_root *root = &map->range_tree;
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	while (*new) {
		struct regmap_range_node *this =
			container_of(*new, struct regmap_range_node, node);

		parent = *new;
		if (data->range_max < this->range_min)
			new = &((*new)->rb_left);
		else if (data->range_min > this->range_max)
			new = &((*new)->rb_right);
		else
			return false;
	}

	rb_link_node(&data->node, parent, new);
	rb_insert_color(&data->node, root);

	return true;
}

static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
						      unsigned int reg)
{
	struct rb_node *node = map->range_tree.rb_node;

	while (node) {
		struct regmap_range_node *this =
			container_of(node, struct regmap_range_node, node);

		if (reg < this->range_min)
			node = node->rb_left;
		else if (reg > this->range_max)
			node = node->rb_right;
		else
			return this;
	}

	return NULL;
}

static void regmap_range_exit(struct regmap *map)
{
	struct rb_node *next;
	struct regmap_range_node *range_node;

	next = rb_first(&map->range_tree);
	while (next) {
		range_node = rb_entry(next, struct regmap_range_node, node);
		next = rb_next(&range_node->node);
		rb_erase(&range_node->node, &map->range_tree);
		kfree(range_node);
	}

	kfree(map->selector_work_buf);
}

/**
 * regmap_init(): Initialise register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer to
 * a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.
 */
struct regmap *regmap_init(struct device *dev,
			   const struct regmap_bus *bus,
			   void *bus_context,
			   const struct regmap_config *config)
{
	struct regmap *map, **m;
	int ret = -EINVAL;
	enum regmap_endian reg_endian, val_endian;
	int i, j;

	if (!bus || !config)
		goto err;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	if (config->lock && config->unlock) {
		map->lock = config->lock;
		map->unlock = config->unlock;
		map->lock_arg = config->lock_arg;
	} else {
		if (bus->fast_io) {
			spin_lock_init(&map->spinlock);
			map->lock = regmap_lock_spinlock;
			map->unlock = regmap_unlock_spinlock;
		} else {
			mutex_init(&map->mutex);
			map->lock = regmap_lock_mutex;
			map->unlock = regmap_unlock_mutex;
		}
		map->lock_arg = map;
	}
	map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
	map->format.pad_bytes = config->pad_bits / 8;
	map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
	map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
			config->val_bits + config->pad_bits, 8);
	map->reg_shift = config->pad_bits % 8;
	if (config->reg_stride)
		map->reg_stride = config->reg_stride;
	else
		map->reg_stride = 1;
	map->use_single_rw = config->use_single_rw;
	map->dev = dev;
	map->bus = bus;
	map->bus_context = bus_context;
	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;
	map->name = config->name;

	if (config->read_flag_mask || config->write_flag_mask) {
		map->read_flag_mask = config->read_flag_mask;
		map->write_flag_mask = config->write_flag_mask;
	} else {
		map->read_flag_mask = bus->read_flag_mask;
	}

	reg_endian = config->reg_format_endian;
	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
		reg_endian = bus->reg_format_endian_default;
	if (reg_endian == REGMAP_ENDIAN_DEFAULT)
		reg_endian = REGMAP_ENDIAN_BIG;

	val_endian = config->val_format_endian;
	if (val_endian == REGMAP_ENDIAN_DEFAULT)
		val_endian = bus->val_format_endian_default;
	if (val_endian == REGMAP_ENDIAN_DEFAULT)
		val_endian = REGMAP_ENDIAN_BIG;

	switch (config->reg_bits + map->reg_shift) {
	case 2:
		switch (config->val_bits) {
		case 6:
			map->format.format_write = regmap_format_2_6_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 4:
		switch (config->val_bits) {
		case 12:
			map->format.format_write = regmap_format_4_12_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 7:
		switch (config->val_bits) {
		case 9:
			map->format.format_write = regmap_format_7_9_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 10:
		switch (config->val_bits) {
		case 14:
			map->format.format_write = regmap_format_10_14_write;
			break;
		default:
			goto err_map;
		}
		break;

	case 8:
		map->format.format_reg = regmap_format_8;
		break;

	case 16:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_16_native;
			break;
		default:
			goto err_map;
		}
		break;

	case 32:
		switch (reg_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_reg = regmap_format_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_reg = regmap_format_32_native;
			break;
		default:
			goto err_map;
		}
		break;

	default:
		goto err_map;
	}

	switch (config->val_bits) {
	case 8:
		map->format.format_val = regmap_format_8;
		map->format.parse_val = regmap_parse_8;
		break;
	case 16:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_16_be;
			map->format.parse_val = regmap_parse_16_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_16_native;
			map->format.parse_val = regmap_parse_16_native;
			break;
		default:
			goto err_map;
		}
		break;
	case 24:
		if (val_endian != REGMAP_ENDIAN_BIG)
			goto err_map;
		map->format.format_val = regmap_format_24;
		map->format.parse_val = regmap_parse_24;
		break;
	case 32:
		switch (val_endian) {
		case REGMAP_ENDIAN_BIG:
			map->format.format_val = regmap_format_32_be;
			map->format.parse_val = regmap_parse_32_be;
			break;
		case REGMAP_ENDIAN_NATIVE:
			map->format.format_val = regmap_format_32_native;
			map->format.parse_val = regmap_parse_32_native;
			break;
		default:
			goto err_map;
		}
		break;
	}

	if (map->format.format_write) {
		if ((reg_endian != REGMAP_ENDIAN_BIG) ||
		    (val_endian != REGMAP_ENDIAN_BIG))
			goto err_map;
		map->use_single_rw = true;
	}

	if (!map->format.format_write &&
	    !(map->format.format_reg && map->format.format_val))
		goto err_map;

	map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
	if (map->work_buf == NULL) {
		ret = -ENOMEM;
		goto err_map;
	}

	map->range_tree = RB_ROOT;
	for (i = 0; i < config->n_ranges; i++) {
		const struct regmap_range_cfg *range_cfg = &config->ranges[i];
		struct regmap_range_node *new;

		/* Sanity check */
		if (range_cfg->range_max < range_cfg->range_min ||
		    range_cfg->range_max > map->max_register ||
		    range_cfg->selector_reg > map->max_register ||
		    range_cfg->window_len == 0)
			goto err_range;

		/* Make sure that this register range has no selector
		   or data window within its boundary */
		for (j = 0; j < config->n_ranges; j++) {
			unsigned sel_reg = config->ranges[j].selector_reg;
			unsigned win_min = config->ranges[j].window_start;
			unsigned win_max = win_min +
				config->ranges[j].window_len - 1;

			if (range_cfg->range_min <= sel_reg &&
			    sel_reg <= range_cfg->range_max) {
				goto err_range;
			}

			if (!(win_max < range_cfg->range_min ||
			      win_min > range_cfg->range_max)) {
				goto err_range;
			}
		}

		new = kzalloc(sizeof(*new), GFP_KERNEL);
		if (new == NULL) {
			ret = -ENOMEM;
			goto err_range;
		}

		new->range_min = range_cfg->range_min;
		new->range_max = range_cfg->range_max;
		new->selector_reg = range_cfg->selector_reg;
		new->selector_mask = range_cfg->selector_mask;
		new->selector_shift = range_cfg->selector_shift;
		new->window_start = range_cfg->window_start;
		new->window_len = range_cfg->window_len;

		if (_regmap_range_add(map, new) == false) {
			kfree(new);
			goto err_range;
		}

		if (map->selector_work_buf == NULL) {
			map->selector_work_buf =
				kzalloc(map->format.buf_size, GFP_KERNEL);
			if (map->selector_work_buf == NULL) {
				ret = -ENOMEM;
				goto err_range;
			}
		}
	}

	ret = regcache_init(map, config);
	if (ret < 0)
		goto err_range;

	regmap_debugfs_init(map, config->name);

	/* Add a devres resource for dev_get_regmap() */
	m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
	if (!m) {
		ret = -ENOMEM;
		goto err_debugfs;
	}
	*m = map;
	devres_add(dev, m);

	return map;

err_debugfs:
	regmap_debugfs_exit(map);
	regcache_exit(map);
err_range:
	regmap_range_exit(map);
	kfree(map->work_buf);
err_map:
	kfree(map);
err:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(regmap_init);
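
/*
 * Minimal sketch of how a bus binding is expected to wrap this call,
 * modelled on the existing bus-specific init functions; the names
 * regmap_init_foo, regmap_foo_bus and struct foo_device are purely
 * illustrative and not part of this API:
 *
 *	struct regmap *regmap_init_foo(struct foo_device *foo,
 *				       const struct regmap_config *config)
 *	{
 *		return regmap_init(&foo->dev, &regmap_foo_bus, foo, config);
 *	}
 */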

static void devm_regmap_release(struct device *dev, void *res)
{
	regmap_exit(*(struct regmap **)res);
}

/**
 * devm_regmap_init(): Initialise managed register map
 *
 * @dev: Device that will be interacted with
 * @bus: Bus-specific callbacks to use with device
 * @bus_context: Data passed to bus-specific callbacks
 * @config: Configuration for register map
 *
 * The return value will be an ERR_PTR() on error or a valid pointer
 * to a struct regmap.  This function should generally not be called
 * directly, it should be called by bus-specific init functions.  The
 * map will be automatically freed by the device management code.
 */
struct regmap *devm_regmap_init(struct device *dev,
				const struct regmap_bus *bus,
				void *bus_context,
				const struct regmap_config *config)
{
	struct regmap **ptr, *regmap;

	ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	regmap = regmap_init(dev, bus, bus_context, config);
	if (!IS_ERR(regmap)) {
		*ptr = regmap;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return regmap;
}
EXPORT_SYMBOL_GPL(devm_regmap_init);

/**
 * regmap_reinit_cache(): Reinitialise the current register cache
 *
 * @map: Register map to operate on.
 * @config: New configuration.  Only the cache data will be used.
 *
 * Discard any existing register cache for the map and initialize a
 * new cache.  This can be used to restore the cache to defaults or to
 * update the cache configuration to reflect runtime discovery of the
 * hardware.
 *
 * No explicit locking is done here, the user needs to ensure that
 * this function will not race with other calls to regmap.
 */
int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);

	map->max_register = config->max_register;
	map->writeable_reg = config->writeable_reg;
	map->readable_reg = config->readable_reg;
	map->volatile_reg = config->volatile_reg;
	map->precious_reg = config->precious_reg;
	map->cache_type = config->cache_type;

	regmap_debugfs_init(map, config->name);

	map->cache_bypass = false;
	map->cache_only = false;

	return regcache_init(map, config);
}
EXPORT_SYMBOL_GPL(regmap_reinit_cache);
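
/*
 * Typical use (sketch only): a driver probes a device, discovers at
 * runtime that it is a later revision with additional registers, and
 * rebuilds the cache with an updated configuration.  The names
 * revb_config and FOO_REVB_MAX_REG are hypothetical:
 *
 *	revb_config.max_register = FOO_REVB_MAX_REG;
 *	ret = regmap_reinit_cache(map, &revb_config);
 */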

/**
 * regmap_exit(): Free a previously allocated register map
 */
void regmap_exit(struct regmap *map)
{
	regcache_exit(map);
	regmap_debugfs_exit(map);
	regmap_range_exit(map);
	if (map->bus->free_context)
		map->bus->free_context(map->bus_context);
	kfree(map->work_buf);
	kfree(map);
}
EXPORT_SYMBOL_GPL(regmap_exit);

static int dev_get_regmap_match(struct device *dev, void *res, void *data)
{
	struct regmap **r = res;
	if (!r || !*r) {
		WARN_ON(!r || !*r);
		return 0;
	}

	/* If the user didn't specify a name match any */
	if (data)
		return (*r)->name == data;
	else
		return 1;
}

/**
 * dev_get_regmap(): Obtain the regmap (if any) for a device
 *
 * @dev: Device to retrieve the map for
 * @name: Optional name for the register map, usually NULL.
 *
 * Returns the regmap for the device if one is present, or NULL.  If
 * name is specified then it must match the name specified when
 * registering the device, if it is NULL then the first regmap found
 * will be used.  Devices with multiple register maps are very rare,
 * generic code should normally not need to specify a name.
 */
struct regmap *dev_get_regmap(struct device *dev, const char *name)
{
	struct regmap **r = devres_find(dev, dev_get_regmap_release,
					dev_get_regmap_match, (void *)name);

	if (!r)
		return NULL;
	return *r;
}
EXPORT_SYMBOL_GPL(dev_get_regmap);
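
/*
 * Usage sketch: a child device (for example an MFD cell) looking up
 * the regmap registered by its parent:
 *
 *	struct regmap *map = dev_get_regmap(dev->parent, NULL);
 *	if (!map)
 *		return -ENODEV;
 */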

static int _regmap_select_page(struct regmap *map, unsigned int *reg,
			       unsigned int val_num)
{
	struct regmap_range_node *range;
	void *orig_work_buf;
	unsigned int win_offset;
	unsigned int win_page;
	bool page_chg;
	int ret;

	range = _regmap_range_lookup(map, *reg);
	if (range) {
		win_offset = (*reg - range->range_min) % range->window_len;
		win_page = (*reg - range->range_min) / range->window_len;

		if (val_num > 1) {
			/* Bulk write shouldn't cross range boundary */
			if (*reg + val_num - 1 > range->range_max)
				return -EINVAL;

			/* ... or single page boundary */
			if (val_num > range->window_len - win_offset)
				return -EINVAL;
		}

		/* It is possible to have a selector register inside the
		   data window.  In that case the selector register is
		   present on every page and needs no page switching when
		   accessed on its own. */
		if (val_num > 1 ||
		    range->window_start + win_offset != range->selector_reg) {
			/* Use separate work_buf during page switching */
			orig_work_buf = map->work_buf;
			map->work_buf = map->selector_work_buf;

			ret = _regmap_update_bits(map, range->selector_reg,
						  range->selector_mask,
						  win_page << range->selector_shift,
						  &page_chg);

			map->work_buf = orig_work_buf;

			if (ret < 0)
				return ret;
		}

		*reg = range->window_start + win_offset;
	}

	return 0;
}
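
/*
 * Illustrative example (hypothetical numbers): with a range covering
 * virtual registers 0x100-0x1ff, a window of 0x10 registers starting
 * at window_start 0x20 and a selector register at 0x1f, an access to
 * virtual register 0x134 selects page (0x34 / 0x10) = 3 via the
 * selector and is then issued at 0x20 + (0x34 % 0x10) = 0x24.
 */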

static int _regmap_raw_write(struct regmap *map, unsigned int reg,
			     const void *val, size_t val_len)
{
	u8 *u8 = map->work_buf;
	void *buf;
	int ret = -ENOTSUPP;
	size_t len;
	int i;

	/* Check for unwritable registers before we start */
	if (map->writeable_reg)
		for (i = 0; i < val_len / map->format.val_bytes; i++)
			if (!map->writeable_reg(map->dev,
						reg + (i * map->reg_stride)))
				return -EINVAL;

	if (!map->cache_bypass && map->format.parse_val) {
		unsigned int ival;
		int val_bytes = map->format.val_bytes;
		for (i = 0; i < val_len / val_bytes; i++) {
			memcpy(map->work_buf, val + (i * val_bytes), val_bytes);
			ival = map->format.parse_val(map->work_buf);
			ret = regcache_write(map, reg + (i * map->reg_stride),
					     ival);
			if (ret) {
				dev_err(map->dev,
					"Error in caching of register: %u ret: %d\n",
					reg + i, ret);
				return ret;
			}
		}
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

	ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
	if (ret < 0)
		return ret;

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	u8[0] |= map->write_flag_mask;

	trace_regmap_hw_write_start(map->dev, reg,
				    val_len / map->format.val_bytes);

	/* If we're doing a single register write we can probably just
	 * send the work_buf directly, otherwise try to do a gather
	 * write.
	 */
	if (val == (map->work_buf + map->format.pad_bytes +
		    map->format.reg_bytes))
		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.reg_bytes +
				      map->format.pad_bytes +
				      val_len);
	else if (map->bus->gather_write)
		ret = map->bus->gather_write(map->bus_context, map->work_buf,
					     map->format.reg_bytes +
					     map->format.pad_bytes,
					     val, val_len);

	/* If that didn't work fall back on linearising by hand. */
	if (ret == -ENOTSUPP) {
		len = map->format.reg_bytes + map->format.pad_bytes + val_len;
		buf = kzalloc(len, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		memcpy(buf, map->work_buf, map->format.reg_bytes);
		memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
		       val, val_len);
		ret = map->bus->write(map->bus_context, buf, len);

		kfree(buf);
	}

	trace_regmap_hw_write_done(map->dev, reg,
				   val_len / map->format.val_bytes);

	return ret;
}

int _regmap_write(struct regmap *map, unsigned int reg,
		  unsigned int val)
{
	int ret;
	BUG_ON(!map->format.format_write && !map->format.format_val);

	if (!map->cache_bypass && map->format.format_write) {
		ret = regcache_write(map, reg, val);
		if (ret != 0)
			return ret;
		if (map->cache_only) {
			map->cache_dirty = true;
			return 0;
		}
	}

#ifdef LOG_DEVICE
	if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
		dev_info(map->dev, "%x <= %x\n", reg, val);
#endif

	trace_regmap_reg_write(map->dev, reg, val);

	if (map->format.format_write) {
		ret = _regmap_select_page(map, &reg, 1);
		if (ret < 0)
			return ret;

		map->format.format_write(map, reg, val);

		trace_regmap_hw_write_start(map->dev, reg, 1);

		ret = map->bus->write(map->bus_context, map->work_buf,
				      map->format.buf_size);

		trace_regmap_hw_write_done(map->dev, reg, 1);

		return ret;
	} else {
		map->format.format_val(map->work_buf + map->format.reg_bytes
				       + map->format.pad_bytes, val, 0);
		return _regmap_raw_write(map, reg,
					 map->work_buf +
					 map->format.reg_bytes +
					 map->format.pad_bytes,
					 map->format.val_bytes);
	}
}

/**
 * regmap_write(): Write a value to a single register
 *
 * @map: Register map to write to
 * @reg: Register to write to
 * @val: Value to be written
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
{
	int ret;

	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_write(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_write);
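
/*
 * Minimal usage sketch; the device handle and FOO_REG_CTRL register
 * name are hypothetical, not part of this API:
 *
 *	struct regmap *map = dev_get_regmap(dev, NULL);
 *	int ret = regmap_write(map, FOO_REG_CTRL, 0x01);
 *	if (ret)
 *		dev_err(dev, "failed to write CTRL: %d\n", ret);
 */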

/**
 * regmap_raw_write(): Write raw values to one or more registers
 *
 * @map: Register map to write to
 * @reg: Initial register to write to
 * @val: Block of data to be written, laid out for direct transmission to the
 *       device
 * @val_len: Length of data pointed to by val.
 *
 * This function is intended to be used for things like firmware
 * download where a large block of data needs to be transferred to the
 * device.  No formatting will be done on the data provided.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_write(struct regmap *map, unsigned int reg,
		     const void *val, size_t val_len)
{
	int ret;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_raw_write(map, reg, val, val_len);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_write);

/*
 * regmap_bulk_write(): Write multiple registers to the device
 *
 * @map: Register map to write to
 * @reg: First register to be written
 * @val: Block of data to be written, in native register size for device
 * @val_count: Number of registers to write
 *
 * This function is intended to be used for writing a large block of
 * data to the device either in a single transfer or in multiple
 * transfers.
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
		      size_t val_count)
{
	int ret = 0, i;
	size_t val_bytes = map->format.val_bytes;
	void *wval;

	if (!map->format.parse_val)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	/* No formatting is required if val_bytes is 1 */
	if (val_bytes == 1) {
		wval = (void *)val;
	} else {
		wval = kmemdup(val, val_count * val_bytes, GFP_KERNEL);
		if (!wval) {
			ret = -ENOMEM;
			dev_err(map->dev, "Error in memory allocation\n");
			goto out;
		}
		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_val(wval + i);
	}
	/*
	 * Some devices do not support bulk write; for them we issue
	 * a series of single write operations.
	 */
	if (map->use_single_rw) {
		for (i = 0; i < val_count; i++) {
			ret = regmap_raw_write(map,
					       reg + (i * map->reg_stride),
					       val + (i * val_bytes),
					       val_bytes);
			if (ret != 0)
				return ret;
		}
	} else {
		ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
	}

	if (val_bytes != 1)
		kfree(wval);

out:
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_bulk_write);
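
/*
 * Usage sketch, assuming a hypothetical device with four consecutive
 * 16 bit coefficient registers starting at FOO_REG_COEFF0:
 *
 *	u16 coeffs[4] = { 0x0001, 0x0203, 0x0405, 0x0607 };
 *
 *	ret = regmap_bulk_write(map, FOO_REG_COEFF0, coeffs,
 *				ARRAY_SIZE(coeffs));
 *
 * The values are passed in native register size (here u16) and are
 * formatted for the bus by the core.
 */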

static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
			    unsigned int val_len)
{
	u8 *u8 = map->work_buf;
	int ret;

	ret = _regmap_select_page(map, &reg, val_len / map->format.val_bytes);
	if (ret < 0)
		return ret;

	map->format.format_reg(map->work_buf, reg, map->reg_shift);

	/*
	 * Some buses or devices flag reads by setting the high bits in the
	 * register address; since it's always the high bits for all
	 * current formats we can do this here rather than in
	 * formatting.  This may break if we get interesting formats.
	 */
	u8[0] |= map->read_flag_mask;

	trace_regmap_hw_read_start(map->dev, reg,
				   val_len / map->format.val_bytes);

	ret = map->bus->read(map->bus_context, map->work_buf,
			     map->format.reg_bytes + map->format.pad_bytes,
			     val, val_len);

	trace_regmap_hw_read_done(map->dev, reg,
				  val_len / map->format.val_bytes);

	return ret;
}

static int _regmap_read(struct regmap *map, unsigned int reg,
			unsigned int *val)
{
	int ret;

	if (!map->cache_bypass) {
		ret = regcache_read(map, reg, val);
		if (ret == 0)
			return 0;
	}

	if (!map->format.parse_val)
		return -EINVAL;

	if (map->cache_only)
		return -EBUSY;

	ret = _regmap_raw_read(map, reg, map->work_buf, map->format.val_bytes);
	if (ret == 0) {
		*val = map->format.parse_val(map->work_buf);

#ifdef LOG_DEVICE
		if (strcmp(dev_name(map->dev), LOG_DEVICE) == 0)
			dev_info(map->dev, "%x => %x\n", reg, *val);
#endif

		trace_regmap_reg_read(map->dev, reg, *val);
	}

	if (ret == 0 && !map->cache_bypass)
		regcache_write(map, reg, *val);

	return ret;
}

/**
 * regmap_read(): Read a value from a single register
 *
 * @map: Register map to read from
 * @reg: Register to be read from
 * @val: Pointer to store read value
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
{
	int ret;

	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	ret = _regmap_read(map, reg, val);

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_read);

/**
 * regmap_raw_read(): Read raw data from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value
 * @val_len: Size of data to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
		    size_t val_len)
{
	size_t val_bytes = map->format.val_bytes;
	size_t val_count = val_len / val_bytes;
	unsigned int v;
	int ret, i;

	if (val_len % map->format.val_bytes)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	map->lock(map->lock_arg);

	if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
	    map->cache_type == REGCACHE_NONE) {
		/* Physical block read if there's no cache involved */
		ret = _regmap_raw_read(map, reg, val, val_len);

	} else {
		/* Otherwise go word by word for the cache; should be low
		 * cost as we expect to hit the cache.
		 */
		for (i = 0; i < val_count; i++) {
			ret = _regmap_read(map, reg + (i * map->reg_stride),
					   &v);
			if (ret != 0)
				goto out;

			map->format.format_val(val + (i * val_bytes), v, 0);
		}
	}

 out:
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_raw_read);

/**
 * regmap_bulk_read(): Read multiple registers from the device
 *
 * @map: Register map to read from
 * @reg: First register to be read from
 * @val: Pointer to store read value, in native register size for device
 * @val_count: Number of registers to read
 *
 * A value of zero will be returned on success, a negative errno will
 * be returned in error cases.
 */
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
		     size_t val_count)
{
	int ret, i;
	size_t val_bytes = map->format.val_bytes;
	bool vol = regmap_volatile_range(map, reg, val_count);

	if (!map->format.parse_val)
		return -EINVAL;
	if (reg % map->reg_stride)
		return -EINVAL;

	if (vol || map->cache_type == REGCACHE_NONE) {
		/*
		 * Some devices do not support bulk read; for them we
		 * issue a series of single read operations.
		 */
		if (map->use_single_rw) {
			for (i = 0; i < val_count; i++) {
				ret = regmap_raw_read(map,
						      reg + (i * map->reg_stride),
						      val + (i * val_bytes),
						      val_bytes);
				if (ret != 0)
					return ret;
			}
		} else {
			ret = regmap_raw_read(map, reg, val,
					      val_bytes * val_count);
			if (ret != 0)
				return ret;
		}

		for (i = 0; i < val_count * val_bytes; i += val_bytes)
			map->format.parse_val(val + i);
	} else {
		for (i = 0; i < val_count; i++) {
			unsigned int ival;
			ret = regmap_read(map, reg + (i * map->reg_stride),
					  &ival);
			if (ret != 0)
				return ret;
			memcpy(val + (i * val_bytes), &ival, val_bytes);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(regmap_bulk_read);
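
/*
 * Usage sketch (hypothetical register names), reading back the four
 * coefficients written in the regmap_bulk_write() example above:
 *
 *	u16 coeffs[4];
 *
 *	ret = regmap_bulk_read(map, FOO_REG_COEFF0, coeffs,
 *			       ARRAY_SIZE(coeffs));
 *
 * Non-volatile registers will normally be satisfied from the register
 * cache without touching the bus.
 */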

static int _regmap_update_bits(struct regmap *map, unsigned int reg,
			       unsigned int mask, unsigned int val,
			       bool *change)
{
	int ret;
	unsigned int tmp, orig;

	ret = _regmap_read(map, reg, &orig);
	if (ret != 0)
		return ret;

	tmp = orig & ~mask;
	tmp |= val & mask;

	if (tmp != orig) {
		ret = _regmap_write(map, reg, tmp);
		*change = true;
	} else {
		*change = false;
	}

	return ret;
}

/**
 * regmap_update_bits: Perform a read/modify/write cycle on the register map
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits(struct regmap *map, unsigned int reg,
		       unsigned int mask, unsigned int val)
{
	bool change;
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, &change);
	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits);
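
/*
 * Usage sketch, assuming a hypothetical control register with an
 * enable bit and a two bit gain field (all FOO_* names illustrative):
 *
 *	ret = regmap_update_bits(map, FOO_REG_CTRL,
 *				 FOO_CTRL_EN | FOO_CTRL_GAIN_MASK,
 *				 FOO_CTRL_EN | (2 << FOO_CTRL_GAIN_SHIFT));
 *
 * Only the masked bits are modified; the write is skipped entirely if
 * the register already holds the requested value.
 */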

/**
 * regmap_update_bits_check: Perform a read/modify/write cycle on the
 *                           register map and report if updated
 *
 * @map: Register map to update
 * @reg: Register to update
 * @mask: Bitmask to change
 * @val: New value for bitmask
 * @change: Boolean indicating if a write was done
 *
 * Returns zero for success, a negative number on error.
 */
int regmap_update_bits_check(struct regmap *map, unsigned int reg,
			     unsigned int mask, unsigned int val,
			     bool *change)
{
	int ret;

	map->lock(map->lock_arg);
	ret = _regmap_update_bits(map, reg, mask, val, change);
	map->unlock(map->lock_arg);
	return ret;
}
EXPORT_SYMBOL_GPL(regmap_update_bits_check);

/**
 * regmap_register_patch: Register and apply register updates to be applied
 *                        on device initialisation
 *
 * @map: Register map to apply updates to.
 * @regs: Values to update.
 * @num_regs: Number of entries in regs.
 *
 * Register a set of register updates to be applied to the device
 * whenever the device registers are synchronised with the cache and
 * apply them immediately.  Typically this is used to apply
 * corrections to the device defaults on startup, such as the updates
 * some vendors provide to undocumented registers.
 */
int regmap_register_patch(struct regmap *map, const struct reg_default *regs,
			  int num_regs)
{
	int i, ret;
	bool bypass;

	/* If needed the implementation can be extended to support this */
	if (map->patch)
		return -EBUSY;

	map->lock(map->lock_arg);

	bypass = map->cache_bypass;

	map->cache_bypass = true;

	/* Write out first; it's useful to apply even if we fail later. */
	for (i = 0; i < num_regs; i++) {
		ret = _regmap_write(map, regs[i].reg, regs[i].def);
		if (ret != 0) {
			dev_err(map->dev, "Failed to write %x = %x: %d\n",
				regs[i].reg, regs[i].def, ret);
			goto out;
		}
	}

	map->patch = kcalloc(num_regs, sizeof(struct reg_default), GFP_KERNEL);
	if (map->patch != NULL) {
		memcpy(map->patch, regs,
		       num_regs * sizeof(struct reg_default));
		map->patch_regs = num_regs;
	} else {
		ret = -ENOMEM;
	}

out:
	map->cache_bypass = bypass;

	map->unlock(map->lock_arg);

	return ret;
}
EXPORT_SYMBOL_GPL(regmap_register_patch);
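
/*
 * Usage sketch (hypothetical errata values): applying two vendor
 * supplied fixups once at probe time; the registered patch is also
 * replayed when the cache is synchronised back to the hardware:
 *
 *	static const struct reg_default foo_errata[] = {
 *		{ 0x23, 0x0004 },
 *		{ 0x41, 0x8000 },
 *	};
 *
 *	ret = regmap_register_patch(map, foo_errata,
 *				    ARRAY_SIZE(foo_errata));
 */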

/*
 * regmap_get_val_bytes(): Report the size of a register value
 *
 * Report the size of a register value, mainly intended for use by
 * generic infrastructure built on top of regmap.
 */
int regmap_get_val_bytes(struct regmap *map)
{
	if (map->format.format_write)
		return -EINVAL;

	return map->format.val_bytes;
}
EXPORT_SYMBOL_GPL(regmap_get_val_bytes);

static int __init regmap_initcall(void)
{
	regmap_debugfs_initcall();

	return 0;
}
postcore_initcall(regmap_initcall);