blob: 865e30c6db6d3478f5049d94a0e127ff92cba775 [file] [log] [blame]
Anji Jonnaladb2cdad2013-03-09 09:49:11 +05301/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13
14#include <linux/module.h>
15#include <linux/kernel.h>
16#include <linux/delay.h>
17#include <linux/init.h>
18#include <linux/io.h>
19#include <linux/slab.h>
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -060020#include <linux/of.h>
21#include <linux/of_address.h>
22#include <linux/platform_device.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070023#include <mach/msm_iomap.h>
Praveen Chidambaram76679d42011-12-16 14:19:02 -070024#include <mach/socinfo.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070025#include "spm.h"
26#include "spm_driver.h"
27
/* One low-power mode supported by an SPM instance. */
struct msm_spm_power_modes {
	uint32_t mode;		/* mode identifier (MSM_SPM_MODE_* / MSM_SPM_L2_MODE_*) */
	bool notify_rpm;	/* whether entering this mode notifies the RPM */
	uint32_t start_addr;	/* offset of this mode's command sequence in SPM memory */
};
34
/* State for one SPM instance (one per application core, plus one for L2). */
struct msm_spm_device {
	struct msm_spm_driver_data reg_data;	/* register-level driver state */
	struct msm_spm_power_modes *modes;	/* table of supported low power modes */
	uint32_t num_modes;			/* number of entries in @modes */
	uint32_t cpu_vdd;			/* last voltage level programmed via this SPM */
};
41
/* Argument/result bundle for msm_spm_smp_set_vdd(), which may run on
 * another core via smp_call_function_single(). */
struct msm_spm_vdd_info {
	uint32_t cpu;		/* target core */
	uint32_t vlevel;	/* requested voltage level */
	int err;		/* result of the vdd write, filled in by the callee */
};
47
/* SPM instance for the shared L2 cache (APCS) power controller. */
static struct msm_spm_device msm_spm_l2_device;
/* Per-CPU SPM instances for the application processor cores. */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct msm_spm_device, msm_cpu_spm_device);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070050
Praveen Chidambaram6a8fb3b2012-09-16 14:54:35 -060051
Praveen Chidambaram6a8fb3b2012-09-16 14:54:35 -060052static void msm_spm_smp_set_vdd(void *data)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070053{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070054 struct msm_spm_device *dev;
Praveen Chidambaram6a8fb3b2012-09-16 14:54:35 -060055 struct msm_spm_vdd_info *info = (struct msm_spm_vdd_info *)data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070056
Praveen Chidambaram6a8fb3b2012-09-16 14:54:35 -060057 dev = &per_cpu(msm_cpu_spm_device, info->cpu);
Anji Jonnaladb2cdad2013-03-09 09:49:11 +053058 dev->cpu_vdd = info->vlevel;
Praveen Chidambaram6a8fb3b2012-09-16 14:54:35 -060059 info->err = msm_spm_drv_set_vdd(&dev->reg_data, info->vlevel);
60}
61
62int msm_spm_set_vdd(unsigned int cpu, unsigned int vlevel)
63{
64 struct msm_spm_vdd_info info;
65 int ret;
66
67 info.cpu = cpu;
68 info.vlevel = vlevel;
69
Venkat Devarasetty8ad40ba2013-04-29 21:50:57 +053070 if ((smp_processor_id() != cpu) && cpu_online(cpu)) {
Praveen Chidambaram78164cf2012-10-05 17:11:33 -060071 /**
72 * We do not want to set the voltage of another core from
73 * this core, as its possible that we may race the vdd change
74 * with the SPM state machine of that core, which could also
75 * be changing the voltage of that core during power collapse.
76 * Hence, set the function to be executed on that core and block
77 * until the vdd change is complete.
78 */
79 ret = smp_call_function_single(cpu, msm_spm_smp_set_vdd,
80 &info, true);
81 if (!ret)
82 ret = info.err;
83 } else {
84 /**
85 * Since the core is not online, it is safe to set the vdd
86 * directly.
87 */
88 msm_spm_smp_set_vdd(&info);
Praveen Chidambaram6a8fb3b2012-09-16 14:54:35 -060089 ret = info.err;
Praveen Chidambaram78164cf2012-10-05 17:11:33 -060090 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070091
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070092 return ret;
93}
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -060094EXPORT_SYMBOL(msm_spm_set_vdd);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095
Praveen Chidambaramebbf2122012-09-29 22:27:03 -060096unsigned int msm_spm_get_vdd(unsigned int cpu)
97{
98 struct msm_spm_device *dev;
99
100 dev = &per_cpu(msm_cpu_spm_device, cpu);
Anji Jonnaladb2cdad2013-03-09 09:49:11 +0530101 return dev->cpu_vdd;
Praveen Chidambaramebbf2122012-09-29 22:27:03 -0600102}
103EXPORT_SYMBOL(msm_spm_get_vdd);
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105static int msm_spm_dev_set_low_power_mode(struct msm_spm_device *dev,
106 unsigned int mode, bool notify_rpm)
107{
108 uint32_t i;
109 uint32_t start_addr = 0;
110 int ret = -EINVAL;
111
112 if (mode == MSM_SPM_MODE_DISABLED) {
113 ret = msm_spm_drv_set_spm_enable(&dev->reg_data, false);
114 } else if (!msm_spm_drv_set_spm_enable(&dev->reg_data, true)) {
115 for (i = 0; i < dev->num_modes; i++) {
116 if ((dev->modes[i].mode == mode) &&
117 (dev->modes[i].notify_rpm == notify_rpm)) {
118 start_addr = dev->modes[i].start_addr;
119 break;
120 }
121 }
122 ret = msm_spm_drv_set_low_power_mode(&dev->reg_data,
123 start_addr);
124 }
125 return ret;
126}
127
Stephen Boyddb354112012-05-09 14:24:58 -0700128static int __devinit msm_spm_dev_init(struct msm_spm_device *dev,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129 struct msm_spm_platform_data *data)
130{
131 int i, ret = -ENOMEM;
132 uint32_t offset = 0;
133
134 dev->num_modes = data->num_modes;
135 dev->modes = kmalloc(
136 sizeof(struct msm_spm_power_modes) * dev->num_modes,
137 GFP_KERNEL);
138
139 if (!dev->modes)
140 goto spm_failed_malloc;
141
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600142 dev->reg_data.ver_reg = data->ver_reg;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700143 ret = msm_spm_drv_init(&dev->reg_data, data);
144
145 if (ret)
146 goto spm_failed_init;
147
148 for (i = 0; i < dev->num_modes; i++) {
149
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600150 /* Default offset is 0 and gets updated as we write more
151 * sequences into SPM
152 */
153 dev->modes[i].start_addr = offset;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700154 ret = msm_spm_drv_write_seq_data(&dev->reg_data,
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600155 data->modes[i].cmd, &offset);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156 if (ret < 0)
157 goto spm_failed_init;
158
159 dev->modes[i].mode = data->modes[i].mode;
160 dev->modes[i].notify_rpm = data->modes[i].notify_rpm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700161 }
162 msm_spm_drv_flush_seq_entry(&dev->reg_data);
163 return 0;
164
165spm_failed_init:
166 kfree(dev->modes);
167spm_failed_malloc:
168 return ret;
169}
170
Praveen Chidambaramc0750ca2012-01-08 10:03:28 -0700171int msm_spm_turn_on_cpu_rail(unsigned int cpu)
172{
173 uint32_t val = 0;
174 uint32_t timeout = 0;
175 void *reg = NULL;
Stepan Moskovchenko2b0b06e2012-02-03 15:03:52 -0800176 void *saw_bases[] = {
177 0,
178 MSM_SAW1_BASE,
179 MSM_SAW2_BASE,
180 MSM_SAW3_BASE
181 };
Praveen Chidambaramc0750ca2012-01-08 10:03:28 -0700182
Stepan Moskovchenko2b0b06e2012-02-03 15:03:52 -0800183 if (cpu == 0 || cpu >= num_possible_cpus())
Praveen Chidambaramc0750ca2012-01-08 10:03:28 -0700184 return -EINVAL;
185
Stepan Moskovchenko2b0b06e2012-02-03 15:03:52 -0800186 reg = saw_bases[cpu];
Praveen Chidambaramc0750ca2012-01-08 10:03:28 -0700187
Stepan Moskovchenkoc6a603a2012-09-21 20:32:17 -0700188 if (soc_class_is_msm8960() || soc_class_is_msm8930() ||
189 soc_class_is_apq8064()) {
Stepan Moskovchenko2b0b06e2012-02-03 15:03:52 -0800190 val = 0xA4;
191 reg += 0x14;
192 timeout = 512;
Praveen Chidambaramc0750ca2012-01-08 10:03:28 -0700193 } else {
194 return -ENOSYS;
195 }
196
197 writel_relaxed(val, reg);
198 mb();
199 udelay(timeout);
200
201 return 0;
202}
203EXPORT_SYMBOL(msm_spm_turn_on_cpu_rail);
204
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600205void msm_spm_reinit(void)
206{
207 unsigned int cpu;
208 for_each_possible_cpu(cpu)
209 msm_spm_drv_reinit(&per_cpu(msm_cpu_spm_device.reg_data, cpu));
210}
211EXPORT_SYMBOL(msm_spm_reinit);
212
213int msm_spm_set_low_power_mode(unsigned int mode, bool notify_rpm)
214{
215 struct msm_spm_device *dev = &__get_cpu_var(msm_cpu_spm_device);
216 return msm_spm_dev_set_low_power_mode(dev, mode, notify_rpm);
217}
218EXPORT_SYMBOL(msm_spm_set_low_power_mode);
219
220/* Board file init function */
221int __init msm_spm_init(struct msm_spm_platform_data *data, int nr_devs)
222{
223 unsigned int cpu;
224 int ret = 0;
225
226 BUG_ON((nr_devs < num_possible_cpus()) || !data);
227
228 for_each_possible_cpu(cpu) {
229 struct msm_spm_device *dev = &per_cpu(msm_cpu_spm_device, cpu);
230 ret = msm_spm_dev_init(dev, &data[cpu]);
231 if (ret < 0) {
232 pr_warn("%s():failed CPU:%u ret:%d\n", __func__,
233 cpu, ret);
234 break;
235 }
236 }
237
238 return ret;
239}
240
241#ifdef CONFIG_MSM_L2_SPM
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700242
243int msm_spm_l2_set_low_power_mode(unsigned int mode, bool notify_rpm)
244{
245 return msm_spm_dev_set_low_power_mode(
246 &msm_spm_l2_device, mode, notify_rpm);
247}
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600248EXPORT_SYMBOL(msm_spm_l2_set_low_power_mode);
Maheshkumar Sivasubramanian4ac23762011-11-02 10:03:06 -0600249
/* Reprogram the L2 SPM registers (counterpart of msm_spm_reinit()). */
void msm_spm_l2_reinit(void)
{
	msm_spm_drv_reinit(&msm_spm_l2_device.reg_data);
}
EXPORT_SYMBOL(msm_spm_l2_reinit);
255
/* Set the APCS (L2/shared) rail voltage through the L2 SPM.
 * Returns the register driver's result (0 on success, negative on error). */
int msm_spm_apcs_set_vdd(unsigned int vlevel)
{
	return msm_spm_drv_set_vdd(&msm_spm_l2_device.reg_data, vlevel);
}
EXPORT_SYMBOL(msm_spm_apcs_set_vdd);
261
/* Set the APCS regulator phase count through the L2 SPM.
 * Returns the register driver's result (0 on success, negative on error). */
int msm_spm_apcs_set_phase(unsigned int phase_cnt)
{
	return msm_spm_drv_set_phase(&msm_spm_l2_device.reg_data, phase_cnt);
}
EXPORT_SYMBOL(msm_spm_apcs_set_phase);
267
/* Board file init function */
/* Initialize the L2 SPM device from board-supplied platform data. */
int __init msm_spm_l2_init(struct msm_spm_platform_data *data)
{
	return msm_spm_dev_init(&msm_spm_l2_device, data);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700273#endif
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600274
Sathish Ambley86487e52012-06-11 13:46:11 -0700275static int __devinit msm_spm_dev_probe(struct platform_device *pdev)
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600276{
277 int ret = 0;
278 int cpu = 0;
279 int i = 0;
280 struct device_node *node = pdev->dev.of_node;
281 struct msm_spm_platform_data spm_data;
282 char *key = NULL;
283 uint32_t val = 0;
284 struct msm_spm_seq_entry modes[MSM_SPM_MODE_NR];
285 size_t len = 0;
286 struct msm_spm_device *dev = NULL;
287 struct resource *res = NULL;
288 uint32_t mode_count = 0;
289
290 struct spm_of {
291 char *key;
292 uint32_t id;
293 };
294
295 struct spm_of spm_of_data[] = {
296 {"qcom,saw2-cfg", MSM_SPM_REG_SAW2_CFG},
297 {"qcom,saw2-avs-ctl", MSM_SPM_REG_SAW2_AVS_CTL},
298 {"qcom,saw2-avs-hysteresis", MSM_SPM_REG_SAW2_AVS_HYSTERESIS},
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600299 {"qcom,saw2-avs-limit", MSM_SPM_REG_SAW2_AVS_LIMIT},
Praveen Chidambaramce73c372012-08-22 11:50:34 -0600300 {"qcom,saw2-avs-dly", MSM_SPM_REG_SAW2_AVS_DLY},
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600301 {"qcom,saw2-spm-dly", MSM_SPM_REG_SAW2_SPM_DLY},
Praveen Chidambaramce73c372012-08-22 11:50:34 -0600302 {"qcom,saw2-spm-ctl", MSM_SPM_REG_SAW2_SPM_CTL},
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600303 {"qcom,saw2-pmic-data0", MSM_SPM_REG_SAW2_PMIC_DATA_0},
304 {"qcom,saw2-pmic-data1", MSM_SPM_REG_SAW2_PMIC_DATA_1},
305 {"qcom,saw2-pmic-data2", MSM_SPM_REG_SAW2_PMIC_DATA_2},
306 {"qcom,saw2-pmic-data3", MSM_SPM_REG_SAW2_PMIC_DATA_3},
307 {"qcom,saw2-pmic-data4", MSM_SPM_REG_SAW2_PMIC_DATA_4},
308 {"qcom,saw2-pmic-data5", MSM_SPM_REG_SAW2_PMIC_DATA_5},
309 {"qcom,saw2-pmic-data6", MSM_SPM_REG_SAW2_PMIC_DATA_6},
310 {"qcom,saw2-pmic-data7", MSM_SPM_REG_SAW2_PMIC_DATA_7},
311 };
312
313 struct mode_of {
314 char *key;
315 uint32_t id;
316 uint32_t notify_rpm;
317 };
318
Mahesh Sivasubramanian11373322012-06-14 11:17:20 -0600319 struct mode_of of_cpu_modes[] = {
320 {"qcom,saw2-spm-cmd-wfi", MSM_SPM_MODE_CLOCK_GATING, 0},
321 {"qcom,saw2-spm-cmd-ret", MSM_SPM_MODE_POWER_RETENTION, 0},
322 {"qcom,saw2-spm-cmd-spc", MSM_SPM_MODE_POWER_COLLAPSE, 0},
323 {"qcom,saw2-spm-cmd-pc", MSM_SPM_MODE_POWER_COLLAPSE, 1},
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600324 };
325
Mahesh Sivasubramanian11373322012-06-14 11:17:20 -0600326 struct mode_of of_l2_modes[] = {
327 {"qcom,saw2-spm-cmd-ret", MSM_SPM_L2_MODE_RETENTION, 1},
328 {"qcom,saw2-spm-cmd-gdhs", MSM_SPM_L2_MODE_GDHS, 1},
329 {"qcom,saw2-spm-cmd-pc", MSM_SPM_L2_MODE_POWER_COLLAPSE, 1},
330 };
331
332 struct mode_of *mode_of_data;
333 int num_modes;
334
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600335 memset(&spm_data, 0, sizeof(struct msm_spm_platform_data));
336 memset(&modes, 0,
337 (MSM_SPM_MODE_NR - 2) * sizeof(struct msm_spm_seq_entry));
338
339 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
340 if (!res)
341 goto fail;
342
343 spm_data.reg_base_addr = devm_ioremap(&pdev->dev, res->start,
344 resource_size(res));
345 if (!spm_data.reg_base_addr)
346 return -ENOMEM;
347
348 key = "qcom,core-id";
349 ret = of_property_read_u32(node, key, &val);
350 if (ret)
351 goto fail;
352 cpu = val;
353
354 key = "qcom,saw2-ver-reg";
355 ret = of_property_read_u32(node, key, &val);
356 if (ret)
357 goto fail;
358 spm_data.ver_reg = val;
359
360 key = "qcom,vctl-timeout-us";
361 ret = of_property_read_u32(node, key, &val);
362 if (!ret)
363 spm_data.vctl_timeout_us = val;
364
365 /* optional */
366 key = "qcom,vctl-port";
367 ret = of_property_read_u32(node, key, &val);
368 if (!ret)
369 spm_data.vctl_port = val;
370
371 /* optional */
372 key = "qcom,phase-port";
373 ret = of_property_read_u32(node, key, &val);
374 if (!ret)
375 spm_data.phase_port = val;
376
377 for (i = 0; i < ARRAY_SIZE(spm_of_data); i++) {
378 ret = of_property_read_u32(node, spm_of_data[i].key, &val);
379 if (ret)
380 continue;
381 spm_data.reg_init_values[spm_of_data[i].id] = val;
382 }
383
Mahesh Sivasubramanian11373322012-06-14 11:17:20 -0600384 /*
385 * Device with id 0..NR_CPUS are SPM for apps cores
386 * Device with id 0xFFFF is for L2 SPM.
387 */
388 if (cpu >= 0 && cpu < num_possible_cpus()) {
389 mode_of_data = of_cpu_modes;
390 num_modes = ARRAY_SIZE(of_cpu_modes);
391 dev = &per_cpu(msm_cpu_spm_device, cpu);
392
393 } else {
394 mode_of_data = of_l2_modes;
395 num_modes = ARRAY_SIZE(of_l2_modes);
396 dev = &msm_spm_l2_device;
397 }
398
399 for (i = 0; i < num_modes; i++) {
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600400 key = mode_of_data[i].key;
401 modes[mode_count].cmd =
402 (uint8_t *)of_get_property(node, key, &len);
403 if (!modes[mode_count].cmd)
404 continue;
405 modes[mode_count].mode = mode_of_data[i].id;
406 modes[mode_count].notify_rpm = mode_of_data[i].notify_rpm;
407 mode_count++;
408 }
409
410 spm_data.modes = modes;
411 spm_data.num_modes = mode_count;
412
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600413 ret = msm_spm_dev_init(dev, &spm_data);
Mahesh Sivasubramanian11373322012-06-14 11:17:20 -0600414
Praveen Chidambaramaa9d52b2012-04-02 11:09:47 -0600415 if (ret < 0)
416 pr_warn("%s():failed core-id:%u ret:%d\n", __func__, cpu, ret);
417
418 return ret;
419
420fail:
421 pr_err("%s: Failed reading node=%s, key=%s\n",
422 __func__, node->full_name, key);
423 return -EFAULT;
424}
425
/* Device tree match table for SPM v2 nodes. */
static struct of_device_id msm_spm_match_table[] = {
	{.compatible = "qcom,spm-v2"},
	{},
};
430
/* Platform driver binding SPM v2 device tree nodes to msm_spm_dev_probe(). */
static struct platform_driver msm_spm_device_driver = {
	.probe = msm_spm_dev_probe,
	.driver = {
		.name = "spm-v2",
		.owner = THIS_MODULE,
		.of_match_table = msm_spm_match_table,
	},
};
439
/* Register the SPM platform driver; called from machine init code. */
int __init msm_spm_device_init(void)
{
	return platform_driver_register(&msm_spm_device_driver);
}