/*
 * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
13
14#include <linux/atomic.h>
15#include <linux/export.h>
16#include <linux/kernel.h>
17#include <linux/memory_alloc.h>
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/slab.h>
21#include <linux/string.h>
22#include <asm/io.h>
23#include <asm-generic/sizes.h>
24#include <mach/memory.h>
25#include <mach/msm_rtb.h>
26#include <mach/system.h>
27
/* Recognizable byte pattern marking the start of every record, so
 * entries can be located when scanning a raw memory dump. */
#define SENTINEL_BYTE_1 0xFF
#define SENTINEL_BYTE_2 0xAA
#define SENTINEL_BYTE_3 0xFF

/* Each record written to the buffer:
 * 1) 3 bytes of sentinel
 * 2) 1 byte of log type
 * 3) 4 bytes of where the caller came from
 * 4) 4 bytes of index
 * 5) 4 bytes of extra data from the caller
 *
 * Total = 16 bytes.
 *
 * NOTE(review): the 4-byte figures for caller/idx/data assume a 32-bit
 * build (void * and unsigned long are 4 bytes there) — confirm if this
 * is ever built LP64.
 */
struct msm_rtb_layout {
	unsigned char sentinel[3];
	unsigned char log_type;
	void *caller;
	unsigned long idx;
	void *data;
} __attribute__ ((__packed__));
48
49
/* Global state for the trace buffer. */
struct msm_rtb_state {
	struct msm_rtb_layout *rtb;	/* mapped (virtual) base of the buffer */
	unsigned long phys;		/* physical address of the buffer */
	int nentries;			/* record count, rounded down to a power of 2 */
	int size;			/* buffer size in bytes, SZ_4K aligned */
	int enabled;			/* nonzero once logging is allowed (module param) */
	uint32_t filter;		/* bitmask of logk_event_type bits to log */
	int step_size;			/* index stride: num_possible_cpus() or 1 */
};
59
#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
/* Per-CPU write index: each CPU's counter starts at its cpu number and
 * advances by step_size, so CPUs claim disjoint slots without locking. */
DEFINE_PER_CPU(atomic_t, msm_rtb_idx_cpu);
#else
/* Single shared write index, advanced atomically by every logger. */
static atomic_t msm_rtb_idx;
#endif

/* Buffer defaults to 1MB; overridable via the "msm_rtb_size" early param. */
struct msm_rtb_state msm_rtb = {
	.size = SZ_1M,
};

/* Runtime controls, writable by root through module parameters. */
module_param_named(filter, msm_rtb.filter, uint, 0644);
module_param_named(enable, msm_rtb.enabled, int, 0644);
72
73int msm_rtb_event_should_log(enum logk_event_type log_type)
74{
75 return msm_rtb.enabled &&
76 ((1 << log_type) & msm_rtb.filter);
77}
78EXPORT_SYMBOL(msm_rtb_event_should_log);
79
80static void msm_rtb_emit_sentinel(struct msm_rtb_layout *start)
81{
82 start->sentinel[0] = SENTINEL_BYTE_1;
83 start->sentinel[1] = SENTINEL_BYTE_2;
84 start->sentinel[2] = SENTINEL_BYTE_3;
85}
86
/* Record the event type in the entry (truncated to one byte). */
static void msm_rtb_write_type(enum logk_event_type log_type,
			struct msm_rtb_layout *start)
{
	start->log_type = (char)log_type;
}

/* Record the caller's address in the entry. */
static void msm_rtb_write_caller(void *caller, struct msm_rtb_layout *start)
{
	start->caller = caller;
}

/* Record the write index at which the entry was allocated. */
static void msm_rtb_write_idx(unsigned long idx,
			struct msm_rtb_layout *start)
{
	start->idx = idx;
}

/* Record the caller-supplied extra data word in the entry. */
static void msm_rtb_write_data(void *data, struct msm_rtb_layout *start)
{
	start->data = data;
}
108
109static int __init msm_rtb_set_buffer_size(char *p)
110{
111 int s;
112
113 s = memparse(p, NULL);
114 msm_rtb.size = ALIGN(s, SZ_4K);
115 return 0;
116}
117early_param("msm_rtb_size", msm_rtb_set_buffer_size);
118
#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
/*
 * Claim the next buffer slot for the current CPU.
 *
 * Each CPU's counter starts at its cpu number and advances by
 * step_size (the number of possible CPUs), so concurrent CPUs never
 * hand out the same slot.  Returns the pre-increment index; the
 * counter is allowed to wrap — the caller masks with (nentries - 1).
 */
static int msm_rtb_get_idx(void)
{
	int cpu, i;
	atomic_t *index;

	/*
	 * ideally we would use get_cpu but this is a close enough
	 * approximation for our purposes.
	 */
	cpu = raw_smp_processor_id();

	index = &per_cpu(msm_rtb_idx_cpu, cpu);

	/* Fetch-and-add: report the slot we claimed, not the new value. */
	i = atomic_add_return(msm_rtb.step_size, index);
	i -= msm_rtb.step_size;

	return i;
}
#else
/*
 * Claim the next buffer slot from the single shared counter.
 * Returns the pre-increment index; wrap-around is handled by the
 * caller's mask.
 */
static int msm_rtb_get_idx(void)
{
	int i;

	i = atomic_inc_return(&msm_rtb_idx);
	i--;

	return i;
}
#endif
149
/*
 * Write one record into the trace buffer.
 *
 * @log_type: event class, checked against the enable/filter controls
 * @caller:   address recorded as the origin of the event
 * @data:     one word of caller-defined payload
 *
 * Returns 1 if the event was logged, 0 if it was filtered out.
 *
 * Lockless: a slot is reserved via msm_rtb_get_idx(), the fields are
 * filled in, and the final mb() makes sure the record has reached the
 * buffer before we return.
 */
int uncached_logk_pc(enum logk_event_type log_type, void *caller,
		void *data)
{
	int i;
	struct msm_rtb_layout *start;

	if (!msm_rtb_event_should_log(log_type))
		return 0;

	i = msm_rtb_get_idx();

	/* nentries is a power of 2, so the mask also handles a wrapped
	 * (negative) index correctly. */
	start = &msm_rtb.rtb[i & (msm_rtb.nentries - 1)];

	msm_rtb_emit_sentinel(start);
	msm_rtb_write_type(log_type, start);
	msm_rtb_write_caller(caller, start);
	msm_rtb_write_idx(i, start);
	msm_rtb_write_data(data, start);
	mb();

	return 1;
}
EXPORT_SYMBOL(uncached_logk_pc);
173
/*
 * Log an event, recording the immediate caller as its origin.
 * noinline is essential here: it keeps this function a real call
 * frame so __builtin_return_address(0) resolves to our caller rather
 * than to wherever an inlined copy happened to land.
 */
noinline int uncached_logk(enum logk_event_type log_type, void *data)
{
	return uncached_logk_pc(log_type, __builtin_return_address(0), data);
}
EXPORT_SYMBOL(uncached_logk);
179
/*
 * Allocate and map the trace buffer, initialise the write indices,
 * and enable logging.  Returns 0 on success or a negative errno.
 */
int msm_rtb_init(void)
{
#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
	unsigned int cpu;
#endif

	/* The early param may have requested a bad size. */
	if (msm_rtb.size <= 0 || msm_rtb.size > SZ_1M)
		return -EINVAL;

	/*
	 * The ioremap call is made separately to store the physical
	 * address of the buffer. This is necessary for cases where
	 * the only way to access the buffer is a physical address.
	 */
	msm_rtb.phys = allocate_contiguous_ebi_nomap(msm_rtb.size, SZ_4K);

	if (!msm_rtb.phys)
		return -ENOMEM;

	msm_rtb.rtb = ioremap(msm_rtb.phys, msm_rtb.size);

	if (!msm_rtb.rtb) {
		/* Release the backing memory claimed above. */
		free_contiguous_memory_by_paddr(msm_rtb.phys);
		return -ENOMEM;
	}

	msm_rtb.nentries = msm_rtb.size / sizeof(struct msm_rtb_layout);

	/* Round this down to a power of 2 */
	msm_rtb.nentries = __rounddown_pow_of_two(msm_rtb.nentries);

	/* NOTE(review): plain memset on an ioremap()ed mapping — memset_io
	 * would match the mapping's __iomem nature, though the rest of this
	 * file also accesses the buffer with plain pointers; confirm this
	 * is intentional on this platform. */
	memset(msm_rtb.rtb, 0, msm_rtb.size);


#if defined(CONFIG_MSM_RTB_SEPARATE_CPUS)
	/* Stagger per-CPU counters so CPU n owns slots n, n+step, ... */
	for_each_possible_cpu(cpu) {
		atomic_t *a = &per_cpu(msm_rtb_idx_cpu, cpu);
		atomic_set(a, cpu);
	}
	msm_rtb.step_size = num_possible_cpus();
#else
	atomic_set(&msm_rtb_idx, 0);
	msm_rtb.step_size = 1;
#endif


	msm_rtb.enabled = 1;
	return 0;
}
module_init(msm_rtb_init)