/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
#include <linux/of_fdt.h>

/* fixme */
#include <asm/tlbflush.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

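/*
 * Map one page of the static strongly_ordered_mem buffer at the fixed
 * virtual address MSM_STRONGLY_ORDERED_PAGE with strongly-ordered
 * memory attributes, for use by write_to_strongly_ordered_memory()
 * below. Only compiled in on MSM7x27 (and not 7x27A).
 */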
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	long unsigned int phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

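/*
 * Perform a dummy write to the strongly-ordered page, mapping it first
 * if necessary. The mapping cannot be created from interrupt context,
 * so reaching this path in an interrupt before the page is mapped is
 * treated as a fatal bug.
 */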
void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in "
				"Interrupt Context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

/* These cache related routines make the assumption (if outer cache is
 * available) that the associated physical memory is contiguous.
 * They will operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_flush_range((void *)vstart, (void *)(vstart + length));
	outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *)(vstart + length));
	outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *)(vstart + length));
	outer_inv_range(pstart, pstart + length);
}

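/*
 * Early-boot allocator for alignments larger than alloc_bootmem()
 * provides: over-allocate to see where the region lands, then
 * re-allocate the aligned piece and free the slack. The trick is
 * spelled out in the comment in the body.
 */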
void * __init alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate maximum size needed, see where it ends up.
	 * Then free it -- in this path there are no other allocators
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */

	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

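/*
 * Return how much of memory bank mb lies below unstable_limit (the
 * lowest address that may later be hot-removed). A limit of 0 means
 * all memory is considered stable.
 */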
static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32 bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

/* stable size of all memory banks contiguous to and below this one */
static unsigned long total_stable_size(unsigned long bank)
{
	int i;
	struct membank *mb = &meminfo.bank[bank];
	int memtype = reserve_info->paddr_to_memtype(mb->start);
	unsigned long size;

	size = stable_size(mb, reserve_info->low_unstable_address);
	for (i = bank - 1, mb = &meminfo.bank[bank - 1]; i >= 0; i--, mb--) {
		if (mb->start + mb->size != (mb + 1)->start)
			break;
		if (reserve_info->paddr_to_memtype(mb->start) != memtype)
			break;
		size += stable_size(mb, reserve_info->low_unstable_address);
	}
	return size;
}

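/*
 * For each memory type, cap the reservation (mt->limit) at the largest
 * run of contiguous, stable, same-type memory banks found in meminfo.
 */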
static void __init calculate_reserve_limits(void)
{
	int i;
	struct membank *mb;
	int memtype;
	struct memtype_reserve *mt;
	unsigned long size;

	for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
		memtype = reserve_info->paddr_to_memtype(mb->start);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for bank at %lx\n",
				(long unsigned int)mb->start);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		size = total_stable_size(i);
		mt->limit = max(mt->limit, size);
	}
}

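/*
 * Round each requested reservation up to a 1MB section where the
 * memtype demands it, then clamp it to the limit computed above.
 */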
static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

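/*
 * Carve each memory type's reservation out of the highest-addressed
 * suitable bank and pull it out of memblock; the bank-selection
 * strategy is spelled out in the comment inside the loop.
 */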
static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find memory bank(s) of the proper size
		 * as we have limited the size of the memory pool for
		 * each memory type to the largest total size of the memory
		 * banks which are contiguous and of the correct memory type.
		 * Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = total_stable_size(i);
			if (size >= mt->size) {
				size = stable_size(mb,
					reserve_info->low_unstable_address);
				if (!size)
					continue;
				/* mt->size may be larger than size, all this
				 * means is that we are carving the memory pool
				 * out of multiple contiguous memory banks.
				 */
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

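/*
 * Hand each carved-out region to the mempool layer so that later
 * allocate_contiguous_memory() calls can draw from it.
 */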
static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

#define MAX_FIXED_AREA_SIZE 0x11000000

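/*
 * Top-level entry point for board memory reservation: size the pools,
 * account for any fixed area, clamp against the per-memtype limits,
 * remove the memory from memblock, and finally create the mempools.
 */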
void __init msm_reserve(void)
{
	unsigned long msm_fixed_area_size;
	unsigned long msm_fixed_area_start;

	memory_pool_init();
	reserve_info->calculate_reserve_sizes();

	msm_fixed_area_size = reserve_info->fixed_area_size;
	msm_fixed_area_start = reserve_info->fixed_area_start;
	if (msm_fixed_area_size)
		if (msm_fixed_area_start > reserve_info->low_unstable_address
			- MAX_FIXED_AREA_SIZE)
			reserve_info->low_unstable_address =
				msm_fixed_area_start;

	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

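/*
 * Convenience wrappers around the mempool allocator that always draw
 * from EBI memory. A minimal usage sketch (buffer sizes and alignments
 * are illustrative only):
 *
 *	void *buf = allocate_contiguous_ebi(SZ_1M, SZ_4K, 0);
 *	unsigned long phys = allocate_contiguous_ebi_nomap(SZ_64K, SZ_4K);
 */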
static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

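/*
 * Request and release the shared fmem region by toggling it between
 * FMEM_C_STATE and FMEM_T_STATE; the unused void * argument lets these
 * be used directly as callbacks.
 */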
int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}

static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};

int msm_get_memory_type_from_name(const char *memtype_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
		if (memtype_names[i] &&
			strcmp(memtype_name, memtype_names[i]) == 0)
			return i;
	}

	pr_err("Could not find memory type %s\n", memtype_name);
	return -EINVAL;
}

static int reserve_memory_type(const char *mem_name,
	struct memtype_reserve *reserve_table,
	int size)
{
	int ret = msm_get_memory_type_from_name(mem_name);

	if (ret >= 0) {
		reserve_table[ret].size += size;
		ret = 0;
	}
	return ret;
}

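/*
 * Return 1 if the flattened device tree node is compatible with one of
 * the strings registered between __compat_exports_start and
 * __compat_exports_end, 0 otherwise.
 */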
static int check_for_compat(unsigned long node)
{
	char **start = __compat_exports_start;

	for ( ; start < __compat_exports_end; start++)
		if (of_flat_dt_is_compatible(node, *start))
			return 1;

	return 0;
}

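/*
 * Flat-device-tree scan callback: for each compatible node, add any
 * "qcom,memory-reservation-*" request to the reserve table passed in
 * via *data, and honour any "qcom,memblock-remove" <base size> pair by
 * pulling that range out of memblock. A hypothetical node exercising
 * both paths (node name and values are illustrative only):
 *
 *	example-client {
 *		compatible = "qcom,example-client";
 *		qcom,memory-reservation-type = "EBI1";
 *		qcom,memory-reservation-size = <0x100000>;
 *		qcom,memblock-remove = <0x80000000 0x100000>;
 *	};
 */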
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
	int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int memory_size;
	unsigned int memory_start;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
		"qcom,memory-reservation-type",
		&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
		"qcom,memblock-remove",
		&memory_remove_prop_length);

	if (memory_name_prop || memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_name_prop) {
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
			"qcom,memory-reservation-size",
			&memory_size_prop_length);

		if (memory_size_prop &&
			(memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
				data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
					uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
				uname);
		}
	}

mem_remove:

	if (memory_remove_prop) {
		if (memory_remove_prop_length != (2 * sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_remove_prop[0]);
		memory_size = be32_to_cpu(memory_remove_prop[1]);

		ret = memblock_remove(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to remove memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s removed memory %x-%x\n", uname,
				memory_start, memory_start+memory_size);
	}

out:
	return 0;
}

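/* Total DDR size: the sum of all memory bank sizes in meminfo. */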
unsigned long get_ddr_size(void)
{
	unsigned int i;
	unsigned long ret = 0;

	for (i = 0; i < meminfo.nr_banks; i++)
		ret += meminfo.bank[i].size;

	return ret;
}