/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <../../mm/mm.h>

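/*
 * Backing store for the strongly ordered mapping set up below.  The
 * buffer is deliberately oversized so that a page-aligned PAGE_SIZE
 * region can always be carved out of it, wherever the linker happens
 * to place the array.
 */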
void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	unsigned long phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in "
				"Interrupt Context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

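/*
 * Drain any data still buffered towards the AXI bus: the CP15 c7, c10, 5
 * operation is a data memory barrier, and the following store to the
 * strongly ordered page cannot complete until earlier buffered writes
 * have been pushed out onto the bus.
 */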
void flush_axi_bus_buffer(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	__asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
			      : : "r" (0) : "memory");
	write_to_strongly_ordered_memory();
#endif
}

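/* Assumed minimum L1 data cache line size for the MSM targets built from
 * this file; the cache maintenance loops below step by this amount.
 */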
#define CACHE_LINE_SIZE 32

/* These cache related routines make the assumption that the associated
 * physical memory is contiguous. They will operate on all (L1
 * and L2 if present) caches.
 */
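/* CP15 operations used by the routines below (ARMv6/ARMv7 encodings):
 *   c7, c14, 1 - clean and invalidate D-cache line by virtual address
 *   c7, c10, 1 - clean D-cache line by virtual address
 *   c7, c6, 1  - invalidate D-cache line by virtual address
 *   c7, c10, 4 - data synchronization barrier (drain write buffer)
 *   c7, c5, 0  - invalidate entire instruction cache
 */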
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_flush_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	flush_axi_bus_buffer();
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_clean_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	flush_axi_bus_buffer();
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_inv_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	flush_axi_bus_buffer();
}

void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate maximum size needed, see where it ends up.
	 * Then free it -- in this path there are no other allocators
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */
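	/* Worked example (assuming 4K pages), with size = 1M and
	 * alignment = 1M: if the probe allocation of 1M + 1M - 4K lands
	 * at, say, 0x00301000, it is freed again, the 0xff000-byte gap up
	 * to the next 1M boundary is allocated as padding, and the real
	 * 1M allocation then starts at the aligned address 0x00400000.
	 * The padding is handed back at the end.
	 */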

	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

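/*
 * Optional hook for changing the power state of a range of physical
 * memory, presumably installed by platform code that supports dynamic
 * memory power management.  While it is NULL, the wrappers below are
 * no-ops that simply return 0.
 */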
int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

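/*
 * Return how much of a memory bank lies below the "unstable" boundary.
 * Memory at or above reserve_info->low_unstable_address may later be
 * powered down or removed, so reservations are only carved out of the
 * stable portion of a bank.
 */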
static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32 bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

static void __init calculate_reserve_limits(void)
{
	int i;
	struct membank *mb;
	int memtype;
	struct memtype_reserve *mt;
	unsigned long size;

	for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
		memtype = reserve_info->paddr_to_memtype(mb->start);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for bank at %lx\n",
				(unsigned long)mb->start);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		size = stable_size(mb, reserve_info->low_unstable_address);
		mt->limit = max(mt->limit, size);
	}
}

static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find a memory bank of the proper size
		 * as we have limited the size of the memory pool for
		 * each memory type to the size of the largest memory
		 * bank. Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = stable_size(mb,
				reserve_info->low_unstable_address);
			if (size >= mt->size) {
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

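/*
 * Carve the requested fmem region out of the top of the last (highest)
 * memory bank and return its physical base address to the caller.
 */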
unsigned long __init reserve_memory_for_fmem(unsigned long fmem_size)
{
	struct membank *mb;
	int ret;
	unsigned long fmem_phys;

	if (!fmem_size)
		return 0;

	mb = &meminfo.bank[meminfo.nr_banks - 1];
	fmem_phys = mb->start + (mb->size - fmem_size);
	ret = memblock_remove(fmem_phys, fmem_size);
	BUG_ON(ret);

	pr_info("fmem start %lx size %lx\n", fmem_phys, fmem_size);
	return fmem_phys;
}

static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

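/*
 * Top-level reservation sequence, expected to be run from the board's
 * reserve() callback: the board code reports how much memory each
 * memtype needs, the sizes are clamped to what the stable banks can
 * hold, the chosen ranges are removed from memblock, and mempools are
 * finally created on top of the carved-out regions.
 */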
void __init msm_reserve(void)
{
	memory_pool_init();
	reserve_info->calculate_reserve_sizes();
	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

/* emulation of the deprecated pmem_kalloc and pmem_kfree */
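/* A hypothetical caller combines one memtype flag with one alignment
 * flag, for example:
 *
 *	int32_t paddr = pmem_kalloc(SZ_1M,
 *				PMEM_MEMTYPE_EBI1 | PMEM_ALIGNMENT_1M);
 *	if (paddr < 0)
 *		return paddr;
 *	...
 *	pmem_kfree(paddr);
 *
 * On success the return value is the physical address of the
 * allocation; on failure it is a negative errno.
 */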
int32_t pmem_kalloc(const size_t size, const uint32_t flags)
{
	int pmem_memtype;
	int memtype = MEMTYPE_NONE;
	int ebi1_memtype = MEMTYPE_EBI1;
	unsigned int align;
	int32_t paddr;

	switch (flags & PMEM_ALIGNMENT_MASK) {
	case PMEM_ALIGNMENT_4K:
		align = SZ_4K;
		break;
	case PMEM_ALIGNMENT_1M:
		align = SZ_1M;
		break;
	default:
		pr_alert("Invalid alignment %x\n",
			(flags & PMEM_ALIGNMENT_MASK));
		return -EINVAL;
	}

	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		ebi1_memtype = MEMTYPE_EBI0;

	pmem_memtype = flags & PMEM_MEMTYPE_MASK;
	if (pmem_memtype == PMEM_MEMTYPE_EBI1)
		memtype = ebi1_memtype;
	else if (pmem_memtype == PMEM_MEMTYPE_SMI)
		memtype = MEMTYPE_SMI_KERNEL;
	else {
		pr_alert("Invalid memory type %x\n",
			flags & PMEM_MEMTYPE_MASK);
		return -EINVAL;
	}

	paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
		__builtin_return_address(0));

	if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
		paddr = _allocate_contiguous_memory_nomap(size,
			ebi1_memtype, align, __builtin_return_address(0));

	if (!paddr)
		return -ENOMEM;
	return paddr;
}
EXPORT_SYMBOL(pmem_kalloc);

int pmem_kfree(const int32_t physaddr)
{
	free_contiguous_memory_by_paddr(physaddr);

	return 0;
}
EXPORT_SYMBOL(pmem_kfree);

unsigned int msm_ttbr0;

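/* Read TTBR0 (CP15 c2, c0, 0), the translation table base register, so
 * that it is available to post-mortem debug tools via msm_ttbr0.
 */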
void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}