/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	unsigned long phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in "
				"interrupt context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

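/*
 * Usage sketch: on 7x27 a write to the strongly ordered page acts as
 * a heavyweight write barrier, draining prior normal-memory writes
 * before the CPU proceeds. A minimal, hypothetical caller (device
 * base and registers are illustrative, not from this file):
 *
 *	writel(cmd, my_dev_base + MY_CMD_REG);
 *	write_to_strongly_ordered_memory();
 *	writel(go, my_dev_base + MY_GO_REG);
 */
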
/* These cache-related routines assume that (when an outer cache is
 * present) the associated physical memory is contiguous.
 * They operate on all caches present (L1, and L2 if any).
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_flush_range((void *)vstart, (void *)(vstart + length));
	outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *)(vstart + length));
	outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *)(vstart + length));
	outer_inv_range(pstart, pstart + length);
}

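/*
 * Usage sketch (buffer name and size are hypothetical, not from this
 * file): callers pass matching virtual and physical ranges, e.g. for
 * a kmalloc'd buffer handed to a non-coherent DMA engine:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long phys = (unsigned long)virt_to_phys(buf);
 *
 *	clean_caches((unsigned long)buf, SZ_4K, phys);
 *	// ... device reads/writes the physical range ...
 *	invalidate_caches((unsigned long)buf, SZ_4K, phys);
 *	// ... CPU now sees what the device wrote ...
 */
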
void * __init alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate maximum size needed, see where it ends up.
	 * Then free it -- in this path there are no other allocators
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */

	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

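/*
 * Worked example for the scheme above (illustrative numbers only):
 * with 4K pages, size = 0x3000 and alignment = SZ_1M, suppose the
 * probe allocation returns addr = 0x2fff000. Then unused_size =
 * 0x100000 - 0xff000 = 0x1000, so a 0x1000-byte filler is allocated
 * at 0x2fff000 and the real 0x3000-byte piece lands at the
 * 1MB-aligned address 0x3000000, after which the filler is freed.
 */
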
int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}

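/*
 * Hook sketch (hypothetical names, assuming a platform DDR driver):
 * the callback is installed at init time, e.g.
 *
 *	static int my_ddr_set_state(u64 start, u64 size, int state);
 *	...
 *	change_memory_power = my_ddr_set_state;
 *
 * where my_ddr_set_state programs the memory controller for
 * MEMORY_ACTIVE, MEMORY_SELF_REFRESH or MEMORY_DEEP_POWERDOWN over
 * the [start, start + size) range. Until a hook is installed, the
 * three wrappers above are harmless no-ops returning 0.
 */
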
char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32-bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

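/*
 * Worked example (illustrative numbers): for a bank with start =
 * 0x80000000 and size = 0x80000000, upper_limit wraps to 0 in a
 * 32-bit unsigned long, so the roll-over branch is skipped. With
 * unstable_limit = 0xa0000000 the function returns
 * 0xa0000000 - 0x80000000 = 0x20000000: only the first 512MB of the
 * bank counts as stable.
 */
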
/* stable size of all memory banks contiguous to and below this one */
static unsigned long total_stable_size(unsigned long bank)
{
	int i;
	struct membank *mb = &meminfo.bank[bank];
	int memtype = reserve_info->paddr_to_memtype(mb->start);
	unsigned long size;

	size = stable_size(mb, reserve_info->low_unstable_address);
	for (i = bank - 1, mb = &meminfo.bank[bank - 1]; i >= 0; i--, mb--) {
		if (mb->start + mb->size != (mb + 1)->start)
			break;
		if (reserve_info->paddr_to_memtype(mb->start) != memtype)
			break;
		size += stable_size(mb, reserve_info->low_unstable_address);
	}
	return size;
}

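/*
 * Example (hypothetical layout): with bank 0 at [0x00200000,
 * 0x10000000) and bank 1 starting exactly at 0x10000000 with the
 * same memtype, total_stable_size(1) sums the stable portions of
 * both banks; a hole or a memtype change between banks stops the
 * downward walk.
 */
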
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700210static void __init calculate_reserve_limits(void)
211{
212 int i;
213 struct membank *mb;
214 int memtype;
215 struct memtype_reserve *mt;
Larry Bassel7fb0b252011-07-22 14:18:50 -0700216 unsigned long size;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700217
218 for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
219 memtype = reserve_info->paddr_to_memtype(mb->start);
220 if (memtype == MEMTYPE_NONE) {
221 pr_warning("unknown memory type for bank at %lx\n",
222 (long unsigned int)mb->start);
223 continue;
224 }
225 mt = &reserve_info->memtype_reserve_table[memtype];
Larry Bassel48e4f5f2012-02-14 13:54:12 -0800226 size = total_stable_size(i);
Larry Bassel7fb0b252011-07-22 14:18:50 -0700227 mt->limit = max(mt->limit, size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700228 }
229}
230
static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

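/*
 * Worked example: with SECTION_SIZE = 1MB (0x100000), a request of
 * mt->size = 0x123000 under MEMTYPE_FLAGS_1M_ALIGN rounds up as
 * (0x123000 + 0xfffff) & ~0xfffff = 0x200000, i.e. two sections.
 */
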
static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find memory bank(s) of the proper size,
		 * as we have limited the size of the memory pool for
		 * each memory type to the largest total size of the
		 * banks which are contiguous and of the correct memory
		 * type. Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = total_stable_size(i);
			if (size >= mt->size) {
				size = stable_size(mb,
					reserve_info->low_unstable_address);
				if (!size)
					continue;
				/* mt->size may be larger than size; all this
				 * means is that we are carving the memory pool
				 * out of multiple contiguous memory banks.
				 */
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

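/*
 * Walkthrough (illustrative numbers): to reserve an 8MB EBI1 pool
 * when bank 1 spans [0x40000000, 0x50000000) and is fully stable,
 * the loop picks bank 1 (the highest matching bank), sets
 * mt->start = 0x40000000 + (0x10000000 - 0x800000) = 0x4f800000, and
 * memblock_remove() carves those 8MB out of the kernel's view so the
 * pool owns them exclusively.
 */
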
static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

#define MAX_FIXED_AREA_SIZE 0x11000000

void __init msm_reserve(void)
{
	unsigned long msm_fixed_area_size;
	unsigned long msm_fixed_area_start;

	memory_pool_init();
	reserve_info->calculate_reserve_sizes();

	msm_fixed_area_size = reserve_info->fixed_area_size;
	msm_fixed_area_start = reserve_info->fixed_area_start;
	if (msm_fixed_area_size)
		if (msm_fixed_area_start > reserve_info->low_unstable_address
			- MAX_FIXED_AREA_SIZE)
			reserve_info->low_unstable_address =
				msm_fixed_area_start;

	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

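/*
 * Board-file sketch (hypothetical names, assuming the usual MSM
 * pattern): machine init code points reserve_info at its own table
 * before the generic .reserve hook calls msm_reserve(), e.g.
 *
 *	static struct reserve_info my_board_reserve_info = {
 *		.memtype_reserve_table   = my_reserve_table,
 *		.calculate_reserve_sizes = my_calculate_reserve_sizes,
 *		.paddr_to_memtype        = my_paddr_to_memtype,
 *	};
 *
 *	static void __init my_board_reserve(void)
 *	{
 *		reserve_info = &my_board_reserve_info;
 *		msm_reserve();
 *	}
 */
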
static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

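/*
 * Usage sketch (hypothetical caller): the mapped variant returns a
 * kernel virtual address; the _nomap variant returns only a physical
 * address, for hardware that accesses the memory itself:
 *
 *	void *va = allocate_contiguous_ebi(SZ_1M, SZ_4K, 1);
 *	unsigned long pa = allocate_contiguous_ebi_nomap(SZ_1M, SZ_1M);
 */
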
/* emulation of the deprecated pmem_kalloc and pmem_kfree */
int32_t pmem_kalloc(const size_t size, const uint32_t flags)
{
	int pmem_memtype;
	int memtype = MEMTYPE_NONE;
	int ebi1_memtype = MEMTYPE_EBI1;
	unsigned int align;
	int32_t paddr;

	switch (flags & PMEM_ALIGNMENT_MASK) {
	case PMEM_ALIGNMENT_4K:
		align = SZ_4K;
		break;
	case PMEM_ALIGNMENT_1M:
		align = SZ_1M;
		break;
	default:
		pr_alert("Invalid alignment %x\n",
			(flags & PMEM_ALIGNMENT_MASK));
		return -EINVAL;
	}

	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		ebi1_memtype = MEMTYPE_EBI0;

	pmem_memtype = flags & PMEM_MEMTYPE_MASK;
	if (pmem_memtype == PMEM_MEMTYPE_EBI1)
		memtype = ebi1_memtype;
	else if (pmem_memtype == PMEM_MEMTYPE_SMI)
		memtype = MEMTYPE_SMI_KERNEL;
	else {
		pr_alert("Invalid memory type %x\n",
			flags & PMEM_MEMTYPE_MASK);
		return -EINVAL;
	}

	paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
		__builtin_return_address(0));

	/* fall back to EBI when an SMI allocation cannot be satisfied */
	if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
		paddr = _allocate_contiguous_memory_nomap(size,
			ebi1_memtype, align, __builtin_return_address(0));

	if (!paddr)
		return -ENOMEM;
	return paddr;
}
EXPORT_SYMBOL(pmem_kalloc);

int pmem_kfree(const int32_t physaddr)
{
	free_contiguous_memory_by_paddr(physaddr);

	return 0;
}
EXPORT_SYMBOL(pmem_kfree);

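/*
 * Usage sketch for the emulation above, using the legacy pmem flags:
 * allocate 1MB of EBI pmem at 1MB alignment, then free it by
 * physical address (error returns are negative errno values):
 *
 *	int32_t pa = pmem_kalloc(SZ_1M,
 *			PMEM_MEMTYPE_EBI1 | PMEM_ALIGNMENT_1M);
 *	if (pa >= 0)
 *		pmem_kfree(pa);
 */
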
unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

/* Request the fmem region for its client (C state) or hand it back
 * to fmem (T state); both are thin wrappers suitable for use as
 * callbacks.
 */
int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}