/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>

/* fixme */
#include <asm/tlbflush.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

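/*
 * Map one page of the statically allocated buffer above as a strongly
 * ordered device mapping at MSM_STRONGLY_ORDERED_PAGE.  This is only
 * done on MSM7x27 (not 7x27A); on other targets the function is a no-op.
 */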
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	long unsigned int phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in "
				"Interrupt Context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

/* These cache related routines make the assumption (if outer cache is
 * available) that the associated physical memory is contiguous.
 * They will operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_flush_range((void *)vstart, (void *) (vstart + length));
	outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *) (vstart + length));
	outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *) (vstart + length));
	outer_inv_range(pstart, pstart + length);
}

void * __init alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate maximum size needed, see where it ends up.
	 * Then free it -- in this path there are no other allocators
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */
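	/*
	 * Worked example (hypothetical numbers): size = 0x100000,
	 * alignment = 0x100000, PAGE_SIZE = 0x1000.  tmp_size is then
	 * 0x1ff000; if the trial allocation lands at 0x20001000, the
	 * padding allocation of 0xff000 bytes reclaims 0x20001000 up to
	 * 0x20100000, the real allocation then starts aligned at
	 * 0x20100000, and the padding is freed again.
	 */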

	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

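/*
 * Return the number of bytes of a memory bank that lie below
 * unstable_limit.  A limit of 0 means the whole bank is treated as
 * stable.
 */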
static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32 bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

/* stable size of all memory banks contiguous to and below this one */
static unsigned long total_stable_size(unsigned long bank)
{
	int i;
	struct membank *mb = &meminfo.bank[bank];
	int memtype = reserve_info->paddr_to_memtype(mb->start);
	unsigned long size;

	size = stable_size(mb, reserve_info->low_unstable_address);
	for (i = bank - 1, mb = &meminfo.bank[bank - 1]; i >= 0; i--, mb--) {
		if (mb->start + mb->size != (mb + 1)->start)
			break;
		if (reserve_info->paddr_to_memtype(mb->start) != memtype)
			break;
		size += stable_size(mb, reserve_info->low_unstable_address);
	}
	return size;
}

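/*
 * Cap each memory type's reservation at the largest run of contiguous,
 * stable memory banks of that type found in meminfo.
 */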
static void __init calculate_reserve_limits(void)
{
	int i;
	struct membank *mb;
	int memtype;
	struct memtype_reserve *mt;
	unsigned long size;

	for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
		memtype = reserve_info->paddr_to_memtype(mb->start);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for bank at %lx\n",
				(long unsigned int)mb->start);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		size = total_stable_size(i);
		mt->limit = max(mt->limit, size);
	}
}

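/*
 * Round reservations flagged for 1MB alignment up to a section boundary
 * and clamp every reservation to the limit computed above.
 */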
static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find memory bank(s) of the proper size
		 * as we have limited the size of the memory pool for
		 * each memory type to the largest total size of the memory
		 * banks which are contiguous and of the correct memory type.
		 * Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = total_stable_size(i);
			if (size >= mt->size) {
				size = stable_size(mb,
					reserve_info->low_unstable_address);
				if (!size)
					continue;
				/* mt->size may be larger than size, all this
				 * means is that we are carving the memory pool
				 * out of multiple contiguous memory banks.
				 */
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

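/*
 * Hand each reserved region over to the mempool allocator so that the
 * contiguous-memory allocators (e.g. allocate_contiguous_memory(), used
 * below) can carve allocations out of it.
 */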
static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

#define MAX_FIXED_AREA_SIZE 0x11000000

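/*
 * Top-level entry point for MSM memory reservation.  The sequence is:
 * invoke the platform's calculate_reserve_sizes() callback, account for
 * any fixed area, compute per-memtype limits, clamp and align the
 * requested sizes, carve the chosen regions out of memblock, and turn
 * them into mempools.
 */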
void __init msm_reserve(void)
{
	unsigned long msm_fixed_area_size;
	unsigned long msm_fixed_area_start;

	memory_pool_init();
	reserve_info->calculate_reserve_sizes();

	msm_fixed_area_size = reserve_info->fixed_area_size;
	msm_fixed_area_start = reserve_info->fixed_area_start;
	if (msm_fixed_area_size)
		if (msm_fixed_area_start > reserve_info->low_unstable_address
			- MAX_FIXED_AREA_SIZE)
			reserve_info->low_unstable_address =
				msm_fixed_area_start;

	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

/* emulation of the deprecated pmem_kalloc and pmem_kfree */
int32_t pmem_kalloc(const size_t size, const uint32_t flags)
{
	int pmem_memtype;
	int memtype = MEMTYPE_NONE;
	int ebi1_memtype = MEMTYPE_EBI1;
	unsigned int align;
	int32_t paddr;

	switch (flags & PMEM_ALIGNMENT_MASK) {
	case PMEM_ALIGNMENT_4K:
		align = SZ_4K;
		break;
	case PMEM_ALIGNMENT_1M:
		align = SZ_1M;
		break;
	default:
		pr_alert("Invalid alignment %x\n",
			(flags & PMEM_ALIGNMENT_MASK));
		return -EINVAL;
	}

	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		ebi1_memtype = MEMTYPE_EBI0;

	pmem_memtype = flags & PMEM_MEMTYPE_MASK;
	if (pmem_memtype == PMEM_MEMTYPE_EBI1)
		memtype = ebi1_memtype;
	else if (pmem_memtype == PMEM_MEMTYPE_SMI)
		memtype = MEMTYPE_SMI_KERNEL;
	else {
		pr_alert("Invalid memory type %x\n",
			flags & PMEM_MEMTYPE_MASK);
		return -EINVAL;
	}

	paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
		__builtin_return_address(0));

	if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
		paddr = _allocate_contiguous_memory_nomap(size,
			ebi1_memtype, align, __builtin_return_address(0));

	if (!paddr)
		return -ENOMEM;
	return paddr;
}
EXPORT_SYMBOL(pmem_kalloc);

int pmem_kfree(const int32_t physaddr)
{
	free_contiguous_memory_by_paddr(physaddr);

	return 0;
}
EXPORT_SYMBOL(pmem_kfree);

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

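/*
 * Callbacks for claiming and releasing the fmem region: a request moves
 * fmem into its C state and a release returns it to its T state (the
 * states are defined in <linux/fmem.h>).
 */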
int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}