/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <../../mm/mm.h>

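/*
 * Force device memory attributes on mappings whose physical target
 * falls in the 0x88000000-0xD0000000 window before handing the range
 * to remap_pfn_range().
 */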
int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
                            unsigned long pfn, unsigned long size, pgprot_t prot)
{
        unsigned long pfn_addr = pfn << PAGE_SHIFT;

        if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) {
                prot = pgprot_device(prot);
                pr_debug("remapping device %lx\n", pgprot_val(prot));
        }
        return remap_pfn_range(vma, addr, pfn, size, prot);
}

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

/*
 * The trick of making the zero page strongly ordered no longer works,
 * and we do not want to create a second, strongly ordered alias of the
 * zero page.  Manually changing the page-table bits for the zero page
 * would have unnecessary side effects elsewhere, so the page has to
 * come from somewhere else: a page-aligned slice of the static
 * strongly_ordered_mem buffer below is remapped instead.
 */
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
        unsigned long phys;
        struct map_desc map;

        if (strongly_ordered_page)
                return;

        strongly_ordered_page =
                (void *)PFN_ALIGN((unsigned long)&strongly_ordered_mem);
        phys = __pa(strongly_ordered_page);

        map.pfn = __phys_to_pfn(phys);
        map.virtual = MSM_STRONGLY_ORDERED_PAGE;
        map.length = PAGE_SIZE;
        map.type = MT_DEVICE_STRONGLY_ORDERED;
        create_mapping(&map);

        printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
        if (!strongly_ordered_page) {
                if (!in_interrupt())
                        map_page_strongly_ordered();
                else {
                        printk(KERN_ALERT "Cannot map strongly ordered page in "
                                "interrupt context\n");
                        /* capture it here before the allocation fails later */
                        BUG();
                }
        }
        *(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

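/*
 * Drain the AXI bus write buffer: issue a data memory barrier and then
 * write to the strongly ordered page so that any buffered writes are
 * pushed out to memory.
 */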
void flush_axi_bus_buffer(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
        __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5"
                              : : "r" (0) : "memory");
        write_to_strongly_ordered_memory();
#endif
}

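/* L1 cache line size, in bytes, assumed by the maintenance loops below. */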
#define CACHE_LINE_SIZE 32

/* These cache-related routines assume that the associated physical
 * memory is contiguous.  They operate on all caches present (L1, and
 * L2 if one exists).
 */
void clean_and_invalidate_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        unsigned long vaddr;

        /* clean and invalidate each L1 data cache line by virtual address */
        for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
                asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
        outer_flush_range(pstart, pstart + length);
#endif
        /* data synchronization barrier, then invalidate the instruction cache */
        asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
        asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

        flush_axi_bus_buffer();
}

void clean_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        unsigned long vaddr;

        /* clean each L1 data cache line by virtual address */
        for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
                asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
        outer_clean_range(pstart, pstart + length);
#endif
        /* data synchronization barrier, then invalidate the instruction cache */
        asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
        asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

        flush_axi_bus_buffer();
}

void invalidate_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        unsigned long vaddr;

        /* invalidate each L1 data cache line by virtual address */
        for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
                asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
        outer_inv_range(pstart, pstart + length);
#endif
        /* data synchronization barrier, then invalidate the instruction cache */
        asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
        asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

        flush_axi_bus_buffer();
}

void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
        void *unused_addr = NULL;
        unsigned long addr, tmp_size, unused_size;

        /* Allocate the maximum size needed and see where it ends up,
         * then free it -- in this path there are no other allocators,
         * so we can rely on getting the same address back when we
         * allocate a smaller piece that is aligned at the end (if
         * necessary) followed by the piece we really want, and then
         * free the unused first piece.
         */

        tmp_size = size + alignment - PAGE_SIZE;
        addr = (unsigned long)alloc_bootmem(tmp_size);
        free_bootmem(__pa(addr), tmp_size);

        unused_size = alignment - (addr % alignment);
        if (unused_size)
                unused_addr = alloc_bootmem(unused_size);

        addr = (unsigned long)alloc_bootmem(size);
        if (unused_size)
                free_bootmem(__pa(unused_addr), unused_size);

        return (void *)addr;
}
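
/*
 * Example (illustrative only): reserve 1 MB of boot memory aligned to a
 * 1 MB boundary during early init:
 *
 *      void *buf = alloc_bootmem_aligned(SZ_1M, SZ_1M);
 */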

int (*change_memory_power)(unsigned long, unsigned long, int);

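/*
 * The three helpers below call the optional change_memory_power()
 * hook, installed by platform code, to move a range of physical pages
 * between the active, self-refresh and deep-power-down states.  When
 * no hook is installed they simply return 0.
 */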
int platform_physical_remove_pages(unsigned long start_pfn,
        unsigned long nr_pages)
{
        if (!change_memory_power)
                return 0;
        return change_memory_power(start_pfn, nr_pages, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(unsigned long start_pfn,
        unsigned long nr_pages)
{
        if (!change_memory_power)
                return 0;
        return change_memory_power(start_pfn, nr_pages, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(unsigned long start_pfn,
        unsigned long nr_pages)
{
        if (!change_memory_power)
                return 0;
        return change_memory_power(start_pfn, nr_pages, MEMORY_SELF_REFRESH);
}

char *memtype_name[] = {
        "SMI_KERNEL",
        "SMI",
        "EBI0",
        "EBI1"
};

struct reserve_info *reserve_info;

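/*
 * Return how many bytes of a memory bank lie below the given unstable
 * limit (reserve_info->low_unstable_address).  A limit of 0 means the
 * whole bank counts as stable.
 */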
static unsigned long stable_size(struct membank *mb,
        unsigned long unstable_limit)
{
        if (!unstable_limit || mb->start + mb->size <= unstable_limit)
                return mb->size;
        if (mb->start >= unstable_limit)
                return 0;
        return unstable_limit - mb->start;
}

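/*
 * Cap each memory type's reservation at the size of the largest stable
 * bank of that type, so that a single bank can always satisfy it.
 */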
static void __init calculate_reserve_limits(void)
{
        int i;
        struct membank *mb;
        int memtype;
        struct memtype_reserve *mt;
        unsigned long size;

        for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
                memtype = reserve_info->paddr_to_memtype(mb->start);
                if (memtype == MEMTYPE_NONE) {
                        pr_warning("unknown memory type for bank at %lx\n",
                                (unsigned long)mb->start);
                        continue;
                }
                mt = &reserve_info->memtype_reserve_table[memtype];
                size = stable_size(mb, reserve_info->low_unstable_address);
                mt->limit = max(mt->limit, size);
        }
}

static void __init adjust_reserve_sizes(void)
{
        int i;
        struct memtype_reserve *mt;

        mt = &reserve_info->memtype_reserve_table[0];
        for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
                if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
                        mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
                if (mt->size > mt->limit) {
                        pr_warning("%lx size for %s too large, setting to %lx\n",
                                mt->size, memtype_name[i], mt->limit);
                        mt->size = mt->limit;
                }
        }
}

static void __init reserve_memory_for_mempools(void)
{
        int i, memtype, membank_type;
        struct memtype_reserve *mt;
        struct membank *mb;
        int ret;
        unsigned long size;

        mt = &reserve_info->memtype_reserve_table[0];
        for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
                if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
                        continue;

                /* We know we will find a memory bank of the proper size
                 * as we have limited the size of the memory pool for
                 * each memory type to the size of the largest memory
                 * bank.  Choose the memory bank with the highest physical
                 * address that is large enough, so that we do not take
                 * memory from the lowest memory bank which the kernel
                 * is in (and cause boot problems) and so that we might
                 * be able to steal memory that would otherwise become
                 * highmem.  However, do not use unstable memory.
                 */
                for (i = meminfo.nr_banks - 1; i >= 0; i--) {
                        mb = &meminfo.bank[i];
                        membank_type =
                                reserve_info->paddr_to_memtype(mb->start);
                        if (memtype != membank_type)
                                continue;
                        size = stable_size(mb,
                                reserve_info->low_unstable_address);
                        if (size >= mt->size) {
                                mt->start = mb->start + size - mt->size;
                                ret = memblock_remove(mt->start, mt->size);
                                BUG_ON(ret);
                                break;
                        }
                }
        }
}

static void __init initialize_mempools(void)
{
        struct mem_pool *mpool;
        int memtype;
        struct memtype_reserve *mt;

        mt = &reserve_info->memtype_reserve_table[0];
        for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
                if (!mt->size)
                        continue;
                mpool = initialize_memory_pool(mt->start, mt->size, memtype);
                if (!mpool)
                        pr_warning("failed to create %s mempool\n",
                                memtype_name[memtype]);
        }
}

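/*
 * Carve the per-memtype memory pools out of bootable RAM.  Platform
 * code supplies reserve_info (sizes, memtype lookup, unstable limit);
 * this routine sizes the pools, removes their backing memory from
 * memblock and then creates the pools.
 */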
void __init msm_reserve(void)
{
        memory_pool_init();
        reserve_info->calculate_reserve_sizes();
        calculate_reserve_limits();
        adjust_reserve_sizes();
        reserve_memory_for_mempools();
        initialize_mempools();
}

static int get_ebi_memtype(void)
{
        /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
        if (cpu_is_msm7x30() || cpu_is_msm8x55())
                return MEMTYPE_EBI0;
        return MEMTYPE_EBI1;
}

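/*
 * Convenience wrappers around allocate_contiguous_memory[_nomap]() that
 * pick the correct EBI memory type for the running SoC.
 */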
void *allocate_contiguous_ebi(unsigned long size,
        unsigned long align, int cached)
{
        return allocate_contiguous_memory(size, get_ebi_memtype(),
                align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
        unsigned long align)
{
        return allocate_contiguous_memory_nomap(size, get_ebi_memtype(), align);
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

/* emulation of the deprecated pmem_kalloc and pmem_kfree */
int32_t pmem_kalloc(const size_t size, const uint32_t flags)
{
        int pmem_memtype;
        int memtype = MEMTYPE_NONE;
        int ebi1_memtype = MEMTYPE_EBI1;
        unsigned int align;
        int32_t paddr;

        switch (flags & PMEM_ALIGNMENT_MASK) {
        case PMEM_ALIGNMENT_4K:
                align = SZ_4K;
                break;
        case PMEM_ALIGNMENT_1M:
                align = SZ_1M;
                break;
        default:
                pr_alert("Invalid alignment %x\n",
                        (flags & PMEM_ALIGNMENT_MASK));
                return -EINVAL;
        }

        /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
        if (cpu_is_msm7x30() || cpu_is_msm8x55())
                ebi1_memtype = MEMTYPE_EBI0;

        pmem_memtype = flags & PMEM_MEMTYPE_MASK;
        if (pmem_memtype == PMEM_MEMTYPE_EBI1)
                memtype = ebi1_memtype;
        else if (pmem_memtype == PMEM_MEMTYPE_SMI)
                memtype = MEMTYPE_SMI_KERNEL;
        else {
                pr_alert("Invalid memory type %x\n",
                        flags & PMEM_MEMTYPE_MASK);
                return -EINVAL;
        }

        paddr = allocate_contiguous_memory_nomap(size, memtype, align);

        /* fall back to EBI memory if an SMI allocation fails */
        if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
                paddr = allocate_contiguous_memory_nomap(size,
                        ebi1_memtype, align);

        if (!paddr)
                return -ENOMEM;
        return paddr;
}
EXPORT_SYMBOL(pmem_kalloc);

int pmem_kfree(const int32_t physaddr)
{
        free_contiguous_memory_by_paddr(physaddr);

        return 0;
}
EXPORT_SYMBOL(pmem_kfree);
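
/*
 * Example (illustrative only): allocate and later release a 1 MB,
 * 1 MB-aligned buffer through the legacy interface:
 *
 *      int32_t paddr = pmem_kalloc(SZ_1M,
 *                      PMEM_MEMTYPE_EBI1 | PMEM_ALIGNMENT_1M);
 *
 * On failure a negative errno (-EINVAL or -ENOMEM) is returned; a
 * successful allocation is freed with pmem_kfree(paddr).
 */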

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
        /* Store TTBR0 for post-mortem debugging purposes. */
        asm("mrc p15, 0, %0, c2, c0, 0\n"
                : "=r" (msm_ttbr0));
}
434}