/*
 * VRAM manager for OMAP
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 */

/*#define DEBUG*/

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/setup.h>

#include <plat/sram.h>
#include <plat/vram.h>
#include <plat/dma.h>

#ifdef DEBUG
#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
#else
#define DBG(format, ...)
#endif

#define OMAP2_SRAM_START		0x40200000
/* Maximum size; in reality this is smaller if SRAM is partially locked. */
#define OMAP2_SRAM_SIZE			0xa0000		/* 640k */

/*
 * Postponed regions are used to store region information temporarily at
 * boot time, before we can allocate the region list.
 */
#define MAX_POSTPONED_REGIONS 10

static bool vram_initialized;
static int postponed_cnt;
static struct {
	unsigned long paddr;
	size_t size;
} postponed_regions[MAX_POSTPONED_REGIONS];

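/*
 * Each vram_region describes one contiguous area handed to the manager.
 * Its alloc_list holds the vram_allocs carved out of that area, kept
 * sorted by ascending paddr so free gaps can be found in a single pass.
 */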
struct vram_alloc {
	struct list_head list;
	unsigned long paddr;
	unsigned pages;
};

struct vram_region {
	struct list_head list;
	struct list_head alloc_list;
	unsigned long paddr;
	unsigned pages;
};

static DEFINE_MUTEX(region_mutex);
static LIST_HEAD(region_list);

static inline int region_mem_type(unsigned long paddr)
{
	if (paddr >= OMAP2_SRAM_START &&
	    paddr < OMAP2_SRAM_START + OMAP2_SRAM_SIZE)
		return OMAP_VRAM_MEMTYPE_SRAM;
	else
		return OMAP_VRAM_MEMTYPE_SDRAM;
}

static struct vram_region *omap_vram_create_region(unsigned long paddr,
		unsigned pages)
{
	struct vram_region *rm;

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);

	if (rm) {
		INIT_LIST_HEAD(&rm->alloc_list);
		rm->paddr = paddr;
		rm->pages = pages;
	}

	return rm;
}

#if 0
static void omap_vram_free_region(struct vram_region *vr)
{
	list_del(&vr->list);
	kfree(vr);
}
#endif

static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
		unsigned long paddr, unsigned pages)
{
	struct vram_alloc *va;
	struct vram_alloc *new;

	new = kzalloc(sizeof(*va), GFP_KERNEL);

	if (!new)
		return NULL;

	new->paddr = paddr;
	new->pages = pages;

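	/*
	 * Keep alloc_list sorted: insert before the first existing
	 * allocation whose paddr is higher than the new one's.
	 */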
	list_for_each_entry(va, &vr->alloc_list, list) {
		if (va->paddr > new->paddr)
			break;
	}

	list_add_tail(&new->list, &va->list);

	return new;
}

static void omap_vram_free_allocation(struct vram_alloc *va)
{
	list_del(&va->list);
	kfree(va);
}

int omap_vram_add_region(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	unsigned pages;

	if (vram_initialized) {
		DBG("adding region paddr %08lx size %zu\n",
				paddr, size);

		size &= PAGE_MASK;
		pages = size >> PAGE_SHIFT;

		rm = omap_vram_create_region(paddr, pages);
		if (rm == NULL)
			return -ENOMEM;

		list_add(&rm->list, &region_list);
	} else {
		if (postponed_cnt == MAX_POSTPONED_REGIONS)
			return -ENOMEM;

		postponed_regions[postponed_cnt].paddr = paddr;
		postponed_regions[postponed_cnt].size = size;

		++postponed_cnt;
	}
	return 0;
}

int omap_vram_free(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	unsigned start, end;

	DBG("free mem paddr %08lx size %zu\n", paddr, size);

	size = PAGE_ALIGN(size);

	mutex_lock(&region_mutex);

	list_for_each_entry(rm, &region_list, list) {
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			start = alloc->paddr;
			end = alloc->paddr + (alloc->pages << PAGE_SHIFT);

			if (start >= paddr && end <= paddr + size)
				goto found;
		}
	}

	mutex_unlock(&region_mutex);
	return -EINVAL;

found:
	omap_vram_free_allocation(alloc);

	mutex_unlock(&region_mutex);
	return 0;
}
EXPORT_SYMBOL(omap_vram_free);

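/*
 * Check that [paddr, paddr + size) lies entirely within one region of the
 * matching memory type and overlaps no existing allocation, then record
 * it as a new allocation. Must be called with region_mutex held.
 */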
static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	size_t size;

	size = pages << PAGE_SHIFT;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		if (region_mem_type(rm->paddr) != region_mem_type(paddr))
			continue;

		start = rm->paddr;
		end = start + (rm->pages << PAGE_SHIFT) - 1;
		if (start > paddr || end < paddr + size - 1)
			continue;

		DBG("block ok, checking allocs\n");

		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr - 1;

			if (start <= paddr && end >= paddr + size - 1)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;

		if (!(start <= paddr && end >= paddr + size - 1))
			continue;
found:
		DBG("found area start %lx, end %lx\n", start, end);

		if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
			return -ENOMEM;

		return 0;
	}

	return -ENOMEM;
}

int omap_vram_reserve(unsigned long paddr, size_t size)
{
	unsigned pages;
	int r;

	DBG("reserve mem paddr %08lx size %zu\n", paddr, size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_reserve(paddr, pages);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_reserve);

static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
{
	struct completion *compl = data;
	complete(compl);
}

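/*
 * Zero the area using the system DMA engine's constant-fill mode, one
 * 32-bit element at a time, waiting up to a second for the completion
 * callback before giving up.
 */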
static int _omap_vram_clear(u32 paddr, unsigned pages)
{
	struct completion compl;
	unsigned elem_count;
	unsigned frame_count;
	int r;
	int lch;

	init_completion(&compl);

	r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
			_omap_vram_dma_cb,
			&compl, &lch);
	if (r) {
		pr_err("VRAM: request_dma failed for memory clear\n");
		return -EBUSY;
	}

	elem_count = pages * PAGE_SIZE / 4;
	frame_count = 1;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
			elem_count, frame_count,
			OMAP_DMA_SYNC_ELEMENT,
			0, 0);

	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
			paddr, 0, 0);

	omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);

	omap_start_dma(lch);

	if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
		omap_stop_dma(lch);
		pr_err("VRAM: dma timeout while clearing memory\n");
		r = -EIO;
		goto err;
	}

	r = 0;
err:
	omap_free_dma(lch);

	return r;
}

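/*
 * First-fit allocator: walk each region of the requested memory type and
 * take the first gap, between existing allocations or after the last one,
 * that is large enough. Must be called with region_mutex held.
 */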
static int _omap_vram_alloc(int mtype, unsigned pages, unsigned long *paddr)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		if (region_mem_type(rm->paddr) != mtype)
			continue;

		start = rm->paddr;

		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr;

			if (end - start >= pages << PAGE_SHIFT)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		end = rm->paddr + (rm->pages << PAGE_SHIFT);
found:
		if (end - start < pages << PAGE_SHIFT)
			continue;

		DBG("found %lx, end %lx\n", start, end);

		alloc = omap_vram_create_allocation(rm, start, pages);
		if (alloc == NULL)
			return -ENOMEM;

		*paddr = start;

		_omap_vram_clear(start, pages);

		return 0;
	}

	return -ENOMEM;
}

int omap_vram_alloc(int mtype, size_t size, unsigned long *paddr)
{
	unsigned pages;
	int r;

	BUG_ON(mtype > OMAP_VRAM_MEMTYPE_MAX || !size);

	DBG("alloc mem type %d size %zu\n", mtype, size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_alloc(mtype, pages, paddr);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_alloc);
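
/*
 * Usage sketch (illustrative only, not compiled): how a display driver
 * might pair omap_vram_alloc() with omap_vram_free(). The function name,
 * frame size and error handling below are assumptions, not taken from
 * any real caller.
 */
#if 0
static int example_setup_fb(void)
{
	unsigned long paddr;
	size_t fb_size = 800 * 480 * 2;	/* hypothetical RGB565 frame */
	int r;

	/* First-fit allocation from the registered SDRAM regions;
	 * the area is DMA-cleared to zero before being returned. */
	r = omap_vram_alloc(OMAP_VRAM_MEMTYPE_SDRAM, fb_size, &paddr);
	if (r)
		return r;	/* -ENOMEM: no gap large enough */

	/* ... map paddr and use it as framebuffer memory ... */

	/* Sizes are page-aligned internally, so alloc and free agree. */
	omap_vram_free(paddr, fb_size);
	return 0;
}
#endif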

void omap_vram_get_info(unsigned long *vram,
		unsigned long *free_vram,
		unsigned long *largest_free_block)
{
	struct vram_region *vr;
	struct vram_alloc *va;

	*vram = 0;
	*free_vram = 0;
	*largest_free_block = 0;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		unsigned free;
		unsigned long pa;

		pa = vr->paddr;
		*vram += vr->pages << PAGE_SHIFT;

		list_for_each_entry(va, &vr->alloc_list, list) {
			free = va->paddr - pa;
			*free_vram += free;
			if (free > *largest_free_block)
				*largest_free_block = free;
			pa = va->paddr + (va->pages << PAGE_SHIFT);
		}

		free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
		*free_vram += free;
		if (free > *largest_free_block)
			*largest_free_block = free;
	}

	mutex_unlock(&region_mutex);
}
EXPORT_SYMBOL(omap_vram_get_info);

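/*
 * debugfs interface: with CONFIG_DEBUG_FS, a read-only "vram" file
 * (typically /sys/kernel/debug/vram) lists every region and, indented
 * beneath it, the allocations within that region.
 */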
#if defined(CONFIG_DEBUG_FS)
static int vram_debug_show(struct seq_file *s, void *unused)
{
	struct vram_region *vr;
	struct vram_alloc *va;
	unsigned size;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		size = vr->pages << PAGE_SHIFT;
		seq_printf(s, "%08lx-%08lx (%d bytes)\n",
				vr->paddr, vr->paddr + size - 1,
				size);

		list_for_each_entry(va, &vr->alloc_list, list) {
			size = va->pages << PAGE_SHIFT;
			seq_printf(s, "    %08lx-%08lx (%d bytes)\n",
					va->paddr, va->paddr + size - 1,
					size);
		}
	}

	mutex_unlock(&region_mutex);

	return 0;
}

static int vram_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vram_debug_show, inode->i_private);
}

static const struct file_operations vram_debug_fops = {
	.open           = vram_debug_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

static int __init omap_vram_create_debugfs(void)
{
	struct dentry *d;

	d = debugfs_create_file("vram", S_IRUGO, NULL,
			NULL, &vram_debug_fops);
	if (IS_ERR(d))
		return PTR_ERR(d);

	return 0;
}
#endif

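/*
 * Runs at arch_initcall time, once the slab allocator is available:
 * regions registered earlier through omap_vram_add_region() could not be
 * kzalloc()ed then, so they were postponed and are added here.
 */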
static __init int omap_vram_init(void)
{
	int i;

	vram_initialized = 1;

	for (i = 0; i < postponed_cnt; i++)
		omap_vram_add_region(postponed_regions[i].paddr,
				postponed_regions[i].size);

#ifdef CONFIG_DEBUG_FS
	if (omap_vram_create_debugfs())
		pr_err("VRAM: Failed to create debugfs file\n");
#endif

	return 0;
}

arch_initcall(omap_vram_init);

/* Boot-time VRAM allocation */

/* set from board file */
static u32 omap_vram_sram_start __initdata;
static u32 omap_vram_sram_size __initdata;

/* set from board file */
static u32 omap_vram_sdram_start __initdata;
static u32 omap_vram_sdram_size __initdata;

/* set from kernel cmdline */
static u32 omap_vram_def_sdram_size __initdata;
static u32 omap_vram_def_sdram_start __initdata;

static int __init omap_vram_early_vram(char *p)
{
	omap_vram_def_sdram_size = memparse(p, &p);
	if (*p == ',')
		omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16);
	return 0;
}
early_param("vram", omap_vram_early_vram);
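
/*
 * Example (values illustrative): "vram=10M" asks for 10 MiB of SDRAM
 * VRAM anywhere; "vram=10M,0x90000000" additionally fixes the physical
 * start address (size parsed by memparse(), start address as hex).
 */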

/*
 * Called from map_io. We need to call this early enough so that we can
 * reserve the fixed SDRAM regions before the VM gets hold of them.
 */
void __init omap_vram_reserve_sdram_memblock(void)
{
	u32 paddr;
	u32 size = 0;

	/* cmdline arg overrides the board file definition */
	if (omap_vram_def_sdram_size) {
		size = omap_vram_def_sdram_size;
		paddr = omap_vram_def_sdram_start;
	}

	if (!size) {
		size = omap_vram_sdram_size;
		paddr = omap_vram_sdram_start;
	}

#ifdef CONFIG_OMAP2_VRAM_SIZE
	if (!size) {
		size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
		paddr = 0;
	}
#endif

	if (!size)
		return;

	size = PAGE_ALIGN(size);

	if (paddr) {
		struct memblock_property res;

		res.base = paddr;
		res.size = size;
		if ((paddr & ~PAGE_MASK) || memblock_find(&res) ||
				res.base != paddr || res.size != size) {
			pr_err("Illegal SDRAM region for VRAM\n");
			return;
		}

		if (memblock_is_region_reserved(paddr, size)) {
			pr_err("FB: failed to reserve VRAM - busy\n");
			return;
		}

		if (memblock_reserve(paddr, size) < 0) {
			pr_err("FB: failed to reserve VRAM - no memory\n");
			return;
		}
	} else {
		paddr = memblock_alloc_base(size, PAGE_SIZE, MEMBLOCK_REAL_LIMIT);
	}

	omap_vram_add_region(paddr, size);

	pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
}

/*
 * Called at SRAM init time, before anything is pushed to the SRAM stack.
 * Because of the stack scheme, we will allocate everything from the
 * start of the lowest address region to the end of SRAM. This will also
 * include padding for page alignment and possible holes between regions.
 *
 * As opposed to the SDRAM case, we'll also do any dynamic allocations at
 * this point, since a driver built as a module would have problems with
 * freeing / reallocating the regions.
 */
unsigned long __init omap_vram_reserve_sram(unsigned long sram_pstart,
		unsigned long sram_vstart,
		unsigned long sram_size,
		unsigned long pstart_avail,
		unsigned long size_avail)
{
	unsigned long pend_avail;
	unsigned long reserved;
	u32 paddr;
	u32 size;

	paddr = omap_vram_sram_start;
	size = omap_vram_sram_size;

	if (!size)
		return 0;

	reserved = 0;
	pend_avail = pstart_avail + size_avail;

	if (!paddr) {
		/* Dynamic allocation */
		if ((size_avail & PAGE_MASK) < size) {
			pr_err("Not enough SRAM for VRAM\n");
			return 0;
		}
		size_avail = (size_avail - size) & PAGE_MASK;
		paddr = pstart_avail + size_avail;
	}

	if (paddr < sram_pstart ||
			paddr + size > sram_pstart + sram_size) {
		pr_err("Illegal SRAM region for VRAM\n");
		return 0;
	}

	/* Reserve everything above the start of the region. */
	if (pend_avail - paddr > reserved)
		reserved = pend_avail - paddr;
	size_avail = pend_avail - reserved - pstart_avail;

	omap_vram_add_region(paddr, size);

	if (reserved)
		pr_info("Reserving %lu bytes SRAM for VRAM\n", reserved);

	return reserved;
}

void __init omap_vram_set_sdram_vram(u32 size, u32 start)
{
	omap_vram_sdram_start = start;
	omap_vram_sdram_size = size;
}

void __init omap_vram_set_sram_vram(u32 size, u32 start)
{
	omap_vram_sram_start = start;
	omap_vram_sram_size = size;
}