/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *	    Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include "psb_drv.h"


/*
 *	GTT resource allocator - manage page mappings in GTT space
 */

/**
 *	psb_gtt_mask_pte	-	generate GTT pte entry
 *	@pfn: page number to encode
 *	@type: type of memory in the GTT
 *
 *	Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}
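
/*
 * Illustrative note, not from the original driver: assuming the usual 4KiB
 * pages (PAGE_SHIFT == 12), encoding a hypothetical pfn of 0x12345 as
 * cacheable memory gives
 *
 *	psb_gtt_mask_pte(0x12345, PSB_MMU_CACHED_MEMORY)
 *		== (0x12345 << 12) | PSB_PTE_VALID | PSB_PTE_CACHED
 *		== 0x12345000 | PSB_PTE_VALID | PSB_PTE_CACHED
 *
 * i.e. the page's physical address in the upper bits and the type flags in
 * the low bits of the 32-bit PTE.
 */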

/**
 *	psb_gtt_entry		-	find the GTT entries for a gtt_range
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Given a gtt_range object, return a pointer to its page table entries
 *	within the CPU mapping of the GTT.
 */
static u32 *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 *	psb_gtt_insert	-	put an object into the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Take our preallocated GTT range and insert the GEM object into
 *	the GTT. This is protected via the gtt mutex which the caller
 *	must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
{
	u32 *gtt_slot, pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	/* Make sure changes are visible to the GPU */
	set_pages_array_uc(pages, r->npage);

	/* Write our page entries into the GTT itself */
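	/*
	 * The entries are written starting at page index r->roll and wrap
	 * around, so GTT slot k ends up mapping page (r->roll + k) % r->npage.
	 * This is what lets psb_gtt_roll() scroll a pinned mapping simply by
	 * rewriting the entries.
	 */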
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 *	psb_gtt_remove	-	remove an object from the GTT
 *	@dev: our DRM device
 *	@r: our GTT range
 *
 *	Remove a preallocated GTT range from the GTT. Overwrite all the
 *	page table entries with the dummy page. This is protected via the gtt
 *	mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 *gtt_slot, pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 *	psb_gtt_roll	-	set scrolling position
 *	@dev: our DRM device
 *	@r: the gtt mapping we are using
 *	@roll: roll offset
 *
 *	Roll an existing pinned mapping by moving the pages through the GTT.
 *	This allows us to implement hardware scrolling on the consoles without
 *	a 2D engine.
 */
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 *gtt_slot, pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no need to worry, we will write the
	   mapping at the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
		iowrite32(pte, gtt_slot++);
	}
	ioread32(gtt_slot - 1);
}
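
/*
 * Usage sketch (illustrative, not taken from the original driver): a caller
 * holding a pinned, GEM backed mapping "fb_gt" could shift the view the GPU
 * sees forward by n whole pages with
 *
 *	psb_gtt_roll(dev, fb_gt, (fb_gt->roll + n) % fb_gt->npage);
 *
 * where fb_gt and n are hypothetical. The console code relies on this to
 * scroll the framebuffer without a 2D engine, as noted above.
 */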

/**
 *	psb_gtt_attach_pages	-	attach and pin GEM pages
 *	@gt: the gtt range
 *
 *	Pin and build an in kernel list of the pages that back our GEM object.
 *	While we hold this the pages cannot be swapped out. This is protected
 *	via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct inode *inode;
	struct address_space *mapping;
	int i;
	struct page *p;
	int pages = gt->gem.size / PAGE_SIZE;

	WARN_ON(gt->pages);

	/* This is the shared memory object that backs the GEM resource */
	inode = gt->gem.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;

	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
	if (gt->pages == NULL)
		return -ENOMEM;
	gt->npage = pages;

	for (i = 0; i < pages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto err;
		gt->pages[i] = p;
	}
	return 0;

err:
	while (i--)
		page_cache_release(gt->pages[i]);
	kfree(gt->pages);
	gt->pages = NULL;
	return PTR_ERR(p);
}

/**
 *	psb_gtt_detach_pages	-	release and unpin GEM pages
 *	@gt: the gtt range
 *
 *	Undo the effect of psb_gtt_attach_pages. At this point the pages
 *	must have been removed from the GTT as they could now be paged out
 *	and change bus address. This is protected via the gtt mutex which the
 *	caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	int i;

	for (i = 0; i < gt->npage; i++) {
		/* FIXME: do we need to force dirty */
		set_page_dirty(gt->pages[i]);
		page_cache_release(gt->pages[i]);
	}
	kfree(gt->pages);
	gt->pages = NULL;
}

/**
 *	psb_gtt_pin		-	pin pages into the GTT
 *	@gt: range to pin
 *
 *	Pin a set of pages into the GTT. The pins are refcounted so that
 *	multiple pins need multiple unpins to undo.
 *
 *	Non GEM backed objects treat this as a no-op as they are always GTT
 *	backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

/**
 *	psb_gtt_unpin		-	Drop a GTT pin requirement
 *	@gt: range to unpin
 *
 *	Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 *	will be removed from the GTT, which will also drop the page references
 *	and allow the VM to clean up or page out the backing memory.
 *
 *	Non GEM backed objects treat this as a no-op as they are always GTT
 *	backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;

	mutex_lock(&dev_priv->gtt_mutex);

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}
	mutex_unlock(&dev_priv->gtt_mutex);
}

/*
 *	GTT resource allocator - allocate and manage GTT address space
 */

/**
 *	psb_gtt_alloc_range	-	allocate GTT address space
 *	@dev: Our DRM device
 *	@len: length (bytes) of address space required
 *	@name: resource name
 *	@backed: resource should be backed by stolen pages
 *
 *	Ask the kernel core to find us a suitable range of addresses
 *	to use for a GTT mapping.
 *
 *	Returns a gtt_range structure describing the object, or NULL on
 *	error. On successful return the resource is both allocated and marked
 *	as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
				      const char *name, int backed)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, PAGE_SIZE, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}

/**
 *	psb_gtt_free_range	-	release GTT address space
 *	@dev: our DRM device
 *	@gt: a mapping created with psb_gtt_alloc_range
 *
 *	Release a resource that was allocated with psb_gtt_alloc_range. If the
 *	object is still pinned by an mmap user we drop that pin here before
 *	freeing the range.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}
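
/*
 * Illustrative sketch only, not part of the original driver: how a caller
 * might use the allocator and pin interfaces above for a GEM backed buffer.
 * The size, name and the function itself are hypothetical; it is never
 * called and exists purely to show the intended lifecycle.
 */
static int __maybe_unused psb_gtt_usage_sketch(struct drm_device *dev)
{
	struct gtt_range *gt;
	int ret;

	/* Carve out 16 pages of GTT space outside the stolen area */
	gt = psb_gtt_alloc_range(dev, 16 * PAGE_SIZE, "sketch", 0);
	if (gt == NULL)
		return -ENOMEM;

	/* First pin attaches the shmem pages and writes the GTT entries */
	ret = psb_gtt_pin(gt);
	if (ret < 0) {
		psb_gtt_free_range(dev, gt);
		return ret;
	}

	/* ... the GPU can now use the range at gt->offset ... */

	/* Last unpin points the entries back at the scratch page */
	psb_gtt_unpin(gt);
	psb_gtt_free_range(dev, gt);
	return 0;
}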

static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	uint32_t vram_pages;
	uint32_t dvmt_mode = 0;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	mutex_init(&dev_priv->gtt_mutex);

	psb_gtt_alloc(dev);
	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 *	The video mmu has a hw bug when accessing 0xD0000000.
	 *	Make the gatt start at 0xE0000000. This doesn't actually
	 *	matter for us but may do if the video acceleration ever
	 *	gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
								>> PAGE_SHIFT;
	/* CDV doesn't report this. In which case the system has 64 gtt pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
								>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV systems. Fudge it in this case.
		   We really don't care what imaginary space is being allocated
		   at this point */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	printk(KERN_INFO "Stolen memory information\n");
	printk(KERN_INFO "       base in RAM: 0x%x\n", dev_priv->stolen_base);
	printk(KERN_INFO "       size: %luK, calculated by (GTT RAM base) - (Stolen base), seems wrong\n",
		vram_stolen_size / 1024);
	dvmt_mode = (dev_priv->gmch_ctrl >> 4) & 0x7;
	printk(KERN_INFO "      the correct size should be: %dM(dvmt mode=%d)\n",
		(dvmt_mode == 1) ? 1 : (2 << (dvmt_mode - 1)), dvmt_mode);

	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 *	Map the GTT and the stolen memory area
	 */
	dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
					    gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */
	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	vram_pages = num_pages = vram_stolen_size >> PAGE_SHIFT;
	printk(KERN_INFO "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, 0);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */
	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, 0);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}