/* Copyright (c) 2002,2007-2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/export.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include <mach/iommu.h>
#include <mach/socinfo.h>

#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"

#define KGSL_MMU_ALIGN_SHIFT	13
#define KGSL_MMU_ALIGN_MASK	(~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))

static enum kgsl_mmutype kgsl_mmu_type;

static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);

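/*
 * kgsl_cleanup_pt()/kgsl_setup_pt() walk every probed device and invoke its
 * per-device pagetable hooks; only the 3D core additionally carries
 * MMU-specific pagetable state, handled through mmu_setup_pt/mmu_cleanup_pt.
 */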
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
	int i;
	struct kgsl_device *device;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
	}
	/* Only the 3d device needs mmu specific pt entries */
	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
	if (device->mmu.mmu_ops->mmu_cleanup_pt != NULL)
		device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt);

	return 0;
}

static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
	int i = 0;
	int status = 0;
	struct kgsl_device *device;

	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
		device = kgsl_driver.devp[i];
		if (device) {
			status = device->ftbl->setup_pt(device, pt);
			if (status)
				goto error_pt;
		}
	}
	/* Only the 3d device needs mmu specific pt entries */
	device = kgsl_driver.devp[KGSL_DEVICE_3D0];
	if (device->mmu.mmu_ops->mmu_setup_pt != NULL) {
		status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
		if (status)
			goto error_pt;
	}
	return status;
error_pt:
	while (i >= 0) {
		struct kgsl_device *device = kgsl_driver.devp[i];
		if (device)
			device->ftbl->cleanup_pt(device, pt);
		i--;
	}
	return status;
}

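/*
 * Pagetables are reference counted: kgsl_get_pagetable() takes a reference
 * under kgsl_driver.ptlock, and the final kgsl_put_pagetable() drops into
 * kgsl_destroy_pagetable(), which unlinks the pagetable, removes its sysfs
 * nodes, tears down the per-device entries and frees both VA pools.
 */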
static void kgsl_destroy_pagetable(struct kref *kref)
{
	struct kgsl_pagetable *pagetable = container_of(kref,
		struct kgsl_pagetable, refcount);
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_del(&pagetable->list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	pagetable_remove_sysfs_objects(pagetable);

	kgsl_cleanup_pt(pagetable);

	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
	if (pagetable->pool)
		gen_pool_destroy(pagetable->pool);

	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);

	kfree(pagetable);
}

static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
	if (pagetable)
		kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}

static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
	struct kgsl_pagetable *pt, *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (pt->name == name) {
			ret = pt;
			kref_get(&ret->refcount);
			break;
		}
	}

	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
	return ret;
}

static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
	unsigned long ptname;

	if (!kobj)
		return NULL;

	if (sscanf(kobj->name, "%lu", &ptname) != 1)
		return NULL;

	return kgsl_get_pagetable(ptname);
}

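/*
 * Per-pagetable statistics are exported read-only through sysfs under the
 * driver's pagetable kobject (kgsl_driver.ptkobj), one directory per
 * pagetable name with entries/mapped/va_range/max_mapped/max_entries files.
 * The show handlers resolve the pagetable from the kobject name and hold a
 * reference for the duration of the read.
 */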
static ssize_t
sysfs_show_entries(struct kobject *kobj,
		   struct kobj_attribute *attr,
		   char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_mapped(struct kobject *kobj,
		  struct kobj_attribute *attr,
		  char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_va_range(struct kobject *kobj,
		    struct kobj_attribute *attr,
		    char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt) {
		ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
			kgsl_mmu_get_ptsize());
	}

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
		      struct kobj_attribute *attr,
		      char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);

	kgsl_put_pagetable(pt);
	return ret;
}

static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
		       struct kobj_attribute *attr,
		       char *buf)
{
	struct kgsl_pagetable *pt;
	int ret = 0;

	pt = _get_pt_from_kobj(kobj);

	if (pt)
		ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);

	kgsl_put_pagetable(pt);
	return ret;
}

static struct kobj_attribute attr_entries = {
	.attr = { .name = "entries", .mode = 0444 },
	.show = sysfs_show_entries,
	.store = NULL,
};

static struct kobj_attribute attr_mapped = {
	.attr = { .name = "mapped", .mode = 0444 },
	.show = sysfs_show_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_va_range = {
	.attr = { .name = "va_range", .mode = 0444 },
	.show = sysfs_show_va_range,
	.store = NULL,
};

static struct kobj_attribute attr_max_mapped = {
	.attr = { .name = "max_mapped", .mode = 0444 },
	.show = sysfs_show_max_mapped,
	.store = NULL,
};

static struct kobj_attribute attr_max_entries = {
	.attr = { .name = "max_entries", .mode = 0444 },
	.show = sysfs_show_max_entries,
	.store = NULL,
};

static struct attribute *pagetable_attrs[] = {
	&attr_entries.attr,
	&attr_mapped.attr,
	&attr_va_range.attr,
	&attr_max_mapped.attr,
	&attr_max_entries.attr,
	NULL,
};

static struct attribute_group pagetable_attr_group = {
	.attrs = pagetable_attrs,
};

static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	if (pagetable->kobj)
		sysfs_remove_group(pagetable->kobj,
				   &pagetable_attr_group);

	kobject_put(pagetable->kobj);
}

static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
	char ptname[16];
	int ret = -ENOMEM;

	snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
	pagetable->kobj = kobject_create_and_add(ptname,
						 kgsl_driver.ptkobj);
	if (pagetable->kobj == NULL)
		goto err;

	ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);

err:
	if (ret) {
		if (pagetable->kobj)
			kobject_put(pagetable->kobj);

		pagetable->kobj = NULL;
	}

	return ret;
}

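/*
 * kgsl_mmu_get_ptname_from_ptbase - look up the pagetable name that owns a
 * hardware pagetable base address, typically while decoding a pagefault.
 * Returns KGSL_MMU_GLOBAL_PT when the MMU has no pt_equal op, or -1 when no
 * pagetable matches.
 */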
int
kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, unsigned int pt_base)
{
	struct kgsl_pagetable *pt;
	int ptid = -1;

	if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
		return KGSL_MMU_GLOBAL_PT;
	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
			ptid = (int) pt->name;
			break;
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);

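/*
 * kgsl_mmu_log_fault_addr - rate-limit pagefault logging: returns 1 when a
 * fault in the same page of the same pagetable was already recorded (so the
 * caller can skip logging it again), otherwise records the new page-aligned
 * fault address and returns 0.
 */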
unsigned int
kgsl_mmu_log_fault_addr(struct kgsl_mmu *mmu, unsigned int pt_base,
			unsigned int addr)
{
	struct kgsl_pagetable *pt;
	unsigned int ret = 0;

	if (!mmu->mmu_ops || !mmu->mmu_ops->mmu_pt_equal)
		return KGSL_MMU_GLOBAL_PT;
	spin_lock(&kgsl_driver.ptlock);
	list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
		if (mmu->mmu_ops->mmu_pt_equal(mmu, pt, pt_base)) {
			if ((addr & ~(PAGE_SIZE-1)) == pt->fault_addr) {
				ret = 1;
				break;
			} else {
				pt->fault_addr = (addr & ~(PAGE_SIZE-1));
				ret = 0;
				break;
			}
		}
	}
	spin_unlock(&kgsl_driver.ptlock);

	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_log_fault_addr);

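/*
 * kgsl_mmu_init - allocate the shared setstate page and hook up the
 * gpummu/iommu operation table according to the MMU type chosen at boot;
 * with KGSL_MMU_TYPE_NONE only the setstate page is kept.
 */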
int kgsl_mmu_init(struct kgsl_device *device)
{
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;

	mmu->device = device;
	status = kgsl_allocate_contiguous(&mmu->setstate_memory, PAGE_SIZE);
	if (status)
		return status;
	kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
			   mmu->setstate_memory.size);

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type) {
		dev_info(device->dev, "|%s| MMU type set for device is "
			"NOMMU\n", __func__);
		goto done;
	} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		mmu->mmu_ops = &gpummu_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		mmu->mmu_ops = &iommu_ops;

	status = mmu->mmu_ops->mmu_init(mmu);
done:
	if (status)
		kgsl_sharedmem_free(&mmu->setstate_memory);
	return status;
}
EXPORT_SYMBOL(kgsl_mmu_init);

int kgsl_mmu_start(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		kgsl_regwrite(device, MH_MMU_CONFIG, 0);
		/* Setup gpuaddr of global mappings */
		if (!mmu->setstate_memory.gpuaddr)
			kgsl_setup_pt(NULL);
		return 0;
	} else {
		return mmu->mmu_ops->mmu_start(mmu);
	}
}
EXPORT_SYMBOL(kgsl_mmu_start);

static void mh_axi_error(struct kgsl_device *device, const char *type)
{
	unsigned int reg, gpu_err, phys_err, pt_base;

	kgsl_regread(device, MH_AXI_ERROR, &reg);
	pt_base = kgsl_mmu_get_current_ptbase(&device->mmu);
	/*
	 * Read gpu virtual and physical addresses that
	 * caused the error from the debug data.
	 */
	kgsl_regwrite(device, MH_DEBUG_CTRL, 44);
	kgsl_regread(device, MH_DEBUG_DATA, &gpu_err);
	kgsl_regwrite(device, MH_DEBUG_CTRL, 45);
	kgsl_regread(device, MH_DEBUG_DATA, &phys_err);
	KGSL_MEM_CRIT(device,
		"axi %s error: %08x pt %08x gpu %08x phys %08x\n",
		type, reg, pt_base, gpu_err, phys_err);
}

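/*
 * kgsl_mh_intrcallback - decode the memory hub interrupt status: AXI
 * read/write errors are dumped via mh_axi_error() and MMU pagefaults are
 * forwarded to the mmu_pagefault op before the handled bits are cleared.
 */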
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;

	kgsl_regread(device, MH_INTERRUPT_STATUS, &status);

	if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
		mh_axi_error(device, "read");
	if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
		mh_axi_error(device, "write");
	if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
		device->mmu.mmu_ops->mmu_pagefault(&device->mmu);

	status &= KGSL_MMU_INT_MASK;
	kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);

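/*
 * kgsl_mmu_createpagetableobject - allocate and register a new pagetable.
 * Besides the regular per-pagetable VA pool, IOMMU global/private-bank
 * pagetables get a second "kgsl" pool carved out of
 * KGSL_IOMMU_GLOBAL_MEM_BASE so that global buffers occupy the same GPU
 * address in every pagetable.
 */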
static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
	unsigned int name)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	unsigned long flags;
	unsigned int ptsize;

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			sizeof(struct kgsl_pagetable));
		return NULL;
	}

	kref_init(&pagetable->refcount);

	spin_lock_init(&pagetable->lock);

	ptsize = kgsl_mmu_get_ptsize();

	pagetable->name = name;
	pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(ptsize);
	pagetable->fault_addr = 0xFFFFFFFF;

	/*
	 * Create a separate kgsl pool for IOMMU: global mappings are mapped
	 * just once from this pool of the default pagetable and then reused
	 * by every other pagetable.
	 */
	if ((KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) &&
		((KGSL_MMU_GLOBAL_PT == name) ||
		(KGSL_MMU_PRIV_BANK_TABLE_NAME == name))) {
		pagetable->kgsl_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (pagetable->kgsl_pool == NULL) {
			KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
				PAGE_SHIFT);
			goto err_alloc;
		}
		if (gen_pool_add(pagetable->kgsl_pool,
			KGSL_IOMMU_GLOBAL_MEM_BASE,
			KGSL_IOMMU_GLOBAL_MEM_SIZE, -1)) {
			KGSL_CORE_ERR("gen_pool_add failed\n");
			goto err_kgsl_pool;
		}
	}

	pagetable->pool = gen_pool_create(KGSL_MMU_ALIGN_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_CORE_ERR("gen_pool_create(%d) failed\n",
			KGSL_MMU_ALIGN_SHIFT);
		goto err_kgsl_pool;
	}

	if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(),
			ptsize, -1)) {
		KGSL_CORE_ERR("gen_pool_add failed\n");
		goto err_pool;
	}

	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		pagetable->pt_ops = &gpummu_pt_ops;
	else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
		pagetable->pt_ops = &iommu_pt_ops;

	pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
	if (!pagetable->priv)
		goto err_pool;

	status = kgsl_setup_pt(pagetable);
	if (status)
		goto err_mmu_create;

	spin_lock_irqsave(&kgsl_driver.ptlock, flags);
	list_add(&pagetable->list, &kgsl_driver.pagetable_list);
	spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);

	/* Create the sysfs entries */
	pagetable_add_sysfs_objects(pagetable);

	return pagetable;

err_mmu_create:
	pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
err_pool:
	gen_pool_destroy(pagetable->pool);
err_kgsl_pool:
	if (pagetable->kgsl_pool)
		gen_pool_destroy(pagetable->kgsl_pool);
err_alloc:
	kfree(pagetable);

	return NULL;
}

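/*
 * kgsl_mmu_getpagetable - find or create the pagetable with the given name,
 * taking a reference that the caller must drop with kgsl_mmu_putpagetable().
 * Without per-process pagetables every request is collapsed onto
 * KGSL_MMU_GLOBAL_PT. A minimal usage sketch (error handling elided; the
 * per-process pid as name is only an illustration):
 *
 *	struct kgsl_pagetable *pt;
 *
 *	pt = kgsl_mmu_getpagetable(task_pid_nr(current));
 *	if (pt == NULL)
 *		return -ENOMEM;
 *	...map and use buffers against pt...
 *	kgsl_mmu_putpagetable(pt);
 */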
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
	struct kgsl_pagetable *pt;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return (void *)(-1);

	if (!kgsl_mmu_is_perprocess())
		name = KGSL_MMU_GLOBAL_PT;

	pt = kgsl_get_pagetable(name);

	if (pt == NULL)
		pt = kgsl_mmu_createpagetableobject(name);

	return pt;
}

void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
	kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);

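/*
 * kgsl_setstate - apply KGSL_MMUFLAGS_TLBFLUSH/PTUPDATE state changes for a
 * context, preferring the device's setstate hook and falling back to the
 * MMU op; it is a no-op when nothing needs to be done for this MMU type.
 */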
void kgsl_setstate(struct kgsl_mmu *mmu, unsigned int context_id,
		   uint32_t flags)
{
	struct kgsl_device *device = mmu->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (!(flags & (KGSL_MMUFLAGS_TLBFLUSH | KGSL_MMUFLAGS_PTUPDATE))
		&& !adreno_is_a2xx(adreno_dev))
		return;

	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return;
	else if (device->ftbl->setstate)
		device->ftbl->setstate(device, context_id, flags);
	else if (mmu->mmu_ops->mmu_device_setstate)
		mmu->mmu_ops->mmu_device_setstate(mmu, flags);
}
EXPORT_SYMBOL(kgsl_setstate);

void kgsl_mh_start(struct kgsl_device *device)
{
	struct kgsl_mh *mh = &device->mh;
	/* force mmu off for now */
	kgsl_regwrite(device, MH_MMU_CONFIG, 0);
	kgsl_idle(device);

	/* define physical memory range accessible by the core */
	kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
	kgsl_regwrite(device, MH_MMU_MPU_END,
			mh->mpu_base + mh->mpu_range);
	kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);

	if (mh->mh_intf_cfg1 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
			mh->mh_intf_cfg1);

	if (mh->mh_intf_cfg2 != 0)
		kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
			mh->mh_intf_cfg2);

	/*
	 * Interrupts are enabled on a per-device level when
	 * kgsl_pwrctrl_irq() is called
	 */
}

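/*
 * kgsl_mmu_map - reserve GPU virtual address space for a memdesc and map
 * its scatterlist with the requested protection flags. Global buffers on
 * IOMMU are carved from the shared kgsl_pool so they keep one address in
 * all pagetables; everything else comes from the pagetable's own pool.
 * Note the locking asymmetry below: on IOMMU the map runs unlocked and the
 * spinlock is only taken afterwards to update the statistics.
 * A minimal, illustrative call (the RV/WV page-protection bits are the
 * read/write flags this driver family uses; treat them as an assumption):
 *
 *	ret = kgsl_mmu_map(pt, &desc, GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
 *	if (ret)
 *		goto err;
 */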
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc,
		unsigned int protflags)
{
	int ret;
	struct gen_pool *pool;
	int size;
	int page_align = ilog2(PAGE_SIZE);

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		if (memdesc->sglen == 1) {
			memdesc->gpuaddr = sg_dma_address(memdesc->sg);
			if (!memdesc->gpuaddr)
				memdesc->gpuaddr = sg_phys(memdesc->sg);
			if (!memdesc->gpuaddr) {
				KGSL_CORE_ERR("Unable to get a valid physical "
					"address for memdesc\n");
				return -EINVAL;
			}
			return 0;
		} else {
			KGSL_CORE_ERR("Memory is not contiguous "
				"(sglen = %d)\n", memdesc->sglen);
			return -EINVAL;
		}
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	pool = pagetable->pool;

	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
		/* Allocate aligned virtual addresses for iommu. This allows
		 * more efficient pagetable entries if the physical memory
		 * is also aligned. Don't do this for GPUMMU, because
		 * the address space is so small.
		 */
		if (kgsl_memdesc_get_align(memdesc) > 0)
			page_align = kgsl_memdesc_get_align(memdesc);
		if (kgsl_memdesc_is_global(memdesc)) {
			/*
			 * Only the default pagetable has a kgsl_pool, and
			 * it is responsible for creating the mapping for
			 * each global buffer. The mapping will be reused
			 * in all other pagetables and it must already exist
			 * when we're creating other pagetables which do not
			 * have a kgsl_pool.
			 */
			pool = pagetable->kgsl_pool;
			if (pool == NULL && memdesc->gpuaddr == 0) {
				KGSL_CORE_ERR(
					"No address for global mapping into pt %d\n",
					pagetable->name);
				return -EINVAL;
			}
		}
	}
	if (pool) {
		memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size,
							  page_align);
		if (memdesc->gpuaddr == 0) {
			KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n",
				size,
				(pool == pagetable->kgsl_pool) ?
				"kgsl_pool" : "general_pool");
			KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
				pagetable->name,
				pagetable->stats.mapped,
				pagetable->stats.entries);
			return -ENOMEM;
		}
	}

	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags,
					 &pagetable->tlb_flags);
	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);

	if (ret)
		goto err_free_gpuaddr;

	/* Keep track of the statistics for the sysfs files */

	KGSL_STATS_ADD(1, pagetable->stats.entries,
		       pagetable->stats.max_entries);

	KGSL_STATS_ADD(size, pagetable->stats.mapped,
		       pagetable->stats.max_mapped);

	spin_unlock(&pagetable->lock);

	return 0;

err_free_gpuaddr:
	spin_unlock(&pagetable->lock);
	if (pool)
		gen_pool_free(pool, memdesc->gpuaddr, size);
	memdesc->gpuaddr = 0;
	return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);

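/*
 * kgsl_mmu_unmap - tear down the mapping for a memdesc, clear a recorded
 * fault address that falls inside it, update the statistics and return the
 * virtual range to the pool it came from. The gpuaddr of global buffers is
 * preserved because other pagetables may still reference the same mapping.
 */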
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		struct kgsl_memdesc *memdesc)
{
	struct gen_pool *pool;
	int size;
	unsigned int start_addr = 0;
	unsigned int end_addr = 0;

	if (memdesc->size == 0 || memdesc->gpuaddr == 0)
		return 0;

	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
		memdesc->gpuaddr = 0;
		return 0;
	}

	size = kgsl_sg_size(memdesc->sg, memdesc->sglen);

	start_addr = memdesc->gpuaddr;
	end_addr = (memdesc->gpuaddr + size);

	if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc,
				     &pagetable->tlb_flags);

	/* Clear the stored fault address if this buffer covered it */
	if ((pagetable->fault_addr >= start_addr) &&
		(pagetable->fault_addr < end_addr))
		pagetable->fault_addr = 0;

	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype())
		spin_lock(&pagetable->lock);
	/* Remove the statistics */
	pagetable->stats.entries--;
	pagetable->stats.mapped -= size;

	spin_unlock(&pagetable->lock);

	pool = pagetable->pool;

	if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()
		&& kgsl_memdesc_is_global(memdesc)) {
		pool = pagetable->kgsl_pool;
	}
	if (pool)
		gen_pool_free(pool, memdesc->gpuaddr, size);

	/*
	 * Don't clear the gpuaddr on global mappings because they
	 * may be in use by other pagetables
	 */
	if (!kgsl_memdesc_is_global(memdesc))
		memdesc->gpuaddr = 0;
	return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);

int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
			struct kgsl_memdesc *memdesc, unsigned int protflags)
{
	int result = -EINVAL;
	unsigned int gpuaddr = 0;

	if (memdesc == NULL) {
		KGSL_CORE_ERR("invalid memdesc\n");
		goto error;
	}
	/* Not all global mappings are needed for all MMU types */
	if (!memdesc->size)
		return 0;

	gpuaddr = memdesc->gpuaddr;
	memdesc->priv |= KGSL_MEMDESC_GLOBAL;

	result = kgsl_mmu_map(pagetable, memdesc, protflags);
	if (result)
		goto error;

	/* Global mappings must have the same gpu address in all pagetables */
	if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
		KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
			"gpu 0x%08x 0x%08x\n", pagetable, memdesc->physaddr,
			gpuaddr, memdesc->gpuaddr);
		goto error_unmap;
	}
	return result;
error_unmap:
	kgsl_mmu_unmap(pagetable, memdesc);
error:
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);

int kgsl_mmu_close(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;

	kgsl_sharedmem_free(&mmu->setstate_memory);
	if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
		return 0;
	else
		return mmu->mmu_ops->mmu_close(mmu);
}
EXPORT_SYMBOL(kgsl_mmu_close);

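/*
 * kgsl_mmu_pt_get_flags - consume the deferred TLB-flush flag for a device:
 * returns KGSL_MMUFLAGS_TLBFLUSH exactly once per flush request and clears
 * the device's bit, so the caller is expected to act on a non-zero result.
 */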
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
			enum kgsl_deviceid id)
{
	unsigned int result = 0;

	if (pt == NULL)
		return 0;

	spin_lock(&pt->lock);
	if (pt->tlb_flags & (1<<id)) {
		result = KGSL_MMUFLAGS_TLBFLUSH;
		pt->tlb_flags &= ~(1<<id);
	}
	spin_unlock(&pt->lock);
	return result;
}
EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);

void kgsl_mmu_ptpool_destroy(void *ptpool)
{
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		kgsl_gpummu_ptpool_destroy(ptpool);
	ptpool = 0;
}
EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);

void *kgsl_mmu_ptpool_init(int entries)
{
	if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
		return kgsl_gpummu_ptpool_init(entries);
	else
		return (void *)(-1);
}
EXPORT_SYMBOL(kgsl_mmu_ptpool_init);

int kgsl_mmu_enabled(void)
{
	if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
		return 1;
	else
		return 0;
}
EXPORT_SYMBOL(kgsl_mmu_enabled);

enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
	return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);

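/*
 * kgsl_mmu_set_mmutype - choose the MMU type at boot: default to GPUMMU
 * (or no MMU on APQ8064), upgrade to IOMMU when one is present on the
 * platform bus, and finally honor an explicit "gpummu"/"iommu"/"nommu"
 * override string, typically supplied as a module parameter.
 */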
void kgsl_mmu_set_mmutype(char *mmutype)
{
	/* Set the default MMU - GPUMMU on <= 8960, no MMU on APQ8064 */
	kgsl_mmu_type =
		cpu_is_apq8064() ? KGSL_MMU_TYPE_NONE : KGSL_MMU_TYPE_GPU;

	/* Use the IOMMU if it is found */
	if (iommu_present(&platform_bus_type))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;

	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
	if (iommu_present(&platform_bus_type) && mmutype &&
		!strncmp(mmutype, "iommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_IOMMU;
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);

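/*
 * kgsl_mmu_gpuaddr_in_range - sanity-check that a GPU address lies inside
 * the pagetable VA window [KGSL_PAGETABLE_BASE, base + ptsize); with no MMU
 * every address is considered valid.
 */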
int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr)
{
	if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
		return 1;
	return ((gpuaddr >= KGSL_PAGETABLE_BASE) &&
		(gpuaddr < (KGSL_PAGETABLE_BASE + kgsl_mmu_get_ptsize())));
}
EXPORT_SYMBOL(kgsl_mmu_gpuaddr_in_range);