msm: kgsl: disable use of iommu TTBR1
The v1 IOMMU only supports splitting the address space between
TTBR0 and TTBR1 on a power-of-two boundary. Cutting off the
userspace address space at 2G (0x80000000) is inconvenient,
because the GPU userspace address space should align with the
CPU address space.
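As a rough illustration (assuming ARMv7 short-descriptor
TTBCR.N semantics for the v1 IOMMU; this snippet is not part
of the driver), TTBR0 covers [0, 4G >> N), so the only
possible split points are 2G, 1G, 512M and so on:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long four_gb = 1ULL << 32;
		unsigned int n;

		/* N=1 -> 0x80000000 (2G), N=2 -> 0x40000000 (1G), ... */
		for (n = 1; n <= 3; n++)
			printf("TTBCR.N=%u -> TTBR0/TTBR1 split at 0x%08llx\n",
			       n, four_gb >> n);
		return 0;
	}

A 3G/1G layout matching the CPU is therefore not expressible,
so TTBR1 is disabled instead.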
This requires changing how global allocations are managed,
since there is no longer a separate pagetable for TTBR1.
The default pagetable is still the master of these allocations
and maintains the gen_pool for allocating global addresses.
But now, these regions are mapped into each process pagetable
by calling kgsl_setup_pt(). This requires kgsl_mmu_map()
and kgsl_mmu_unmap() to be able to map and unmap a buffer
without allocating a virtual address for it.
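A minimal sketch of this scheme (hypothetical helper name;
the real logic lives in kgsl_mmu_map() below): only the
default pagetable owns a kgsl_pool, and every other pagetable
reuses the gpuaddr that pool already assigned:

	/* illustrative only: mirrors the global-mapping path */
	static int map_global_buffer(struct kgsl_pagetable *pt,
				     struct kgsl_memdesc *memdesc,
				     unsigned int size)
	{
		if (pt->kgsl_pool) {
			/* default pagetable: choose the shared address once */
			memdesc->gpuaddr = gen_pool_alloc(pt->kgsl_pool, size);
			if (memdesc->gpuaddr == 0)
				return -ENOMEM;
		} else if (memdesc->gpuaddr == 0) {
			/* per-process pagetables need a preassigned address */
			return -EINVAL;
		}
		/* map at memdesc->gpuaddr in this pagetable's domain */
		return 0;
	}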
Change-Id: I94e2d63dc7e6a7ef576f993770725b6b7ba14228
Signed-off-by: Jeremy Gebben <jgebben@codeaurora.org>
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index b49261c..44c13f9 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -275,6 +275,13 @@
if (result)
goto unmap_memstore_desc;
+ /*
+ * Set the mpu end to the last "normal" global memory we use.
+ * For the IOMMU, this will be used to restrict access to the
+ * mapped registers.
+ */
+ device->mh.mpu_range = device->mmu.setstate_memory.gpuaddr +
+ device->mmu.setstate_memory.size;
return result;
unmap_memstore_desc:
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 65eed58..6157bf6 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -692,13 +692,6 @@
dev_priv->device = device;
filep->private_data = dev_priv;
- /* Get file (per process) private struct */
- dev_priv->process_priv = kgsl_get_process_private(dev_priv);
- if (dev_priv->process_priv == NULL) {
- result = -ENOMEM;
- goto err_freedevpriv;
- }
-
mutex_lock(&device->mutex);
kgsl_check_suspended(device);
@@ -710,21 +703,38 @@
if (result) {
mutex_unlock(&device->mutex);
- goto err_putprocess;
+ goto err_freedevpriv;
}
kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);
}
device->open_count++;
mutex_unlock(&device->mutex);
+ /*
+ * Get file (per process) private struct. This must be done
+ * after the first start so that the global pagetable mappings
+ * are set up before we create the per-process pagetable.
+ */
+ dev_priv->process_priv = kgsl_get_process_private(dev_priv);
+ if (dev_priv->process_priv == NULL) {
+ result = -ENOMEM;
+ goto err_stop;
+ }
+
KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
device->name, kgsl_mmu_enabled() ? "on" : "off",
kgsl_pagetable_count);
return result;
-err_putprocess:
- kgsl_put_process_private(device, dev_priv->process_priv);
+err_stop:
+ mutex_lock(&device->mutex);
+ device->open_count--;
+ if (device->open_count == 0) {
+ /* don't let stop() clobber the -ENOMEM already in result */
+ device->ftbl->stop(device);
+ kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
+ }
+ mutex_unlock(&device->mutex);
err_freedevpriv:
filep->private_data = NULL;
kfree(dev_priv);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index b41bd6b..4a35d29 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -215,7 +215,7 @@
entry = rb_entry(node, struct kgsl_mem_entry, node);
m = &entry->memdesc;
- flags[0] = m->priv & KGSL_MEMDESC_GLOBAL ? 'g' : '-';
+ flags[0] = kgsl_memdesc_is_global(m) ? 'g' : '-';
flags[1] = m->flags & KGSL_MEMFLAGS_GPUREADONLY ? 'r' : '-';
flags[2] = get_alignflag(m);
flags[3] = '\0';
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index d20f3d2..bb0ef29 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -917,6 +917,70 @@
}
}
+/*
+ * kgsl_iommu_setup_regs - map iommu registers into a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * To do pagetable switches from the GPU command stream, the IOMMU
+ * registers need to be mapped into the GPU's pagetable. This function
+ * is used differently on different targets. On 8960, the registers
+ * are mapped into every pagetable during kgsl_setup_pt(). On
+ * all other targets, the registers are mapped only into the
+ * pagetable used by the second context bank.
+ *
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_setup_regs(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ int status;
+ int i = 0;
+ struct kgsl_iommu *iommu = mmu->priv;
+
+ if (!msm_soc_version_supports_iommu_v1())
+ return 0;
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ iommu->iommu_units[i].reg_map.priv |= KGSL_MEMDESC_GLOBAL;
+ status = kgsl_mmu_map(pt,
+ &(iommu->iommu_units[i].reg_map),
+ GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+ if (status) {
+ iommu->iommu_units[i].reg_map.priv &=
+ ~KGSL_MEMDESC_GLOBAL;
+ goto err;
+ }
+ }
+ return 0;
+err:
+ for (i--; i >= 0; i--) {
+ kgsl_mmu_unmap(pt,
+ &(iommu->iommu_units[i].reg_map));
+ iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMDESC_GLOBAL;
+ }
+ return status;
+}
+
+/*
+ * kgsl_iommu_cleanup_regs - unmap iommu registers from a pagetable
+ * @mmu: Pointer to mmu structure
+ * @pt: the pagetable
+ *
+ * Removes mappings created by kgsl_iommu_setup_regs().
+ */
+static void kgsl_iommu_cleanup_regs(struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt)
+{
+ struct kgsl_iommu *iommu = mmu->priv;
+ int i;
+ for (i = 0; i < iommu->unit_count; i++)
+ kgsl_mmu_unmap(pt, &(iommu->iommu_units[i].reg_map));
+}
+
static int kgsl_iommu_init(struct kgsl_mmu *mmu)
{
/*
@@ -959,6 +1023,15 @@
KGSL_IOMMU_SETSTATE_NOP_OFFSET,
cp_nop_packet(1));
+ if (cpu_is_msm8960()) {
+ /*
+ * 8960 doesn't have a second context bank, so the IOMMU
+ * registers must be mapped into every pagetable.
+ */
+ iommu_ops.mmu_setup_pt = kgsl_iommu_setup_regs;
+ iommu_ops.mmu_cleanup_pt = kgsl_iommu_cleanup_regs;
+ }
+
dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
__func__);
done:
@@ -981,9 +1054,6 @@
static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
{
int status = 0;
- int i = 0;
- struct kgsl_iommu *iommu = mmu->priv;
- struct kgsl_pagetable *pagetable = NULL;
/* If chip is not 8960 then we use the 2nd context bank for pagetable
* switching on the 3D side for which a separate table is allocated */
@@ -994,6 +1064,9 @@
status = -ENOMEM;
goto err;
}
+ status = kgsl_iommu_setup_regs(mmu, mmu->priv_bank_table);
+ if (status)
+ goto err;
}
mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
@@ -1001,31 +1074,10 @@
status = -ENOMEM;
goto err;
}
- pagetable = mmu->priv_bank_table ? mmu->priv_bank_table :
- mmu->defaultpagetable;
- /* Map the IOMMU regsiters to only defaultpagetable */
- if (msm_soc_version_supports_iommu_v1()) {
- for (i = 0; i < iommu->unit_count; i++) {
- iommu->iommu_units[i].reg_map.priv |=
- KGSL_MEMDESC_GLOBAL;
- status = kgsl_mmu_map(pagetable,
- &(iommu->iommu_units[i].reg_map),
- GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
- if (status) {
- iommu->iommu_units[i].reg_map.priv &=
- ~KGSL_MEMDESC_GLOBAL;
- goto err;
- }
- }
- }
return status;
err:
- for (i--; i >= 0; i--) {
- kgsl_mmu_unmap(pagetable,
- &(iommu->iommu_units[i].reg_map));
- iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMDESC_GLOBAL;
- }
if (mmu->priv_bank_table) {
+ kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
kgsl_mmu_putpagetable(mmu->priv_bank_table);
mmu->priv_bank_table = NULL;
}
@@ -1061,10 +1113,12 @@
* a225, hence we still keep the MMU active on 8960 */
if (cpu_is_msm8960()) {
struct kgsl_mh *mh = &(mmu->device->mh);
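+ /*
+ * If the IOMMU registers are mapped, they must be above
+ * mpu_base so that the MPU range programmed below can
+ * restrict GPU access to them.
+ */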
+ BUG_ON(iommu->iommu_units[0].reg_map.gpuaddr != 0 &&
+ mh->mpu_base > iommu->iommu_units[0].reg_map.gpuaddr);
kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+
kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
- mh->mpu_base +
- iommu->iommu_units[0].reg_map.gpuaddr);
+ mh->mpu_base + mh->mpu_range);
} else {
kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
}
@@ -1137,14 +1191,12 @@
"with err: %d\n", iommu_pt->domain, gpuaddr,
range, ret);
-#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
/*
* Flushing only required if per process pagetables are used. With
* global case, flushing will happen inside iommu_map function
*/
- if (!ret && msm_soc_version_supports_iommu_v1())
+ if (!ret && kgsl_mmu_is_perprocess())
*tlb_flags = UINT_MAX;
-#endif
return 0;
}
@@ -1225,22 +1277,23 @@
{
struct kgsl_iommu *iommu = mmu->priv;
int i;
- for (i = 0; i < iommu->unit_count; i++) {
- struct kgsl_pagetable *pagetable = (mmu->priv_bank_table ?
- mmu->priv_bank_table : mmu->defaultpagetable);
- if (iommu->iommu_units[i].reg_map.gpuaddr)
- kgsl_mmu_unmap(pagetable,
- &(iommu->iommu_units[i].reg_map));
- if (iommu->iommu_units[i].reg_map.hostptr)
- iounmap(iommu->iommu_units[i].reg_map.hostptr);
- kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
- iommu->iommu_units[i].reg_map.sglen);
+
+ if (mmu->priv_bank_table != NULL) {
+ kgsl_iommu_cleanup_regs(mmu, mmu->priv_bank_table);
+ kgsl_mmu_putpagetable(mmu->priv_bank_table);
}
- if (mmu->priv_bank_table)
- kgsl_mmu_putpagetable(mmu->priv_bank_table);
- if (mmu->defaultpagetable)
+ if (mmu->defaultpagetable != NULL)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
+
+ for (i = 0; i < iommu->unit_count; i++) {
+ struct kgsl_memdesc *reg_map = &iommu->iommu_units[i].reg_map;
+
+ if (reg_map->hostptr)
+ iounmap(reg_map->hostptr);
+ kgsl_sg_free(reg_map->sg, reg_map->sglen);
+ }
+
kfree(iommu);
return 0;
@@ -1385,6 +1438,9 @@
.mmu_get_pt_base_addr = kgsl_iommu_get_pt_base_addr,
.mmu_sync_lock = kgsl_iommu_sync_lock,
.mmu_sync_unlock = kgsl_iommu_sync_unlock,
+ /* These callbacks will be set on some chipsets */
+ .mmu_setup_pt = NULL,
+ .mmu_cleanup_pt = NULL,
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index f11511f..25edc16 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -37,17 +37,18 @@
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
int i;
- /* For IOMMU only unmap the global structures to global pt */
- if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
- (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
- (KGSL_MMU_GLOBAL_PT != pt->name) &&
- (KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
- return 0;
+ struct kgsl_device *device;
+
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
- struct kgsl_device *device = kgsl_driver.devp[i];
+ device = kgsl_driver.devp[i];
if (device)
device->ftbl->cleanup_pt(device, pt);
}
+ /* Only the 3D device needs MMU-specific pagetable entries */
+ device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+ if (device && device->mmu.mmu_ops->mmu_cleanup_pt != NULL)
+ device->mmu.mmu_ops->mmu_cleanup_pt(&device->mmu, pt);
+
return 0;
}
@@ -56,21 +57,23 @@
{
int i = 0;
int status = 0;
+ struct kgsl_device *device;
- /* For IOMMU only map the global structures to global pt */
- if ((KGSL_MMU_TYPE_NONE != kgsl_mmu_type) &&
- (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) &&
- (KGSL_MMU_GLOBAL_PT != pt->name) &&
- (KGSL_MMU_PRIV_BANK_TABLE_NAME != pt->name))
- return 0;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
- struct kgsl_device *device = kgsl_driver.devp[i];
+ device = kgsl_driver.devp[i];
if (device) {
status = device->ftbl->setup_pt(device, pt);
if (status)
goto error_pt;
}
}
+ /* Only the 3D device needs MMU-specific pagetable entries */
+ device = kgsl_driver.devp[KGSL_DEVICE_3D0];
+ if (device && device->mmu.mmu_ops->mmu_setup_pt != NULL) {
+ status = device->mmu.mmu_ops->mmu_setup_pt(&device->mmu, pt);
+ if (status)
+ goto error_pt;
+ }
return status;
error_pt:
while (i >= 0) {
@@ -310,22 +313,6 @@
return ret;
}
-unsigned int kgsl_mmu_get_ptsize(void)
-{
- /*
- * For IOMMU, we could do up to 4G virtual range if we wanted to, but
- * it makes more sense to return a smaller range and leave the rest of
- * the virtual range for future improvements
- */
-
- if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
- return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
- else if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type)
- return SZ_2G - KGSL_PAGETABLE_BASE;
- else
- return 0;
-}
-
int
kgsl_mmu_get_ptname_from_ptbase(struct kgsl_mmu *mmu, unsigned int pt_base)
{
@@ -511,7 +498,7 @@
goto err_kgsl_pool;
}
- if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
+ if (gen_pool_add(pagetable->pool, kgsl_mmu_get_base_addr(),
ptsize, -1)) {
KGSL_CORE_ERR("gen_pool_add failed\n");
goto err_pool;
@@ -559,11 +546,7 @@
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return (void *)(-1);
-#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
- name = KGSL_MMU_GLOBAL_PT;
-#endif
- /* We presently do not support per-process for IOMMU-v2 */
- if (!msm_soc_version_supports_iommu_v1())
+ if (!kgsl_mmu_is_perprocess())
name = KGSL_MMU_GLOBAL_PT;
pt = kgsl_get_pagetable(name);
@@ -626,15 +609,6 @@
*/
}
-static inline struct gen_pool *
-_get_pool(struct kgsl_pagetable *pagetable, unsigned int flags)
-{
- if (pagetable->kgsl_pool &&
- (KGSL_MEMDESC_GLOBAL & flags))
- return pagetable->kgsl_pool;
- return pagetable->pool;
-}
-
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
@@ -665,28 +639,48 @@
size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
- /* Allocate from kgsl pool if it exists for global mappings */
- pool = _get_pool(pagetable, memdesc->priv);
+ pool = pagetable->pool;
- /* Allocate aligned virtual addresses for iommu. This allows
- * more efficient pagetable entries if the physical memory
- * is also aligned. Don't do this for GPUMMU, because
- * the address space is so small.
- */
- if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype() &&
- kgsl_memdesc_get_align(memdesc) > 0)
- page_align = kgsl_memdesc_get_align(memdesc);
-
- memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size, page_align);
- if (memdesc->gpuaddr == 0) {
- KGSL_CORE_ERR("gen_pool_alloc(%d) failed from pool: %s\n",
- size,
- (pool == pagetable->kgsl_pool) ?
- "kgsl_pool" : "general_pool");
- KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
- pagetable->name, pagetable->stats.mapped,
- pagetable->stats.entries);
- return -ENOMEM;
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()) {
+ /* Allocate aligned virtual addresses for iommu. This allows
+ * more efficient pagetable entries if the physical memory
+ * is also aligned. Don't do this for GPUMMU, because
+ * the address space is so small.
+ */
+ if (kgsl_memdesc_get_align(memdesc) > 0)
+ page_align = kgsl_memdesc_get_align(memdesc);
+ if (kgsl_memdesc_is_global(memdesc)) {
+ /*
+ * Only the default pagetable has a kgsl_pool, and
+ * it is responsible for creating the mapping for
+ * each global buffer. The mapping will be reused
+ * in all other pagetables and it must already exist
+ * when we're creating other pagetables which do not
+ * have a kgsl_pool.
+ */
+ pool = pagetable->kgsl_pool;
+ if (pool == NULL && memdesc->gpuaddr == 0) {
+ KGSL_CORE_ERR(
+ "No address for global mapping into pt %d\n",
+ pagetable->name);
+ return -EINVAL;
+ }
+ }
+ }
+ if (pool) {
+ memdesc->gpuaddr = gen_pool_alloc_aligned(pool, size,
+ page_align);
+ if (memdesc->gpuaddr == 0) {
+ KGSL_CORE_ERR("gen_pool_alloc(%d) failed, pool: %s\n",
+ size,
+ (pool == pagetable->kgsl_pool) ?
+ "kgsl_pool" : "general_pool");
+ KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
+ pagetable->name,
+ pagetable->stats.mapped,
+ pagetable->stats.entries);
+ return -ENOMEM;
+ }
}
if (KGSL_MMU_TYPE_IOMMU != kgsl_mmu_get_mmutype())
@@ -713,7 +707,8 @@
err_free_gpuaddr:
spin_unlock(&pagetable->lock);
- gen_pool_free(pool, memdesc->gpuaddr, size);
+ if (pool)
+ gen_pool_free(pool, memdesc->gpuaddr, size);
memdesc->gpuaddr = 0;
return ret;
}
@@ -759,14 +754,20 @@
spin_unlock(&pagetable->lock);
- pool = _get_pool(pagetable, memdesc->priv);
- gen_pool_free(pool, memdesc->gpuaddr, size);
+ pool = pagetable->pool;
+
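+ /*
+ * Global buffers come from the default pagetable's kgsl_pool.
+ * Other pagetables have no kgsl_pool, so the free is skipped.
+ */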
+ if (KGSL_MMU_TYPE_IOMMU == kgsl_mmu_get_mmutype()
+ && kgsl_memdesc_is_global(memdesc)) {
+ pool = pagetable->kgsl_pool;
+ }
+ if (pool)
+ gen_pool_free(pool, memdesc->gpuaddr, size);
/*
* Don't clear the gpuaddr on global mappings because they
* may be in use by other pagetables
*/
- if (!(memdesc->priv & KGSL_MEMDESC_GLOBAL))
+ if (!kgsl_memdesc_is_global(memdesc))
memdesc->gpuaddr = 0;
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 377f342..bf330ee 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -13,13 +13,14 @@
#ifndef __KGSL_MMU_H
#define __KGSL_MMU_H
+#include <mach/iommu.h>
+
/*
- * These defines control the split between ttbr1 and ttbr0 pagetables of IOMMU
- * and what ranges of memory we map to them
+ * These defines control the address range for allocations that
+ * are mapped into all pagetables.
*/
#define KGSL_IOMMU_GLOBAL_MEM_BASE 0xC0000000
#define KGSL_IOMMU_GLOBAL_MEM_SIZE SZ_4M
-#define KGSL_IOMMU_TTBR1_SPLIT 2
#define KGSL_MMU_ALIGN_SHIFT 13
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
@@ -155,6 +156,10 @@
unsigned int (*mmu_sync_unlock)
(struct kgsl_mmu *mmu,
unsigned int *cmds);
+ int (*mmu_setup_pt) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt);
+ void (*mmu_cleanup_pt) (struct kgsl_mmu *mmu,
+ struct kgsl_pagetable *pt);
};
struct kgsl_mmu_pt_ops {
@@ -220,7 +225,6 @@
int kgsl_mmu_enabled(void);
void kgsl_mmu_set_mmutype(char *mmutype);
enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
-unsigned int kgsl_mmu_get_ptsize(void);
int kgsl_mmu_gpuaddr_in_range(unsigned int gpuaddr);
/*
@@ -352,4 +356,58 @@
return 0;
}
+/*
+ * kgsl_mmu_is_perprocess() - Runtime check for per-process
+ * pagetables.
+ *
+ * Returns non-zero if per-process pagetables are enabled,
+ * 0 if not.
+ */
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+static inline int kgsl_mmu_is_perprocess(void)
+{
+ /* We presently do not support per-process for IOMMU-v2 */
+ return (kgsl_mmu_get_mmutype() != KGSL_MMU_TYPE_IOMMU)
+ || msm_soc_version_supports_iommu_v1();
+}
+#else
+static inline int kgsl_mmu_is_perprocess(void)
+{
+ return 0;
+}
+#endif
+
+/*
+ * kgsl_mmu_get_base_addr() - Get gpu virtual address base.
+ *
+ * Returns the start address of the gpu
+ * virtual address space.
+ */
+static inline unsigned int kgsl_mmu_get_base_addr(void)
+{
+ return KGSL_PAGETABLE_BASE;
+}
+
+/*
+ * kgsl_mmu_get_ptsize() - Get gpu pagetable size
+ *
+ * Returns the usable size of the gpu address space.
+ */
+static inline unsigned int kgsl_mmu_get_ptsize(void)
+{
+ /*
+ * For IOMMU, we could do up to 4G virtual range if we wanted to, but
+ * it makes more sense to return a smaller range and leave the rest of
+ * the virtual range for future improvements
+ */
+ enum kgsl_mmutype mmu_type = kgsl_mmu_get_mmutype();
+
+ if (KGSL_MMU_TYPE_GPU == mmu_type)
+ return CONFIG_MSM_KGSL_PAGE_TABLE_SIZE;
+ else if (KGSL_MMU_TYPE_IOMMU == mmu_type)
+ return SZ_2G;
+ return 0;
+}
+
#endif /* __KGSL_MMU_H */
diff --git a/drivers/gpu/msm/kgsl_sharedmem.h b/drivers/gpu/msm/kgsl_sharedmem.h
index a895a75..ecf292e 100644
--- a/drivers/gpu/msm/kgsl_sharedmem.h
+++ b/drivers/gpu/msm/kgsl_sharedmem.h
@@ -162,6 +162,17 @@
return 0;
}
+/*
+ * kgsl_memdesc_is_global - is this a globally mapped buffer?
+ * @memdesc: the memdesc
+ *
+ * Returns nonzero if this is a global mapping, 0 otherwise
+ */
+static inline int kgsl_memdesc_is_global(const struct kgsl_memdesc *memdesc)
+{
+ return (memdesc->priv & KGSL_MEMDESC_GLOBAL) != 0;
+}
+
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c
index cfa39b2..24ac1d5 100644
--- a/drivers/gpu/msm/z180.c
+++ b/drivers/gpu/msm/z180.c
@@ -260,6 +260,13 @@
GSL_PT_PAGE_RV);
if (result)
goto error_unmap_memstore;
+ /*
+ * Set the mpu end to the last "normal" global memory we use.
+ * For the IOMMU, this will be used to restrict access to the
+ * mapped registers.
+ */
+ device->mh.mpu_range = z180_dev->ringbuffer.cmdbufdesc.gpuaddr +
+ z180_dev->ringbuffer.cmdbufdesc.size;
return result;
error_unmap_dummy: