Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4
AU_LINUX_ANDROID_ICS.04.00.04.00.126 from msm-3.0.
First parent is from google/android-3.4.
* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
PRNG: Device tree entry for qrng device.
vidc:1080p: Set video core timeout value for Thumbnail mode
msm: sps: improve the debugging support in SPS driver
board-8064 msm: Overlap secure and non secure video firmware heaps.
msm: clock: Add handoff ops for 7x30 and copper XO clocks
msm_fb: display: Wait for external vsync before DTV IOMMU unmap
msm: Fix ciruclar dependency in debug UART settings
msm: gdsc: Add GDSC regulator driver for msm-copper
defconfig: Enable Mobicore Driver.
mobicore: Add mobicore driver.
mobicore: rename variable to lower case.
mobicore: rename folder.
mobicore: add makefiles
mobicore: initial import of kernel driver
ASoC: msm: Add SLIMBUS_2_RX CPU DAI
board-8064-gpio: Update FUNC for EPM SPI CS
msm_fb: display: Remove chicken bit config during video playback
mmc: msm_sdcc: enable the sanitize capability
msm-fb: display: lm2 writeback support on mpq platfroms
msm_fb: display: Disable LVDS phy & pll during panel off
...
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
new file mode 100644
index 0000000..febb265
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -0,0 +1,1020 @@
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/slab.h>
+#include <linux/iommu.h>
+#include <linux/msm_kgsl.h>
+
+#include "kgsl.h"
+#include "kgsl_device.h"
+#include "kgsl_mmu.h"
+#include "kgsl_sharedmem.h"
+#include "kgsl_iommu.h"
+#include "adreno_pm4types.h"
+#include "adreno.h"
+#include "kgsl_trace.h"
+
+/*
+ * get_iommu_unit - Find the IOMMU unit that owns a given context device
+ * @dev - struct device pointer of an IOMMU context bank
+ *
+ * Walks every registered kgsl device, its mmu and each of that mmu's
+ * iommu units, looking for the unit whose dev[] array contains @dev.
+ * Return - Pointer to the owning iommu unit, or NULL if none matches
+ */
+static struct kgsl_iommu_unit *get_iommu_unit(struct device *dev)
+{
+	int i, j, k;
+
+	for (i = 0; i < KGSL_DEVICE_MAX; i++) {
+		struct kgsl_mmu *mmu;
+		struct kgsl_iommu *iommu;
+
+		/* Skip device slots that are not populated */
+		if (kgsl_driver.devp[i] == NULL)
+			continue;
+
+		mmu = kgsl_get_mmu(kgsl_driver.devp[i]);
+		if (mmu == NULL || mmu->priv == NULL)
+			continue;
+
+		iommu = mmu->priv;
+
+		for (j = 0; j < iommu->unit_count; j++) {
+			struct kgsl_iommu_unit *iommu_unit =
+				&iommu->iommu_units[j];
+			for (k = 0; k < iommu_unit->dev_count; k++) {
+				if (iommu_unit->dev[k].dev == dev)
+					return iommu_unit;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * get_iommu_device - Find the kgsl iommu device entry for a context device
+ * @unit - IOMMU unit to search (may be NULL, in which case NULL is returned)
+ * @dev - struct device pointer of an IOMMU context bank
+ *
+ * Return - Pointer to the matching kgsl_iommu_device inside @unit, or NULL
+ */
+static struct kgsl_iommu_device *get_iommu_device(struct kgsl_iommu_unit *unit,
+		struct device *dev)
+{
+	int k;
+
+	/* NULL-safe: the loop condition checks unit before dereferencing */
+	for (k = 0; unit && k < unit->dev_count; k++) {
+		if (unit->dev[k].dev == dev)
+			return &(unit->dev[k]);
+	}
+
+	return NULL;
+}
+
+/*
+ * kgsl_iommu_fault_handler - IOMMU pagefault callback
+ * @domain - IOMMU domain on which the fault occurred
+ * @dev - Context-bank device that faulted
+ * @addr - Faulting GPU virtual address
+ * @flags - Fault flags from the IOMMU driver
+ *
+ * Logs the faulting address, the pagetable name derived from the active
+ * TTBR0 base, and the context's FSR register, then emits a trace event.
+ * Return - 0 after logging (fault is only reported, not resolved here),
+ * -ENOSYS if @dev is not a known kgsl iommu device
+ */
+static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
+	struct device *dev, unsigned long addr, int flags)
+{
+	struct kgsl_iommu_unit *iommu_unit = get_iommu_unit(dev);
+	struct kgsl_iommu_device *iommu_dev = get_iommu_device(iommu_unit, dev);
+	unsigned int ptbase, fsr;
+
+	if (!iommu_dev) {
+		KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
+		return -ENOSYS;
+	}
+
+	ptbase = iommu_get_pt_base_addr(domain);
+
+	/* Read the fault status register for this context bank */
+	fsr = KGSL_IOMMU_GET_IOMMU_REG(iommu_unit->reg_map.hostptr,
+		iommu_dev->ctx_id, FSR);
+
+	KGSL_MEM_CRIT(iommu_dev->kgsldev,
+		"GPU PAGE FAULT: addr = %lX pid = %d\n",
+		addr, kgsl_mmu_get_ptname_from_ptbase(ptbase));
+	KGSL_MEM_CRIT(iommu_dev->kgsldev, "context = %d FSR = %X\n",
+		iommu_dev->ctx_id, fsr);
+
+	trace_kgsl_mmu_pagefault(iommu_dev->kgsldev, addr,
+			kgsl_mmu_get_ptname_from_ptbase(ptbase), 0);
+
+	return 0;
+}
+
+/*
+ * kgsl_iommu_disable_clk - Disable iommu clocks
+ * @mmu - Pointer to mmu structure
+ *
+ * Disables the core and pclk clocks of every context device whose clocks
+ * are currently enabled, and clears the per-device clk_enabled flag.
+ * Return - void
+ */
+static void kgsl_iommu_disable_clk(struct kgsl_mmu *mmu)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+	int i, j;
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			if (!iommu_unit->dev[j].clk_enabled)
+				continue;
+			/* drvdata lives on the parent (iommu) device */
+			iommu_drvdata = dev_get_drvdata(
+					iommu_unit->dev[j].dev->parent);
+			/* The core clock is optional; pclk is always present */
+			if (iommu_drvdata->clk)
+				clk_disable_unprepare(iommu_drvdata->clk);
+			clk_disable_unprepare(iommu_drvdata->pclk);
+			iommu_unit->dev[j].clk_enabled = false;
+		}
+	}
+}
+
+/*
+ * kgsl_iommu_enable_clk - Enable iommu clocks
+ * @mmu - Pointer to mmu structure
+ * @ctx_id - The context bank whose clocks are to be turned on
+ *
+ * Enables pclk (and the optional core clk) for every context device in
+ * every iommu unit that matches @ctx_id and is not already enabled.
+ * On any failure all clocks enabled so far are turned back off.
+ * Return: 0 on success else error code
+ */
+static int kgsl_iommu_enable_clk(struct kgsl_mmu *mmu,
+				int ctx_id)
+{
+	int ret = 0;
+	int i, j;
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct msm_iommu_drvdata *iommu_drvdata;
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			/* Skip devices already on or of a different context */
+			if (iommu_unit->dev[j].clk_enabled ||
+				ctx_id != iommu_unit->dev[j].ctx_id)
+				continue;
+			iommu_drvdata =
+			dev_get_drvdata(iommu_unit->dev[j].dev->parent);
+			ret = clk_prepare_enable(iommu_drvdata->pclk);
+			if (ret)
+				goto done;
+			if (iommu_drvdata->clk) {
+				ret = clk_prepare_enable(iommu_drvdata->clk);
+				if (ret) {
+					/* undo the pclk enable above */
+					clk_disable_unprepare(
+						iommu_drvdata->pclk);
+					goto done;
+				}
+			}
+			iommu_unit->dev[j].clk_enabled = true;
+		}
+	}
+done:
+	/* roll back every clock enabled in this call on failure */
+	if (ret)
+		kgsl_iommu_disable_clk(mmu);
+	return ret;
+}
+
+/*
+ * kgsl_iommu_pt_equal - Check if pagetables are equal
+ * @pt - Pointer to pagetable
+ * @pt_base - Address of a pagetable that the IOMMU register is
+ * programmed with
+ *
+ * Checks whether the pt_base is equal to the base address of
+ * the pagetable which is contained in the pt structure. Both values
+ * are masked down to the physical-address bits of TTBR0 before the
+ * comparison, and zero (invalid) bases never compare equal.
+ * Return - Non-zero if the pagetable addresses are equal else 0
+ */
+static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
+					unsigned int pt_base)
+{
+	struct kgsl_iommu_pt *iommu_pt = pt ? pt->priv : NULL;
+	unsigned int domain_ptbase = iommu_pt ?
+				iommu_get_pt_base_addr(iommu_pt->domain) : 0;
+	/* Only compare the valid address bits of the pt_base */
+	domain_ptbase &= (KGSL_IOMMU_TTBR0_PA_MASK <<
+				KGSL_IOMMU_TTBR0_PA_SHIFT);
+	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK <<
+				KGSL_IOMMU_TTBR0_PA_SHIFT);
+	return domain_ptbase && pt_base &&
+		(domain_ptbase == pt_base);
+}
+
+/*
+ * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
+ * @mmu_specific_pt - Pointer to pagetable which is to be freed
+ *
+ * Frees the IOMMU domain and releases the pagetable's asid: a reused
+ * asid only drops the reuse refcount, and the asid bit is cleared once
+ * no pagetable references it any more.
+ * Return - void
+ */
+static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
+{
+	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+	if (iommu_pt->domain)
+		iommu_domain_free(iommu_pt->domain);
+	if (iommu_pt->iommu) {
+		/* Shared (reuse) asid: just decrement the reuse count */
+		if ((KGSL_IOMMU_ASID_REUSE == iommu_pt->asid) &&
+			iommu_pt->iommu->asid_reuse)
+			iommu_pt->iommu->asid_reuse--;
+		/* Free the asid bit once it is no longer shared */
+		if (!iommu_pt->iommu->asid_reuse ||
+			(KGSL_IOMMU_ASID_REUSE != iommu_pt->asid))
+			clear_bit(iommu_pt->asid, iommu_pt->iommu->asids);
+	}
+	kfree(iommu_pt);
+}
+
+/*
+ * kgsl_iommu_create_pagetable - Create a IOMMU pagetable
+ *
+ * Allocate memory to hold a pagetable and allocate the IOMMU
+ * domain which is the actual IOMMU pagetable, then install the kgsl
+ * pagefault handler on the new domain.
+ * NOTE(review): non-static — presumably referenced from a header or
+ * the pt ops table below; confirm before changing linkage.
+ * Return - Pointer to the new kgsl_iommu_pt, or NULL on failure
+ */
+void *kgsl_iommu_create_pagetable(void)
+{
+	struct kgsl_iommu_pt *iommu_pt;
+
+	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
+	if (!iommu_pt) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+				sizeof(struct kgsl_iommu_pt));
+		return NULL;
+	}
+	iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
+					MSM_IOMMU_DOMAIN_PT_CACHEABLE);
+	if (!iommu_pt->domain) {
+		KGSL_CORE_ERR("Failed to create iommu domain\n");
+		kfree(iommu_pt);
+		return NULL;
+	} else {
+		iommu_set_fault_handler(iommu_pt->domain,
+			kgsl_iommu_fault_handler);
+	}
+
+	return iommu_pt;
+}
+
+/*
+ * kgsl_detach_pagetable_iommu_domain - Detach the IOMMU unit from a
+ * pagetable
+ * @mmu - Pointer to the device mmu structure
+ *
+ * Detach the IOMMU unit with the domain that is contained in the
+ * hwpagetable of the given mmu. After detaching the IOMMU unit is not
+ * in use because the PTBR will not be set after a detach
+ * Return - void
+ */
+static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
+{
+	struct kgsl_iommu_pt *iommu_pt;
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i, j;
+
+	/* A valid hwpagetable is a precondition of calling this */
+	BUG_ON(mmu->hwpagetable == NULL);
+	BUG_ON(mmu->hwpagetable->priv == NULL);
+
+	iommu_pt = mmu->hwpagetable->priv;
+
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			if (iommu_unit->dev[j].attached) {
+				iommu_detach_device(iommu_pt->domain,
+						iommu_unit->dev[j].dev);
+				iommu_unit->dev[j].attached = false;
+				KGSL_MEM_INFO(mmu->device, "iommu %p detached "
+					"from user dev of MMU: %p\n",
+					iommu_pt->domain, mmu);
+			}
+		}
+	}
+}
+
+/*
+ * kgsl_attach_pagetable_iommu_domain - Attach the IOMMU unit to a
+ * pagetable, i.e set the IOMMU's PTBR to the pagetable address and
+ * setup other IOMMU registers for the device so that it becomes
+ * active
+ * @mmu - Pointer to the device mmu structure
+ *
+ * Attach the IOMMU unit with the domain that is contained in the
+ * hwpagetable of the given mmu. Already-attached devices are skipped,
+ * so the call is idempotent.
+ * Return - 0 on success else error code
+ */
+static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
+{
+	struct kgsl_iommu_pt *iommu_pt;
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i, j, ret = 0;
+
+	/* A valid hwpagetable is a precondition of calling this */
+	BUG_ON(mmu->hwpagetable == NULL);
+	BUG_ON(mmu->hwpagetable->priv == NULL);
+
+	iommu_pt = mmu->hwpagetable->priv;
+
+	/*
+	 * Loop through all the iommu devcies under all iommu units and
+	 * attach the domain
+	 */
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++) {
+			if (!iommu_unit->dev[j].attached) {
+				ret = iommu_attach_device(iommu_pt->domain,
+							iommu_unit->dev[j].dev);
+				if (ret) {
+					KGSL_MEM_ERR(mmu->device,
+						"Failed to attach device, err %d\n",
+						ret);
+					goto done;
+				}
+				iommu_unit->dev[j].attached = true;
+				KGSL_MEM_INFO(mmu->device,
+				"iommu pt %p attached to dev %p, ctx_id %d\n",
+					iommu_pt->domain, iommu_unit->dev[j].dev,
+					iommu_unit->dev[j].ctx_id);
+			}
+		}
+	}
+done:
+	return ret;
+}
+
+/*
+ * _get_iommu_ctxs - Get device pointer to IOMMU contexts
+ * @mmu - Pointer to mmu device
+ * @data - Pointer to the platform data containing information about
+ * iommu devices for one iommu unit
+ * @unit_id - The IOMMU unit number. This is not a specific ID but just
+ * a serial number. The serial numbers are treated as ID's of the
+ * IOMMU units
+ *
+ * Resolves each named context into a struct device via
+ * msm_iommu_get_ctx() and records it, with its ctx_id, in the unit's
+ * dev[] array, incrementing dev_count per context found.
+ * Return - 0 on success else error code
+ */
+static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
+	struct kgsl_device_iommu_data *data, unsigned int unit_id)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
+	int i;
+
+	if (data->iommu_ctx_count > KGSL_IOMMU_MAX_DEVS_PER_UNIT) {
+		KGSL_CORE_ERR("Too many iommu devices defined for an "
+				"IOMMU unit\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < data->iommu_ctx_count; i++) {
+		/* Unnamed entries are simply skipped, not errors */
+		if (!data->iommu_ctxs[i].iommu_ctx_name)
+			continue;
+
+		iommu_unit->dev[iommu_unit->dev_count].dev =
+			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
+		if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
+			KGSL_CORE_ERR("Failed to get iommu dev handle for "
+			"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
+			return -EINVAL;
+		}
+		/* Only the USER and PRIV context ids are supported */
+		if (KGSL_IOMMU_CONTEXT_USER != data->iommu_ctxs[i].ctx_id &&
+			KGSL_IOMMU_CONTEXT_PRIV != data->iommu_ctxs[i].ctx_id) {
+			KGSL_CORE_ERR("Invalid context ID defined: %d\n",
+					data->iommu_ctxs[i].ctx_id);
+			return -EINVAL;
+		}
+		iommu_unit->dev[iommu_unit->dev_count].ctx_id =
+						data->iommu_ctxs[i].ctx_id;
+		iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
+
+		KGSL_DRV_INFO(mmu->device,
+				"Obtained dev handle %p for iommu context %s\n",
+				iommu_unit->dev[iommu_unit->dev_count].dev,
+				data->iommu_ctxs[i].iommu_ctx_name);
+
+		iommu_unit->dev_count++;
+	}
+
+	return 0;
+}
+
+/*
+ * kgsl_get_iommu_ctxt - Get device pointer to IOMMU contexts
+ * @mmu - Pointer to mmu device
+ *
+ * Get the device pointers for the IOMMU user and priv contexts of the
+ * kgsl device from platform data, one iommu unit at a time, and set
+ * iommu->unit_count to the number of units described there.
+ * Return - 0 on success else error code
+ */
+static int kgsl_get_iommu_ctxt(struct kgsl_mmu *mmu)
+{
+	struct platform_device *pdev =
+		container_of(mmu->device->parentdev, struct platform_device,
+				dev);
+	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+	int i, ret = 0;
+
+	/* Go through the IOMMU data and get all the context devices */
+	if (KGSL_IOMMU_MAX_UNITS < pdata_dev->iommu_count) {
+		KGSL_CORE_ERR("Too many IOMMU units defined\n");
+		ret = -EINVAL;
+		goto done;
+	}
+
+	for (i = 0; i < pdata_dev->iommu_count; i++) {
+		ret = _get_iommu_ctxs(mmu, &pdata_dev->iommu_data[i], i);
+		if (ret)
+			break;
+	}
+	/* NOTE(review): unit_count is set even if a unit failed above */
+	iommu->unit_count = pdata_dev->iommu_count;
+done:
+	return ret;
+}
+
+/*
+ * kgsl_set_register_map - Map the IOMMU registers in the memory descriptors
+ * of the respective iommu units
+ * @mmu - Pointer to mmu structure
+ *
+ * ioremap()s the physical register range of every iommu unit described in
+ * the platform data into the unit's reg_map descriptor and builds its
+ * scatterlist. On failure all mappings made so far are undone.
+ * Return - 0 on success else error code
+ */
+static int kgsl_set_register_map(struct kgsl_mmu *mmu)
+{
+	struct platform_device *pdev =
+		container_of(mmu->device->parentdev, struct platform_device,
+				dev);
+	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
+	struct kgsl_iommu *iommu = mmu->device->mmu.priv;
+	struct kgsl_iommu_unit *iommu_unit;
+	int i = 0, ret = 0;
+
+	for (; i < pdata_dev->iommu_count; i++) {
+		struct kgsl_device_iommu_data data = pdata_dev->iommu_data[i];
+		iommu_unit = &iommu->iommu_units[i];
+		/* set up the IOMMU register map for the given IOMMU unit */
+		if (!data.physstart || !data.physend) {
+			KGSL_CORE_ERR("The register range for IOMMU unit not"
+					" specified\n");
+			ret = -EINVAL;
+			goto err;
+		}
+		iommu_unit->reg_map.hostptr = ioremap(data.physstart,
+					data.physend - data.physstart + 1);
+		if (!iommu_unit->reg_map.hostptr) {
+			KGSL_CORE_ERR("Failed to map SMMU register address "
+				"space from %x to %x\n", data.physstart,
+				data.physend - data.physstart + 1);
+			ret = -ENOMEM;
+			goto err;
+		}
+		iommu_unit->reg_map.size = data.physend - data.physstart + 1;
+		iommu_unit->reg_map.physaddr = data.physstart;
+		memdesc_sg_phys(&iommu_unit->reg_map, data.physstart,
+				iommu_unit->reg_map.size);
+	}
+	iommu->unit_count = pdata_dev->iommu_count;
+	return ret;
+err:
+	/*
+	 * Unmap any mapped IOMMU regions. Unit i may have failed before (or
+	 * during) ioremap, so only iounmap units whose hostptr was actually
+	 * set; previously the failing unit's NULL hostptr could be passed
+	 * to iounmap().
+	 */
+	for (; i >= 0; i--) {
+		iommu_unit = &iommu->iommu_units[i];
+		if (iommu_unit->reg_map.hostptr)
+			iounmap(iommu_unit->reg_map.hostptr);
+		iommu_unit->reg_map.hostptr = NULL;
+		iommu_unit->reg_map.size = 0;
+		iommu_unit->reg_map.physaddr = 0;
+	}
+	return ret;
+}
+
+/*
+ * kgsl_iommu_pt_get_base_addr - Get the address of the pagetable that the
+ * IOMMU ttbr0 register is programmed with
+ * @pt - kgsl pagetable pointer that contains the IOMMU domain pointer
+ *
+ * Return - actual pagetable address that the ttbr0 register is programmed
+ * with
+ */
+static unsigned int kgsl_iommu_pt_get_base_addr(struct kgsl_pagetable *pt)
+{
+	struct kgsl_iommu_pt *iommu_pt = pt->priv;
+	/* Delegates to the msm iommu driver's domain -> ttbr0 lookup */
+	return iommu_get_pt_base_addr(iommu_pt->domain);
+}
+
+/*
+ * kgsl_iommu_get_pt_lsb - Return the lsb of the ttbr0 IOMMU register
+ * @mmu - Pointer to mmu structure
+ * @unit_id - The iommu unit whose context bank lsb value is wanted
+ * @ctx_id - The context bank whose lsb value is to be returned
+ * Return - returns the lsb which is the last 14 bits of the ttbr0 IOMMU
+ * register. ttbr0 is the actual PTBR for of the IOMMU. The last 14 bits
+ * are only programmed once in the beginning when a domain is attached
+ * does not change. Returns 0 if no matching unit/context is found.
+ */
+static int kgsl_iommu_get_pt_lsb(struct kgsl_mmu *mmu,
+				unsigned int unit_id,
+				enum kgsl_iommu_context_id ctx_id)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i, j;
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++)
+			/* match both the unit index and the context id */
+			if (unit_id == i &&
+				ctx_id == iommu_unit->dev[j].ctx_id)
+				return iommu_unit->dev[j].pt_lsb;
+	}
+	return 0;
+}
+
+/*
+ * kgsl_iommu_setstate - Switch the mmu to a new pagetable if needed
+ * @mmu - Pointer to mmu structure
+ * @pagetable - Pagetable that should become the active hwpagetable
+ *
+ * No-op unless the mmu has been started. If @pagetable differs from the
+ * current hwpagetable, records it and issues a setstate with a pagetable
+ * update (plus a tlb flush when a reused asid makes stale entries possible).
+ * Return - void
+ */
+static void kgsl_iommu_setstate(struct kgsl_mmu *mmu,
+				struct kgsl_pagetable *pagetable)
+{
+	if (mmu->flags & KGSL_FLAGS_STARTED) {
+		struct kgsl_iommu *iommu = mmu->priv;
+		struct kgsl_iommu_pt *iommu_pt = pagetable->priv;
+		/* page table not current, then setup mmu to use new
+		 * specified page table
+		 */
+		if (mmu->hwpagetable != pagetable) {
+			unsigned int flags = 0;
+			mmu->hwpagetable = pagetable;
+			/* force tlb flush if asid is reused */
+			if (iommu->asid_reuse &&
+				(KGSL_IOMMU_ASID_REUSE == iommu_pt->asid))
+				flags |= KGSL_MMUFLAGS_TLBFLUSH;
+			flags |= kgsl_mmu_pt_get_flags(mmu->hwpagetable,
+							mmu->device->id);
+			kgsl_setstate(mmu, KGSL_MMUFLAGS_PTUPDATE | flags);
+		}
+	}
+}
+
+/*
+ * kgsl_iommu_init - Initialize the iommu backend for a device mmu
+ * @mmu - Pointer to mmu structure
+ *
+ * Allocates the kgsl_iommu bookkeeping structure and the asid bitmap,
+ * resolves the IOMMU context devices, maps the IOMMU registers, and
+ * writes the nop packet needed for in-stream pagetable switches.
+ * Call this with the global lock held. All partial allocations are
+ * released on failure.
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_init(struct kgsl_mmu *mmu)
+{
+	int status = 0;
+	struct kgsl_iommu *iommu;
+
+	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
+	if (!iommu) {
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+				sizeof(struct kgsl_iommu));
+		return -ENOMEM;
+	}
+	iommu->asids = kzalloc(BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
+				sizeof(unsigned long), GFP_KERNEL);
+	if (!iommu->asids) {
+		/*
+		 * Report the size actually requested for the asid bitmap;
+		 * this previously (incorrectly) logged sizeof(struct
+		 * kgsl_iommu).
+		 */
+		KGSL_CORE_ERR("kzalloc(%d) failed\n",
+				BITS_TO_LONGS(KGSL_IOMMU_MAX_ASIDS) *
+				sizeof(unsigned long));
+		status = -ENOMEM;
+		goto done;
+	}
+
+	mmu->priv = iommu;
+	status = kgsl_get_iommu_ctxt(mmu);
+	if (status)
+		goto done;
+	status = kgsl_set_register_map(mmu);
+	if (status)
+		goto done;
+
+	/* A nop is required in an indirect buffer when switching
+	 * pagetables in-stream */
+	kgsl_sharedmem_writel(&mmu->setstate_memory,
+				KGSL_IOMMU_SETSTATE_NOP_OFFSET,
+				cp_nop_packet(1));
+
+	dev_info(mmu->device->dev, "|%s| MMU type set for device is IOMMU\n",
+			__func__);
+done:
+	if (status) {
+		kfree(iommu->asids);
+		kfree(iommu);
+		mmu->priv = NULL;
+	}
+	return status;
+}
+
+/*
+ * kgsl_iommu_setup_defaultpagetable - Setup the initial defaultpagetable
+ * for iommu. This function is only called once during first start, successive
+ * start do not call this function.
+ * @mmu - Pointer to mmu structure
+ *
+ * Create the initial defaultpagetable, map each unit's register region
+ * into it as a global mapping, and reserve asids 0 and 1.
+ * On error all mappings made so far are undone and the pagetable released.
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_setup_defaultpagetable(struct kgsl_mmu *mmu)
+{
+	int status = 0;
+	int i = 0;
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct kgsl_iommu_pt *iommu_pt;
+
+	mmu->defaultpagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
+	/* Return error if the default pagetable doesn't exist */
+	if (mmu->defaultpagetable == NULL) {
+		status = -ENOMEM;
+		goto err;
+	}
+	/* Map the IOMMU regsiters to only defaultpagetable */
+	for (i = 0; i < iommu->unit_count; i++) {
+		iommu->iommu_units[i].reg_map.priv |= KGSL_MEMFLAGS_GLOBAL;
+		status = kgsl_mmu_map(mmu->defaultpagetable,
+			&(iommu->iommu_units[i].reg_map),
+			GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
+		if (status) {
+			iommu->iommu_units[i].reg_map.priv &=
+						~KGSL_MEMFLAGS_GLOBAL;
+			goto err;
+		}
+	}
+	/*
+	 * The dafault pagetable always has asid 0 assigned by the iommu driver
+	 * and asid 1 is assigned to the private context.
+	 */
+	iommu_pt = mmu->defaultpagetable->priv;
+	iommu_pt->asid = 0;
+	set_bit(0, iommu->asids);
+	set_bit(1, iommu->asids);
+	return status;
+err:
+	/* unwind only the units mapped before the failure */
+	for (i--; i >= 0; i--) {
+		kgsl_mmu_unmap(mmu->defaultpagetable,
+				&(iommu->iommu_units[i].reg_map));
+		iommu->iommu_units[i].reg_map.priv &= ~KGSL_MEMFLAGS_GLOBAL;
+	}
+	if (mmu->defaultpagetable) {
+		kgsl_mmu_putpagetable(mmu->defaultpagetable);
+		mmu->defaultpagetable = NULL;
+	}
+	return status;
+}
+
+/*
+ * kgsl_iommu_start - Start the mmu and attach the default pagetable
+ * @mmu - Pointer to mmu structure
+ *
+ * Creates the default pagetable on first start, programs the GPU-side
+ * MH MMU registers, attaches the IOMMU domain, caches each context's
+ * ttbr0 lsb bits and the current asid, then marks the mmu started.
+ * Idempotent: returns 0 immediately if already started.
+ * Return - 0 on success else error code
+ */
+static int kgsl_iommu_start(struct kgsl_mmu *mmu)
+{
+	int status;
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i, j;
+
+	if (mmu->flags & KGSL_FLAGS_STARTED)
+		return 0;
+
+	if (mmu->defaultpagetable == NULL) {
+		status = kgsl_iommu_setup_defaultpagetable(mmu);
+		if (status)
+			/*
+			 * Propagate the real error; this used to be
+			 * hard-coded to -ENOMEM.
+			 */
+			return status;
+	}
+	/* We use the GPU MMU to control access to IOMMU registers on a225,
+	 * hence we still keep the MMU active on a225 */
+	if (adreno_is_a225(ADRENO_DEVICE(mmu->device))) {
+		struct kgsl_mh *mh = &(mmu->device->mh);
+		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000001);
+		kgsl_regwrite(mmu->device, MH_MMU_MPU_END,
+			mh->mpu_base +
+			iommu->iommu_units
+				[iommu->unit_count - 1].reg_map.gpuaddr -
+				PAGE_SIZE);
+	} else {
+		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+	}
+
+	mmu->hwpagetable = mmu->defaultpagetable;
+
+	status = kgsl_attach_pagetable_iommu_domain(mmu);
+	if (status) {
+		mmu->hwpagetable = NULL;
+		goto done;
+	}
+	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+	if (status) {
+		KGSL_CORE_ERR("clk enable failed\n");
+		goto done;
+	}
+	status = kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_PRIV);
+	if (status) {
+		KGSL_CORE_ERR("clk enable failed\n");
+		goto done;
+	}
+	/* Get the lsb value of pagetables set in the IOMMU ttbr0 register as
+	 * that value should not change when we change pagetables, so while
+	 * changing pagetables we can use this lsb value of the pagetable w/o
+	 * having to read it again
+	 */
+	for (i = 0; i < iommu->unit_count; i++) {
+		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
+		for (j = 0; j < iommu_unit->dev_count; j++)
+			iommu_unit->dev[j].pt_lsb = KGSL_IOMMMU_PT_LSB(
+						KGSL_IOMMU_GET_IOMMU_REG(
+						iommu_unit->reg_map.hostptr,
+						iommu_unit->dev[j].ctx_id,
+						TTBR0));
+	}
+	/* Cache the constant part of the contextidr register */
+	iommu->asid = KGSL_IOMMU_GET_IOMMU_REG(
+				iommu->iommu_units[0].reg_map.hostptr,
+				KGSL_IOMMU_CONTEXT_USER,
+				CONTEXTIDR);
+
+	kgsl_iommu_disable_clk(mmu);
+	mmu->flags |= KGSL_FLAGS_STARTED;
+
+done:
+	if (status) {
+		kgsl_iommu_disable_clk(mmu);
+		kgsl_detach_pagetable_iommu_domain(mmu);
+	}
+	return status;
+}
+
+/*
+ * kgsl_iommu_unmap - Remove a memory descriptor's mapping from a pagetable
+ * @mmu_specific_pt - iommu pagetable (struct kgsl_iommu_pt) to unmap from
+ * @memdesc - Memory descriptor whose gpuaddr range is to be unmapped
+ *
+ * Return - always 0; an iommu_unmap_range() failure is only logged
+ */
+static int
+kgsl_iommu_unmap(void *mmu_specific_pt,
+		struct kgsl_memdesc *memdesc)
+{
+	int ret;
+	unsigned int range = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+
+	/* All GPU addresses as assigned are page aligned, but some
+	   functions purturb the gpuaddr with an offset, so apply the
+	   mask here to make sure we have the right address */
+
+	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
+
+	/* Nothing mapped (or never mapped): nothing to do */
+	if (range == 0 || gpuaddr == 0)
+		return 0;
+
+	ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
+	if (ret)
+		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
+			"with err: %d\n", iommu_pt->domain, gpuaddr,
+			range, ret);
+
+	return 0;
+}
+
+/*
+ * kgsl_iommu_map - Map a memory descriptor into an iommu pagetable
+ * @mmu_specific_pt - iommu pagetable (struct kgsl_iommu_pt) to map into
+ * @memdesc - Memory descriptor (gpuaddr + scatterlist) to map
+ * @protflags - Requested protection flags (currently unused here; the
+ * mapping is always created read/write)
+ * @tlb_flags - Out parameter: set to UINT_MAX to request a tlb flush
+ * when per-process pagetables are enabled
+ *
+ * Return - 0 on success else error code from iommu_map_range()
+ */
+static int
+kgsl_iommu_map(void *mmu_specific_pt,
+			struct kgsl_memdesc *memdesc,
+			unsigned int protflags,
+			unsigned int *tlb_flags)
+{
+	int ret;
+	unsigned int iommu_virt_addr;
+	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
+	int size = kgsl_sg_size(memdesc->sg, memdesc->sglen);
+
+	BUG_ON(NULL == iommu_pt);
+
+
+	iommu_virt_addr = memdesc->gpuaddr;
+
+	ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
+				size, (IOMMU_READ | IOMMU_WRITE));
+	if (ret) {
+		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
+				"failed with err: %d\n", iommu_pt->domain,
+				iommu_virt_addr, memdesc->sg, size,
+				(IOMMU_READ | IOMMU_WRITE), ret);
+		return ret;
+	}
+
+#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
+	/*
+	 * Flushing only required if per process pagetables are used. With
+	 * global case, flushing will happen inside iommu_map function
+	 */
+	if (!ret)
+		*tlb_flags = UINT_MAX;
+#endif
+	return ret;
+}
+
+/*
+ * kgsl_iommu_stop - Stop the device mmu
+ * @mmu - Pointer to mmu structure
+ *
+ * Disables the GPU-side MH MMU, detaches the iommu domain, clears the
+ * hwpagetable and the started flag. No-op if the mmu was never started.
+ * Call this with the global lock held.
+ * Return - void
+ */
+static void kgsl_iommu_stop(struct kgsl_mmu *mmu)
+{
+	if (mmu->flags & KGSL_FLAGS_STARTED) {
+		kgsl_regwrite(mmu->device, MH_MMU_CONFIG, 0x00000000);
+		/* detach iommu attachment */
+		kgsl_detach_pagetable_iommu_domain(mmu);
+		mmu->hwpagetable = NULL;
+
+		mmu->flags &= ~KGSL_FLAGS_STARTED;
+	}
+}
+
+/*
+ * kgsl_iommu_close - Release all iommu backend resources
+ * @mmu - Pointer to mmu structure
+ *
+ * Unmaps each unit's register region from the default pagetable,
+ * iounmaps the register mappings, frees the scatterlists, drops the
+ * default pagetable reference, and frees the asid bitmap and the
+ * kgsl_iommu structure itself.
+ * Return - always 0
+ */
+static int kgsl_iommu_close(struct kgsl_mmu *mmu)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	int i;
+	for (i = 0; i < iommu->unit_count; i++) {
+		/* gpuaddr != 0 means the reg_map was mapped in the default pt */
+		if (iommu->iommu_units[i].reg_map.gpuaddr)
+			kgsl_mmu_unmap(mmu->defaultpagetable,
+			&(iommu->iommu_units[i].reg_map));
+		if (iommu->iommu_units[i].reg_map.hostptr)
+			iounmap(iommu->iommu_units[i].reg_map.hostptr);
+		kgsl_sg_free(iommu->iommu_units[i].reg_map.sg,
+				iommu->iommu_units[i].reg_map.sglen);
+	}
+	if (mmu->defaultpagetable)
+		kgsl_mmu_putpagetable(mmu->defaultpagetable);
+	kfree(iommu->asids);
+	kfree(iommu);
+
+	return 0;
+}
+
+/*
+ * kgsl_iommu_get_current_ptbase - Read the active pagetable base address
+ * @mmu - Pointer to mmu structure
+ *
+ * Reads TTBR0 of the user context of the first iommu unit (clocks are
+ * enabled around the register read) and masks it down to the valid
+ * physical-address bits.
+ * Return - current pagetable base programmed in the hardware
+ */
+static unsigned int
+kgsl_iommu_get_current_ptbase(struct kgsl_mmu *mmu)
+{
+	unsigned int pt_base;
+	struct kgsl_iommu *iommu = mmu->priv;
+	/* Return the current pt base by reading IOMMU pt_base register */
+	kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER);
+	pt_base = readl_relaxed(iommu->iommu_units[0].reg_map.hostptr +
+			(KGSL_IOMMU_CONTEXT_USER << KGSL_IOMMU_CTX_SHIFT) +
+			KGSL_IOMMU_TTBR0);
+	kgsl_iommu_disable_clk(mmu);
+	return pt_base & (KGSL_IOMMU_TTBR0_PA_MASK <<
+				KGSL_IOMMU_TTBR0_PA_SHIFT);
+}
+
+/*
+ * kgsl_iommu_get_hwpagetable_asid - Returns asid(application space ID) for a
+ * pagetable
+ * @mmu - Pointer to mmu structure
+ *
+ * Allocates an asid to a IOMMU domain if it does not already have one. asid's
+ * are unique identifiers for pagetable that can be used to selectively flush
+ * tlb entries of the IOMMU unit. When the asid bitmap is exhausted the
+ * shared KGSL_IOMMU_ASID_REUSE asid is handed out instead.
+ * Return - asid to be used with the IOMMU domain
+ */
+static int kgsl_iommu_get_hwpagetable_asid(struct kgsl_mmu *mmu)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	struct kgsl_iommu_pt *iommu_pt = mmu->hwpagetable->priv;
+
+	/*
+	 * If the iommu pagetable does not have any asid assigned and is not the
+	 * default pagetable then assign asid.
+	 */
+	if (!iommu_pt->asid && iommu_pt != mmu->defaultpagetable->priv) {
+		iommu_pt->asid = find_first_zero_bit(iommu->asids,
+							KGSL_IOMMU_MAX_ASIDS);
+		/* No free bits means reuse asid */
+		if (iommu_pt->asid >= KGSL_IOMMU_MAX_ASIDS) {
+			iommu_pt->asid = KGSL_IOMMU_ASID_REUSE;
+			iommu->asid_reuse++;
+		}
+		set_bit(iommu_pt->asid, iommu->asids);
+		/*
+		 * Store pointer to asids list so that during pagetable destroy
+		 * the asid assigned to this pagetable may be cleared
+		 */
+		iommu_pt->iommu = iommu;
+	}
+	/* Return the asid + the constant part of asid that never changes */
+	return (iommu_pt->asid & (KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
+				KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT)) +
+		(iommu->asid & ~(KGSL_IOMMU_CONTEXTIDR_ASID_MASK <<
+				KGSL_IOMMU_CONTEXTIDR_ASID_SHIFT));
+}
+
+/*
+ * kgsl_iommu_default_setstate - Change the IOMMU pagetable or flush IOMMU tlb
+ * of the primary context bank
+ * @mmu - Pointer to mmu structure
+ * @flags - Flags indicating whether pagetable has to change or tlb is to be
+ * flushed or both
+ *
+ * Based on flags set the new pagetable of the IOMMU unit or flush it's tlb or
+ * do both by doing direct register writes to the IOMMU registers through the
+ * cpu. Clocks are enabled around the register accesses, and each write is
+ * followed by a barrier plus a read-back of the same register.
+ * Return - void
+ */
+static void kgsl_iommu_default_setstate(struct kgsl_mmu *mmu,
+					uint32_t flags)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	int temp;
+	int i;
+	unsigned int pt_base = kgsl_iommu_pt_get_base_addr(
+					mmu->hwpagetable);
+	unsigned int pt_val;
+
+	if (kgsl_iommu_enable_clk(mmu, KGSL_IOMMU_CONTEXT_USER)) {
+		KGSL_DRV_ERR(mmu->device, "Failed to enable iommu clocks\n");
+		return;
+	}
+	/* Mask off the lsb of the pt base address since lsb will not change */
+	pt_base &= (KGSL_IOMMU_TTBR0_PA_MASK << KGSL_IOMMU_TTBR0_PA_SHIFT);
+	if (flags & KGSL_MMUFLAGS_PTUPDATE) {
+		/* the GPU must be idle before ttbr0 is rewritten */
+		kgsl_idle(mmu->device, KGSL_TIMEOUT_DEFAULT);
+		for (i = 0; i < iommu->unit_count; i++) {
+			/* get the lsb value which should not change when
+			 * changing ttbr0 */
+			pt_val = kgsl_iommu_get_pt_lsb(mmu, i,
+						KGSL_IOMMU_CONTEXT_USER);
+			pt_val += pt_base;
+
+			KGSL_IOMMU_SET_IOMMU_REG(
+				iommu->iommu_units[i].reg_map.hostptr,
+				KGSL_IOMMU_CONTEXT_USER, TTBR0, pt_val);
+
+			mb();
+			/* read back to ensure the write has posted */
+			temp = KGSL_IOMMU_GET_IOMMU_REG(
+				iommu->iommu_units[i].reg_map.hostptr,
+				KGSL_IOMMU_CONTEXT_USER, TTBR0);
+			/* Set asid */
+			KGSL_IOMMU_SET_IOMMU_REG(
+				iommu->iommu_units[i].reg_map.hostptr,
+				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR,
+				kgsl_iommu_get_hwpagetable_asid(mmu));
+			mb();
+			temp = KGSL_IOMMU_GET_IOMMU_REG(
+				iommu->iommu_units[i].reg_map.hostptr,
+				KGSL_IOMMU_CONTEXT_USER, CONTEXTIDR);
+		}
+	}
+	/* Flush tlb */
+	if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
+		for (i = 0; i < iommu->unit_count; i++) {
+			/* invalidate only the entries of this asid */
+			KGSL_IOMMU_SET_IOMMU_REG(
+				iommu->iommu_units[i].reg_map.hostptr,
+				KGSL_IOMMU_CONTEXT_USER, CTX_TLBIASID,
+				kgsl_iommu_get_hwpagetable_asid(mmu));
+			mb();
+		}
+	}
+	/* Disable smmu clock */
+	kgsl_iommu_disable_clk(mmu);
+}
+
+/*
+ * kgsl_iommu_get_reg_map_desc - Returns an array of pointers that contain
+ * the address of memory descriptors which map the IOMMU registers
+ * @mmu - Pointer to mmu structure
+ * @reg_map_desc - Out parameter in which the address of the array containing
+ * pointers to register map descriptors is returned. The caller is supposed
+ * to free this array
+ *
+ * Return - The number of iommu units which is also the number of register
+ * mapped descriptor arrays which the out parameter will have, or -ENOMEM
+ * if the array could not be allocated
+ */
+static int kgsl_iommu_get_reg_map_desc(struct kgsl_mmu *mmu,
+					void **reg_map_desc)
+{
+	struct kgsl_iommu *iommu = mmu->priv;
+	void **reg_desc_ptr;
+	int i;
+
+	/*
+	 * Alocate array of pointers that will hold address of the register map
+	 * descriptors
+	 */
+	reg_desc_ptr = kmalloc(iommu->unit_count *
+			sizeof(struct kgsl_memdesc *), GFP_KERNEL);
+	if (!reg_desc_ptr) {
+		KGSL_CORE_ERR("Failed to kmalloc(%d)\n",
+			iommu->unit_count * sizeof(struct kgsl_memdesc *));
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < iommu->unit_count; i++)
+		reg_desc_ptr[i] = &(iommu->iommu_units[i].reg_map);
+
+	*reg_map_desc = reg_desc_ptr;
+	return i;
+}
+
+/* MMU backend operations table exported for the kgsl core when the
+ * device uses the IOMMU (rather than the GPU MMU) backend. */
+struct kgsl_mmu_ops iommu_ops = {
+	.mmu_init = kgsl_iommu_init,
+	.mmu_close = kgsl_iommu_close,
+	.mmu_start = kgsl_iommu_start,
+	.mmu_stop = kgsl_iommu_stop,
+	.mmu_setstate = kgsl_iommu_setstate,
+	.mmu_device_setstate = kgsl_iommu_default_setstate,
+	.mmu_pagefault = NULL,
+	.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
+	.mmu_enable_clk = kgsl_iommu_enable_clk,
+	.mmu_disable_clk = kgsl_iommu_disable_clk,
+	.mmu_get_hwpagetable_asid = kgsl_iommu_get_hwpagetable_asid,
+	.mmu_get_pt_lsb = kgsl_iommu_get_pt_lsb,
+	.mmu_get_reg_map_desc = kgsl_iommu_get_reg_map_desc,
+};
+
+/* Per-pagetable operations table for the IOMMU backend. */
+struct kgsl_mmu_pt_ops iommu_pt_ops = {
+	.mmu_map = kgsl_iommu_map,
+	.mmu_unmap = kgsl_iommu_unmap,
+	.mmu_create_pagetable = kgsl_iommu_create_pagetable,
+	.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
+	.mmu_pt_equal = kgsl_iommu_pt_equal,
+	.mmu_pt_get_base_addr = kgsl_iommu_pt_get_base_addr,
+};