gpu: ion: Add support for content protection 2.0 in CP heap
Add support to allow ion_cp_heap.c to secure memory according to the
new content protection 2.0 requirements. This includes adding a
version argument to the secure calls so that other types of securing
can take place.
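
A minimal sketch of the resulting call flow; the heap id and usage
value below stand in for whatever the caller actually needs:

	/* legacy callers are unchanged and map to ION_CP_V1 */
	msm_ion_secure_heap(heap_id);

	/* content protection 2.0 callers pass a cp_mem_usage value,
	 * which is carried down as the opaque data pointer */
	msm_ion_secure_heap_2_0(heap_id, usage);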
Change-Id: I65e07ebaeefa1d0572b6531753a707a28284aa0d
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 7e84aa7..5c7ab3a 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -1898,7 +1898,8 @@
mutex_unlock(&dev->lock);
}
-int ion_secure_heap(struct ion_device *dev, int heap_id)
+int ion_secure_heap(struct ion_device *dev, int heap_id, int version,
+ void *data)
{
struct rb_node *n;
int ret_val = 0;
@@ -1915,7 +1916,7 @@
if (ION_HEAP(heap->id) != heap_id)
continue;
if (heap->ops->secure_heap)
- ret_val = heap->ops->secure_heap(heap);
+ ret_val = heap->ops->secure_heap(heap, version, data);
else
ret_val = -EINVAL;
break;
@@ -1924,7 +1925,8 @@
return ret_val;
}
-int ion_unsecure_heap(struct ion_device *dev, int heap_id)
+int ion_unsecure_heap(struct ion_device *dev, int heap_id, int version,
+ void *data)
{
struct rb_node *n;
int ret_val = 0;
@@ -1941,7 +1943,7 @@
if (ION_HEAP(heap->id) != heap_id)
continue;
- if (heap->ops->secure_heap)
- ret_val = heap->ops->unsecure_heap(heap);
+ if (heap->ops->unsecure_heap)
+ ret_val = heap->ops->unsecure_heap(heap, version, data);
else
ret_val = -EINVAL;
break;
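
The declarations of ion_secure_heap() and ion_unsecure_heap() (kept
wherever the tree already declares them; not shown in this diff) would
need the same two extra parameters:

	int ion_secure_heap(struct ion_device *dev, int heap_id,
				int version, void *data);
	int ion_unsecure_heap(struct ion_device *dev, int heap_id,
				int version, void *data);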
diff --git a/drivers/gpu/ion/ion_cp_heap.c b/drivers/gpu/ion/ion_cp_heap.c
index 6edc30b..c5e9caf 100644
--- a/drivers/gpu/ion/ion_cp_heap.c
+++ b/drivers/gpu/ion/ion_cp_heap.c
@@ -40,6 +40,7 @@
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
+#include "msm/ion_cp_common.h"
/**
* struct ion_cp_heap - container for the heap and shared heap data
@@ -106,10 +107,12 @@
};
static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type);
+ unsigned int permission_type, int version,
+ void *data);
static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
- unsigned int permission_type);
+ unsigned int permission_type, int version,
+ void *data);
/**
* Get the total number of kernel mappings.
@@ -126,7 +129,7 @@
* the correct FMEM state if this heap is a reusable heap.
* Must be called with heap->lock locked.
*/
-static int ion_cp_protect(struct ion_heap *heap)
+static int ion_cp_protect(struct ion_heap *heap, int version, void *data)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
@@ -141,7 +144,8 @@
}
ret_value = ion_cp_protect_mem(cp_heap->secure_base,
- cp_heap->secure_size, cp_heap->permission_type);
+ cp_heap->secure_size, cp_heap->permission_type,
+ version, data);
if (ret_value) {
pr_err("Failed to protect memory for heap %s - "
"error code: %d\n", heap->name, ret_value);
@@ -170,7 +174,7 @@
* the correct FMEM state if this heap is a reusable heap.
* Must be called with heap->lock locked.
*/
-static void ion_cp_unprotect(struct ion_heap *heap)
+static void ion_cp_unprotect(struct ion_heap *heap, int version, void *data)
{
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
@@ -178,7 +182,7 @@
if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
int error_code = ion_cp_unprotect_mem(
cp_heap->secure_base, cp_heap->secure_size,
- cp_heap->permission_type);
+ cp_heap->permission_type, version, data);
if (error_code) {
pr_err("Failed to un-protect memory for heap %s - "
"error code: %d\n", heap->name, error_code);
@@ -649,14 +653,14 @@
return 0;
}
-int ion_cp_secure_heap(struct ion_heap *heap)
+int ion_cp_secure_heap(struct ion_heap *heap, int version, void *data)
{
int ret_value;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
if (cp_heap->umap_count == 0 && cp_heap->kmap_cached_count == 0) {
- ret_value = ion_cp_protect(heap);
+ ret_value = ion_cp_protect(heap, version, data);
} else {
pr_err("ION cannot secure heap with outstanding mappings: "
"User space: %lu, kernel space (cached): %lu\n",
@@ -668,13 +672,13 @@
return ret_value;
}
-int ion_cp_unsecure_heap(struct ion_heap *heap)
+int ion_cp_unsecure_heap(struct ion_heap *heap, int version, void *data)
{
int ret_value = 0;
struct ion_cp_heap *cp_heap =
container_of(heap, struct ion_cp_heap, heap);
mutex_lock(&cp_heap->lock);
- ion_cp_unprotect(heap);
+ ion_cp_unprotect(heap, version, data);
mutex_unlock(&cp_heap->lock);
return ret_value;
}
@@ -1000,8 +1004,7 @@
unsigned char lock;
} __attribute__ ((__packed__));
-
-static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+static int ion_cp_protect_mem_v1(unsigned int phy_base, unsigned int size,
unsigned int permission_type)
{
struct cp_lock_msg cmd;
@@ -1014,7 +1017,7 @@
&cmd, sizeof(cmd), NULL, 0);
}
-static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+static int ion_cp_unprotect_mem_v1(unsigned int phy_base, unsigned int size,
unsigned int permission_type)
{
struct cp_lock_msg cmd;
@@ -1026,3 +1029,70 @@
return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
&cmd, sizeof(cmd), NULL, 0);
}
+
+#define V2_CHUNK_SIZE SZ_1M
+
+static int ion_cp_change_mem_v2(unsigned int phy_base, unsigned int size,
+ void *data, int lock)
+{
+ enum cp_mem_usage usage = (enum cp_mem_usage) data;
+ unsigned long *chunk_list;
+ int nchunks;
+ int ret;
+ int i;
+
+ if (usage < 0 || usage >= MAX_USAGE)
+ return -EINVAL;
+
+ if (!IS_ALIGNED(size, V2_CHUNK_SIZE)) {
+ pr_err("%s: heap size is not aligned to %x\n",
+ __func__, V2_CHUNK_SIZE);
+ return -EINVAL;
+ }
+
+ nchunks = size / V2_CHUNK_SIZE;
+
+ chunk_list = allocate_contiguous_ebi(sizeof(unsigned long)*nchunks,
+ SZ_4K, 0);
+ if (!chunk_list)
+ return -ENOMEM;
+
+ for (i = 0; i < nchunks; i++)
+ chunk_list[i] = phy_base + i * V2_CHUNK_SIZE;
+
+ ret = ion_cp_change_chunks_state(memory_pool_node_paddr(chunk_list),
+ nchunks, V2_CHUNK_SIZE, usage, lock);
+
+ free_contiguous_memory(chunk_list);
+ return ret;
+}
+
+static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
+ unsigned int permission_type, int version,
+ void *data)
+{
+ switch (version) {
+ case ION_CP_V1:
+ return ion_cp_protect_mem_v1(phy_base, size, permission_type);
+ case ION_CP_V2:
+ return ion_cp_change_mem_v2(phy_base, size, data,
+ SCM_CP_PROTECT);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
+ unsigned int permission_type, int version,
+ void *data)
+{
+ switch (version) {
+ case ION_CP_V1:
+ return ion_cp_unprotect_mem_v1(phy_base, size, permission_type);
+ case ION_CP_V2:
+ return ion_cp_change_mem_v2(phy_base, size, data,
+ SCM_CP_UNPROTECT);
+ default:
+ return -EINVAL;
+ }
+}
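
As a worked example of the v2 path, an 8MB secure region based at
0x40000000 (both values hypothetical) is described to the secure
environment as eight 1MB chunks:

	nchunks = SZ_8M / V2_CHUNK_SIZE;	/* = 8 */

	chunk_list[0] = 0x40000000;
	chunk_list[1] = 0x40100000;
	...
	chunk_list[7] = 0x40700000;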
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index 6d636ee..6940e2f 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -158,8 +158,8 @@
void (*unmap_iommu)(struct ion_iommu_map *data);
int (*print_debug)(struct ion_heap *heap, struct seq_file *s,
const struct rb_root *mem_map);
- int (*secure_heap)(struct ion_heap *heap);
- int (*unsecure_heap)(struct ion_heap *heap);
+ int (*secure_heap)(struct ion_heap *heap, int version, void *data);
+ int (*unsecure_heap)(struct ion_heap *heap, int version, void *data);
};
/**
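
With the updated ops signatures, a heap that supports securing wires
the callbacks into its ops table as ion_cp_heap.c does (table
abbreviated here):

	static struct ion_heap_ops cp_heap_ops = {
		...
		.secure_heap = ion_cp_secure_heap,
		.unsecure_heap = ion_cp_unsecure_heap,
	};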
diff --git a/drivers/gpu/ion/msm/ion_cp_common.h b/drivers/gpu/ion/msm/ion_cp_common.h
index ae66128..69dd19e 100644
--- a/drivers/gpu/ion/msm/ion_cp_common.h
+++ b/drivers/gpu/ion/msm/ion_cp_common.h
@@ -17,6 +17,9 @@
#include <asm-generic/errno-base.h>
#include <linux/ion.h>
+#define ION_CP_V1 1
+#define ION_CP_V2 2
+
#if defined(CONFIG_ION_MSM)
/*
* ion_cp2_protect_mem - secures memory via trustzone
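
The v2 path relies on enum cp_mem_usage and MAX_USAGE being visible
from this header. The exact values are owned by the secure
environment; the enum is assumed to look roughly like:

	enum cp_mem_usage {
		VIDEO_BITSTREAM = 0x1,
		VIDEO_PIXEL = 0x2,
		VIDEO_NONPIXEL = 0x3,
		MAX_USAGE = 0x4,
		UNKNOWN = 0x7FFFFFFF,
	};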
diff --git a/drivers/gpu/ion/msm/msm_ion.c b/drivers/gpu/ion/msm/msm_ion.c
index f6a4cf4..eec3fe0 100644
--- a/drivers/gpu/ion/msm/msm_ion.c
+++ b/drivers/gpu/ion/msm/msm_ion.c
@@ -21,6 +21,7 @@
#include <mach/ion.h>
#include <mach/msm_memtypes.h>
#include "../ion_priv.h"
+#include "ion_cp_common.h"
static struct ion_device *idev;
static int num_heaps;
@@ -35,16 +36,28 @@
int msm_ion_secure_heap(int heap_id)
{
- return ion_secure_heap(idev, heap_id);
+ return ion_secure_heap(idev, heap_id, ION_CP_V1, NULL);
}
EXPORT_SYMBOL(msm_ion_secure_heap);
int msm_ion_unsecure_heap(int heap_id)
{
- return ion_unsecure_heap(idev, heap_id);
+ return ion_unsecure_heap(idev, heap_id, ION_CP_V1, NULL);
}
EXPORT_SYMBOL(msm_ion_unsecure_heap);
+int msm_ion_secure_heap_2_0(int heap_id, enum cp_mem_usage usage)
+{
+ return ion_secure_heap(idev, heap_id, ION_CP_V2, (void *)usage);
+}
+EXPORT_SYMBOL(msm_ion_secure_heap_2_0);
+
+int msm_ion_unsecure_heap_2_0(int heap_id, enum cp_mem_usage usage)
+{
+ return ion_unsecure_heap(idev, heap_id, ION_CP_V2, (void *)usage);
+}
+EXPORT_SYMBOL(msm_ion_unsecure_heap_2_0);
+
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
void *vaddr, unsigned long len, unsigned int cmd)
{
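
An illustrative 2.0 caller, pairing secure with unsecure; the heap id
and usage value are placeholders:

	int ret;

	ret = msm_ion_secure_heap_2_0(cp_heap_id, VIDEO_PIXEL);
	if (ret) {
		pr_err("Could not secure heap %d: %d\n", cp_heap_id, ret);
		return ret;
	}

	/* ... use the secured region ... */

	msm_ion_unsecure_heap_2_0(cp_heap_id, VIDEO_PIXEL);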