gpu: ion: Add support for iommus
Add infrastructure to support mapping ion allocations into IOMMUs.
Heaps can now provide map_iommu/unmap_iommu operations, and each
buffer tracks its IOMMU mappings in a per-buffer rbtree of
struct ion_iommu_map entries keyed by domain and partition number.
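
For illustration only (not part of this change), a caller built on the
new heap operations could set up a mapping roughly as below. Only the
struct ion_iommu_map fields, the map_iommu op and the
iommu_map_domain/iommu_map_partition macros come from this patch; the
helper name, error handling and the omitted locking are assumptions,
and the sketch relies on the existing buffer->heap->ops pointers in
ion_priv.h:

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/kref.h>
#include "ion_priv.h"

/*
 * Hypothetical caller: allocate tracking state, ask the heap to map the
 * buffer into (domain_num, partition_num), then refcount the mapping.
 */
static int ion_iommu_map_buffer(struct ion_buffer *buffer,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct ion_iommu_map *data;
	int ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->buffer = buffer;
	iommu_map_domain(data) = domain_num;
	iommu_map_partition(data) = partition_num;

	/* The heap backs the iova range and is expected to fill in
	 * iova_addr and mapped_size for it. */
	ret = buffer->heap->ops->map_iommu(buffer, data, domain_num,
					   partition_num, align,
					   iova_length, flags);
	if (ret) {
		kfree(data);
		return ret;
	}

	kref_init(&data->ref);
	buffer->iommu_map_cnt++;
	/* ... insert data into buffer->iommu_maps keyed by data->key ... */
	return 0;
}

unmap_iommu() would then be called from the kref release path once the
last user of the mapping drops its reference.
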
Change-Id: Ia5eafebee408e297013bf55284abf67d9eb8d78b
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index ac51854..77b73e2 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -22,6 +22,7 @@
#include <linux/mutex.h>
#include <linux/rbtree.h>
#include <linux/ion.h>
+#include <linux/iommu.h>
struct ion_mapping;
@@ -35,6 +36,34 @@
void *vaddr;
};
+/**
+ * struct ion_iommu_map - represents a mapping of an ion buffer to an iommu
+ * @iova_addr - iommu virtual address
+ * @node - rb node to exist in the buffer's tree of iommu mappings
+ * @domain_info - contains the partition number and domain number
+ * domain_info[1] = domain number
+ * domain_info[0] = partition number
+ * @buffer - ion buffer this mapping belongs to
+ * @ref - for reference counting this mapping
+ * @mapped_size - size of the iova space mapped
+ * (may not be the same as the buffer size)
+ *
+ * Represents a mapping of one ion buffer to a particular iommu domain
+ * and address range. There may exist other mappings of this buffer in
+ * different domains or address ranges. All mappings will have the same
+ * cacheability and security.
+ */
+struct ion_iommu_map {
+ unsigned long iova_addr;
+ struct rb_node node;
+ union {
+ int domain_info[2];
+ uint64_t key;
+ };
+ struct ion_buffer *buffer;
+ struct kref ref;
+ int mapped_size;
+};
+
struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
/**
@@ -72,6 +101,8 @@
int dmap_cnt;
struct scatterlist *sglist;
int umap_cnt;
+ unsigned int iommu_map_cnt;
+ struct rb_root iommu_maps;
int marked;
};
@@ -109,6 +140,15 @@
unsigned int length, unsigned int cmd);
unsigned long (*get_allocated)(struct ion_heap *heap);
unsigned long (*get_total)(struct ion_heap *heap);
+ int (*map_iommu)(struct ion_buffer *buffer,
+ struct ion_iommu_map *map_data,
+ unsigned int domain_num,
+ unsigned int partition_num,
+ unsigned long align,
+ unsigned long iova_length,
+ unsigned long flags);
+ void (*unmap_iommu)(struct ion_iommu_map *data);
+
};
/**
@@ -136,6 +176,11 @@
const char *name;
};
+
+
+#define iommu_map_domain(__m) ((__m)->domain_info[1])
+#define iommu_map_partition(__m) ((__m)->domain_info[0])
+
/**
* ion_device_create - allocates and returns an ion device
* @custom_ioctl: arch specific ioctl function if applicable
@@ -177,6 +222,10 @@
struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *);
void ion_carveout_heap_destroy(struct ion_heap *);
+
+struct ion_heap *ion_iommu_heap_create(struct ion_platform_heap *);
+void ion_iommu_heap_destroy(struct ion_heap *);
+
/**
* kernel api to allocate/free from carveout -- used when carveout is
* used to back an architecture specific custom heap
@@ -185,6 +234,9 @@
unsigned long align);
void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
unsigned long size);
+
+
+struct ion_heap *msm_get_contiguous_heap(void);
/**
* The carveout heap returns physical addresses, since 0 may be a valid
* physical address, this is used to indicate allocation failed
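
For completeness, a matching lookup and teardown path could use the
packed domain_info/key union and the kref roughly as follows. This is
an illustrative sketch under the same assumptions as above (hypothetical
helper names, locking omitted), not part of the patch itself:

#include <linux/kref.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include "ion_priv.h"

/* Walk buffer->iommu_maps using the 64-bit key that aliases domain_info[]. */
static struct ion_iommu_map *ion_iommu_lookup(struct ion_buffer *buffer,
					      unsigned int domain_num,
					      unsigned int partition_num)
{
	struct rb_node *n = buffer->iommu_maps.rb_node;
	struct ion_iommu_map want;

	/* Writing both domain_info entries fully initializes the aliased key. */
	iommu_map_domain(&want) = domain_num;
	iommu_map_partition(&want) = partition_num;

	while (n) {
		struct ion_iommu_map *entry =
			rb_entry(n, struct ion_iommu_map, node);

		if (want.key < entry->key)
			n = n->rb_left;
		else if (want.key > entry->key)
			n = n->rb_right;
		else
			return entry;
	}
	return NULL;
}

/* Last reference gone: unlink the node and let the heap undo the mapping. */
static void ion_iommu_release(struct kref *kref)
{
	struct ion_iommu_map *map =
		container_of(kref, struct ion_iommu_map, ref);
	struct ion_buffer *buffer = map->buffer;

	rb_erase(&map->node, &buffer->iommu_maps);
	buffer->iommu_map_cnt--;
	buffer->heap->ops->unmap_iommu(map);
	kfree(map);
}

static void ion_iommu_unmap_buffer(struct ion_iommu_map *map)
{
	kref_put(&map->ref, ion_iommu_release);
}

A caller that finds an existing mapping via ion_iommu_lookup() would
simply kref_get(&map->ref) instead of mapping the buffer again.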