/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <mach/iommu.h>
#include <linux/msm_kgsl.h>

#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"

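/*
 * struct kgsl_iommu - per-MMU state for the IOMMU backend.
 * Holds the user and privileged IOMMU context devices returned by
 * msm_iommu_get_ctx() and flags tracking whether each context is
 * currently attached to a domain.
 */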
struct kgsl_iommu {
	struct device *iommu_user_dev;
	int iommu_user_dev_attached;
	struct device *iommu_priv_dev;
	int iommu_priv_dev_attached;
};

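/*
 * Compare a pagetable against a hardware pagetable base. For the IOMMU
 * backend the pt_base is simply the pointer value of the iommu_domain
 * backing the pagetable, so equality is a pointer comparison.
 */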
static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
					unsigned int pt_base)
{
	struct iommu_domain *domain;

	if (!pt || !pt_base)
		return 0;

	domain = pt->priv;
	return (unsigned int)domain == pt_base;
}

static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
	struct iommu_domain *domain = mmu_specific_pt;
	if (domain)
		iommu_domain_free(domain);
}

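/*
 * Allocate an iommu_domain to back a new pagetable. The domain pointer
 * doubles as the backend-specific pagetable handle.
 */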
void *kgsl_iommu_create_pagetable(void)
{
	struct iommu_domain *domain = iommu_domain_alloc(0);
	if (!domain)
		KGSL_CORE_ERR("Failed to create iommu domain\n");

	return domain;
}

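/*
 * Detach the user and privileged IOMMU context devices from the domain
 * of the current hardware pagetable, if they are attached.
 */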
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	struct kgsl_iommu *iommu = mmu->priv;

	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	if (iommu->iommu_user_dev_attached) {
		iommu_detach_device(domain, iommu->iommu_user_dev);
		iommu->iommu_user_dev_attached = 0;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p detached from user dev of MMU: %p\n",
				domain, mmu);
	}
	if (iommu->iommu_priv_dev_attached) {
		iommu_detach_device(domain, iommu->iommu_priv_dev);
		iommu->iommu_priv_dev_attached = 0;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p detached from priv dev of MMU: %p\n",
				domain, mmu);
	}
}

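/*
 * Attach the user and privileged IOMMU context devices to the domain of
 * the current hardware pagetable. If attaching the privileged context
 * fails, the user context is detached again so the two stay in sync.
 */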
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	int ret = 0;
	struct kgsl_iommu *iommu = mmu->priv;

	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) {
		ret = iommu_attach_device(domain, iommu->iommu_user_dev);
		if (ret) {
			KGSL_MEM_ERR(mmu->device,
				"Failed to attach device, err %d\n", ret);
			goto done;
		}
		iommu->iommu_user_dev_attached = 1;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to user dev of MMU: %p\n",
				domain, mmu);
	}
	if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) {
		ret = iommu_attach_device(domain, iommu->iommu_priv_dev);
		if (ret) {
			KGSL_MEM_ERR(mmu->device,
				"Failed to attach device, err %d\n", ret);
			iommu_detach_device(domain, iommu->iommu_user_dev);
			iommu->iommu_user_dev_attached = 0;
			goto done;
		}
		iommu->iommu_priv_dev_attached = 1;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to priv dev of MMU: %p\n",
				domain, mmu);
	}
done:
	return ret;
}

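/*
 * Look up the user and privileged IOMMU context devices by the names
 * supplied in the device platform data. The user context is mandatory;
 * failing to find it is an error.
 */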
static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu,
				struct kgsl_device *device)
{
	int status = 0;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	if (pdata_dev->iommu_user_ctx_name)
		iommu->iommu_user_dev = msm_iommu_get_ctx(
					pdata_dev->iommu_user_ctx_name);
	if (pdata_dev->iommu_priv_ctx_name)
		iommu->iommu_priv_dev = msm_iommu_get_ctx(
					pdata_dev->iommu_priv_ctx_name);
	if (!iommu->iommu_user_dev) {
		KGSL_CORE_ERR("Failed to get user iommu dev handle for "
				"device %s\n",
				pdata_dev->iommu_user_ctx_name);
		status = -EINVAL;
	}
	return status;
}

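/*
 * Switch the hardware pagetable. The GPU is idled first, the IOMMU
 * contexts are detached from the old domain and then re-attached to
 * the domain of the new pagetable.
 */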
static void kgsl_iommu_setstate(struct kgsl_device *device,
				struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* If the page table is not current, then set up the mmu
		 * to use the newly specified page table
		 */
		if (mmu->hwpagetable != pagetable) {
			kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
			kgsl_detach_pagetable_iommu_domain(mmu);
			mmu->hwpagetable = pagetable;
			if (mmu->hwpagetable)
				kgsl_attach_pagetable_iommu_domain(mmu);
		}
	}
}

static int kgsl_iommu_init(struct kgsl_device *device)
{
	/*
	 * initialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status = 0;
	struct kgsl_mmu *mmu = &device->mmu;
	struct kgsl_iommu *iommu;

	mmu->device = device;

	iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
	if (!iommu) {
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
				sizeof(struct kgsl_iommu));
		return -ENOMEM;
	}

	iommu->iommu_priv_dev_attached = 0;
	iommu->iommu_user_dev_attached = 0;
	status = kgsl_get_iommu_ctxt(iommu, device);
	if (status) {
		kfree(iommu);
		iommu = NULL;
	}
	mmu->priv = iommu;

	dev_info(device->dev, "|%s| MMU type set for device is IOMMU\n",
			__func__);
	return status;
}

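/*
 * Start the MMU: clear MH_MMU_CONFIG, take the global default pagetable
 * (creating it if needed) and attach the IOMMU contexts to its domain.
 */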
static int kgsl_iommu_start(struct kgsl_device *device)
{
	int status;
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 0;

	kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
	if (mmu->defaultpagetable == NULL)
		mmu->defaultpagetable =
			kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
	/* Return error if the default pagetable doesn't exist */
	if (mmu->defaultpagetable == NULL)
		return -ENOMEM;
	mmu->hwpagetable = mmu->defaultpagetable;

	status = kgsl_attach_pagetable_iommu_domain(mmu);
	if (!status)
		mmu->flags |= KGSL_FLAGS_STARTED;

	return status;
}

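/*
 * Remove the mapping for a memory descriptor from the iommu domain that
 * backs the pagetable. Unmap failures are logged but not propagated.
 */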
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
		struct kgsl_memdesc *memdesc)
{
	int ret;
	unsigned int range = memdesc->size;
	struct iommu_domain *domain = (struct iommu_domain *)
					mmu_specific_pt;

	/* All GPU addresses as assigned are page aligned, but some
	   functions perturb the gpuaddr with an offset, so apply the
	   mask here to make sure we have the right address */

	unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;

	if (range == 0 || gpuaddr == 0)
		return 0;

	ret = iommu_unmap_range(domain, gpuaddr, range);
	if (ret)
		KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
				"with err: %d\n", domain, gpuaddr,
				range, ret);

	return 0;
}

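/*
 * Map a memory descriptor's scatter-gather list into the iommu domain
 * at the GPU virtual address already assigned in the memdesc.
 */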
static int
kgsl_iommu_map(void *mmu_specific_pt,
			struct kgsl_memdesc *memdesc,
			unsigned int protflags)
{
	int ret;
	unsigned int iommu_virt_addr;
	struct iommu_domain *domain = mmu_specific_pt;

	BUG_ON(NULL == domain);

	iommu_virt_addr = memdesc->gpuaddr;

	ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
				memdesc->size, 0);
	if (ret) {
		KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
				"failed with err: %d\n", domain,
				iommu_virt_addr, memdesc->sg, memdesc->size,
				0, ret);
		return ret;
	}

	return ret;
}

static int kgsl_iommu_stop(struct kgsl_device *device)
{
	/*
	 * stop device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		/* detach iommu attachment */
		kgsl_detach_pagetable_iommu_domain(mmu);

		mmu->flags &= ~KGSL_FLAGS_STARTED;
	}

	return 0;
}

static int kgsl_iommu_close(struct kgsl_device *device)
{
	struct kgsl_mmu *mmu = &device->mmu;
	if (mmu->defaultpagetable)
		kgsl_mmu_putpagetable(mmu->defaultpagetable);

	return 0;
}

static unsigned int
kgsl_iommu_get_current_ptbase(struct kgsl_device *device)
{
	/* Current base is always the hwpagetable's domain as we
	 * do not use per process pagetables right now for iommu.
	 * This will change when we switch to per process pagetables.
	 */
	return (unsigned int)device->mmu.hwpagetable->priv;
}

struct kgsl_mmu_ops iommu_ops = {
	.mmu_init = kgsl_iommu_init,
	.mmu_close = kgsl_iommu_close,
	.mmu_start = kgsl_iommu_start,
	.mmu_stop = kgsl_iommu_stop,
	.mmu_setstate = kgsl_iommu_setstate,
	.mmu_device_setstate = NULL,
	.mmu_pagefault = NULL,
	.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
};

struct kgsl_mmu_pt_ops iommu_pt_ops = {
	.mmu_map = kgsl_iommu_map,
	.mmu_unmap = kgsl_iommu_unmap,
	.mmu_create_pagetable = kgsl_iommu_create_pagetable,
	.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
	.mmu_pt_equal = kgsl_iommu_pt_equal,
	.mmu_pt_get_flags = NULL,
};