/*
 * Tegra host1x Interrupt Management
 *
 * Copyright (c) 2010-2013, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/irq.h>

#include "dev.h"
#include "intr.h"

/* Wait list management */

enum waitlist_state {
        WLS_PENDING,    /* on the wait list, threshold not yet reached */
        WLS_REMOVED,    /* moved to a completed list, handler not yet run */
        WLS_CANCELLED,  /* cancelled via host1x_intr_put_ref() */
        WLS_HANDLED     /* handler has run, or cancellation has completed */
};

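/*
 * Waiter state machine, as implemented below:
 *
 *   PENDING --(threshold reached)--> REMOVED --(handler run)--> HANDLED
 *   PENDING --(put_ref)--> CANCELLED --(threshold or stop sweep)--> HANDLED
 *
 * remove_completed_waiters() relies on the enumerators above being
 * consecutive: a single atomic_inc_return() turns PENDING into REMOVED
 * and CANCELLED into HANDLED, so reordering the enum would break it.
 */
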
static void waiter_release(struct kref *kref)
{
        kfree(container_of(kref, struct host1x_waitlist, refcount));
}

/*
 * add a waiter to a waiter queue, sorted by threshold
 * returns true if it was added at the head of the queue
 */
static bool add_waiter_to_queue(struct host1x_waitlist *waiter,
                                struct list_head *queue)
{
        struct host1x_waitlist *pos;
        u32 thresh = waiter->thresh;

        list_for_each_entry_reverse(pos, queue, list)
                if ((s32)(pos->thresh - thresh) <= 0) {
                        list_add(&waiter->list, &pos->list);
                        return false;
                }

        list_add(&waiter->list, queue);
        return true;
}

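/*
 * Thresholds are compared as (s32)(a - b) rather than a < b so that the
 * ordering survives 32-bit sync point wraparound: for example, with
 * a = 0x00000001 and b = 0xfffffffe, (s32)(a - b) == 3 > 0, so a is
 * correctly treated as "after" b even though a < b numerically. This
 * holds as long as outstanding thresholds stay within 2^31 of each other.
 */
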
/*
 * run through a waiter queue for a single sync point ID
 * and gather all completed waiters into lists by actions
 */
static void remove_completed_waiters(struct list_head *head, u32 sync,
                        struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
        struct list_head *dest;
        struct host1x_waitlist *waiter, *next;

        list_for_each_entry_safe(waiter, next, head, list) {
                if ((s32)(waiter->thresh - sync) > 0)
                        break;

                dest = completed + waiter->action;

                /* PENDING->REMOVED or CANCELLED->HANDLED */
                if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
                        list_del(&waiter->list);
                        kref_put(&waiter->refcount, waiter_release);
                } else
                        list_move_tail(&waiter->list, dest);
        }
}

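/*
 * Because the wait list is kept sorted by threshold and completed waiters
 * are transferred with list_move_tail(), each per-action completed list
 * stays sorted too, so run_handlers() wakes waiters in threshold order.
 */
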
static void reset_threshold_interrupt(struct host1x *host,
                                      struct list_head *head,
                                      unsigned int id)
{
        u32 thresh =
                list_first_entry(head, struct host1x_waitlist, list)->thresh;

        host1x_hw_intr_set_syncpt_threshold(host, id, thresh);
        host1x_hw_intr_enable_syncpt_intr(host, id);
}

static void action_wakeup(struct host1x_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;
        wake_up(wq);
}

static void action_wakeup_interruptible(struct host1x_waitlist *waiter)
{
        wait_queue_head_t *wq = waiter->data;
        wake_up_interruptible(wq);
}

typedef void (*action_handler)(struct host1x_waitlist *waiter);

static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = {
        action_wakeup,
        action_wakeup_interruptible,
};

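/*
 * action_handlers[] is indexed by enum host1x_intr_action (from intr.h),
 * so the entries above must stay in the same order as the enumerators:
 * a new action needs both a new enumerator and a matching handler here.
 */
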
static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT])
{
        struct list_head *head = completed;
        int i;

        for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) {
                action_handler handler = action_handlers[i];
                struct host1x_waitlist *waiter, *next;

                list_for_each_entry_safe(waiter, next, head, list) {
                        list_del(&waiter->list);
                        handler(waiter);
                        WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) !=
                                WLS_REMOVED);
                        kref_put(&waiter->refcount, waiter_release);
                }
        }
}

/*
 * Remove & handle all waiters that have completed for the given syncpt.
 * Returns true if the wait list is now empty.
 */
static int process_wait_list(struct host1x *host,
                             struct host1x_syncpt *syncpt,
                             u32 threshold)
{
        struct list_head completed[HOST1X_INTR_ACTION_COUNT];
        unsigned int i;
        int empty;

        for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i)
                INIT_LIST_HEAD(completed + i);

        spin_lock(&syncpt->intr.lock);

        remove_completed_waiters(&syncpt->intr.wait_head, threshold,
                                 completed);

        empty = list_empty(&syncpt->intr.wait_head);
        if (empty)
                host1x_hw_intr_disable_syncpt_intr(host, syncpt->id);
        else
                reset_threshold_interrupt(host, &syncpt->intr.wait_head,
                                          syncpt->id);

        spin_unlock(&syncpt->intr.lock);

        /* run the collected actions outside the wait list lock */
        run_handlers(completed);

        return empty;
}

/*
 * Sync point threshold interrupt service function.
 * Handles sync point threshold triggers in workqueue (thread) context.
 */

static void syncpt_thresh_work(struct work_struct *work)
{
        struct host1x_syncpt_intr *syncpt_intr =
                container_of(work, struct host1x_syncpt_intr, work);
        struct host1x_syncpt *syncpt =
                container_of(syncpt_intr, struct host1x_syncpt, intr);
        unsigned int id = syncpt->id;
        struct host1x *host = syncpt->host;

        (void)process_wait_list(host, syncpt,
                                host1x_syncpt_load(host->syncpt + id));
}

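/*
 * The hard-IRQ side of this split lives in the hw backend (see the
 * host1x_hw_intr_* calls): on a threshold interrupt it is expected to mask
 * the triggering sync point and queue the work above on host->intr_wq, so
 * that the wait list walk and wakeups run here in thread context rather
 * than in the ISR. This is the assumed flow, based on how the work handler
 * is registered through host1x_hw_intr_init_host_sync() below.
 */
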
int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh,
                           enum host1x_intr_action action, void *data,
                           struct host1x_waitlist *waiter, void **ref)
{
        struct host1x_syncpt *syncpt;
        int queue_was_empty;

        if (waiter == NULL) {
                pr_warn("%s: NULL waiter\n", __func__);
                return -EINVAL;
        }

        /* initialize a new waiter */
        INIT_LIST_HEAD(&waiter->list);
        kref_init(&waiter->refcount);
        if (ref)
                kref_get(&waiter->refcount);
        waiter->thresh = thresh;
        waiter->action = action;
        atomic_set(&waiter->state, WLS_PENDING);
        waiter->data = data;
        waiter->count = 1;

        syncpt = host->syncpt + id;

        spin_lock(&syncpt->intr.lock);

        queue_was_empty = list_empty(&syncpt->intr.wait_head);

        if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) {
                /* added at head of list - new threshold value */
                host1x_hw_intr_set_syncpt_threshold(host, id, thresh);

                /* added as first waiter - enable interrupt */
                if (queue_was_empty)
                        host1x_hw_intr_enable_syncpt_intr(host, id);
        }

        spin_unlock(&syncpt->intr.lock);

        if (ref)
                *ref = waiter;
        return 0;
}

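/*
 * A sketch of the assumed caller pattern (modelled on host1x_syncpt_wait()
 * in syncpt.c; the exact expiry check and timeout handling are
 * illustrative, not definitive):
 *
 *      DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
 *      struct host1x_waitlist *waiter;
 *      void *ref;
 *      int err;
 *
 *      waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
 *      if (!waiter)
 *              return -ENOMEM;
 *
 *      err = host1x_intr_add_action(sp->host, sp->id, thresh,
 *                                   HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
 *                                   &wq, waiter, &ref);
 *      if (err)
 *              return err;
 *
 *      wait_event_interruptible_timeout(wq,
 *              host1x_syncpt_is_expired(sp, thresh), timeout);
 *
 *      host1x_intr_put_ref(sp->host, sp->id, ref);
 *
 * The waiter itself is freed through its kref once both the interrupt
 * path and the ref holder have dropped their references.
 */
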
void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref)
{
        struct host1x_waitlist *waiter = ref;
        struct host1x_syncpt *syncpt;

        /*
         * A waiter in WLS_REMOVED is sitting on a completed list waiting
         * for its handler to run, so spin until it leaves that transient
         * state before trying to mark it cancelled.
         */
        while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) ==
               WLS_REMOVED)
                schedule();

        syncpt = host->syncpt + id;
        (void)process_wait_list(host, syncpt,
                                host1x_syncpt_load(host->syncpt + id));

        kref_put(&waiter->refcount, waiter_release);
}

int host1x_intr_init(struct host1x *host, unsigned int irq_sync)
{
        unsigned int id;
        u32 nb_pts = host1x_syncpt_nb_pts(host);

        mutex_init(&host->intr_mutex);
        host->intr_syncpt_irq = irq_sync;
        host->intr_wq = create_workqueue("host_syncpt");
        if (!host->intr_wq)
                return -ENOMEM;

        for (id = 0; id < nb_pts; ++id) {
                struct host1x_syncpt *syncpt = host->syncpt + id;

                spin_lock_init(&syncpt->intr.lock);
                INIT_LIST_HEAD(&syncpt->intr.wait_head);
                snprintf(syncpt->intr.thresh_irq_name,
                         sizeof(syncpt->intr.thresh_irq_name),
                         "host1x_sp_%02u", id);
        }

        host1x_intr_start(host);

        return 0;
}

void host1x_intr_deinit(struct host1x *host)
{
        host1x_intr_stop(host);
        destroy_workqueue(host->intr_wq);
}

void host1x_intr_start(struct host1x *host)
{
        u32 hz = clk_get_rate(host->clk);
        int err;

        mutex_lock(&host->intr_mutex);
        /* DIV_ROUND_UP(hz, 1000000): clock rate in cycles per microsecond */
        err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000),
                                            syncpt_thresh_work);
        if (err) {
                mutex_unlock(&host->intr_mutex);
                return;
        }
        mutex_unlock(&host->intr_mutex);
}

void host1x_intr_stop(struct host1x *host)
{
        unsigned int id;
        struct host1x_syncpt *syncpt = host->syncpt;
        u32 nb_pts = host1x_syncpt_nb_pts(host);

        mutex_lock(&host->intr_mutex);

        host1x_hw_intr_disable_all_syncpt_intrs(host);

        for (id = 0; id < nb_pts; ++id) {
                struct host1x_waitlist *waiter, *next;

                /* free waiters already cancelled via host1x_intr_put_ref() */
                list_for_each_entry_safe(waiter, next,
                        &syncpt[id].intr.wait_head, list) {
                        if (atomic_cmpxchg(&waiter->state,
                            WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) {
                                list_del(&waiter->list);
                                kref_put(&waiter->refcount, waiter_release);
                        }
                }

                if (!list_empty(&syncpt[id].intr.wait_head)) {
                        /* output diagnostics */
                        mutex_unlock(&host->intr_mutex);
                        pr_warn("%s cannot stop syncpt intr id=%u\n",
                                __func__, id);
                        return;
                }
        }

        host1x_hw_intr_free_syncpt_irq(host);

        mutex_unlock(&host->intr_mutex);
}