/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Licensed under the GPL
 * Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 */

#include "linux/cpumask.h"
#include "linux/hardirq.h"
#include "linux/interrupt.h"
#include "linux/kernel_stat.h"
#include "linux/module.h"
#include "linux/sched.h"
#include "linux/seq_file.h"
#include "linux/slab.h"
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"

/*
 * Generic, controller-independent functions:
 */

int show_interrupts(struct seq_file *p, void *v)
{
	int i = *(loff_t *) v, j;
	struct irqaction *action;
	unsigned long flags;

	if (i == 0) {
		seq_printf(p, "           ");
		for_each_online_cpu(j)
			seq_printf(p, "CPU%d       ", j);
		seq_putc(p, '\n');
	}

	if (i < NR_IRQS) {
		struct irq_desc *desc = irq_to_desc(i);

		raw_spin_lock_irqsave(&desc->lock, flags);
		action = desc->action;
		if (!action)
			goto skip;
		seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
		seq_printf(p, "%10u ", kstat_irqs(i));
#else
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
		seq_printf(p, " %14s", irq_desc_get_chip(desc)->name);
		seq_printf(p, "  %s", action->name);

		for (action = action->next; action; action = action->next)
			seq_printf(p, ", %s", action->name);

		seq_putc(p, '\n');
skip:
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	} else if (i == NR_IRQS)
		seq_putc(p, '\n');

	return 0;
}

/*
 * This list is accessed under irq_lock, except in sigio_handler,
 * where it is safe from being modified.  IRQ handlers won't change it -
 * if an IRQ source has vanished, it will be freed by free_irqs just
 * before returning from sigio_handler.  That will process a separate
 * list of irqs to free, with its own locking, coming back here to
 * remove list elements, taking the irq_lock to do so.
 */
static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
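
/*
 * For reference, a node of this list looks roughly like the sketch
 * below (reconstructed from how the fields are used in this file; the
 * real definition lives in UML's shared irq_user.h header):
 *
 *	struct irq_fd {
 *		struct irq_fd *next;
 *		void *id;		- dev_id cookie for the handler
 *		int fd;			- host file descriptor
 *		int type;		- IRQ_READ or IRQ_WRITE
 *		int irq;		- guest IRQ number
 *		int events;		- UM_POLL* events to wait for
 *		int current_events;	- events seen by the last poll
 *	};
 */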

extern void free_irqs(void);

void sigio_handler(int sig, struct uml_pt_regs *regs)
{
	struct irq_fd *irq_fd;
	int n;

	if (smp_sigio_handler())
		return;

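	/*
	 * os_waiting_for_events() (in arch/um/os-Linux/irq.c) is assumed
	 * here to poll the registered descriptors, stash each ready
	 * descriptor's revents in the matching irq_fd->current_events,
	 * and return the number of ready descriptors or a negative
	 * errno such as -EINTR.
	 */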
	while (1) {
		n = os_waiting_for_events(active_fds);
		if (n <= 0) {
			if (n == -EINTR)
				continue;
			else break;
		}

		for (irq_fd = active_fds; irq_fd != NULL;
		     irq_fd = irq_fd->next) {
			if (irq_fd->current_events != 0) {
				irq_fd->current_events = 0;
				do_IRQ(irq_fd->irq, regs);
			}
		}
	}

	free_irqs();
}

static DEFINE_SPINLOCK(irq_lock);

static int activate_fd(int irq, int fd, int type, void *dev_id)
{
	struct pollfd *tmp_pfd;
	struct irq_fd *new_fd, *irq_fd;
	unsigned long flags;
	int events, err, n;

	err = os_set_fd_async(fd);
	if (err < 0)
		goto out;

	err = -ENOMEM;
	new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
	if (new_fd == NULL)
		goto out;

	if (type == IRQ_READ)
		events = UM_POLLIN | UM_POLLPRI;
	else events = UM_POLLOUT;
	*new_fd = ((struct irq_fd) { .next		= NULL,
				     .id		= dev_id,
				     .fd		= fd,
				     .type		= type,
				     .irq		= irq,
				     .events		= events,
				     .current_events	= 0 });

	err = -EBUSY;
	spin_lock_irqsave(&irq_lock, flags);
	for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
		if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
			printk(KERN_ERR "Registering fd %d twice\n", fd);
			printk(KERN_ERR "Irqs : %d, %d\n", irq_fd->irq, irq);
			printk(KERN_ERR "Ids : 0x%p, 0x%p\n", irq_fd->id,
			       dev_id);
			goto out_unlock;
		}
	}

	if (type == IRQ_WRITE)
		fd = -1;

	tmp_pfd = NULL;
	n = 0;

	while (1) {
		n = os_create_pollfd(fd, events, tmp_pfd, n);
		if (n == 0)
			break;

		/*
		 * n > 0 means the new pollfd didn't fit in the current
		 * pollfds array, and tmp_pfd is NULL or too small to
		 * hold the enlarged array.  At least n bytes are needed.
		 *
		 * We have to drop the lock here in order to call
		 * kmalloc, which might sleep.  If something else came in
		 * meanwhile and changed the pollfds array so that the
		 * new pollfd still doesn't fit, we free tmp_pfd and try
		 * again.
		 */
		spin_unlock_irqrestore(&irq_lock, flags);
		kfree(tmp_pfd);

		tmp_pfd = kmalloc(n, GFP_KERNEL);
		if (tmp_pfd == NULL)
			goto out_kfree;

		spin_lock_irqsave(&irq_lock, flags);
	}

	*last_irq_ptr = new_fd;
	last_irq_ptr = &new_fd->next;

	spin_unlock_irqrestore(&irq_lock, flags);

	/*
	 * This calls activate_fd, so it has to be outside the critical
	 * section.
	 */
	maybe_sigio_broken(fd, (type == IRQ_READ));

	return 0;

 out_unlock:
	spin_unlock_irqrestore(&irq_lock, flags);
 out_kfree:
	kfree(new_fd);
 out:
	return err;
}

static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&irq_lock, flags);
	os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
	spin_unlock_irqrestore(&irq_lock, flags);
}

struct irq_and_dev {
	int irq;
	void *dev;
};

static int same_irq_and_dev(struct irq_fd *irq, void *d)
{
	struct irq_and_dev *data = d;

	return ((irq->irq == data->irq) && (irq->id == data->dev));
}

static void free_irq_by_irq_and_dev(unsigned int irq, void *dev)
{
	struct irq_and_dev data = ((struct irq_and_dev) { .irq = irq,
							  .dev = dev });

	free_irq_by_cb(same_irq_and_dev, &data);
}

static int same_fd(struct irq_fd *irq, void *fd)
{
	return (irq->fd == *((int *)fd));
}

void free_irq_by_fd(int fd)
{
	free_irq_by_cb(same_fd, &fd);
}

/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
	struct irq_fd *irq;
	int i = 0;
	int fdi;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		if ((irq->fd == fd) && (irq->irq == irqnum))
			break;
		i++;
	}
	if (irq == NULL) {
		printk(KERN_ERR "find_irq_by_fd doesn't have descriptor %d\n",
		       fd);
		goto out;
	}
	fdi = os_get_pollfd(i);
	if ((fdi != -1) && (fdi != fd)) {
		printk(KERN_ERR "find_irq_by_fd - mismatch between active_fds "
		       "and pollfds, fd %d vs %d, need %d\n", irq->fd,
		       fdi, fd);
		irq = NULL;
		goto out;
	}
	*index_out = i;
 out:
	return irq;
}

void reactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}
	os_set_pollfd(i, irq->fd);
	spin_unlock_irqrestore(&irq_lock, flags);

	add_sigio_fd(fd);
}
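
/*
 * Usage sketch: fd IRQs behave as one-shot - once an event fires, the
 * fd is assumed to be dropped from the pollfd array until it is
 * re-armed - so a driver's interrupt handler calls
 * reactivate_fd(fd, irq) after draining the descriptor to start
 * polling it again.
 */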

void deactivate_fd(int fd, int irqnum)
{
	struct irq_fd *irq;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&irq_lock, flags);
	irq = find_irq_by_fd(fd, irqnum, &i);
	if (irq == NULL) {
		spin_unlock_irqrestore(&irq_lock, flags);
		return;
	}

	os_set_pollfd(i, -1);
	spin_unlock_irqrestore(&irq_lock, flags);

	ignore_sigio_fd(fd);
}

/*
 * Called just before shutdown in order to provide a clean exec
 * environment in case the system is rebooting.  No locking because
 * that would cause a pointless shutdown hang if something hadn't
 * released the lock.
 */
int deactivate_all_fds(void)
{
	struct irq_fd *irq;
	int err;

	for (irq = active_fds; irq != NULL; irq = irq->next) {
		err = os_clear_fd_async(irq->fd);
		if (err)
			return err;
	}
	/* If there is a signal already queued, after unblocking ignore it */
	os_set_ioignore();

	return 0;
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(int irq, struct uml_pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
	irq_enter();
	generic_handle_irq(irq);
	irq_exit();
	set_irq_regs(old_regs);
	return 1;
}

int um_request_irq(unsigned int irq, int fd, int type,
		   irq_handler_t handler,
		   unsigned long irqflags, const char *devname,
		   void *dev_id)
{
	int err;

	if (fd != -1) {
		err = activate_fd(irq, fd, type, dev_id);
		if (err)
			return err;
	}

	return request_irq(irq, handler, irqflags, devname, dev_id);
}

EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
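
/*
 * Usage sketch (hypothetical driver, not part of this file): a UML
 * driver holding an open host descriptor would typically register it as
 *
 *	err = um_request_irq(MY_DEV_IRQ, fd, IRQ_READ, my_interrupt,
 *			     IRQF_SHARED, "my_dev", dev);
 *
 * and re-arm the descriptor with reactivate_fd(fd, MY_DEV_IRQ) from
 * my_interrupt() after draining it.  MY_DEV_IRQ and my_interrupt are
 * made-up names; IRQ_READ/IRQ_WRITE pick the poll direction, and the
 * flags follow the usual request_irq conventions.
 */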

/*
 * irq_chip must define at least enable/disable and ack when
 * the edge handler is used.
 */
static void dummy(struct irq_data *d)
{
}

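/*
 * The .release member appears to be a UML-specific extension to struct
 * irq_chip: free_irq() invokes it so that the host fd backing the IRQ
 * is unhooked from the pollfd array (free_irq_by_irq_and_dev above).
 */
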
/* This is used for everything other than the timer. */
static struct irq_chip normal_irq_type = {
	.name = "SIGIO",
	.release = free_irq_by_irq_and_dev,
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

static struct irq_chip SIGVTALRM_irq_type = {
	.name = "SIGVTALRM",
	.release = free_irq_by_irq_and_dev,
	.irq_disable = dummy,
	.irq_enable = dummy,
	.irq_ack = dummy,
};

void __init init_IRQ(void)
{
	int i;

	irq_set_chip_and_handler(TIMER_IRQ, &SIGVTALRM_irq_type, handle_edge_irq);

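	/*
	 * The loop starts at 1 because IRQ 0 is TIMER_IRQ, set up above;
	 * every other IRQ defaults to the SIGIO chip.
	 */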
	for (i = 1; i < NR_IRQS; i++)
		irq_set_chip_and_handler(i, &normal_irq_type, handle_edge_irq);
}

/*
 * IRQ stack entry and exit:
 *
 * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
 * and then switch over to the IRQ stack after some preparation.  We
 * use sigaltstack to receive signals on a separate stack from the
 * start.  These two functions make sure the rest of the kernel won't
 * be too upset by being on a different stack.  The IRQ stack has a
 * thread_info structure at the bottom so that current et al continue
 * to work.
 *
 * to_irq_stack copies the current task's thread_info to the IRQ stack
 * thread_info and sets the task's stack to point to the IRQ stack.
 *
 * from_irq_stack copies the thread_info struct back (flags may have
 * been modified) and resets the task's stack pointer.
 *
 * Tricky bits -
 *
 * What happens when two signals race each other?  UML doesn't block
 * signals with sigprocmask or sa_mask, and doesn't rely on the
 * default deferral (SA_NODEFER), so a second signal could arrive
 * while a previous one is still setting up the thread_info.
 *
 * There are three cases -
 *     The first interrupt on the stack - sets up the thread_info and
 * handles the interrupt
 *     A nested interrupt interrupting the copying of the thread_info -
 * can't handle the interrupt, as the stack is in an unknown state
 *     A nested interrupt not interrupting the copying of the
 * thread_info - doesn't do any setup, just handles the interrupt
 *
 * The first job is to figure out whether we interrupted stack setup.
 * This is done by xchging the signal mask with pending_mask.  If the
 * value that comes back is zero, then there is no setup in progress,
 * and the interrupt can be handled.  If the value is non-zero, then
 * there is stack setup in progress.  In order to have the interrupt
 * handled, we leave our signal in the mask, and it will be handled by
 * the upper handler after it has set up the stack.
 *
 * Next is to figure out whether we are the outer handler or a nested
 * one.  As part of setting up the stack, thread_info->real_thread is
 * set to non-NULL (and is reset to NULL on exit).  This is the
 * nesting indicator.  If it is non-NULL, then the stack is already
 * set up and the handler can run.
 */
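
/*
 * A concrete run of the protocol described above: signal A arrives
 * first, xchgs its bit into pending_mask, gets back 0, and starts
 * copying the thread_info.  Signal B arrives mid-copy, xchgs its bit
 * in, gets back a non-zero value (A's bit), leaves its own bit parked
 * in pending_mask, and returns without running.  When A finishes the
 * copy, its final xchg(&pending_mask, 0) collects B's bit, so A's
 * caller ends up handling B's interrupt as well.
 */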

static unsigned long pending_mask;

unsigned long to_irq_stack(unsigned long *mask_out)
{
	struct thread_info *ti;
	unsigned long mask, old;
	int nested;

	mask = xchg(&pending_mask, *mask_out);
	if (mask != 0) {
		/*
		 * If any interrupts come in at this point, we want to
		 * make sure that their bits aren't lost by our
		 * putting our bit in.  So, this loop accumulates bits
		 * until xchg returns the same value that we put in.
		 * When that happens, there were no new interrupts,
		 * and pending_mask contains a bit for each interrupt
		 * that came in.
		 */
		old = *mask_out;
		do {
			old |= mask;
			mask = xchg(&pending_mask, old);
		} while (mask != old);
		return 1;
	}

	ti = current_thread_info();
	nested = (ti->real_thread != NULL);
	if (!nested) {
		struct task_struct *task;
		struct thread_info *tti;

		task = cpu_tasks[ti->cpu].task;
		tti = task_thread_info(task);

		*ti = *tti;
		ti->real_thread = tti;
		task->stack = ti;
	}

	mask = xchg(&pending_mask, 0);
	*mask_out |= mask | nested;
	return 0;
}

unsigned long from_irq_stack(int nested)
{
	struct thread_info *ti, *to;
	unsigned long mask;

	ti = current_thread_info();

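	/*
	 * Bit 0 acts as a "teardown in progress" marker: a signal
	 * arriving from here on sees a non-zero pending_mask and parks
	 * its bit instead of running.  The final xchg below collects
	 * any parked bits, and "& ~1" strips the marker itself.
	 */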
	pending_mask = 1;

	to = ti->real_thread;
	current->stack = to;
	ti->real_thread = NULL;
	*to = *ti;

	mask = xchg(&pending_mask, 0);
	return mask & ~1;
}
509