-/*
+/*
* Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
* Licensed under the GPL
* Derived (i.e. mostly copied) from arch/i386/kernel/irq.c:
* Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
*/
-#include "linux/config.h"
#include "linux/kernel.h"
#include "linux/module.h"
#include "linux/smp.h"
#include "asm/system.h"
#include "asm/errno.h"
#include "asm/uaccess.h"
-#include "user_util.h"
#include "kern_util.h"
#include "irq_user.h"
#include "irq_kern.h"
#include "os.h"
#include "sigio.h"
#include "misc_constants.h"
+#include "as-layout.h"
/*
* Generic, controller-independent functions:
if (i < NR_IRQS) {
spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
- if (!action)
+ if (!action)
goto skip;
seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
return 0;
}
-struct irq_fd *active_fds = NULL;
+/*
+ * This list is accessed under irq_lock, except in sigio_handler,
+ * where it is safe from being modified. IRQ handlers won't change it -
+ * if an IRQ source has vanished, it will be freed by free_irqs just
+ * before returning from sigio_handler. That will process a separate
+ * list of irqs to free, with its own locking, coming back here to
+ * remove list elements, taking the irq_lock to do so.
+ */
+static struct irq_fd *active_fds = NULL;
static struct irq_fd **last_irq_ptr = &active_fds;
extern void free_irqs(void);
free_irqs();
}
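
For context, here is a minimal sketch of the deferred-free pattern described in
the comment above active_fds. It is not part of this patch: the real free_irqs()
lives outside this file, and the queue, its lock, and struct deferred_irq are
illustrative names only, assuming the usual linux/list.h, linux/spinlock.h and
linux/interrupt.h includes. The point is that sigio_handler never modifies
active_fds directly; vanished IRQ sources are queued under a separate lock, and
the teardown path comes back through free_irq()/free_irq_by_cb(), which takes
irq_lock to unlink the entry.

    struct deferred_irq {                   /* hypothetical bookkeeping entry */
        struct list_head list;
        int irq;
        void *dev_id;
    };

    /* Hypothetical queue; entries are added by the driver side when its
     * IRQ source vanishes (not shown). */
    static LIST_HEAD(irqs_to_free);
    static DEFINE_SPINLOCK(irqs_to_free_lock);

    void free_irqs(void)
    {
        struct deferred_irq *d, *tmp;
        LIST_HEAD(list);
        unsigned long flags;

        /* Move the queue aside under its own lock, not irq_lock */
        spin_lock_irqsave(&irqs_to_free_lock, flags);
        list_splice_init(&irqs_to_free, &list);
        spin_unlock_irqrestore(&irqs_to_free_lock, flags);

        list_for_each_entry_safe(d, tmp, &list, list) {
            /* free_irq() ends up in free_irq_by_cb(), which takes
             * irq_lock to remove the entry from active_fds */
            free_irq(d->irq, d->dev_id);
            list_del(&d->list);
            kfree(d);
        }
    }
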
-static void maybe_sigio_broken(int fd, int type)
-{
- if (os_isatty(fd)) {
- if ((type == IRQ_WRITE) && !pty_output_sigio) {
- write_sigio_workaround();
- add_sigio_fd(fd, 0);
- } else if ((type == IRQ_READ) && !pty_close_sigio) {
- write_sigio_workaround();
- add_sigio_fd(fd, 1);
- }
- }
-}
+static DEFINE_SPINLOCK(irq_lock);
int activate_fd(int irq, int fd, int type, void *dev_id)
{
if (err < 0)
goto out;
- new_fd = um_kmalloc(sizeof(*new_fd));
err = -ENOMEM;
+ new_fd = kmalloc(sizeof(struct irq_fd), GFP_KERNEL);
if (new_fd == NULL)
goto out;
.events = events,
.current_events = 0 } );
- /* Critical section - locked by a spinlock because this stuff can
- * be changed from interrupt handlers. The stuff above is done
- * outside the lock because it allocates memory.
- */
-
- /* Actually, it only looks like it can be called from interrupt
- * context. The culprit is reactivate_fd, which calls
- * maybe_sigio_broken, which calls write_sigio_workaround,
- * which calls activate_fd. However, write_sigio_workaround should
- * only be called once, at boot time. That would make it clear that
- * this is called only from process context, and can be locked with
- * a semaphore.
- */
- flags = irq_lock();
+ err = -EBUSY;
+ spin_lock_irqsave(&irq_lock, flags);
for (irq_fd = active_fds; irq_fd != NULL; irq_fd = irq_fd->next) {
if ((irq_fd->fd == fd) && (irq_fd->type == type)) {
printk("Registering fd %d twice\n", fd);
}
}
- /*-------------*/
if (type == IRQ_WRITE)
fd = -1;
* so we will not be able to put new pollfd struct to pollfds
* then we free the buffer tmp_fds and try again.
*/
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
kfree(tmp_pfd);
- tmp_pfd = NULL;
- tmp_pfd = um_kmalloc(n);
+ tmp_pfd = kmalloc(n, GFP_KERNEL);
if (tmp_pfd == NULL)
goto out_kfree;
- flags = irq_lock();
+ spin_lock_irqsave(&irq_lock, flags);
}
- /*-------------*/
*last_irq_ptr = new_fd;
last_irq_ptr = &new_fd->next;
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
/* This calls activate_fd, so it has to be outside the critical
* section.
*/
- maybe_sigio_broken(fd, type);
+ maybe_sigio_broken(fd, (type == IRQ_READ));
- return(0);
+ return 0;
out_unlock:
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
out_kfree:
kfree(new_fd);
out:
- return(err);
+ return err;
}
static void free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg)
{
unsigned long flags;
- flags = irq_lock();
+ spin_lock_irqsave(&irq_lock, flags);
os_free_irq_by_cb(test, arg, active_fds, &last_irq_ptr);
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
}
struct irq_and_dev {
free_irq_by_cb(same_fd, &fd);
}
+/* Must be called with irq_lock held */
static struct irq_fd *find_irq_by_fd(int fd, int irqnum, int *index_out)
{
struct irq_fd *irq;
unsigned long flags;
int i;
- flags = irq_lock();
+ spin_lock_irqsave(&irq_lock, flags);
irq = find_irq_by_fd(fd, irqnum, &i);
if (irq == NULL) {
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
return;
}
os_set_pollfd(i, irq->fd);
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
- /* This calls activate_fd, so it has to be outside the critical
- * section.
- */
- maybe_sigio_broken(fd, irq->type);
+ add_sigio_fd(fd);
}
void deactivate_fd(int fd, int irqnum)
unsigned long flags;
int i;
- flags = irq_lock();
+ spin_lock_irqsave(&irq_lock, flags);
irq = find_irq_by_fd(fd, irqnum, &i);
- if (irq == NULL)
- goto out;
+ if (irq == NULL) {
+ spin_unlock_irqrestore(&irq_lock, flags);
+ return;
+ }
+
os_set_pollfd(i, -1);
- out:
- irq_unlock(flags);
+ spin_unlock_irqrestore(&irq_lock, flags);
+
+ ignore_sigio_fd(fd);
}
+/*
+ * Called just before shutdown in order to provide a clean exec
+ * environment in case the system is rebooting. No locking because
+ * that would cause a pointless shutdown hang if something hadn't
+ * released the lock.
+ */
int deactivate_all_fds(void)
{
struct irq_fd *irq;
return 0;
}
-void forward_interrupts(int pid)
-{
- struct irq_fd *irq;
- unsigned long flags;
- int err;
-
- flags = irq_lock();
- for (irq = active_fds; irq != NULL; irq = irq->next) {
- err = os_set_owner(irq->fd, pid);
- if (err < 0) {
- /* XXX Just remove the irq rather than
- * print out an infinite stream of these
- */
- printk("Failed to forward %d to pid %d, err = %d\n",
- irq->fd, pid, -err);
- }
-
- irq->pid = pid;
- }
- irq_unlock(flags);
-}
-
/*
* do_IRQ handles all normal device IRQ's (the special
* SMP cross-CPU interrupts have their own specific
*/
unsigned int do_IRQ(int irq, union uml_pt_regs *regs)
{
- irq_enter();
- __do_IRQ(irq, (struct pt_regs *)regs);
- irq_exit();
- return 1;
+ struct pt_regs *old_regs = set_irq_regs((struct pt_regs *)regs);
+ irq_enter();
+ __do_IRQ(irq);
+ irq_exit();
+ set_irq_regs(old_regs);
+ return 1;
}
int um_request_irq(unsigned int irq, int fd, int type,
- irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ irq_handler_t handler,
unsigned long irqflags, const char * devname,
void *dev_id)
{
EXPORT_SYMBOL(um_request_irq);
EXPORT_SYMBOL(reactivate_fd);
-static DEFINE_SPINLOCK(irq_spinlock);
-
-unsigned long irq_lock(void)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&irq_spinlock, flags);
- return flags;
-}
-
-void irq_unlock(unsigned long flags)
-{
- spin_unlock_irqrestore(&irq_spinlock, flags);
-}
-
/* hw_interrupt_type must define (startup || enable) &&
* (shutdown || disable) && end */
static void dummy(unsigned int irq)
}
}
-int init_aio_irq(int irq, char *name, irqreturn_t (*handler)(int, void *,
- struct pt_regs *))
+int init_aio_irq(int irq, char *name, irq_handler_t handler)
{
int fds[2], err;
out:
return err;
}
+
+/*
+ * IRQ stack entry and exit:
+ *
+ * Unlike i386, UML doesn't receive IRQs on the normal kernel stack
+ * and switch over to the IRQ stack after some preparation. We use
+ * sigaltstack to receive signals on a separate stack from the start.
+ * These two functions make sure the rest of the kernel won't be too
+ * upset by being on a different stack. The IRQ stack has a
+ * thread_info structure at the bottom so that current et al continue
+ * to work.
+ *
+ * to_irq_stack copies the current task's thread_info to the IRQ stack
+ * thread_info and sets the task's stack to point to the IRQ stack.
+ *
+ * from_irq_stack copies the thread_info struct back (flags may have
+ * been modified) and resets the task's stack pointer.
+ *
+ * Tricky bits -
+ *
+ * What happens when two signals race each other? UML doesn't block
+ * signals with sigprocmask or sa_mask, and doesn't defer the signal
+ * being handled (SA_NODEFER), so a second signal could arrive while
+ * a previous one is still setting up the thread_info.
+ *
+ * There are three cases -
+ * The first interrupt on the stack - sets up the thread_info and
+ * handles the interrupt
+ * A nested interrupt interrupting the copying of the thread_info -
+ * can't handle the interrupt, as the stack is in an unknown state
+ * A nested interrupt not interrupting the copying of the
+ * thread_info - doesn't do any setup, just handles the interrupt
+ *
+ * The first job is to figure out whether we interrupted stack setup.
+ * This is done by xchging the signal mask with pending_mask.
+ * If the value that comes back is zero, then there is no setup in
+ * progress, and the interrupt can be handled. If the value is
+ * non-zero, then there is stack setup in progress. In order to have
+ * the interrupt handled, we leave our signal in the mask, and it will
+ * be handled by the upper handler after it has set up the stack.
+ *
+ * Next is to figure out whether we are the outer handler or a nested
+ * one. As part of setting up the stack, thread_info->real_thread is
+ * set to non-NULL (and is reset to NULL on exit). This is the
+ * nesting indicator. If it is non-NULL, then the stack is already
+ * set up and the handler can run.
+ */
+
+static unsigned long pending_mask;
+
+unsigned long to_irq_stack(unsigned long *mask_out)
+{
+ struct thread_info *ti;
+ unsigned long mask, old;
+ int nested;
+
+ mask = xchg(&pending_mask, *mask_out);
+ if (mask != 0) {
+ /* If any interrupts come in at this point, we want to
+ * make sure that their bits aren't lost by our
+ * putting our bit in. So, this loop accumulates bits
+ * until xchg returns the same value that we put in.
+ * When that happens, there were no new interrupts,
+ * and pending_mask contains a bit for each interrupt
+ * that came in.
+ */
+ old = *mask_out;
+ do {
+ old |= mask;
+ mask = xchg(&pending_mask, old);
+ } while (mask != old);
+ return 1;
+ }
+
+ ti = current_thread_info();
+ nested = (ti->real_thread != NULL);
+ if (!nested) {
+ struct task_struct *task;
+ struct thread_info *tti;
+
+ task = cpu_tasks[ti->cpu].task;
+ tti = task_thread_info(task);
+
+ *ti = *tti;
+ ti->real_thread = tti;
+ task->stack = ti;
+ }
+
+ mask = xchg(&pending_mask, 0);
+ *mask_out |= mask | nested;
+ return 0;
+}
+
+unsigned long from_irq_stack(int nested)
+{
+ struct thread_info *ti, *to;
+ unsigned long mask;
+
+ ti = current_thread_info();
+
+ pending_mask = 1;
+
+ to = ti->real_thread;
+ current->stack = to;
+ ti->real_thread = NULL;
+ *to = *ti;
+
+ mask = xchg(&pending_mask, 0);
+ return mask & ~1;
+}
+
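
To make the protocol above concrete, here is a rough sketch of how the signal
entry code in the os layer is expected to bracket interrupt handling with
to_irq_stack()/from_irq_stack(). It is illustrative only: the real caller is in
the os-Linux signal code and is not part of this patch, and handle_signal,
handlers[] and MAX_SIG are assumed names for this example.

    /* Assumed table of kernel-side handlers, indexed by signal number;
     * in reality it would be filled in at boot by the arch code. */
    #define MAX_SIG 64
    static void (*handlers[MAX_SIG])(int sig, union uml_pt_regs *regs);

    void handle_signal(int sig, union uml_pt_regs *regs)
    {
        unsigned long pending = 1UL << sig;

        do {
            int nested;

            /* Non-zero means we interrupted another handler's stack
             * setup; our bit stays in pending_mask and the outer
             * handler will run us once its setup is finished. */
            if (to_irq_stack(&pending))
                return;

            /* Bit 0 is the nesting indicator; the other bits are the
             * signals that arrived while the stack was being set up. */
            nested = pending & 1;
            pending &= ~1UL;

            while (pending != 0) {
                int bit = __builtin_ffsl(pending) - 1;

                pending &= ~(1UL << bit);
                (*handlers[bit])(bit, regs);
            }

            /* Pick up anything that arrived while from_irq_stack was
             * copying the thread_info back; if non-zero, go around
             * again and set the stack up for the new interrupts. */
            if (!nested)
                pending = from_irq_stack(nested);
        } while (pending != 0);
    }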