summaryrefslogtreecommitdiff
path: root/linux/dev/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'linux/dev/kernel')
-rw-r--r--linux/dev/kernel/dma.c109
-rw-r--r--linux/dev/kernel/printk.c80
-rw-r--r--linux/dev/kernel/resource.c145
-rw-r--r--linux/dev/kernel/sched.c642
-rw-r--r--linux/dev/kernel/softirq.c46
5 files changed, 1022 insertions, 0 deletions
diff --git a/linux/dev/kernel/dma.c b/linux/dev/kernel/dma.c
new file mode 100644
index 0000000..4b56978
--- /dev/null
+++ b/linux/dev/kernel/dma.c
@@ -0,0 +1,109 @@
+/* $Id: dma.c,v 1.1 1999/04/26 05:49:35 tb Exp $
+ * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
+ *
+ * Written by Hennus Bergman, 1992.
+ *
+ * 1994/12/26: Changes by Alex Nash to fix a minor bug in /proc/dma.
+ * In the previous version the reported device could end up being wrong,
+ * if a device requested a DMA channel that was already in use.
+ * [It also happened to remove the sizeof(char *) == sizeof(int)
+ * assumption introduced because of those /proc/dma patches. -- Hennus]
+ */
+
+#define MACH_INCLUDE
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <asm/dma.h>
+#include <asm/system.h>
+
+
+/* A note on resource allocation:
+ *
+ * All drivers needing DMA channels, should allocate and release them
+ * through the public routines `request_dma()' and `free_dma()'.
+ *
+ * In order to avoid problems, all processes should allocate resources in
+ * the same sequence and release them in the reverse order.
+ *
+ * So, when allocating DMAs and IRQs, first allocate the IRQ, then the DMA.
+ * When releasing them, first release the DMA, then release the IRQ.
+ * If you don't, you may cause allocation requests to fail unnecessarily.
+ * This doesn't really matter now, but it will once we get real semaphores
+ * in the kernel.
+ */
+
+
+
+/* Channel n is busy iff dma_chan_busy[n].lock != 0.
+ * DMA0 used to be reserved for DRAM refresh, but apparently not any more...
+ * DMA4 is reserved for cascading.
+ */
+
+/* Per-channel allocation record: `lock' is non-zero while the channel
+ * is claimed; `device_id' names the owning driver (for reporting). */
+struct dma_chan
+{
+ int lock;
+ const char *device_id;
+};
+
+/* Channel 4 starts out busy: on PC hardware it cascades the two
+ * 8237 DMA controllers and must never be handed to a driver. */
+static struct dma_chan dma_chan_busy[MAX_DMA_CHANNELS] =
+{
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 },
+ { 1, "cascade" },
+ { 0, 0 },
+ { 0, 0 },
+ { 0, 0 }
+};
+
+#ifndef MACH
+/* Produce the /proc/dma report: one "NN: owner" line per busy
+ * channel, written into BUF.  Returns the number of bytes written. */
+int
+get_dma_list (char *buf)
+{
+  char *out = buf;
+  int chan;
+
+  for (chan = 0; chan < MAX_DMA_CHANNELS; chan++)
+    if (dma_chan_busy[chan].lock)
+      out += linux_sprintf (out, "%2d: %s\n",
+			    chan,
+			    dma_chan_busy[chan].device_id);
+  return out - buf;
+}
+#endif
+
+/* Claim DMA channel DMANR on behalf of DEVICE_ID.
+ * Returns 0 on success, -LINUX_EINVAL for a bad channel number, or
+ * -LINUX_EBUSY if the channel is already owned. */
+int
+request_dma (unsigned int dmanr, const char *device_id)
+{
+  if (dmanr >= MAX_DMA_CHANNELS)
+    return -LINUX_EINVAL;
+
+  /* xchg is atomic: whoever swaps the 0 out first owns the channel,
+     so no further locking is required.  */
+  if (xchg (&dma_chan_busy[dmanr].lock, 1) != 0)
+    return -LINUX_EBUSY;
+
+  dma_chan_busy[dmanr].device_id = device_id;
+  return 0;
+}
+
+
+/* Release DMA channel DMANR.  A bad channel number or a channel that
+ * was never allocated only provokes a diagnostic. */
+void
+free_dma (unsigned int dmanr)
+{
+  if (dmanr >= MAX_DMA_CHANNELS)
+    {
+      printk ("Trying to free DMA%d\n", dmanr);
+      return;
+    }
+
+  /* Atomically clear the lock; a previous value of 0 means the
+     channel was already free.  */
+  if (xchg (&dma_chan_busy[dmanr].lock, 0) == 0)
+    printk ("Trying to free free DMA%d\n", dmanr);
+}
diff --git a/linux/dev/kernel/printk.c b/linux/dev/kernel/printk.c
new file mode 100644
index 0000000..2ff52d1
--- /dev/null
+++ b/linux/dev/kernel/printk.c
@@ -0,0 +1,80 @@
+/*
+ * Linux kernel print routine.
+ * Copyright (C) 1995 Shantanu Goel.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+/*
+ * linux/kernel/printk.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#define MACH_INCLUDE
+#include <stdarg.h>
+#include <asm/system.h>
+
+/* Staging buffer for one printk call; bytes 0-2 are kept free so a
+ * "<level>" prefix can be inserted ahead of an unprefixed message. */
+static char buf[2048];
+
+/* Level assigned to messages that carry no "<0>".."<7>" prefix. */
+#define DEFAULT_MESSAGE_LOGLEVEL 4
+#define DEFAULT_CONSOLE_LOGLEVEL 7
+
+/* Messages with a level strictly below this reach the console. */
+int console_loglevel = DEFAULT_CONSOLE_LOGLEVEL;
+
+/*
+ * Kernel formatted-print routine.  Formats FMT into the static buffer
+ * and copies it to the console one line at a time, honoring
+ * "<0>".."<7>" syslog-style level prefixes; unprefixed text gets
+ * DEFAULT_MESSAGE_LOGLEVEL.  Returns the formatted length.
+ *
+ * Interrupts are disabled for the whole call, which also serializes
+ * use of the static buffer and of msg_level.
+ *
+ * NOTE(review): linux_vsprintf takes no size bound, so a message
+ * longer than the buffer would overrun it — callers must keep
+ * messages short.
+ */
+int
+printk (char *fmt, ...)
+{
+ va_list args;
+ int n, flags;
+ extern void cnputc ();
+ extern int linux_vsprintf (char *buf, char *fmt,...);
+ char *p, *msg, *buf_end;
+ static int msg_level = -1; /* level of a partially printed line */
+
+ save_flags (flags);
+ cli ();
+ va_start (args, fmt);
+ /* Format past the 3 reserved prefix bytes.  */
+ n = linux_vsprintf (buf + 3, fmt, args);
+ buf_end = buf + 3 + n;
+ va_end (args);
+ for (p = buf + 3; p < buf_end; p++)
+ {
+ msg = p;
+ if (msg_level < 0)
+ {
+ /* Start of a new message: take its level from a "<d>" prefix,
+ synthesizing one in the reserved bytes when absent. */
+ if (p[0] != '<' || p[1] < '0' || p[1] > '7' || p[2] != '>')
+ {
+ p -= 3;
+ p[0] = '<';
+ p[1] = DEFAULT_MESSAGE_LOGLEVEL + '0';
+ p[2] = '>';
+ }
+ else
+ msg += 3;
+ msg_level = p[1] - '0';
+ }
+ /* Advance p to the end of the current line (or of the buffer). */
+ for (; p < buf_end; p++)
+ if (*p == '\n')
+ break;
+ if (msg_level < console_loglevel)
+ while (msg <= p)
+ cnputc (*msg++);
+ /* Only a completed line resets the level; text without a trailing
+ newline keeps its level for the next printk call. */
+ if (*p == '\n')
+ msg_level = -1;
+ }
+ restore_flags (flags);
+ return n;
+}
diff --git a/linux/dev/kernel/resource.c b/linux/dev/kernel/resource.c
new file mode 100644
index 0000000..7a18755
--- /dev/null
+++ b/linux/dev/kernel/resource.c
@@ -0,0 +1,145 @@
+/*
+ * linux/kernel/resource.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * David Hinds
+ *
+ * Kernel io-region resource management
+ */
+
+#include <sys/types.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+
+#define IOTABLE_SIZE 128
+
+/* Node in the address-ordered singly-linked list of reserved port
+ * regions; a node covers I/O ports [from, from+num-1]. */
+typedef struct resource_entry_t
+{
+ u_long from, num;
+ const char *name;
+ struct resource_entry_t *next;
+} resource_entry_t;
+
+/* Dummy list head; real entries hang off iolist.next. */
+static resource_entry_t iolist = { 0, 0, "", NULL };
+
+/* Static backing store for list nodes; num == 0 marks a free slot. */
+static resource_entry_t iotable[IOTABLE_SIZE];
+
+/*
+ * Produce the /proc/ioports report into BUF, one "from-to : name"
+ * line per registered region, stopping shy of the 4K page limit.
+ * Returns the number of bytes written.
+ */
+#ifndef MACH
+int
+get_ioport_list (char *buf)
+{
+  int len = 0;
+  resource_entry_t *entry = iolist.next;
+
+  while (entry != NULL && len < 4000)
+    {
+      len += linux_sprintf (buf+len, "%04lx-%04lx : %s\n",
+			    entry->from, entry->from+entry->num-1,
+			    entry->name);
+      entry = entry->next;
+    }
+  if (entry)
+    len += linux_sprintf (buf+len, "4K limit reached!\n");
+  return len;
+}
+#endif
+
+/*
+ * Locate the list node after which region [FROM, FROM+NUM-1] can be
+ * inserted.  Returns NULL when the range wraps around the end of the
+ * address space or overlaps an already-registered region.
+ */
+static resource_entry_t *
+find_gap (resource_entry_t *root, u_long from, u_long num)
+{
+  resource_entry_t *p = root;
+  unsigned long flags;
+
+  /* from+num-1 wrapped past the top of the address space.  */
+  if (from > from+num-1)
+    return NULL;
+
+  save_flags (flags);
+  cli ();
+  while (1)
+    {
+      /* A real predecessor ending at or beyond FROM means overlap.  */
+      if (p != root && p->from+p->num-1 >= from)
+	{
+	  p = NULL;
+	  break;
+	}
+      /* End of list, or the successor starts after our region.  */
+      if (p->next == NULL || p->next->from > from+num-1)
+	break;
+      p = p->next;
+    }
+  restore_flags (flags);
+  return p;
+}
+
+/*
+ * Register the I/O port region [FROM, FROM+NUM-1] under NAME.
+ * Warns when the static table is exhausted; silently does nothing if
+ * the region overlaps one already registered (matching the Linux
+ * semantics — callers are expected to check_region first).
+ */
+void
+request_region (unsigned int from, unsigned int num, const char *name)
+{
+  resource_entry_t *prev;
+  int slot;
+
+  /* Find a free slot in the static node table (num == 0 == free).  */
+  for (slot = 0; slot < IOTABLE_SIZE; slot++)
+    if (iotable[slot].num == 0)
+      break;
+  if (slot == IOTABLE_SIZE)
+    {
+      printk ("warning: ioport table is full\n");
+      return;
+    }
+
+  prev = find_gap (&iolist, from, num);
+  if (prev == NULL)
+    return;
+
+  iotable[slot].name = name;
+  iotable[slot].from = from;
+  iotable[slot].num = num;
+  iotable[slot].next = prev->next;
+  prev->next = &iotable[slot];
+}
+
+/*
+ * Unregister a region previously passed to request_region.  The
+ * (FROM, NUM) pair must match the registration exactly; unknown
+ * regions are ignored.
+ */
+void
+release_region (unsigned int from, unsigned int num)
+{
+  resource_entry_t *prev = &iolist;
+  resource_entry_t *cur;
+
+  while ((cur = prev->next) != NULL)
+    {
+      if (cur->from == from && cur->num == num)
+	{
+	  cur->num = 0;		/* marks the iotable slot free */
+	  prev->next = cur->next;
+	  return;
+	}
+      prev = cur;
+    }
+}
+
+/*
+ * Probe helper: returns 0 when [FROM, FROM+NUM-1] is free, or
+ * -LINUX_EBUSY when it overlaps a registered region.
+ */
+int
+check_region (unsigned int from, unsigned int num)
+{
+  if (find_gap (&iolist, from, num) == NULL)
+    return -LINUX_EBUSY;
+  return 0;
+}
+
+/* Handle the "reserve=" boot argument.  INTS holds a count in slot 0
+ * followed by (port, extent) pairs, each registered as "reserved". */
+void
+reserve_setup(char *str, int *ints)
+{
+  int n;
+
+  for (n = 1; n < ints[0]; n += 2)
+    request_region (ints[n], ints[n+1], "reserved");
+}
diff --git a/linux/dev/kernel/sched.c b/linux/dev/kernel/sched.c
new file mode 100644
index 0000000..792c2da
--- /dev/null
+++ b/linux/dev/kernel/sched.c
@@ -0,0 +1,642 @@
+/*
+ * Linux scheduling support.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/*
+ * linux/kernel/sched.c
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ */
+
+#include <sys/types.h>
+#include <machine/spl.h>
+
+#include <mach/boolean.h>
+
+#include <kern/thread.h>
+#include <kern/sched_prim.h>
+
+#define MACH_INCLUDE
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+
+#include <asm/system.h>
+#include <asm/atomic.h>
+
+/* Linux `securelevel' compatibility variable. */
+int securelevel = 0;
+
+/* Services supplied by the surrounding Mach kernel / glue code. */
+extern void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
+extern void free_contig_mem (vm_page_t);
+extern spl_t splhigh (void);
+extern spl_t splx (spl_t);
+extern void linux_soft_intr (void);
+extern int issig (void);
+extern int printf (const char *, ...);
+extern int linux_auto_config;
+
+static void timer_bh (void);
+
+/* The standard Linux task queues serviced from this file. */
+DECLARE_TASK_QUEUE (tq_timer);
+DECLARE_TASK_QUEUE (tq_immediate);
+DECLARE_TASK_QUEUE (tq_scheduler);
+
+/* While autoconfiguring (linux_auto_config non-zero) there is no real
+ * blocking; at most one wait queue can be pending and it is recorded
+ * here instead of using Mach waits. */
+static struct wait_queue **auto_config_queue;
+
+/* Run pending bottom halves, if any, flagging interrupt context in
+ * intr_count for the duration so nested scheduling is caught. */
+static inline void
+handle_soft_intr (void)
+{
+  if ((bh_active & bh_mask) == 0)
+    return;
+
+  intr_count = 1;
+  linux_soft_intr ();
+  intr_count = 0;
+}
+
+/* TQUEUE_BH handler: drain tq_timer, the task queue serviced once
+ * per clock tick. */
+static void
+tqueue_bh (void)
+{
+  run_task_queue (&tq_timer);
+}
+
+/* IMMEDIATE_BH handler: drain tq_immediate. */
+static void
+immediate_bh (void)
+{
+  run_task_queue (&tq_immediate);
+}
+
+/*
+ * Register the caller on queue Q.  At normal run time this maps onto
+ * a Mach assert_wait on the queue's address (WAIT itself is unused);
+ * during autoconfiguration the queue is merely remembered in
+ * auto_config_queue for wake_up/remove_wait_queue to clear.
+ */
+void
+add_wait_queue (struct wait_queue **q, struct wait_queue *wait)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ cli ();
+ /* FALSE: the wait is not interruptible by signals. */
+ assert_wait ((event_t) q, FALSE);
+ restore_flags (flags);
+ return;
+ }
+
+ /* Only one queue can be outstanding while autoconfiguring. */
+ if (auto_config_queue)
+ printf ("add_wait_queue: queue not empty\n");
+ auto_config_queue = q;
+}
+
+/*
+ * Counterpart of add_wait_queue.  At normal run time it wakes every
+ * thread asserted on Q (Mach waits need no explicit de-registration);
+ * during autoconfiguration it clears the recorded queue so the
+ * polling loops in this file terminate.
+ */
+void
+remove_wait_queue (struct wait_queue **q, struct wait_queue *wait)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ save_flags (flags);
+ thread_wakeup ((event_t) q);
+ restore_flags (flags);
+ return;
+ }
+
+ auto_config_queue = NULL;
+}
+
+/*
+ * Try to consume one wakeup credit from SEM.  Returns 1 when a credit
+ * was taken, 0 otherwise.  The buzz lock plus cli protect sem->waking
+ * against concurrent __up calls.
+ */
+static inline int
+waking_non_zero (struct semaphore *sem)
+{
+ int ret;
+ unsigned long flags;
+
+ get_buzz_lock (&sem->lock);
+ save_flags (flags);
+ cli ();
+
+ if ((ret = (sem->waking > 0)))
+ sem->waking--;
+
+ restore_flags (flags);
+ give_buzz_lock (&sem->lock);
+ return ret;
+}
+
+/* Semaphore release slow path: bank one wakeup credit, then rouse
+ * whoever is sleeping on the semaphore's wait queue. */
+void
+__up (struct semaphore *sem)
+{
+  atomic_inc (&sem->waking);
+  wake_up (&sem->wait);
+}
+
+/*
+ * Common slow path for down()/down_interruptible(): wait until a
+ * wakeup credit can be consumed from SEM.  TASK_STATE selects whether
+ * a pending signal aborts the wait.  Returns 0 on success or
+ * -LINUX_EINTR when interrupted; on interruption sem->count is
+ * re-incremented (presumably undoing the fast path's decrement —
+ * the fast path is not visible in this file).
+ */
+int
+__do_down (struct semaphore *sem, int task_state)
+{
+ unsigned long flags;
+ int ret = 0;
+ int s;
+
+ if (!linux_auto_config)
+ {
+ save_flags (flags);
+ s = splhigh ();
+ for (;;)
+ {
+ if (waking_non_zero (sem))
+ break;
+
+ if (task_state == TASK_INTERRUPTIBLE && issig ())
+ {
+ ret = -LINUX_EINTR;
+ atomic_inc (&sem->count);
+ break;
+ }
+
+ /* Queue a Mach wait on the semaphore (interruptible iff the
+ caller asked for it), drop the priority level while
+ blocked, and re-raise it before re-checking. */
+ assert_wait ((event_t) &sem->wait,
+ task_state == TASK_INTERRUPTIBLE ? TRUE : FALSE);
+ splx (s);
+ schedule ();
+ s = splhigh ();
+ }
+ splx (s);
+ restore_flags (flags);
+ return ret;
+ }
+
+ /* Autoconfiguration: no blocking is possible, so poll — schedule()
+ still runs bottom halves between checks. */
+ while (!waking_non_zero (sem))
+ {
+ if (task_state == TASK_INTERRUPTIBLE && issig ())
+ {
+ ret = -LINUX_EINTR;
+ atomic_inc (&sem->count);
+ break;
+ }
+ schedule ();
+ }
+
+ return ret;
+}
+
+/* Uninterruptible semaphore acquire; in this mode __do_down can only
+ * return 0, so the result is discarded. */
+void
+__down (struct semaphore *sem)
+{
+  (void) __do_down (sem, TASK_UNINTERRUPTIBLE);
+}
+
+/* Interruptible semaphore acquire: 0 on success, -LINUX_EINTR when a
+ * signal cut the wait short. */
+int
+__down_interruptible (struct semaphore *sem)
+{
+  return __do_down (sem, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * Common body of sleep_on/interruptible_sleep_on: sleep on Q until
+ * woken.  At normal run time this is a Mach wait (interruptible iff
+ * STATE is TASK_INTERRUPTIBLE); during autoconfiguration it spins,
+ * calling schedule() to service bottom halves, until wake_up or
+ * remove_wait_queue clears auto_config_queue.
+ */
+void
+__sleep_on (struct wait_queue **q, int state)
+{
+ unsigned long flags;
+
+ if (!q)
+ return;
+ save_flags (flags);
+ if (!linux_auto_config)
+ {
+ assert_wait ((event_t) q, state == TASK_INTERRUPTIBLE ? TRUE : FALSE);
+ /* Interrupts must be on while blocked so a wakeup can arrive. */
+ sti ();
+ schedule ();
+ restore_flags (flags);
+ return;
+ }
+
+ add_wait_queue (q, NULL);
+ sti ();
+ while (auto_config_queue)
+ schedule ();
+ restore_flags (flags);
+}
+
+/* Sleep on Q, immune to signals. */
+void
+sleep_on (struct wait_queue **q)
+{
+  __sleep_on (q, TASK_UNINTERRUPTIBLE);
+}
+
+/* Sleep on Q; a signal may end the sleep early. */
+void
+interruptible_sleep_on (struct wait_queue **q)
+{
+  __sleep_on (q, TASK_INTERRUPTIBLE);
+}
+
+/*
+ * Wake every thread sleeping on Q.  During autoconfiguration this
+ * just clears the recorded queue so __sleep_on's poll loop exits.
+ *
+ * NOTE(review): waits on wait_for_request are deliberately skipped;
+ * the original author only marked this with "???", and the reason is
+ * not evident from this file — confirm against the block I/O code.
+ */
+void
+wake_up (struct wait_queue **q)
+{
+ unsigned long flags;
+
+ if (! linux_auto_config)
+ {
+ if (q != &wait_for_request) /* ??? by OKUJI Yoshinori. */
+ {
+ save_flags (flags);
+ thread_wakeup ((event_t) q);
+ restore_flags (flags);
+ }
+ return;
+ }
+
+ if (auto_config_queue == q)
+ auto_config_queue = NULL;
+}
+
+/*
+ * Block until buffer BH is unlocked, kicking the disk task queue so
+ * queued requests make progress.  Outside autoconfiguration this
+ * sleeps via a Mach wait on the buffer's address; unlock_buffer
+ * performs the matching wakeup when it sees b_wait set.
+ */
+void
+__wait_on_buffer (struct buffer_head *bh)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ if (! linux_auto_config)
+ {
+ while (1)
+ {
+ /* Re-check with interrupts off so an unlock cannot slip in
+ between the test and assert_wait. */
+ cli ();
+ run_task_queue (&tq_disk);
+ if (! buffer_locked (bh))
+ break;
+ /* Any non-NULL value suffices: it only signals to
+ unlock_buffer that somebody is waiting. */
+ bh->b_wait = (struct wait_queue *) 1;
+ assert_wait ((event_t) bh, FALSE);
+ sti ();
+ schedule ();
+ }
+ restore_flags (flags);
+ return;
+ }
+
+ /* Autoconfiguration: busy-wait with interrupts enabled. */
+ sti ();
+ while (buffer_locked (bh))
+ {
+ run_task_queue (&tq_disk);
+ schedule ();
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Clear BH's lock bit and wake any thread that __wait_on_buffer
+ * recorded in b_wait.  Interrupt safe.
+ */
+void
+unlock_buffer (struct buffer_head *bh)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ cli ();
+ clear_bit (BH_Lock, &bh->b_state);
+ if (bh->b_wait && ! linux_auto_config)
+ {
+ bh->b_wait = NULL;
+ thread_wakeup ((event_t) bh);
+ }
+ restore_flags (flags);
+}
+
+/*
+ * Linux `schedule': run pending bottom halves and the scheduler task
+ * queue, then block the current thread — except while autoconfiguring,
+ * when the callers simply poll and there is nothing to block on.
+ */
+void
+schedule (void)
+{
+ /* Scheduling from interrupt context is a bug; name the caller. */
+ if (intr_count)
+ printk ("Aiee: scheduling in interrupt %p\n",
+ __builtin_return_address (0));
+
+ handle_soft_intr ();
+ run_task_queue (&tq_scheduler);
+
+ if (!linux_auto_config)
+ thread_block (0);
+}
+
+/*
+ * Sleep for T ticks.  The local variable exists solely so its address
+ * can serve as a wait event no one else will ever wake; its value is
+ * never read.
+ */
+void
+cdrom_sleep (int t)
+{
+ int xxx;
+
+ assert_wait ((event_t) &xxx, TRUE);
+ thread_set_timeout (t);
+ schedule ();
+}
+
+/*
+ * Boot-time setup: register the statically numbered bottom-half
+ * handlers used by the Linux drivers.
+ */
+void
+linux_sched_init (void)
+{
+  init_bh (TIMER_BH, timer_bh);
+  init_bh (TQUEUE_BH, tqueue_bh);
+  init_bh (IMMEDIATE_BH, immediate_bh);
+}
+
+/*
+ * Linux timers.
+ *
+ * Copyright (C) 1996 The University of Utah and the Computer Systems
+ * Laboratory at the University of Utah (CSL)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Author: Shantanu Goel, University of Utah CSL
+ */
+
+/* Clock ticks since boot; advanced by linux_timer_intr. */
+unsigned long volatile jiffies = 0;
+
+/*
+ * Mask of active timers.
+ */
+unsigned long timer_active = 0;
+
+/*
+ * List of timeout routines.
+ */
+struct timer_struct timer_table[32];
+
+/* Timer-wheel geometry: tv1 holds TVR_SIZE one-jiffy slots; each
+ * higher level (tv2..tv5) holds TVN_SIZE slots of increasingly
+ * coarse granularity. */
+#define TVN_BITS 6
+#define TVR_BITS 8
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+#define SLOW_BUT_DEBUGGING_TIMERS 0
+
+/* One coarse wheel level. */
+struct timer_vec
+ {
+ int index;
+ struct timer_list *vec[TVN_SIZE];
+ };
+
+/* The fine-grained root level; differs only in slot count. */
+struct timer_vec_root
+ {
+ int index;
+ struct timer_list *vec[TVR_SIZE];
+ };
+
+static struct timer_vec tv5 =
+{0};
+static struct timer_vec tv4 =
+{0};
+static struct timer_vec tv3 =
+{0};
+static struct timer_vec tv2 =
+{0};
+static struct timer_vec_root tv1 =
+{0};
+
+/* All levels, finest first.  The tv1 cast relies on timer_vec and
+ * timer_vec_root sharing their leading layout (index + vec[]). */
+static struct timer_vec *const tvecs[] =
+{
+ (struct timer_vec *) &tv1, &tv2, &tv3, &tv4, &tv5
+};
+
+#define NOOF_TVECS (sizeof(tvecs) / sizeof(tvecs[0]))
+
+/* The tick up to which the wheel has been processed; trails jiffies. */
+static unsigned long timer_jiffies = 0;
+
+/*
+ * Push TIMER onto the front of the doubly-linked list vec[idx].  The
+ * head pointer's own address doubles as the prev link, so
+ * detach_timer can unlink a front element without knowing which
+ * vector it lives in.
+ */
+static inline void
+insert_timer (struct timer_list *timer, struct timer_list **vec, int idx)
+{
+ if ((timer->next = vec[idx]))
+ vec[idx]->prev = timer;
+ vec[idx] = timer;
+ timer->prev = (struct timer_list *) &vec[idx];
+}
+
+/*
+ * Link TIMER into the timer wheel according to how far in the future
+ * timer->expires lies: tv1 covers the next TVR_SIZE jiffies at
+ * one-jiffy resolution, tv2..tv5 cover progressively longer spans at
+ * coarser resolution, cascading back down as time advances.
+ *
+ * must be cli-ed when calling this
+ */
+static inline void
+internal_add_timer (struct timer_list *timer)
+{
+  unsigned long expires = timer->expires;
+  unsigned long idx = expires - timer_jiffies;
+
+  if (idx < TVR_SIZE)
+    {
+      int i = expires & TVR_MASK;
+      insert_timer (timer, tv1.vec, i);
+    }
+  else if (idx < 1 << (TVR_BITS + TVN_BITS))
+    {
+      int i = (expires >> TVR_BITS) & TVN_MASK;
+      insert_timer (timer, tv2.vec, i);
+    }
+  else if (idx < 1 << (TVR_BITS + 2 * TVN_BITS))
+    {
+      int i = (expires >> (TVR_BITS + TVN_BITS)) & TVN_MASK;
+      insert_timer (timer, tv3.vec, i);
+    }
+  else if (idx < 1 << (TVR_BITS + 3 * TVN_BITS))
+    {
+      int i = (expires >> (TVR_BITS + 2 * TVN_BITS)) & TVN_MASK;
+      insert_timer (timer, tv4.vec, i);
+    }
+  else if (expires < timer_jiffies)
+    {
+      /* can happen if you add a timer with expires == jiffies,
+       * or you set a timer to go off in the past
+       */
+      insert_timer (timer, tv1.vec, tv1.index);
+    }
+  else if (idx <= 0xffffffffUL)
+    {
+      /* Fix: this was `idx < 0xffffffffUL', which silently dropped a
+       * timer whose delta is exactly 0xffffffff on 32-bit longs;
+       * later upstream Linux uses `<=' here.
+       */
+      int i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
+      insert_timer (timer, tv5.vec, i);
+    }
+  else
+    {
+      /* Can only get here on architectures with 64-bit jiffies */
+      timer->next = timer->prev = timer;
+    }
+}
+
+/*
+ * Queue TIMER, whose expires field the caller has already set (in
+ * jiffies).  Interrupt safe.  With SLOW_BUT_DEBUGGING_TIMERS enabled,
+ * inserting an already-linked timer is reported instead of silently
+ * corrupting the wheel.
+ */
+void
+add_timer (struct timer_list *timer)
+{
+ unsigned long flags;
+
+ save_flags (flags);
+ cli ();
+#if SLOW_BUT_DEBUGGING_TIMERS
+ if (timer->next || timer->prev)
+ {
+ printk ("add_timer() called with non-zero list from %p\n",
+ __builtin_return_address (0));
+ goto out;
+ }
+#endif
+ internal_add_timer (timer);
+#if SLOW_BUT_DEBUGGING_TIMERS
+out:
+#endif
+ restore_flags (flags);
+}
+
+/*
+ * Unlink TIMER from its list, if it is on one.  Returns 1 when the
+ * timer was linked (it had a prev pointer), 0 otherwise.  The links
+ * themselves are left for the caller to clear.
+ */
+static inline int
+detach_timer (struct timer_list *timer)
+{
+  struct timer_list *after = timer->next;
+  struct timer_list *before = timer->prev;
+
+  if (after)
+    after->prev = before;
+  if (! before)
+    return 0;
+  before->next = after;
+  return 1;
+}
+
+/*
+ * Cancel TIMER.  Returns 1 if it was pending, 0 if it was not queued.
+ * Interrupt safe; the timer's links are zeroed so it can be re-added.
+ */
+int
+del_timer (struct timer_list *timer)
+{
+  unsigned long flags;
+  int was_pending;
+
+  save_flags (flags);
+  cli ();
+  was_pending = detach_timer (timer);
+  timer->next = timer->prev = 0;
+  restore_flags (flags);
+  return was_pending;
+}
+
+/*
+ * Service the static timer_table: for every active entry whose expiry
+ * has passed, clear its bit in timer_active and call its handler.
+ */
+static inline void
+run_old_timers (void)
+{
+ struct timer_struct *tp;
+ unsigned long mask;
+
+ /* mask walks the bits 1, 2, 4, ... in step with tp. */
+ for (mask = 1, tp = timer_table + 0; mask; tp++, mask += mask)
+ {
+ /* No active timer at this bit or above: done. */
+ if (mask > timer_active)
+ break;
+ if (!(mask & timer_active))
+ continue;
+ if (tp->expires > jiffies)
+ continue;
+ timer_active &= ~mask;
+ tp->fn ();
+ sti (); /* re-enable interrupts in case the handler disabled them */
+ }
+}
+
+/*
+ * Re-add every timer in TV's current slot, which redistributes each
+ * one into the next-finer level now that timer_jiffies has advanced,
+ * then step TV's index (mod TVN_SIZE).
+ */
+static inline void
+cascade_timers (struct timer_vec *tv)
+{
+ /* cascade all the timers from tv up one level */
+ struct timer_list *timer;
+
+ timer = tv->vec[tv->index];
+ /*
+ * We are removing _all_ timers from the list, so we don't have to
+ * detach them individually, just clear the list afterwards.
+ */
+ while (timer)
+ {
+ struct timer_list *tmp = timer;
+ timer = timer->next;
+ internal_add_timer (tmp);
+ }
+ tv->vec[tv->index] = NULL;
+ tv->index = (tv->index + 1) & TVN_MASK;
+}
+
+/*
+ * Advance the timer wheel from timer_jiffies up to the current
+ * jiffies, refilling tv1 from the coarser levels whenever it wraps
+ * and firing every expired timer.  Leaves interrupts enabled.
+ */
+static inline void
+run_timer_list (void)
+{
+ cli ();
+ /* Signed comparison so a wrapped difference still terminates. */
+ while ((long) (jiffies - timer_jiffies) >= 0)
+ {
+ struct timer_list *timer;
+
+ /* tv1 wrapped around: cascade the next level(s) down into it. */
+ if (!tv1.index)
+ {
+ int n = 1;
+
+ do
+ {
+ cascade_timers (tvecs[n]);
+ }
+ while (tvecs[n]->index == 1 && ++n < NOOF_TVECS);
+ }
+ /* Fire everything due this tick.  Handlers run with interrupts
+ enabled and may legitimately re-add timers. */
+ while ((timer = tv1.vec[tv1.index]))
+ {
+ void (*fn) (unsigned long) = timer->function;
+ unsigned long data = timer->data;
+
+ detach_timer (timer);
+ timer->next = timer->prev = NULL;
+ sti ();
+ fn (data);
+ cli ();
+ }
+ ++timer_jiffies;
+ tv1.index = (tv1.index + 1) & TVR_MASK;
+ }
+ sti ();
+}
+
+/*
+ * TIMER_BH handler: service the static timer_table entries, then the
+ * dynamic timer wheel.
+ */
+static void
+timer_bh (void)
+{
+  run_old_timers ();
+  run_timer_list ();
+}
+
+#if 0
+int linux_timer_print = 0;
+#endif
+
+/*
+ * Clock interrupt handler: advance jiffies and mark the timer bottom
+ * halves so timer_bh/tqueue_bh run at the next soft-interrupt pass.
+ */
+void
+linux_timer_intr (void)
+{
+ /* Cast strips volatile so the increment need not be re-read. */
+ (*(unsigned long *) &jiffies)++;
+ mark_bh (TIMER_BH);
+ if (tq_timer)
+ mark_bh (TQUEUE_BH);
+#if 0
+ if (linux_timer_print)
+ printf ("linux_timer_intr: pic_mask[0] %x\n", pic_mask[0]);
+#endif
+}
diff --git a/linux/dev/kernel/softirq.c b/linux/dev/kernel/softirq.c
new file mode 100644
index 0000000..96102a7
--- /dev/null
+++ b/linux/dev/kernel/softirq.c
@@ -0,0 +1,46 @@
+/*
+ * linux/kernel/softirq.c
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ *
+ * do_bottom_half() runs at normal kernel priority: all interrupts
+ * enabled. do_bottom_half() is atomic with respect to itself: a
+ * bottom_half handler need not be re-entrant.
+ */
+
+#define MACH_INCLUDE
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <asm/system.h>
+
+/* Per-handler bookkeeping for bottom-half enable/disable nesting;
+ * not consulted in this file. */
+int bh_mask_count[32];
+/* Bit n set: handler n has been marked for execution (mark_bh). */
+unsigned long bh_active = 0;
+/* Bit n set: handler n is enabled. */
+unsigned long bh_mask = 0;
+/* The handlers themselves, indexed by bottom-half number. */
+void (*bh_base[32]) (void);
+
+/*
+ * Software-interrupt dispatcher: run every handler whose bit is set
+ * in both bh_active and bh_mask, clearing each active bit before its
+ * call.  Runs with interrupts enabled; a marked slot with no handler
+ * installed aborts the scan with a diagnostic.
+ */
+void
+linux_soft_intr (void)
+{
+ unsigned long active;
+ unsigned long mask, left;
+ void (**bh) (void);
+
+ sti ();
+ bh = bh_base;
+ active = bh_active & bh_mask;
+ /* mask is the current handler's bit; left masks off bits already
+ examined, letting the loop stop once no active bits remain. */
+ for (mask = 1, left = ~0; left & active; bh++, mask += mask, left += left)
+ {
+ if (mask & active)
+ {
+ void (*fn) (void);
+ bh_active &= ~mask;
+ fn = *bh;
+ if (!fn)
+ goto bad_bh;
+ fn ();
+ }
+ }
+ return;
+bad_bh:
+ printk ("linux_soft_intr:bad interrupt handler entry %08lx\n", mask);
+}