Diffstat (limited to 'linux/src/include/asm-i386')
-rw-r--r--  linux/src/include/asm-i386/hardirq.h   |  65
-rw-r--r--  linux/src/include/asm-i386/spinlock.h  | 262
2 files changed, 327 insertions(+), 0 deletions(-)
diff --git a/linux/src/include/asm-i386/hardirq.h b/linux/src/include/asm-i386/hardirq.h
new file mode 100644
index 0000000..5339613
--- /dev/null
+++ b/linux/src/include/asm-i386/hardirq.h
@@ -0,0 +1,65 @@
+#ifndef __ASM_HARDIRQ_H
+#define __ASM_HARDIRQ_H
+
+#include <linux/tasks.h>
+
+extern unsigned int local_irq_count[NR_CPUS];
+
+/*
+ * Are we in an interrupt context, i.e. doing bottom-half
+ * or hardware interrupt processing?
+ */
+#define in_interrupt() ({ int __cpu = smp_processor_id(); \
+ (local_irq_count[__cpu] + local_bh_count[__cpu] != 0); })
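A usage sketch (the caller below is hypothetical, not part of this header): code that can run in either context consults in_interrupt() to decide whether it may block, for instance when choosing an allocation priority.

    /* Hypothetical caller: sleeping is forbidden in interrupt context,
     * so fall back to a non-blocking allocation there. */
    buf = kmalloc(size, in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);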
+
+#ifndef __SMP__
+
+#define hardirq_trylock(cpu) (local_irq_count[cpu] == 0)
+#define hardirq_endlock(cpu) do { } while (0)
+
+#define hardirq_enter(cpu) (local_irq_count[cpu]++)
+#define hardirq_exit(cpu) (local_irq_count[cpu]--)
+
+#define synchronize_irq() barrier()
+
+#else
+
+#include <asm/atomic.h>
+
+extern unsigned char global_irq_holder;
+extern volatile unsigned int global_irq_lock;
+extern atomic_t global_irq_count;
+
+static inline void release_irqlock(int cpu)
+{
+ /* If we didn't own the irq lock, just ignore it. */
+ if (global_irq_holder == (unsigned char) cpu) {
+ global_irq_holder = NO_PROC_ID;
+ clear_bit(0,&global_irq_lock);
+ }
+}
+
+static inline void hardirq_enter(int cpu)
+{
+ ++local_irq_count[cpu];
+ atomic_inc(&global_irq_count);
+}
+
+static inline void hardirq_exit(int cpu)
+{
+ atomic_dec(&global_irq_count);
+ --local_irq_count[cpu];
+}
+
+static inline int hardirq_trylock(int cpu)
+{
+ return !atomic_read(&global_irq_count) && !test_bit(0,&global_irq_lock);
+}
+
+#define hardirq_endlock(cpu) do { } while (0)
+
+extern void synchronize_irq(void);
+
+#endif /* __SMP__ */
+
+#endif /* __ASM_HARDIRQ_H */
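For context, a sketch of how the SMP primitives above pair up on the interrupt path, loosely modeled on the era's do_IRQ(); the dispatch routine is hypothetical, and test_bit()/barrier() are assumed from asm/bitops.h and asm/system.h:

    void do_IRQ_sketch(int irq, int cpu)
    {
            hardirq_enter(cpu);             /* bump nesting + global count */
            /* Wait until no other CPU holds the global irq lock. */
            while (test_bit(0, &global_irq_lock))
                    barrier();
            handle_IRQ_event_sketch(irq);   /* hypothetical handler dispatch */
            hardirq_exit(cpu);
    }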
diff --git a/linux/src/include/asm-i386/spinlock.h b/linux/src/include/asm-i386/spinlock.h
new file mode 100644
index 0000000..18119d4
--- /dev/null
+++ b/linux/src/include/asm-i386/spinlock.h
@@ -0,0 +1,262 @@
+#ifndef __ASM_SPINLOCK_H
+#define __ASM_SPINLOCK_H
+
+#ifndef __SMP__
+
+#define DEBUG_SPINLOCKS 0 /* 0 == no debugging, 1 == maintain lock state, 2 == full debug */
+
+#if (DEBUG_SPINLOCKS < 1)
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
+ */
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } spinlock_t;
+ #define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+#endif
+
+#define spin_lock_init(lock) do { } while(0)
+#define spin_lock(lock) (void)(lock) /* Avoid an "unused variable" warning. */
+#define spin_trylock(lock) (1)
+#define spin_unlock_wait(lock) do { } while(0)
+#define spin_unlock(lock) do { } while(0)
+#define spin_lock_irq(lock) cli()
+#define spin_unlock_irq(lock) sti()
+
+#define spin_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define spin_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+
+#elif (DEBUG_SPINLOCKS < 2)
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do { (x)->lock = 1; } while (0)
+#define spin_unlock_wait(x) do { } while (0)
+#define spin_unlock(x) do { (x)->lock = 0; } while (0)
+#define spin_lock_irq(x) do { cli(); spin_lock(x); } while (0)
+#define spin_unlock_irq(x) do { spin_unlock(x); sti(); } while (0)
+
+#define spin_lock_irqsave(x, flags) \
+ do { save_flags(flags); spin_lock_irq(x); } while (0)
+#define spin_unlock_irqrestore(x, flags) \
+ do { spin_unlock(x); restore_flags(flags); } while (0)
+
+#else /* (DEBUG_SPINLOCKS >= 2) */
+
+typedef struct {
+ volatile unsigned int lock;
+ volatile unsigned int babble;
+ const char *module;
+} spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0, 25, __BASE_FILE__ }
+
+#include <linux/kernel.h>
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while (0)
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+
+#define spin_lock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1; restore_flags(__spinflags);} while (0)
+#define spin_unlock_wait(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_wait(%s:%p) deadlock\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} restore_flags(__spinflags);} while (0)
+#define spin_unlock(x) do {unsigned long __spinflags; save_flags(__spinflags); cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(__spinflags);} while (0)
+#define spin_lock_irq(x) do {cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irq(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irq(x) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irq(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; sti();} while (0)
+
+#define spin_lock_irqsave(x,flags) do {save_flags(flags); cli(); if ((x)->lock&&(x)->babble) {printk("%s:%d: spin_lock_irqsave(%s:%p) already locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 1;} while (0)
+#define spin_unlock_irqrestore(x,flags) do {cli(); if (!(x)->lock&&(x)->babble) {printk("%s:%d: spin_unlock_irqrestore(%s:%p) not locked\n", __BASE_FILE__,__LINE__, (x)->module, (x));(x)->babble--;} (x)->lock = 0; restore_flags(flags);} while (0)
+
+#endif /* DEBUG_SPINLOCKS */
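All three variants expose the same API; with DEBUG_SPINLOCKS >= 2, misuse is additionally reported via printk in the form "file.c:42: spin_lock(file.c:0x...) already locked". A minimal usage sketch, with hypothetical names:

    static spinlock_t my_list_lock = SPIN_LOCK_UNLOCKED;

    static void my_list_add(struct my_node *n)      /* hypothetical */
    {
            unsigned long flags;

            spin_lock_irqsave(&my_list_lock, flags);   /* excludes local irqs */
            /* ... manipulate the shared list ... */
            spin_unlock_irqrestore(&my_list_lock, flags);
    }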
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! It is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to take an
+ * irq-safe write-lock, but readers can take non-irqsafe
+ * read-locks.
+ *
+ * Gcc-2.7.x has a nasty bug with empty initializers.
+ */
+#if (__GNUC__ > 2) || (__GNUC__ == 2 && __GNUC_MINOR__ >= 8)
+ typedef struct { } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { }
+#else
+ typedef struct { int gcc_is_buggy; } rwlock_t;
+ #define RW_LOCK_UNLOCKED (rwlock_t) { 0 }
+#endif
+
+#define read_lock(lock) (void)(lock) /* Avoid an "unused variable" warning. */
+#define read_unlock(lock) do { } while(0)
+#define write_lock(lock) (void)(lock) /* Avoid an "unused variable" warning. */
+#define write_unlock(lock) do { } while(0)
+#define read_lock_irq(lock) cli()
+#define read_unlock_irq(lock) sti()
+#define write_lock_irq(lock) cli()
+#define write_unlock_irq(lock) sti()
+
+#define read_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
+#define write_lock_irqsave(lock, flags) \
+ do { save_flags(flags); cli(); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ restore_flags(flags)
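The mixing described in the NOTE above looks like this in practice (lock, table, and function names are hypothetical):

    static rwlock_t my_table_lock = RW_LOCK_UNLOCKED;

    /* Process-context writer: must be irq-safe, or an interrupt reader
     * on the same CPU would deadlock against it. */
    void my_table_update(int key, int val)
    {
            unsigned long flags;

            write_lock_irqsave(&my_table_lock, flags);
            /* ... modify the table ... */
            write_unlock_irqrestore(&my_table_lock, flags);
    }

    /* Interrupt-context reader: a plain read_lock suffices, since
     * readers never exclude one another. */
    int my_table_lookup(int key)
    {
            int val = 0;

            read_lock(&my_table_lock);
            /* ... val = table[key] ... */
            read_unlock(&my_table_lock);
            return val;
    }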
+
+#else /* __SMP__ */
+
+/*
+ * Your basic spinlocks, allowing only a single CPU anywhere
+ */
+
+typedef struct {
+ volatile unsigned int lock;
+} spinlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
+
+#define spin_lock_init(x) do { (x)->lock = 0; } while(0)
+/*
+ * Simple spin lock operations. There are two variants: one clears IRQs
+ * on the local processor, the other does not.
+ *
+ * We make no fairness assumptions. They have a cost.
+ */
+
+#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock)
+
+/*
+ * Cast trick: declaring the asm operand as a large structure tells
+ * gcc that the asm may clobber the whole lock object in memory, so
+ * it will not cache its value in a register across the asm.
+ */
+typedef struct { unsigned long a[100]; } __dummy_lock_t;
+#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
+
+/*
+ * The Intel PIV would benefit from using 'rep nop' here, but on older
+ * and non-Intel processors it is listed as 'undefined', so it cannot
+ * be blindly used. On 2.4 we should add a PIV CPU type for this one.
+ */
+#define spin_lock_string \
+ "\n1:\t" \
+ "lock ; btsl $0,%0\n\t" \
+ "jc 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\t" \
+ "rep; nop\n\t" \
+ "testb $1,%0\n\t" \
+ "jne 2b\n\t" \
+ "jmp 1b\n" \
+ ".previous"
+
+#define spin_unlock_string \
+ "lock ; btrl $0,%0"
+
+#define spin_lock(lock) \
+__asm__ __volatile__( \
+ spin_lock_string \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_unlock(lock) \
+__asm__ __volatile__( \
+ spin_unlock_string \
+ :"=m" (__dummy_lock(lock)))
+
+#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
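In C terms, spin_lock_string is an atomic test-and-set with an out-of-line spin: the lock-prefixed btsl atomically sets bit 0, and the contended path (placed in .text.lock, off the straight-line code) spins with plain reads until the bit clears, then retries. A rough sketch, not atomic as written; test_and_set_bit() and barrier() are assumed available:

    static inline void spin_lock_sketch(spinlock_t *lock)
    {
            while (test_and_set_bit(0, &lock->lock)) {  /* lock ; btsl */
                    while (lock->lock & 1)              /* read-only spin */
                            barrier();                  /* rep; nop slot */
            }
    }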
+
+#define spin_lock_irq(lock) \
+ do { __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irq(lock) \
+ do { spin_unlock(lock); __sti(); } while (0)
+
+#define spin_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); spin_lock(lock); } while (0)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { spin_unlock(lock); __restore_flags(flags); } while (0)
+
+/*
+ * Read-write spinlocks, allowing multiple readers
+ * but only one writer.
+ *
+ * NOTE! It is quite common to have readers in interrupts
+ * but no interrupt writers. For those circumstances we
+ * can "mix" irq-safe locks - any writer needs to take an
+ * irq-safe write-lock, but readers can take non-irqsafe
+ * read-locks.
+ */
+typedef struct {
+ volatile unsigned int lock;
+ unsigned long previous;
+} rwlock_t;
+
+#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
+
+/*
+ * On x86, we implement read-write locks as a 32-bit counter
+ * with the high bit (sign) being the "write" bit.
+ *
+ * The inline assembly is non-obvious. Think about it.
+ */
+#define read_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; incl %0\n\t" \
+ "js 2f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "2:\tlock ; decl %0\n" \
+ "3:\trep; nop\n\t" \
+ "cmpl $0,%0\n\t" \
+ "js 3b\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_unlock(rw) \
+ asm volatile("lock ; decl %0" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_lock(rw) \
+ asm volatile("\n1:\t" \
+ "lock ; btsl $31,%0\n\t" \
+ "jc 4f\n" \
+ "2:\ttestl $0x7fffffff,%0\n\t" \
+ "jne 3f\n" \
+ ".section .text.lock,\"ax\"\n" \
+ "3:\tlock ; btrl $31,%0\n" \
+ "4:\trep; nop\n\t" \
+ "cmp $0,%0\n\t" \
+ "jne 4b\n\t" \
+ "jmp 1b\n" \
+ ".previous" \
+ :"=m" (__dummy_lock(&(rw)->lock)))
+
+#define write_unlock(rw) \
+ asm volatile("lock ; btrl $31,%0":"=m" (__dummy_lock(&(rw)->lock)))
+
+#define read_lock_irq(lock) do { __cli(); read_lock(lock); } while (0)
+#define read_unlock_irq(lock) do { read_unlock(lock); __sti(); } while (0)
+#define write_lock_irq(lock) do { __cli(); write_lock(lock); } while (0)
+#define write_unlock_irq(lock) do { write_unlock(lock); __sti(); } while (0)
+
+#define read_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) \
+ do { read_unlock(lock); __restore_flags(flags); } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { __save_flags(flags); __cli(); write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) \
+ do { write_unlock(lock); __restore_flags(flags); } while (0)
+
+#endif /* __SMP__ */
+#endif /* __ASM_SPINLOCK_H */