Diffstat (limited to 'pfinet/glue-include/asm')
-rw-r--r--  pfinet/glue-include/asm/atomic.h      27
-rw-r--r--  pfinet/glue-include/asm/bitops.h      37
-rw-r--r--  pfinet/glue-include/asm/byteorder.h  154
-rw-r--r--  pfinet/glue-include/asm/checksum.h     5
-rw-r--r--  pfinet/glue-include/asm/errno.h        3
-rw-r--r--  pfinet/glue-include/asm/hardirq.h      1
-rw-r--r--  pfinet/glue-include/asm/init.h         3
-rw-r--r--  pfinet/glue-include/asm/segment.h      0
-rw-r--r--  pfinet/glue-include/asm/spinlock.h    75
-rw-r--r--  pfinet/glue-include/asm/system.h      20
-rw-r--r--  pfinet/glue-include/asm/types.h        1
-rw-r--r--  pfinet/glue-include/asm/uaccess.h     47
12 files changed, 373 insertions, 0 deletions
diff --git a/pfinet/glue-include/asm/atomic.h b/pfinet/glue-include/asm/atomic.h
new file mode 100644
index 00000000..d053854e
--- /dev/null
+++ b/pfinet/glue-include/asm/atomic.h
@@ -0,0 +1,27 @@
+#ifndef _HACK_ASM_ATOMIC_H
+#define _HACK_ASM_ATOMIC_H
+
+/* We don't need atomicity in the Linux code because we serialize all
+ entries to it. */
+
+typedef struct { int counter; } atomic_t;
+
+#define ATOMIC_INIT(i) { (i) }
+
+#define atomic_read(v) ((v)->counter)
+#define atomic_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic_add(int i, atomic_t *v) { v->counter += i; }
+static __inline__ void atomic_sub(int i, atomic_t *v) { v->counter -= i; }
+static __inline__ void atomic_inc(atomic_t *v) { ++v->counter; }
+static __inline__ void atomic_dec(atomic_t *v) { --v->counter; }
+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{ return --v->counter == 0; }
+static __inline__ int atomic_inc_and_test_greater_zero(atomic_t *v)
+{ return ++v->counter > 0; }
+
+#define atomic_clear_mask(mask, addr) (*(addr) &= ~(mask))
+#define atomic_set_mask(mask, addr) (*(addr) |= (mask))
+
+
+#endif
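
Since pfinet serializes all entry into the Linux code behind one global lock, these stubs only need ordinary integer semantics. A minimal sketch of the reference-counting pattern the Linux stack uses them for (the struct and function names are illustrative, not from pfinet):

    #include <stdlib.h>
    #include "atomic.h"

    struct buf { atomic_t users; void *data; };

    static void buf_hold (struct buf *b)
    {
      atomic_inc (&b->users);              /* plain ++, no lock prefix */
    }

    static void buf_release (struct buf *b)
    {
      if (atomic_dec_and_test (&b->users)) /* true when count hits zero */
        free (b);
    }
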
diff --git a/pfinet/glue-include/asm/bitops.h b/pfinet/glue-include/asm/bitops.h
new file mode 100644
index 00000000..8c5a835d
--- /dev/null
+++ b/pfinet/glue-include/asm/bitops.h
@@ -0,0 +1,37 @@
+#ifndef _HACK_ASM_BITOPS_H
+#define _HACK_ASM_BITOPS_H
+
+/* We don't need atomicity in the Linux code because we serialize all
+ entries to it. */
+
+#include <stdint.h>
+
+#define BITOPS_WORD(nr, addr) (((uint32_t *) (addr))[(nr) / 32])
+#define BITOPS_MASK(nr) (1U << ((nr) & 31))
+
+static __inline__ void set_bit (int nr, void *addr)
+{ BITOPS_WORD (nr, addr) |= BITOPS_MASK (nr); }
+
+static __inline__ void clear_bit (int nr, void *addr)
+{ BITOPS_WORD (nr, addr) &= ~BITOPS_MASK (nr); }
+
+static __inline__ void change_bit (int nr, void *addr)
+{ BITOPS_WORD (nr, addr) ^= BITOPS_MASK (nr); }
+
+static __inline__ int test_bit (int nr, void *addr)
+{ return BITOPS_WORD (nr, addr) & BITOPS_MASK (nr); }
+
+static __inline__ int test_and_set_bit (int nr, void *addr)
+{
+ int res = BITOPS_WORD (nr, addr) & BITOPS_MASK (nr);
+ BITOPS_WORD (nr, addr) |= BITOPS_MASK (nr);
+ return res;
+}
+
+#define find_first_zero_bit #error loser
+#define find_next_zero_bit #error loser
+
+#define ffz(word) (ffs (~(unsigned int) (word)) - 1)
+
+
+#endif
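
These operate on 32-bit words with no locking, which again is safe only under pfinet's serialization. A sketch of the two idioms the Linux code relies on, test_and_set_bit and ffz, over a hypothetical bitmap; note that the ffz macro leans on ffs from <strings.h>:

    #include <strings.h>                  /* ffs, used by the ffz macro */
    #include "bitops.h"

    static uint32_t inuse[2];             /* bitmap of 64 hypothetical slots */

    static int grab_slot (int nr)
    {
      /* The old bit value comes back: zero means we just claimed it.  */
      return !test_and_set_bit (nr, inuse);
    }

    static int first_free_in_word0 (void)
    {
      return ffz (inuse[0]);              /* lowest zero bit, or -1 if none */
    }
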
diff --git a/pfinet/glue-include/asm/byteorder.h b/pfinet/glue-include/asm/byteorder.h
new file mode 100644
index 00000000..ee1da1f4
--- /dev/null
+++ b/pfinet/glue-include/asm/byteorder.h
@@ -0,0 +1,154 @@
+/* Provide the specified-byte-order access functions used in the Linux
+ kernel, implemented as macros in terms of the GNU libc facilities. */
+
+#ifndef _HACK_ASM_BYTEORDER_H
+#define _HACK_ASM_BYTEORDER_H 1
+
+#include <endian.h>
+#include <byteswap.h>
+
+#define BO_cvt(bits, from, to, x) \
+ ((from) == (to) ? (u_int##bits##_t) (x) : bswap_##bits (x))
+#define BO_cvtp(bits, from, to, p) \
+ BO_cvt (bits, from, to, *(const u_int##bits##_t *) (p))
+#define BO_cvts(bits, from, to, p) \
+ ({ u_int##bits##_t *_p = (p); *_p = BO_cvt (bits, from, to, *_p); })
+
+#define __cpu_to_le64(x) BO_cvt (64, BYTE_ORDER, LITTLE_ENDIAN, (x))
+#define __le64_to_cpu(x) BO_cvt (64, LITTLE_ENDIAN, BYTE_ORDER, (x))
+#define __cpu_to_le32(x) BO_cvt (32, BYTE_ORDER, LITTLE_ENDIAN, (x))
+#define __le32_to_cpu(x) BO_cvt (32, LITTLE_ENDIAN, BYTE_ORDER, (x))
+#define __cpu_to_le16(x) BO_cvt (16, BYTE_ORDER, LITTLE_ENDIAN, (x))
+#define __le16_to_cpu(x) BO_cvt (16, LITTLE_ENDIAN, BYTE_ORDER, (x))
+#define __cpu_to_be64(x) BO_cvt (64, BYTE_ORDER, BIG_ENDIAN, (x))
+#define __be64_to_cpu(x) BO_cvt (64, BIG_ENDIAN, BYTE_ORDER, (x))
+#define __cpu_to_be32(x) BO_cvt (32, BYTE_ORDER, BIG_ENDIAN, (x))
+#define __be32_to_cpu(x) BO_cvt (32, BIG_ENDIAN, BYTE_ORDER, (x))
+#define __cpu_to_be16(x) BO_cvt (16, BYTE_ORDER, BIG_ENDIAN, (x))
+#define __be16_to_cpu(x) BO_cvt (16, BIG_ENDIAN, BYTE_ORDER, (x))
+#define __cpu_to_le64p(p) BO_cvtp (64, BYTE_ORDER, LITTLE_ENDIAN, (p))
+#define __le64_to_cpup(p) BO_cvtp (64, LITTLE_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_le32p(p) BO_cvtp (32, BYTE_ORDER, LITTLE_ENDIAN, (p))
+#define __le32_to_cpup(p) BO_cvtp (32, LITTLE_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_le16p(p) BO_cvtp (16, BYTE_ORDER, LITTLE_ENDIAN, (p))
+#define __le16_to_cpup(p) BO_cvtp (16, LITTLE_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_be64p(p) BO_cvtp (64, BYTE_ORDER, BIG_ENDIAN, (p))
+#define __be64_to_cpup(p) BO_cvtp (64, BIG_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_be32p(p) BO_cvtp (32, BYTE_ORDER, BIG_ENDIAN, (p))
+#define __be32_to_cpup(p) BO_cvtp (32, BIG_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_be16p(p) BO_cvtp (16, BYTE_ORDER, BIG_ENDIAN, (p))
+#define __be16_to_cpup(p) BO_cvtp (16, BIG_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_le64s(p) BO_cvts (64, BYTE_ORDER, LITTLE_ENDIAN, (p))
+#define __le64_to_cpus(p) BO_cvts (64, LITTLE_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_le32s(p) BO_cvts (32, BYTE_ORDER, LITTLE_ENDIAN, (p))
+#define __le32_to_cpus(p) BO_cvts (32, LITTLE_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_le16s(p) BO_cvts (16, BYTE_ORDER, LITTLE_ENDIAN, (p))
+#define __le16_to_cpus(p) BO_cvts (16, LITTLE_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_be64s(p) BO_cvts (64, BYTE_ORDER, BIG_ENDIAN, (p))
+#define __be64_to_cpus(p) BO_cvts (64, BIG_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_be32s(p) BO_cvts (32, BYTE_ORDER, BIG_ENDIAN, (p))
+#define __be32_to_cpus(p) BO_cvts (32, BIG_ENDIAN, BYTE_ORDER, (p))
+#define __cpu_to_be16s(p) BO_cvts (16, BYTE_ORDER, BIG_ENDIAN, (p))
+#define __be16_to_cpus(p) BO_cvts (16, BIG_ENDIAN, BYTE_ORDER, (p))
+
+#define cpu_to_le64 __cpu_to_le64
+#define le64_to_cpu __le64_to_cpu
+#define cpu_to_le32 __cpu_to_le32
+#define le32_to_cpu __le32_to_cpu
+#define cpu_to_le16 __cpu_to_le16
+#define le16_to_cpu __le16_to_cpu
+#define cpu_to_be64 __cpu_to_be64
+#define be64_to_cpu __be64_to_cpu
+#define cpu_to_be32 __cpu_to_be32
+#define be32_to_cpu __be32_to_cpu
+#define cpu_to_be16 __cpu_to_be16
+#define be16_to_cpu __be16_to_cpu
+#define cpu_to_le64p __cpu_to_le64p
+#define le64_to_cpup __le64_to_cpup
+#define cpu_to_le32p __cpu_to_le32p
+#define le32_to_cpup __le32_to_cpup
+#define cpu_to_le16p __cpu_to_le16p
+#define le16_to_cpup __le16_to_cpup
+#define cpu_to_be64p __cpu_to_be64p
+#define be64_to_cpup __be64_to_cpup
+#define cpu_to_be32p __cpu_to_be32p
+#define be32_to_cpup __be32_to_cpup
+#define cpu_to_be16p __cpu_to_be16p
+#define be16_to_cpup __be16_to_cpup
+#define cpu_to_le64s __cpu_to_le64s
+#define le64_to_cpus __le64_to_cpus
+#define cpu_to_le32s __cpu_to_le32s
+#define le32_to_cpus __le32_to_cpus
+#define cpu_to_le16s __cpu_to_le16s
+#define le16_to_cpus __le16_to_cpus
+#define cpu_to_be64s __cpu_to_be64s
+#define be64_to_cpus __be64_to_cpus
+#define cpu_to_be32s __cpu_to_be32s
+#define be32_to_cpus __be32_to_cpus
+#define cpu_to_be16s __cpu_to_be16s
+#define be16_to_cpus __be16_to_cpus
+
+
+#if BYTE_ORDER == BIG_ENDIAN
+# define __BIG_ENDIAN_BITFIELD
+#elif BYTE_ORDER == LITTLE_ENDIAN
+# define __LITTLE_ENDIAN_BITFIELD
+#else
+# error __FOO_ENDIAN_BITFIELD
+#endif
+
+
+#include <netinet/in.h> /* for htonl et al */
+
+/* Though the optimized macros defined by glibc do the constant magic,
+ there are places in the Linux code that use these in constant-only
+ places like initializers, and the ({...}) expressions the macros use are
+ not valid in those contexts. */
+#if BYTE_ORDER == BIG_ENDIAN
+# if !defined(__constant_htonl)
+# define __constant_htonl(x) (x)
+# endif
+# if !defined(__constant_htons)
+# define __constant_htons(x) (x)
+# endif
+#elif BYTE_ORDER == LITTLE_ENDIAN
+# if !defined(__constant_htonl)
+# define __constant_htonl(x) \
+ ((unsigned long int)((((unsigned long int)(x) & 0x000000ffU) << 24) | \
+ (((unsigned long int)(x) & 0x0000ff00U) << 8) | \
+ (((unsigned long int)(x) & 0x00ff0000U) >> 8) | \
+ (((unsigned long int)(x) & 0xff000000U) >> 24)))
+# endif
+# if !defined(__constant_htons)
+# define __constant_htons(x) \
+ ((unsigned short int)((((unsigned short int)(x) & 0x00ff) << 8) | \
+ (((unsigned short int)(x) & 0xff00) >> 8)))
+# endif
+#else
+# error "Don't know if bytes are big- or little-endian!"
+#endif
+
+/* The transformation is the same in both directions. */
+#define __constant_ntohl __constant_htonl
+#define __constant_ntohs __constant_htons
+
+
+/* Some Linux code (e.g. <net/tcp.h>) uses #ifdef __BIG_ENDIAN et al.
+ This is not real wonderful with the glibc definitions, where
+ __BIG_ENDIAN et al are always defined (as values for __BYTE_ORDER). */
+#if BYTE_ORDER == BIG_ENDIAN
+#undef __LITTLE_ENDIAN
+#elif BYTE_ORDER == LITTLE_ENDIAN
+#undef __BIG_ENDIAN
+#endif
+#undef __PDP_ENDIAN
+
+/* Since we've now broken anything that does glibc-style tests,
+ make sure they break loudly rather than silently. Any headers
+ that need __BYTE_ORDER will just have to be included before
+ we diddle with __BIG_ENDIAN or __LITTLE_ENDIAN above. */
+#undef __BYTE_ORDER
+#define __BYTE_ORDER ?????crash?????
+
+
+#endif /* asm/byteorder.h */
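
The point of the __constant_ macros above: glibc's optimized htons can expand to a ({...}) statement expression, which is not valid in a static initializer, while the __constant_ form is a plain constant expression. A sketch of the kind of table the Linux code initializes this way (the array itself is illustrative):

    #include <netinet/in.h>
    #include "byteorder.h"

    /* Ethernet protocol numbers in network byte order, usable in a static
       initializer because __constant_htons is an ordinary constant
       expression.  */
    static const unsigned short eth_protos[] = {
      __constant_htons (0x0800),          /* IPv4 */
      __constant_htons (0x0806),          /* ARP  */
    };
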
diff --git a/pfinet/glue-include/asm/checksum.h b/pfinet/glue-include/asm/checksum.h
new file mode 100644
index 00000000..5bcf7551
--- /dev/null
+++ b/pfinet/glue-include/asm/checksum.h
@@ -0,0 +1,5 @@
+/* This is the only file from the Linux include/asm-* directory
+ that we use, so we use this magic file here rather than making a
+ symlink asm -> .../linux-src/include/asm-SYSTYPE somewhere. */
+
+#include "../../linux-src/include/asm-" _HURD_SYSTYPE "/checksum.h"
diff --git a/pfinet/glue-include/asm/errno.h b/pfinet/glue-include/asm/errno.h
new file mode 100644
index 00000000..7afb6fdc
--- /dev/null
+++ b/pfinet/glue-include/asm/errno.h
@@ -0,0 +1,3 @@
+/* This is used only by checksum.S; the C code uses <linux/errno.h>. */
+
+#define EFAULT 42 /* only used in unreached code */
diff --git a/pfinet/glue-include/asm/hardirq.h b/pfinet/glue-include/asm/hardirq.h
new file mode 100644
index 00000000..c73d7dcb
--- /dev/null
+++ b/pfinet/glue-include/asm/hardirq.h
@@ -0,0 +1 @@
+#include <linux/interrupt.h>
diff --git a/pfinet/glue-include/asm/init.h b/pfinet/glue-include/asm/init.h
new file mode 100644
index 00000000..2331be7c
--- /dev/null
+++ b/pfinet/glue-include/asm/init.h
@@ -0,0 +1,3 @@
+#define __init
+#define __initdata
+#define __initfunc(x) x
diff --git a/pfinet/glue-include/asm/segment.h b/pfinet/glue-include/asm/segment.h
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/pfinet/glue-include/asm/segment.h
diff --git a/pfinet/glue-include/asm/spinlock.h b/pfinet/glue-include/asm/spinlock.h
new file mode 100644
index 00000000..937c5d05
--- /dev/null
+++ b/pfinet/glue-include/asm/spinlock.h
@@ -0,0 +1,75 @@
+#ifndef _HACK_ASM_SPINLOCK_H_
+#define _HACK_ASM_SPINLOCK_H_
+
+#include <cthreads.h>
+
+typedef struct { } spinlock_t;
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { }
+
+#undef spin_lock_init
+#undef spin_lock
+#undef spin_unlock
+
+#define spin_lock_init(lock) ((void) (lock))
+#define spin_lock(lock) ((void) (lock))
+#define spin_trylock(lock) ((void) (lock), 1)
+#define spin_unlock_wait(lock) ((void) (lock))
+#define spin_unlock(lock) ((void) (lock))
+#define spin_lock_irq(lock) ((void) (lock))
+#define spin_unlock_irq(lock) ((void) (lock))
+#define spin_lock_irqsave(lock, flags) ((void) (lock), (void) (flags))
+#define spin_unlock_irqrestore(lock, flags) ((void) (lock), (void) (flags))
+
+typedef struct { } rwlock_t;
+#define read_lock(rw) do { } while(0)
+#define write_lock(rw) do { } while(0)
+#define write_unlock(rw) do { } while(0)
+#define read_unlock(rw) do { } while(0)
+
+#if 0
+#include <rwlock.h>
+
+typedef struct mutex spinlock_t;
+
+#undef spin_lock_init
+#undef spin_lock
+#undef spin_unlock
+
+#define SPIN_LOCK_UNLOCKED MUTEX_INITIALIZER
+#define spin_lock_init(lock) ({ __mutex_init (lock); })
+#define spin_lock(lock) ({ __mutex_lock (lock); })
+#define spin_trylock(lock) ({ __mutex_trylock (lock); })
+#define spin_unlock_wait(lock) ({ __mutex_unlock (lock); })
+#define spin_unlock(lock) ({ __mutex_unlock (lock); })
+#define spin_lock_irq(lock) ({ __mutex_lock (lock); })
+#define spin_unlock_irq(lock) ({ __mutex_unlock (lock); })
+
+#define spin_lock_irqsave(lock, flags) \
+ do { flags = 0; __mutex_lock (lock); } while (0)
+#define spin_unlock_irqrestore(lock, flags) ({ __mutex_unlock (lock); })
+
+
+typedef struct rwlock rwlock_t;
+
+#define read_lock(rw) rwlock_reader_lock(rw)
+#define write_lock(rw) rwlock_writer_lock(rw)
+#define write_unlock(rw) rwlock_writer_unlock(rw)
+#define read_unlock(rw) rwlock_reader_unlock(rw)
+
+#endif
+
+
+#define read_lock_irq(lock) read_lock(lock)
+#define read_unlock_irq(lock) read_unlock(lock)
+#define write_lock_irq(lock) write_lock(lock)
+#define write_unlock_irq(lock) write_unlock(lock)
+
+#define read_lock_irqsave(lock, flags) \
+ do { (flags) = 0; read_lock(lock); } while (0)
+#define read_unlock_irqrestore(lock, flags) read_unlock(lock)
+#define write_lock_irqsave(lock, flags) \
+ do { (flags) = 0; write_lock(lock); } while (0)
+#define write_unlock_irqrestore(lock, flags) write_unlock(lock)
+
+
+#endif
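
All of these compile away: the glue leaves the lock-taking call sites in the Linux code intact but turns them into no-ops, since pfinet's single global lock already serializes everything. A sketch of what a typical Linux-side critical section reduces to under these definitions (the function is hypothetical):

    #include "spinlock.h"

    static spinlock_t q_lock;             /* empty struct; nothing to set up */

    static void enqueue (void)
    {
      unsigned long flags;
      spin_lock_init (&q_lock);
      spin_lock_irqsave (&q_lock, flags); /* expands to two (void) casts */
      /* ... touch the queue; already serialized by the global lock ... */
      spin_unlock_irqrestore (&q_lock, flags);
    }
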
diff --git a/pfinet/glue-include/asm/system.h b/pfinet/glue-include/asm/system.h
new file mode 100644
index 00000000..1a5d61cd
--- /dev/null
+++ b/pfinet/glue-include/asm/system.h
@@ -0,0 +1,20 @@
+#ifndef _HACK_ASM_SYSTEM_H
+#define _HACK_ASM_SYSTEM_H
+
+/* We don't need atomicity in the Linux code because we serialize all
+ entries to it. */
+
+#include <stdint.h>
+
+#define xchg(ptr, x) \
+ ({ \
+ __typeof__ (*(ptr)) *_ptr = (ptr), _x = *_ptr; \
+ *_ptr = (x); _x; \
+ })
+
+#define mb() ((void) 0) /* memory barrier */
+#define rmb() mb()
+#define wmb() mb()
+
+
+#endif
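
With only one thread ever inside the Linux code, xchg can be a plain read-then-write with no bus lock. A sketch of its behavior (the flag variable is illustrative):

    #include "system.h"

    static int pending = 1;

    static int take_pending (void)
    {
      /* Returns the old value and stores the new one; not atomic, which
         is fine under pfinet's single-lock discipline.  */
      return xchg (&pending, 0);          /* 1 the first time, then 0 */
    }
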
diff --git a/pfinet/glue-include/asm/types.h b/pfinet/glue-include/asm/types.h
new file mode 100644
index 00000000..ee9b980a
--- /dev/null
+++ b/pfinet/glue-include/asm/types.h
@@ -0,0 +1 @@
+#include <linux/types.h>
diff --git a/pfinet/glue-include/asm/uaccess.h b/pfinet/glue-include/asm/uaccess.h
new file mode 100644
index 00000000..d8373797
--- /dev/null
+++ b/pfinet/glue-include/asm/uaccess.h
@@ -0,0 +1,47 @@
+#ifndef _HACK_ASM_UACCESS_H_
+#define _HACK_ASM_UACCESS_H_
+
+#include <linux/mm.h>
+
+
+#define MAKE_MM_SEG(s) XXX
+#define KERNEL_DS XXX
+#define USER_DS XXX
+
+#define get_ds() XXX
+#define get_fs() XXX
+#define set_fs(x) XXX
+
+#define segment_eq(a,b) XXX
+
+extern int __verify_write(const void *, unsigned long);
+#define __verify_write XXX
+
+#define __addr_ok(addr) (1)
+#define __range_ok(addr,size) (1)
+#define access_ok(type,addr,size) (1)
+
+#define put_user(x,ptr) (*(ptr) = (x), 0)
+#define get_user(x,ptr) ((x) = *(ptr), 0)
+
+/*
+ * The "xxx_ret" versions return the constant given as the third argument
+ * if something bad happens.  These macros can be optimized for the
+ * case of simply returning from the function in which xxx_ret is used.
+ */
+
+#define put_user_ret(x,ptr,ret) ({ if (put_user(x,ptr)) return ret; })
+
+#define get_user_ret(x,ptr,ret) ({ if (get_user(x,ptr)) return ret; })
+
+
+#define copy_to_user(to,from,n) (memcpy ((to), (from), (n)), 0)
+#define copy_from_user(to,from,n) (memcpy ((to), (from), (n)), 0)
+#define clear_user(mem, len) (bzero ((mem), (len)), 0)
+
+#define copy_to_user_ret(to,from,n,retval) ({ if (copy_to_user(to,from,n)) return retval; })
+
+#define copy_from_user_ret(to,from,n,retval) ({ if (copy_from_user(to,from,n)) return retval; })
+
+
+#endif
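
Because pfinet is a user-space server, the "user" pointers the Linux code hands these macros are ordinary pointers into its own address space: the access checks are constant true and the copies are plain assignments and memcpy. A sketch of a hypothetical handler showing how the checks fold away:

    #include "uaccess.h"

    static int report_counter (int *uptr, int value)
    {
      if (!access_ok (VERIFY_WRITE, uptr, sizeof *uptr))
        return -1;                        /* never taken: access_ok is 1 */
      put_user_ret (value, uptr, -1);     /* put_user returns 0, so this
                                             early return never fires */
      return 0;
    }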