Diffstat (limited to 'libdde-linux26/contrib/include/asm-generic')
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/4level-fixup.h  37
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/Kbuild  13
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/Kbuild.asm  38
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/atomic.h  258
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/audit_change_attr.h  22
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/audit_dir_write.h  18
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/audit_read.h  8
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/audit_signal.h  3
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/audit_write.h  13
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops.h  33
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/__ffs.h  43
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/__fls.h  43
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/atomic.h  188
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/ext2-atomic.h  22
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/ext2-non-atomic.h  20
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/ffs.h  41
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/ffz.h  12
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/find.h  15
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/fls.h  41
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/fls64.h  36
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/hweight.h  11
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/le.h  57
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/lock.h  45
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/minix-le.h  17
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/minix.h  15
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/non-atomic.h  108
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bitops/sched.h  31
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/bug.h  143
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/cmpxchg-local.h  65
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/cmpxchg.h  22
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/cputime.h  69
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/device.h  12
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/div64.h  58
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/dma-coherent.h  32
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/dma-mapping-broken.h  82
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/dma-mapping.h  308
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/emergency-restart.h  9
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/errno-base.h  39
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/errno.h  109
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/fcntl.h  151
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/futex.h  56
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/gpio.h  188
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/ide_iops.h  38
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/int-l64.h  71
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/int-ll64.h  76
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/ioctl.h  105
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/iomap.h  72
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/irq_regs.h  37
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/kdebug.h  9
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/libata-portmap.h  7
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/local.h  74
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/memory_model.h  77
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/mm_hooks.h  18
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/mman.h  41
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/mutex-dec.h  90
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/mutex-null.h  19
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/mutex-xchg.h  111
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/page.h  24
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/pci-dma-compat.h  107
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/pci.h  55
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/percpu.h  83
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/pgtable-nopmd.h  69
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/pgtable-nopud.h  61
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/pgtable.h  344
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/poll.h  37
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/resource.h  94
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/rtc.h  218
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/sections.h  23
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/siginfo.h  296
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/signal.h  28
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/statfs.h  82
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/syscall.h  141
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/termios.h  77
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/tlb.h  148
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/topology.h  82
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/uaccess.h  26
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/vmlinux.lds.h  441
-rw-r--r--  libdde-linux26/contrib/include/asm-generic/xor.h  718
78 files changed, 6530 insertions(+), 0 deletions(-)
diff --git a/libdde-linux26/contrib/include/asm-generic/4level-fixup.h b/libdde-linux26/contrib/include/asm-generic/4level-fixup.h
new file mode 100644
index 00000000..9d40e879
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/4level-fixup.h
@@ -0,0 +1,37 @@
+#ifndef _4LEVEL_FIXUP_H
+#define _4LEVEL_FIXUP_H
+
+#define __ARCH_HAS_4LEVEL_HACK
+#define __PAGETABLE_PUD_FOLDED
+
+#define PUD_SIZE PGDIR_SIZE
+#define PUD_MASK PGDIR_MASK
+#define PTRS_PER_PUD 1
+
+#define pud_t pgd_t
+
+#define pmd_alloc(mm, pud, address) \
+ ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
+ NULL: pmd_offset(pud, address))
+
+#define pud_alloc(mm, pgd, address) (pgd)
+#define pud_offset(pgd, start) (pgd)
+#define pud_none(pud) 0
+#define pud_bad(pud) 0
+#define pud_present(pud) 1
+#define pud_ERROR(pud) do { } while (0)
+#define pud_clear(pud) pgd_clear(pud)
+#define pud_val(pud) pgd_val(pud)
+#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
+#define pud_page(pud) pgd_page(pud)
+#define pud_page_vaddr(pud) pgd_page_vaddr(pud)
+
+#undef pud_free_tlb
+#define pud_free_tlb(tlb, x) do { } while (0)
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif
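
With the pud level folded this way, generic four-level page-table walkers compile unchanged on a three-level architecture: pud_alloc() and pud_offset() simply hand back the pgd entry. A minimal sketch of such a walk, assuming the standard 2.6 walker helpers (walk_to_pte and its locals are illustrative names, not part of this header):

	/* Hypothetical descent from an mm and address down to a pte pointer. */
	static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud = pud_offset(pgd, addr);	/* expands to (pgd) here */
		pmd_t *pmd = pmd_offset(pud, addr);

		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;
		return pte_offset_map(pmd, addr);
	}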
diff --git a/libdde-linux26/contrib/include/asm-generic/Kbuild b/libdde-linux26/contrib/include/asm-generic/Kbuild
new file mode 100644
index 00000000..4c9932a2
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/Kbuild
@@ -0,0 +1,13 @@
+header-y += errno-base.h
+header-y += errno.h
+header-y += fcntl.h
+header-y += ioctl.h
+header-y += mman.h
+header-y += poll.h
+header-y += signal.h
+header-y += statfs.h
+
+unifdef-y += int-l64.h
+unifdef-y += int-ll64.h
+unifdef-y += resource.h
+unifdef-y += siginfo.h
diff --git a/libdde-linux26/contrib/include/asm-generic/Kbuild.asm b/libdde-linux26/contrib/include/asm-generic/Kbuild.asm
new file mode 100644
index 00000000..70d18553
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/Kbuild.asm
@@ -0,0 +1,38 @@
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/kvm.h \
+ $(srctree)/include/asm-$(SRCARCH)/kvm.h),)
+header-y += kvm.h
+endif
+
+ifneq ($(wildcard $(srctree)/arch/$(SRCARCH)/include/asm/a.out.h \
+ $(srctree)/include/asm-$(SRCARCH)/a.out.h),)
+unifdef-y += a.out.h
+endif
+unifdef-y += auxvec.h
+unifdef-y += byteorder.h
+unifdef-y += errno.h
+unifdef-y += fcntl.h
+unifdef-y += ioctl.h
+unifdef-y += ioctls.h
+unifdef-y += ipcbuf.h
+unifdef-y += mman.h
+unifdef-y += msgbuf.h
+unifdef-y += param.h
+unifdef-y += poll.h
+unifdef-y += posix_types.h
+unifdef-y += ptrace.h
+unifdef-y += resource.h
+unifdef-y += sembuf.h
+unifdef-y += setup.h
+unifdef-y += shmbuf.h
+unifdef-y += sigcontext.h
+unifdef-y += siginfo.h
+unifdef-y += signal.h
+unifdef-y += socket.h
+unifdef-y += sockios.h
+unifdef-y += stat.h
+unifdef-y += statfs.h
+unifdef-y += swab.h
+unifdef-y += termbits.h
+unifdef-y += termios.h
+unifdef-y += types.h
+unifdef-y += unistd.h
diff --git a/libdde-linux26/contrib/include/asm-generic/atomic.h b/libdde-linux26/contrib/include/asm-generic/atomic.h
new file mode 100644
index 00000000..7abdaa91
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/atomic.h
@@ -0,0 +1,258 @@
+#ifndef _ASM_GENERIC_ATOMIC_H
+#define _ASM_GENERIC_ATOMIC_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ * Christoph Lameter
+ *
+ * Allows providing arch-independent atomic definitions without the need to
+ * edit all arch-specific atomic.h files.
+ */
+
+#include <asm/types.h>
+
+/*
+ * Support for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_sub(i, v);
+}
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+	(atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
+#define atomic_long_xchg(v, new) \
+	(atomic64_xchg((atomic64_t *)(v), (new)))
+
+#else /* BITS_PER_LONG == 64 */
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_sub(i, v);
+}
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+ (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
+#define atomic_long_xchg(v, new) \
+ (atomic_xchg((atomic_t *)(v), (new)))
+
+#endif /* BITS_PER_LONG == 64 */
+
+#endif /* _ASM_GENERIC_ATOMIC_H */
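
Either branch yields the same word-sized API, so callers can keep a native-long counter without caring about BITS_PER_LONG. A minimal usage sketch (the counter and functions are illustrative, not from this header):

	static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

	static void record_event(void)
	{
		atomic_long_inc(&nr_events);
	}

	static long read_events(void)
	{
		return atomic_long_read(&nr_events);
	}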
diff --git a/libdde-linux26/contrib/include/asm-generic/audit_change_attr.h b/libdde-linux26/contrib/include/asm-generic/audit_change_attr.h
new file mode 100644
index 00000000..50764550
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/audit_change_attr.h
@@ -0,0 +1,22 @@
+__NR_chmod,
+__NR_fchmod,
+#ifdef __NR_chown
+__NR_chown,
+__NR_fchown,
+__NR_lchown,
+#endif
+__NR_setxattr,
+__NR_lsetxattr,
+__NR_fsetxattr,
+__NR_removexattr,
+__NR_lremovexattr,
+__NR_fremovexattr,
+#ifdef __NR_fchownat
+__NR_fchownat,
+__NR_fchmodat,
+#endif
+#ifdef __NR_chown32
+__NR_chown32,
+__NR_fchown32,
+__NR_lchown32,
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/audit_dir_write.h b/libdde-linux26/contrib/include/asm-generic/audit_dir_write.h
new file mode 100644
index 00000000..6621bd82
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/audit_dir_write.h
@@ -0,0 +1,18 @@
+__NR_rename,
+__NR_mkdir,
+__NR_rmdir,
+#ifdef __NR_creat
+__NR_creat,
+#endif
+__NR_link,
+__NR_unlink,
+__NR_symlink,
+__NR_mknod,
+#ifdef __NR_mkdirat
+__NR_mkdirat,
+__NR_mknodat,
+__NR_unlinkat,
+__NR_renameat,
+__NR_linkat,
+__NR_symlinkat,
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/audit_read.h b/libdde-linux26/contrib/include/asm-generic/audit_read.h
new file mode 100644
index 00000000..0e87464d
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/audit_read.h
@@ -0,0 +1,8 @@
+__NR_readlink,
+__NR_quotactl,
+__NR_listxattr,
+__NR_llistxattr,
+__NR_flistxattr,
+__NR_getxattr,
+__NR_lgetxattr,
+__NR_fgetxattr,
diff --git a/libdde-linux26/contrib/include/asm-generic/audit_signal.h b/libdde-linux26/contrib/include/asm-generic/audit_signal.h
new file mode 100644
index 00000000..6feab7f1
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/audit_signal.h
@@ -0,0 +1,3 @@
+__NR_kill,
+__NR_tgkill,
+__NR_tkill,
diff --git a/libdde-linux26/contrib/include/asm-generic/audit_write.h b/libdde-linux26/contrib/include/asm-generic/audit_write.h
new file mode 100644
index 00000000..c5f1c2c9
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/audit_write.h
@@ -0,0 +1,13 @@
+#include <asm-generic/audit_dir_write.h>
+__NR_acct,
+#ifdef __NR_swapon
+__NR_swapon,
+#endif
+__NR_quotactl,
+__NR_truncate,
+#ifdef __NR_truncate64
+__NR_truncate64,
+#endif
+#ifdef __NR_bind
+__NR_bind, /* bind can affect fs object only in one way... */
+#endif
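
These audit_*.h headers are not ordinary declarations but bare, comma-terminated syscall lists, meant to be textually included inside an array initializer. A sketch of the consuming side, modeled on how the kernel's lib/audit.c uses them (the array name is illustrative):

	static unsigned dir_class[] = {
	#include <asm-generic/audit_dir_write.h>
	~0U	/* terminator */
	};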
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops.h b/libdde-linux26/contrib/include/asm-generic/bitops.h
new file mode 100644
index 00000000..c9f369c4
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops.h
@@ -0,0 +1,33 @@
+#ifndef _ASM_GENERIC_BITOPS_H_
+#define _ASM_GENERIC_BITOPS_H_
+
+/*
+ * For the benefit of those who are trying to port Linux to another
+ * architecture, here are some C-language equivalents. You should
+ * recode these in the native assembly language, if at all possible.
+ *
+ * C language equivalents written by Theodore Ts'o, 9/26/92
+ */
+
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/hweight.h>
+#include <asm-generic/bitops/lock.h>
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+#include <asm-generic/bitops/minix.h>
+
+#endif /* _ASM_GENERIC_BITOPS_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/__ffs.h b/libdde-linux26/contrib/include/asm-generic/bitops/__ffs.h
new file mode 100644
index 00000000..937d7c43
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/__ffs.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_GENERIC_BITOPS___FFS_H_
+#define _ASM_GENERIC_BITOPS___FFS_H_
+
+#include <asm/types.h>
+
+/**
+ * __ffs - find first set bit in word.
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __ffs(unsigned long word)
+{
+ int num = 0;
+
+#if BITS_PER_LONG == 64
+ if ((word & 0xffffffff) == 0) {
+ num += 32;
+ word >>= 32;
+ }
+#endif
+ if ((word & 0xffff) == 0) {
+ num += 16;
+ word >>= 16;
+ }
+ if ((word & 0xff) == 0) {
+ num += 8;
+ word >>= 8;
+ }
+ if ((word & 0xf) == 0) {
+ num += 4;
+ word >>= 4;
+ }
+ if ((word & 0x3) == 0) {
+ num += 2;
+ word >>= 2;
+ }
+ if ((word & 0x1) == 0)
+ num += 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FFS_H_ */
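
Each branch halves the remaining search window, so the lowest set bit is found in log2(BITS_PER_LONG) constant-time steps. A quick worked example:

	unsigned long word = 0x18;		/* binary 11000 */
	unsigned long bit  = __ffs(word);	/* 3: bits 0-2 clear, bit 3 set */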
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/__fls.h b/libdde-linux26/contrib/include/asm-generic/bitops/__fls.h
new file mode 100644
index 00000000..a60a7ccb
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/__fls.h
@@ -0,0 +1,43 @@
+#ifndef _ASM_GENERIC_BITOPS___FLS_H_
+#define _ASM_GENERIC_BITOPS___FLS_H_
+
+#include <asm/types.h>
+
+/**
+ * __fls - find last (most-significant) set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static __always_inline unsigned long __fls(unsigned long word)
+{
+ int num = BITS_PER_LONG - 1;
+
+#if BITS_PER_LONG == 64
+ if (!(word & (~0ul << 32))) {
+ num -= 32;
+ word <<= 32;
+ }
+#endif
+ if (!(word & (~0ul << (BITS_PER_LONG-16)))) {
+ num -= 16;
+ word <<= 16;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-8)))) {
+ num -= 8;
+ word <<= 8;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-4)))) {
+ num -= 4;
+ word <<= 4;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-2)))) {
+ num -= 2;
+ word <<= 2;
+ }
+ if (!(word & (~0ul << (BITS_PER_LONG-1))))
+ num -= 1;
+ return num;
+}
+
+#endif /* _ASM_GENERIC_BITOPS___FLS_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/atomic.h b/libdde-linux26/contrib/include/asm-generic/bitops/atomic.h
new file mode 100644
index 00000000..4657f3e4
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/atomic.h
@@ -0,0 +1,188 @@
+#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <asm/types.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h> /* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+# define ATOMIC_HASH_SIZE 4
+# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
+/* Can't use raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do { \
+ raw_spinlock_t *s = ATOMIC_HASH(l); \
+ local_irq_save(f); \
+ __raw_spin_lock(s); \
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do { \
+ raw_spinlock_t *s = ATOMIC_HASH(l); \
+ __raw_spin_unlock(s); \
+ local_irq_restore(f); \
+} while(0)
+
+
+#else
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
+
+/*
+ * NMI events can occur at any time, including when interrupts have been
+ * disabled by *_irqsave(). So you can get NMI events occurring while a
+ * *_bit function is holding a spin lock. If the NMI handler also wants
+ * to do bit manipulation (and they do) then you can get a deadlock
+ * between the original caller of *_bit() and the NMI handler.
+ *
+ * by Keith Owens
+ */
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ *p |= mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ *p &= ~mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on other architectures than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ *p ^= mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on other architectures than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old;
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ old = *p;
+ *p = old | mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It can be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old;
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ old = *p;
+ *p = old & ~mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+
+ return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old;
+ unsigned long flags;
+
+ _atomic_spin_lock_irqsave(p, flags);
+ old = *p;
+ *p = old ^ mask;
+ _atomic_spin_unlock_irqrestore(p, flags);
+
+ return (old & mask) != 0;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
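
Whether backed by the hashed spinlocks above (SMP) or plain interrupt disabling (UP), these bitops are what drivers typically use for small flag words. A hedged sketch (MYDEV_BUSY and the functions are invented for illustration):

	#define MYDEV_BUSY	0	/* bit number in the flags word */

	static unsigned long mydev_flags;

	static int mydev_try_start(void)
	{
		/* Atomically claim the device; an old value of 1 means busy. */
		if (test_and_set_bit(MYDEV_BUSY, &mydev_flags))
			return -EBUSY;
		return 0;
	}

	static void mydev_stop(void)
	{
		clear_bit(MYDEV_BUSY, &mydev_flags);
	}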
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/ext2-atomic.h b/libdde-linux26/contrib/include/asm-generic/bitops/ext2-atomic.h
new file mode 100644
index 00000000..ab1c875e
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/ext2-atomic.h
@@ -0,0 +1,22 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ ({ \
+ int ret; \
+ spin_lock(lock); \
+ ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
+ spin_unlock(lock); \
+ ret; \
+ })
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/ext2-non-atomic.h b/libdde-linux26/contrib/include/asm-generic/bitops/ext2-non-atomic.h
new file mode 100644
index 00000000..63cf8224
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/ext2-non-atomic.h
@@ -0,0 +1,20 @@
+#ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_
+
+#include <asm-generic/bitops/le.h>
+
+#define ext2_set_bit(nr,addr) \
+ generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define ext2_clear_bit(nr,addr) \
+ generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
+
+#define ext2_test_bit(nr,addr) \
+ generic_test_le_bit((nr),(unsigned long *)(addr))
+#define ext2_find_first_zero_bit(addr, size) \
+ generic_find_first_zero_le_bit((unsigned long *)(addr), (size))
+#define ext2_find_next_zero_bit(addr, size, off) \
+ generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
+#define ext2_find_next_bit(addr, size, off) \
+ generic_find_next_le_bit((unsigned long *)(addr), (size), (off))
+
+#endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/ffs.h b/libdde-linux26/contrib/include/asm-generic/bitops/ffs.h
new file mode 100644
index 00000000..fbbb43af
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/ffs.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_BITOPS_FFS_H_
+#define _ASM_GENERIC_BITOPS_FFS_H_
+
+/**
+ * ffs - find first bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs
+ * routines, and therefore differs in spirit from ffz (man ffs).
+ */
+static inline int ffs(int x)
+{
+ int r = 1;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff)) {
+ x >>= 16;
+ r += 16;
+ }
+ if (!(x & 0xff)) {
+ x >>= 8;
+ r += 8;
+ }
+ if (!(x & 0xf)) {
+ x >>= 4;
+ r += 4;
+ }
+ if (!(x & 3)) {
+ x >>= 2;
+ r += 2;
+ }
+ if (!(x & 1)) {
+ x >>= 1;
+ r += 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FFS_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/ffz.h b/libdde-linux26/contrib/include/asm-generic/bitops/ffz.h
new file mode 100644
index 00000000..6744bd4c
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/ffz.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_GENERIC_BITOPS_FFZ_H_
+#define _ASM_GENERIC_BITOPS_FFZ_H_
+
+/*
+ * ffz - find first zero in word.
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+#define ffz(x) __ffs(~(x))
+
+#endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/find.h b/libdde-linux26/contrib/include/asm-generic/bitops/find.h
new file mode 100644
index 00000000..1914e974
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/find.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_FIND_H_
+#define _ASM_GENERIC_BITOPS_FIND_H_
+
+#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
+extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
+ size, unsigned long offset);
+
+extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
+ long size, unsigned long offset);
+#endif
+
+#define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
+#define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
+
+#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/fls.h b/libdde-linux26/contrib/include/asm-generic/bitops/fls.h
new file mode 100644
index 00000000..0576d1f4
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/fls.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_BITOPS_FLS_H_
+#define _ASM_GENERIC_BITOPS_FLS_H_
+
+/**
+ * fls - find last (most-significant) bit set
+ * @x: the word to search
+ *
+ * This is defined the same way as ffs.
+ * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
+ */
+
+static __always_inline int fls(int x)
+{
+ int r = 32;
+
+ if (!x)
+ return 0;
+ if (!(x & 0xffff0000u)) {
+ x <<= 16;
+ r -= 16;
+ }
+ if (!(x & 0xff000000u)) {
+ x <<= 8;
+ r -= 8;
+ }
+ if (!(x & 0xf0000000u)) {
+ x <<= 4;
+ r -= 4;
+ }
+ if (!(x & 0xc0000000u)) {
+ x <<= 2;
+ r -= 2;
+ }
+ if (!(x & 0x80000000u)) {
+ x <<= 1;
+ r -= 1;
+ }
+ return r;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_FLS_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/fls64.h b/libdde-linux26/contrib/include/asm-generic/bitops/fls64.h
new file mode 100644
index 00000000..b097cf84
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/fls64.h
@@ -0,0 +1,36 @@
+#ifndef _ASM_GENERIC_BITOPS_FLS64_H_
+#define _ASM_GENERIC_BITOPS_FLS64_H_
+
+#include <asm/types.h>
+
+/**
+ * fls64 - find last set bit in a 64-bit word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffsll, but returns the position of the most significant set bit.
+ *
+ * fls64(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 64.
+ */
+#if BITS_PER_LONG == 32
+static __always_inline int fls64(__u64 x)
+{
+ __u32 h = x >> 32;
+ if (h)
+ return fls(h) + 32;
+ return fls(x);
+}
+#elif BITS_PER_LONG == 64
+static __always_inline int fls64(__u64 x)
+{
+ if (x == 0)
+ return 0;
+ return __fls(x) + 1;
+}
+#else
+#error BITS_PER_LONG not 32 or 64
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/hweight.h b/libdde-linux26/contrib/include/asm-generic/bitops/hweight.h
new file mode 100644
index 00000000..fbbc3837
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,11 @@
+#ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm/types.h>
+
+extern unsigned int hweight32(unsigned int w);
+extern unsigned int hweight16(unsigned int w);
+extern unsigned int hweight8(unsigned int w);
+extern unsigned long hweight64(__u64 w);
+
+#endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/le.h b/libdde-linux26/contrib/include/asm-generic/bitops/le.h
new file mode 100644
index 00000000..80e3bf13
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/le.h
@@ -0,0 +1,57 @@
+#ifndef _ASM_GENERIC_BITOPS_LE_H_
+#define _ASM_GENERIC_BITOPS_LE_H_
+
+#include <asm/types.h>
+#include <asm/byteorder.h>
+
+#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
+
+#if defined(__LITTLE_ENDIAN)
+
+#define generic_test_le_bit(nr, addr) test_bit(nr, addr)
+#define generic___set_le_bit(nr, addr) __set_bit(nr, addr)
+#define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr)
+
+#define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr)
+#define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr)
+
+#define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr)
+#define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr)
+
+#define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset)
+#define generic_find_next_le_bit(addr, size, offset) \
+ find_next_bit(addr, size, offset)
+
+#elif defined(__BIG_ENDIAN)
+
+#define generic_test_le_bit(nr, addr) \
+ test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___set_le_bit(nr, addr) \
+ __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___clear_le_bit(nr, addr) \
+ __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define generic_test_and_set_le_bit(nr, addr) \
+ test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic_test_and_clear_le_bit(nr, addr) \
+ test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+#define generic___test_and_set_le_bit(nr, addr) \
+ __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+#define generic___test_and_clear_le_bit(nr, addr) \
+ __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr))
+
+extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+extern unsigned long generic_find_next_le_bit(const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#else
+#error "Please fix <asm/byteorder.h>"
+#endif
+
+#define generic_find_first_zero_le_bit(addr, size) \
+ generic_find_next_zero_le_bit((addr), (size), 0)
+
+#endif /* _ASM_GENERIC_BITOPS_LE_H_ */
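
The swizzle is what makes little-endian bit numbering work on big-endian CPUs. As a worked example: on a 64-bit big-endian machine BITOP_LE_SWIZZLE is (63 & ~7) = 56, so generic_test_le_bit(0, addr) tests native bit 0 ^ 56 = 56, the least-significant bit of the lowest-addressed byte, which is exactly where a little-endian bitmap (such as an ext2 on-disk bitmap) keeps bit 0.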
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/lock.h b/libdde-linux26/contrib/include/asm-generic/bitops/lock.h
new file mode 100644
index 00000000..308a9e22
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/lock.h
@@ -0,0 +1,45 @@
+#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
+#define _ASM_GENERIC_BITOPS_LOCK_H_
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+#define clear_bit_unlock(nr, addr) \
+do { \
+ smp_mb__before_clear_bit(); \
+ clear_bit(nr, addr); \
+} while (0)
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock, however it is not atomic.
+ * It does provide release barrier semantics so it can be used to unlock
+ * a bit lock, however it would only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ */
+#define __clear_bit_unlock(nr, addr) \
+do { \
+ smp_mb(); \
+ __clear_bit(nr, addr); \
+} while (0)
+
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+
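Together the acquire and release semantics are enough to build a simple bit spinlock. A minimal sketch under that assumption (the names are invented for illustration):

	#define MY_LOCK_BIT	0

	static unsigned long my_state;

	static void my_lock(void)
	{
		/* test_and_set_bit_lock() provides the acquire barrier. */
		while (test_and_set_bit_lock(MY_LOCK_BIT, &my_state))
			cpu_relax();
	}

	static void my_unlock(void)
	{
		/* clear_bit_unlock() provides the release barrier. */
		clear_bit_unlock(MY_LOCK_BIT, &my_state);
	}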
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/minix-le.h b/libdde-linux26/contrib/include/asm-generic/bitops/minix-le.h
new file mode 100644
index 00000000..4a981c1b
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/minix-le.h
@@ -0,0 +1,17 @@
+#ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_
+#define _ASM_GENERIC_BITOPS_MINIX_LE_H_
+
+#include <asm-generic/bitops/le.h>
+
+#define minix_test_and_set_bit(nr,addr) \
+ generic___test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+ generic___set_le_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+ generic___test_and_clear_le_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr) \
+ generic_test_le_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+ generic_find_first_zero_le_bit((unsigned long *)(addr),(size))
+
+#endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/minix.h b/libdde-linux26/contrib/include/asm-generic/bitops/minix.h
new file mode 100644
index 00000000..91f42e87
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/minix.h
@@ -0,0 +1,15 @@
+#ifndef _ASM_GENERIC_BITOPS_MINIX_H_
+#define _ASM_GENERIC_BITOPS_MINIX_H_
+
+#define minix_test_and_set_bit(nr,addr) \
+ __test_and_set_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+ __set_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr) \
+ __test_and_clear_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr) \
+ test_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size) \
+ find_first_zero_bit((unsigned long *)(addr),(size))
+
+#endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/non-atomic.h b/libdde-linux26/contrib/include/asm-generic/bitops/non-atomic.h
new file mode 100644
index 00000000..697cc2b7
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/non-atomic.h
@@ -0,0 +1,108 @@
+#ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_
+
+#include <asm/types.h>
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p |= mask;
+}
+
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p &= ~mask;
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic and may be reordered.
+ * If it's called on the same region of memory simultaneously, the effect
+ * may be that only one operation succeeds.
+ */
+static inline void __change_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+
+ *p ^= mask;
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old | mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic and can be reordered.
+ * If two instances of this operation race, one can appear to succeed
+ * but actually fail. You must protect multiple accesses with a lock.
+ */
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old & ~mask;
+ return (old & mask) != 0;
+}
+
+/* WARNING: non atomic and it can be reordered! */
+static inline int __test_and_change_bit(int nr,
+ volatile unsigned long *addr)
+{
+ unsigned long mask = BIT_MASK(nr);
+ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+ unsigned long old = *p;
+
+ *p = old ^ mask;
+ return (old & mask) != 0;
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline int test_bit(int nr, const volatile unsigned long *addr)
+{
+ return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */
diff --git a/libdde-linux26/contrib/include/asm-generic/bitops/sched.h b/libdde-linux26/contrib/include/asm-generic/bitops/sched.h
new file mode 100644
index 00000000..604fab70
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bitops/sched.h
@@ -0,0 +1,31 @@
+#ifndef _ASM_GENERIC_BITOPS_SCHED_H_
+#define _ASM_GENERIC_BITOPS_SCHED_H_
+
+#include <linux/compiler.h> /* unlikely() */
+#include <asm/types.h>
+
+/*
+ * Every architecture must define this function. It's the fastest
+ * way of searching a 100-bit bitmap. It's guaranteed that at least
+ * one of the 100 bits is cleared.
+ */
+static inline int sched_find_first_bit(const unsigned long *b)
+{
+#if BITS_PER_LONG == 64
+ if (b[0])
+ return __ffs(b[0]);
+ return __ffs(b[1]) + 64;
+#elif BITS_PER_LONG == 32
+ if (b[0])
+ return __ffs(b[0]);
+ if (b[1])
+ return __ffs(b[1]) + 32;
+ if (b[2])
+ return __ffs(b[2]) + 64;
+ return __ffs(b[3]) + 96;
+#else
+#error BITS_PER_LONG not defined
+#endif
+}
+
+#endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */
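
A worked example of how the scheduler uses this: with priorities as bit numbers, the lowest set bit is the highest-priority runnable queue (the array size here is illustrative, large enough for 100 bits on both word sizes):

	unsigned long prio_bitmap[4] = { 0 };
	int first;

	__set_bit(42, prio_bitmap);
	__set_bit(97, prio_bitmap);
	first = sched_find_first_bit(prio_bitmap);	/* 42 */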
diff --git a/libdde-linux26/contrib/include/asm-generic/bug.h b/libdde-linux26/contrib/include/asm-generic/bug.h
new file mode 100644
index 00000000..37b82cb9
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/bug.h
@@ -0,0 +1,143 @@
+#ifndef _ASM_GENERIC_BUG_H
+#define _ASM_GENERIC_BUG_H
+
+#include <linux/compiler.h>
+
+#ifdef CONFIG_BUG
+
+#ifdef CONFIG_GENERIC_BUG
+#ifndef __ASSEMBLY__
+struct bug_entry {
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ unsigned long bug_addr;
+#else
+ signed int bug_addr_disp;
+#endif
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#ifndef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+ const char *file;
+#else
+ signed int file_disp;
+#endif
+ unsigned short line;
+#endif
+ unsigned short flags;
+};
+#endif /* __ASSEMBLY__ */
+
+#define BUGFLAG_WARNING (1<<0)
+#endif /* CONFIG_GENERIC_BUG */
+
+/*
+ * Don't use BUG() or BUG_ON() unless there's really no way out; one
+ * example might be detecting data structure corruption in the middle
+ * of an operation that can't be backed out of. If the (sub)system
+ * can somehow continue operating, perhaps with reduced functionality,
+ * it's probably not BUG-worthy.
+ *
+ * If you're tempted to BUG(), think again: is completely giving up
+ * really the *only* solution? There are usually better options, where
+ * users don't need to reboot ASAP and can mostly shut down cleanly.
+ */
+#ifndef HAVE_ARCH_BUG
+#define BUG() do { \
+ printk("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
+ panic("BUG!"); \
+} while (0)
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while(0)
+#endif
+
+/*
+ * WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
+ * significant issues that need prompt attention if they should ever
+ * appear at runtime. Use the versions with printk format strings
+ * to provide better diagnostics.
+ */
+#ifndef __WARN
+#ifndef __ASSEMBLY__
+extern void warn_slowpath(const char *file, const int line,
+ const char *fmt, ...) __attribute__((format(printf, 3, 4)));
+#define WANT_WARN_ON_SLOWPATH
+#endif
+#define __WARN() warn_slowpath(__FILE__, __LINE__, NULL)
+#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
+#else
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN(); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_printf(format); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#else /* !CONFIG_BUG */
+#ifndef HAVE_ARCH_BUG
+#define BUG()
+#endif
+
+#ifndef HAVE_ARCH_BUG_ON
+#define BUG_ON(condition) do { if (condition) ; } while(0)
+#endif
+
+#ifndef HAVE_ARCH_WARN_ON
+#define WARN_ON(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#ifndef WARN
+#define WARN(condition, format...) ({ \
+ int __ret_warn_on = !!(condition); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
+#endif
+
+#define WARN_ON_ONCE(condition) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN_ON(!__warned)) \
+ __warned = 1; \
+ unlikely(__ret_warn_once); \
+})
+
+#define WARN_ONCE(condition, format...) ({ \
+ static int __warned; \
+ int __ret_warn_once = !!(condition); \
+ \
+ if (unlikely(__ret_warn_once)) \
+ if (WARN(!__warned, format)) \
+ __warned = 1; \
+ unlikely(__ret_warn_once); \
+})
+
+#define WARN_ON_RATELIMIT(condition, state) \
+ WARN_ON((condition) && __ratelimit(state))
+
+#ifdef CONFIG_SMP
+# define WARN_ON_SMP(x) WARN_ON(x)
+#else
+# define WARN_ON_SMP(x) do { } while (0)
+#endif
+
+#endif
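
In practice the WARN family is used to flag survivable misuse with enough context to debug it later. A hedged sketch (len and MY_MAX_LEN are invented for illustration):

	/* Report an oversized request once per boot, then fail gracefully. */
	if (WARN_ONCE(len > MY_MAX_LEN, "oversized request: %zu bytes\n", len))
		return -EINVAL;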
diff --git a/libdde-linux26/contrib/include/asm-generic/cmpxchg-local.h b/libdde-linux26/contrib/include/asm-generic/cmpxchg-local.h
new file mode 100644
index 00000000..b2ba2fc8
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/cmpxchg-local.h
@@ -0,0 +1,65 @@
+#ifndef __ASM_GENERIC_CMPXCHG_LOCAL_H
+#define __ASM_GENERIC_CMPXCHG_LOCAL_H
+
+#include <linux/types.h>
+
+extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
+
+/*
+ * Generic version of __cmpxchg_local (disables interrupts). Takes an unsigned
+ * long parameter, supporting various types of architectures.
+ */
+static inline unsigned long __cmpxchg_local_generic(volatile void *ptr,
+ unsigned long old, unsigned long new, int size)
+{
+ unsigned long flags, prev;
+
+ /*
+ * Sanity checking, compile-time.
+ */
+ if (size == 8 && sizeof(unsigned long) != 8)
+ wrong_size_cmpxchg(ptr);
+
+ local_irq_save(flags);
+ switch (size) {
+ case 1: prev = *(u8 *)ptr;
+ if (prev == old)
+ *(u8 *)ptr = (u8)new;
+ break;
+ case 2: prev = *(u16 *)ptr;
+ if (prev == old)
+ *(u16 *)ptr = (u16)new;
+ break;
+ case 4: prev = *(u32 *)ptr;
+ if (prev == old)
+ *(u32 *)ptr = (u32)new;
+ break;
+ case 8: prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = (u64)new;
+ break;
+ default:
+ wrong_size_cmpxchg(ptr);
+ }
+ local_irq_restore(flags);
+ return prev;
+}
+
+/*
+ * Generic version of __cmpxchg64_local. Takes an u64 parameter.
+ */
+static inline u64 __cmpxchg64_local_generic(volatile void *ptr,
+ u64 old, u64 new)
+{
+ u64 prev;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ prev = *(u64 *)ptr;
+ if (prev == old)
+ *(u64 *)ptr = new;
+ local_irq_restore(flags);
+ return prev;
+}
+
+#endif
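
On a uniprocessor the interrupts-off window makes the read-modify-write appear atomic, which is enough for a classic compare-and-swap retry loop. A sketch using the generic helper directly (the counter and clamp logic are illustrative):

	static unsigned long counter;

	static void counter_add_clamped(unsigned long delta, unsigned long limit)
	{
		unsigned long old, new;

		do {
			old = counter;
			new = old + delta;
			if (new > limit)
				new = limit;
		} while (__cmpxchg_local_generic(&counter, old, new,
						 sizeof(counter)) != old);
	}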
diff --git a/libdde-linux26/contrib/include/asm-generic/cmpxchg.h b/libdde-linux26/contrib/include/asm-generic/cmpxchg.h
new file mode 100644
index 00000000..213ac6e8
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/cmpxchg.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_GENERIC_CMPXCHG_H
+#define __ASM_GENERIC_CMPXCHG_H
+
+/*
+ * Generic cmpxchg
+ *
+ * Uses the local cmpxchg. Does not support SMP.
+ */
+#ifdef CONFIG_SMP
+#error "Cannot use generic cmpxchg on SMP"
+#endif
+
+/*
+ * Atomic compare and exchange.
+ *
+ * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
+ * a cmpxchg primitive faster than repeated local irq save/restore exists.
+ */
+#define cmpxchg(ptr, o, n) cmpxchg_local((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/cputime.h b/libdde-linux26/contrib/include/asm-generic/cputime.h
new file mode 100644
index 00000000..1c1fa422
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/cputime.h
@@ -0,0 +1,69 @@
+#ifndef _ASM_GENERIC_CPUTIME_H
+#define _ASM_GENERIC_CPUTIME_H
+
+#include <linux/time.h>
+#include <linux/jiffies.h>
+
+typedef unsigned long cputime_t;
+
+#define cputime_zero (0UL)
+#define cputime_max ((~0UL >> 1) - 1)
+#define cputime_add(__a, __b) ((__a) + (__b))
+#define cputime_sub(__a, __b) ((__a) - (__b))
+#define cputime_div(__a, __n) ((__a) / (__n))
+#define cputime_halve(__a) ((__a) >> 1)
+#define cputime_eq(__a, __b) ((__a) == (__b))
+#define cputime_gt(__a, __b) ((__a) > (__b))
+#define cputime_ge(__a, __b) ((__a) >= (__b))
+#define cputime_lt(__a, __b) ((__a) < (__b))
+#define cputime_le(__a, __b) ((__a) <= (__b))
+#define cputime_to_jiffies(__ct) (__ct)
+#define cputime_to_scaled(__ct) (__ct)
+#define jiffies_to_cputime(__hz) (__hz)
+
+typedef u64 cputime64_t;
+
+#define cputime64_zero (0ULL)
+#define cputime64_add(__a, __b) ((__a) + (__b))
+#define cputime64_sub(__a, __b) ((__a) - (__b))
+#define cputime64_to_jiffies64(__ct) (__ct)
+#define jiffies64_to_cputime64(__jif) (__jif)
+#define cputime_to_cputime64(__ct) ((u64) __ct)
+
+
+/*
+ * Convert cputime to milliseconds and back.
+ */
+#define cputime_to_msecs(__ct) jiffies_to_msecs(__ct)
+#define msecs_to_cputime(__msecs) msecs_to_jiffies(__msecs)
+
+/*
+ * Convert cputime to seconds and back.
+ */
+#define cputime_to_secs(jif) ((jif) / HZ)
+#define secs_to_cputime(sec) ((sec) * HZ)
+
+/*
+ * Convert cputime to timespec and back.
+ */
+#define timespec_to_cputime(__val) timespec_to_jiffies(__val)
+#define cputime_to_timespec(__ct,__val) jiffies_to_timespec(__ct,__val)
+
+/*
+ * Convert cputime to timeval and back.
+ */
+#define timeval_to_cputime(__val) timeval_to_jiffies(__val)
+#define cputime_to_timeval(__ct,__val) jiffies_to_timeval(__ct,__val)
+
+/*
+ * Convert cputime to clock and back.
+ */
+#define cputime_to_clock_t(__ct) jiffies_to_clock_t(__ct)
+#define clock_t_to_cputime(__x) clock_t_to_jiffies(__x)
+
+/*
+ * Convert cputime64 to clock.
+ */
+#define cputime64_to_clock_t(__ct) jiffies_64_to_clock_t(__ct)
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/device.h b/libdde-linux26/contrib/include/asm-generic/device.h
new file mode 100644
index 00000000..c17c9600
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/device.h
@@ -0,0 +1,12 @@
+/*
+ * Arch specific extensions to struct device
+ *
+ * This file is released under the GPLv2
+ */
+#ifndef _ASM_GENERIC_DEVICE_H
+#define _ASM_GENERIC_DEVICE_H
+
+struct dev_archdata {
+};
+
+#endif /* _ASM_GENERIC_DEVICE_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/div64.h b/libdde-linux26/contrib/include/asm-generic/div64.h
new file mode 100644
index 00000000..8f4e3193
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/div64.h
@@ -0,0 +1,58 @@
+#ifndef _ASM_GENERIC_DIV64_H
+#define _ASM_GENERIC_DIV64_H
+/*
+ * Copyright (C) 2003 Bernardo Innocenti <bernie@develer.com>
+ * Based on former asm-ppc/div64.h and asm-m68knommu/div64.h
+ *
+ * The semantics of do_div() are:
+ *
+ * uint32_t do_div(uint64_t *n, uint32_t base)
+ * {
+ * uint32_t remainder = *n % base;
+ * *n = *n / base;
+ * return remainder;
+ * }
+ *
+ * NOTE: macro parameter n is evaluated multiple times,
+ * beware of side effects!
+ */
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#if BITS_PER_LONG == 64
+
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ __rem = ((uint64_t)(n)) % __base; \
+ (n) = ((uint64_t)(n)) / __base; \
+ __rem; \
+ })
+
+#elif BITS_PER_LONG == 32
+
+extern uint32_t __div64_32(uint64_t *dividend, uint32_t divisor);
+
+/* The unnecessary pointer compare is there
+ * to check for type safety (n must be 64bit)
+ */
+# define do_div(n,base) ({ \
+ uint32_t __base = (base); \
+ uint32_t __rem; \
+ (void)(((typeof((n)) *)0) == ((uint64_t *)0)); \
+ if (likely(((n) >> 32) == 0)) { \
+ __rem = (uint32_t)(n) % __base; \
+ (n) = (uint32_t)(n) / __base; \
+ } else \
+ __rem = __div64_32(&(n), __base); \
+ __rem; \
+ })
+
+#else /* BITS_PER_LONG == ?? */
+
+# error do_div() does not yet support the C64
+
+#endif /* BITS_PER_LONG */
+
+#endif /* _ASM_GENERIC_DIV64_H */
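
The in-place semantics are the usual stumbling block: do_div() divides n in place and returns the remainder, so a 64-by-32 average looks like this (values are illustrative):

	uint64_t total_ns = 123456789012ULL;
	uint32_t samples  = 1000;
	uint32_t rem;

	rem = do_div(total_ns, samples);	/* total_ns becomes the quotient */
	/* total_ns == 123456789, rem == 12 */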
diff --git a/libdde-linux26/contrib/include/asm-generic/dma-coherent.h b/libdde-linux26/contrib/include/asm-generic/dma-coherent.h
new file mode 100644
index 00000000..85a3ffaa
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/dma-coherent.h
@@ -0,0 +1,32 @@
+#ifndef DMA_COHERENT_H
+#define DMA_COHERENT_H
+
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These two functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+/*
+ * Standard interface
+ */
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+ dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#endif
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/dma-mapping-broken.h b/libdde-linux26/contrib/include/asm-generic/dma-mapping-broken.h
new file mode 100644
index 00000000..82cd0cb1
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/dma-mapping-broken.h
@@ -0,0 +1,82 @@
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+/* define the dma api to allow compilation but not linking of
+ * dma dependent code. Code that depends on the dma-mapping
+ * API needs to set 'depends on HAS_DMA' in its Kconfig
+ */
+
+struct scatterlist;
+
+extern void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag);
+
+extern void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle);
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+extern dma_addr_t
+dma_map_single(struct device *dev, void *ptr, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction);
+
+extern int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction);
+
+extern void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction);
+
+extern dma_addr_t
+dma_map_page(struct device *dev, struct page *page, unsigned long offset,
+ size_t size, enum dma_data_direction direction);
+
+extern void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction);
+
+extern void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction);
+
+#define dma_sync_single_for_device dma_sync_single_for_cpu
+#define dma_sync_single_range_for_device dma_sync_single_range_for_cpu
+#define dma_sync_sg_for_device dma_sync_sg_for_cpu
+
+extern int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
+
+extern int
+dma_supported(struct device *dev, u64 mask);
+
+extern int
+dma_set_mask(struct device *dev, u64 mask);
+
+extern int
+dma_get_cache_alignment(void);
+
+extern int
+dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
+
+extern void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction);
+
+#endif /* _ASM_GENERIC_DMA_MAPPING_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/dma-mapping.h b/libdde-linux26/contrib/include/asm-generic/dma-mapping.h
new file mode 100644
index 00000000..189486c3
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/dma-mapping.h
@@ -0,0 +1,308 @@
+/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
+ *
+ * Implements the generic device dma API via the existing pci_ one
+ * for unconverted architectures
+ */
+
+#ifndef _ASM_GENERIC_DMA_MAPPING_H
+#define _ASM_GENERIC_DMA_MAPPING_H
+
+
+#ifdef CONFIG_PCI
+
+/* we implement the API below in terms of the existing PCI one,
+ * so include it */
+#include <linux/pci.h>
+/* need struct page definitions */
+#include <linux/mm.h>
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_dma_supported(to_pci_dev(dev), mask);
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
+ size, (int)direction);
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
+ size, (int)direction);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG_ON(dev->bus != &pci_bus_type);
+
+ pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
+}
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
+}
+
+
+#else
+
+static inline int
+dma_supported(struct device *dev, u64 mask)
+{
+ return 0;
+}
+
+static inline int
+dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ BUG();
+ return 0;
+}
+
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t flag)
+{
+ BUG();
+ return NULL;
+}
+
+static inline void
+dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t dma_handle)
+{
+ BUG();
+}
+
+static inline dma_addr_t
+dma_map_single(struct device *dev, void *cpu_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline dma_addr_t
+dma_map_page(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ BUG();
+ return 0;
+}
+
+static inline void
+dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
+ enum dma_data_direction direction)
+{
+ BUG();
+}
+
+static inline int
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ return 0;
+}
+
+#endif
+
+/* Now for the API extensions over the pci_ one */
+
+#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
+#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+#define dma_is_consistent(d, h) (1)
+
+static inline int
+dma_get_cache_alignment(void)
+{
+ /* no easy way to get cache size on all processors, so return
+ * the maximum possible, to be safe */
+ return (1 << INTERNODE_CACHE_SHIFT);
+}
+
+static inline void
+dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+	/* just sync everything; that's all the PCI API can do */
+ dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction)
+{
+	/* just sync everything; that's all the PCI API can do */
+ dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
+}
+
+static inline void
+dma_cache_sync(struct device *dev, void *vaddr, size_t size,
+ enum dma_data_direction direction)
+{
+ /* could define this in terms of the dma_cache ... operations,
+ * but if you get this on a platform, you should convert the platform
+ * to using the generic device DMA API */
+ BUG();
+}
+
+#endif
+
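A hedged sketch of the streaming-mapping pattern this API supports (dev, buf and len are placeholders; a real driver must check dma_mapping_error() before handing the address to hardware):

    dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, handle))
            return -EIO;
    /* ... program the device with `handle` and run the transfer ... */
    dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);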
diff --git a/libdde-linux26/contrib/include/asm-generic/emergency-restart.h b/libdde-linux26/contrib/include/asm-generic/emergency-restart.h
new file mode 100644
index 00000000..0d68a1ea
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/emergency-restart.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
+#define _ASM_GENERIC_EMERGENCY_RESTART_H
+
+static inline void machine_emergency_restart(void)
+{
+ machine_restart(NULL);
+}
+
+#endif /* _ASM_GENERIC_EMERGENCY_RESTART_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/errno-base.h b/libdde-linux26/contrib/include/asm-generic/errno-base.h
new file mode 100644
index 00000000..65115978
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/errno-base.h
@@ -0,0 +1,39 @@
+#ifndef _ASM_GENERIC_ERRNO_BASE_H
+#define _ASM_GENERIC_ERRNO_BASE_H
+
+#define EPERM 1 /* Operation not permitted */
+#define ENOENT 2 /* No such file or directory */
+#define ESRCH 3 /* No such process */
+#define EINTR 4 /* Interrupted system call */
+#define EIO 5 /* I/O error */
+#define ENXIO 6 /* No such device or address */
+#define E2BIG 7 /* Argument list too long */
+#define ENOEXEC 8 /* Exec format error */
+#define EBADF 9 /* Bad file number */
+#define ECHILD 10 /* No child processes */
+#define EAGAIN 11 /* Try again */
+#define ENOMEM 12 /* Out of memory */
+#define EACCES 13 /* Permission denied */
+#define EFAULT 14 /* Bad address */
+#define ENOTBLK 15 /* Block device required */
+#define EBUSY 16 /* Device or resource busy */
+#define EEXIST 17 /* File exists */
+#define EXDEV 18 /* Cross-device link */
+#define ENODEV 19 /* No such device */
+#define ENOTDIR 20 /* Not a directory */
+#define EISDIR 21 /* Is a directory */
+#define EINVAL 22 /* Invalid argument */
+#define ENFILE 23 /* File table overflow */
+#define EMFILE 24 /* Too many open files */
+#define ENOTTY 25 /* Not a typewriter */
+#define ETXTBSY 26 /* Text file busy */
+#define EFBIG 27 /* File too large */
+#define ENOSPC 28 /* No space left on device */
+#define ESPIPE 29 /* Illegal seek */
+#define EROFS 30 /* Read-only file system */
+#define EMLINK 31 /* Too many links */
+#define EPIPE 32 /* Broken pipe */
+#define EDOM 33 /* Math argument out of domain of func */
+#define ERANGE 34 /* Math result not representable */
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/errno.h b/libdde-linux26/contrib/include/asm-generic/errno.h
new file mode 100644
index 00000000..e8852c09
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/errno.h
@@ -0,0 +1,109 @@
+#ifndef _ASM_GENERIC_ERRNO_H
+#define _ASM_GENERIC_ERRNO_H
+
+#include <asm-generic/errno-base.h>
+
+#define EDEADLK 35 /* Resource deadlock would occur */
+#define ENAMETOOLONG 36 /* File name too long */
+#define ENOLCK 37 /* No record locks available */
+#define ENOSYS 38 /* Function not implemented */
+#define ENOTEMPTY 39 /* Directory not empty */
+#define ELOOP 40 /* Too many symbolic links encountered */
+#define EWOULDBLOCK EAGAIN /* Operation would block */
+#define ENOMSG 42 /* No message of desired type */
+#define EIDRM 43 /* Identifier removed */
+#define ECHRNG 44 /* Channel number out of range */
+#define EL2NSYNC 45 /* Level 2 not synchronized */
+#define EL3HLT 46 /* Level 3 halted */
+#define EL3RST 47 /* Level 3 reset */
+#define ELNRNG 48 /* Link number out of range */
+#define EUNATCH 49 /* Protocol driver not attached */
+#define ENOCSI 50 /* No CSI structure available */
+#define EL2HLT 51 /* Level 2 halted */
+#define EBADE 52 /* Invalid exchange */
+#define EBADR 53 /* Invalid request descriptor */
+#define EXFULL 54 /* Exchange full */
+#define ENOANO 55 /* No anode */
+#define EBADRQC 56 /* Invalid request code */
+#define EBADSLT 57 /* Invalid slot */
+
+#define EDEADLOCK EDEADLK
+
+#define EBFONT 59 /* Bad font file format */
+#define ENOSTR 60 /* Device not a stream */
+#define ENODATA 61 /* No data available */
+#define ETIME 62 /* Timer expired */
+#define ENOSR 63 /* Out of streams resources */
+#define ENONET 64 /* Machine is not on the network */
+#define ENOPKG 65 /* Package not installed */
+#define EREMOTE 66 /* Object is remote */
+#define ENOLINK 67 /* Link has been severed */
+#define EADV 68 /* Advertise error */
+#define ESRMNT 69 /* Srmount error */
+#define ECOMM 70 /* Communication error on send */
+#define EPROTO 71 /* Protocol error */
+#define EMULTIHOP 72 /* Multihop attempted */
+#define EDOTDOT 73 /* RFS specific error */
+#define EBADMSG 74 /* Not a data message */
+#define EOVERFLOW 75 /* Value too large for defined data type */
+#define ENOTUNIQ 76 /* Name not unique on network */
+#define EBADFD 77 /* File descriptor in bad state */
+#define EREMCHG 78 /* Remote address changed */
+#define ELIBACC 79 /* Can not access a needed shared library */
+#define ELIBBAD 80 /* Accessing a corrupted shared library */
+#define ELIBSCN 81 /* .lib section in a.out corrupted */
+#define ELIBMAX 82 /* Attempting to link in too many shared libraries */
+#define ELIBEXEC 83 /* Cannot exec a shared library directly */
+#define EILSEQ 84 /* Illegal byte sequence */
+#define ERESTART 85 /* Interrupted system call should be restarted */
+#define ESTRPIPE 86 /* Streams pipe error */
+#define EUSERS 87 /* Too many users */
+#define ENOTSOCK 88 /* Socket operation on non-socket */
+#define EDESTADDRREQ 89 /* Destination address required */
+#define EMSGSIZE 90 /* Message too long */
+#define EPROTOTYPE 91 /* Protocol wrong type for socket */
+#define ENOPROTOOPT 92 /* Protocol not available */
+#define EPROTONOSUPPORT 93 /* Protocol not supported */
+#define ESOCKTNOSUPPORT 94 /* Socket type not supported */
+#define EOPNOTSUPP 95 /* Operation not supported on transport endpoint */
+#define EPFNOSUPPORT 96 /* Protocol family not supported */
+#define EAFNOSUPPORT 97 /* Address family not supported by protocol */
+#define EADDRINUSE 98 /* Address already in use */
+#define EADDRNOTAVAIL 99 /* Cannot assign requested address */
+#define ENETDOWN 100 /* Network is down */
+#define ENETUNREACH 101 /* Network is unreachable */
+#define ENETRESET 102 /* Network dropped connection because of reset */
+#define ECONNABORTED 103 /* Software caused connection abort */
+#define ECONNRESET 104 /* Connection reset by peer */
+#define ENOBUFS 105 /* No buffer space available */
+#define EISCONN 106 /* Transport endpoint is already connected */
+#define ENOTCONN 107 /* Transport endpoint is not connected */
+#define ESHUTDOWN 108 /* Cannot send after transport endpoint shutdown */
+#define ETOOMANYREFS 109 /* Too many references: cannot splice */
+#define ETIMEDOUT 110 /* Connection timed out */
+#define ECONNREFUSED 111 /* Connection refused */
+#define EHOSTDOWN 112 /* Host is down */
+#define EHOSTUNREACH 113 /* No route to host */
+#define EALREADY 114 /* Operation already in progress */
+#define EINPROGRESS 115 /* Operation now in progress */
+#define ESTALE 116 /* Stale NFS file handle */
+#define EUCLEAN 117 /* Structure needs cleaning */
+#define ENOTNAM 118 /* Not a XENIX named type file */
+#define ENAVAIL 119 /* No XENIX semaphores available */
+#define EISNAM 120 /* Is a named type file */
+#define EREMOTEIO 121 /* Remote I/O error */
+#define EDQUOT 122 /* Quota exceeded */
+
+#define ENOMEDIUM 123 /* No medium found */
+#define EMEDIUMTYPE 124 /* Wrong medium type */
+#define ECANCELED 125 /* Operation Canceled */
+#define ENOKEY 126 /* Required key not available */
+#define EKEYEXPIRED 127 /* Key has expired */
+#define EKEYREVOKED 128 /* Key has been revoked */
+#define EKEYREJECTED 129 /* Key was rejected by service */
+
+/* for robust mutexes */
+#define EOWNERDEAD 130 /* Owner died */
+#define ENOTRECOVERABLE 131 /* State not recoverable */
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/fcntl.h b/libdde-linux26/contrib/include/asm-generic/fcntl.h
new file mode 100644
index 00000000..b8477414
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/fcntl.h
@@ -0,0 +1,151 @@
+#ifndef _ASM_GENERIC_FCNTL_H
+#define _ASM_GENERIC_FCNTL_H
+
+#include <linux/types.h>
+
+/* open/fcntl - O_SYNC is only implemented on block devices and on files
+ located on an ext2 file system */
+#define O_ACCMODE 00000003
+#define O_RDONLY 00000000
+#define O_WRONLY 00000001
+#define O_RDWR 00000002
+#ifndef O_CREAT
+#define O_CREAT 00000100 /* not fcntl */
+#endif
+#ifndef O_EXCL
+#define O_EXCL 00000200 /* not fcntl */
+#endif
+#ifndef O_NOCTTY
+#define O_NOCTTY 00000400 /* not fcntl */
+#endif
+#ifndef O_TRUNC
+#define O_TRUNC 00001000 /* not fcntl */
+#endif
+#ifndef O_APPEND
+#define O_APPEND 00002000
+#endif
+#ifndef O_NONBLOCK
+#define O_NONBLOCK 00004000
+#endif
+#ifndef O_SYNC
+#define O_SYNC 00010000
+#endif
+#ifndef FASYNC
+#define FASYNC 00020000 /* fcntl, for BSD compatibility */
+#endif
+#ifndef O_DIRECT
+#define O_DIRECT 00040000 /* direct disk access hint */
+#endif
+#ifndef O_LARGEFILE
+#define O_LARGEFILE 00100000
+#endif
+#ifndef O_DIRECTORY
+#define O_DIRECTORY 00200000 /* must be a directory */
+#endif
+#ifndef O_NOFOLLOW
+#define O_NOFOLLOW 00400000 /* don't follow links */
+#endif
+#ifndef O_NOATIME
+#define O_NOATIME 01000000
+#endif
+#ifndef O_CLOEXEC
+#define O_CLOEXEC 02000000 /* set close_on_exec */
+#endif
+#ifndef O_NDELAY
+#define O_NDELAY O_NONBLOCK
+#endif
+
+#define F_DUPFD 0 /* dup */
+#define F_GETFD 1 /* get close_on_exec */
+#define F_SETFD 2 /* set/clear close_on_exec */
+#define F_GETFL 3 /* get file->f_flags */
+#define F_SETFL 4 /* set file->f_flags */
+#ifndef F_GETLK
+#define F_GETLK 5
+#define F_SETLK 6
+#define F_SETLKW 7
+#endif
+#ifndef F_SETOWN
+#define F_SETOWN 8 /* for sockets. */
+#define F_GETOWN 9 /* for sockets. */
+#endif
+#ifndef F_SETSIG
+#define F_SETSIG 10 /* for sockets. */
+#define F_GETSIG 11 /* for sockets. */
+#endif
+
+/* for F_[GET|SET]FL */
+#define FD_CLOEXEC 1 /* actually anything with low bit set goes */
+
+/* for posix fcntl() and lockf() */
+#ifndef F_RDLCK
+#define F_RDLCK 0
+#define F_WRLCK 1
+#define F_UNLCK 2
+#endif
+
+/* for old implementation of bsd flock () */
+#ifndef F_EXLCK
+#define F_EXLCK 4 /* or 3 */
+#define F_SHLCK 8 /* or 4 */
+#endif
+
+/* for leases */
+#ifndef F_INPROGRESS
+#define F_INPROGRESS 16
+#endif
+
+/* operations for bsd flock(), also used by the kernel implementation */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* or'd with one of the above to prevent
+ blocking */
+#define LOCK_UN 8 /* remove lock */
+
+#define LOCK_MAND 32 /* This is a mandatory flock ... */
+#define LOCK_READ 64 /* which allows concurrent read operations */
+#define LOCK_WRITE 128 /* which allows concurrent write operations */
+#define LOCK_RW 192 /* which allows concurrent read & write ops */
+
+#define F_LINUX_SPECIFIC_BASE 1024
+
+#ifndef HAVE_ARCH_STRUCT_FLOCK
+#ifndef __ARCH_FLOCK_PAD
+#define __ARCH_FLOCK_PAD
+#endif
+
+struct flock {
+ short l_type;
+ short l_whence;
+ off_t l_start;
+ off_t l_len;
+ pid_t l_pid;
+ __ARCH_FLOCK_PAD
+};
+#endif
+
+#ifndef CONFIG_64BIT
+
+#ifndef F_GETLK64
+#define F_GETLK64 12 /* using 'struct flock64' */
+#define F_SETLK64 13
+#define F_SETLKW64 14
+#endif
+
+#ifndef HAVE_ARCH_STRUCT_FLOCK64
+#ifndef __ARCH_FLOCK64_PAD
+#define __ARCH_FLOCK64_PAD
+#endif
+
+struct flock64 {
+ short l_type;
+ short l_whence;
+ loff_t l_start;
+ loff_t l_len;
+ pid_t l_pid;
+ __ARCH_FLOCK64_PAD
+};
+#endif
+#endif /* !CONFIG_64BIT */
+
+#endif /* _ASM_GENERIC_FCNTL_H */
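An illustrative use of struct flock with the F_SETLKW command defined above (fd is a placeholder; SEEK_SET comes from the usual libc headers):

    struct flock fl = {
            .l_type   = F_WRLCK,    /* exclusive write lock */
            .l_whence = SEEK_SET,
            .l_start  = 0,
            .l_len    = 0,          /* 0 means "through end of file" */
    };
    fcntl(fd, F_SETLKW, &fl);       /* blocks until the range is ours */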
diff --git a/libdde-linux26/contrib/include/asm-generic/futex.h b/libdde-linux26/contrib/include/asm-generic/futex.h
new file mode 100644
index 00000000..3c2344f4
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/futex.h
@@ -0,0 +1,56 @@
+#ifndef _ASM_GENERIC_FUTEX_H
+#define _ASM_GENERIC_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <asm/errno.h>
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+ int op = (encoded_op >> 28) & 7;
+ int cmp = (encoded_op >> 24) & 15;
+ int oparg = (encoded_op << 8) >> 20;
+ int cmparg = (encoded_op << 20) >> 20;
+ int oldval = 0, ret;
+ if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+ oparg = 1 << oparg;
+
+ if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+ return -EFAULT;
+
+ pagefault_disable();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ case FUTEX_OP_ADD:
+ case FUTEX_OP_OR:
+ case FUTEX_OP_ANDN:
+ case FUTEX_OP_XOR:
+ default:
+ ret = -ENOSYS;
+ }
+
+ pagefault_enable();
+
+ if (!ret) {
+ switch (cmp) {
+ case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+ case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+ case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+ case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+ case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+ case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+ default: ret = -ENOSYS;
+ }
+ }
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+ return -ENOSYS;
+}
+
+#endif
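For reference, a worked decode of one encoded_op value under the shift arithmetic above (FUTEX_OP() is the encoding macro from linux/futex.h; the value is illustrative):

    /* encoded_op = FUTEX_OP(FUTEX_OP_ADD, 1, FUTEX_OP_CMP_EQ, 0)
     *            = (1 << 28) | (0 << 24) | (1 << 12) | 0 = 0x10001000
     *
     * op     = (0x10001000 >> 28) & 7   == 1 (FUTEX_OP_ADD)
     * cmp    = (0x10001000 >> 24) & 15  == 0 (FUTEX_OP_CMP_EQ)
     * oparg  = (0x10001000 << 8) >> 20  == 1 (the shift pair
     *                                         sign-extends the 12-bit field)
     * cmparg = (0x10001000 << 20) >> 20 == 0
     */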
diff --git a/libdde-linux26/contrib/include/asm-generic/gpio.h b/libdde-linux26/contrib/include/asm-generic/gpio.h
new file mode 100644
index 00000000..81797ec9
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/gpio.h
@@ -0,0 +1,188 @@
+#ifndef _ASM_GENERIC_GPIO_H
+#define _ASM_GENERIC_GPIO_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#ifdef CONFIG_GPIOLIB
+
+#include <linux/compiler.h>
+
+/* Platforms may implement their GPIO interface with library code,
+ * at a small performance cost for non-inlined operations and some
+ * extra memory (for code and for per-GPIO table entries).
+ *
+ * While the GPIO programming interface defines valid GPIO numbers
+ * to be in the range 0..INT_MAX, this library restricts them to the
+ * smaller range 0..ARCH_NR_GPIOS-1.
+ */
+
+#ifndef ARCH_NR_GPIOS
+#define ARCH_NR_GPIOS 256
+#endif
+
+static inline int gpio_is_valid(int number)
+{
+ /* only some non-negative numbers are valid */
+ return ((unsigned)number) < ARCH_NR_GPIOS;
+}
+
+struct seq_file;
+struct module;
+
+/**
+ * struct gpio_chip - abstract a GPIO controller
+ * @label: for diagnostics
+ * @dev: optional device providing the GPIOs
+ * @owner: helps prevent removal of modules exporting active GPIOs
+ * @request: optional hook for chip-specific activation, such as
+ * enabling module power and clock; may sleep
+ * @free: optional hook for chip-specific deactivation, such as
+ * disabling module power and clock; may sleep
+ * @direction_input: configures signal "offset" as input, or returns error
+ * @get: returns value for signal "offset"; for output signals this
+ * returns either the value actually sensed, or zero
+ * @direction_output: configures signal "offset" as output, or returns error
+ * @set: assigns output value for signal "offset"
+ * @to_irq: optional hook supporting non-static gpio_to_irq() mappings;
+ * implementation may not sleep
+ * @dbg_show: optional routine to show contents in debugfs; default code
+ * will be used when this is omitted, but custom code can show extra
+ * state (such as pullup/pulldown configuration).
+ * @base: identifies the first GPIO number handled by this chip; or, if
+ * negative during registration, requests dynamic ID allocation.
+ * @ngpio: the number of GPIOs handled by this controller; the last GPIO
+ * handled is (base + ngpio - 1).
+ * @can_sleep: flag must be set iff get()/set() methods sleep, as they
+ * must while accessing GPIO expander chips over I2C or SPI
+ *
+ * A gpio_chip can help platforms abstract various sources of GPIOs so
+ * they can all be accessed through a common programming interface.
+ * Example sources would be SOC controllers, FPGAs, multifunction
+ * chips, dedicated GPIO expanders, and so on.
+ *
+ * Each chip controls a number of signals, identified in method calls
+ * by "offset" values in the range 0..(@ngpio - 1). When those signals
+ * are referenced through calls like gpio_get_value(gpio), the offset
+ * is calculated by subtracting @base from the gpio number.
+ */
+struct gpio_chip {
+ const char *label;
+ struct device *dev;
+ struct module *owner;
+
+ int (*request)(struct gpio_chip *chip,
+ unsigned offset);
+ void (*free)(struct gpio_chip *chip,
+ unsigned offset);
+
+ int (*direction_input)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*get)(struct gpio_chip *chip,
+ unsigned offset);
+ int (*direction_output)(struct gpio_chip *chip,
+ unsigned offset, int value);
+ void (*set)(struct gpio_chip *chip,
+ unsigned offset, int value);
+
+ int (*to_irq)(struct gpio_chip *chip,
+ unsigned offset);
+
+ void (*dbg_show)(struct seq_file *s,
+ struct gpio_chip *chip);
+ int base;
+ u16 ngpio;
+ unsigned can_sleep:1;
+ unsigned exported:1;
+};
+
+extern const char *gpiochip_is_requested(struct gpio_chip *chip,
+ unsigned offset);
+extern int __must_check gpiochip_reserve(int start, int ngpio);
+
+/* add/remove chips */
+extern int gpiochip_add(struct gpio_chip *chip);
+extern int __must_check gpiochip_remove(struct gpio_chip *chip);
+
+
+/* Always use the library code for GPIO management calls,
+ * or when sleeping may be involved.
+ */
+extern int gpio_request(unsigned gpio, const char *label);
+extern void gpio_free(unsigned gpio);
+
+extern int gpio_direction_input(unsigned gpio);
+extern int gpio_direction_output(unsigned gpio, int value);
+
+extern int gpio_get_value_cansleep(unsigned gpio);
+extern void gpio_set_value_cansleep(unsigned gpio, int value);
+
+
+/* A platform's <asm/gpio.h> code may want to inline the I/O calls when
+ * the GPIO is constant and refers to some always-present controller,
+ * giving direct access to chip registers and tight bitbanging loops.
+ */
+extern int __gpio_get_value(unsigned gpio);
+extern void __gpio_set_value(unsigned gpio, int value);
+
+extern int __gpio_cansleep(unsigned gpio);
+
+extern int __gpio_to_irq(unsigned gpio);
+
+#ifdef CONFIG_GPIO_SYSFS
+
+/*
+ * A sysfs interface can be exported by individual drivers if they want,
+ * but more typically is configured entirely from userspace.
+ */
+extern int gpio_export(unsigned gpio, bool direction_may_change);
+extern void gpio_unexport(unsigned gpio);
+
+#endif /* CONFIG_GPIO_SYSFS */
+
+#else /* !CONFIG_GPIOLIB */
+
+static inline int gpio_is_valid(int number)
+{
+ /* only non-negative numbers are valid */
+ return number >= 0;
+}
+
+/* platforms that don't directly support access to GPIOs through I2C, SPI,
+ * or other blocking infrastructure can use these wrappers.
+ */
+
+static inline int gpio_cansleep(unsigned gpio)
+{
+ return 0;
+}
+
+static inline int gpio_get_value_cansleep(unsigned gpio)
+{
+ might_sleep();
+ return gpio_get_value(gpio);
+}
+
+static inline void gpio_set_value_cansleep(unsigned gpio, int value)
+{
+ might_sleep();
+ gpio_set_value(gpio, value);
+}
+
+#endif /* !CONFIG_GPIOLIB */
+
+#ifndef CONFIG_GPIO_SYSFS
+
+/* sysfs support is only available with gpiolib, where it's optional */
+
+static inline int gpio_export(unsigned gpio, bool direction_may_change)
+{
+ return -ENOSYS;
+}
+
+static inline void gpio_unexport(unsigned gpio)
+{
+}
+#endif /* CONFIG_GPIO_SYSFS */
+
+#endif /* _ASM_GENERIC_GPIO_H */
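A minimal gpio_chip sketch showing the base/offset relationship described above (mychip_base and DATA_REG are hypothetical MMIO details):

    static int mychip_get(struct gpio_chip *chip, unsigned offset)
    {
            /* hypothetical register read; offset is 0..ngpio-1 */
            return !!(readl(mychip_base + DATA_REG) & BIT(offset));
    }

    static struct gpio_chip mychip = {
            .label = "mychip",
            .base  = -1,            /* request dynamic number allocation */
            .ngpio = 8,
            .get   = mychip_get,
    };

    /* After gpiochip_add(&mychip), gpio_get_value(mychip.base + 3)
     * ends up in mychip_get(&mychip, 3). */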
diff --git a/libdde-linux26/contrib/include/asm-generic/ide_iops.h b/libdde-linux26/contrib/include/asm-generic/ide_iops.h
new file mode 100644
index 00000000..1b91d068
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/ide_iops.h
@@ -0,0 +1,38 @@
+/* Generic I/O and MEMIO string operations. */
+
+#define __ide_insw insw
+#define __ide_insl insl
+#define __ide_outsw outsw
+#define __ide_outsl outsl
+
+static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u16 *)addr = readw(port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ *(u32 *)addr = readl(port);
+ addr += 4;
+ }
+}
+
+static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
+{
+ while (count--) {
+ writew(*(u16 *)addr, port);
+ addr += 2;
+ }
+}
+
+static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
+{
+ while (count--) {
+ writel(*(u32 *)addr, port);
+ addr += 4;
+ }
+}
diff --git a/libdde-linux26/contrib/include/asm-generic/int-l64.h b/libdde-linux26/contrib/include/asm-generic/int-l64.h
new file mode 100644
index 00000000..2af9b75d
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/int-l64.h
@@ -0,0 +1,71 @@
+/*
+ * asm-generic/int-l64.h
+ *
+ * Integer declarations for architectures which use "long"
+ * for 64-bit types.
+ */
+
+#ifndef _ASM_GENERIC_INT_L64_H
+#define _ASM_GENERIC_INT_L64_H
+
+#ifndef __ASSEMBLY__
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+typedef __signed__ long __s64;
+typedef unsigned long __u64;
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long s64;
+typedef unsigned long u64;
+
+#define S8_C(x) x
+#define U8_C(x) x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## L
+#define U64_C(x) x ## UL
+
+#else /* __ASSEMBLY__ */
+
+#define S8_C(x) x
+#define U8_C(x) x
+#define S16_C(x) x
+#define U16_C(x) x
+#define S32_C(x) x
+#define U32_C(x) x
+#define S64_C(x) x
+#define U64_C(x) x
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_GENERIC_INT_L64_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/int-ll64.h b/libdde-linux26/contrib/include/asm-generic/int-ll64.h
new file mode 100644
index 00000000..f9bc9ac2
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/int-ll64.h
@@ -0,0 +1,76 @@
+/*
+ * asm-generic/int-ll64.h
+ *
+ * Integer declarations for architectures which use "long long"
+ * for 64-bit types.
+ */
+
+#ifndef _ASM_GENERIC_INT_LL64_H
+#define _ASM_GENERIC_INT_LL64_H
+
+#ifndef __ASSEMBLY__
+/*
+ * __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
+ * header files exported to user space
+ */
+
+typedef __signed__ char __s8;
+typedef unsigned char __u8;
+
+typedef __signed__ short __s16;
+typedef unsigned short __u16;
+
+typedef __signed__ int __s32;
+typedef unsigned int __u32;
+
+#ifdef __GNUC__
+__extension__ typedef __signed__ long long __s64;
+__extension__ typedef unsigned long long __u64;
+#else
+typedef __signed__ long long __s64;
+typedef unsigned long long __u64;
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef __KERNEL__
+
+#ifndef __ASSEMBLY__
+
+typedef signed char s8;
+typedef unsigned char u8;
+
+typedef signed short s16;
+typedef unsigned short u16;
+
+typedef signed int s32;
+typedef unsigned int u32;
+
+typedef signed long long s64;
+typedef unsigned long long u64;
+
+#define S8_C(x) x
+#define U8_C(x) x ## U
+#define S16_C(x) x
+#define U16_C(x) x ## U
+#define S32_C(x) x
+#define U32_C(x) x ## U
+#define S64_C(x) x ## LL
+#define U64_C(x) x ## ULL
+
+#else /* __ASSEMBLY__ */
+
+#define S8_C(x) x
+#define U8_C(x) x
+#define S16_C(x) x
+#define U16_C(x) x
+#define S32_C(x) x
+#define U32_C(x) x
+#define S64_C(x) x
+#define U64_C(x) x
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_GENERIC_INT_LL64_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/ioctl.h b/libdde-linux26/contrib/include/asm-generic/ioctl.h
new file mode 100644
index 00000000..15828b2d
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/ioctl.h
@@ -0,0 +1,105 @@
+#ifndef _ASM_GENERIC_IOCTL_H
+#define _ASM_GENERIC_IOCTL_H
+
+/* ioctl command encoding: 32 bits total, command in lower 16 bits,
+ * size of the parameter structure in the lower 14 bits of the
+ * upper 16 bits.
+ * Encoding the size of the parameter structure in the ioctl request
+ * is useful for catching programs compiled with old versions
+ * and to avoid overwriting user space outside the user buffer area.
+ * The highest 2 bits are reserved for indicating the ``access mode''.
+ * NOTE: This limits the max parameter size to 16 KiB - 1!
+ */
+
+/*
+ * The following is for compatibility across the various Linux
+ * platforms. The generic ioctl numbering scheme doesn't really enforce
+ * a type field. De facto, however, the top 8 bits of the lower 16
+ * bits are indeed used as a type field, so we might just as well make
+ * this explicit here. Please be sure to use the decoding macros
+ * below from now on.
+ */
+#define _IOC_NRBITS 8
+#define _IOC_TYPEBITS 8
+
+/*
+ * Let any architecture override either of the following before
+ * including this file.
+ */
+
+#ifndef _IOC_SIZEBITS
+# define _IOC_SIZEBITS 14
+#endif
+
+#ifndef _IOC_DIRBITS
+# define _IOC_DIRBITS 2
+#endif
+
+#define _IOC_NRMASK ((1 << _IOC_NRBITS)-1)
+#define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1)
+#define _IOC_SIZEMASK ((1 << _IOC_SIZEBITS)-1)
+#define _IOC_DIRMASK ((1 << _IOC_DIRBITS)-1)
+
+#define _IOC_NRSHIFT 0
+#define _IOC_TYPESHIFT (_IOC_NRSHIFT+_IOC_NRBITS)
+#define _IOC_SIZESHIFT (_IOC_TYPESHIFT+_IOC_TYPEBITS)
+#define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS)
+
+/*
+ * Direction bits, which any architecture can choose to override
+ * before including this file.
+ */
+
+#ifndef _IOC_NONE
+# define _IOC_NONE 0U
+#endif
+
+#ifndef _IOC_WRITE
+# define _IOC_WRITE 1U
+#endif
+
+#ifndef _IOC_READ
+# define _IOC_READ 2U
+#endif
+
+#define _IOC(dir,type,nr,size) \
+ (((dir) << _IOC_DIRSHIFT) | \
+ ((type) << _IOC_TYPESHIFT) | \
+ ((nr) << _IOC_NRSHIFT) | \
+ ((size) << _IOC_SIZESHIFT))
+
+#ifdef __KERNEL__
+/* provoke compile error for invalid uses of size argument */
+extern unsigned int __invalid_size_argument_for_IOC;
+#define _IOC_TYPECHECK(t) \
+ ((sizeof(t) == sizeof(t[1]) && \
+ sizeof(t) < (1 << _IOC_SIZEBITS)) ? \
+ sizeof(t) : __invalid_size_argument_for_IOC)
+#else
+#define _IOC_TYPECHECK(t) (sizeof(t))
+#endif
+
+/* used to create numbers */
+#define _IO(type,nr) _IOC(_IOC_NONE,(type),(nr),0)
+#define _IOR(type,nr,size) _IOC(_IOC_READ,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOW(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOWR(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),(_IOC_TYPECHECK(size)))
+#define _IOR_BAD(type,nr,size) _IOC(_IOC_READ,(type),(nr),sizeof(size))
+#define _IOW_BAD(type,nr,size) _IOC(_IOC_WRITE,(type),(nr),sizeof(size))
+#define _IOWR_BAD(type,nr,size) _IOC(_IOC_READ|_IOC_WRITE,(type),(nr),sizeof(size))
+
+/* used to decode ioctl numbers.. */
+#define _IOC_DIR(nr) (((nr) >> _IOC_DIRSHIFT) & _IOC_DIRMASK)
+#define _IOC_TYPE(nr) (((nr) >> _IOC_TYPESHIFT) & _IOC_TYPEMASK)
+#define _IOC_NR(nr) (((nr) >> _IOC_NRSHIFT) & _IOC_NRMASK)
+#define _IOC_SIZE(nr) (((nr) >> _IOC_SIZESHIFT) & _IOC_SIZEMASK)
+
+/* ...and for the drivers/sound files... */
+
+#define IOC_IN (_IOC_WRITE << _IOC_DIRSHIFT)
+#define IOC_OUT (_IOC_READ << _IOC_DIRSHIFT)
+#define IOC_INOUT ((_IOC_WRITE|_IOC_READ) << _IOC_DIRSHIFT)
+#define IOCSIZE_MASK (_IOC_SIZEMASK << _IOC_SIZESHIFT)
+#define IOCSIZE_SHIFT (_IOC_SIZESHIFT)
+
+#endif /* _ASM_GENERIC_IOCTL_H */
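A sketch of how a driver would mint and decode a command with these macros (the 'E' type, number 2, and struct my_args are all made up):

    struct my_args { int x, y; };
    #define MYDEV_XCHG _IOWR('E', 2, struct my_args)

    /* the decode macros recover each field:
     *   _IOC_DIR(MYDEV_XCHG)  == (_IOC_READ | _IOC_WRITE)
     *   _IOC_TYPE(MYDEV_XCHG) == 'E'
     *   _IOC_NR(MYDEV_XCHG)   == 2
     *   _IOC_SIZE(MYDEV_XCHG) == sizeof(struct my_args)
     */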
diff --git a/libdde-linux26/contrib/include/asm-generic/iomap.h b/libdde-linux26/contrib/include/asm-generic/iomap.h
new file mode 100644
index 00000000..76b0cc56
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/iomap.h
@@ -0,0 +1,72 @@
+#ifndef __GENERIC_IO_H
+#define __GENERIC_IO_H
+
+#include <linux/linkage.h>
+#include <asm/byteorder.h>
+
+/*
+ * These are the "generic" interfaces for doing new-style
+ * memory-mapped or PIO accesses. Architectures may do
+ * their own arch-optimized versions, these just act as
+ * wrappers around the old-style IO register access functions:
+ * read[bwl]/write[bwl]/in[bwl]/out[bwl]
+ *
+ * Don't include this directly, include it from <asm/io.h>.
+ */
+
+/*
+ * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
+ * access or an MMIO access; these functions don't care. The info is
+ * encoded in the hardware mapping set up by the mapping functions
+ * (or the cookie itself, depending on implementation and hw).
+ *
+ * The generic routines just encode the PIO/MMIO as part of the
+ * cookie, and coldly assume that the MMIO IO mappings are not
+ * in the low address range. Architectures for which this is not
+ * true can't use this generic implementation.
+ */
+extern unsigned int ioread8(void __iomem *);
+extern unsigned int ioread16(void __iomem *);
+extern unsigned int ioread16be(void __iomem *);
+extern unsigned int ioread32(void __iomem *);
+extern unsigned int ioread32be(void __iomem *);
+
+extern void iowrite8(u8, void __iomem *);
+extern void iowrite16(u16, void __iomem *);
+extern void iowrite16be(u16, void __iomem *);
+extern void iowrite32(u32, void __iomem *);
+extern void iowrite32be(u32, void __iomem *);
+
+/*
+ * "string" versions of the above. Note that they
+ * use native byte ordering for the accesses (on
+ * the assumption that IO and memory agree on a
+ * byte order, and CPU byteorder is irrelevant).
+ *
+ * They do _not_ update the port address. If you
+ * want MMIO that copies stuff laid out in MMIO
+ * memory across multiple ports, use "memcpy_toio()"
+ * and friends.
+ */
+extern void ioread8_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread16_rep(void __iomem *port, void *buf, unsigned long count);
+extern void ioread32_rep(void __iomem *port, void *buf, unsigned long count);
+
+extern void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count);
+extern void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count);
+
+/* Create a virtual mapping cookie for an IO port range */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *);
+
+#ifndef ARCH_HAS_IOREMAP_WC
+#define ioremap_wc ioremap_nocache
+#endif
+
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+#endif
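A sketch of the cookie-based access pattern (pdev and STATUS_OFF are placeholders; ioread32() works whether BAR 0 turned out to be PIO or MMIO):

    void __iomem *regs = pci_iomap(pdev, 0, 0); /* len 0 == whole BAR */
    if (!regs)
            return -ENOMEM;
    u32 status = ioread32(regs + STATUS_OFF);
    iowrite32(status | 1, regs + STATUS_OFF);
    pci_iounmap(pdev, regs);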
diff --git a/libdde-linux26/contrib/include/asm-generic/irq_regs.h b/libdde-linux26/contrib/include/asm-generic/irq_regs.h
new file mode 100644
index 00000000..5ae1d07d
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/irq_regs.h
@@ -0,0 +1,37 @@
+/* Fallback per-CPU frame pointer holder
+ *
+ * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _ASM_GENERIC_IRQ_REGS_H
+#define _ASM_GENERIC_IRQ_REGS_H
+
+#include <linux/percpu.h>
+
+/*
+ * Per-cpu current frame pointer - the location of the last exception frame on
+ * the stack
+ */
+DECLARE_PER_CPU(struct pt_regs *, __irq_regs);
+
+static inline struct pt_regs *get_irq_regs(void)
+{
+ return __get_cpu_var(__irq_regs);
+}
+
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs, **pp_regs = &__get_cpu_var(__irq_regs);
+
+ old_regs = *pp_regs;
+ *pp_regs = new_regs;
+ return old_regs;
+}
+
+#endif /* _ASM_GENERIC_IRQ_REGS_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/kdebug.h b/libdde-linux26/contrib/include/asm-generic/kdebug.h
new file mode 100644
index 00000000..11e57b6a
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/kdebug.h
@@ -0,0 +1,9 @@
+#ifndef _ASM_GENERIC_KDEBUG_H
+#define _ASM_GENERIC_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_OOPS=1
+};
+
+#endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/libata-portmap.h b/libdde-linux26/contrib/include/asm-generic/libata-portmap.h
new file mode 100644
index 00000000..cf14f2ff
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/libata-portmap.h
@@ -0,0 +1,7 @@
+#ifndef __ASM_GENERIC_LIBATA_PORTMAP_H
+#define __ASM_GENERIC_LIBATA_PORTMAP_H
+
+#define ATA_PRIMARY_IRQ(dev) 14
+#define ATA_SECONDARY_IRQ(dev) 15
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/local.h b/libdde-linux26/contrib/include/asm-generic/local.h
new file mode 100644
index 00000000..dbd61507
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/local.h
@@ -0,0 +1,74 @@
+#ifndef _ASM_GENERIC_LOCAL_H
+#define _ASM_GENERIC_LOCAL_H
+
+#include <linux/percpu.h>
+#include <asm/atomic.h>
+#include <asm/types.h>
+
+/*
+ * A signed long type for operations which are atomic for a single CPU.
+ * Usually used in combination with per-cpu variables.
+ *
+ * This is the default implementation, which uses atomic_long_t, which is
+ * rather pointless. The whole point behind local_t is that some processors
+ * can perform atomic adds and subtracts in a manner which is atomic wrt IRQs
+ * running on this CPU. local_t allows exploitation of such capabilities.
+ */
+
+/* Implement in terms of atomics. */
+
+/* Don't use typedef: don't want them to be mixed with atomic_t's. */
+typedef struct
+{
+ atomic_long_t a;
+} local_t;
+
+#define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
+
+#define local_read(l) atomic_long_read(&(l)->a)
+#define local_set(l,i) atomic_long_set((&(l)->a),(i))
+#define local_inc(l) atomic_long_inc(&(l)->a)
+#define local_dec(l) atomic_long_dec(&(l)->a)
+#define local_add(i,l) atomic_long_add((i),(&(l)->a))
+#define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
+
+#define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
+#define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
+#define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
+#define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
+#define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
+#define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
+#define local_inc_return(l) atomic_long_inc_return(&(l)->a)
+
+#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
+#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
+
+/* Non-atomic variants, i.e. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well. */
+#define __local_inc(l) local_set((l), local_read(l) + 1)
+#define __local_dec(l) local_set((l), local_read(l) - 1)
+#define __local_add(i,l) local_set((l), local_read(l) + (i))
+#define __local_sub(i,l) local_set((l), local_read(l) - (i))
+
+/* Use these for per-cpu local_t variables: on some archs they are
+ * much more efficient than these naive implementations. Note they take
+ * a variable (eg. mystruct.foo), not an address.
+ */
+#define cpu_local_read(l) local_read(&__get_cpu_var(l))
+#define cpu_local_set(l, i) local_set(&__get_cpu_var(l), (i))
+#define cpu_local_inc(l) local_inc(&__get_cpu_var(l))
+#define cpu_local_dec(l) local_dec(&__get_cpu_var(l))
+#define cpu_local_add(i, l) local_add((i), &__get_cpu_var(l))
+#define cpu_local_sub(i, l) local_sub((i), &__get_cpu_var(l))
+
+/* Non-atomic increments, i.e. preemption disabled and won't be touched
+ * in interrupt, etc. Some archs can optimize this case well.
+ */
+#define __cpu_local_inc(l) __local_inc(&__get_cpu_var(l))
+#define __cpu_local_dec(l) __local_dec(&__get_cpu_var(l))
+#define __cpu_local_add(i, l) __local_add((i), &__get_cpu_var(l))
+#define __cpu_local_sub(i, l) __local_sub((i), &__get_cpu_var(l))
+
+#endif /* _ASM_GENERIC_LOCAL_H */
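A sketch of the per-cpu counter pattern the comments above describe (hits is a made-up statistic):

    static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);

    /* hot path: IRQ-safe on archs with native local ops */
    cpu_local_inc(hits);

    /* reporting: cross-CPU reads are racy but good enough for stats */
    unsigned long total = 0;
    int cpu;
    for_each_possible_cpu(cpu)
            total += local_read(&per_cpu(hits, cpu));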
diff --git a/libdde-linux26/contrib/include/asm-generic/memory_model.h b/libdde-linux26/contrib/include/asm-generic/memory_model.h
new file mode 100644
index 00000000..4c8d0afa
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/memory_model.h
@@ -0,0 +1,77 @@
+#ifndef __ASM_MEMORY_MODEL_H
+#define __ASM_MEMORY_MODEL_H
+
+#ifndef __ASSEMBLY__
+
+#if defined(CONFIG_FLATMEM)
+
+#ifndef ARCH_PFN_OFFSET
+#define ARCH_PFN_OFFSET (0UL)
+#endif
+
+#elif defined(CONFIG_DISCONTIGMEM)
+
+#ifndef arch_pfn_to_nid
+#define arch_pfn_to_nid(pfn) pfn_to_nid(pfn)
+#endif
+
+#ifndef arch_local_page_offset
+#define arch_local_page_offset(pfn, nid) \
+ ((pfn) - NODE_DATA(nid)->node_start_pfn)
+#endif
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+/*
+ * This header supports three memory models.
+ */
+#if defined(CONFIG_FLATMEM)
+
+#define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))
+#define __page_to_pfn(page) ((unsigned long)((page) - mem_map) + \
+ ARCH_PFN_OFFSET)
+#elif defined(CONFIG_DISCONTIGMEM)
+
+#define __pfn_to_page(pfn) \
+({ unsigned long __pfn = (pfn); \
+ unsigned long __nid = arch_pfn_to_nid(__pfn); \
+ NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
+})
+
+#define __page_to_pfn(pg) \
+({ struct page *__pg = (pg); \
+ struct pglist_data *__pgdat = NODE_DATA(page_to_nid(__pg)); \
+ (unsigned long)(__pg - __pgdat->node_mem_map) + \
+ __pgdat->node_start_pfn; \
+})
+
+#elif defined(CONFIG_SPARSEMEM_VMEMMAP)
+
+/* memmap is virtually contiguous. */
+#define __pfn_to_page(pfn) (vmemmap + (pfn))
+#define __page_to_pfn(page) (unsigned long)((page) - vmemmap)
+
+#elif defined(CONFIG_SPARSEMEM)
+/*
+ * Note: section's mem_map is encoded to reflect its start_pfn.
+ * section[i].section_mem_map == mem_map's address - start_pfn;
+ */
+#define __page_to_pfn(pg) \
+({ struct page *__pg = (pg); \
+ int __sec = page_to_section(__pg); \
+ (unsigned long)(__pg - __section_mem_map_addr(__nr_to_section(__sec))); \
+})
+
+#define __pfn_to_page(pfn) \
+({ unsigned long __pfn = (pfn); \
+ struct mem_section *__sec = __pfn_to_section(__pfn); \
+ __section_mem_map_addr(__sec) + __pfn; \
+})
+#endif /* CONFIG_FLATMEM/DISCONTIGMEM/SPARSEMEM */
+
+#define page_to_pfn __page_to_pfn
+#define pfn_to_page __pfn_to_page
+
+#endif /* __ASSEMBLY__ */
+
+#endif
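Whatever the model, the two macros stay inverses of each other; under FLATMEM the round trip is plain mem_map arithmetic (the pfn value is illustrative):

    unsigned long pfn = 0x1234;
    struct page *pg = pfn_to_page(pfn); /* mem_map + (pfn - ARCH_PFN_OFFSET) */
    BUG_ON(page_to_pfn(pg) != pfn);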
diff --git a/libdde-linux26/contrib/include/asm-generic/mm_hooks.h b/libdde-linux26/contrib/include/asm-generic/mm_hooks.h
new file mode 100644
index 00000000..67dea812
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/mm_hooks.h
@@ -0,0 +1,18 @@
+/*
+ * Define generic no-op hooks for arch_dup_mmap and arch_exit_mmap, to
+ * be included in asm-FOO/mmu_context.h for any arch FOO which doesn't
+ * need to hook these.
+ */
+#ifndef _ASM_GENERIC_MM_HOOKS_H
+#define _ASM_GENERIC_MM_HOOKS_H
+
+static inline void arch_dup_mmap(struct mm_struct *oldmm,
+ struct mm_struct *mm)
+{
+}
+
+static inline void arch_exit_mmap(struct mm_struct *mm)
+{
+}
+
+#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/mman.h b/libdde-linux26/contrib/include/asm-generic/mman.h
new file mode 100644
index 00000000..5e3dde2e
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/mman.h
@@ -0,0 +1,41 @@
+#ifndef _ASM_GENERIC_MMAN_H
+#define _ASM_GENERIC_MMAN_H
+
+/*
+ Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
+ Based on: asm-xxx/mman.h
+*/
+
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_SEM 0x8 /* page may be used for atomic ops */
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x0f /* Mask for type of mapping */
+#define MAP_FIXED 0x10 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x20 /* don't use a file */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_INVALIDATE 2 /* invalidate the caches */
+#define MS_SYNC 4 /* synchronous memory sync */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/mutex-dec.h b/libdde-linux26/contrib/include/asm-generic/mutex-dec.h
new file mode 100644
index 00000000..f104af7c
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/mutex-dec.h
@@ -0,0 +1,90 @@
+/*
+ * include/asm-generic/mutex-dec.h
+ *
+ * Generic implementation of the mutex fastpath, based on atomic
+ * decrement/increment.
+ */
+#ifndef _ASM_GENERIC_MUTEX_DEC_H
+#define _ASM_GENERIC_MUTEX_DEC_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function MUST leave the value lower than
+ * 1 even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if
+ * it wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_dec_return(count) < 0))
+ return fail_fn(count);
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the count from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the count from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ *
+ * If the implementation sets it to a value of lower than 1, then the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_inc_return(count) <= 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 1
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: fallback function
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
+ return 1;
+ return 0;
+}
+
+#endif
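A sketch of how an architecture's mutex code would sit on top of this fastpath (my_mutex and the slowpath body are hypothetical):

    struct my_mutex { atomic_t count; /* 1: free, 0: held, <0: contended */ };

    static void my_lock_slowpath(atomic_t *count)
    {
            /* mark contention, queue on a wait list, reschedule ... */
    }

    static inline void my_mutex_lock(struct my_mutex *lock)
    {
            /* uncontended case: count goes 1 -> 0 and we own the lock */
            __mutex_fastpath_lock(&lock->count, my_lock_slowpath);
    }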
diff --git a/libdde-linux26/contrib/include/asm-generic/mutex-null.h b/libdde-linux26/contrib/include/asm-generic/mutex-null.h
new file mode 100644
index 00000000..e1bbbc72
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/mutex-null.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-generic/mutex-null.h
+ *
+ * Generic implementation of the mutex fastpath, based on NOP :-)
+ *
+ * This is used by the mutex-debugging infrastructure, but it can also
+ * be used by architectures that (for whatever reason) want to use the
+ * spinlock based slowpath.
+ */
+#ifndef _ASM_GENERIC_MUTEX_NULL_H
+#define _ASM_GENERIC_MUTEX_NULL_H
+
+#define __mutex_fastpath_lock(count, fail_fn) fail_fn(count)
+#define __mutex_fastpath_lock_retval(count, fail_fn) fail_fn(count)
+#define __mutex_fastpath_unlock(count, fail_fn) fail_fn(count)
+#define __mutex_fastpath_trylock(count, fail_fn) fail_fn(count)
+#define __mutex_slowpath_needs_to_unlock() 1
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/mutex-xchg.h b/libdde-linux26/contrib/include/asm-generic/mutex-xchg.h
new file mode 100644
index 00000000..580a6d35
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/mutex-xchg.h
@@ -0,0 +1,111 @@
+/*
+ * include/asm-generic/mutex-xchg.h
+ *
+ * Generic implementation of the mutex fastpath, based on xchg().
+ *
+ * NOTE: An xchg based implementation might be less optimal than an atomic
+ * decrement/increment based implementation. If your architecture
+ * has a reasonable atomic dec/inc then you should probably use
+ * asm-generic/mutex-dec.h instead, or you could open-code an
+ * optimized version in asm/mutex.h.
+ */
+#ifndef _ASM_GENERIC_MUTEX_XCHG_H
+#define _ASM_GENERIC_MUTEX_XCHG_H
+
+/**
+ * __mutex_fastpath_lock - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function MUST leave the value lower than 1
+ * even when the "1" assertion wasn't true.
+ */
+static inline void
+__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ fail_fn(count);
+}
+
+/**
+ * __mutex_fastpath_lock_retval - try to take the lock by moving the count
+ * from 1 to a 0 value
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 1
+ *
+ * Change the count from 1 to a value lower than 1, and call <fail_fn> if it
+ * wasn't 1 originally. This function returns 0 if the fastpath succeeds,
+ * or anything the slow path function returns.
+ */
+static inline int
+__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 0) != 1))
+ return fail_fn(count);
+ return 0;
+}
+
+/**
+ * __mutex_fastpath_unlock - try to promote the mutex from 0 to 1
+ * @count: pointer of type atomic_t
+ * @fail_fn: function to call if the original value was not 0
+ *
+ * Try to promote the mutex from 0 to 1. If it wasn't 0, call <fail_fn>.
+ * In the failure case, this function is allowed to either set the value to
+ * 1, or to set it to a value lower than 1.
+ * If the implementation sets it to a value lower than 1, the
+ * __mutex_slowpath_needs_to_unlock() macro needs to return 1, it needs
+ * to return 0 otherwise.
+ */
+static inline void
+__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+{
+ if (unlikely(atomic_xchg(count, 1) != 0))
+ fail_fn(count);
+}
+
+#define __mutex_slowpath_needs_to_unlock() 0
+
+/**
+ * __mutex_fastpath_trylock - try to acquire the mutex, without waiting
+ *
+ * @count: pointer of type atomic_t
+ * @fail_fn: spinlock based trylock implementation
+ *
+ * Change the count from 1 to a value lower than 1, and return 0 (failure)
+ * if it wasn't 1 originally, or return 1 (success) otherwise. This function
+ * MUST leave the value lower than 1 even when the "1" assertion wasn't true.
+ * Additionally, if the value was < 0 originally, this function must not leave
+ * it at 0 on failure.
+ *
+ * If the architecture has no effective trylock variant, it should call the
+ * <fail_fn> spinlock-based trylock variant unconditionally.
+ */
+static inline int
+__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+{
+ int prev = atomic_xchg(count, 0);
+
+ if (unlikely(prev < 0)) {
+ /*
+ * The lock was marked contended so we must restore that
+ * state. If while doing so we get back a prev value of 1
+ * then we just own it.
+ *
+ * [ In the rare case of the mutex going to 1, to 0, to -1
+ * and then back to 0 in this few-instructions window,
+ * this has the potential to trigger the slowpath for the
+ * owner's unlock path needlessly, but that's not a problem
+ * in practice. ]
+ */
+ prev = atomic_xchg(count, prev);
+ if (prev < 0)
+ prev = 0;
+ }
+
+ return prev;
+}
+
+#endif
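
To make the fastpath semantics above concrete, here is a minimal userspace model using C11 atomics; `toy_mutex`, `toy_lock`, and `toy_slowpath` are hypothetical names, and this is a sketch of the xchg protocol, not the kernel's mutex implementation.

#include <stdatomic.h>
#include <stdio.h>

/* Toy model: 1 = unlocked, 0 = locked, <0 = locked with waiters. */
struct toy_mutex { atomic_int count; };

static void toy_slowpath(struct toy_mutex *m)
{
	/* A real slowpath would queue the caller and sleep. */
	printf("slowpath taken, count=%d\n", atomic_load(&m->count));
}

static void toy_lock(struct toy_mutex *m)
{
	/* Mirrors __mutex_fastpath_lock(): the xchg leaves the count
	 * lower than 1 whether or not we actually got the lock. */
	if (atomic_exchange(&m->count, 0) != 1)
		toy_slowpath(m);
}

static void toy_unlock(struct toy_mutex *m)
{
	/* Mirrors __mutex_fastpath_unlock(). */
	if (atomic_exchange(&m->count, 1) != 0)
		toy_slowpath(m);
}

int main(void)
{
	struct toy_mutex m = { 1 };

	toy_lock(&m);		/* fastpath: 1 -> 0, no slowpath */
	toy_lock(&m);		/* already 0: slowpath fires */
	toy_unlock(&m);
	return 0;
}
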
diff --git a/libdde-linux26/contrib/include/asm-generic/page.h b/libdde-linux26/contrib/include/asm-generic/page.h
new file mode 100644
index 00000000..14db733b
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/page.h
@@ -0,0 +1,24 @@
+#ifndef _ASM_GENERIC_PAGE_H
+#define _ASM_GENERIC_PAGE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+/* Pure 2^n version of get_order */
+static __inline__ __attribute_const__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> (PAGE_SHIFT - 1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_PAGE_H */
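
The loop above returns the smallest order such that (1 << order) pages cover `size`. A standalone replica — assuming PAGE_SHIFT = 12, i.e. 4 KiB pages, since the real value comes from the architecture — shows the rounding at the boundaries:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KiB pages */

static int get_order(unsigned long size)
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	printf("%d\n", get_order(4096));	/* 0: exactly one page */
	printf("%d\n", get_order(4097));	/* 1: needs two pages  */
	printf("%d\n", get_order(8192));	/* 1: still order 1    */
	printf("%d\n", get_order(16384));	/* 2                   */
	return 0;
}
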
diff --git a/libdde-linux26/contrib/include/asm-generic/pci-dma-compat.h b/libdde-linux26/contrib/include/asm-generic/pci-dma-compat.h
new file mode 100644
index 00000000..37b37062
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/pci-dma-compat.h
@@ -0,0 +1,107 @@
+/* include this file if the platform implements the dma_ DMA Mapping API
+ * and wants to provide the pci_ DMA Mapping API in terms of it */
+
+#ifndef _ASM_GENERIC_PCI_DMA_COMPAT_H
+#define _ASM_GENERIC_PCI_DMA_COMPAT_H
+
+#include <linux/dma-mapping.h>
+
+/* Note that pci_set_dma_mask isn't here, since it's a public function
+ * exported from drivers/pci; use dma_supported instead. */
+
+static inline int
+pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+{
+ return dma_supported(hwdev == NULL ? NULL : &hwdev->dev, mask);
+}
+
+static inline void *
+pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
+ dma_addr_t *dma_handle)
+{
+ return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC);
+}
+
+static inline void
+pci_free_consistent(struct pci_dev *hwdev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle);
+}
+
+static inline dma_addr_t
+pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
+{
+ return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction);
+}
+
+static inline dma_addr_t
+pci_map_page(struct pci_dev *hwdev, struct page *page,
+ unsigned long offset, size_t size, int direction)
+{
+ return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
+ size_t size, int direction)
+{
+ dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nents, int direction)
+{
+ dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
+ size_t size, int direction)
+{
+ dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline void
+pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
+ int nelems, int direction)
+{
+ dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction);
+}
+
+static inline int
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
+{
+ return dma_mapping_error(&pdev->dev, dma_addr);
+}
+
+#endif
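
A hedged sketch of how a driver consumes this compat layer; `example_rx_setup` and the buffer are hypothetical, and the snippet assumes a kernel build context with <linux/pci.h> available:

/* Sketch only: map a driver buffer for device-to-memory DMA and
 * unmap it again, going through the pci_ compat wrappers above. */
static int example_rx_setup(struct pci_dev *pdev, void *buf, size_t len)
{
	dma_addr_t handle;

	handle = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(pdev, handle))
		return -ENOMEM;

	/* ... program `handle` into the device's DMA registers,
	 * let the hardware fill the buffer ... */

	pci_unmap_single(pdev, handle, len, PCI_DMA_FROMDEVICE);
	return 0;
}
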
diff --git a/libdde-linux26/contrib/include/asm-generic/pci.h b/libdde-linux26/contrib/include/asm-generic/pci.h
new file mode 100644
index 00000000..c36a77d3
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/pci.h
@@ -0,0 +1,55 @@
+/*
+ * linux/include/asm-generic/pci.h
+ *
+ * Copyright (C) 2003 Russell King
+ */
+#ifndef _ASM_GENERIC_PCI_H
+#define _ASM_GENERIC_PCI_H
+
+/**
+ * pcibios_resource_to_bus - convert resource to PCI bus address
+ * @dev: device which owns this resource
+ * @region: converted bus-centric region (start,end)
+ * @res: resource to convert
+ *
+ * Convert a resource to a PCI device bus address or bus window.
+ */
+static inline void
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ region->start = res->start;
+ region->end = res->end;
+}
+
+static inline void
+pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
+ struct pci_bus_region *region)
+{
+ res->start = region->start;
+ res->end = region->end;
+}
+
+static inline struct resource *
+pcibios_select_root(struct pci_dev *pdev, struct resource *res)
+{
+ struct resource *root = NULL;
+
+ if (res->flags & IORESOURCE_IO)
+ root = &ioport_resource;
+ if (res->flags & IORESOURCE_MEM)
+ root = &iomem_resource;
+
+ return root;
+}
+
+#define pcibios_scan_all_fns(a, b) 0
+
+#ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ return channel ? 15 : 14;
+}
+#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+
+#endif
diff --git a/libdde-linux26/contrib/include/asm-generic/percpu.h b/libdde-linux26/contrib/include/asm-generic/percpu.h
new file mode 100644
index 00000000..b0e63c67
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/percpu.h
@@ -0,0 +1,83 @@
+#ifndef _ASM_GENERIC_PERCPU_H_
+#define _ASM_GENERIC_PERCPU_H_
+#include <linux/compiler.h>
+#include <linux/threads.h>
+
+/*
+ * Determine the real variable name from the name visible in the
+ * kernel sources.
+ */
+#define per_cpu_var(var) per_cpu__##var
+
+#ifdef CONFIG_SMP
+
+/*
+ * per_cpu_offset() is the offset that has to be added to a
+ * percpu variable to get to the instance for a certain processor.
+ *
+ * Most arches use the __per_cpu_offset array for those offsets but
+ * some arches have their own ways of determining the offset (x86_64, s390).
+ */
+#ifndef __per_cpu_offset
+extern unsigned long __per_cpu_offset[NR_CPUS];
+
+#define per_cpu_offset(x) (__per_cpu_offset[x])
+#endif
+
+/*
+ * Determine the offset for the currently active processor.
+ * An arch may define __my_cpu_offset to provide a more effective
+ * means of obtaining the offset to the per cpu variables of the
+ * current processor.
+ */
+#ifndef __my_cpu_offset
+#define __my_cpu_offset per_cpu_offset(raw_smp_processor_id())
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define my_cpu_offset per_cpu_offset(smp_processor_id())
+#else
+#define my_cpu_offset __my_cpu_offset
+#endif
+
+/*
+ * Add an offset to a pointer but keep the pointer as is.
+ *
+ * Only S390 provides its own means of moving the pointer.
+ */
+#ifndef SHIFT_PERCPU_PTR
+#define SHIFT_PERCPU_PTR(__p, __offset) RELOC_HIDE((__p), (__offset))
+#endif
+
+/*
+ * A percpu variable may point into a discarded region. The following are
+ * established ways to produce a usable pointer from the percpu variable
+ * offset.
+ */
+#define per_cpu(var, cpu) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), per_cpu_offset(cpu)))
+#define __get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), my_cpu_offset))
+#define __raw_get_cpu_var(var) \
+ (*SHIFT_PERCPU_PTR(&per_cpu_var(var), __my_cpu_offset))
+
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+extern void setup_per_cpu_areas(void);
+#endif
+
+#else /* ! SMP */
+
+#define per_cpu(var, cpu) (*((void)(cpu), &per_cpu_var(var)))
+#define __get_cpu_var(var) per_cpu_var(var)
+#define __raw_get_cpu_var(var) per_cpu_var(var)
+
+#endif /* SMP */
+
+#ifndef PER_CPU_ATTRIBUTES
+#define PER_CPU_ATTRIBUTES
+#endif
+
+#define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
+ __typeof__(type) per_cpu_var(name)
+
+#endif /* _ASM_GENERIC_PERCPU_H_ */
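
As a sketch of typical use (kernel context assumed; `packet_count` is a hypothetical variable, and DEFINE_PER_CPU/get_cpu_var come from <linux/percpu.h>, which builds on this header):

DEFINE_PER_CPU(unsigned long, packet_count);

static void count_packet(void)
{
	/* Safe form: disables preemption around the access. */
	get_cpu_var(packet_count)++;
	put_cpu_var(packet_count);
}

static unsigned long total_packets(void)
{
	unsigned long sum = 0;
	int cpu;

	/* per_cpu() resolves through SHIFT_PERCPU_PTR() on SMP and
	 * collapses to the plain variable on UP. */
	for_each_possible_cpu(cpu)
		sum += per_cpu(packet_count, cpu);
	return sum;
}
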
diff --git a/libdde-linux26/contrib/include/asm-generic/pgtable-nopmd.h b/libdde-linux26/contrib/include/asm-generic/pgtable-nopmd.h
new file mode 100644
index 00000000..a7cdc48e
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/pgtable-nopmd.h
@@ -0,0 +1,69 @@
+#ifndef _PGTABLE_NOPMD_H
+#define _PGTABLE_NOPMD_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm-generic/pgtable-nopud.h>
+
+struct mm_struct;
+
+#define __PAGETABLE_PMD_FOLDED
+
+/*
+ * Having the pmd type consist of a pud gets the size right, and allows
+ * us to conceptually access the pud entry that this pmd is folded into
+ * without casting.
+ */
+typedef struct { pud_t pud; } pmd_t;
+
+#define PMD_SHIFT PUD_SHIFT
+#define PTRS_PER_PMD 1
+#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE-1))
+
+/*
+ * The "pud_xxx()" functions here are trivial for a folded two-level
+ * setup: the pmd is never bad, and a pmd always exists (as it's folded
+ * into the pud entry)
+ */
+static inline int pud_none(pud_t pud) { return 0; }
+static inline int pud_bad(pud_t pud) { return 0; }
+static inline int pud_present(pud_t pud) { return 1; }
+static inline void pud_clear(pud_t *pud) { }
+#define pmd_ERROR(pmd) (pud_ERROR((pmd).pud))
+
+#define pud_populate(mm, pmd, pte) do { } while (0)
+
+/*
+ * (pmds are folded into puds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pud(pudptr, pudval) set_pmd((pmd_t *)(pudptr), (pmd_t) { pudval })
+
+static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
+{
+ return (pmd_t *)pud;
+}
+
+#define pmd_val(x) (pud_val((x).pud))
+#define __pmd(x) ((pmd_t) { __pud(x) } )
+
+#define pud_page(pud) (pmd_page((pmd_t){ pud }))
+#define pud_page_vaddr(pud) (pmd_page_vaddr((pmd_t){ pud }))
+
+/*
+ * allocating and freeing a pmd is trivial: the 1-entry pmd is
+ * inside the pud, so has no extra memory associated with it.
+ */
+#define pmd_alloc_one(mm, address) NULL
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+{
+}
+#define __pmd_free_tlb(tlb, x) do { } while (0)
+
+#undef pmd_addr_end
+#define pmd_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _PGTABLE_NOPMD_H */
diff --git a/libdde-linux26/contrib/include/asm-generic/pgtable-nopud.h b/libdde-linux26/contrib/include/asm-generic/pgtable-nopud.h
new file mode 100644
index 00000000..87cf449a
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/pgtable-nopud.h
@@ -0,0 +1,61 @@
+#ifndef _PGTABLE_NOPUD_H
+#define _PGTABLE_NOPUD_H
+
+#ifndef __ASSEMBLY__
+
+#define __PAGETABLE_PUD_FOLDED
+
+/*
+ * Having the pud type consist of a pgd gets the size right, and allows
+ * us to conceptually access the pgd entry that this pud is folded into
+ * without casting.
+ */
+typedef struct { pgd_t pgd; } pud_t;
+
+#define PUD_SHIFT PGDIR_SHIFT
+#define PTRS_PER_PUD 1
+#define PUD_SIZE (1UL << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE-1))
+
+/*
+ * The "pgd_xxx()" functions here are trivial for a folded two-level
+ * setup: the pud is never bad, and a pud always exists (as it's folded
+ * into the pgd entry)
+ */
+static inline int pgd_none(pgd_t pgd) { return 0; }
+static inline int pgd_bad(pgd_t pgd) { return 0; }
+static inline int pgd_present(pgd_t pgd) { return 1; }
+static inline void pgd_clear(pgd_t *pgd) { }
+#define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
+
+#define pgd_populate(mm, pgd, pud) do { } while (0)
+/*
+ * (puds are folded into pgds so this doesn't actually get called,
+ * but the define is needed for a generic inline function.)
+ */
+#define set_pgd(pgdptr, pgdval) set_pud((pud_t *)(pgdptr), (pud_t) { pgdval })
+
+static inline pud_t * pud_offset(pgd_t * pgd, unsigned long address)
+{
+ return (pud_t *)pgd;
+}
+
+#define pud_val(x) (pgd_val((x).pgd))
+#define __pud(x) ((pud_t) { __pgd(x) } )
+
+#define pgd_page(pgd) (pud_page((pud_t){ pgd }))
+#define pgd_page_vaddr(pgd) (pud_page_vaddr((pud_t){ pgd }))
+
+/*
+ * allocating and freeing a pud is trivial: the 1-entry pud is
+ * inside the pgd, so has no extra memory associated with it.
+ */
+#define pud_alloc_one(mm, address) NULL
+#define pud_free(mm, x) do { } while (0)
+#define __pud_free_tlb(tlb, x) do { } while (0)
+
+#undef pud_addr_end
+#define pud_addr_end(addr, end) (end)
+
+#endif /* __ASSEMBLY__ */
+#endif /* _PGTABLE_NOPUD_H */
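
The payoff of this header and pgtable-nopmd.h above is that generic four-level walking code compiles unchanged on architectures with fewer levels: the folded accessors just cast the enclosing entry. A hedged kernel-style sketch (`lookup_pte` is hypothetical):

/* Sketch: a 4-level lookup that also works when pud/pmd are folded.
 * On a folded configuration no extra memory is touched. */
static pte_t *lookup_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pud = pud_offset(pgd, addr);	/* folded: (pud_t *)pgd */
	if (pud_none(*pud) || pud_bad(*pud))
		return NULL;
	pmd = pmd_offset(pud, addr);	/* folded: (pmd_t *)pud */
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_map(pmd, addr);
}
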
diff --git a/libdde-linux26/contrib/include/asm-generic/pgtable.h b/libdde-linux26/contrib/include/asm-generic/pgtable.h
new file mode 100644
index 00000000..8e6d0ca7
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/pgtable.h
@@ -0,0 +1,344 @@
+#ifndef _ASM_GENERIC_PGTABLE_H
+#define _ASM_GENERIC_PGTABLE_H
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_MMU
+
+#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+/*
+ * Establish a new PTE, setting only the access flags (dirty,
+ * accessed, and writable). Furthermore, we know it always gets set
+ * to a "more permissive" setting, which allows most architectures
+ * to optimize this. We return whether the PTE actually changed, which
+ * in turn instructs the caller to do things like update_mmu_cache.
+ * This used to be done in the caller, but sparc needs minor faults to
+ * force that call on sun4c, so we changed this macro slightly.
+ */
+#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
+({ \
+ int __changed = !pte_same(*(__ptep), __entry); \
+ if (__changed) { \
+ set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
+ flush_tlb_page(__vma, __address); \
+ } \
+ __changed; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+#define ptep_test_and_clear_young(__vma, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ int r = 1; \
+ if (!pte_young(__pte)) \
+ r = 0; \
+ else \
+ set_pte_at((__vma)->vm_mm, (__address), \
+ (__ptep), pte_mkold(__pte)); \
+ r; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+#define ptep_clear_flush_young(__vma, __address, __ptep) \
+({ \
+ int __young; \
+ __young = ptep_test_and_clear_young(__vma, __address, __ptep); \
+ if (__young) \
+ flush_tlb_page(__vma, __address); \
+ __young; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
+#define ptep_get_and_clear(__mm, __address, __ptep) \
+({ \
+ pte_t __pte = *(__ptep); \
+ pte_clear((__mm), (__address), (__ptep)); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
+#define ptep_get_and_clear_full(__mm, __address, __ptep, __full) \
+({ \
+ pte_t __pte; \
+ __pte = ptep_get_and_clear((__mm), (__address), (__ptep)); \
+ __pte; \
+})
+#endif
+
+/*
+ * Some architectures may be able to avoid expensive synchronization
+ * primitives when modifications are made to PTE's which are already
+ * not present, or in the process of an address space destruction.
+ */
+#ifndef __HAVE_ARCH_PTE_CLEAR_NOT_PRESENT_FULL
+#define pte_clear_not_present_full(__mm, __address, __ptep, __full) \
+do { \
+ pte_clear((__mm), (__address), (__ptep)); \
+} while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
+#define ptep_clear_flush(__vma, __address, __ptep) \
+({ \
+ pte_t __pte; \
+ __pte = ptep_get_and_clear((__vma)->vm_mm, __address, __ptep); \
+ flush_tlb_page(__vma, __address); \
+ __pte; \
+})
+#endif
+
+#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
+struct mm_struct;
+static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_pte_at(mm, address, ptep, pte_wrprotect(old_pte));
+}
+#endif
+
+#ifndef __HAVE_ARCH_PTE_SAME
+#define pte_same(A,B) (pte_val(A) == pte_val(B))
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#define page_test_dirty(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
+#define page_clear_dirty(page) do { } while (0)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#define pte_maybe_dirty(pte) pte_dirty(pte)
+#else
+#define pte_maybe_dirty(pte) (1)
+#endif
+
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+#define page_test_and_clear_young(page) (0)
+#endif
+
+#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
+#define pgd_offset_gate(mm, addr) pgd_offset(mm, addr)
+#endif
+
+#ifndef __HAVE_ARCH_MOVE_PTE
+#define move_pte(pte, prot, old_addr, new_addr) (pte)
+#endif
+
+#ifndef pgprot_writecombine
+#define pgprot_writecombine pgprot_noncached
+#endif
+
+/*
+ * When walking page tables, get the address of the next boundary,
+ * or the end address of the range if that comes earlier. Although no
+ * vma end wraps to 0, rounded up __boundary may wrap to 0 throughout.
+ */
+
+#define pgd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PGDIR_SIZE) & PGDIR_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+
+#ifndef pud_addr_end
+#define pud_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PUD_SIZE) & PUD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+#ifndef pmd_addr_end
+#define pmd_addr_end(addr, end) \
+({ unsigned long __boundary = ((addr) + PMD_SIZE) & PMD_MASK; \
+ (__boundary - 1 < (end) - 1)? __boundary: (end); \
+})
+#endif
+
+/*
+ * When walking page tables, we usually want to skip any p?d_none entries;
+ * and any p?d_bad entries - reporting the error before resetting to none.
+ * Do the tests inline, but report and clear the bad entry in mm/memory.c.
+ */
+void pgd_clear_bad(pgd_t *);
+void pud_clear_bad(pud_t *);
+void pmd_clear_bad(pmd_t *);
+
+static inline int pgd_none_or_clear_bad(pgd_t *pgd)
+{
+ if (pgd_none(*pgd))
+ return 1;
+ if (unlikely(pgd_bad(*pgd))) {
+ pgd_clear_bad(pgd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pud_none_or_clear_bad(pud_t *pud)
+{
+ if (pud_none(*pud))
+ return 1;
+ if (unlikely(pud_bad(*pud))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+static inline int pmd_none_or_clear_bad(pmd_t *pmd)
+{
+ if (pmd_none(*pmd))
+ return 1;
+ if (unlikely(pmd_bad(*pmd))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+ return 0;
+}
+
+static inline pte_t __ptep_modify_prot_start(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ /*
+ * Get the current pte state, but zero it out to make it
+ * non-present, preventing the hardware from asynchronously
+ * updating it.
+ */
+ return ptep_get_and_clear(mm, addr, ptep);
+}
+
+static inline void __ptep_modify_prot_commit(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ /*
+ * The pte is non-present, so there's no hardware state to
+ * preserve.
+ */
+ set_pte_at(mm, addr, ptep, pte);
+}
+
+#ifndef __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
+/*
+ * Start a pte protection read-modify-write transaction, which
+ * protects against asynchronous hardware modifications to the pte.
+ * The intention is not to prevent the hardware from making pte
+ * updates, but to prevent any updates it may make from being lost.
+ *
+ * This does not protect against other software modifications of the
+ * pte; the appropriate pte lock must be held over the transaction.
+ *
+ * Note that this interface is intended to be batchable, meaning that
+ * ptep_modify_prot_commit may not actually update the pte, but merely
+ * queue the update to be done at some later time. The update must be
+ * actually committed before the pte lock is released, however.
+ */
+static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep)
+{
+ return __ptep_modify_prot_start(mm, addr, ptep);
+}
+
+/*
+ * Commit an update to a pte, leaving any hardware-controlled bits in
+ * the PTE unmodified.
+ */
+static inline void ptep_modify_prot_commit(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep, pte_t pte)
+{
+ __ptep_modify_prot_commit(mm, addr, ptep, pte);
+}
+#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+#endif /* CONFIG_MMU */
+
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads through
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
+
+/*
+ * A facility to provide batching of the reload of page tables with the
+ * actual context switch code for paravirtualized guests. By convention,
+ * only one of the lazy modes (CPU, MMU) should be active at any given
+ * time, entry should never be nested, and entries and exits should always
+ * be paired. This is for sanity of maintaining and reasoning about the
+ * kernel code.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
+#define arch_enter_lazy_cpu_mode() do {} while (0)
+#define arch_leave_lazy_cpu_mode() do {} while (0)
+#define arch_flush_lazy_cpu_mode() do {} while (0)
+#endif
+
+#ifndef __HAVE_PFNMAP_TRACKING
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_new is called when a _new_ pfn mapping is being established
+ * for physical range indicated by pfn and size.
+ */
+static inline int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long size)
+{
+ return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * track_pfn_vma_copy is called when vma that is covering the pfnmap gets
+ * copied through copy_page_range().
+ */
+static inline int track_pfn_vma_copy(struct vm_area_struct *vma)
+{
+ return 0;
+}
+
+/*
+ * Interface that can be used by architecture code to keep track of
+ * memory type of pfn mappings (remap_pfn_range, vm_insert_pfn)
+ *
+ * untrack_pfn_vma is called while unmapping a pfnmap for a region.
+ * untrack can be called for a specific region indicated by pfn and size or
+ * can be for the entire vma (in which case size can be zero).
+ */
+static inline void untrack_pfn_vma(struct vm_area_struct *vma,
+ unsigned long pfn, unsigned long size)
+{
+}
+#else
+extern int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
+ unsigned long pfn, unsigned long size);
+extern int track_pfn_vma_copy(struct vm_area_struct *vma);
+extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
+ unsigned long size);
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_GENERIC_PGTABLE_H */
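
The p?d_addr_end() and p?d_none_or_clear_bad() helpers exist to support the range-walk pattern used throughout mm/; a hedged sketch of the top level, with the descent and `visit_pte` elided/hypothetical:

/* Sketch of the canonical walk, modeled on mm/memory.c. */
static void walk_range(struct mm_struct *mm,
		       unsigned long addr, unsigned long end)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	unsigned long next;

	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;	/* skip holes, report bad entries */
		/* ... descend the same way with pud_addr_end() and
		 * pmd_addr_end(), then visit_pte() each present pte ... */
	} while (pgd++, addr = next, addr != end);
}
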
diff --git a/libdde-linux26/contrib/include/asm-generic/poll.h b/libdde-linux26/contrib/include/asm-generic/poll.h
new file mode 100644
index 00000000..44bce836
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/poll.h
@@ -0,0 +1,37 @@
+#ifndef __ASM_GENERIC_POLL_H
+#define __ASM_GENERIC_POLL_H
+
+/* These are specified by iBCS2 */
+#define POLLIN 0x0001
+#define POLLPRI 0x0002
+#define POLLOUT 0x0004
+#define POLLERR 0x0008
+#define POLLHUP 0x0010
+#define POLLNVAL 0x0020
+
+/* The rest seem to be more-or-less nonstandard. Check them! */
+#define POLLRDNORM 0x0040
+#define POLLRDBAND 0x0080
+#ifndef POLLWRNORM
+#define POLLWRNORM 0x0100
+#endif
+#ifndef POLLWRBAND
+#define POLLWRBAND 0x0200
+#endif
+#ifndef POLLMSG
+#define POLLMSG 0x0400
+#endif
+#ifndef POLLREMOVE
+#define POLLREMOVE 0x1000
+#endif
+#ifndef POLLRDHUP
+#define POLLRDHUP 0x2000
+#endif
+
+struct pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+#endif /* __ASM_GENERIC_POLL_H */
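
Userspace sees exactly this structure and these flag values through poll(2); a minimal runnable example:

#include <poll.h>
#include <stdio.h>

/* Wait up to 5 seconds for input on stdin. */
int main(void)
{
	struct pollfd pfd = { .fd = 0, .events = POLLIN };
	int n = poll(&pfd, 1, 5000);

	if (n < 0)
		perror("poll");
	else if (n == 0)
		printf("timed out\n");
	else if (pfd.revents & POLLIN)
		printf("stdin is readable\n");
	return 0;
}
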
diff --git a/libdde-linux26/contrib/include/asm-generic/resource.h b/libdde-linux26/contrib/include/asm-generic/resource.h
new file mode 100644
index 00000000..587566f9
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/resource.h
@@ -0,0 +1,94 @@
+#ifndef _ASM_GENERIC_RESOURCE_H
+#define _ASM_GENERIC_RESOURCE_H
+
+/*
+ * Resource limit IDs
+ *
+ * ( Compatibility detail: there are architectures that have
+ * a different rlimit ID order in the 5-9 range and want
+ * to keep that order for binary compatibility. The reasons
+ * are historic and all new rlimits are identical across all
+ * arches. If an arch has such special order for some rlimits
+ * then it defines them prior to including asm-generic/resource.h. )
+ */
+
+#define RLIMIT_CPU 0 /* CPU time in sec */
+#define RLIMIT_FSIZE 1 /* Maximum filesize */
+#define RLIMIT_DATA 2 /* max data size */
+#define RLIMIT_STACK 3 /* max stack size */
+#define RLIMIT_CORE 4 /* max core file size */
+
+#ifndef RLIMIT_RSS
+# define RLIMIT_RSS 5 /* max resident set size */
+#endif
+
+#ifndef RLIMIT_NPROC
+# define RLIMIT_NPROC 6 /* max number of processes */
+#endif
+
+#ifndef RLIMIT_NOFILE
+# define RLIMIT_NOFILE 7 /* max number of open files */
+#endif
+
+#ifndef RLIMIT_MEMLOCK
+# define RLIMIT_MEMLOCK 8 /* max locked-in-memory address space */
+#endif
+
+#ifndef RLIMIT_AS
+# define RLIMIT_AS 9 /* address space limit */
+#endif
+
+#define RLIMIT_LOCKS 10 /* maximum file locks held */
+#define RLIMIT_SIGPENDING 11 /* max number of pending signals */
+#define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */
+#define RLIMIT_NICE 13 /* max nice prio allowed to raise to
+ 0-39 for nice level 19 .. -20 */
+#define RLIMIT_RTPRIO 14 /* maximum realtime priority */
+#define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */
+#define RLIM_NLIMITS 16
+
+/*
+ * SuS says limits have to be unsigned.
+ * Which makes a ton more sense anyway.
+ *
+ * Some architectures override this (for compatibility reasons):
+ */
+#ifndef RLIM_INFINITY
+# define RLIM_INFINITY (~0UL)
+#endif
+
+/*
+ * RLIMIT_STACK default maximum - some architectures override it:
+ */
+#ifndef _STK_LIM_MAX
+# define _STK_LIM_MAX RLIM_INFINITY
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * boot-time rlimit defaults for the init task:
+ */
+#define INIT_RLIMITS \
+{ \
+ [RLIMIT_CPU] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_FSIZE] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_DATA] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_STACK] = { _STK_LIM, _STK_LIM_MAX }, \
+ [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
+ [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_NPROC] = { 0, 0 }, \
+ [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \
+ [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
+ [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
+ [RLIMIT_SIGPENDING] = { 0, 0 }, \
+ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \
+ [RLIMIT_NICE] = { 0, 0 }, \
+ [RLIMIT_RTPRIO] = { 0, 0 }, \
+ [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \
+}
+
+#endif /* __KERNEL__ */
+
+#endif
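
These IDs are what userspace passes to getrlimit(2)/setrlimit(2); for instance:

#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_NOFILE, &rl) == 0)
		printf("open files: soft=%llu hard=%llu\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);

	/* Lowering the soft limit (if it was higher) needs no privilege. */
	rl.rlim_cur = 256;
	if (setrlimit(RLIMIT_NOFILE, &rl) != 0)
		perror("setrlimit");
	return 0;
}
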
diff --git a/libdde-linux26/contrib/include/asm-generic/rtc.h b/libdde-linux26/contrib/include/asm-generic/rtc.h
new file mode 100644
index 00000000..763e3b06
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/rtc.h
@@ -0,0 +1,218 @@
+/*
+ * include/asm-generic/rtc.h
+ *
+ * Author: Tom Rini <trini@mvista.com>
+ *
+ * Based on:
+ * drivers/char/rtc.c
+ *
+ * Please read the COPYING file for all license details.
+ */
+
+#ifndef __ASM_RTC_H__
+#define __ASM_RTC_H__
+
+#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
+#include <linux/bcd.h>
+#include <linux/delay.h>
+
+#define RTC_PIE 0x40 /* periodic interrupt enable */
+#define RTC_AIE 0x20 /* alarm interrupt enable */
+#define RTC_UIE 0x10 /* update-finished interrupt enable */
+
+/* some dummy definitions */
+#define RTC_BATT_BAD 0x100 /* battery bad */
+#define RTC_SQWE 0x08 /* enable square-wave output */
+#define RTC_DM_BINARY 0x04 /* all time/date values are BCD if clear */
+#define RTC_24H 0x02 /* 24 hour mode - else hours bit 7 means pm */
+#define RTC_DST_EN 0x01 /* auto switch DST - works for USA only */
+
+/*
+ * Returns true if a clock update is in progress
+ */
+static inline unsigned char rtc_is_updating(void)
+{
+ unsigned char uip;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+ uip = (CMOS_READ(RTC_FREQ_SELECT) & RTC_UIP);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return uip;
+}
+
+static inline unsigned int __get_rtc_time(struct rtc_time *time)
+{
+ unsigned char ctrl;
+ unsigned long flags;
+
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_year;
+#endif
+
+ /*
+ * read RTC once any update in progress is done. The update
+ * can take just over 2ms. We wait 20ms. There is no need to
+ * poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
+ * If you need to know *exactly* when a second has started, enable
+ * periodic update complete interrupts, (via ioctl) and then
+ * immediately read /dev/rtc which will block until you get the IRQ.
+ * Once the read clears, read the RTC time (again via ioctl). Easy.
+ */
+ if (rtc_is_updating())
+ mdelay(20);
+
+ /*
+ * Only the values that we read from the RTC are set. We leave
+ * tm_wday, tm_yday and tm_isdst untouched. Even though the
+ * RTC has RTC_DAY_OF_WEEK, we ignore it, as it is only updated
+ * by the RTC when initially set to a non-zero value.
+ */
+ spin_lock_irqsave(&rtc_lock, flags);
+ time->tm_sec = CMOS_READ(RTC_SECONDS);
+ time->tm_min = CMOS_READ(RTC_MINUTES);
+ time->tm_hour = CMOS_READ(RTC_HOURS);
+ time->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
+ time->tm_mon = CMOS_READ(RTC_MONTH);
+ time->tm_year = CMOS_READ(RTC_YEAR);
+#ifdef CONFIG_MACH_DECSTATION
+ real_year = CMOS_READ(RTC_DEC_YEAR);
+#endif
+ ctrl = CMOS_READ(RTC_CONTROL);
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
+ {
+ time->tm_sec = bcd2bin(time->tm_sec);
+ time->tm_min = bcd2bin(time->tm_min);
+ time->tm_hour = bcd2bin(time->tm_hour);
+ time->tm_mday = bcd2bin(time->tm_mday);
+ time->tm_mon = bcd2bin(time->tm_mon);
+ time->tm_year = bcd2bin(time->tm_year);
+ }
+
+#ifdef CONFIG_MACH_DECSTATION
+ time->tm_year += real_year - 72;
+#endif
+
+ /*
+ * Account for differences between how the RTC uses the values
+ * and how they are defined in a struct rtc_time.
+ */
+ if (time->tm_year <= 69)
+ time->tm_year += 100;
+
+ time->tm_mon--;
+
+ return RTC_24H;
+}
+
+#ifndef get_rtc_time
+#define get_rtc_time __get_rtc_time
+#endif
+
+/* Set the current date and time in the real time clock. */
+static inline int __set_rtc_time(struct rtc_time *time)
+{
+ unsigned long flags;
+ unsigned char mon, day, hrs, min, sec;
+ unsigned char save_control, save_freq_select;
+ unsigned int yrs;
+#ifdef CONFIG_MACH_DECSTATION
+ unsigned int real_yrs, leap_yr;
+#endif
+
+ yrs = time->tm_year;
+ mon = time->tm_mon + 1; /* tm_mon starts at zero */
+ day = time->tm_mday;
+ hrs = time->tm_hour;
+ min = time->tm_min;
+ sec = time->tm_sec;
+
+ if (yrs > 255) /* They are unsigned */
+ return -EINVAL;
+
+ spin_lock_irqsave(&rtc_lock, flags);
+#ifdef CONFIG_MACH_DECSTATION
+ real_yrs = yrs;
+ leap_yr = ((!((yrs + 1900) % 4) && ((yrs + 1900) % 100)) ||
+ !((yrs + 1900) % 400));
+ yrs = 72;
+
+ /*
+ * We want to keep the year set to 73 until March
+ * for non-leap years, so that Feb, 29th is handled
+ * correctly.
+ */
+ if (!leap_yr && mon < 3) {
+ real_yrs--;
+ yrs = 73;
+ }
+#endif
+ /* These limits and adjustments are independent of
+ * whether the chip is in binary mode or not.
+ */
+ if (yrs > 169) {
+ spin_unlock_irqrestore(&rtc_lock, flags);
+ return -EINVAL;
+ }
+
+ if (yrs >= 100)
+ yrs -= 100;
+
+ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
+ || RTC_ALWAYS_BCD) {
+ sec = bin2bcd(sec);
+ min = bin2bcd(min);
+ hrs = bin2bcd(hrs);
+ day = bin2bcd(day);
+ mon = bin2bcd(mon);
+ yrs = bin2bcd(yrs);
+ }
+
+ save_control = CMOS_READ(RTC_CONTROL);
+ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
+ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
+ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
+
+#ifdef CONFIG_MACH_DECSTATION
+ CMOS_WRITE(real_yrs, RTC_DEC_YEAR);
+#endif
+ CMOS_WRITE(yrs, RTC_YEAR);
+ CMOS_WRITE(mon, RTC_MONTH);
+ CMOS_WRITE(day, RTC_DAY_OF_MONTH);
+ CMOS_WRITE(hrs, RTC_HOURS);
+ CMOS_WRITE(min, RTC_MINUTES);
+ CMOS_WRITE(sec, RTC_SECONDS);
+
+ CMOS_WRITE(save_control, RTC_CONTROL);
+ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
+
+ spin_unlock_irqrestore(&rtc_lock, flags);
+
+ return 0;
+}
+
+#ifndef set_rtc_time
+#define set_rtc_time __set_rtc_time
+#endif
+
+static inline unsigned int get_rtc_ss(void)
+{
+ struct rtc_time h;
+
+ __get_rtc_time(&h);
+ return h.tm_sec;
+}
+
+static inline int get_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+static inline int set_rtc_pll(struct rtc_pll_info *pll)
+{
+ return -EINVAL;
+}
+
+#endif /* __ASM_RTC_H__ */
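
The only non-obvious machinery above is the BCD handling; userspace replicas of the kernel's bcd2bin()/bin2bcd() (matching lib/bcd.c) show the round trip applied to every RTC field unless the chip is in binary mode:

#include <stdio.h>

static unsigned char bcd2bin(unsigned char val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

static unsigned char bin2bcd(unsigned char val)
{
	return ((val / 10) << 4) + val % 10;
}

int main(void)
{
	unsigned char sec = 59;
	unsigned char raw = bin2bcd(sec);	/* 0x59: one decimal digit per nibble */

	printf("59 -> 0x%02x -> %u\n", raw, bcd2bin(raw));
	return 0;
}
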
diff --git a/libdde-linux26/contrib/include/asm-generic/sections.h b/libdde-linux26/contrib/include/asm-generic/sections.h
new file mode 100644
index 00000000..79a7ff92
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/sections.h
@@ -0,0 +1,23 @@
+#ifndef _ASM_GENERIC_SECTIONS_H_
+#define _ASM_GENERIC_SECTIONS_H_
+
+/* References to section boundaries */
+
+extern char _text[], _stext[], _etext[];
+extern char _data[], _sdata[], _edata[];
+extern char __bss_start[], __bss_stop[];
+extern char __init_begin[], __init_end[];
+extern char _sinittext[], _einittext[];
+extern char _end[];
+extern char __per_cpu_start[], __per_cpu_end[];
+extern char __kprobes_text_start[], __kprobes_text_end[];
+extern char __initdata_begin[], __initdata_end[];
+extern char __start_rodata[], __end_rodata[];
+
+/* function descriptor handling (if any). Override
+ * in asm/sections.h */
+#ifndef dereference_function_descriptor
+#define dereference_function_descriptor(p) (p)
+#endif
+
+#endif /* _ASM_GENERIC_SECTIONS_H_ */
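
A sketch of typical use, assuming a kernel context (<linux/kernel.h>, <linux/init.h>, and <asm/sections.h> included); `report_text_size` is hypothetical:

/* Size kernel sections from the boundary symbols declared above. */
static void __init report_text_size(void)
{
	printk(KERN_INFO "kernel text: %lu bytes, rodata: %lu bytes\n",
	       (unsigned long)(_etext - _stext),
	       (unsigned long)(__end_rodata - __start_rodata));
}
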
diff --git a/libdde-linux26/contrib/include/asm-generic/siginfo.h b/libdde-linux26/contrib/include/asm-generic/siginfo.h
new file mode 100644
index 00000000..96957016
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/siginfo.h
@@ -0,0 +1,296 @@
+#ifndef _ASM_GENERIC_SIGINFO_H
+#define _ASM_GENERIC_SIGINFO_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+typedef union sigval {
+ int sival_int;
+ void __user *sival_ptr;
+} sigval_t;
+
+/*
+ * This is the size (including padding) of the part of the
+ * struct siginfo that is before the union.
+ */
+#ifndef __ARCH_SI_PREAMBLE_SIZE
+#define __ARCH_SI_PREAMBLE_SIZE (3 * sizeof(int))
+#endif
+
+#define SI_MAX_SIZE 128
+#ifndef SI_PAD_SIZE
+#define SI_PAD_SIZE ((SI_MAX_SIZE - __ARCH_SI_PREAMBLE_SIZE) / sizeof(int))
+#endif
+
+#ifndef __ARCH_SI_UID_T
+#define __ARCH_SI_UID_T uid_t
+#endif
+
+/*
+ * The default "si_band" type is "long", as specified by POSIX.
+ * However, some architectures want to override this to "int"
+ * for historical compatibility reasons, so we allow that.
+ */
+#ifndef __ARCH_SI_BAND_T
+#define __ARCH_SI_BAND_T long
+#endif
+
+#ifndef HAVE_ARCH_SIGINFO_T
+
+typedef struct siginfo {
+ int si_signo;
+ int si_errno;
+ int si_code;
+
+ union {
+ int _pad[SI_PAD_SIZE];
+
+ /* kill() */
+ struct {
+ pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ } _kill;
+
+ /* POSIX.1b timers */
+ struct {
+ timer_t _tid; /* timer id */
+ int _overrun; /* overrun count */
+ char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
+ sigval_t _sigval; /* same as below */
+ int _sys_private; /* not to be passed to user */
+ } _timer;
+
+ /* POSIX.1b signals */
+ struct {
+ pid_t _pid; /* sender's pid */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ sigval_t _sigval;
+ } _rt;
+
+ /* SIGCHLD */
+ struct {
+ pid_t _pid; /* which child */
+ __ARCH_SI_UID_T _uid; /* sender's uid */
+ int _status; /* exit code */
+ clock_t _utime;
+ clock_t _stime;
+ } _sigchld;
+
+ /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
+ struct {
+ void __user *_addr; /* faulting insn/memory ref. */
+#ifdef __ARCH_SI_TRAPNO
+ int _trapno; /* TRAP # which caused the signal */
+#endif
+ } _sigfault;
+
+ /* SIGPOLL */
+ struct {
+ __ARCH_SI_BAND_T _band; /* POLL_IN, POLL_OUT, POLL_MSG */
+ int _fd;
+ } _sigpoll;
+ } _sifields;
+} siginfo_t;
+
+#endif
+
+/*
+ * How these fields are to be accessed.
+ */
+#define si_pid _sifields._kill._pid
+#define si_uid _sifields._kill._uid
+#define si_tid _sifields._timer._tid
+#define si_overrun _sifields._timer._overrun
+#define si_sys_private _sifields._timer._sys_private
+#define si_status _sifields._sigchld._status
+#define si_utime _sifields._sigchld._utime
+#define si_stime _sifields._sigchld._stime
+#define si_value _sifields._rt._sigval
+#define si_int _sifields._rt._sigval.sival_int
+#define si_ptr _sifields._rt._sigval.sival_ptr
+#define si_addr _sifields._sigfault._addr
+#ifdef __ARCH_SI_TRAPNO
+#define si_trapno _sifields._sigfault._trapno
+#endif
+#define si_band _sifields._sigpoll._band
+#define si_fd _sifields._sigpoll._fd
+
+#ifdef __KERNEL__
+#define __SI_MASK 0xffff0000u
+#define __SI_KILL (0 << 16)
+#define __SI_TIMER (1 << 16)
+#define __SI_POLL (2 << 16)
+#define __SI_FAULT (3 << 16)
+#define __SI_CHLD (4 << 16)
+#define __SI_RT (5 << 16)
+#define __SI_MESGQ (6 << 16)
+#define __SI_CODE(T,N) ((T) | ((N) & 0xffff))
+#else
+#define __SI_KILL 0
+#define __SI_TIMER 0
+#define __SI_POLL 0
+#define __SI_FAULT 0
+#define __SI_CHLD 0
+#define __SI_RT 0
+#define __SI_MESGQ 0
+#define __SI_CODE(T,N) (N)
+#endif
+
+/*
+ * si_code values
+ * Digital reserves positive values for kernel-generated signals.
+ */
+#define SI_USER 0 /* sent by kill, sigsend, raise */
+#define SI_KERNEL 0x80 /* sent by the kernel from somewhere */
+#define SI_QUEUE -1 /* sent by sigqueue */
+#define SI_TIMER __SI_CODE(__SI_TIMER,-2) /* sent by timer expiration */
+#define SI_MESGQ __SI_CODE(__SI_MESGQ,-3) /* sent by real time mesq state change */
+#define SI_ASYNCIO -4 /* sent by AIO completion */
+#define SI_SIGIO -5 /* sent by queued SIGIO */
+#define SI_TKILL -6 /* sent by tkill system call */
+#define SI_DETHREAD -7 /* sent by execve() killing subsidiary threads */
+
+#define SI_FROMUSER(siptr) ((siptr)->si_code <= 0)
+#define SI_FROMKERNEL(siptr) ((siptr)->si_code > 0)
+
+/*
+ * SIGILL si_codes
+ */
+#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
+#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
+#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
+#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
+#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
+#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
+#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
+#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
+#define NSIGILL 8
+
+/*
+ * SIGFPE si_codes
+ */
+#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
+#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
+#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
+#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
+#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
+#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
+#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
+#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
+#define NSIGFPE 8
+
+/*
+ * SIGSEGV si_codes
+ */
+#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
+#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
+#define NSIGSEGV 2
+
+/*
+ * SIGBUS si_codes
+ */
+#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
+#define BUS_ADRERR (__SI_FAULT|2) /* non-existent physical address */
+#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
+#define NSIGBUS 3
+
+/*
+ * SIGTRAP si_codes
+ */
+#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
+#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
+#define TRAP_BRANCH (__SI_FAULT|3) /* process taken branch trap */
+#define TRAP_HWBKPT (__SI_FAULT|4) /* hardware breakpoint/watchpoint */
+#define NSIGTRAP 4
+
+/*
+ * SIGCHLD si_codes
+ */
+#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
+#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
+#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
+#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
+#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
+#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
+#define NSIGCHLD 6
+
+/*
+ * SIGPOLL si_codes
+ */
+#define POLL_IN (__SI_POLL|1) /* data input available */
+#define POLL_OUT (__SI_POLL|2) /* output buffers available */
+#define POLL_MSG (__SI_POLL|3) /* input message available */
+#define POLL_ERR (__SI_POLL|4) /* i/o error */
+#define POLL_PRI (__SI_POLL|5) /* high priority input available */
+#define POLL_HUP (__SI_POLL|6) /* device disconnected */
+#define NSIGPOLL 6
+
+/*
+ * sigevent definitions
+ *
+ * It seems likely that SIGEV_THREAD will have to be handled from
+ * userspace, libpthread transmuting it to SIGEV_SIGNAL, which the
+ * thread manager then catches and does the appropriate nonsense.
+ * However, everything is written out here so as to not get lost.
+ */
+#define SIGEV_SIGNAL 0 /* notify via signal */
+#define SIGEV_NONE 1 /* other notification: meaningless */
+#define SIGEV_THREAD 2 /* deliver via thread creation */
+#define SIGEV_THREAD_ID 4 /* deliver to thread */
+
+/*
+ * This works because the alignment is OK on all current architectures,
+ * but we leave this open to being overridden in the future
+ */
+#ifndef __ARCH_SIGEV_PREAMBLE_SIZE
+#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(int) * 2 + sizeof(sigval_t))
+#endif
+
+#define SIGEV_MAX_SIZE 64
+#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE - __ARCH_SIGEV_PREAMBLE_SIZE) \
+ / sizeof(int))
+
+typedef struct sigevent {
+ sigval_t sigev_value;
+ int sigev_signo;
+ int sigev_notify;
+ union {
+ int _pad[SIGEV_PAD_SIZE];
+ int _tid;
+
+ struct {
+ void (*_function)(sigval_t);
+ void *_attribute; /* really pthread_attr_t */
+ } _sigev_thread;
+ } _sigev_un;
+} sigevent_t;
+
+#define sigev_notify_function _sigev_un._sigev_thread._function
+#define sigev_notify_attributes _sigev_un._sigev_thread._attribute
+#define sigev_notify_thread_id _sigev_un._tid
+
+#ifdef __KERNEL__
+
+struct siginfo;
+void do_schedule_next_timer(struct siginfo *info);
+
+#ifndef HAVE_ARCH_COPY_SIGINFO
+
+#include <linux/string.h>
+
+static inline void copy_siginfo(struct siginfo *to, struct siginfo *from)
+{
+ if (from->si_code < 0)
+ memcpy(to, from, sizeof(*to));
+ else
+ /* _sigchld is currently the largest known union member */
+ memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld));
+}
+
+#endif
+
+extern int copy_siginfo_to_user(struct siginfo __user *to, struct siginfo *from);
+
+#endif /* __KERNEL__ */
+
+#endif
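
The union layout is visible to userspace through SA_SIGINFO handlers; a runnable example that reads the _kill fields via their si_pid/si_uid accessor macros:

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* (printf in a handler is not async-signal-safe; fine for a demo.) */
static void handler(int sig, siginfo_t *info, void *ctx)
{
	(void)sig; (void)ctx;
	printf("from pid %d uid %d, si_code=%d\n",
	       (int)info->si_pid, (int)info->si_uid, info->si_code);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);

	kill(getpid(), SIGUSR1);	/* si_code will be SI_USER */
	return 0;
}
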
diff --git a/libdde-linux26/contrib/include/asm-generic/signal.h b/libdde-linux26/contrib/include/asm-generic/signal.h
new file mode 100644
index 00000000..dae1d872
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/signal.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_GENERIC_SIGNAL_H
+#define __ASM_GENERIC_SIGNAL_H
+
+#include <linux/compiler.h>
+
+#ifndef SIG_BLOCK
+#define SIG_BLOCK 0 /* for blocking signals */
+#endif
+#ifndef SIG_UNBLOCK
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#endif
+#ifndef SIG_SETMASK
+#define SIG_SETMASK 2 /* for setting the signal mask */
+#endif
+
+#ifndef __ASSEMBLY__
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
+
+#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */
+#endif
+
+#endif /* __ASM_GENERIC_SIGNAL_H */
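
SIG_BLOCK/SIG_UNBLOCK/SIG_SETMASK are the `how` values for sigprocmask(2); for example:

#include <signal.h>
#include <stdio.h>

/* Block SIGINT around a critical section, then restore the old mask. */
int main(void)
{
	sigset_t block, old;

	sigemptyset(&block);
	sigaddset(&block, SIGINT);

	sigprocmask(SIG_BLOCK, &block, &old);
	puts("SIGINT blocked: ^C is held pending here");
	sigprocmask(SIG_SETMASK, &old, NULL);	/* restore */
	return 0;
}
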
diff --git a/libdde-linux26/contrib/include/asm-generic/statfs.h b/libdde-linux26/contrib/include/asm-generic/statfs.h
new file mode 100644
index 00000000..6129d680
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/statfs.h
@@ -0,0 +1,82 @@
+#ifndef _GENERIC_STATFS_H
+#define _GENERIC_STATFS_H
+
+#ifndef __KERNEL_STRICT_NAMES
+# include <linux/types.h>
+typedef __kernel_fsid_t fsid_t;
+#endif
+
+/*
+ * Most 64-bit platforms use 'long', while most 32-bit platforms use '__u32'.
+ * Yes, they differ in signedness as well as size.
+ * Special cases can override it for themselves -- except for S390x, which
+ * is just a little too special for us. And MIPS, which I'm not touching
+ * with a 10' pole.
+ */
+#ifndef __statfs_word
+#if BITS_PER_LONG == 64
+#define __statfs_word long
+#else
+#define __statfs_word __u32
+#endif
+#endif
+
+struct statfs {
+ __statfs_word f_type;
+ __statfs_word f_bsize;
+ __statfs_word f_blocks;
+ __statfs_word f_bfree;
+ __statfs_word f_bavail;
+ __statfs_word f_files;
+ __statfs_word f_ffree;
+ __kernel_fsid_t f_fsid;
+ __statfs_word f_namelen;
+ __statfs_word f_frsize;
+ __statfs_word f_spare[5];
+};
+
+/*
+ * ARM needs to avoid the 32-bit padding at the end, for consistency
+ * between EABI and OABI
+ */
+#ifndef ARCH_PACK_STATFS64
+#define ARCH_PACK_STATFS64
+#endif
+
+struct statfs64 {
+ __statfs_word f_type;
+ __statfs_word f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __statfs_word f_namelen;
+ __statfs_word f_frsize;
+ __statfs_word f_spare[5];
+} ARCH_PACK_STATFS64;
+
+/*
+ * IA64 and x86_64 need to avoid the 32-bit padding at the end,
+ * to be compatible with the i386 ABI
+ */
+#ifndef ARCH_PACK_COMPAT_STATFS64
+#define ARCH_PACK_COMPAT_STATFS64
+#endif
+
+struct compat_statfs64 {
+ __u32 f_type;
+ __u32 f_bsize;
+ __u64 f_blocks;
+ __u64 f_bfree;
+ __u64 f_bavail;
+ __u64 f_files;
+ __u64 f_ffree;
+ __kernel_fsid_t f_fsid;
+ __u32 f_namelen;
+ __u32 f_frsize;
+ __u32 f_spare[5];
+} ARCH_PACK_COMPAT_STATFS64;
+
+#endif
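
Userspace reaches this structure through statfs(2); a minimal runnable example:

#include <stdio.h>
#include <sys/vfs.h>	/* glibc exposes statfs(2) here */

int main(void)
{
	struct statfs st;

	if (statfs("/", &st) != 0) {
		perror("statfs");
		return 1;
	}
	printf("block size %lu, free %llu MiB\n",
	       (unsigned long)st.f_bsize,
	       ((unsigned long long)st.f_bavail * st.f_bsize) >> 20);
	return 0;
}
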
diff --git a/libdde-linux26/contrib/include/asm-generic/syscall.h b/libdde-linux26/contrib/include/asm-generic/syscall.h
new file mode 100644
index 00000000..ea8087b5
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/syscall.h
@@ -0,0 +1,141 @@
+/*
+ * Access to user system call parameters and results
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License v.2.
+ *
+ * This file is a stub providing documentation for what functions
+ * asm-ARCH/syscall.h files need to define. Most arch definitions
+ * will be simple inlines.
+ *
+ * All of these functions expect to be called with no locks,
+ * and only when the caller is sure that the task of interest
+ * cannot return to user mode while we are looking at it.
+ */
+
+#ifndef _ASM_SYSCALL_H
+#define _ASM_SYSCALL_H 1
+
+struct task_struct;
+struct pt_regs;
+
+/**
+ * syscall_get_nr - find what system call a task is executing
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * If @task is executing a system call or is at system call
+ * tracing about to attempt one, returns the system call number.
+ * If @task is not executing a system call, i.e. it's blocked
+ * inside the kernel for a fault or signal, returns -1.
+ *
+ * It's only valid to call this when @task is known to be blocked.
+ */
+long syscall_get_nr(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_rollback - roll back registers after an aborted system call
+ * @task: task of interest, must be in system call exit tracing
+ * @regs: task_pt_regs() of @task
+ *
+ * It's only valid to call this when @task is stopped for system
+ * call exit tracing (due to TIF_SYSCALL_TRACE or TIF_SYSCALL_AUDIT),
+ * after tracehook_report_syscall_entry() returned nonzero to prevent
+ * the system call from taking place.
+ *
+ * This rolls back the register state in @regs so it's as if the
+ * system call instruction was a no-op. The registers containing
+ * the system call number and arguments are as they were before the
+ * system call instruction. This may not be the same as what the
+ * register state looked like at system call entry tracing.
+ */
+void syscall_rollback(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_error - check result of traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns 0 if the system call succeeded, or -ERRORCODE if it failed.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_error(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_get_return_value - get the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ *
+ * Returns the return value of the successful system call.
+ * This value is meaningless if syscall_get_error() returned nonzero.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+long syscall_get_return_value(struct task_struct *task, struct pt_regs *regs);
+
+/**
+ * syscall_set_return_value - change the return value of a traced system call
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @error: negative error code, or zero to indicate success
+ * @val: user return value if @error is zero
+ *
+ * This changes the results of the system call that user mode will see.
+ * If @error is zero, the user sees a successful system call with a
+ * return value of @val. If @error is nonzero, it's a negated errno
+ * code; the user sees a failed system call with this errno code.
+ *
+ * It's only valid to call this when @task is stopped for tracing on exit
+ * from a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ */
+void syscall_set_return_value(struct task_struct *task, struct pt_regs *regs,
+ int error, long val);
+
+/**
+ * syscall_get_arguments - extract system call parameter values
+ * @task: task of interest, must be blocked
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array filled with argument values
+ *
+ * Fetches @n arguments to the system call starting with the @i'th argument
+ * (from 0 through 5). Argument @i is stored in @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_get_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n, unsigned long *args);
+
+/**
+ * syscall_set_arguments - change system call parameter value
+ * @task: task of interest, must be in system call entry tracing
+ * @regs: task_pt_regs() of @task
+ * @i: argument index [0,5]
+ * @n: number of arguments; n+i must be [1,6].
+ * @args: array of argument values to store
+ *
+ * Changes @n arguments to the system call starting with the @i'th argument.
+ * Argument @i gets value @args[0], and so on.
+ * An arch inline version is probably optimal when @i and @n are constants.
+ *
+ * It's only valid to call this when @task is stopped for tracing on
+ * entry to a system call, due to %TIF_SYSCALL_TRACE or %TIF_SYSCALL_AUDIT.
+ * It's invalid to call this with @i + @n > 6; we only support system calls
+ * taking up to 6 arguments.
+ */
+void syscall_set_arguments(struct task_struct *task, struct pt_regs *regs,
+ unsigned int i, unsigned int n,
+ const unsigned long *args);
+
+#endif /* _ASM_SYSCALL_H */
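
Since this header only documents what arch code must define, here is a hypothetical sketch of an asm-ARCH/syscall.h for an imaginary register layout; the field names regs->scno and regs->regs[] are invented for illustration:

static inline long syscall_get_nr(struct task_struct *task,
				  struct pt_regs *regs)
{
	return regs->scno;	/* assumed: -1 when not in a syscall */
}

static inline void syscall_get_arguments(struct task_struct *task,
					 struct pt_regs *regs,
					 unsigned int i, unsigned int n,
					 unsigned long *args)
{
	/* Assumed: arguments live contiguously in regs->regs[0..5]. */
	while (n--)
		*args++ = regs->regs[i++];
}
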
diff --git a/libdde-linux26/contrib/include/asm-generic/termios.h b/libdde-linux26/contrib/include/asm-generic/termios.h
new file mode 100644
index 00000000..7d39ecc9
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/termios.h
@@ -0,0 +1,77 @@
+/* termios.h: generic termios/termio user copying/translation
+ */
+
+#ifndef _ASM_GENERIC_TERMIOS_H
+#define _ASM_GENERIC_TERMIOS_H
+
+#include <asm/uaccess.h>
+
+#ifndef __ARCH_TERMIO_GETPUT
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ unsigned short tmp;
+
+ if (get_user(tmp, &termio->c_iflag) < 0)
+ goto fault;
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
+
+ if (get_user(tmp, &termio->c_oflag) < 0)
+ goto fault;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
+
+ if (get_user(tmp, &termio->c_cflag) < 0)
+ goto fault;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
+
+ if (get_user(tmp, &termio->c_lflag) < 0)
+ goto fault;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
+
+ if (get_user(termios->c_line, &termio->c_line) < 0)
+ goto fault;
+
+ if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
+ goto fault;
+
+ return 0;
+
+ fault:
+ return -EFAULT;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
+ put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
+ put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
+ put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
+ put_user(termios->c_line, &termio->c_line) < 0 ||
+ copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifndef user_termios_to_kernel_termios
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#endif
+
+#ifndef kernel_termios_to_user_termios
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#endif
+
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __ARCH_TERMIO_GETPUT */
+
+#endif /* _ASM_GENERIC_TERMIOS_H */
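
These translators back the legacy TCGETA/TCSETA ioctls; the old 16-bit interface is still reachable from userspace (glibc's struct termio lives in <termio.h>, and the example assumes fd 0 is a tty):

#include <stdio.h>
#include <sys/ioctl.h>
#include <termio.h>	/* legacy struct termio compat header */

int main(void)
{
	struct termio t;

	/* The kernel services TCGETA via kernel_termios_to_user_termio(),
	 * truncating each flag word to its low 16 bits. */
	if (ioctl(0, TCGETA, &t) != 0) {
		perror("TCGETA");
		return 1;
	}
	printf("c_lflag=0x%x (ICANON %s)\n", (unsigned)t.c_lflag,
	       (t.c_lflag & ICANON) ? "on" : "off");
	return 0;
}
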
diff --git a/libdde-linux26/contrib/include/asm-generic/tlb.h b/libdde-linux26/contrib/include/asm-generic/tlb.h
new file mode 100644
index 00000000..f490e43a
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/tlb.h
@@ -0,0 +1,148 @@
+/* include/asm-generic/tlb.h
+ *
+ * Generic TLB shootdown code
+ *
+ * Copyright 2001 Red Hat, Inc.
+ * Based on code from mm/memory.c Copyright Linus Torvalds and others.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_GENERIC__TLB_H
+#define _ASM_GENERIC__TLB_H
+
+#include <linux/swap.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * For UP we don't need to worry about TLB flush
+ * and page free order so much.
+ */
+#ifdef CONFIG_SMP
+ #ifdef ARCH_FREE_PTE_NR
+ #define FREE_PTE_NR ARCH_FREE_PTE_NR
+ #else
+ #define FREE_PTE_NR 506
+ #endif
+ #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U)
+#else
+ #define FREE_PTE_NR 1
+ #define tlb_fast_mode(tlb) 1
+#endif
+
+/* struct mmu_gather is an opaque type used by the mm code for passing around
+ * any data needed by arch specific code for tlb_remove_page.
+ */
+struct mmu_gather {
+ struct mm_struct *mm;
+ unsigned int nr; /* set to ~0U means fast mode */
+ unsigned int need_flush;/* Really unmapped some ptes? */
+ unsigned int fullmm; /* non-zero means full mm flush */
+ struct page * pages[FREE_PTE_NR];
+};
+
+/* Users of the generic TLB shootdown code must declare this storage space. */
+DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+
+/* tlb_gather_mmu
+ * Return a pointer to an initialized struct mmu_gather.
+ */
+static inline struct mmu_gather *
+tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
+{
+ struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
+
+ tlb->mm = mm;
+
+ /* Use fast mode if only one CPU is online */
+ tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
+
+ tlb->fullmm = full_mm_flush;
+
+ return tlb;
+}
+
+static inline void
+tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ if (!tlb->need_flush)
+ return;
+ tlb->need_flush = 0;
+ tlb_flush(tlb);
+ if (!tlb_fast_mode(tlb)) {
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ }
+}
+
+/* tlb_finish_mmu
+ * Called at the end of the shootdown operation to free up any resources
+ * that were required.
+ */
+static inline void
+tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
+{
+ tlb_flush_mmu(tlb, start, end);
+
+ /* keep the page table cache within bounds */
+ check_pgt_cache();
+
+ put_cpu_var(mmu_gathers);
+}
+
+/* tlb_remove_page
+ * Must perform the equivalent of __free_pte(pte_get_and_clear(ptep)), while
+ * handling the additional races in SMP caused by other CPUs caching valid
+ * mappings in their TLBs.
+ */
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ tlb->need_flush = 1;
+ if (tlb_fast_mode(tlb)) {
+ free_page_and_swap_cache(page);
+ return;
+ }
+ tlb->pages[tlb->nr++] = page;
+ if (tlb->nr >= FREE_PTE_NR)
+ tlb_flush_mmu(tlb, 0, 0);
+}
+
+/**
+ * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
+ *
+ * Record the fact that PTEs were really unmapped in ->need_flush, so we can
+ * later optimise away the tlb invalidate. This helps when userspace is
+ * unmapping already-unmapped pages, which happens quite a lot.
+ */
+#define tlb_remove_tlb_entry(tlb, ptep, address) \
+ do { \
+ tlb->need_flush = 1; \
+ __tlb_remove_tlb_entry(tlb, ptep, address); \
+ } while (0)
+
+#define pte_free_tlb(tlb, ptep) \
+ do { \
+ tlb->need_flush = 1; \
+ __pte_free_tlb(tlb, ptep); \
+ } while (0)
+
+#ifndef __ARCH_HAS_4LEVEL_HACK
+#define pud_free_tlb(tlb, pudp) \
+ do { \
+ tlb->need_flush = 1; \
+ __pud_free_tlb(tlb, pudp); \
+ } while (0)
+#endif
+
+#define pmd_free_tlb(tlb, pmdp) \
+ do { \
+ tlb->need_flush = 1; \
+ __pmd_free_tlb(tlb, pmdp); \
+ } while (0)
+
+#define tlb_migrate_finish(mm) do {} while (0)
+
+#endif /* _ASM_GENERIC__TLB_H */
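For orientation, a sketch of the canonical shootdown sequence the mm code drives through this API (the real caller is the unmap path in mm/memory.c; unmap_one_page is illustrative only):

    static void unmap_one_page(struct mm_struct *mm, struct page *page,
                               unsigned long start, unsigned long end)
    {
            /* get_cpu_var() inside tlb_gather_mmu() disables preemption;
             * tlb_finish_mmu() re-enables it via put_cpu_var(). */
            struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

            tlb_remove_page(tlb, page);      /* frees now (fast mode) or queues */
            tlb_finish_mmu(tlb, start, end); /* flush TLBs, free queued pages   */
    }

Note that tlb_remove_page() sets need_flush even in fast mode, so tlb_finish_mmu() still performs the tlb_flush() when the page was freed immediately.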
diff --git a/libdde-linux26/contrib/include/asm-generic/topology.h b/libdde-linux26/contrib/include/asm-generic/topology.h
new file mode 100644
index 00000000..0e9e2bc0
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/topology.h
@@ -0,0 +1,82 @@
+/*
+ * linux/include/asm-generic/topology.h
+ *
+ * Written by: Matthew Dobson, IBM Corporation
+ *
+ * Copyright (C) 2002, IBM Corp.
+ *
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
+ * NON INFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Send feedback to <colpatch@us.ibm.com>
+ */
+#ifndef _ASM_GENERIC_TOPOLOGY_H
+#define _ASM_GENERIC_TOPOLOGY_H
+
+#ifndef CONFIG_NUMA
+
+/* Other architectures wishing to use this simple topology API should fill
+ in the below functions as appropriate in their own <asm/topology.h> file. */
+#ifndef cpu_to_node
+#define cpu_to_node(cpu) ((void)(cpu),0)
+#endif
+#ifndef parent_node
+#define parent_node(node) ((void)(node),0)
+#endif
+#ifndef node_to_cpumask
+#define node_to_cpumask(node) ((void)node, cpu_online_map)
+#endif
+#ifndef cpumask_of_node
+#define cpumask_of_node(node) ((void)node, cpu_online_mask)
+#endif
+#ifndef node_to_first_cpu
+#define node_to_first_cpu(node) ((void)(node),0)
+#endif
+#ifndef pcibus_to_node
+#define pcibus_to_node(bus) ((void)(bus), -1)
+#endif
+
+#ifndef pcibus_to_cpumask
+#define pcibus_to_cpumask(bus) (pcibus_to_node(bus) == -1 ? \
+ CPU_MASK_ALL : \
+ node_to_cpumask(pcibus_to_node(bus)) \
+ )
+#endif
+
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+
+#endif /* CONFIG_NUMA */
+
+/*
+ * Returns a pointer to the cpumask for the specified node.
+ * Deprecated: use "const struct cpumask *mask = cpumask_of_node(node)"
+ */
+#ifndef node_to_cpumask_ptr
+
+#define node_to_cpumask_ptr(v, node) \
+ cpumask_t _##v = node_to_cpumask(node); \
+ const cpumask_t *v = &_##v
+
+#define node_to_cpumask_ptr_next(v, node) \
+ _##v = node_to_cpumask(node)
+#endif
+
+#endif /* _ASM_GENERIC_TOPOLOGY_H */
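A small sketch of the fallbacks in action: on a !CONFIG_NUMA build every CPU maps to node 0 and cpumask_of_node() degenerates to cpu_online_mask, so node-aware code like the hypothetical helper below works unmodified.

    static int first_cpu_of_node(int node)
    {
            /* cpumask_first() comes from <linux/cpumask.h>. */
            const struct cpumask *mask = cpumask_of_node(node);

            return cpumask_first(mask);
    }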
diff --git a/libdde-linux26/contrib/include/asm-generic/uaccess.h b/libdde-linux26/contrib/include/asm-generic/uaccess.h
new file mode 100644
index 00000000..549cb3a1
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/uaccess.h
@@ -0,0 +1,26 @@
+#ifndef _ASM_GENERIC_UACCESS_H_
+#define _ASM_GENERIC_UACCESS_H_
+
+/*
+ * This macro should be used instead of __get_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __get_user_unaligned(x, ptr) \
+({ \
+ __typeof__ (*(ptr)) __x; \
+ int __gu_err = __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
+ (x) = __x; __gu_err; \
+})
+
+
+/*
+ * This macro should be used instead of __put_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __put_user_unaligned(x, ptr) \
+({ \
+ __typeof__ (*(ptr)) __x = (x); \
+ __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
+})
+
+#endif /* _ASM_GENERIC_UACCESS_H_ */
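Usage sketch, assuming the macro above (which now evaluates to 0 or -EFAULT, like __get_user()): copying one 64-bit field out of a packed user structure whose alignment is not guaranteed; read_unaligned_u64 is hypothetical.

    static int read_unaligned_u64(u64 *dst, u64 __user *p)
    {
            /* Returns 0 on success or -EFAULT; *dst is assigned in either
             * case, matching the __get_user() convention. */
            return __get_user_unaligned(*dst, p);
    }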
diff --git a/libdde-linux26/contrib/include/asm-generic/vmlinux.lds.h b/libdde-linux26/contrib/include/asm-generic/vmlinux.lds.h
new file mode 100644
index 00000000..c61fab1d
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/vmlinux.lds.h
@@ -0,0 +1,441 @@
+#ifndef LOAD_OFFSET
+#define LOAD_OFFSET 0
+#endif
+
+#ifndef VMLINUX_SYMBOL
+#define VMLINUX_SYMBOL(_sym_) _sym_
+#endif
+
+/* Align . to an 8 byte boundary, which equals the maximum function alignment. */
+#define ALIGN_FUNCTION() . = ALIGN(8)
+
+/* The actual configuration determines whether the init/exit sections
+ * are handled as text/data or can be discarded (which
+ * often happens at runtime).
+ */
+#ifdef CONFIG_HOTPLUG
+#define DEV_KEEP(sec) *(.dev##sec)
+#define DEV_DISCARD(sec)
+#else
+#define DEV_KEEP(sec)
+#define DEV_DISCARD(sec) *(.dev##sec)
+#endif
+
+#ifdef CONFIG_HOTPLUG_CPU
+#define CPU_KEEP(sec) *(.cpu##sec)
+#define CPU_DISCARD(sec)
+#else
+#define CPU_KEEP(sec)
+#define CPU_DISCARD(sec) *(.cpu##sec)
+#endif
+
+#if defined(CONFIG_MEMORY_HOTPLUG)
+#define MEM_KEEP(sec) *(.mem##sec)
+#define MEM_DISCARD(sec)
+#else
+#define MEM_KEEP(sec)
+#define MEM_DISCARD(sec) *(.mem##sec)
+#endif
+
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC() VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+ *(__mcount_loc) \
+ VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#else
+#define MCOUNT_REC()
+#endif
+
+#ifdef CONFIG_TRACE_BRANCH_PROFILING
+#define LIKELY_PROFILE() VMLINUX_SYMBOL(__start_annotated_branch_profile) = .; \
+ *(_ftrace_annotated_branch) \
+ VMLINUX_SYMBOL(__stop_annotated_branch_profile) = .;
+#else
+#define LIKELY_PROFILE()
+#endif
+
+#ifdef CONFIG_PROFILE_ALL_BRANCHES
+#define BRANCH_PROFILE() VMLINUX_SYMBOL(__start_branch_profile) = .; \
+ *(_ftrace_branch) \
+ VMLINUX_SYMBOL(__stop_branch_profile) = .;
+#else
+#define BRANCH_PROFILE()
+#endif
+
+/* .data section */
+#define DATA_DATA \
+ *(.data) \
+ *(.data.init.refok) \
+ *(.ref.data) \
+ DEV_KEEP(init.data) \
+ DEV_KEEP(exit.data) \
+ CPU_KEEP(init.data) \
+ CPU_KEEP(exit.data) \
+ MEM_KEEP(init.data) \
+ MEM_KEEP(exit.data) \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___markers) = .; \
+ *(__markers) \
+ VMLINUX_SYMBOL(__stop___markers) = .; \
+ . = ALIGN(32); \
+ VMLINUX_SYMBOL(__start___tracepoints) = .; \
+ *(__tracepoints) \
+ VMLINUX_SYMBOL(__stop___tracepoints) = .; \
+ LIKELY_PROFILE() \
+ BRANCH_PROFILE()
+
+#define RO_DATA(align) \
+ . = ALIGN((align)); \
+ .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rodata) = .; \
+ *(.rodata) *(.rodata.*) \
+ *(__vermagic) /* Kernel version magic */ \
+ *(__markers_strings) /* Markers: strings */ \
+ *(__tracepoints_strings)/* Tracepoints: strings */ \
+ } \
+ \
+ .rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
+ *(.rodata1) \
+ } \
+ \
+ BUG_TABLE \
+ \
+ /* PCI quirks */ \
+ .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
+ *(.pci_fixup_early) \
+ VMLINUX_SYMBOL(__end_pci_fixups_early) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_header) = .; \
+ *(.pci_fixup_header) \
+ VMLINUX_SYMBOL(__end_pci_fixups_header) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_final) = .; \
+ *(.pci_fixup_final) \
+ VMLINUX_SYMBOL(__end_pci_fixups_final) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_enable) = .; \
+ *(.pci_fixup_enable) \
+ VMLINUX_SYMBOL(__end_pci_fixups_enable) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_resume) = .; \
+ *(.pci_fixup_resume) \
+ VMLINUX_SYMBOL(__end_pci_fixups_resume) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_resume_early) = .; \
+ *(.pci_fixup_resume_early) \
+ VMLINUX_SYMBOL(__end_pci_fixups_resume_early) = .; \
+ VMLINUX_SYMBOL(__start_pci_fixups_suspend) = .; \
+ *(.pci_fixup_suspend) \
+ VMLINUX_SYMBOL(__end_pci_fixups_suspend) = .; \
+ } \
+ \
+ /* Built-in firmware blobs */ \
+ .builtin_fw : AT(ADDR(.builtin_fw) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_builtin_fw) = .; \
+ *(.builtin_fw) \
+ VMLINUX_SYMBOL(__end_builtin_fw) = .; \
+ } \
+ \
+ /* RapidIO route ops */ \
+ .rio_route : AT(ADDR(.rio_route) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_rio_route_ops) = .; \
+ *(.rio_route_ops) \
+ VMLINUX_SYMBOL(__end_rio_route_ops) = .; \
+ } \
+ \
+ TRACEDATA \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab) = .; \
+ *(__ksymtab) \
+ VMLINUX_SYMBOL(__stop___ksymtab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
+ *(__ksymtab_gpl) \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
+ *(__ksymtab_unused) \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
+ *(__ksymtab_unused_gpl) \
+ VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
+ *(__ksymtab_gpl_future) \
+ VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal symbols */ \
+ __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab) = .; \
+ *(__kcrctab) \
+ VMLINUX_SYMBOL(__stop___kcrctab) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only symbols */ \
+ __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
+ *(__kcrctab_gpl) \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: Normal unused symbols */ \
+ __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
+ *(__kcrctab_unused) \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-only unused symbols */ \
+ __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
+ *(__kcrctab_unused_gpl) \
+ VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
+ } \
+ \
+ /* Kernel symbol table: GPL-future-only symbols */ \
+ __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
+ *(__kcrctab_gpl_future) \
+ VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
+ } \
+ \
+ /* Kernel symbol table: strings */ \
+ __ksymtab_strings : AT(ADDR(__ksymtab_strings) - LOAD_OFFSET) { \
+ *(__ksymtab_strings) \
+ } \
+ \
+ /* __*init sections */ \
+ __init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
+ *(.ref.rodata) \
+ MCOUNT_REC() \
+ DEV_KEEP(init.rodata) \
+ DEV_KEEP(exit.rodata) \
+ CPU_KEEP(init.rodata) \
+ CPU_KEEP(exit.rodata) \
+ MEM_KEEP(init.rodata) \
+ MEM_KEEP(exit.rodata) \
+ } \
+ \
+ /* Built-in module parameters. */ \
+ __param : AT(ADDR(__param) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___param) = .; \
+ *(__param) \
+ VMLINUX_SYMBOL(__stop___param) = .; \
+ . = ALIGN((align)); \
+ VMLINUX_SYMBOL(__end_rodata) = .; \
+ } \
+ . = ALIGN((align));
+
+/* RODATA is provided for backward compatibility.
+ * All archs are supposed to use RO_DATA(). */
+#define RODATA RO_DATA(4096)
+
+#define SECURITY_INIT \
+ .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ *(.security_initcall.init) \
+ VMLINUX_SYMBOL(__security_initcall_end) = .; \
+ }
+
+/* .text section. Map to function alignment to avoid address changes
+ * during the second ld pass when generating System.map. */
+#define TEXT_TEXT \
+ ALIGN_FUNCTION(); \
+ *(.text.hot) \
+ *(.text) \
+ *(.ref.text) \
+ *(.text.init.refok) \
+ *(.exit.text.refok) \
+ DEV_KEEP(init.text) \
+ DEV_KEEP(exit.text) \
+ CPU_KEEP(init.text) \
+ CPU_KEEP(exit.text) \
+ MEM_KEEP(init.text) \
+ MEM_KEEP(exit.text) \
+ *(.text.unlikely)
+
+
+/* sched.text is aligned to function alignment to ensure we get the same
+ * address even at the second ld pass when generating System.map. */
+#define SCHED_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__sched_text_start) = .; \
+ *(.sched.text) \
+ VMLINUX_SYMBOL(__sched_text_end) = .;
+
+/* spinlock.text is aligned to function alignment to ensure we get the same
+ * address even at the second ld pass when generating System.map. */
+#define LOCK_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__lock_text_start) = .; \
+ *(.spinlock.text) \
+ VMLINUX_SYMBOL(__lock_text_end) = .;
+
+#define KPROBES_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__kprobes_text_start) = .; \
+ *(.kprobes.text) \
+ VMLINUX_SYMBOL(__kprobes_text_end) = .;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#define IRQENTRY_TEXT \
+ ALIGN_FUNCTION(); \
+ VMLINUX_SYMBOL(__irqentry_text_start) = .; \
+ *(.irqentry.text) \
+ VMLINUX_SYMBOL(__irqentry_text_end) = .;
+#else
+#define IRQENTRY_TEXT
+#endif
+
+/* Section used for early init (in .S files) */
+#define HEAD_TEXT *(.head.text)
+
+/* init and exit section handling */
+#define INIT_DATA \
+ *(.init.data) \
+ DEV_DISCARD(init.data) \
+ DEV_DISCARD(init.rodata) \
+ CPU_DISCARD(init.data) \
+ CPU_DISCARD(init.rodata) \
+ MEM_DISCARD(init.data) \
+ MEM_DISCARD(init.rodata) \
+ /* implement dynamic printk debug */ \
+ VMLINUX_SYMBOL(__start___verbose_strings) = .; \
+ *(__verbose_strings) \
+ VMLINUX_SYMBOL(__stop___verbose_strings) = .; \
+ . = ALIGN(8); \
+ VMLINUX_SYMBOL(__start___verbose) = .; \
+ *(__verbose) \
+ VMLINUX_SYMBOL(__stop___verbose) = .;
+
+#define INIT_TEXT \
+ *(.init.text) \
+ DEV_DISCARD(init.text) \
+ CPU_DISCARD(init.text) \
+ MEM_DISCARD(init.text)
+
+#define EXIT_DATA \
+ *(.exit.data) \
+ DEV_DISCARD(exit.data) \
+ DEV_DISCARD(exit.rodata) \
+ CPU_DISCARD(exit.data) \
+ CPU_DISCARD(exit.rodata) \
+ MEM_DISCARD(exit.data) \
+ MEM_DISCARD(exit.rodata)
+
+#define EXIT_TEXT \
+ *(.exit.text) \
+ DEV_DISCARD(exit.text) \
+ CPU_DISCARD(exit.text) \
+ MEM_DISCARD(exit.text)
+
+ /* DWARF debug sections.
+ Symbols in the DWARF debugging sections are relative to
+ the beginning of the section so we begin them at 0. */
+#define DWARF_DEBUG \
+ /* DWARF 1 */ \
+ .debug 0 : { *(.debug) } \
+ .line 0 : { *(.line) } \
+ /* GNU DWARF 1 extensions */ \
+ .debug_srcinfo 0 : { *(.debug_srcinfo) } \
+ .debug_sfnames 0 : { *(.debug_sfnames) } \
+ /* DWARF 1.1 and DWARF 2 */ \
+ .debug_aranges 0 : { *(.debug_aranges) } \
+ .debug_pubnames 0 : { *(.debug_pubnames) } \
+ /* DWARF 2 */ \
+ .debug_info 0 : { *(.debug_info \
+ .gnu.linkonce.wi.*) } \
+ .debug_abbrev 0 : { *(.debug_abbrev) } \
+ .debug_line 0 : { *(.debug_line) } \
+ .debug_frame 0 : { *(.debug_frame) } \
+ .debug_str 0 : { *(.debug_str) } \
+ .debug_loc 0 : { *(.debug_loc) } \
+ .debug_macinfo 0 : { *(.debug_macinfo) } \
+ /* SGI/MIPS DWARF 2 extensions */ \
+ .debug_weaknames 0 : { *(.debug_weaknames) } \
+ .debug_funcnames 0 : { *(.debug_funcnames) } \
+ .debug_typenames 0 : { *(.debug_typenames) } \
+ .debug_varnames 0 : { *(.debug_varnames) } \
+
+ /* Stabs debugging sections. */
+#define STABS_DEBUG \
+ .stab 0 : { *(.stab) } \
+ .stabstr 0 : { *(.stabstr) } \
+ .stab.excl 0 : { *(.stab.excl) } \
+ .stab.exclstr 0 : { *(.stab.exclstr) } \
+ .stab.index 0 : { *(.stab.index) } \
+ .stab.indexstr 0 : { *(.stab.indexstr) } \
+ .comment 0 : { *(.comment) }
+
+#ifdef CONFIG_GENERIC_BUG
+#define BUG_TABLE \
+ . = ALIGN(8); \
+ __bug_table : AT(ADDR(__bug_table) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___bug_table) = .; \
+ *(__bug_table) \
+ VMLINUX_SYMBOL(__stop___bug_table) = .; \
+ }
+#else
+#define BUG_TABLE
+#endif
+
+#ifdef CONFIG_PM_TRACE
+#define TRACEDATA \
+ . = ALIGN(4); \
+ .tracedata : AT(ADDR(.tracedata) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__tracedata_start) = .; \
+ *(.tracedata) \
+ VMLINUX_SYMBOL(__tracedata_end) = .; \
+ }
+#else
+#define TRACEDATA
+#endif
+
+#define NOTES \
+ .notes : AT(ADDR(.notes) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start_notes) = .; \
+ *(.note.*) \
+ VMLINUX_SYMBOL(__stop_notes) = .; \
+ }
+
+#define INITCALLS \
+ *(.initcallearly.init) \
+ VMLINUX_SYMBOL(__early_initcall_end) = .; \
+ *(.initcall0.init) \
+ *(.initcall0s.init) \
+ *(.initcall1.init) \
+ *(.initcall1s.init) \
+ *(.initcall2.init) \
+ *(.initcall2s.init) \
+ *(.initcall3.init) \
+ *(.initcall3s.init) \
+ *(.initcall4.init) \
+ *(.initcall4s.init) \
+ *(.initcall5.init) \
+ *(.initcall5s.init) \
+ *(.initcallrootfs.init) \
+ *(.initcall6.init) \
+ *(.initcall6s.init) \
+ *(.initcall7.init) \
+ *(.initcall7s.init)
+
+#define PERCPU(align) \
+ . = ALIGN(align); \
+ VMLINUX_SYMBOL(__per_cpu_start) = .; \
+ .data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+ *(.data.percpu.page_aligned) \
+ *(.data.percpu) \
+ *(.data.percpu.shared_aligned) \
+ } \
+ VMLINUX_SYMBOL(__per_cpu_end) = .;
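To show how these macros are consumed, here is a heavily trimmed sketch of an arch vmlinux.lds.S; the load address and section set are illustrative, not from any real architecture.

    #include <asm-generic/vmlinux.lds.h>

    SECTIONS
    {
            . = 0xC0000000 + LOAD_OFFSET;
            .text : AT(ADDR(.text) - LOAD_OFFSET) {
                    HEAD_TEXT
                    TEXT_TEXT
                    SCHED_TEXT
                    LOCK_TEXT
                    KPROBES_TEXT
            }
            RO_DATA(4096)
            .data : AT(ADDR(.data) - LOAD_OFFSET) {
                    DATA_DATA
            }
            NOTES
            STABS_DEBUG
            DWARF_DEBUG
    }

Note that RO_DATA() expands to the whole read-only block defined above, including the ksymtab/kcrctab tables and PCI fixup sections, which is why plain RODATA survives only as a compatibility alias.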
diff --git a/libdde-linux26/contrib/include/asm-generic/xor.h b/libdde-linux26/contrib/include/asm-generic/xor.h
new file mode 100644
index 00000000..aaab875e
--- /dev/null
+++ b/libdde-linux26/contrib/include/asm-generic/xor.h
@@ -0,0 +1,718 @@
+/*
+ * include/asm-generic/xor.h
+ *
+ * Generic optimized RAID-5 checksumming functions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * You should have received a copy of the GNU General Public License
+ * (for example /usr/src/linux/COPYING); if not, write to the Free
+ * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <asm/processor.h>
+
+static void
+xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_32regs_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8;
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+}
+
+static void
+xor_8regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+
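+ /*
+  * "lines" counts all but the final 8-word group: the loop body runs
+  * under a one-group-ahead prefetch, and the trailing "goto once_more"
+  * replays it once for the last group without prefetching past the end
+  * of the buffers.  The same pattern recurs in all *_p_* variants below.
+  */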
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ p1[0] ^= p2[0];
+ p1[1] ^= p2[1];
+ p1[2] ^= p2[2];
+ p1[3] ^= p2[3];
+ p1[4] ^= p2[4];
+ p1[5] ^= p2[5];
+ p1[6] ^= p2[6];
+ p1[7] ^= p2[7];
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0];
+ p1[1] ^= p2[1] ^ p3[1];
+ p1[2] ^= p2[2] ^ p3[2];
+ p1[3] ^= p2[3] ^ p3[3];
+ p1[4] ^= p2[4] ^ p3[4];
+ p1[5] ^= p2[5] ^ p3[5];
+ p1[6] ^= p2[6] ^ p3[6];
+ p1[7] ^= p2[7] ^ p3[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_8regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ p1[0] ^= p2[0] ^ p3[0] ^ p4[0] ^ p5[0];
+ p1[1] ^= p2[1] ^ p3[1] ^ p4[1] ^ p5[1];
+ p1[2] ^= p2[2] ^ p3[2] ^ p4[2] ^ p5[2];
+ p1[3] ^= p2[3] ^ p3[3] ^ p4[3] ^ p5[3];
+ p1[4] ^= p2[4] ^ p3[4] ^ p4[4] ^ p5[4];
+ p1[5] ^= p2[5] ^ p3[5] ^ p4[5] ^ p5[5];
+ p1[6] ^= p2[6] ^ p3[6] ^ p4[6] ^ p5[6];
+ p1[7] ^= p2[7] ^ p3[7] ^ p4[7] ^ p5[7];
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static void
+xor_32regs_p_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
+ unsigned long *p3, unsigned long *p4, unsigned long *p5)
+{
+ long lines = bytes / (sizeof (long)) / 8 - 1;
+
+ prefetchw(p1);
+ prefetch(p2);
+ prefetch(p3);
+ prefetch(p4);
+ prefetch(p5);
+
+ do {
+ register long d0, d1, d2, d3, d4, d5, d6, d7;
+
+ prefetchw(p1+8);
+ prefetch(p2+8);
+ prefetch(p3+8);
+ prefetch(p4+8);
+ prefetch(p5+8);
+ once_more:
+ d0 = p1[0]; /* Pull the stuff into registers */
+ d1 = p1[1]; /* ... in bursts, if possible. */
+ d2 = p1[2];
+ d3 = p1[3];
+ d4 = p1[4];
+ d5 = p1[5];
+ d6 = p1[6];
+ d7 = p1[7];
+ d0 ^= p2[0];
+ d1 ^= p2[1];
+ d2 ^= p2[2];
+ d3 ^= p2[3];
+ d4 ^= p2[4];
+ d5 ^= p2[5];
+ d6 ^= p2[6];
+ d7 ^= p2[7];
+ d0 ^= p3[0];
+ d1 ^= p3[1];
+ d2 ^= p3[2];
+ d3 ^= p3[3];
+ d4 ^= p3[4];
+ d5 ^= p3[5];
+ d6 ^= p3[6];
+ d7 ^= p3[7];
+ d0 ^= p4[0];
+ d1 ^= p4[1];
+ d2 ^= p4[2];
+ d3 ^= p4[3];
+ d4 ^= p4[4];
+ d5 ^= p4[5];
+ d6 ^= p4[6];
+ d7 ^= p4[7];
+ d0 ^= p5[0];
+ d1 ^= p5[1];
+ d2 ^= p5[2];
+ d3 ^= p5[3];
+ d4 ^= p5[4];
+ d5 ^= p5[5];
+ d6 ^= p5[6];
+ d7 ^= p5[7];
+ p1[0] = d0; /* Store the result (in bursts) */
+ p1[1] = d1;
+ p1[2] = d2;
+ p1[3] = d3;
+ p1[4] = d4;
+ p1[5] = d5;
+ p1[6] = d6;
+ p1[7] = d7;
+ p1 += 8;
+ p2 += 8;
+ p3 += 8;
+ p4 += 8;
+ p5 += 8;
+ } while (--lines > 0);
+ if (lines == 0)
+ goto once_more;
+}
+
+static struct xor_block_template xor_block_8regs = {
+ .name = "8regs",
+ .do_2 = xor_8regs_2,
+ .do_3 = xor_8regs_3,
+ .do_4 = xor_8regs_4,
+ .do_5 = xor_8regs_5,
+};
+
+static struct xor_block_template xor_block_32regs = {
+ .name = "32regs",
+ .do_2 = xor_32regs_2,
+ .do_3 = xor_32regs_3,
+ .do_4 = xor_32regs_4,
+ .do_5 = xor_32regs_5,
+};
+
+static struct xor_block_template xor_block_8regs_p = {
+ .name = "8regs_prefetch",
+ .do_2 = xor_8regs_p_2,
+ .do_3 = xor_8regs_p_3,
+ .do_4 = xor_8regs_p_4,
+ .do_5 = xor_8regs_p_5,
+};
+
+static struct xor_block_template xor_block_32regs_p = {
+ .name = "32regs_prefetch",
+ .do_2 = xor_32regs_p_2,
+ .do_3 = xor_32regs_p_3,
+ .do_4 = xor_32regs_p_4,
+ .do_5 = xor_32regs_p_5,
+};
+
+#define XOR_TRY_TEMPLATES \
+ do { \
+ xor_speed(&xor_block_8regs); \
+ xor_speed(&xor_block_8regs_p); \
+ xor_speed(&xor_block_32regs); \
+ xor_speed(&xor_block_32regs_p); \
+ } while (0)
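Finally, a usage sketch: an arch's <asm/xor.h> includes this file and the RAID code benchmarks each template through XOR_TRY_TEMPLATES, picking the fastest. Calling one template directly looks like the hypothetical helper below (struct xor_block_template itself comes from the raid xor header).

    static void xor_two_blocks(unsigned long *dst, unsigned long *src,
                               unsigned long bytes)
    {
            /* bytes must be a non-zero multiple of 8 * sizeof(long) and the
             * buffers must not overlap; the result is written back to dst. */
            xor_block_8regs.do_2(bytes, dst, src);
    }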