summaryrefslogtreecommitdiff
path: root/libdde_linux26/lib/src/arch
diff options
context:
space:
mode:
authorZheng Da <zhengda1936@gmail.com>2009-12-06 05:26:23 +0100
committerZheng Da <zhengda1936@gmail.com>2009-12-06 05:26:23 +0100
commit8a6d48c0542876eb3acfc0970c0ab7872db08d5f (patch)
tree496e78bc728317ea779781b92f897d16936ee231 /libdde_linux26/lib/src/arch
parentb4bffcfcdf3ab7a55d664e9aa5907f88da503f38 (diff)
check in the original version of dde linux26.
Diffstat (limited to 'libdde_linux26/lib/src/arch')
-rw-r--r--libdde_linux26/lib/src/arch/.svn/all-wcprops5
-rw-r--r--libdde_linux26/lib/src/arch/.svn/entries34
-rw-r--r--libdde_linux26/lib/src/arch/.svn/format1
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/all-wcprops155
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/entries878
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/format1
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/cli_sti.c.svn-base66
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/fs.c.svn-base111
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/hw-helpers.c.svn-base12
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/init.c.svn-base33
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/init_task.c.svn-base131
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/inodes.c.svn-base311
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/irq.c.svn-base247
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/kmalloc.c.svn-base199
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/kmem_cache.c.svn-base213
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/local.h.svn-base99
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/mm-helper.c.svn-base45
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/net.c.svn-base36
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/page_alloc.c.svn-base281
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/param.c.svn-base32
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/pci.c.svn-base189
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/power.c.svn-base23
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/process.c.svn-base347
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/res.c.svn-base180
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/sched.c.svn-base155
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/signal.c.svn-base24
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/smp.c.svn-base37
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/softirq.c.svn-base267
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/timer.c.svn-base184
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/vmalloc.c.svn-base30
-rw-r--r--libdde_linux26/lib/src/arch/l4/.svn/text-base/vmstat.c.svn-base34
-rw-r--r--libdde_linux26/lib/src/arch/l4/cli_sti.c66
-rw-r--r--libdde_linux26/lib/src/arch/l4/fs.c111
-rw-r--r--libdde_linux26/lib/src/arch/l4/hw-helpers.c12
-rw-r--r--libdde_linux26/lib/src/arch/l4/init.c33
-rw-r--r--libdde_linux26/lib/src/arch/l4/init_task.c131
-rw-r--r--libdde_linux26/lib/src/arch/l4/inodes.c311
-rw-r--r--libdde_linux26/lib/src/arch/l4/irq.c247
-rw-r--r--libdde_linux26/lib/src/arch/l4/kmalloc.c199
-rw-r--r--libdde_linux26/lib/src/arch/l4/kmem_cache.c213
-rw-r--r--libdde_linux26/lib/src/arch/l4/local.h99
-rw-r--r--libdde_linux26/lib/src/arch/l4/mm-helper.c45
-rw-r--r--libdde_linux26/lib/src/arch/l4/net.c36
-rw-r--r--libdde_linux26/lib/src/arch/l4/page_alloc.c281
-rw-r--r--libdde_linux26/lib/src/arch/l4/param.c32
-rw-r--r--libdde_linux26/lib/src/arch/l4/pci.c189
-rw-r--r--libdde_linux26/lib/src/arch/l4/power.c23
-rw-r--r--libdde_linux26/lib/src/arch/l4/process.c347
-rw-r--r--libdde_linux26/lib/src/arch/l4/res.c180
-rw-r--r--libdde_linux26/lib/src/arch/l4/sched.c155
-rw-r--r--libdde_linux26/lib/src/arch/l4/signal.c24
-rw-r--r--libdde_linux26/lib/src/arch/l4/smp.c37
-rw-r--r--libdde_linux26/lib/src/arch/l4/softirq.c267
-rw-r--r--libdde_linux26/lib/src/arch/l4/timer.c184
-rw-r--r--libdde_linux26/lib/src/arch/l4/vmalloc.c30
-rw-r--r--libdde_linux26/lib/src/arch/l4/vmstat.c34
-rw-r--r--libdde_linux26/lib/src/arch/x86/.svn/all-wcprops5
-rw-r--r--libdde_linux26/lib/src/arch/x86/.svn/entries31
-rw-r--r--libdde_linux26/lib/src/arch/x86/.svn/format1
-rw-r--r--libdde_linux26/lib/src/arch/x86/lib/.svn/all-wcprops11
-rw-r--r--libdde_linux26/lib/src/arch/x86/lib/.svn/entries62
-rw-r--r--libdde_linux26/lib/src/arch/x86/lib/.svn/format1
-rw-r--r--libdde_linux26/lib/src/arch/x86/lib/.svn/text-base/semaphore_32.S.svn-base138
-rw-r--r--libdde_linux26/lib/src/arch/x86/lib/semaphore_32.S138
64 files changed, 8033 insertions, 0 deletions
diff --git a/libdde_linux26/lib/src/arch/.svn/all-wcprops b/libdde_linux26/lib/src/arch/.svn/all-wcprops
new file mode 100644
index 00000000..775206bc
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/.svn/all-wcprops
@@ -0,0 +1,5 @@
+K 25
+svn:wc:ra_dav:version-url
+V 63
+/repos/tudos/!svn/ver/457/trunk/l4/pkg/dde/linux26/lib/src/arch
+END
diff --git a/libdde_linux26/lib/src/arch/.svn/entries b/libdde_linux26/lib/src/arch/.svn/entries
new file mode 100644
index 00000000..b075adca
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/.svn/entries
@@ -0,0 +1,34 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/lib/src/arch
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-23T02:50:17.774710Z
+457
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+l4
+dir
+
+x86
+dir
+
diff --git a/libdde_linux26/lib/src/arch/.svn/format b/libdde_linux26/lib/src/arch/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/all-wcprops b/libdde_linux26/lib/src/arch/l4/.svn/all-wcprops
new file mode 100644
index 00000000..132337d1
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/all-wcprops
@@ -0,0 +1,155 @@
+K 25
+svn:wc:ra_dav:version-url
+V 66
+/repos/tudos/!svn/ver/457/trunk/l4/pkg/dde/linux26/lib/src/arch/l4
+END
+local.h
+K 25
+svn:wc:ra_dav:version-url
+V 74
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/local.h
+END
+smp.c
+K 25
+svn:wc:ra_dav:version-url
+V 72
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/smp.c
+END
+param.c
+K 25
+svn:wc:ra_dav:version-url
+V 74
+/repos/tudos/!svn/ver/240/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/param.c
+END
+init.c
+K 25
+svn:wc:ra_dav:version-url
+V 73
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/init.c
+END
+fs.c
+K 25
+svn:wc:ra_dav:version-url
+V 71
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/fs.c
+END
+pci.c
+K 25
+svn:wc:ra_dav:version-url
+V 72
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/pci.c
+END
+kmem_cache.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/kmem_cache.c
+END
+signal.c
+K 25
+svn:wc:ra_dav:version-url
+V 75
+/repos/tudos/!svn/ver/174/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/signal.c
+END
+hw-helpers.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/hw-helpers.c
+END
+process.c
+K 25
+svn:wc:ra_dav:version-url
+V 76
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/process.c
+END
+vmstat.c
+K 25
+svn:wc:ra_dav:version-url
+V 75
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/vmstat.c
+END
+timer.c
+K 25
+svn:wc:ra_dav:version-url
+V 74
+/repos/tudos/!svn/ver/457/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/timer.c
+END
+inodes.c
+K 25
+svn:wc:ra_dav:version-url
+V 75
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/inodes.c
+END
+kmalloc.c
+K 25
+svn:wc:ra_dav:version-url
+V 76
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/kmalloc.c
+END
+init_task.c
+K 25
+svn:wc:ra_dav:version-url
+V 78
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/init_task.c
+END
+page_alloc.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/page_alloc.c
+END
+net.c
+K 25
+svn:wc:ra_dav:version-url
+V 72
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/net.c
+END
+mm-helper.c
+K 25
+svn:wc:ra_dav:version-url
+V 78
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/mm-helper.c
+END
+cli_sti.c
+K 25
+svn:wc:ra_dav:version-url
+V 76
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/cli_sti.c
+END
+sched.c
+K 25
+svn:wc:ra_dav:version-url
+V 74
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/sched.c
+END
+softirq.c
+K 25
+svn:wc:ra_dav:version-url
+V 76
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/softirq.c
+END
+res.c
+K 25
+svn:wc:ra_dav:version-url
+V 72
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/res.c
+END
+irq.c
+K 25
+svn:wc:ra_dav:version-url
+V 72
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/irq.c
+END
+power.c
+K 25
+svn:wc:ra_dav:version-url
+V 74
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/power.c
+END
+vmalloc.c
+K 25
+svn:wc:ra_dav:version-url
+V 76
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/l4/vmalloc.c
+END
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/entries b/libdde_linux26/lib/src/arch/l4/.svn/entries
new file mode 100644
index 00000000..a4e08f01
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/entries
@@ -0,0 +1,878 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/lib/src/arch/l4
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-23T02:50:17.774710Z
+457
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+local.h
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+29e4e373a5332517fa8d3a54b63a934c
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+3155
+
+smp.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+0baa40739f76a596efe1d2ef99768f59
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+1294
+
+param.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+8b9465dfae207ca0ce9548228914b19f
+2007-11-27T03:55:44.347675Z
+240
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+1039
+
+init.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+c59f682047dbf216f271ba4fb8f962ef
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+713
+
+fs.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+361eeaac7be43ebe64478812bdf3808a
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+2353
+
+pci.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+670757aeca81d5fdbcce07c45902b2f4
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+4732
+
+kmem_cache.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+1d7b5540f6113539b83f4688eb5a320d
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+5429
+
+signal.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+1c1133b1a3dcf504174b892eba60986a
+2007-09-08T19:44:13.897747Z
+174
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+875
+
+hw-helpers.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+7900d76d82fab85c74b0d8baec1baac5
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+170
+
+process.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+e256217d715b25cf56bb937480c82376
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+9859
+
+vmstat.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+3f706a9a494cf0bfd99facee793dd0d5
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+608
+
+timer.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+f13640bc9b9d4520e7b8ec09d3b9e452
+2009-05-23T02:50:17.774710Z
+457
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+3669
+
+inodes.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+8f1f06ea530105b7b1b1b24fd0cb5d00
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+6687
+
+kmalloc.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+2fd70eccfddd108357815aa4bb031354
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+4327
+
+init_task.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+fc20d990328c12f596b836e722781f63
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+3360
+
+page_alloc.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+584a5941cdf1efe2435718993a980ba1
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+6768
+
+net.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+2adc371a98f0fd8fae363e0f70854314
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+1307
+
+mm-helper.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+4b1d4ac41bb0a6ffb8c9bbe6f1ab95b1
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+880
+
+cli_sti.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+8732f061e7ff7d24c42fb8dc9aec718f
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+1284
+
+sched.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+904a23f9a1efa20b904b9294e5c3fe43
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+3206
+
+softirq.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+7564de83f9ac6f983ff7e8346c78bba8
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+5778
+
+res.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+9190bb75985ff0ee2135c7ae47a7800d
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+4275
+
+irq.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+19e10dde42bbbe5176338d96ae8553ba
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+5607
+
+power.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+c108e3ad0b0c0c68015fed4e159c1b53
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+642
+
+vmalloc.c
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+73cceaf52046b2f0d152ad8cfde1685f
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+919
+
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/format b/libdde_linux26/lib/src/arch/l4/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/cli_sti.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/cli_sti.c.svn-base
new file mode 100644
index 00000000..81c4feea
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/cli_sti.c.svn-base
@@ -0,0 +1,66 @@
+#include "local.h"
+
+#include <linux/kernel.h>
+
+/* IRQ lock reference counter */
+static atomic_t _refcnt = ATOMIC_INIT(0);
+
+/* Check whether IRQs are currently disabled.
+ *
+ * This is the case, if flags is greater than 0.
+ */
+
+int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return ((int)flags > 0);
+}
+
+/* Store the current flags state.
+ *
+ * This is done by returning the current refcnt.
+ *
+ * XXX: Up to now, flags was always 0 at this point and
+ * I assume that this is always the case. Prove?
+ */
+unsigned long __raw_local_save_flags(void)
+{
+ return (unsigned long)atomic_read(&_refcnt);
+}
+
+/* Restore IRQ state. */
+void raw_local_irq_restore(unsigned long flags)
+{
+ atomic_set(&_refcnt, flags);
+}
+
+/* Disable IRQs by grabbing the IRQ lock. */
+void raw_local_irq_disable(void)
+{
+ atomic_inc(&_refcnt);
+}
+
+/* Unlock the IRQ lock until refcnt is 0. */
+void raw_local_irq_enable(void)
+{
+ atomic_set(&_refcnt, 0);
+}
+
+
+void raw_safe_halt(void)
+{
+ WARN_UNIMPL;
+}
+
+
+void halt(void)
+{
+ WARN_UNIMPL;
+}
+
+/* These functions are empty for DDE. Every DDE thread is a separate
+ * "virtual" CPU. Therefore there is no need to en/disable bottom halves.
+ */
+void local_bh_disable(void) {}
+void __local_bh_enable(void) {}
+void _local_bh_enable(void) {}
+void local_bh_enable(void) {}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/fs.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/fs.c.svn-base
new file mode 100644
index 00000000..db452949
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/fs.c.svn-base
@@ -0,0 +1,111 @@
+#include "local.h"
+
+#include <linux/fs.h>
+#include <linux/backing-dev.h>
+#include <linux/mount.h>
+
+/*
+ * Some subsystems, such as the blockdev layer, implement their data
+ * hierarchy as a pseudo file system. To not incorporate the complete
+ * Linux VFS implementation, we cut this down to an own one only for
+ * pseudo file systems.
+ */
+static LIST_HEAD(dde_vfs_mounts);
+
+#define MAX_RA_PAGES 1
+
+void default_unplug_io_fn(struct backing_dev_info *bdi, struct page* p)
+{
+}
+
+struct backing_dev_info default_backing_dev_info = {
+ .ra_pages = MAX_RA_PAGES,
+ .state = 0,
+ .capabilities = BDI_CAP_MAP_COPY,
+ .unplug_io_fn = default_unplug_io_fn,
+};
+
+int seq_printf(struct seq_file *m, const char *f, ...)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+int generic_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+
+/**************************************
+ * Filemap stuff *
+ **************************************/
+struct page * find_get_page(struct address_space *mapping, unsigned long offset)
+{
+ WARN_UNIMPL;
+ return NULL;
+}
+
+void unlock_page(struct page *page)
+{
+ WARN_UNIMPL;
+}
+
+int test_set_page_writeback(struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void end_page_writeback(struct page *page)
+{
+ WARN_UNIMPL;
+}
+
+void do_invalidatepage(struct page *page, unsigned long offset)
+{
+ WARN_UNIMPL;
+}
+
+int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+static struct vfsmount *dde_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
+ void *data)
+{
+ struct list_head *pos, *head;
+ int error;
+
+ head = &dde_vfs_mounts;
+ __list_for_each(pos, head) {
+ struct vfsmount *mnt = list_entry(pos, struct vfsmount, next);
+ if (strcmp(name, mnt->name) == 0) {
+ printk("FS type %s already mounted!?\n", name);
+ BUG();
+ return NULL;
+ }
+ }
+
+ struct vfsmount *m = kzalloc(sizeof(*m), GFP_KERNEL);
+ m->fs_type = type;
+ m->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ memcpy(m->name, name, strlen(name) + 1);
+
+ error = type->get_sb(type, flags, name, data, m);
+ BUG_ON(error);
+
+ list_add_tail(&m->next, &dde_vfs_mounts);
+
+ return m;
+}
+
+struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
+{
+ return dde_kern_mount(type, 0, type->name, NULL);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/hw-helpers.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/hw-helpers.c.svn-base
new file mode 100644
index 00000000..555406c9
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/hw-helpers.c.svn-base
@@ -0,0 +1,12 @@
+#include "local.h"
+
+#include <linux/kexec.h>
+
+note_buf_t *crash_notes = NULL;
+
+void touch_nmi_watchdog(void)
+{
+ WARN_UNIMPL;
+}
+
+unsigned long pci_mem_start = 0xABCDABCD;
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/init.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/init.c.svn-base
new file mode 100644
index 00000000..e89ef27f
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/init.c.svn-base
@@ -0,0 +1,33 @@
+#include "local.h"
+
+#include <l4/dde/linux26/dde26.h>
+#include <l4/dde/dde.h>
+
+#define DEBUG_PCI(msg, ...) ddekit_printf( "\033[33m"msg"\033[0m\n", ##__VA_ARGS__)
+
+/* Didn't know where to put this. */
+unsigned long __per_cpu_offset[NR_CPUS];
+
+extern void driver_init(void);
+extern int classes_init(void);
+
+void __init __attribute__((used)) l4dde26_init(void)
+{
+ /* first, initialize DDEKit */
+ ddekit_init();
+
+ l4dde26_kmalloc_init();
+
+ /* Init Linux driver framework before trying to add PCI devs to the bus */
+ driver_init();
+
+ printk("Initialized DDELinux 2.6\n");
+}
+
+void l4dde26_do_initcalls(void)
+{
+ /* finally, let DDEKit perform all the initcalls */
+ ddekit_do_initcalls();
+}
+
+dde_initcall(l4dde26_init);
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/init_task.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/init_task.c.svn-base
new file mode 100644
index 00000000..685373d1
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/init_task.c.svn-base
@@ -0,0 +1,131 @@
+#include "local.h"
+
+//#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+#include <linux/fs.h>
+#include <linux/fdtable.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kernel.h>
+#include <linux/mqueue.h>
+#include <linux/module.h>
+#include <linux/personality.h>
+
+/* init task */
+struct task_struct init_task;
+
+/* From kernel/pid.c */
+#define BITS_PER_PAGE (PAGE_SIZE*8)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+
+/* From init/main.c */
+enum system_states system_state;
+EXPORT_SYMBOL(system_state);
+
+struct fs_struct init_fs = {
+ .count = ATOMIC_INIT(1),
+ .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
+ .umask = 0022,
+};
+
+struct files_struct init_files = {
+ .count = ATOMIC_INIT(1),
+ .fdt = &init_files.fdtab,
+ .fdtab = {
+ .max_fds = NR_OPEN_DEFAULT,
+ .fd = &init_files.fd_array[0],
+ .close_on_exec = (fd_set *)&init_files.close_on_exec_init,
+ .open_fds = (fd_set *)&init_files.open_fds_init,
+ .rcu = RCU_HEAD_INIT,
+ },
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
+};
+
+struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
+union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
+struct group_info init_groups = {.usage = ATOMIC_INIT(2)};
+
+struct user_struct root_user = {
+ .__count = ATOMIC_INIT(1),
+ .processes = ATOMIC_INIT(1),
+ .files = ATOMIC_INIT(0),
+ .sigpending = ATOMIC_INIT(0),
+ .mq_bytes = 0,
+ .locked_shm = 0,
+};
+
+/*
+ * PID-map pages start out as NULL, they get allocated upon
+ * first use and are never deallocated. This way a low pid_max
+ * value does not cause lots of bitmaps to be allocated, but
+ * the scheme scales to up to 4 million PIDs, runtime.
+ */
+struct pid_namespace init_pid_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+ .pidmap = {
+ [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
+ },
+ .last_pid = 0,
+ .level = 0,
+ .child_reaper = &init_task,
+};
+EXPORT_SYMBOL_GPL(init_pid_ns);
+
+struct net init_net __attribute__((weak));
+
+struct nsproxy init_nsproxy = INIT_NSPROXY(init_nsproxy);
+
+struct ipc_namespace init_ipc_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+};
+
+struct user_namespace init_user_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+};
+
+
+struct uts_namespace init_uts_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+ .name = {
+ .sysname = "L4/DDE",
+ .nodename = "",
+ .release = "2.6",
+ .version = "25",
+ .machine = "",
+ .domainname = "",
+ },
+};
+
+struct exec_domain default_exec_domain = {
+ .name = "Linux", /* name */
+ .handler = NULL, /* no signaling! */
+ .pers_low = 0, /* PER_LINUX personality. */
+ .pers_high = 0, /* PER_LINUX personality. */
+ .signal_map = 0, /* Identity map signals. */
+ .signal_invmap = 0, /* - both ways. */
+};
+
+/* copy of the initial task struct */
+struct task_struct init_task = INIT_TASK(init_task);
+/* copy of the initial thread info (which contains init_task) */
+struct thread_info init_thread = INIT_THREAD_INFO(init_task);
+
+long do_no_restart_syscall(struct restart_block *param)
+{
+ return -EINTR;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/inodes.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/inodes.c.svn-base
new file mode 100644
index 00000000..9ef02ed5
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/inodes.c.svn-base
@@ -0,0 +1,311 @@
+/** lib/src/arch/l4/inodes.c
+ *
+ * Assorted dummies implementing inode and superblock access functions,
+ * which are used by the block layer stuff, but not needed in DDE_Linux.
+ */
+
+#include "local.h"
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+
+/*
+ * Linux' global list of all super blocks.
+ */
+LIST_HEAD(super_blocks);
+
+/**********************************
+ * Inode stuff *
+ **********************************/
+
+struct inode* new_inode(struct super_block *sb)
+{
+ if (sb->s_op->alloc_inode)
+ return sb->s_op->alloc_inode(sb);
+
+ return kzalloc(sizeof(struct inode), GFP_KERNEL);
+}
+
+void __mark_inode_dirty(struct inode *inode, int flags)
+{
+ WARN_UNIMPL;
+}
+
+void iput(struct inode *inode)
+{
+ WARN_UNIMPL;
+}
+
+void generic_delete_inode(struct inode *inode)
+{
+ WARN_UNIMPL;
+}
+
+int invalidate_inodes(struct super_block * sb)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+{
+ WARN_UNIMPL;
+}
+
+void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
+{
+ WARN_UNIMPL;
+}
+
+/**********************************
+ * Superblock stuff *
+ **********************************/
+
+struct super_block * get_super(struct block_device *bdev)
+{
+ WARN_UNIMPL;
+ return NULL;
+}
+
+int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void kill_anon_super(struct super_block *sb)
+{
+ WARN_UNIMPL;
+}
+
+void shrink_dcache_sb(struct super_block * sb)
+{
+ WARN_UNIMPL;
+}
+
+void drop_super(struct super_block *sb)
+{
+ WARN_UNIMPL;
+}
+
+struct inode_operations empty_iops = { };
+struct file_operations empty_fops = { };
+
+/**! Alloc and init a new inode.
+ *
+ * Basically stolen from linux/fs/inode.c:alloc_inode()
+ */
+static struct inode *dde_alloc_inode(struct super_block *sb)
+{
+ struct inode *inode;
+
+ if (sb->s_op->alloc_inode)
+ inode = sb->s_op->alloc_inode(sb);
+ else
+ inode = kzalloc(sizeof(*inode), GFP_KERNEL);
+
+ if (inode) {
+ inode->i_sb = sb;
+ inode->i_blkbits = sb->s_blocksize_bits;
+ inode->i_flags = 0;
+ atomic_set(&inode->i_count, 1);
+ inode->i_op = &empty_iops;
+ inode->i_fop = &empty_fops;
+ inode->i_nlink = 1;
+ atomic_set(&inode->i_writecount, 0);
+ inode->i_size = 0;
+ inode->i_blocks = 0;
+ inode->i_bytes = 0;
+ inode->i_generation = 0;
+ inode->i_pipe = NULL;
+ inode->i_bdev = NULL;
+ inode->i_cdev = NULL;
+ inode->i_rdev = 0;
+ inode->dirtied_when = 0;
+ inode->i_private = NULL;
+ }
+
+ return inode;
+}
+
+
+void __iget(struct inode *inode)
+{
+ atomic_inc(&inode->i_count);
+}
+
+
+static struct inode *dde_new_inode(struct super_block *sb, struct list_head *head,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
+{
+ struct inode *ret = dde_alloc_inode(sb);
+ int err = 0;
+
+ if (set)
+ err = set(ret, data);
+
+ BUG_ON(err);
+
+ __iget(ret);
+ ret->i_state = I_LOCK|I_NEW;
+
+ list_add_tail(&ret->i_sb_list, &sb->s_inodes);
+
+ return ret;
+}
+
+
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
+{
+ struct inode *inode = NULL;
+ struct list_head *p;
+
+ list_for_each(p, &sb->s_inodes) {
+ struct inode *i = list_entry(p, struct inode, i_sb_list);
+ if (test) {
+ if (!test(i, data)) {
+ DEBUG_MSG("test false");
+ continue;
+ }
+ else {
+ inode = i;
+ break;
+ }
+ }
+ }
+
+ if (inode)
+ return inode;
+
+ return dde_new_inode(sb, &sb->s_inodes, test, set, data);
+}
+
+void unlock_new_inode(struct inode *inode)
+{
+ inode->i_state &= ~(I_LOCK | I_NEW);
+ wake_up_bit(&inode->i_state, __I_LOCK);
+}
+
+struct super_block *sget(struct file_system_type *type,
+ int (*test)(struct super_block *, void*),
+ int (*set)(struct super_block *, void*),
+ void *data)
+{
+ struct super_block *s = NULL;
+ struct list_head *p;
+ int err;
+
+ if (test) {
+ list_for_each(p, &type->fs_supers) {
+ struct super_block *block = list_entry(p,
+ struct super_block,
+ s_instances);
+ if (!test(block, data))
+ continue;
+ return block;
+ }
+ }
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ BUG_ON(!s);
+
+ INIT_LIST_HEAD(&s->s_dirty);
+ INIT_LIST_HEAD(&s->s_io);
+ INIT_LIST_HEAD(&s->s_files);
+ INIT_LIST_HEAD(&s->s_instances);
+ INIT_HLIST_HEAD(&s->s_anon);
+ INIT_LIST_HEAD(&s->s_inodes);
+ init_rwsem(&s->s_umount);
+ mutex_init(&s->s_lock);
+ lockdep_set_class(&s->s_umount, &type->s_umount_key);
+ /*
+ * The locking rules for s_lock are up to the
+ * filesystem. For example ext3fs has different
+ * lock ordering than usbfs:
+ */
+ lockdep_set_class(&s->s_lock, &type->s_lock_key);
+ down_write(&s->s_umount);
+ s->s_count = S_BIAS;
+ atomic_set(&s->s_active, 1);
+ mutex_init(&s->s_vfs_rename_mutex);
+ mutex_init(&s->s_dquot.dqio_mutex);
+ mutex_init(&s->s_dquot.dqonoff_mutex);
+ init_rwsem(&s->s_dquot.dqptr_sem);
+ init_waitqueue_head(&s->s_wait_unfrozen);
+ s->s_maxbytes = MAX_NON_LFS;
+#if 0
+ s->dq_op = sb_dquot_ops;
+ s->s_qcop = sb_quotactl_ops;
+ s->s_op = &default_op;
+#endif
+ s->s_time_gran = 1000000000;
+
+ err = set(s, data);
+ BUG_ON(err);
+
+ s->s_type = type;
+ strlcpy(s->s_id, type->name, sizeof(s->s_id));
+ list_add_tail(&s->s_list, &super_blocks);
+ list_add(&s->s_instances, &type->fs_supers);
+ __module_get(type->owner);
+ return s;
+}
+
+int set_anon_super(struct super_block *s, void *data)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+int get_sb_pseudo(struct file_system_type *fs_type, char *name,
+ const struct super_operations *ops, unsigned long magic,
+ struct vfsmount *mnt)
+{
+ struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
+ struct super_operations default_ops = {};
+ struct inode *root = NULL;
+ struct dentry *dentry = NULL;
+ struct qstr d_name = {.name = name, .len = strlen(name)};
+
+ BUG_ON(IS_ERR(s));
+
+ s->s_flags = MS_NOUSER;
+ s->s_maxbytes = ~0ULL;
+ s->s_blocksize = 1024;
+ s->s_blocksize_bits = 10;
+ s->s_magic = magic;
+ s->s_op = ops ? ops : &default_ops;
+ s->s_time_gran = 1;
+ root = new_inode(s);
+
+ BUG_ON(!root);
+
+ root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+ root->i_uid = root->i_gid = 0;
+#if 0
+ root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+ dentry = d_alloc(NULL, &d_name);
+ dentry->d_sb = s;
+ dentry->d_parent = dentry;
+ d_instantiate(dentry, root);
+#endif
+ s->s_root = dentry;
+ s->s_flags |= MS_ACTIVE;
+
+ mnt->mnt_sb = s;
+ mnt->mnt_root = dget(s->s_root);
+
+ DEBUG_MSG("root mnt sb @ %p", mnt->mnt_sb);
+
+ return 0;
+}
+
+void inode_init_once(struct inode *inode)
+{
+ WARN_UNIMPL;
+}
+
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/irq.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/irq.c.svn-base
new file mode 100644
index 00000000..0e565e54
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/irq.c.svn-base
@@ -0,0 +1,247 @@
+/*
+ * \brief Hardware-interrupt support
+ * \author Christian Helmuth <ch12@os.inf.tu-dresden.de>
+ * \date 2007-02-12
+ *
+ *
+ *
+ * XXX Consider support for IRQ_HANDLED and friends (linux/irqreturn.h)
+ */
+
+/* Linux */
+#include <linux/interrupt.h>
+#include <linux/string.h> /* memset() */
+
+/* DDEKit */
+#include <l4/dde/ddekit/interrupt.h>
+#include <l4/dde/ddekit/memory.h>
+
+/* local */
+#include "dde26.h"
+#include "local.h"
+
+/* dummy */
+irq_cpustat_t irq_stat[CONFIG_NR_CPUS];
+
+/**
+ * IRQ handling data
+ */
+static struct dde_irq
+{
+ unsigned irq; /* IRQ number */
+ unsigned count; /* usage count */
+ int shared; /* shared IRQ */
+ struct ddekit_thread *thread; /* DDEKit interrupt thread */
+ struct irqaction *action; /* Linux IRQ action */
+
+ struct dde_irq *next; /* next DDE IRQ */
+} *used_irqs;
+
+
+static void irq_thread_init(void *p) {
+ l4dde26_process_add_worker(); }
+
+
+extern ddekit_sem_t *dde_softirq_sem;
+static void irq_handler(void *arg)
+{
+ struct dde_irq *irq = arg;
+ struct irqaction *action;
+
+#if 0
+ DEBUG_MSG("irq 0x%x", irq->irq);
+#endif
+ /* interrupt occurred - call all handlers */
+ for (action = irq->action; action; action = action->next) {
+ irqreturn_t r = action->handler(action->irq, action->dev_id);
+#if 0
+ DEBUG_MSG("return: %s", r == IRQ_HANDLED ? "IRQ_HANDLED" : r == IRQ_NONE ? "IRQ_NONE" : "??");
+#endif
+ }
+
+ /* upon return we check for pending soft irqs */
+ if (local_softirq_pending())
+ ddekit_sem_up(dde_softirq_sem);
+}
+
+
+/*****************************
+ ** IRQ handler bookkeeping **
+ *****************************/
+
+/**
+ * Claim IRQ
+ *
+ * \return usage counter or negative error code
+ *
+ * FIXME list locking
+ * FIXME are there more races?
+ */
+static int claim_irq(struct irqaction *action)
+{
+ int shared = action->flags & IRQF_SHARED ? 1 : 0;
+ struct dde_irq *irq;
+
+ /* check if IRQ already used */
+ for (irq = used_irqs; irq; irq = irq->next)
+ if (irq->irq == action->irq) break;
+
+ /* we have to setup IRQ handling */
+ if (!irq) {
+ /* allocate and initialize new descriptor */
+ irq = ddekit_simple_malloc(sizeof(*irq));
+ if (!irq) return -ENOMEM;
+ memset(irq, 0, sizeof(*irq));
+
+ irq->irq = action->irq;
+ irq->shared = shared;
+ irq->next = used_irqs;
+ used_irqs = irq;
+
+ /* attach to interrupt */
+ irq->thread = ddekit_interrupt_attach(irq->irq,
+ irq->shared,
+ irq_thread_init,
+ irq_handler,
+ (void *)irq);
+ if (!irq->thread) {
+ ddekit_simple_free(irq);
+ return -EBUSY;
+ }
+ }
+
+ /* does descriptor allow our new handler? */
+ if ((!irq->shared || !shared) && irq->action) return -EBUSY;
+
+ /* add handler */
+ irq->count++;
+ action->next = irq->action;
+ irq->action = action;
+
+ return irq->count;
+}
+
+
+/**
+ * Free previously claimed IRQ
+ *
+ * \return usage counter or negative error code
+ */
+static struct irqaction *release_irq(unsigned irq_num, void *dev_id)
+{
+ struct dde_irq *prev_irq, *irq;
+
+ /* check if IRQ already used */
+ for (prev_irq = 0, irq = used_irqs; irq;
+ prev_irq = irq, irq = irq->next)
+ if (irq->irq == irq_num) break;
+
+ if (!irq) return 0;
+
+ struct irqaction *prev_action, *action;
+
+ for (prev_action = 0, action = irq->action; action;
+ prev_action = action, action = action->next)
+ if (action->dev_id == dev_id) break;
+
+ if (!action) return 0;
+
+ /* dequeue action from irq */
+ if (prev_action)
+ prev_action->next = action->next;
+ else
+ irq->action = action->next;
+
+ /* dequeue irq from used_irqs list and free structure,
+ if no more actions available */
+ if (!irq->action) {
+ if (prev_irq)
+ prev_irq->next = irq->next;
+ else
+ used_irqs = irq->next;
+
+ /* detach from interrupt */
+ ddekit_interrupt_detach(irq->irq);
+
+ ddekit_simple_free(irq);
+ }
+
+ return action;
+}
+
+
+/***************
+ ** Linux API **
+ ***************/
+
+/**
+ * Request interrupt
+ *
+ * \param irq interrupt number
+ * \param handler interrupt handler -> top half
+ * \param flags interrupt handling flags (SA_SHIRQ, ...)
+ * \param dev_name device name
+ * \param dev_id cookie passed back to handler
+ *
+ * \return 0 on success; error code otherwise
+ *
+ * \todo FIXME consider locking!
+ */
+int request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *dev_name, void *dev_id)
+{
+ if (!handler) return -EINVAL;
+
+ /* facilitate Linux irqaction for this handler */
+ struct irqaction *irq_action = ddekit_simple_malloc(sizeof(*irq_action));
+ if (!irq_action) return -ENOMEM;
+ memset(irq_action, 0, sizeof(*irq_action));
+
+ irq_action->handler = handler;
+ irq_action->flags = flags;
+ irq_action->name = dev_name;
+ irq_action->dev_id = dev_id;
+ irq_action->irq = irq;
+
+ /* attach to IRQ */
+ int err = claim_irq(irq_action);
+ if (err < 0) return err;
+
+ return 0;
+}
+
+/** Release Interrupt
+ * \ingroup mod_irq
+ *
+ * \param irq interrupt number
+ * \param dev_id cookie passed back to handler
+ *
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction *irq_action = release_irq(irq, dev_id);
+
+ if (irq_action)
+ ddekit_simple_free(irq_action);
+}
+
+void disable_irq(unsigned int irq)
+{
+ ddekit_interrupt_disable(irq);
+}
+
+void disable_irq_nosync(unsigned int irq)
+{
+ /*
+ * Note:
+ * In contrast to the _nosync semantics, DDEKit's
+ * disable definitely waits until a currently executed
+ * IRQ handler terminates.
+ */
+ ddekit_interrupt_disable(irq);
+}
+
+void enable_irq(unsigned int irq)
+{
+ ddekit_interrupt_enable(irq);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/kmalloc.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/kmalloc.c.svn-base
new file mode 100644
index 00000000..065c13c7
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/kmalloc.c.svn-base
@@ -0,0 +1,199 @@
+/*
+ * \brief kmalloc() implementation
+ * \author Christian Helmuth <ch12@os.inf.tu-dresden.de>
+ * \date 2007-01-24
+ *
+ * In Linux 2.6 this resides in mm/slab.c.
+ *
+ * This implementation of kmalloc() stays with Linux's and uses kmem_caches for
+ * some power of two bytes. For larger allocations ddekit_large_malloc() is
+ * used. This way, we optimize for speed and potentially waste memory
+ * resources.
+ */
+
+/* Linux */
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/debug.h>
+#include <l4/dde/ddekit/memory.h>
+
+#include <l4/dde/linux26/dde26.h>
+
+/* dummy */
+int forbid_dac;
+
+/* This stuff is needed by some drivers, e.g. for ethtool.
+ * XXX: This is a fake, implement it if you really need ethtool stuff.
+ */
+struct page* mem_map = NULL;
+static bootmem_data_t contig_bootmem_data;
+struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
+
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ return 0;
+}
+EXPORT_SYMBOL(remap_pfn_range);
+
+/*******************
+ ** Configuration **
+ *******************/
+
+#define DEBUG_MALLOC 0
+
+/********************
+ ** Implementation **
+ ********************/
+
+/*
+ * These are the default caches for kmalloc. Custom caches can have other sizes.
+ */
+static struct cache_sizes malloc_sizes[] = {
+#define CACHE(x) { .cs_size = (x) },
+#include <linux/kmalloc_sizes.h>
+ CACHE(ULONG_MAX)
+#undef CACHE
+};
+
+
+/*
+ * kmalloc() cache names
+ */
+static const char *malloc_names[] = {
+#define CACHE(x) "size-" #x,
+#include <linux/kmalloc_sizes.h>
+ NULL
+#undef CACHE
+};
+
+
+/**
+ * Find kmalloc() cache for size
+ */
+static struct kmem_cache *find_cache(size_t size)
+{
+ struct cache_sizes *sizes;
+
+ for (sizes = malloc_sizes; size > sizes->cs_size; ++sizes) ;
+
+ return sizes->cs_cachep;
+}
+
+
+/**
+ * Free previously allocated memory
+ * @objp: pointer returned by kmalloc.
+ *
+ * If @objp is NULL, no operation is performed.
+ *
+ * Don't free memory not originally allocated by kmalloc()
+ * or you will run into trouble.
+ */
+void kfree(const void *objp)
+{
+ if (!objp) return;
+
+ /* find cache back-pointer */
+ void **p = (void **)objp - 1;
+
+ ddekit_log(DEBUG_MALLOC, "objp=%p cache=%p (%d)",
+ p, *p, *p ? kmem_cache_size(*p) : 0);
+
+ if (*p)
+ /* free from cache */
+ kmem_cache_free(*p, p);
+ else
+ /* no cache for this size - use ddekit free */
+ ddekit_large_free(p);
+}
+
+
+/**
+ * Allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ /* add space for back-pointer */
+ size += sizeof(void *);
+
+ /* find appropriate cache */
+ struct kmem_cache *cache = find_cache(size);
+
+ void **p;
+ if (cache)
+ /* allocate from cache */
+ p = kmem_cache_alloc(cache, flags);
+ else
+ /* no cache for this size - use ddekit malloc */
+ p = ddekit_large_malloc(size);
+
+ ddekit_log(DEBUG_MALLOC, "size=%d, cache=%p (%d) => %p",
+ size, cache, cache ? kmem_cache_size(cache) : 0, p);
+
+ /* return pointer to actual chunk */
+ if (p) {
+ *p = cache;
+ p++;
+ }
+ return p;
+}
+
+
+size_t ksize(const void *p)
+{
+ struct kmem_cache *cache = (struct kmem_cache *)*((void**)p - 1);
+ if (cache)
+ return kmem_cache_size(cache);
+ return -1;
+}
+
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *ret = (void *)__get_free_pages(flag, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
+
+/********************
+ ** Initialization **
+ ********************/
+
+/**
+ * dde_linux kmalloc initialization
+ */
+void l4dde26_kmalloc_init(void)
+{
+ struct cache_sizes *sizes = malloc_sizes;
+ const char **names = malloc_names;
+
+ /* init malloc sizes array */
+ for (; sizes->cs_size != ULONG_MAX; ++sizes, ++names)
+ sizes->cs_cachep = kmem_cache_create(*names, sizes->cs_size, 0, 0, 0);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/kmem_cache.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/kmem_cache.c.svn-base
new file mode 100644
index 00000000..1465ac6c
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/kmem_cache.c.svn-base
@@ -0,0 +1,213 @@
+/*
+ * \brief Kmem_cache implementation
+ * \author Christian Helmuth
+ * \date 2007-01-22
+ *
+ * In Linux 2.6 this resides in mm/slab.c.
+ *
+ * I'll disregard the following function currently...
+ *
+ * extern struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
+ * extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
+ */
+
+/* Linux */
+#include <linux/slab.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/lock.h>
+
+
+/*******************
+ ** Configuration **
+ *******************/
+
+#define DEBUG_SLAB 0
+
+#if DEBUG_SLAB
+# define DEBUG_SLAB_ALLOC 1
+#else
+# define DEBUG_SLAB_ALLOC 0
+#endif
+
+/*
+ * Kmem cache structure
+ */
+struct kmem_cache
+{
+ const char *name; /**< cache name */
+ unsigned size; /**< obj size */
+
+ struct ddekit_slab *ddekit_slab_cache; /**< backing DDEKit cache */
+ ddekit_lock_t cache_lock; /**< lock */
+ void (*ctor)(void *); /**< object constructor */
+};
+
+
+/**
+ * Return size of objects in cache
+ */
+unsigned int kmem_cache_size(struct kmem_cache *cache)
+{
+ return cache->size;
+}
+
+
+/**
+ * Return name of cache
+ */
+const char *kmem_cache_name(struct kmem_cache *cache)
+{
+ return cache->name;
+}
+
+
+/**
+ * kmem_cache_shrink - Shrink a cache.
+ * @cachep: The cache to shrink.
+ *
+ * Releases as many slabs as possible for a cache.
+ * To help debugging, a zero exit status indicates all slabs were released.
+ */
+int kmem_cache_shrink(struct kmem_cache *cache)
+{
+ /* noop */
+ return 1;
+}
+
+
+/**
+ * kmem_cache_free - Deallocate an object
+ * @cachep: The cache the allocation was from.
+ * @objp: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ */
+void kmem_cache_free(struct kmem_cache *cache, void *objp)
+{
+ ddekit_log(DEBUG_SLAB_ALLOC, "\"%s\" (%p)", cache->name, objp);
+
+ ddekit_lock_lock(&cache->cache_lock);
+ ddekit_slab_free(cache->ddekit_slab_cache, objp);
+ ddekit_lock_unlock(&cache->cache_lock);
+}
+
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache. The flags are only relevant
+ * if the cache has no available objects.
+ */
+void *kmem_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+ void *ret;
+
+ ddekit_log(DEBUG_SLAB_ALLOC, "\"%s\" flags=%x", cache->name, flags);
+
+ ddekit_lock_lock(&cache->cache_lock);
+ ret = ddekit_slab_alloc(cache->ddekit_slab_cache);
+ ddekit_lock_unlock(&cache->cache_lock);
+
+ // XXX: is it valid to run ctor AND memset to zero?
+ if (flags & __GFP_ZERO)
+ memset(ret, 0, cache->size);
+ else if (cache->ctor)
+ cache->ctor(ret);
+
+ return ret;
+}
+
+
+/**
+ * kmem_cache_destroy - delete a cache
+ * @cachep: the cache to destroy
+ *
+ * Remove a struct kmem_cache object from the slab cache.
+ * Returns 0 on success.
+ *
+ * It is expected this function will be called by a module when it is
+ * unloaded. This will remove the cache completely, and avoid a duplicate
+ * cache being allocated each time a module is loaded and unloaded, if the
+ * module doesn't have persistent in-kernel storage across loads and unloads.
+ *
+ * The cache must be empty before calling this function.
+ *
+ * The caller must guarantee that no one will allocate memory from the cache
+ * during the kmem_cache_destroy().
+ */
+void kmem_cache_destroy(struct kmem_cache *cache)
+{
+ ddekit_log(DEBUG_SLAB, "\"%s\"", cache->name);
+
+ ddekit_slab_destroy(cache->ddekit_slab_cache);
+ ddekit_simple_free(cache);
+}
+
+
+/**
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within a int, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache
+ * and the @dtor is run before the pages are handed back.
+ *
+ * @name must be valid until the cache is destroyed. This implies that
+ * the module calling this has to destroy the cache before getting unloaded.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
+struct kmem_cache * kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags,
+ void (*ctor)(void *))
+{
+ ddekit_log(DEBUG_SLAB, "\"%s\" obj_size=%d", name, size);
+
+ struct kmem_cache *cache;
+
+ if (!name) {
+ printk("kmem_cache name reqeuired\n");
+ return 0;
+ }
+
+ cache = ddekit_simple_malloc(sizeof(*cache));
+ if (!cache) {
+ printk("No memory for slab cache\n");
+ return 0;
+ }
+
+ /* Initialize a physically contiguous cache for kmem */
+ if (!(cache->ddekit_slab_cache = ddekit_slab_init(size, 1))) {
+ printk("DDEKit slab init failed\n");
+ ddekit_simple_free(cache);
+ return 0;
+ }
+
+ cache->name = name;
+ cache->size = size;
+ cache->ctor = ctor;
+
+ ddekit_lock_init_unlocked(&cache->cache_lock);
+
+ return cache;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/local.h.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/local.h.svn-base
new file mode 100644
index 00000000..35b3e449
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/local.h.svn-base
@@ -0,0 +1,99 @@
+#ifndef __DDE26_LOCAL_H
+#define __DDE26_LOCAL_H
+
+#include <linux/sched.h>
+
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/ddekit/condvar.h>
+#include <l4/dde/ddekit/debug.h>
+#include <l4/dde/ddekit/initcall.h>
+#include <l4/dde/ddekit/interrupt.h>
+#include <l4/dde/ddekit/lock.h>
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/panic.h>
+#include <l4/dde/ddekit/pci.h>
+#include <l4/dde/ddekit/pgtab.h>
+#include <l4/dde/ddekit/printf.h>
+#include <l4/dde/ddekit/resources.h>
+#include <l4/dde/ddekit/semaphore.h>
+#include <l4/dde/ddekit/thread.h>
+#include <l4/dde/ddekit/types.h>
+#include <l4/dde/ddekit/timer.h>
+
+#include <l4/dde/linux26/dde26.h>
+
+#define DDE_DEBUG 1
+#define DDE_FERRET 0
+
+/* Ferret Debugging stuff, note that this is the only point we are using
+ * L4 headers directly and only for debugging. */
+#if DDE_FERRET
+#include <l4/ferret/maj_min.h>
+#include <l4/ferret/client.h>
+#include <l4/ferret/clock.h>
+#include <l4/ferret/types.h>
+#include <l4/ferret/sensors/list_producer.h>
+#include <l4/ferret/sensors/list_producer_wrap.h>
+extern ferret_list_local_t *ferret_ore_sensor;
+#endif
+
+/***
+ * Internal representation of a Linux kernel thread. This struct
+ * contains Linux' data as well as some additional data used by DDE.
+ */
+typedef struct dde26_thread_data
+{
+ /* NOTE: _threadinfo needs to be first in this struct! */
+ struct thread_info _thread_info; ///< Linux thread info (see current())
+ ddekit_thread_t *_ddekit_thread; ///< underlying DDEKit thread
+ ddekit_sem_t *_sleep_lock; ///< lock used for sleep_interruptible()
+ struct pid _vpid; ///< virtual PID
+} dde26_thread_data;
+
+#define LX_THREAD(thread_data) ((thread_data)->_thread_info)
+#define LX_TASK(thread_data) ((thread_data)->_thread_info.task)
+#define DDEKIT_THREAD(thread_data) ((thread_data)->_ddekit_thread)
+#define SLEEP_LOCK(thread_data) ((thread_data)->_sleep_lock)
+#define VPID_P(thread_data) (&(thread_data)->_vpid)
+
+#if DDE_DEBUG
+#define WARN_UNIMPL printk("unimplemented: %s\n", __FUNCTION__)
+#define DEBUG_MSG(msg, ...) printk("%s: \033[36m"msg"\033[0m\n", __FUNCTION__, ##__VA_ARGS__)
+
+#define DECLARE_INITVAR(name) \
+ static struct { \
+ int _initialized; \
+ char *name; \
+ } init_##name = {0, #name,}
+
+#define INITIALIZE_INITVAR(name) init_##name._initialized = 1
+
+#define CHECK_INITVAR(name) \
+ if (init_##name._initialized == 0) { \
+ printk("DDE26: \033[31;1mUsing uninitialized subsystem: "#name"\033[0m\n"); \
+ BUG(); \
+ }
+
+#else /* !DDE_DEBUG */
+
+#define WARN_UNIMPL do {} while(0)
+#define DEBUG_MSG(...) do {} while(0)
+#define DECLARE_INITVAR(name)
+#define CHECK_INITVAR(name) do {} while(0)
+#define INITIALIZE_INITVAR(name) do {} while(0)
+
+#endif
+
+/* since _thread_info always comes first in the thread_data struct,
+ * we can derive the dde26_thread_data from a task struct by simply
+ * dereferencing its thread_info pointer
+ */
+static dde26_thread_data *lxtask_to_ddethread(struct task_struct *t)
+{
+ return (dde26_thread_data *)(task_thread_info(t));
+}
+
+extern struct thread_info init_thread;
+extern struct task_struct init_task;
+
+#endif
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/mm-helper.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/mm-helper.c.svn-base
new file mode 100644
index 00000000..68c0213b
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/mm-helper.c.svn-base
@@ -0,0 +1,45 @@
+/* Linux */
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/ddekit/panic.h>
+
+#include "local.h"
+
+int ioprio_best(unsigned short aprio, unsigned short bprio)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void *__alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+/*
+ * Stolen from linux-2.6.29/fs/libfs.c
+ */
+ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+ const void *from, size_t available)
+{
+ loff_t pos = *ppos;
+ if (pos < 0)
+ return -EINVAL;
+ if (pos > available)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ memcpy(to, from + pos, count);
+ *ppos = pos + count;
+
+ return count;
+}
+
+int capable(int f) { return 1; }
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/net.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/net.c.svn-base
new file mode 100644
index 00000000..d6637d96
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/net.c.svn-base
@@ -0,0 +1,36 @@
+/******************************************************************************
+ * DDELinux networking utilities. *
+ * *
+ * Bjoern Doebel <doebel@tudos.org> *
+ * *
+ * (c) 2005 - 2007 Technische Universitaet Dresden *
+ * This file is part of DROPS, which is distributed under the terms of the *
+ * GNU General Public License 2. Please see the COPYING file for details. *
+ ******************************************************************************/
+
+#include <l4/dde/linux26/dde26_net.h>
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+#include "local.h"
+
+
+/* Callback function to be called if a network packet arrives and needs to
+ * be handled by netif_rx() or netif_receive_skb()
+ */
+linux_rx_callback l4dde26_rx_callback = NULL;
+
+
+/* Register a netif_rx callback function.
+ *
+ * \return pointer to old callback function
+ */
+linux_rx_callback l4dde26_register_rx_callback(linux_rx_callback cb)
+{
+ linux_rx_callback old = l4dde26_rx_callback;
+ l4dde26_rx_callback = cb;
+ DEBUG_MSG("New rx callback @ %p.", cb);
+
+ return old;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/page_alloc.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/page_alloc.c.svn-base
new file mode 100644
index 00000000..0a2e3fdf
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/page_alloc.c.svn-base
@@ -0,0 +1,281 @@
+/*
+ * \brief Page allocation
+ * \author Christian Helmuth <ch12@tudos.org>
+ * Bjoern Doebel <doebel@tudos.org>
+ * \date 2007-01-22
+ *
+ * In Linux 2.6 this resides in mm/page_alloc.c.
+ *
+ * This implementation is far from complete as it does not cover "struct page"
+ * emulation. In Linux, there's an array of structures for all pages. In
+ * particular, iteration works for this array like:
+ *
+ * struct page *p = alloc_pages(3); // p refers to first page of allocation
+ * ++p; // p refers to second page
+ *
+ * There may be more things to cover and we should have a deep look into the
+ * kernel parts we want to reuse. Candidates for problems may be file systems,
+ * storage (USB, IDE), and video (bttv).
+ */
+
+/* Linux */
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/pagevec.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/ddekit/panic.h>
+
+#include "local.h"
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+
+/*******************
+ ** Configuration **
+ *******************/
+
+#define DEBUG_PAGE_ALLOC 0
+
+
+/*
+ * DDE page cache
+ *
+ * We need to store all pages somewhere (which in the Linux kernel is
+ * performed by the huge VM infrastructure. Purpose for us is:
+ * - make virt_to_phys() work
+ * - enable external clients to hand in memory (e.g., a dm_phys
+ * dataspace and make it accessible as Linux pages to the DDE)
+ */
+
+#define DDE_PAGE_CACHE_SHIFT 10
+#define DDE_PAGE_CACHE_SIZE (1 << DDE_PAGE_CACHE_SHIFT)
+#define DDE_PAGE_CACHE_MASK (DDE_PAGE_CACHE_SIZE - 1)
+
+typedef struct
+{
+ struct hlist_node list;
+ struct page *page;
+} page_cache_entry;
+
+static struct hlist_head dde_page_cache[DDE_PAGE_CACHE_SIZE];
+
+/** Hash function to map virtual addresses to page cache buckets. */
+#define VIRT_TO_PAGEHASH(a) ((((unsigned long)a) >> PAGE_SHIFT) & DDE_PAGE_CACHE_MASK)
+
+
+void dde_page_cache_add(struct page *p)
+{
+ unsigned int hashval = VIRT_TO_PAGEHASH(p->virtual);
+
+ page_cache_entry *e = kmalloc(sizeof(page_cache_entry), GFP_KERNEL);
+
+#if DEBUG_PAGE_ALLOC
+ DEBUG_MSG("virt %p, hash: %x", p->virtual, hashval);
+#endif
+
+ e->page = p;
+ INIT_HLIST_NODE(&e->list);
+
+ hlist_add_head(&e->list, &dde_page_cache[hashval]);
+}
+
+
+void dde_page_cache_remove(struct page *p)
+{
+ unsigned int hashval = VIRT_TO_PAGEHASH(p->virtual);
+ struct hlist_node *hn = NULL;
+ struct hlist_head *h = &dde_page_cache[hashval];
+ page_cache_entry *e = NULL;
+ struct hlist_node *v = NULL;
+
+ hlist_for_each_entry(e, hn, h, list) {
+ if ((unsigned long)e->page->virtual == ((unsigned long)p->virtual & PAGE_MASK))
+ v = hn;
+ break;
+ }
+
+ if (v) {
+#if DEBUG_PAGE_ALLOC
+ DEBUG_MSG("deleting node %p which contained page %p", v, p);
+#endif
+ hlist_del(v);
+ }
+}
+
+
+struct page* dde_page_lookup(unsigned long va)
+{
+ unsigned int hashval = VIRT_TO_PAGEHASH(va);
+
+ struct hlist_node *hn = NULL;
+ struct hlist_head *h = &dde_page_cache[hashval];
+ page_cache_entry *e = NULL;
+
+ hlist_for_each_entry(e, hn, h, list) {
+ if ((unsigned long)e->page->virtual == (va & PAGE_MASK))
+ return e->page;
+ }
+
+ return NULL;
+}
+
+
+struct page * __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nm)
+{
+ /* XXX: In fact, according to order, we should have one struct page
+ * for every page, not only for the first one.
+ */
+ struct page *ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+
+ ret->virtual = (void *)__get_free_pages(gfp_mask, order);
+ dde_page_cache_add(ret);
+
+ return ret;
+}
+
+
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+{
+ ddekit_log(DEBUG_PAGE_ALLOC, "gfp_mask=%x order=%d (%d bytes)",
+ gfp_mask, order, PAGE_SIZE << order);
+
+ Assert(gfp_mask != GFP_DMA);
+ void *p = ddekit_large_malloc(PAGE_SIZE << order);
+
+ return (unsigned long)p;
+}
+
+
+unsigned long get_zeroed_page(gfp_t gfp_mask)
+{
+ unsigned long p = __get_free_pages(gfp_mask, 0);
+
+ if (p) memset((void *)p, 0, PAGE_SIZE);
+
+ return (unsigned long)p;
+}
+
+
+void free_hot_page(struct page *page)
+{
+ WARN_UNIMPL;
+}
+
+/*
+ * XXX: If alloc_pages() gets fixed to allocate a page struct per page,
+ * this needs to be adapted, too.
+ */
+void __free_pages(struct page *page, unsigned int order)
+{
+ free_pages((unsigned long)page->virtual, order);
+ dde_page_cache_remove(page);
+}
+
+void __pagevec_free(struct pagevec *pvec)
+{
+ WARN_UNIMPL;
+}
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+/**
+ * ...
+ *
+ * XXX order may be larger than allocation at 'addr' - it may comprise several
+ * allocation via __get_free_pages()!
+ */
+void free_pages(unsigned long addr, unsigned int order)
+{
+ ddekit_log(DEBUG_PAGE_ALLOC, "addr=%p order=%d", (void *)addr, order);
+
+ ddekit_large_free((void *)addr);
+}
+
+
+unsigned long __pa(volatile void *addr)
+{
+ return ddekit_pgtab_get_physaddr((void*)addr);
+}
+
+void *__va(unsigned long addr)
+{
+ return (void*)ddekit_pgtab_get_virtaddr((ddekit_addr_t) addr);
+}
+
+
+int set_page_dirty_lock(struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+
+/*
+ * basically copied from linux/mm/page_alloc.c
+ */
+void *__init alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long limit)
+{
+ void * table = NULL;
+ unsigned long log2qty;
+ unsigned long size;
+
+ if (numentries == 0)
+ numentries = 1024;
+
+ log2qty = ilog2(numentries);
+ size = bucketsize << log2qty;
+
+ do {
+ unsigned long order;
+ for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++);
+ table = (void*) __get_free_pages(GFP_ATOMIC, order);
+ } while (!table && size > PAGE_SIZE && --log2qty);
+
+ if (!table)
+ panic("Failed to allocate %s hash table\n", tablename);
+
+ printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
+ tablename,
+ (1U << log2qty),
+ ilog2(size) - PAGE_SHIFT,
+ size);
+
+ if (_hash_shift)
+ *_hash_shift = log2qty;
+ if (_hash_mask)
+ *_hash_mask = (1 << log2qty) - 1;
+
+ return table;
+}
+
+
+static void __init dde_page_cache_init(void)
+{
+ printk("Initializing DDE page cache\n");
+ int i=0;
+
+ for (i; i < DDE_PAGE_CACHE_SIZE; ++i)
+ INIT_HLIST_HEAD(&dde_page_cache[i]);
+}
+
+core_initcall(dde_page_cache_init);
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/param.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/param.c.svn-base
new file mode 100644
index 00000000..5bd83f32
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/param.c.svn-base
@@ -0,0 +1,32 @@
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* Lazy bastard, eh? */
+#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \
+ int param_set_##name(const char *val, struct kernel_param *kp) \
+ { \
+ return 0; \
+ } \
+ int param_get_##name(char *buffer, struct kernel_param *kp) \
+ { \
+ return 0;\
+ }
+
+STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, simple_strtol);
+STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, simple_strtol);
+STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, simple_strtol);
+STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, simple_strtoul);
+
+int printk_ratelimit(void)
+{
+ return 0;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/pci.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/pci.c.svn-base
new file mode 100644
index 00000000..2a0391f2
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/pci.c.svn-base
@@ -0,0 +1,189 @@
+#include "local.h"
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+
+/* will include $(CONTRIB)/drivers/pci/pci.h */
+#include "pci.h"
+
+DECLARE_INITVAR(dde26_pci);
+
+/** PCI device descriptor */
+typedef struct l4dde_pci_dev {
+	struct list_head next; /**< chain info */
+	struct ddekit_pci_dev *ddekit_dev; /**< corresponding DDEKit descriptor */
+	struct pci_dev *linux_dev; /**< Linux descriptor */
+} l4dde_pci_dev_t;
+
+
+/*******************************************************************************************
+ ** PCI data **
+ *******************************************************************************************/
+/** List of Linux-DDEKit PCIDev mappings */
+static LIST_HEAD(pcidev_mappings);
+
+/** PCI bus */
+static struct pci_bus *pci_bus = NULL;
+
+static int l4dde26_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
+static int l4dde26_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+
+/** PCI operations for our virtual PCI bus */
+static struct pci_ops dde_pcibus_ops = {
+	.read = l4dde26_pci_read,
+	.write = l4dde26_pci_write,
+};
+
+
+/*******************************************************************************************
+ ** Read/write PCI config space. This is simply mapped to the DDEKit functions. **
+ *******************************************************************************************/
+/* Config-space read: forwarded 1:1 to ddekit_pci_read(). */
+static int l4dde26_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+	return ddekit_pci_read(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+}
+
+/* Config-space write: forwarded 1:1 to ddekit_pci_write(). */
+static int l4dde26_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+	return ddekit_pci_write(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+}
+
+/* Route the device's interrupt pin through DDEKit and store the resulting
+ * IRQ number back into dev->irq. Returns 0 on success, negative on error. */
+int pci_irq_enable(struct pci_dev *dev)
+{
+	/* NOTE(review): dev->irq is dereferenced here, BEFORE the NULL
+	 * check below — a NULL dev would fault on this line already. */
+	int irq = dev->irq;
+	int pin = 0;
+	int ret;
+
+	DEBUG_MSG("dev %p", dev);
+	if (!dev)
+		return -EINVAL;
+
+	pin = (int)dev->pin;
+	DEBUG_MSG("irq %d, pin %d", dev->irq, dev->pin);
+	if (!pin) {
+		dev_warn(&dev->dev,
+		         "No interrupt pin configured for device %s\n",
+		         pci_name(dev));
+		return 0;
+	}
+	/* PCI pins are 1-based (INTA=1); DDEKit expects 0-based. */
+	pin--;
+
+	ret = ddekit_pci_irq_enable(dev->bus->number, PCI_SLOT(dev->devfn),
+	                            PCI_FUNC(dev->devfn), pin, &irq);
+	if (ret) {
+		dev_warn(&dev->dev, "Interrupt enable failed for device %s (%d)\n",
+		         pci_name(dev), ret);
+		return -1;
+	}
+
+	dev_info(&dev->dev, "PCI INT %c -> GSI %d -> IRQ %d\n",
+	         'A' + pin, irq, dev->irq);
+
+	dev->irq = irq;
+	return 0;
+}
+
+/* Low-level enable hook — unimplemented in DDE, pretend success. */
+int __pci_enable_device(struct pci_dev *dev)
+{
+	WARN_UNIMPL;
+	return 0;
+}
+
+
+/**
+ * pci_enable_device - Initialize device before it's used by a driver.
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ *
+ * \param dev PCI device to be initialized
+ *
+ */
+int
+pci_enable_device(struct pci_dev *dev)
+{
+	CHECK_INITVAR(dde26_pci);
+//	WARN_UNIMPL;
+	/* In DDE, "enabling" a device reduces to routing its IRQ. */
+	return pci_irq_enable(dev);
+}
+
+
+/**
+ * pci_disable_device - Disable PCI device after use
+ *
+ * Signal to the system that the PCI device is not in use by the system
+ * anymore. This only involves disabling PCI bus-mastering, if active.
+ *
+ * \param dev PCI device to be disabled
+ */
+void pci_disable_device(struct pci_dev *dev)
+{
+	CHECK_INITVAR(dde26_pci);
+	WARN_UNIMPL;
+}
+
+
+/* Quirk/fixup pass — intentionally a no-op in DDE. */
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+	//WARN_UNIMPL;
+}
+
+/* Bus-mastering enable — unimplemented (warning only). */
+void pci_set_master(struct pci_dev *dev)
+{
+	CHECK_INITVAR(dde26_pci);
+	WARN_UNIMPL;
+}
+
+
+/* No sysfs in DDE — report success without creating anything. */
+int pci_create_sysfs_dev_files(struct pci_dev *pdev)
+{
+	return 0;
+}
+
+/* Always renumber busses ourselves on the virtual bus. */
+unsigned int pcibios_assign_all_busses(void)
+{
+	return 1;
+}
+
+void
+pcibios_align_resource(void *data, struct resource *res,
+                       resource_size_t size, resource_size_t align)
+{
+	WARN_UNIMPL;
+}
+
+/* Resource/IRQ enable hook; the real logic is disabled (#if 0),
+ * so this unconditionally reports success. */
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+#if 0
+	int err;
+
+	if ((err = pcibios_enable_resources(dev, mask)) < 0)
+		return err;
+
+	return pcibios_enable_irq(dev);
+#endif
+	return 0;
+}
+
+/*******************************************************************************************
+ ** Initialization function **
+ *******************************************************************************************/
+
+/** Initialize DDELinux PCI subsystem.
+ */
+void __init l4dde26_init_pci(void)
+{
+	ddekit_pci_init();
+
+	/* Create virtual root bus 0 backed by dde_pcibus_ops and scan it. */
+	pci_bus = pci_create_bus(NULL, 0, &dde_pcibus_ops, NULL);
+	Assert(pci_bus);
+
+	pci_do_scan_bus(pci_bus);
+
+	INITIALIZE_INITVAR(dde26_pci);
+}
+
+arch_initcall(l4dde26_init_pci);
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/power.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/power.c.svn-base
new file mode 100644
index 00000000..e36487bd
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/power.c.svn-base
@@ -0,0 +1,23 @@
+/* Dummy functions for power management. */
+
+#include "local.h"
+#include <linux/device.h>
+
+/* PM registration stub — warns once, reports success. */
+int device_pm_add(struct device * dev)
+{
+	WARN_UNIMPL;
+	return 0;
+}
+
+
+/* PM deregistration stub — warns only. */
+void device_pm_remove(struct device * dev)
+{
+	WARN_UNIMPL;
+}
+
+/* pm_qos_* stubs: no QoS tracking in DDE; every call succeeds and
+ * pm_qos_requirement() always reports 0. */
+int pm_qos_add_requirement(int qos, char *name, s32 value) { return 0; }
+int pm_qos_update_requirement(int qos, char *name, s32 new_value) { return 0; }
+void pm_qos_remove_requirement(int qos, char *name) { }
+int pm_qos_requirement(int qos) { return 0; }
+int pm_qos_add_notifier(int qos, struct notifier_block *notifier) { return 0; }
+int pm_qos_remove_notifier(int qos, struct notifier_block *notifier) { return 0; }
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/process.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/process.c.svn-base
new file mode 100644
index 00000000..5fe43b32
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/process.c.svn-base
@@ -0,0 +1,347 @@
+#include <l4/dde/dde.h>
+#include <l4/dde/linux26/dde26.h>
+
+#include <asm/atomic.h>
+
+#include <linux/init_task.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/vmalloc.h>
+
+#include "local.h"
+
+/*****************************************************************************
+ ** Current() implementation **
+ *****************************************************************************/
+/* thread_info lives inside the per-thread dde26_thread_data, which is
+ * stashed in DDEKit thread-local storage. */
+struct thread_info *current_thread_info(void)
+{
+	dde26_thread_data *cur = (dde26_thread_data *)ddekit_thread_get_my_data();
+	return &LX_THREAD(cur);
+}
+
+struct task_struct *get_current(void)
+{
+	return current_thread_info()->task;
+}
+
+/*****************************************************************************
+ ** PID-related stuff **
+ ** **
+ ** Linux manages lists of PIDs that are handed out to processes so that at **
+ ** a later point it is able to determine which task_struct belongs to a **
+ ** certain PID. We implement this with a single list holding the mappings **
+ ** for all our threads. **
+ *****************************************************************************/
+
+LIST_HEAD(_pid_task_list);
+ddekit_lock_t _pid_task_list_lock;
+
+/** PID to task_struct mapping */
+struct pid2task
+{
+	struct list_head list; /**< list data */
+	struct pid *pid; /**< PID */
+	struct task_struct *ts; /**< task struct */
+};
+
+struct pid init_struct_pid = INIT_STRUCT_PID;
+
+/* Drop one reference on a struct pid. */
+void put_pid(struct pid *pid)
+{
+	if (pid)
+		atomic_dec(&pid->count);
+	// no freeing here, our struct pid's are always allocated as
+	// part of the dde26_thread_data
+}
+
+/** Attach PID to a certain task struct. */
+/* The pid_type is irrelevant here — DDE keeps one flat pid->task list. */
+void attach_pid(struct task_struct *task, enum pid_type type
+                __attribute__((unused)), struct pid *pid)
+{
+	/* Initialize a new pid2task mapping */
+	struct pid2task *pt = kmalloc(sizeof(struct pid2task), GFP_KERNEL);
+	pt->pid = get_pid(pid);
+	pt->ts = task;
+
+	/* add to list */
+	ddekit_lock_lock(&_pid_task_list_lock);
+	list_add(&pt->list, &_pid_task_list);
+	ddekit_lock_unlock(&_pid_task_list_lock);
+}
+
+/** Detach PID from a task struct. */
+/* Removes (at most) the first mapping whose task matches. */
+void detach_pid(struct task_struct *task, enum pid_type type __attribute__((unused)))
+{
+	struct list_head *p, *n, *h;
+
+	h = &_pid_task_list;
+
+	ddekit_lock_lock(&_pid_task_list_lock);
+	/* search for mapping with given task struct and free it if necessary */
+	list_for_each_safe(p, n, h) {
+		struct pid2task *pt = list_entry(p, struct pid2task, list);
+		if (pt->ts == task) {
+			put_pid(pt->pid);
+			list_del(p);
+			kfree(pt);
+			break;
+		}
+	}
+	ddekit_lock_unlock(&_pid_task_list_lock);
+}
+
+/* Linear search of the flat pid list; `type` is ignored in DDE. */
+struct task_struct *find_task_by_pid_type(int type, int nr)
+{
+	struct list_head *h, *p;
+	h = &_pid_task_list;
+
+	ddekit_lock_lock(&_pid_task_list_lock);
+	list_for_each(p, h) {
+		struct pid2task *pt = list_entry(p, struct pid2task, list);
+		if (pid_nr(pt->pid) == nr) {
+			ddekit_lock_unlock(&_pid_task_list_lock);
+			return pt->ts;
+		}
+	}
+	ddekit_lock_unlock(&_pid_task_list_lock);
+
+	return NULL;
+}
+
+
+struct task_struct *find_task_by_pid_ns(int nr, struct pid_namespace *ns)
+{
+	/* we don't implement PID name spaces */
+	return find_task_by_pid_type(0, nr);
+}
+
+struct task_struct *find_task_by_pid(int nr)
+{
+	return find_task_by_pid_type(0, nr);
+}
+
+/*****************************************************************************
+ ** kernel_thread() implementation **
+ *****************************************************************************/
+/* Struct containing thread data for a newly created kthread. */
+struct __kthread_data
+{
+	int (*fn)(void *);
+	void *arg;
+	ddekit_lock_t lock;
+	dde26_thread_data *kthread;
+};
+
+/** Counter for running kthreads. It is used to create unique names
+ * for kthreads.
+ */
+static atomic_t kthread_count = ATOMIC_INIT(0);
+
+/** Entry point for new kernel threads. Make this thread a DDE26
+ * worker and then execute the real thread fn.
+ */
+static void __kthread_helper(void *arg)
+{
+	struct __kthread_data *k = (struct __kthread_data *)arg;
+
+	/*
+	 * Make a copy of the fn and arg pointers, as the kthread struct is
+	 * deleted by our parent after notifying it and this may happen before we
+	 * get to execute the function.
+	 */
+	int (*_fn)(void*) = k->fn;
+	void *_arg = k->arg;
+
+	l4dde26_process_add_worker();
+
+	/*
+	 * Handshake with creator - we store our thread data in the
+	 * kthread struct and then unlock the lock to notify our
+	 * creator about completing setup
+	 */
+	k->kthread = (dde26_thread_data *)ddekit_thread_get_my_data();
+	ddekit_lock_unlock(&k->lock);
+
+	do_exit(_fn(_arg));
+}
+
+/** Our implementation of Linux' kernel_thread() function. Setup a new
+ * thread running our __kthread_helper() function.
+ */
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+	ddekit_thread_t *t;
+	char name[20];
+	struct __kthread_data *kt = vmalloc(sizeof(struct __kthread_data));
+	/* NOTE(review): the comments here say kt is freed by the new thread,
+	 * but __kthread_helper() above never vfree()s it — kt appears to be
+	 * leaked (one allocation per kernel_thread call). TODO confirm. */
+	ddekit_lock_t lock;
+
+	/* Initialize (and grab) handshake lock */
+	ddekit_lock_init(&lock);
+	ddekit_lock_lock(&lock);
+
+	int threadnum = atomic_inc_return(&kthread_count);
+	kt->fn = fn;
+	kt->arg = arg;
+	kt->lock = lock; // Copy lock ptr, note that kt is freed by the
+	                 // new thread, so we MUST NOT use kt->lock after
+	                 // this point!
+
+	snprintf(name, 20, ".kthread%x", threadnum);
+	t = ddekit_thread_create(__kthread_helper,
+	                         (void *)kt, name);
+	Assert(t);
+
+	/* Second lock acquisition blocks until the child has stored its
+	 * thread data in kt->kthread and released the handshake lock. */
+	ddekit_lock_lock(&lock);
+	ddekit_lock_deinit(&lock);
+
+	return pid_nr(VPID_P(kt->kthread));
+}
+
+/** Our implementation of exit(). For DDE purposes this only relates
+ * to kernel threads.
+ */
+void do_exit(long code)
+{
+	/* NOTE(review): `t` is only used by the commented-out printk below,
+	 * and `code` is discarded entirely. */
+	ddekit_thread_t *t = DDEKIT_THREAD(lxtask_to_ddethread(current));
+//	printk("Thread %s exits with code %x\n", ddekit_thread_get_name(t), code);
+
+	/* do some cleanup */
+	detach_pid(current, 0);
+
+	/* goodbye, cruel world... */
+	ddekit_thread_exit();
+}
+
+/*****************************************************************************
+ ** Misc functions **
+ *****************************************************************************/
+
+/* Intentionally empty — no stack unwinding support in DDE. */
+void dump_stack(void)
+{
+}
+
+
+char *get_task_comm(char *buf, struct task_struct *tsk)
+{
+	char *ret;
+	/* buf must be at least sizeof(tsk->comm) in size */
+	task_lock(tsk);
+	ret = strncpy(buf, tsk->comm, sizeof(tsk->comm));
+	task_unlock(tsk);
+	return ret;
+}
+
+
+void set_task_comm(struct task_struct *tsk, char *buf)
+{
+	task_lock(tsk);
+	strlcpy(tsk->comm, buf, sizeof(tsk->comm));
+	task_unlock(tsk);
+}
+
+
+/*****************************************************************************
+ ** DDEKit gluecode, init functions **
+ *****************************************************************************/
+/* Initialize a dde26 thread.
+ *
+ * - Allocate thread data, as well as a Linux task struct,
+ * - Fill in default values for thread_info, and task,
+ * - Adapt task struct's thread_info backreference
+ * - Initialize the DDE sleep lock
+ */
+static dde26_thread_data *init_dde26_thread(void)
+{
+	/*
+	 * Virtual PID counter
+	 */
+	static atomic_t pid_counter = ATOMIC_INIT(0);
+	dde26_thread_data *t = vmalloc(sizeof(dde26_thread_data));
+	Assert(t);
+
+	/* Clone init_struct_pid, then give this thread a unique vPID. */
+	memcpy(&t->_vpid, &init_struct_pid, sizeof(struct pid));
+	t->_vpid.numbers[0].nr = atomic_inc_return(&pid_counter);
+
+	memcpy(&LX_THREAD(t), &init_thread, sizeof(struct thread_info));
+
+	LX_TASK(t) = vmalloc(sizeof(struct task_struct));
+	Assert(LX_TASK(t));
+
+	memcpy(LX_TASK(t), &init_task, sizeof(struct task_struct));
+
+	/* nice: Linux backreferences a task`s thread_info from the
+	 * task struct (which in turn can be found using the
+	 * thread_info...) */
+	LX_TASK(t)->stack = &LX_THREAD(t);
+
+	/* initialize this thread's sleep lock */
+	SLEEP_LOCK(t) = ddekit_sem_init(0);
+
+	return t;
+}
+
+/* Process setup for worker threads */
+int l4dde26_process_add_worker(void)
+{
+	dde26_thread_data *cur = init_dde26_thread();
+
+	/* If this function is called for a kernel_thread, the thread already has
+	 * been set up and we just need to store a reference to the ddekit struct.
+	 * However, this function may also be called directly to turn an L4 thread
+	 * into a DDE thread. Then, we need to initialize here. */
+	cur->_ddekit_thread = ddekit_thread_myself();
+	if (cur->_ddekit_thread == NULL)
+		cur->_ddekit_thread = ddekit_thread_setup_myself(".dde26_thread");
+	Assert(cur->_ddekit_thread);
+
+	ddekit_thread_set_my_data(cur);
+
+	attach_pid(LX_TASK(cur), 0, &cur->_vpid);
+
+	/* Linux' default is to have this set to 1 initially and let the
+	 * scheduler set this to 0 later on.
+	 */
+	current_thread_info()->preempt_count = 0;
+
+	return 0;
+}
+
+
+/**
+ * Add an already existing DDEKit thread to the set of threads known to the
+ * Linux environment. This is used for the timer thread, which is actually a
+ * DDEKit thread, but Linux code shall see it as a Linux thread as well.
+ */
+int l4dde26_process_from_ddekit(ddekit_thread_t *t)
+{
+	Assert(t);
+
+	dde26_thread_data *cur = init_dde26_thread();
+	cur->_ddekit_thread = t;
+	ddekit_thread_set_data(t, cur);
+	attach_pid(LX_TASK(cur), 0, &cur->_vpid);
+
+	return 0;
+}
+
+/** Function to initialize the first DDE process.
+ */
+int __init l4dde26_process_init(void)
+{
+	ddekit_lock_init_unlocked(&_pid_task_list_lock);
+
+	/* Spawn kthreadd first so kthread_create() users find it. */
+	int kthreadd_pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+	kthreadd_task = find_task_by_pid(kthreadd_pid);
+
+	l4dde26_process_add_worker();
+
+	return 0;
+}
+
+DEFINE_PER_CPU(int, cpu_number);
+
+//dde_process_initcall(l4dde26_process_init);
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/res.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/res.c.svn-base
new file mode 100644
index 00000000..fbd2d09b
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/res.c.svn-base
@@ -0,0 +1,180 @@
+#include "local.h"
+
+#include <linux/ioport.h>
+
+/** Request an IO port region.
+ *
+ * \param start start port
+ * \param n number of ports
+ * \param name name of allocator (unused)
+ *
+ * \return NULL error
+ * \return !=NULL success
+ *
+ * \bug Since no one in Linux uses this function's return value,
+ * we do not allocate and fill a resource struct.
+ */
+static struct resource *l4dde26_request_region(resource_size_t start,
+                                               resource_size_t n,
+                                               const char *name)
+{
+	int err = ddekit_request_io(start, n);
+
+	if (err)
+		return NULL;
+
+	/* Sentinel non-NULL pointer — callers only test for success. */
+	return (struct resource *)1;
+}
+
+
+/** List of memory regions that have been requested. This is used to
+ * perform ioremap() and iounmap()
+ */
+static LIST_HEAD(dde_mem_regions);
+
+/** va->pa mapping used to store memory regions */
+struct dde_mem_region {
+	ddekit_addr_t pa;
+	ddekit_addr_t va;
+	unsigned int size;
+	struct list_head list;
+};
+
+void __iomem * ioremap(unsigned long phys_addr, unsigned long size);
+
+/** Request an IO memory region.
+ *
+ * \param start start address
+ * \param n size of memory area
+ * \param name name of allocator (unused)
+ *
+ * \return NULL error
+ * \return !=NULL success
+ *
+ * \bug Since no one in Linux uses this function's return value,
+ * we do not allocate and fill a resource struct.
+ */
+static struct resource *l4dde26_request_mem_region(resource_size_t start,
+                                                   resource_size_t n,
+                                                   const char *name)
+{
+	ddekit_addr_t va = 0;
+	struct dde_mem_region *mreg;
+
+	// do not a resource request twice
+	if (ioremap(start, n))
+		return (struct resource *)1;
+
+	int i = ddekit_request_mem(start, n, &va);
+
+	if (i) {
+		ddekit_printf("request_mem_region() failed (start %lx, size %x)", start, n);
+		return NULL;
+	}
+
+	/* Record the pa->va mapping so ioremap() can resolve it later. */
+	mreg = kmalloc(sizeof(struct dde_mem_region), GFP_KERNEL);
+	Assert(mreg);
+
+	mreg->pa = start;
+	mreg->va = va;
+	mreg->size = n;
+	list_add(&mreg->list, &dde_mem_regions);
+
+#if 0
+	ddekit_pgtab_set_region_with_size((void *)va, start, n, PTE_TYPE_OTHER);
+#endif
+
+	return (struct resource *)1;
+}
+
+
+/* Dispatch to the IO-port or IO-memory variant based on parent->flags.
+ * NOTE(review): `flags` is a bit mask, but it is switched on as a scalar;
+ * a parent with extra bits set (e.g. IORESOURCE_MEM|IORESOURCE_BUSY)
+ * matches no case and falls through to NULL. TODO confirm callers only
+ * ever pass pure IORESOURCE_IO / IORESOURCE_MEM parents. */
+struct resource * __request_region(struct resource *parent,
+                                   resource_size_t start,
+                                   resource_size_t n,
+                                   const char *name, int flags)
+{
+	Assert(parent);
+	Assert(parent->flags & IORESOURCE_IO || parent->flags & IORESOURCE_MEM);
+
+	switch (parent->flags)
+	{
+		case IORESOURCE_IO:
+			return l4dde26_request_region(start, n, name);
+		case IORESOURCE_MEM:
+			return l4dde26_request_mem_region(start, n, name);
+	}
+
+	return NULL;
+}
+
+
+/** Release IO port region.
+ */
+static void l4dde26_release_region(resource_size_t start, resource_size_t n)
+{
+	/* FIXME: we need a list of "struct resource"s that have been
+	 * allocated by request_region() and then need to
+	 * free this stuff here! */
+	ddekit_release_io(start, n);
+}
+
+
+/** Release IO memory region.
+ */
+static void l4dde26_release_mem_region(resource_size_t start, resource_size_t n)
+{
+	ddekit_release_mem(start, n);
+	ddekit_pgtab_clear_region((void *)start, PTE_TYPE_OTHER);
+}
+
+
+/* Region availability check — unimplemented, always reports "in use". */
+int __check_region(struct resource *root, resource_size_t s, resource_size_t n)
+{
+	WARN_UNIMPL;
+	return -1;
+}
+
+/* Counterpart of __request_region(); same bitmask-switch caveat applies. */
+void __release_region(struct resource *root, resource_size_t start,
+                      resource_size_t n)
+{
+	switch (root->flags)
+	{
+		case IORESOURCE_IO:
+			return l4dde26_release_region(start, n);
+		case IORESOURCE_MEM:
+			return l4dde26_release_mem_region(start, n);
+	}
+}
+
+
+/** Map physical I/O region into virtual address space.
+ *
+ * For our sake, this only returns the virtual address belonging to
+ * the physical region, since we don't manage page tables ourselves.
+ */
+void __iomem * ioremap(unsigned long phys_addr, unsigned long size)
+{
+	struct list_head *pos, *head;
+	head = &dde_mem_regions;
+
+	/* Find a recorded region fully covering [phys_addr, phys_addr+size). */
+	list_for_each(pos, head) {
+		struct dde_mem_region *mreg = list_entry(pos, struct dde_mem_region,
+		                                         list);
+		if (mreg->pa <= phys_addr && mreg->pa + mreg->size >= phys_addr + size)
+			return (void *)(mreg->va + (phys_addr - mreg->pa));
+	}
+
+	return NULL;
+}
+
+
+void __iomem * ioremap_nocache(unsigned long offset, unsigned long size)
+{
+	return ioremap(offset, size);
+}
+
+
+/* Unmapping is not supported — regions stay mapped for process lifetime. */
+void iounmap(volatile void __iomem *addr)
+{
+	WARN_UNIMPL;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/sched.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/sched.c.svn-base
new file mode 100644
index 00000000..b38520c6
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/sched.c.svn-base
@@ -0,0 +1,155 @@
+#include "local.h"
+
+#include <linux/sched.h>
+
+DEFINE_RWLOCK(tasklist_lock);
+
+asmlinkage void preempt_schedule(void)
+{
+	WARN_UNIMPL;
+}
+
+
+/* Our version of scheduler invocation.
+ *
+ * Scheduling is performed by Fiasco, so we don't care about it as long as
+ * a thread is running. If a task becomes TASK_INTERRUPTIBLE or
+ * TASK_UNINTERRUPTIBLE, we make sure that the task does not become
+ * scheduled by locking the task's sleep lock.
+ */
+asmlinkage void schedule(void)
+{
+	dde26_thread_data *t = lxtask_to_ddethread(current);
+
+	switch (current->state) {
+		case TASK_RUNNING:
+			ddekit_thread_schedule();
+			break;
+		case TASK_INTERRUPTIBLE:
+		case TASK_UNINTERRUPTIBLE:
+			/* Block until try_to_wake_up() ups this semaphore. */
+			ddekit_sem_down(SLEEP_LOCK(t));
+			break;
+		default:
+			panic("current->state = %d --- unknown state\n", current->state);
+	}
+}
+
+
+/** yield the current processor to other threads.
+ *
+ * this is a shortcut for kernel-space yielding - it marks the
+ * thread runnable and calls sys_sched_yield().
+ */
+void __sched yield(void)
+{
+	set_current_state(TASK_RUNNING);
+	ddekit_yield();
+}
+
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @state: the mask of task states that can be woken
+ * @sync: do a synchronous wakeup?
+ *
+ * NOTE(review): unlike the Linux original, @state and @sync are ignored
+ * here — the task is unconditionally marked runnable and its sleep
+ * semaphore is upped, regardless of its current state.
+ */
+int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+{
+	Assert(p);
+	dde26_thread_data *t = lxtask_to_ddethread(p);
+
+	Assert(t);
+	Assert(SLEEP_LOCK(t));
+
+	p->state = TASK_RUNNING;
+	ddekit_sem_up(SLEEP_LOCK(t));
+
+	return 0;
+}
+
+
+/* Timer callback used by schedule_timeout(): wake the sleeping task. */
+static void process_timeout(unsigned long data)
+{
+	wake_up_process((struct task_struct *)data);
+}
+
+
+/* Sleep until the timeout expires or the task is woken early.
+ * Returns the remaining jiffies (0 if the timeout elapsed).
+ * NOTE(review): in the MAX_SCHEDULE_TIMEOUT case the function returns
+ * "expire - jiffies" rather than MAX_SCHEDULE_TIMEOUT itself, so the
+ * value can be slightly below MAX if jiffies advanced — verify callers
+ * don't rely on the exact Linux convention. */
+signed long __sched schedule_timeout(signed long timeout)
+{
+	struct timer_list timer;
+	unsigned long expire = timeout + jiffies;
+
+	setup_timer(&timer, process_timeout, (unsigned long)current);
+	timer.expires = expire;
+
+	switch(timeout)
+	{
+		/*
+		 * Hah!
+		 *
+		 * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+		 * the CPU away without a bound on the timeout. In this case the return
+		 * value will be %MAX_SCHEDULE_TIMEOUT.
+		 */
+		case MAX_SCHEDULE_TIMEOUT:
+			schedule();
+			break;
+		default:
+			add_timer(&timer);
+			schedule();
+			del_timer(&timer);
+			break;
+	}
+
+	timeout = expire - jiffies;
+
+	return timeout < 0 ? 0 : timeout;
+}
+
+
+signed long __sched schedule_timeout_interruptible(signed long timeout)
+{
+	__set_current_state(TASK_INTERRUPTIBLE);
+	return schedule_timeout(timeout);
+}
+
+
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
+{
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	return schedule_timeout(timeout);
+}
+
+/** Tasks may be forced to run only on a certain no. of CPUs. Since
+ * we only emulate a SMP-environment for the sake of having multiple
+ * threads, we do not need to implement this.
+ */
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+	return 0;
+}
+
+/* Priorities are managed by the host kernel — ignored here. */
+void set_user_nice(struct task_struct *p, long nice)
+{
+	//WARN_UNIMPL;
+}
+
+void __sched io_schedule(void)
+{
+	WARN_UNIMPL;
+}
+
+long __sched io_schedule_timeout(long timeout)
+{
+	WARN_UNIMPL;
+	return -1;
+}
+
+/* NOTE(review): "extern" on a function *definition* is unusual but legal. */
+extern int sched_setscheduler_nocheck(struct task_struct *t, int flags,
+                                      struct sched_param *p)
+{
+	WARN_UNIMPL;
+	return -1;
+}
+
+void ignore_signals(struct task_struct *t) { }
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/signal.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/signal.c.svn-base
new file mode 100644
index 00000000..bd0bc0a7
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/signal.c.svn-base
@@ -0,0 +1,24 @@
+#include "local.h"
+
+/******************************************************************************
+ ** Dummy signal implementation. **
+ ** DDE does not provide its own signal implementation. To make it compile, **
+ ** we provide dummy versions of signalling functions here. If later on **
+ ** someone *REALLY* wants to use signals in the DDE context, he might **
+ ** erase this file and use something like the L4 signalling library for **
+ ** such purposes. **
+*******************************************************************************/
+
+/* No signal masking — always reports success; oldset is left untouched. */
+int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
+{
+	return 0;
+}
+
+/* Nothing to flush — signals are never queued in DDE. */
+void flush_signals(struct task_struct *t)
+{
+}
+
+/* Signal actions are ignored; always reports success. */
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+{
+	return 0;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/smp.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/smp.c.svn-base
new file mode 100644
index 00000000..1ebf08c2
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/smp.c.svn-base
@@ -0,0 +1,37 @@
+#include <linux/cpumask.h>
+
+#include "local.h"
+
+/* Static CPU masks: only CPU0 is ever online/present/active; the
+ * "possible" mask is left fully set. */
+static struct cpumask _possible = CPU_MASK_ALL;
+static struct cpumask _online = CPU_MASK_CPU0;
+static struct cpumask _present = CPU_MASK_CPU0;
+static struct cpumask _active = CPU_MASK_CPU0;
+
+const struct cpumask *const cpu_possible_mask = &_possible;
+const struct cpumask *const cpu_online_mask = &_online;
+const struct cpumask *const cpu_present_mask = &_present;
+const struct cpumask *const cpu_active_mask = &_active;
+
+cpumask_t cpu_mask_all = CPU_MASK_ALL;
+int nr_cpu_ids = NR_CPUS;
+/* NOTE(review): declared without an initializer, so cpu_all_bits is
+ * all-zero here — the Linux original sets all bits (CPU_BITS_ALL).
+ * TODO confirm no DDE code iterates over it. */
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+	MASK_DECLARE_8(0), MASK_DECLARE_8(8),
+	MASK_DECLARE_8(16), MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+	MASK_DECLARE_8(32), MASK_DECLARE_8(40),
+	MASK_DECLARE_8(48), MASK_DECLARE_8(56),
+#endif
+};
+
+/* Cross-CPU call emulation: `cpuid` is ignored and the callback runs
+ * synchronously on the calling thread (single-CPU environment). */
+void __smp_call_function_single(int cpuid, struct call_single_data *data)
+{
+	data->func(data->info);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/softirq.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/softirq.c.svn-base
new file mode 100644
index 00000000..21b36d17
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/softirq.c.svn-base
@@ -0,0 +1,267 @@
+#include "local.h"
+
+#include <linux/interrupt.h>
+
+/* There are at most 32 softirqs in Linux, but only 6 are really used. */
+/* NOTE(review): NUM_SOFTIRQS is defined but unused below; the vector is
+ * dimensioned with a literal 32 instead. */
+#define NUM_SOFTIRQS 6
+
+DECLARE_INITVAR(dde26_softirq);
+
+/* softirq threads and their wakeup semaphores */
+ddekit_thread_t *dde_softirq_thread;
+ddekit_sem_t *dde_softirq_sem;
+
+/* struct tasklet_head is not defined in a header in Linux 2.6 */
+struct tasklet_head
+{
+	struct tasklet_struct *list;
+	ddekit_lock_t lock; /* list lock */
+};
+
+/* What to do if a softirq occurs. */
+static struct softirq_action softirq_vec[32];
+
+/* tasklet queues for each softirq thread */
+struct tasklet_head tasklet_vec;
+struct tasklet_head tasklet_hi_vec;
+
+/* Register an action handler for softirq number `nr`. */
+void open_softirq(int nr, void (*action)(struct softirq_action*))
+{
+	softirq_vec[nr].action = action;
+}
+
+/* Mark softirq `nr` pending and wake the softirq thread.
+ * NOTE(review): the `cpu` argument is ignored — there is exactly one
+ * softirq thread in this single-CPU emulation. */
+static void raise_softirq_irqoff_cpu(unsigned int nr, unsigned int cpu)
+{
+	CHECK_INITVAR(dde26_softirq);
+
+	/* mark softirq scheduled */
+	__raise_softirq_irqoff(nr);
+	/* wake softirq thread */
+	ddekit_sem_up(dde_softirq_sem);
+}
+
+void raise_softirq_irqoff(unsigned int nr)
+{
+	raise_softirq_irqoff_cpu(nr, 0);
+}
+
+void raise_softirq(unsigned int nr)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	raise_softirq_irqoff(nr);
+	local_irq_restore(flags);
+}
+
+/**
+ * Initialize tasklet.
+ */
+void tasklet_init(struct tasklet_struct *t,
+                  void (*func)(unsigned long), unsigned long data)
+{
+	t->next = NULL;
+	t->state = 0;
+	atomic_set(&t->count, 0);
+	t->func = func;
+	t->data = data;
+}
+
+/* enqueue tasklet */
+/* Pushes onto the head of the list (LIFO order). */
+static void __tasklet_enqueue(struct tasklet_struct *t,
+                              struct tasklet_head *listhead)
+{
+	ddekit_lock_lock(&listhead->lock);
+	t->next = listhead->list;
+	listhead->list = t;
+	ddekit_lock_unlock(&listhead->lock);
+}
+
+void __tasklet_schedule(struct tasklet_struct *t)
+{
+	unsigned long flags;
+
+	CHECK_INITVAR(dde26_softirq);
+
+	local_irq_save(flags);
+
+	__tasklet_enqueue(t, &tasklet_vec);
+	/* raise softirq */
+	raise_softirq_irqoff_cpu(TASKLET_SOFTIRQ, 0);
+
+	local_irq_restore(flags);
+}
+
+void __tasklet_hi_schedule(struct tasklet_struct *t)
+{
+	unsigned long flags;
+
+	CHECK_INITVAR(dde26_softirq);
+
+	local_irq_save(flags);
+	__tasklet_enqueue(t, &tasklet_hi_vec);
+	raise_softirq_irqoff_cpu(HI_SOFTIRQ, 0);
+	local_irq_restore(flags);
+}
+
+/* Execute tasklets */
+/* Drains the normal-priority tasklet list; a tasklet that is locked or
+ * disabled (count != 0) is re-queued and the softirq re-raised. */
+static void tasklet_action(struct softirq_action *a)
+{
+	struct tasklet_struct *list;
+
+	/* Detach the whole list under the lock, then run unlocked. */
+	ddekit_lock_lock(&tasklet_vec.lock);
+	list = tasklet_vec.list;
+	tasklet_vec.list = NULL;
+	ddekit_lock_unlock(&tasklet_vec.lock);
+
+	while (list) {
+		struct tasklet_struct *t = list;
+
+		list = list->next;
+
+		if (tasklet_trylock(t)) {
+			if (!atomic_read(&t->count)) {
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+					BUG();
+				t->func(t->data);
+				tasklet_unlock(t);
+				continue;
+			}
+			tasklet_unlock(t);
+		}
+
+		/* Not runnable right now — push back and retry later. */
+		ddekit_lock_lock(&tasklet_vec.lock);
+		t->next = tasklet_vec.list;
+		tasklet_vec.list = t;
+		raise_softirq_irqoff_cpu(TASKLET_SOFTIRQ, 0);
+		ddekit_lock_unlock(&tasklet_vec.lock);
+	}
+}
+
+
+/* High-priority twin of tasklet_action(), operating on tasklet_hi_vec. */
+static void tasklet_hi_action(struct softirq_action *a)
+{
+	struct tasklet_struct *list;
+
+	ddekit_lock_lock(&tasklet_hi_vec.lock);
+	list = tasklet_hi_vec.list;
+	tasklet_hi_vec.list = NULL;
+	ddekit_lock_unlock(&tasklet_hi_vec.lock);
+
+	while (list) {
+		struct tasklet_struct *t = list;
+
+		list = list->next;
+
+		if (tasklet_trylock(t)) {
+			if (!atomic_read(&t->count)) {
+				if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+					BUG();
+				t->func(t->data);
+				tasklet_unlock(t);
+				continue;
+			}
+			tasklet_unlock(t);
+		}
+
+		ddekit_lock_lock(&tasklet_hi_vec.lock);
+		t->next = tasklet_hi_vec.list;
+		tasklet_hi_vec.list = t;
+		raise_softirq_irqoff_cpu(HI_SOFTIRQ, 0);
+		ddekit_lock_unlock(&tasklet_hi_vec.lock);
+	}
+}
+
+
+#define MAX_SOFTIRQ_RETRIES 10
+
+/** Run softirq handlers
+ */
+void __do_softirq(void)
+{
+	/* Bound the rescan loop so back-to-back raises cannot starve us. */
+	int retries = MAX_SOFTIRQ_RETRIES;
+	do {
+		struct softirq_action *h = softirq_vec;
+		unsigned long pending = local_softirq_pending();
+
+		/* reset softirq count */
+		set_softirq_pending(0);
+
+		/* While we have a softirq pending... */
+		while (pending) {
+			/* need to execute current softirq? */
+			if (pending & 1)
+				h->action(h);
+			/* try next softirq */
+			h++;
+			/* remove pending flag for last softirq */
+			pending >>= 1;
+		}
+
+		/* Somebody might have scheduled another softirq in between
+		 * (e.g., an IRQ thread or another tasklet). */
+	} while (local_softirq_pending() && --retries);
+
+}
+
+
+void do_softirq(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	if (local_softirq_pending())
+		__do_softirq();
+	local_irq_restore(flags);
+}
+
+/** Softirq thread function.
+ *
+ * Once started, a softirq thread waits for tasklets to be scheduled
+ * and executes them.
+ *
+ * \param arg # of this softirq thread so that it grabs the correct lock
+ * if multiple softirq threads are running.
+ */
+void l4dde26_softirq_thread(void *arg)
+{
+	printk("Softirq daemon starting\n");
+	l4dde26_process_add_worker();
+
+	/* This thread will always be in a softirq, so set the
+	 * corresponding flag right now.
+	 */
+	preempt_count() |= SOFTIRQ_MASK;
+
+	while(1) {
+		ddekit_sem_down(dde_softirq_sem);
+		do_softirq();
+	}
+}
+
+/** Initialize softirq subsystem.
+ *
+ * Start NUM_SOFTIRQ_THREADS threads executing the \ref l4dde26_softirq_thread
+ * function.
+ */
+void l4dde26_softirq_init(void)
+{
+	char name[20];
+
+	dde_softirq_sem = ddekit_sem_init(0);
+
+	set_softirq_pending(0);
+
+	ddekit_lock_init_unlocked(&tasklet_vec.lock);
+	ddekit_lock_init_unlocked(&tasklet_hi_vec.lock);
+
+	snprintf(name, 20, ".softirqd");
+	dde_softirq_thread = ddekit_thread_create(
+	                           l4dde26_softirq_thread,
+	                           NULL, name);
+
+	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+
+	INITIALIZE_INITVAR(dde26_softirq);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/timer.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/timer.c.svn-base
new file mode 100644
index 00000000..ea04b67e
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/timer.c.svn-base
@@ -0,0 +1,184 @@
+#include "local.h"
+
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <asm/delay.h>
+
+DECLARE_INITVAR(dde26_timer);
+
+/* Definitions from linux/kernel/timer.c */
+
+/*
+ * per-CPU timer vector definitions:
+ */
+#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
+#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+typedef struct tvec_s {
+ struct list_head vec[TVN_SIZE];
+} tvec_t;
+
+typedef struct tvec_root_s {
+ struct list_head vec[TVR_SIZE];
+} tvec_root_t;
+
+struct tvec_base {
+ spinlock_t lock;
+ struct timer_list *running_timer;
+ unsigned long timer_jiffies;
+ tvec_root_t tv1;
+ tvec_t tv2;
+ tvec_t tv3;
+ tvec_t tv4;
+ tvec_t tv5;
+} ____cacheline_aligned_in_smp;
+
+typedef struct tvec_t_base_s tvec_base_t;
+
+struct tvec_base boot_tvec_bases __attribute__((unused));
+
+static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) __attribute__((unused)) = &boot_tvec_bases;
+
+void init_timer(struct timer_list *timer)
+{
+ timer->ddekit_timer_id = DDEKIT_INVALID_TIMER_ID;
+}
+
+void add_timer(struct timer_list *timer)
+{
+ CHECK_INITVAR(dde26_timer);
+ /* DDE2.6 uses jiffies and HZ as exported from L4IO. Therefore
+ * we just need to hand over the timeout to DDEKit. */
+ timer->ddekit_timer_id = ddekit_add_timer((void *)timer->function,
+ (void *)timer->data,
+ timer->expires);
+}
+
+
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+ add_timer(timer);
+}
+
+
+int del_timer(struct timer_list * timer)
+{
+ int ret;
+ CHECK_INITVAR(dde26_timer);
+ ret = ddekit_del_timer(timer->ddekit_timer_id);
+ timer->ddekit_timer_id = DDEKIT_INVALID_TIMER_ID;
+
+ return ret >= 0;
+}
+
+int del_timer_sync(struct timer_list *timer)
+{
+ return del_timer(timer);
+}
+
+
+int __mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ /* XXX: Naive implementation. If we really need to be fast with
+ * this function, we can implement a faster version inside
+ * the DDEKit. Bjoern just does not think that this is the
+ * case.
+ */
+ int r;
+
+ CHECK_INITVAR(dde26_timer);
+ r = del_timer(timer);
+
+ timer->expires = expires;
+ add_timer(timer);
+
+ return (r > 0);
+}
+
+
+int mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ return __mod_timer(timer, expires);
+}
+
+
+int timer_pending(const struct timer_list *timer)
+{
+ CHECK_INITVAR(dde26_timer);
+ /* There must be a valid DDEKit timer ID in the timer field
+ * *AND* it must be pending in the DDEKit.
+ */
+ return ((timer->ddekit_timer_id != DDEKIT_INVALID_TIMER_ID)
+ && ddekit_timer_pending(timer->ddekit_timer_id));
+}
+
+
+/**
+ * msleep - sleep safely even with waitqueue interruptions
+ * @msecs: Time in milliseconds to sleep for
+ */
+void msleep(unsigned int msecs)
+{
+ ddekit_thread_msleep(msecs);
+}
+
+
+void __const_udelay(unsigned long xloops)
+{
+ ddekit_thread_usleep(xloops);
+}
+
+
+void __udelay(unsigned long usecs)
+{
+ ddekit_thread_usleep(usecs);
+}
+
+
+void __ndelay(unsigned long nsecs)
+{
+ ddekit_thread_nsleep(nsecs);
+}
+
+
+void __init l4dde26_init_timers(void)
+{
+ ddekit_init_timers();
+
+ l4dde26_process_from_ddekit(ddekit_get_timer_thread());
+
+ INITIALIZE_INITVAR(dde26_timer);
+}
+
+core_initcall(l4dde26_init_timers);
+
+extern unsigned long volatile __jiffy_data jiffies;
+
+__attribute__((weak)) void do_gettimeofday (struct timeval *tv)
+{
+ WARN_UNIMPL;
+}
+
+struct timespec current_fs_time(struct super_block *sb)
+{
+ struct timespec now = {0,0};
+ WARN_UNIMPL;
+ return now;
+}
+
+ktime_t ktime_get_real(void)
+{
+ struct timespec now = {0,0};
+ WARN_UNIMPL;
+ return timespec_to_ktime(now);
+}
+
+
+void native_io_delay(void)
+{
+ udelay(2);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/vmalloc.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/vmalloc.c.svn-base
new file mode 100644
index 00000000..134b80c3
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/vmalloc.c.svn-base
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Bjoern Doebel <doebel@tudos.org> *
+ * *
+ * (c) 2005 - 2007 Technische Universitaet Dresden *
+ * This file is part of DROPS, which is distributed under the terms of the *
+ * GNU General Public License 2. Please see the COPYING file for details. *
+ ******************************************************************************/
+
+/*
+ * \brief vmalloc implementation
+ * \author Bjoern Doebel
+ * \date 2007-07-30
+ */
+
+/* Linux */
+#include <linux/vmalloc.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/lock.h>
+
+void *vmalloc(unsigned long size)
+{
+ return ddekit_simple_malloc(size);
+}
+
+void vfree(const void *addr)
+{
+ ddekit_simple_free((void*)addr);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/.svn/text-base/vmstat.c.svn-base b/libdde_linux26/lib/src/arch/l4/.svn/text-base/vmstat.c.svn-base
new file mode 100644
index 00000000..2e87389e
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/.svn/text-base/vmstat.c.svn-base
@@ -0,0 +1,34 @@
+#include "local.h"
+
+#include <linux/fs.h>
+
+atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+
+void dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
+
+
+void inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
+
+
+void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
+
+void __get_zone_counts(unsigned long *active, unsigned long *inactive,
+ unsigned long *free, struct pglist_data *pgdat)
+{
+ WARN_UNIMPL;
+}
+
+void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/cli_sti.c b/libdde_linux26/lib/src/arch/l4/cli_sti.c
new file mode 100644
index 00000000..81c4feea
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/cli_sti.c
@@ -0,0 +1,66 @@
+#include "local.h"
+
+#include <linux/kernel.h>
+
+/* IRQ lock reference counter */
+static atomic_t _refcnt = ATOMIC_INIT(0);
+
+/* Check whether IRQs are currently disabled.
+ *
+ * This is the case, if flags is greater than 0.
+ */
+
+int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return ((int)flags > 0);
+}
+
+/* Store the current flags state.
+ *
+ * This is done by returning the current refcnt.
+ *
+ * XXX: Up to now, flags was always 0 at this point and
+ * I assume that this is always the case. Prove?
+ */
+unsigned long __raw_local_save_flags(void)
+{
+ return (unsigned long)atomic_read(&_refcnt);
+}
+
+/* Restore IRQ state. */
+void raw_local_irq_restore(unsigned long flags)
+{
+ atomic_set(&_refcnt, flags);
+}
+
+/* Disable IRQs by grabbing the IRQ lock. */
+void raw_local_irq_disable(void)
+{
+ atomic_inc(&_refcnt);
+}
+
+/* Unlock the IRQ lock until refcnt is 0. */
+void raw_local_irq_enable(void)
+{
+ atomic_set(&_refcnt, 0);
+}
+
+
+void raw_safe_halt(void)
+{
+ WARN_UNIMPL;
+}
+
+
+void halt(void)
+{
+ WARN_UNIMPL;
+}
+
+/* These functions are empty for DDE. Every DDE thread is a separate
+ * "virtual" CPU. Therefore there is no need to en/disable bottom halves.
+ */
+void local_bh_disable(void) {}
+void __local_bh_enable(void) {}
+void _local_bh_enable(void) {}
+void local_bh_enable(void) {}
diff --git a/libdde_linux26/lib/src/arch/l4/fs.c b/libdde_linux26/lib/src/arch/l4/fs.c
new file mode 100644
index 00000000..db452949
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/fs.c
@@ -0,0 +1,111 @@
+#include "local.h"
+
+#include <linux/fs.h>
+#include <linux/backing-dev.h>
+#include <linux/mount.h>
+
+/*
+ * Some subsystems, such as the blockdev layer, implement their data
+ * hierarchy as a pseudo file system. To not incorporate the complete
+ * Linux VFS implementation, we cut this down to an own one only for
+ * pseudo file systems.
+ */
+static LIST_HEAD(dde_vfs_mounts);
+
+#define MAX_RA_PAGES 1
+
+void default_unplug_io_fn(struct backing_dev_info *bdi, struct page* p)
+{
+}
+
+struct backing_dev_info default_backing_dev_info = {
+ .ra_pages = MAX_RA_PAGES,
+ .state = 0,
+ .capabilities = BDI_CAP_MAP_COPY,
+ .unplug_io_fn = default_unplug_io_fn,
+};
+
+int seq_printf(struct seq_file *m, const char *f, ...)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+int generic_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+
+/**************************************
+ * Filemap stuff *
+ **************************************/
+struct page * find_get_page(struct address_space *mapping, unsigned long offset)
+{
+ WARN_UNIMPL;
+ return NULL;
+}
+
+void unlock_page(struct page *page)
+{
+ WARN_UNIMPL;
+}
+
+int test_set_page_writeback(struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void end_page_writeback(struct page *page)
+{
+ WARN_UNIMPL;
+}
+
+void do_invalidatepage(struct page *page, unsigned long offset)
+{
+ WARN_UNIMPL;
+}
+
+int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+static struct vfsmount *dde_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
+ void *data)
+{
+ struct list_head *pos, *head;
+ int error;
+
+ head = &dde_vfs_mounts;
+ __list_for_each(pos, head) {
+ struct vfsmount *mnt = list_entry(pos, struct vfsmount, next);
+ if (strcmp(name, mnt->name) == 0) {
+ printk("FS type %s already mounted!?\n", name);
+ BUG();
+ return NULL;
+ }
+ }
+
+ struct vfsmount *m = kzalloc(sizeof(*m), GFP_KERNEL);
+ m->fs_type = type;
+ m->name = kmalloc(strlen(name) + 1, GFP_KERNEL);
+ memcpy(m->name, name, strlen(name) + 1);
+
+ error = type->get_sb(type, flags, name, data, m);
+ BUG_ON(error);
+
+ list_add_tail(&m->next, &dde_vfs_mounts);
+
+ return m;
+}
+
+struct vfsmount *kern_mount_data(struct file_system_type *type, void *data)
+{
+ return dde_kern_mount(type, 0, type->name, NULL);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/hw-helpers.c b/libdde_linux26/lib/src/arch/l4/hw-helpers.c
new file mode 100644
index 00000000..555406c9
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/hw-helpers.c
@@ -0,0 +1,12 @@
+#include "local.h"
+
+#include <linux/kexec.h>
+
+note_buf_t *crash_notes = NULL;
+
+void touch_nmi_watchdog(void)
+{
+ WARN_UNIMPL;
+}
+
+unsigned long pci_mem_start = 0xABCDABCD;
diff --git a/libdde_linux26/lib/src/arch/l4/init.c b/libdde_linux26/lib/src/arch/l4/init.c
new file mode 100644
index 00000000..e89ef27f
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/init.c
@@ -0,0 +1,33 @@
+#include "local.h"
+
+#include <l4/dde/linux26/dde26.h>
+#include <l4/dde/dde.h>
+
+#define DEBUG_PCI(msg, ...) ddekit_printf( "\033[33m"msg"\033[0m\n", ##__VA_ARGS__)
+
+/* Didn't know where to put this. */
+unsigned long __per_cpu_offset[NR_CPUS];
+
+extern void driver_init(void);
+extern int classes_init(void);
+
+void __init __attribute__((used)) l4dde26_init(void)
+{
+ /* first, initialize DDEKit */
+ ddekit_init();
+
+ l4dde26_kmalloc_init();
+
+ /* Init Linux driver framework before trying to add PCI devs to the bus */
+ driver_init();
+
+ printk("Initialized DDELinux 2.6\n");
+}
+
+void l4dde26_do_initcalls(void)
+{
+ /* finally, let DDEKit perform all the initcalls */
+ ddekit_do_initcalls();
+}
+
+dde_initcall(l4dde26_init);
diff --git a/libdde_linux26/lib/src/arch/l4/init_task.c b/libdde_linux26/lib/src/arch/l4/init_task.c
new file mode 100644
index 00000000..685373d1
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/init_task.c
@@ -0,0 +1,131 @@
+#include "local.h"
+
+//#include <asm/desc.h>
+#include <asm/pgtable.h>
+#include <asm/uaccess.h>
+
+#include <linux/fs.h>
+#include <linux/fdtable.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kernel.h>
+#include <linux/mqueue.h>
+#include <linux/module.h>
+#include <linux/personality.h>
+
+/* init task */
+struct task_struct init_task;
+
+/* From kernel/pid.c */
+#define BITS_PER_PAGE (PAGE_SIZE*8)
+#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
+
+/* From init/main.c */
+enum system_states system_state;
+EXPORT_SYMBOL(system_state);
+
+struct fs_struct init_fs = {
+ .count = ATOMIC_INIT(1),
+ .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
+ .umask = 0022,
+};
+
+struct files_struct init_files = {
+ .count = ATOMIC_INIT(1),
+ .fdt = &init_files.fdtab,
+ .fdtab = {
+ .max_fds = NR_OPEN_DEFAULT,
+ .fd = &init_files.fd_array[0],
+ .close_on_exec = (fd_set *)&init_files.close_on_exec_init,
+ .open_fds = (fd_set *)&init_files.open_fds_init,
+ .rcu = RCU_HEAD_INIT,
+ },
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
+};
+
+struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+pgd_t swapper_pg_dir[PTRS_PER_PGD];
+union thread_union init_thread_union = { INIT_THREAD_INFO(init_task) };
+struct group_info init_groups = {.usage = ATOMIC_INIT(2)};
+
+struct user_struct root_user = {
+ .__count = ATOMIC_INIT(1),
+ .processes = ATOMIC_INIT(1),
+ .files = ATOMIC_INIT(0),
+ .sigpending = ATOMIC_INIT(0),
+ .mq_bytes = 0,
+ .locked_shm = 0,
+};
+
+/*
+ * PID-map pages start out as NULL, they get allocated upon
+ * first use and are never deallocated. This way a low pid_max
+ * value does not cause lots of bitmaps to be allocated, but
+ * the scheme scales to up to 4 million PIDs, runtime.
+ */
+struct pid_namespace init_pid_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+ .pidmap = {
+ [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
+ },
+ .last_pid = 0,
+ .level = 0,
+ .child_reaper = &init_task,
+};
+EXPORT_SYMBOL_GPL(init_pid_ns);
+
+struct net init_net __attribute__((weak));
+
+struct nsproxy init_nsproxy = INIT_NSPROXY(init_nsproxy);
+
+struct ipc_namespace init_ipc_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+};
+
+struct user_namespace init_user_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+};
+
+
+struct uts_namespace init_uts_ns = {
+ .kref = {
+ .refcount = ATOMIC_INIT(2),
+ },
+ .name = {
+ .sysname = "L4/DDE",
+ .nodename = "",
+ .release = "2.6",
+ .version = "25",
+ .machine = "",
+ .domainname = "",
+ },
+};
+
+struct exec_domain default_exec_domain = {
+ .name = "Linux", /* name */
+ .handler = NULL, /* no signaling! */
+ .pers_low = 0, /* PER_LINUX personality. */
+ .pers_high = 0, /* PER_LINUX personality. */
+ .signal_map = 0, /* Identity map signals. */
+ .signal_invmap = 0, /* - both ways. */
+};
+
+/* copy of the initial task struct */
+struct task_struct init_task = INIT_TASK(init_task);
+/* copy of the initial thread info (which contains init_task) */
+struct thread_info init_thread = INIT_THREAD_INFO(init_task);
+
+long do_no_restart_syscall(struct restart_block *param)
+{
+ return -EINTR;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/inodes.c b/libdde_linux26/lib/src/arch/l4/inodes.c
new file mode 100644
index 00000000..9ef02ed5
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/inodes.c
@@ -0,0 +1,311 @@
+/** lib/src/arch/l4/inodes.c
+ *
+ * Assorted dummies implementing inode and superblock access functions,
+ * which are used by the block layer stuff, but not needed in DDE_Linux.
+ */
+
+#include "local.h"
+
+#include <linux/fs.h>
+#include <linux/module.h>
+#include <linux/mount.h>
+
+/*
+ * Linux' global list of all super blocks.
+ */
+LIST_HEAD(super_blocks);
+
+/**********************************
+ * Inode stuff *
+ **********************************/
+
+struct inode* new_inode(struct super_block *sb)
+{
+ if (sb->s_op->alloc_inode)
+ return sb->s_op->alloc_inode(sb);
+
+ return kzalloc(sizeof(struct inode), GFP_KERNEL);
+}
+
+void __mark_inode_dirty(struct inode *inode, int flags)
+{
+ WARN_UNIMPL;
+}
+
+void iput(struct inode *inode)
+{
+ WARN_UNIMPL;
+}
+
+void generic_delete_inode(struct inode *inode)
+{
+ WARN_UNIMPL;
+}
+
+int invalidate_inodes(struct super_block * sb)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
+{
+ WARN_UNIMPL;
+}
+
+void touch_atime(struct vfsmount *mnt, struct dentry *dentry)
+{
+ WARN_UNIMPL;
+}
+
+/**********************************
+ * Superblock stuff *
+ **********************************/
+
+struct super_block * get_super(struct block_device *bdev)
+{
+ WARN_UNIMPL;
+ return NULL;
+}
+
+int simple_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void kill_anon_super(struct super_block *sb)
+{
+ WARN_UNIMPL;
+}
+
+void shrink_dcache_sb(struct super_block * sb)
+{
+ WARN_UNIMPL;
+}
+
+void drop_super(struct super_block *sb)
+{
+ WARN_UNIMPL;
+}
+
+struct inode_operations empty_iops = { };
+struct file_operations empty_fops = { };
+
+/**! Alloc and init a new inode.
+ *
+ * Basically stolen from linux/fs/inode.c:alloc_inode()
+ */
+static struct inode *dde_alloc_inode(struct super_block *sb)
+{
+ struct inode *inode;
+
+ if (sb->s_op->alloc_inode)
+ inode = sb->s_op->alloc_inode(sb);
+ else
+ inode = kzalloc(sizeof(*inode), GFP_KERNEL);
+
+ if (inode) {
+ inode->i_sb = sb;
+ inode->i_blkbits = sb->s_blocksize_bits;
+ inode->i_flags = 0;
+ atomic_set(&inode->i_count, 1);
+ inode->i_op = &empty_iops;
+ inode->i_fop = &empty_fops;
+ inode->i_nlink = 1;
+ atomic_set(&inode->i_writecount, 0);
+ inode->i_size = 0;
+ inode->i_blocks = 0;
+ inode->i_bytes = 0;
+ inode->i_generation = 0;
+ inode->i_pipe = NULL;
+ inode->i_bdev = NULL;
+ inode->i_cdev = NULL;
+ inode->i_rdev = 0;
+ inode->dirtied_when = 0;
+ inode->i_private = NULL;
+ }
+
+ return inode;
+}
+
+
+void __iget(struct inode *inode)
+{
+ atomic_inc(&inode->i_count);
+}
+
+
+static struct inode *dde_new_inode(struct super_block *sb, struct list_head *head,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
+{
+ struct inode *ret = dde_alloc_inode(sb);
+ int err = 0;
+
+ if (set)
+ err = set(ret, data);
+
+ BUG_ON(err);
+
+ __iget(ret);
+ ret->i_state = I_LOCK|I_NEW;
+
+ list_add_tail(&ret->i_sb_list, &sb->s_inodes);
+
+ return ret;
+}
+
+
+struct inode *iget5_locked(struct super_block *sb, unsigned long hashval,
+ int (*test)(struct inode *, void *),
+ int (*set)(struct inode *, void *), void *data)
+{
+ struct inode *inode = NULL;
+ struct list_head *p;
+
+ list_for_each(p, &sb->s_inodes) {
+ struct inode *i = list_entry(p, struct inode, i_sb_list);
+ if (test) {
+ if (!test(i, data)) {
+ DEBUG_MSG("test false");
+ continue;
+ }
+ else {
+ inode = i;
+ break;
+ }
+ }
+ }
+
+ if (inode)
+ return inode;
+
+ return dde_new_inode(sb, &sb->s_inodes, test, set, data);
+}
+
+void unlock_new_inode(struct inode *inode)
+{
+ inode->i_state &= ~(I_LOCK | I_NEW);
+ wake_up_bit(&inode->i_state, __I_LOCK);
+}
+
+struct super_block *sget(struct file_system_type *type,
+ int (*test)(struct super_block *, void*),
+ int (*set)(struct super_block *, void*),
+ void *data)
+{
+ struct super_block *s = NULL;
+ struct list_head *p;
+ int err;
+
+ if (test) {
+ list_for_each(p, &type->fs_supers) {
+ struct super_block *block = list_entry(p,
+ struct super_block,
+ s_instances);
+ if (!test(block, data))
+ continue;
+ return block;
+ }
+ }
+
+ s = kzalloc(sizeof(*s), GFP_KERNEL);
+ BUG_ON(!s);
+
+ INIT_LIST_HEAD(&s->s_dirty);
+ INIT_LIST_HEAD(&s->s_io);
+ INIT_LIST_HEAD(&s->s_files);
+ INIT_LIST_HEAD(&s->s_instances);
+ INIT_HLIST_HEAD(&s->s_anon);
+ INIT_LIST_HEAD(&s->s_inodes);
+ init_rwsem(&s->s_umount);
+ mutex_init(&s->s_lock);
+ lockdep_set_class(&s->s_umount, &type->s_umount_key);
+ /*
+ * The locking rules for s_lock are up to the
+ * filesystem. For example ext3fs has different
+ * lock ordering than usbfs:
+ */
+ lockdep_set_class(&s->s_lock, &type->s_lock_key);
+ down_write(&s->s_umount);
+ s->s_count = S_BIAS;
+ atomic_set(&s->s_active, 1);
+ mutex_init(&s->s_vfs_rename_mutex);
+ mutex_init(&s->s_dquot.dqio_mutex);
+ mutex_init(&s->s_dquot.dqonoff_mutex);
+ init_rwsem(&s->s_dquot.dqptr_sem);
+ init_waitqueue_head(&s->s_wait_unfrozen);
+ s->s_maxbytes = MAX_NON_LFS;
+#if 0
+ s->dq_op = sb_dquot_ops;
+ s->s_qcop = sb_quotactl_ops;
+ s->s_op = &default_op;
+#endif
+ s->s_time_gran = 1000000000;
+
+ err = set(s, data);
+ BUG_ON(err);
+
+ s->s_type = type;
+ strlcpy(s->s_id, type->name, sizeof(s->s_id));
+ list_add_tail(&s->s_list, &super_blocks);
+ list_add(&s->s_instances, &type->fs_supers);
+ __module_get(type->owner);
+ return s;
+}
+
+int set_anon_super(struct super_block *s, void *data)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+int get_sb_pseudo(struct file_system_type *fs_type, char *name,
+ const struct super_operations *ops, unsigned long magic,
+ struct vfsmount *mnt)
+{
+ struct super_block *s = sget(fs_type, NULL, set_anon_super, NULL);
+ struct super_operations default_ops = {};
+ struct inode *root = NULL;
+ struct dentry *dentry = NULL;
+ struct qstr d_name = {.name = name, .len = strlen(name)};
+
+ BUG_ON(IS_ERR(s));
+
+ s->s_flags = MS_NOUSER;
+ s->s_maxbytes = ~0ULL;
+ s->s_blocksize = 1024;
+ s->s_blocksize_bits = 10;
+ s->s_magic = magic;
+ s->s_op = ops ? ops : &default_ops;
+ s->s_time_gran = 1;
+ root = new_inode(s);
+
+ BUG_ON(!root);
+
+ root->i_mode = S_IFDIR | S_IRUSR | S_IWUSR;
+ root->i_uid = root->i_gid = 0;
+#if 0
+ root->i_atime = root->i_mtime = root->i_ctime = CURRENT_TIME;
+ dentry = d_alloc(NULL, &d_name);
+ dentry->d_sb = s;
+ dentry->d_parent = dentry;
+ d_instantiate(dentry, root);
+#endif
+ s->s_root = dentry;
+ s->s_flags |= MS_ACTIVE;
+
+ mnt->mnt_sb = s;
+ mnt->mnt_root = dget(s->s_root);
+
+ DEBUG_MSG("root mnt sb @ %p", mnt->mnt_sb);
+
+ return 0;
+}
+
+void inode_init_once(struct inode *inode)
+{
+ WARN_UNIMPL;
+}
+
diff --git a/libdde_linux26/lib/src/arch/l4/irq.c b/libdde_linux26/lib/src/arch/l4/irq.c
new file mode 100644
index 00000000..0e565e54
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/irq.c
@@ -0,0 +1,247 @@
+/*
+ * \brief Hardware-interrupt support
+ * \author Christian Helmuth <ch12@os.inf.tu-dresden.de>
+ * \date 2007-02-12
+ *
+ *
+ *
+ * XXX Consider support for IRQ_HANDLED and friends (linux/irqreturn.h)
+ */
+
+/* Linux */
+#include <linux/interrupt.h>
+#include <linux/string.h> /* memset() */
+
+/* DDEKit */
+#include <l4/dde/ddekit/interrupt.h>
+#include <l4/dde/ddekit/memory.h>
+
+/* local */
+#include "dde26.h"
+#include "local.h"
+
+/* dummy */
+irq_cpustat_t irq_stat[CONFIG_NR_CPUS];
+
+/**
+ * IRQ handling data
+ */
+static struct dde_irq
+{
+ unsigned irq; /* IRQ number */
+ unsigned count; /* usage count */
+ int shared; /* shared IRQ */
+ struct ddekit_thread *thread; /* DDEKit interrupt thread */
+ struct irqaction *action; /* Linux IRQ action */
+
+ struct dde_irq *next; /* next DDE IRQ */
+} *used_irqs;
+
+
+static void irq_thread_init(void *p) {
+ l4dde26_process_add_worker(); }
+
+
+extern ddekit_sem_t *dde_softirq_sem;
+static void irq_handler(void *arg)
+{
+ struct dde_irq *irq = arg;
+ struct irqaction *action;
+
+#if 0
+ DEBUG_MSG("irq 0x%x", irq->irq);
+#endif
+ /* interrupt occurred - call all handlers */
+ for (action = irq->action; action; action = action->next) {
+ irqreturn_t r = action->handler(action->irq, action->dev_id);
+#if 0
+ DEBUG_MSG("return: %s", r == IRQ_HANDLED ? "IRQ_HANDLED" : r == IRQ_NONE ? "IRQ_NONE" : "??");
+#endif
+ }
+
+ /* upon return we check for pending soft irqs */
+ if (local_softirq_pending())
+ ddekit_sem_up(dde_softirq_sem);
+}
+
+
+/*****************************
+ ** IRQ handler bookkeeping **
+ *****************************/
+
+/**
+ * Claim IRQ
+ *
+ * \return usage counter or negative error code
+ *
+ * FIXME list locking
+ * FIXME are there more races?
+ */
+static int claim_irq(struct irqaction *action)
+{
+ int shared = action->flags & IRQF_SHARED ? 1 : 0;
+ struct dde_irq *irq;
+
+ /* check if IRQ already used */
+ for (irq = used_irqs; irq; irq = irq->next)
+ if (irq->irq == action->irq) break;
+
+ /* we have to setup IRQ handling */
+ if (!irq) {
+ /* allocate and initalize new descriptor */
+ irq = ddekit_simple_malloc(sizeof(*irq));
+ if (!irq) return -ENOMEM;
+ memset(irq, 0, sizeof(*irq));
+
+ irq->irq = action->irq;
+ irq->shared = shared;
+ irq->next = used_irqs;
+ used_irqs = irq;
+
+ /* attach to interrupt */
+ irq->thread = ddekit_interrupt_attach(irq->irq,
+ irq->shared,
+ irq_thread_init,
+ irq_handler,
+ (void *)irq);
+ if (!irq->thread) {
+ ddekit_simple_free(irq);
+ return -EBUSY;
+ }
+ }
+
+ /* does desciptor allow our new handler? */
+ if ((!irq->shared || !shared) && irq->action) return -EBUSY;
+
+ /* add handler */
+ irq->count++;
+ action->next = irq->action;
+ irq->action = action;
+
+ return irq->count;
+}
+
+
+/**
+ * Free previously claimed IRQ
+ *
+ * \return usage counter or negative error code
+ */
+static struct irqaction *release_irq(unsigned irq_num, void *dev_id)
+{
+ struct dde_irq *prev_irq, *irq;
+
+ /* check if IRQ already used */
+ for (prev_irq = 0, irq = used_irqs; irq;
+ prev_irq = irq, irq = irq->next)
+ if (irq->irq == irq_num) break;
+
+ if (!irq) return 0;
+
+ struct irqaction *prev_action, *action;
+
+ for (prev_action = 0, action = irq->action; action;
+ prev_action = action, action = action->next)
+ if (action->dev_id == dev_id) break;
+
+ if (!action) return 0;
+
+ /* dequeue action from irq */
+ if (prev_action)
+ prev_action->next = action->next;
+ else
+ irq->action = action->next;
+
+ /* dequeue irq from used_irqs list and free structure,
+ if no more actions available */
+ if (!irq->action) {
+ if (prev_irq)
+ prev_irq->next = irq->next;
+ else
+ used_irqs = irq->next;
+
+ /* detach from interrupt */
+ ddekit_interrupt_detach(irq->irq);
+
+ ddekit_simple_free(irq);
+ }
+
+ return action;
+}
+
+
+/***************
+ ** Linux API **
+ ***************/
+
+/**
+ * Request interrupt
+ *
+ * \param irq interrupt number
+ * \param handler interrupt handler -> top half
+ * \param flags interrupt handling flags (SA_SHIRQ, ...)
+ * \param dev_name device name
+ * \param dev_id cookie passed back to handler
+ *
+ * \return 0 on success; error code otherwise
+ *
+ * \todo FIXME consider locking!
+ */
+int request_irq(unsigned int irq, irq_handler_t handler,
+ unsigned long flags, const char *dev_name, void *dev_id)
+{
+ if (!handler) return -EINVAL;
+
+ /* facilitate Linux irqaction for this handler */
+ struct irqaction *irq_action = ddekit_simple_malloc(sizeof(*irq_action));
+ if (!irq_action) return -ENOMEM;
+ memset(irq_action, 0, sizeof(*irq_action));
+
+ irq_action->handler = handler;
+ irq_action->flags = flags;
+ irq_action->name = dev_name;
+ irq_action->dev_id = dev_id;
+ irq_action->irq = irq;
+
+ /* attach to IRQ */
+ int err = claim_irq(irq_action);
+ if (err < 0) return err;
+
+ return 0;
+}
+
+/** Release Interrupt
+ * \ingroup mod_irq
+ *
+ * \param irq interrupt number
+ * \param dev_id cookie passed back to handler
+ *
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+ struct irqaction *irq_action = release_irq(irq, dev_id);
+
+ if (irq_action)
+ ddekit_simple_free(irq_action);
+}
+
+void disable_irq(unsigned int irq)
+{
+ ddekit_interrupt_disable(irq);
+}
+
+void disable_irq_nosync(unsigned int irq)
+{
+ /*
+ * Note:
+ * In contrast to the _nosync semantics, DDEKit's
+ * disable definitely waits until a currently executed
+ * IRQ handler terminates.
+ */
+ ddekit_interrupt_disable(irq);
+}
+
+void enable_irq(unsigned int irq)
+{
+ ddekit_interrupt_enable(irq);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/kmalloc.c b/libdde_linux26/lib/src/arch/l4/kmalloc.c
new file mode 100644
index 00000000..065c13c7
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/kmalloc.c
@@ -0,0 +1,199 @@
+/*
+ * \brief kmalloc() implementation
+ * \author Christian Helmuth <ch12@os.inf.tu-dresden.de>
+ * \date 2007-01-24
+ *
+ * In Linux 2.6 this resides in mm/slab.c.
+ *
+ * This implementation of kmalloc() stays with Linux's and uses kmem_caches for
+ * some power of two bytes. For larger allocations ddedkit_large_malloc() is
+ * used. This way, we optimize for speed and potentially waste memory
+ * resources.
+ */
+
+/* Linux */
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/mm.h>
+#include <asm/io.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/debug.h>
+#include <l4/dde/ddekit/memory.h>
+
+#include <l4/dde/linux26/dde26.h>
+
+/* dummy */
+int forbid_dac;
+
+/* This stuff is needed by some drivers, e.g. for ethtool.
+ * XXX: This is a fake, implement it if you really need ethtool stuff.
+ */
+struct page* mem_map = NULL;
+static bootmem_data_t contig_bootmem_data;
+struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
+
+int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t prot)
+{
+ return 0;
+}
+EXPORT_SYMBOL(remap_pfn_range);
+
+/*******************
+ ** Configuration **
+ *******************/
+
+#define DEBUG_MALLOC 0
+
+/********************
+ ** Implementation **
+ ********************/
+
+/*
+ * These are the default caches for kmalloc. Custom caches can have other sizes.
+ */
+static struct cache_sizes malloc_sizes[] = {
+#define CACHE(x) { .cs_size = (x) },
+#include <linux/kmalloc_sizes.h>
+ CACHE(ULONG_MAX)
+#undef CACHE
+};
+
+
+/*
+ * kmalloc() cache names
+ */
+static const char *malloc_names[] = {
+#define CACHE(x) "size-" #x,
+#include <linux/kmalloc_sizes.h>
+ NULL
+#undef CACHE
+};
+
+
+/**
+ * Find kmalloc() cache for size
+ */
+static struct kmem_cache *find_cache(size_t size)
+{
+ struct cache_sizes *sizes;
+
+ for (sizes = malloc_sizes; size > sizes->cs_size; ++sizes) ;
+
+ return sizes->cs_cachep;
+}
+
+
+/**
+ * Free previously allocated memory
+ * @objp: pointer returned by kmalloc.
+ *
+ * If @objp is NULL, no operation is performed.
+ *
+ * Don't free memory not originally allocated by kmalloc()
+ * or you will run into trouble.
+ */
+void kfree(const void *objp)
+{
+ if (!objp) return;
+
+ /* find cache back-pointer */
+ void **p = (void **)objp - 1;
+
+ ddekit_log(DEBUG_MALLOC, "objp=%p cache=%p (%d)",
+ p, *p, *p ? kmem_cache_size(*p) : 0);
+
+ if (*p)
+ /* free from cache */
+ kmem_cache_free(*p, p);
+ else
+ /* no cache for this size - use ddekit free */
+ ddekit_large_free(p);
+}
+
+
+/**
+ * Allocate memory
+ * @size: how many bytes of memory are required.
+ * @flags: the type of memory to allocate.
+ *
+ * kmalloc is the normal method of allocating memory
+ * in the kernel.
+ */
+void *__kmalloc(size_t size, gfp_t flags)
+{
+ /* add space for back-pointer */
+ size += sizeof(void *);
+
+ /* find appropriate cache */
+ struct kmem_cache *cache = find_cache(size);
+
+ void **p;
+ if (cache)
+ /* allocate from cache */
+ p = kmem_cache_alloc(cache, flags);
+ else
+ /* no cache for this size - use ddekit malloc */
+ p = ddekit_large_malloc(size);
+
+ ddekit_log(DEBUG_MALLOC, "size=%d, cache=%p (%d) => %p",
+ size, cache, cache ? kmem_cache_size(cache) : 0, p);
+
+ /* return pointer to actual chunk */
+ if (p) {
+ *p = cache;
+ p++;
+ }
+ return p;
+}
+
+
+size_t ksize(const void *p)
+{
+ struct kmem_cache *cache = (struct kmem_cache *)*((void**)p - 1);
+ if (cache)
+ return kmem_cache_size(cache);
+ return -1;
+}
+
+
+void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ void *ret = (void *)__get_free_pages(flag, get_order(size));
+
+ if (ret != NULL) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+
+
+void dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle)
+{
+ free_pages((unsigned long)vaddr, get_order(size));
+}
+
+
+/********************
+ ** Initialization **
+ ********************/
+
+/**
+ * dde_linux kmalloc initialization
+ */
+void l4dde26_kmalloc_init(void)
+{
+ struct cache_sizes *sizes = malloc_sizes;
+ const char **names = malloc_names;
+
+ /* init malloc sizes array */
+ for (; sizes->cs_size != ULONG_MAX; ++sizes, ++names)
+ sizes->cs_cachep = kmem_cache_create(*names, sizes->cs_size, 0, 0, 0);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/kmem_cache.c b/libdde_linux26/lib/src/arch/l4/kmem_cache.c
new file mode 100644
index 00000000..1465ac6c
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/kmem_cache.c
@@ -0,0 +1,213 @@
+/*
+ * \brief Kmem_cache implementation
+ * \author Christian Helmuth
+ * \date 2007-01-22
+ *
+ * In Linux 2.6 this resides in mm/slab.c.
+ *
+ * I'll disregard the following function currently...
+ *
+ * extern struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags);
+ * extern void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
+ */
+
+/* Linux */
+#include <linux/slab.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/lock.h>
+
+
+/*******************
+ ** Configuration **
+ *******************/
+
+#define DEBUG_SLAB 0
+
+#if DEBUG_SLAB
+# define DEBUG_SLAB_ALLOC 1
+#else
+# define DEBUG_SLAB_ALLOC 0
+#endif
+
+/*
+ * Kmem cache structure
+ */
+struct kmem_cache
+{
+ const char *name; /**< cache name */
+ unsigned size; /**< obj size */
+
+ struct ddekit_slab *ddekit_slab_cache; /**< backing DDEKit cache */
+ ddekit_lock_t cache_lock; /**< lock */
+ void (*ctor)(void *); /**< object constructor */
+};
+
+
+/**
+ * Return size of objects in cache
+ */
+unsigned int kmem_cache_size(struct kmem_cache *cache)
+{
+ return cache->size;
+}
+
+
+/**
+ * Return name of cache
+ */
+const char *kmem_cache_name(struct kmem_cache *cache)
+{
+ return cache->name;
+}
+
+
+/**
+ * kmem_cache_shrink - Shrink a cache.
+ * @cachep: The cache to shrink.
+ *
+ * Releases as many slabs as possible for a cache.
+ * To help debugging, a zero exit status indicates all slabs were released.
+ */
+int kmem_cache_shrink(struct kmem_cache *cache)
+{
+ /* noop */
+ return 1;
+}
+
+
+/**
+ * kmem_cache_free - Deallocate an object
+ * @cachep: The cache the allocation was from.
+ * @objp: The previously allocated object.
+ *
+ * Free an object which was previously allocated from this
+ * cache.
+ */
+void kmem_cache_free(struct kmem_cache *cache, void *objp)
+{
+ ddekit_log(DEBUG_SLAB_ALLOC, "\"%s\" (%p)", cache->name, objp);
+
+ ddekit_lock_lock(&cache->cache_lock);
+ ddekit_slab_free(cache->ddekit_slab_cache, objp);
+ ddekit_lock_unlock(&cache->cache_lock);
+}
+
+
+/**
+ * kmem_cache_alloc - Allocate an object
+ * @cachep: The cache to allocate from.
+ * @flags: See kmalloc().
+ *
+ * Allocate an object from this cache. The flags are only relevant
+ * if the cache has no available objects.
+ */
+void *kmem_cache_alloc(struct kmem_cache *cache, gfp_t flags)
+{
+ void *ret;
+
+ ddekit_log(DEBUG_SLAB_ALLOC, "\"%s\" flags=%x", cache->name, flags);
+
+ ddekit_lock_lock(&cache->cache_lock);
+ ret = ddekit_slab_alloc(cache->ddekit_slab_cache);
+ ddekit_lock_unlock(&cache->cache_lock);
+
+ // XXX: is it valid to run ctor AND memset to zero?
+ if (flags & __GFP_ZERO)
+ memset(ret, 0, cache->size);
+ else if (cache->ctor)
+ cache->ctor(ret);
+
+ return ret;
+}
+
+
+/**
+ * kmem_cache_destroy - delete a cache
+ * @cachep: the cache to destroy
+ *
+ * Remove a struct kmem_cache object from the slab cache.
+ * Returns 0 on success.
+ *
+ * It is expected this function will be called by a module when it is
+ * unloaded. This will remove the cache completely, and avoid a duplicate
+ * cache being allocated each time a module is loaded and unloaded, if the
+ * module doesn't have persistent in-kernel storage across loads and unloads.
+ *
+ * The cache must be empty before calling this function.
+ *
+ * The caller must guarantee that noone will allocate memory from the cache
+ * during the kmem_cache_destroy().
+ */
+void kmem_cache_destroy(struct kmem_cache *cache)
+{
+ ddekit_log(DEBUG_SLAB, "\"%s\"", cache->name);
+
+ ddekit_slab_destroy(cache->ddekit_slab_cache);
+ ddekit_simple_free(cache);
+}
+
+
+/**
+ * kmem_cache_create - Create a cache.
+ * @name: A string which is used in /proc/slabinfo to identify this cache.
+ * @size: The size of objects to be created in this cache.
+ * @align: The required alignment for the objects.
+ * @flags: SLAB flags
+ * @ctor: A constructor for the objects.
+ *
+ * Returns a ptr to the cache on success, NULL on failure.
+ * Cannot be called within a int, but can be interrupted.
+ * The @ctor is run when new pages are allocated by the cache
+ * and the @dtor is run before the pages are handed back.
+ *
+ * @name must be valid until the cache is destroyed. This implies that
+ * the module calling this has to destroy the cache before getting unloaded.
+ *
+ * The flags are
+ *
+ * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
+ * to catch references to uninitialised memory.
+ *
+ * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
+ * for buffer overruns.
+ *
+ * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
+ * cacheline. This can be beneficial if you're counting cycles as closely
+ * as davem.
+ */
+struct kmem_cache * kmem_cache_create(const char *name, size_t size, size_t align,
+ unsigned long flags,
+ void (*ctor)(void *))
+{
+ ddekit_log(DEBUG_SLAB, "\"%s\" obj_size=%d", name, size);
+
+ struct kmem_cache *cache;
+
+ if (!name) {
+ printk("kmem_cache name reqeuired\n");
+ return 0;
+ }
+
+ cache = ddekit_simple_malloc(sizeof(*cache));
+ if (!cache) {
+ printk("No memory for slab cache\n");
+ return 0;
+ }
+
+ /* Initialize a physically contiguous cache for kmem */
+ if (!(cache->ddekit_slab_cache = ddekit_slab_init(size, 1))) {
+ printk("DDEKit slab init failed\n");
+ ddekit_simple_free(cache);
+ return 0;
+ }
+
+ cache->name = name;
+ cache->size = size;
+ cache->ctor = ctor;
+
+ ddekit_lock_init_unlocked(&cache->cache_lock);
+
+ return cache;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/local.h b/libdde_linux26/lib/src/arch/l4/local.h
new file mode 100644
index 00000000..35b3e449
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/local.h
@@ -0,0 +1,99 @@
+#ifndef __DDE26_LOCAL_H
+#define __DDE26_LOCAL_H
+
+#include <linux/sched.h>
+
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/ddekit/condvar.h>
+#include <l4/dde/ddekit/debug.h>
+#include <l4/dde/ddekit/initcall.h>
+#include <l4/dde/ddekit/interrupt.h>
+#include <l4/dde/ddekit/lock.h>
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/panic.h>
+#include <l4/dde/ddekit/pci.h>
+#include <l4/dde/ddekit/pgtab.h>
+#include <l4/dde/ddekit/printf.h>
+#include <l4/dde/ddekit/resources.h>
+#include <l4/dde/ddekit/semaphore.h>
+#include <l4/dde/ddekit/thread.h>
+#include <l4/dde/ddekit/types.h>
+#include <l4/dde/ddekit/timer.h>
+
+#include <l4/dde/linux26/dde26.h>
+
+#define DDE_DEBUG 1
+#define DDE_FERRET 0
+
+/* Ferret Debugging stuff, note that this is the only point we are using
+ * L4 headers directly and only for debugging. */
+#if DDE_FERRET
+#include <l4/ferret/maj_min.h>
+#include <l4/ferret/client.h>
+#include <l4/ferret/clock.h>
+#include <l4/ferret/types.h>
+#include <l4/ferret/sensors/list_producer.h>
+#include <l4/ferret/sensors/list_producer_wrap.h>
+extern ferret_list_local_t *ferret_ore_sensor;
+#endif
+
+/***
+ * Internal representation of a Linux kernel thread. This struct
+ * contains Linux' data as well as some additional data used by DDE.
+ */
+typedef struct dde26_thread_data
+{
+ /* NOTE: _threadinfo needs to be first in this struct! */
+ struct thread_info _thread_info; ///< Linux thread info (see current())
+ ddekit_thread_t *_ddekit_thread; ///< underlying DDEKit thread
+ ddekit_sem_t *_sleep_lock; ///< lock used for sleep_interruptible()
+ struct pid _vpid; ///< virtual PID
+} dde26_thread_data;
+
+#define LX_THREAD(thread_data) ((thread_data)->_thread_info)
+#define LX_TASK(thread_data) ((thread_data)->_thread_info.task)
+#define DDEKIT_THREAD(thread_data) ((thread_data)->_ddekit_thread)
+#define SLEEP_LOCK(thread_data) ((thread_data)->_sleep_lock)
+#define VPID_P(thread_data) (&(thread_data)->_vpid)
+
+#if DDE_DEBUG
+#define WARN_UNIMPL printk("unimplemented: %s\n", __FUNCTION__)
+#define DEBUG_MSG(msg, ...) printk("%s: \033[36m"msg"\033[0m\n", __FUNCTION__, ##__VA_ARGS__)
+
+#define DECLARE_INITVAR(name) \
+ static struct { \
+ int _initialized; \
+ char *name; \
+ } init_##name = {0, #name,}
+
+#define INITIALIZE_INITVAR(name) init_##name._initialized = 1
+
+#define CHECK_INITVAR(name) \
+ if (init_##name._initialized == 0) { \
+ printk("DDE26: \033[31;1mUsing uninitialized subsystem: "#name"\033[0m\n"); \
+ BUG(); \
+ }
+
+#else /* !DDE_DEBUG */
+
+#define WARN_UNIMPL do {} while(0)
+#define DEBUG_MSG(...) do {} while(0)
+#define DECLARE_INITVAR(name)
+#define CHECK_INITVAR(name) do {} while(0)
+#define INITIALIZE_INITVAR(name) do {} while(0)
+
+#endif
+
+/* since _thread_info always comes first in the thread_data struct,
+ * we can derive the dde26_thread_data from a task struct by simply
+ * dereferencing its thread_info pointer
+ */
+static dde26_thread_data *lxtask_to_ddethread(struct task_struct *t)
+{
+ return (dde26_thread_data *)(task_thread_info(t));
+}
+
+extern struct thread_info init_thread;
+extern struct task_struct init_task;
+
+#endif
diff --git a/libdde_linux26/lib/src/arch/l4/mm-helper.c b/libdde_linux26/lib/src/arch/l4/mm-helper.c
new file mode 100644
index 00000000..68c0213b
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/mm-helper.c
@@ -0,0 +1,45 @@
+/* Linux */
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <asm/page.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/ddekit/panic.h>
+
+#include "local.h"
+
+int ioprio_best(unsigned short aprio, unsigned short bprio)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+void *__alloc_bootmem(unsigned long size, unsigned long align,
+ unsigned long goal)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+/*
+ * Stolen from linux-2.6.29/fs/libfs.c
+ */
+ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+ const void *from, size_t available)
+{
+ loff_t pos = *ppos;
+ if (pos < 0)
+ return -EINVAL;
+ if (pos > available)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ memcpy(to, from + pos, count);
+ *ppos = pos + count;
+
+ return count;
+}
+
+int capable(int f) { return 1; }
diff --git a/libdde_linux26/lib/src/arch/l4/net.c b/libdde_linux26/lib/src/arch/l4/net.c
new file mode 100644
index 00000000..d6637d96
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/net.c
@@ -0,0 +1,36 @@
+/******************************************************************************
+ * DDELinux networking utilities. *
+ * *
+ * Bjoern Doebel <doebel@tudos.org> *
+ * *
+ * (c) 2005 - 2007 Technische Universitaet Dresden *
+ * This file is part of DROPS, which is distributed under the terms of the *
+ * GNU General Public License 2. Please see the COPYING file for details. *
+ ******************************************************************************/
+
+#include <l4/dde/linux26/dde26_net.h>
+
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+
+#include "local.h"
+
+
+/* Callback function to be called if a network packet arrives and needs to
+ * be handled by netif_rx() or netif_receive_skb()
+ */
+linux_rx_callback l4dde26_rx_callback = NULL;
+
+
+/* Register a netif_rx callback function.
+ *
+ * \return pointer to old callback function
+ */
+linux_rx_callback l4dde26_register_rx_callback(linux_rx_callback cb)
+{
+ linux_rx_callback old = l4dde26_rx_callback;
+ l4dde26_rx_callback = cb;
+ DEBUG_MSG("New rx callback @ %p.", cb);
+
+ return old;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/page_alloc.c b/libdde_linux26/lib/src/arch/l4/page_alloc.c
new file mode 100644
index 00000000..0a2e3fdf
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/page_alloc.c
@@ -0,0 +1,281 @@
+/*
+ * \brief Page allocation
+ * \author Christian Helmuth <ch12@tudos.org>
+ * Bjoern Doebel <doebel@tudos.org>
+ * \date 2007-01-22
+ *
+ * In Linux 2.6 this resides in mm/page_alloc.c.
+ *
+ * This implementation is far from complete as it does not cover "struct page"
+ * emulation. In Linux, there's an array of structures for all pages. In
+ * particular, iteration works for this array like:
+ *
+ * struct page *p = alloc_pages(3); // p refers to first page of allocation
+ * ++p; // p refers to second page
+ *
+ * There may be more things to cover and we should have a deep look into the
+ * kernel parts we want to reuse. Candidates for problems may be file systems,
+ * storage (USB, IDE), and video (bttv).
+ */
+
+/* Linux */
+#include <linux/gfp.h>
+#include <linux/string.h>
+#include <linux/pagevec.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/assert.h>
+#include <l4/dde/ddekit/panic.h>
+
+#include "local.h"
+
+unsigned long max_low_pfn;
+unsigned long min_low_pfn;
+unsigned long max_pfn;
+
+/*******************
+ ** Configuration **
+ *******************/
+
+#define DEBUG_PAGE_ALLOC 0
+
+
+/*
+ * DDE page cache
+ *
+ * We need to store all pages somewhere (which in the Linux kernel is
+ * performed by the huge VM infrastructure. Purpose for us is:
+ * - make virt_to_phys() work
+ * - enable external clients to hand in memory (e.g., a dm_phys
+ * dataspace and make it accessible as Linux pages to the DDE)
+ */
+
+#define DDE_PAGE_CACHE_SHIFT 10
+#define DDE_PAGE_CACHE_SIZE (1 << DDE_PAGE_CACHE_SHIFT)
+#define DDE_PAGE_CACHE_MASK (DDE_PAGE_CACHE_SIZE - 1)
+
+typedef struct
+{
+ struct hlist_node list;
+ struct page *page;
+} page_cache_entry;
+
+static struct hlist_head dde_page_cache[DDE_PAGE_CACHE_SIZE];
+
+/** Hash function to map virtual addresses to page cache buckets. */
+#define VIRT_TO_PAGEHASH(a) ((((unsigned long)a) >> PAGE_SHIFT) & DDE_PAGE_CACHE_MASK)
+
+
+void dde_page_cache_add(struct page *p)
+{
+ unsigned int hashval = VIRT_TO_PAGEHASH(p->virtual);
+
+ page_cache_entry *e = kmalloc(sizeof(page_cache_entry), GFP_KERNEL);
+
+#if DEBUG_PAGE_ALLOC
+ DEBUG_MSG("virt %p, hash: %x", p->virtual, hashval);
+#endif
+
+ e->page = p;
+ INIT_HLIST_NODE(&e->list);
+
+ hlist_add_head(&e->list, &dde_page_cache[hashval]);
+}
+
+
+void dde_page_cache_remove(struct page *p)
+{
+ unsigned int hashval = VIRT_TO_PAGEHASH(p->virtual);
+ struct hlist_node *hn = NULL;
+ struct hlist_head *h = &dde_page_cache[hashval];
+ page_cache_entry *e = NULL;
+ struct hlist_node *v = NULL;
+
+ hlist_for_each_entry(e, hn, h, list) {
+ if ((unsigned long)e->page->virtual == ((unsigned long)p->virtual & PAGE_MASK))
+ v = hn;
+ break;
+ }
+
+ if (v) {
+#if DEBUG_PAGE_ALLOC
+ DEBUG_MSG("deleting node %p which contained page %p", v, p);
+#endif
+ hlist_del(v);
+ }
+}
+
+
+struct page* dde_page_lookup(unsigned long va)
+{
+ unsigned int hashval = VIRT_TO_PAGEHASH(va);
+
+ struct hlist_node *hn = NULL;
+ struct hlist_head *h = &dde_page_cache[hashval];
+ page_cache_entry *e = NULL;
+
+ hlist_for_each_entry(e, hn, h, list) {
+ if ((unsigned long)e->page->virtual == (va & PAGE_MASK))
+ return e->page;
+ }
+
+ return NULL;
+}
+
+
+struct page * __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
+ struct zonelist *zonelist, nodemask_t *nm)
+{
+ /* XXX: In fact, according to order, we should have one struct page
+ * for every page, not only for the first one.
+ */
+ struct page *ret = kmalloc(sizeof(*ret), GFP_KERNEL);
+
+ ret->virtual = (void *)__get_free_pages(gfp_mask, order);
+ dde_page_cache_add(ret);
+
+ return ret;
+}
+
+
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+{
+ ddekit_log(DEBUG_PAGE_ALLOC, "gfp_mask=%x order=%d (%d bytes)",
+ gfp_mask, order, PAGE_SIZE << order);
+
+ Assert(gfp_mask != GFP_DMA);
+ void *p = ddekit_large_malloc(PAGE_SIZE << order);
+
+ return (unsigned long)p;
+}
+
+
+unsigned long get_zeroed_page(gfp_t gfp_mask)
+{
+ unsigned long p = __get_free_pages(gfp_mask, 0);
+
+ if (p) memset((void *)p, 0, PAGE_SIZE);
+
+ return (unsigned long)p;
+}
+
+
+void free_hot_page(struct page *page)
+{
+ WARN_UNIMPL;
+}
+
+/*
+ * XXX: If alloc_pages() gets fixed to allocate a page struct per page,
+ * this needs to be adapted, too.
+ */
+void __free_pages(struct page *page, unsigned int order)
+{
+ free_pages((unsigned long)page->virtual, order);
+ dde_page_cache_remove(page);
+}
+
+void __pagevec_free(struct pagevec *pvec)
+{
+ WARN_UNIMPL;
+}
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, int len, int write, int force,
+ struct page **pages, struct vm_area_struct **vmas)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+/**
+ * ...
+ *
+ * XXX order may be larger than allocation at 'addr' - it may comprise several
+ * allocation via __get_free_pages()!
+ */
+void free_pages(unsigned long addr, unsigned int order)
+{
+ ddekit_log(DEBUG_PAGE_ALLOC, "addr=%p order=%d", (void *)addr, order);
+
+ ddekit_large_free((void *)addr);
+}
+
+
+unsigned long __pa(volatile void *addr)
+{
+ return ddekit_pgtab_get_physaddr((void*)addr);
+}
+
+void *__va(unsigned long addr)
+{
+ return (void*)ddekit_pgtab_get_virtaddr((ddekit_addr_t) addr);
+}
+
+
+int set_page_dirty_lock(struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+
+/*
+ * basically copied from linux/mm/page_alloc.c
+ */
+void *__init alloc_large_system_hash(const char *tablename,
+ unsigned long bucketsize,
+ unsigned long numentries,
+ int scale,
+ int flags,
+ unsigned int *_hash_shift,
+ unsigned int *_hash_mask,
+ unsigned long limit)
+{
+ void * table = NULL;
+ unsigned long log2qty;
+ unsigned long size;
+
+ if (numentries == 0)
+ numentries = 1024;
+
+ log2qty = ilog2(numentries);
+ size = bucketsize << log2qty;
+
+ do {
+ unsigned long order;
+ for (order = 0; ((1UL << order) << PAGE_SHIFT) < size; order++);
+ table = (void*) __get_free_pages(GFP_ATOMIC, order);
+ } while (!table && size > PAGE_SIZE && --log2qty);
+
+ if (!table)
+ panic("Failed to allocate %s hash table\n", tablename);
+
+ printk("%s hash table entries: %d (order: %d, %lu bytes)\n",
+ tablename,
+ (1U << log2qty),
+ ilog2(size) - PAGE_SHIFT,
+ size);
+
+ if (_hash_shift)
+ *_hash_shift = log2qty;
+ if (_hash_mask)
+ *_hash_mask = (1 << log2qty) - 1;
+
+ return table;
+}
+
+
+static void __init dde_page_cache_init(void)
+{
+ printk("Initializing DDE page cache\n");
+ int i=0;
+
+ for (i; i < DDE_PAGE_CACHE_SIZE; ++i)
+ INIT_HLIST_HEAD(&dde_page_cache[i]);
+}
+
+core_initcall(dde_page_cache_init);
diff --git a/libdde_linux26/lib/src/arch/l4/param.c b/libdde_linux26/lib/src/arch/l4/param.c
new file mode 100644
index 00000000..5bd83f32
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/param.c
@@ -0,0 +1,32 @@
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* Lazy bastard, eh? */
+#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \
+ int param_set_##name(const char *val, struct kernel_param *kp) \
+ { \
+ return 0; \
+ } \
+ int param_get_##name(char *buffer, struct kernel_param *kp) \
+ { \
+ return 0;\
+ }
+
+STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, simple_strtol);
+STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, simple_strtol);
+STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, simple_strtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, simple_strtol);
+STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, simple_strtoul);
+
+int printk_ratelimit(void)
+{
+ return 0;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/pci.c b/libdde_linux26/lib/src/arch/l4/pci.c
new file mode 100644
index 00000000..2a0391f2
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/pci.c
@@ -0,0 +1,189 @@
+#include "local.h"
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/list.h>
+#include <linux/init.h>
+
+/* will include $(CONTRIB)/drivers/pci/pci.h */
+#include "pci.h"
+
+DECLARE_INITVAR(dde26_pci);
+
+/** PCI device descriptor */
+typedef struct l4dde_pci_dev {
+ struct list_head next; /**< chain info */
+ struct ddekit_pci_dev *ddekit_dev; /**< corresponding DDEKit descriptor */
+ struct pci_dev *linux_dev; /**< Linux descriptor */
+} l4dde_pci_dev_t;
+
+
+/*******************************************************************************************
+ ** PCI data **
+ *******************************************************************************************/
+/** List of Linux-DDEKit PCIDev mappings */
+static LIST_HEAD(pcidev_mappings);
+
+/** PCI bus */
+static struct pci_bus *pci_bus = NULL;
+
+static int l4dde26_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val);
+static int l4dde26_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val);
+
+/** PCI operations for our virtual PCI bus */
+static struct pci_ops dde_pcibus_ops = {
+ .read = l4dde26_pci_read,
+ .write = l4dde26_pci_write,
+};
+
+
+/*******************************************************************************************
+ ** Read/write PCI config space. This is simply mapped to the DDEKit functions. **
+ *******************************************************************************************/
+static int l4dde26_pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val)
+{
+ return ddekit_pci_read(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+}
+
+static int l4dde26_pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val)
+{
+ return ddekit_pci_write(bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
+}
+
+int pci_irq_enable(struct pci_dev *dev)
+{
+ int irq = dev->irq;
+ int pin = 0;
+ int ret;
+
+ DEBUG_MSG("dev %p", dev);
+ if (!dev)
+ return -EINVAL;
+
+ pin = (int)dev->pin;
+ DEBUG_MSG("irq %d, pin %d", dev->irq, dev->pin);
+ if (!pin) {
+ dev_warn(&dev->dev,
+ "No interrupt pin configured for device %s\n",
+ pci_name(dev));
+ return 0;
+ }
+ pin--;
+
+ ret = ddekit_pci_irq_enable(dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn), pin, &irq);
+ if (ret) {
+ dev_warn(&dev->dev, "Interrupt enable failed for device %s (%d)\n",
+ pci_name(dev), ret);
+ return -1;
+ }
+
+ dev_info(&dev->dev, "PCI INT %c -> GSI %d -> IRQ %d\n",
+ 'A' + pin, irq, dev->irq);
+
+ dev->irq = irq;
+ return 0;
+}
+
+int __pci_enable_device(struct pci_dev *dev)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+
+/**
+ * pci_enable_device - Initialize device before it's used by a driver.
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ *
+ * \param dev PCI device to be initialized
+ *
+ */
+int
+pci_enable_device(struct pci_dev *dev)
+{
+ CHECK_INITVAR(dde26_pci);
+// WARN_UNIMPL;
+ return pci_irq_enable(dev);
+}
+
+
+/**
+ * pci_disable_device - Disable PCI device after use
+ *
+ * Signal to the system that the PCI device is not in use by the system
+ * anymore. This only involves disabling PCI bus-mastering, if active.
+ *
+ * \param dev PCI device to be disabled
+ */
+void pci_disable_device(struct pci_dev *dev)
+{
+ CHECK_INITVAR(dde26_pci);
+ WARN_UNIMPL;
+}
+
+
+void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev)
+{
+ //WARN_UNIMPL;
+}
+
+void pci_set_master(struct pci_dev *dev)
+{
+ CHECK_INITVAR(dde26_pci);
+ WARN_UNIMPL;
+}
+
+
+int pci_create_sysfs_dev_files(struct pci_dev *pdev)
+{
+ return 0;
+}
+
+unsigned int pcibios_assign_all_busses(void)
+{
+ return 1;
+}
+
+void
+pcibios_align_resource(void *data, struct resource *res,
+ resource_size_t size, resource_size_t align)
+{
+ WARN_UNIMPL;
+}
+
+int pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+#if 0
+ int err;
+
+ if ((err = pcibios_enable_resources(dev, mask)) < 0)
+ return err;
+
+ return pcibios_enable_irq(dev);
+#endif
+ return 0;
+}
+
+/*******************************************************************************************
+ ** Initialization function **
+ *******************************************************************************************/
+
+/** Initialize DDELinux PCI subsystem.
+ */
+void __init l4dde26_init_pci(void)
+{
+ ddekit_pci_init();
+
+ pci_bus = pci_create_bus(NULL, 0, &dde_pcibus_ops, NULL);
+ Assert(pci_bus);
+
+ pci_do_scan_bus(pci_bus);
+
+ INITIALIZE_INITVAR(dde26_pci);
+}
+
+arch_initcall(l4dde26_init_pci);
diff --git a/libdde_linux26/lib/src/arch/l4/power.c b/libdde_linux26/lib/src/arch/l4/power.c
new file mode 100644
index 00000000..e36487bd
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/power.c
@@ -0,0 +1,23 @@
+/* Dummy functions for power management. */
+
+#include "local.h"
+#include <linux/device.h>
+
+int device_pm_add(struct device * dev)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+
+void device_pm_remove(struct device * dev)
+{
+ WARN_UNIMPL;
+}
+
+int pm_qos_add_requirement(int qos, char *name, s32 value) { return 0; }
+int pm_qos_update_requirement(int qos, char *name, s32 new_value) { return 0; }
+void pm_qos_remove_requirement(int qos, char *name) { }
+int pm_qos_requirement(int qos) { return 0; }
+int pm_qos_add_notifier(int qos, struct notifier_block *notifier) { return 0; }
+int pm_qos_remove_notifier(int qos, struct notifier_block *notifier) { return 0; }
diff --git a/libdde_linux26/lib/src/arch/l4/process.c b/libdde_linux26/lib/src/arch/l4/process.c
new file mode 100644
index 00000000..5fe43b32
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/process.c
@@ -0,0 +1,347 @@
+#include <l4/dde/dde.h>
+#include <l4/dde/linux26/dde26.h>
+
+#include <asm/atomic.h>
+
+#include <linux/init_task.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/list.h>
+#include <linux/thread_info.h>
+#include <linux/sched.h>
+#include <linux/pid.h>
+#include <linux/vmalloc.h>
+
+#include "local.h"
+
+/*****************************************************************************
+ ** Current() implementation **
+ *****************************************************************************/
+struct thread_info *current_thread_info(void)
+{
+ dde26_thread_data *cur = (dde26_thread_data *)ddekit_thread_get_my_data();
+ return &LX_THREAD(cur);
+}
+
+struct task_struct *get_current(void)
+{
+ return current_thread_info()->task;
+}
+
+/*****************************************************************************
+ ** PID-related stuff **
+ ** **
+ ** Linux manages lists of PIDs that are handed out to processes so that at **
+ ** a later point it is able to determine which task_struct belongs to a **
+ ** certain PID. We implement this with a single list holding the mappings **
+ ** for all our threads. **
+ *****************************************************************************/
+
+LIST_HEAD(_pid_task_list);
+ddekit_lock_t _pid_task_list_lock;
+
+/** PID to task_struct mapping */
+struct pid2task
+{
+ struct list_head list; /**< list data */
+ struct pid *pid; /**< PID */
+ struct task_struct *ts; /**< task struct */
+};
+
+struct pid init_struct_pid = INIT_STRUCT_PID;
+
+void put_pid(struct pid *pid)
+{
+ if (pid)
+ atomic_dec(&pid->count);
+ // no freeing here, our struct pid's are always allocated as
+ // part of the dde26_thread_data
+}
+
+/** Attach PID to a certain task struct. */
+void attach_pid(struct task_struct *task, enum pid_type type
+ __attribute__((unused)), struct pid *pid)
+{
+ /* Initialize a new pid2task mapping */
+ struct pid2task *pt = kmalloc(sizeof(struct pid2task), GFP_KERNEL);
+ pt->pid = get_pid(pid);
+ pt->ts = task;
+
+ /* add to list */
+ ddekit_lock_lock(&_pid_task_list_lock);
+ list_add(&pt->list, &_pid_task_list);
+ ddekit_lock_unlock(&_pid_task_list_lock);
+}
+
+/** Detach PID from a task struct. */
+void detach_pid(struct task_struct *task, enum pid_type type __attribute__((unused)))
+{
+ struct list_head *p, *n, *h;
+
+ h = &_pid_task_list;
+
+ ddekit_lock_lock(&_pid_task_list_lock);
+ /* search for mapping with given task struct and free it if necessary */
+ list_for_each_safe(p, n, h) {
+ struct pid2task *pt = list_entry(p, struct pid2task, list);
+ if (pt->ts == task) {
+ put_pid(pt->pid);
+ list_del(p);
+ kfree(pt);
+ break;
+ }
+ }
+ ddekit_lock_unlock(&_pid_task_list_lock);
+}
+
+struct task_struct *find_task_by_pid_type(int type, int nr)
+{
+ struct list_head *h, *p;
+ h = &_pid_task_list;
+
+ ddekit_lock_lock(&_pid_task_list_lock);
+ list_for_each(p, h) {
+ struct pid2task *pt = list_entry(p, struct pid2task, list);
+ if (pid_nr(pt->pid) == nr) {
+ ddekit_lock_unlock(&_pid_task_list_lock);
+ return pt->ts;
+ }
+ }
+ ddekit_lock_unlock(&_pid_task_list_lock);
+
+ return NULL;
+}
+
+
+struct task_struct *find_task_by_pid_ns(int nr, struct pid_namespace *ns)
+{
+ /* we don't implement PID name spaces */
+ return find_task_by_pid_type(0, nr);
+}
+
+struct task_struct *find_task_by_pid(int nr)
+{
+ return find_task_by_pid_type(0, nr);
+}
+
+/*****************************************************************************
+ ** kernel_thread() implementation **
+ *****************************************************************************/
+/* Struct containing thread data for a newly created kthread. */
+struct __kthread_data
+{
+ int (*fn)(void *);
+ void *arg;
+ ddekit_lock_t lock;
+ dde26_thread_data *kthread;
+};
+
+/** Counter for running kthreads. It is used to create unique names
+ * for kthreads.
+ */
+static atomic_t kthread_count = ATOMIC_INIT(0);
+
+/** Entry point for new kernel threads. Make this thread a DDE26
+ * worker and then execute the real thread fn.
+ */
+static void __kthread_helper(void *arg)
+{
+ struct __kthread_data *k = (struct __kthread_data *)arg;
+
+ /*
+ * Make a copy of the fn and arg pointers, as the kthread struct is
+ * deleted by our parent after notifying it and this may happen before we
+ * get to execute the function.
+ */
+ int (*_fn)(void*) = k->fn;
+ void *_arg = k->arg;
+
+ l4dde26_process_add_worker();
+
+ /*
+ * Handshake with creator - we store our thread data in the
+ * kthread struct and then unlock the lock to notify our
+ * creator about completing setup
+ */
+ k->kthread = (dde26_thread_data *)ddekit_thread_get_my_data();
+ ddekit_lock_unlock(&k->lock);
+
+ do_exit(_fn(_arg));
+}
+
+/** Our implementation of Linux' kernel_thread() function. Setup a new
+ * thread running our __kthread_helper() function.
+ */
+int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+{
+ ddekit_thread_t *t;
+ char name[20];
+ struct __kthread_data *kt = vmalloc(sizeof(struct __kthread_data));
+ ddekit_lock_t lock;
+
+ /* Initialize (and grab) handshake lock */
+ ddekit_lock_init(&lock);
+ ddekit_lock_lock(&lock);
+
+ int threadnum = atomic_inc_return(&kthread_count);
+ kt->fn = fn;
+ kt->arg = arg;
+ kt->lock = lock; // Copy lock ptr, note that kt is freed by the
+ // new thread, so we MUST NOT use kt->lock after
+ // this point!
+
+ snprintf(name, 20, ".kthread%x", threadnum);
+ t = ddekit_thread_create(__kthread_helper,
+ (void *)kt, name);
+ Assert(t);
+
+ ddekit_lock_lock(&lock);
+ ddekit_lock_deinit(&lock);
+
+ return pid_nr(VPID_P(kt->kthread));
+}
+
+/** Our implementation of exit(). For DDE purposes this only relates
+ * to kernel threads.
+ */
+void do_exit(long code)
+{
+ ddekit_thread_t *t = DDEKIT_THREAD(lxtask_to_ddethread(current));
+// printk("Thread %s exits with code %x\n", ddekit_thread_get_name(t), code);
+
+ /* do some cleanup */
+ detach_pid(current, 0);
+
+ /* goodbye, cruel world... */
+ ddekit_thread_exit();
+}
+
+/*****************************************************************************
+ ** Misc functions **
+ *****************************************************************************/
+
+void dump_stack(void)
+{
+}
+
+
+char *get_task_comm(char *buf, struct task_struct *tsk)
+{
+ char *ret;
+ /* buf must be at least sizeof(tsk->comm) in size */
+ task_lock(tsk);
+ ret = strncpy(buf, tsk->comm, sizeof(tsk->comm));
+ task_unlock(tsk);
+ return ret;
+}
+
+
+void set_task_comm(struct task_struct *tsk, char *buf)
+{
+ task_lock(tsk);
+ strlcpy(tsk->comm, buf, sizeof(tsk->comm));
+ task_unlock(tsk);
+}
+
+
+/*****************************************************************************
+ ** DDEKit gluecode, init functions **
+ *****************************************************************************/
+/* Initialize a dde26 thread.
+ *
+ * - Allocate thread data, as well as a Linux task struct,
+ * - Fill in default values for thread_info, and task,
+ * - Adapt task struct's thread_info backreference
+ * - Initialize the DDE sleep lock
+ */
+static dde26_thread_data *init_dde26_thread(void)
+{
+ /*
+ * Virtual PID counter
+ */
+ static atomic_t pid_counter = ATOMIC_INIT(0);
+ dde26_thread_data *t = vmalloc(sizeof(dde26_thread_data));
+ Assert(t);
+
+ memcpy(&t->_vpid, &init_struct_pid, sizeof(struct pid));
+ t->_vpid.numbers[0].nr = atomic_inc_return(&pid_counter);
+
+ memcpy(&LX_THREAD(t), &init_thread, sizeof(struct thread_info));
+
+ LX_TASK(t) = vmalloc(sizeof(struct task_struct));
+ Assert(LX_TASK(t));
+
+ memcpy(LX_TASK(t), &init_task, sizeof(struct task_struct));
+
+ /* nice: Linux backreferences a task`s thread_info from the
+ * task struct (which in turn can be found using the
+ * thread_info...) */
+ LX_TASK(t)->stack = &LX_THREAD(t);
+
+ /* initialize this thread's sleep lock */
+ SLEEP_LOCK(t) = ddekit_sem_init(0);
+
+ return t;
+}
+
+/* Process setup for worker threads */
+int l4dde26_process_add_worker(void)
+{
+ dde26_thread_data *cur = init_dde26_thread();
+
+ /* If this function is called for a kernel_thread, the thread already has
+ * been set up and we just need to store a reference to the ddekit struct.
+ * However, this function may also be called directly to turn an L4 thread
+ * into a DDE thread. Then, we need to initialize here. */
+ cur->_ddekit_thread = ddekit_thread_myself();
+ if (cur->_ddekit_thread == NULL)
+ cur->_ddekit_thread = ddekit_thread_setup_myself(".dde26_thread");
+ Assert(cur->_ddekit_thread);
+
+ ddekit_thread_set_my_data(cur);
+
+ attach_pid(LX_TASK(cur), 0, &cur->_vpid);
+
+ /* Linux' default is to have this set to 1 initially and let the
+ * scheduler set this to 0 later on.
+ */
+ current_thread_info()->preempt_count = 0;
+
+ return 0;
+}
+
+
+/**
+ * Add an already existing DDEKit thread to the set of threads known to the
+ * Linux environment. This is used for the timer thread, which is actually a
+ * DDEKit thread, but Linux code shall see it as a Linux thread as well.
+ */
+int l4dde26_process_from_ddekit(ddekit_thread_t *t)
+{
+ Assert(t);
+
+ dde26_thread_data *cur = init_dde26_thread();
+ cur->_ddekit_thread = t;
+ ddekit_thread_set_data(t, cur);
+ attach_pid(LX_TASK(cur), 0, &cur->_vpid);
+
+ return 0;
+}
+
+/** Function to initialize the first DDE process.
+ */
+int __init l4dde26_process_init(void)
+{
+ ddekit_lock_init_unlocked(&_pid_task_list_lock);
+
+ int kthreadd_pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES);
+ kthreadd_task = find_task_by_pid(kthreadd_pid);
+
+ l4dde26_process_add_worker();
+
+ return 0;
+}
+
+DEFINE_PER_CPU(int, cpu_number);
+
+//dde_process_initcall(l4dde26_process_init);
diff --git a/libdde_linux26/lib/src/arch/l4/res.c b/libdde_linux26/lib/src/arch/l4/res.c
new file mode 100644
index 00000000..fbd2d09b
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/res.c
@@ -0,0 +1,180 @@
+#include "local.h"
+
+#include <linux/ioport.h>
+
+/** Request an IO port region.
+ *
+ * \param start start port
+ * \param n number of ports
+ * \param name name of allocator (unused)
+ *
+ * \return NULL error
+ * \return !=NULL success
+ *
+ * \bug Since no one in Linux uses this function's return value,
+ * we do not allocate and fill a resource struct.
+ */
+static struct resource *l4dde26_request_region(resource_size_t start,
+ resource_size_t n,
+ const char *name)
+{
+ int err = ddekit_request_io(start, n);
+
+ if (err)
+ return NULL;
+
+ return (struct resource *)1;
+}
+
+
+/** List of memory regions that have been requested. This is used to
+ * perform ioremap() and iounmap()
+ */
+static LIST_HEAD(dde_mem_regions);
+
+/** va->pa mapping used to store memory regions */
+struct dde_mem_region {
+ ddekit_addr_t pa;
+ ddekit_addr_t va;
+ unsigned int size;
+ struct list_head list;
+};
+
+void __iomem * ioremap(unsigned long phys_addr, unsigned long size);
+
+/** Request an IO memory region.
+ *
+ * \param start start address
+ * \param n size of memory area
+ * \param name name of allocator (unused)
+ *
+ * \return NULL error
+ * \return !=NULL success
+ *
+ * \bug Since no one in Linux uses this function's return value,
+ * we do not allocate and fill a resource struct.
+ */
+static struct resource *l4dde26_request_mem_region(resource_size_t start,
+ resource_size_t n,
+ const char *name)
+{
+ ddekit_addr_t va = 0;
+ struct dde_mem_region *mreg;
+
+ // do not perform a resource request twice
+ if (ioremap(start, n))
+ return (struct resource *)1;
+
+ int i = ddekit_request_mem(start, n, &va);
+
+ if (i) {
+ ddekit_printf("request_mem_region() failed (start %lx, size %x)", start, n);
+ return NULL;
+ }
+
+ mreg = kmalloc(sizeof(struct dde_mem_region), GFP_KERNEL);
+ Assert(mreg);
+
+ mreg->pa = start;
+ mreg->va = va;
+ mreg->size = n;
+ list_add(&mreg->list, &dde_mem_regions);
+
+#if 0
+ ddekit_pgtab_set_region_with_size((void *)va, start, n, PTE_TYPE_OTHER);
+#endif
+
+ return (struct resource *)1;
+}
+
+
+struct resource * __request_region(struct resource *parent,
+ resource_size_t start,
+ resource_size_t n,
+ const char *name, int flags)
+{
+ Assert(parent);
+ Assert(parent->flags & IORESOURCE_IO || parent->flags & IORESOURCE_MEM);
+
+ switch (parent->flags)
+ {
+ case IORESOURCE_IO:
+ return l4dde26_request_region(start, n, name);
+ case IORESOURCE_MEM:
+ return l4dde26_request_mem_region(start, n, name);
+ }
+
+ return NULL;
+}
+
+
+/** Release IO port region.
+ */
+static void l4dde26_release_region(resource_size_t start, resource_size_t n)
+{
+ /* FIXME: we need a list of "struct resource"s that have been
+ * allocated by request_region() and then need to
+ * free this stuff here! */
+ ddekit_release_io(start, n);
+}
+
+
+/** Release IO memory region.
+ */
+static void l4dde26_release_mem_region(resource_size_t start, resource_size_t n)
+{
+ ddekit_release_mem(start, n);
+ ddekit_pgtab_clear_region((void *)start, PTE_TYPE_OTHER);
+}
+
+
+int __check_region(struct resource *root, resource_size_t s, resource_size_t n)
+{
+ WARN_UNIMPL;
+ return -1;
+}
+
+void __release_region(struct resource *root, resource_size_t start,
+ resource_size_t n)
+{
+ switch (root->flags)
+ {
+ case IORESOURCE_IO:
+ return l4dde26_release_region(start, n);
+ case IORESOURCE_MEM:
+ return l4dde26_release_mem_region(start, n);
+ }
+}
+
+
+/** Map physical I/O region into virtual address space.
+ *
+ * For our sake, this only returns the virtual address belonging to
+ * the physical region, since we don't manage page tables ourselves.
+ */
+void __iomem * ioremap(unsigned long phys_addr, unsigned long size)
+{
+ struct list_head *pos, *head;
+ head = &dde_mem_regions;
+
+ list_for_each(pos, head) {
+ struct dde_mem_region *mreg = list_entry(pos, struct dde_mem_region,
+ list);
+ if (mreg->pa <= phys_addr && mreg->pa + mreg->size >= phys_addr + size)
+ return (void *)(mreg->va + (phys_addr - mreg->pa));
+ }
+
+ return NULL;
+}
+
+
+void __iomem * ioremap_nocache(unsigned long offset, unsigned long size)
+{
+ return ioremap(offset, size);
+}
+
+
+void iounmap(volatile void __iomem *addr)
+{
+ WARN_UNIMPL;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/sched.c b/libdde_linux26/lib/src/arch/l4/sched.c
new file mode 100644
index 00000000..b38520c6
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/sched.c
@@ -0,0 +1,155 @@
+#include "local.h"
+
+#include <linux/sched.h>
+
+DEFINE_RWLOCK(tasklist_lock);
+
+asmlinkage void preempt_schedule(void)
+{
+ WARN_UNIMPL;
+}
+
+
+/* Our version of scheduler invocation.
+ *
+ * Scheduling is performed by Fiasco, so we don't care about it as long as
+ * a thread is running. If a task becomes TASK_INTERRUPTIBLE or
+ * TASK_UNINTERRUPTIBLE, we make sure that the task does not become
+ * scheduled by locking the task's sleep lock.
+ */
+asmlinkage void schedule(void)
+{
+ dde26_thread_data *t = lxtask_to_ddethread(current);
+
+ switch (current->state) {
+ case TASK_RUNNING:
+ ddekit_thread_schedule();
+ break;
+ case TASK_INTERRUPTIBLE:
+ case TASK_UNINTERRUPTIBLE:
+ ddekit_sem_down(SLEEP_LOCK(t));
+ break;
+ default:
+ panic("current->state = %d --- unknown state\n", current->state);
+ }
+}
+
+
+/** yield the current processor to other threads.
+ *
+ * this is a shortcut for kernel-space yielding - it marks the
+ * thread runnable and calls sys_sched_yield().
+ */
+void __sched yield(void)
+{
+ set_current_state(TASK_RUNNING);
+ ddekit_yield();
+}
+
+
+/***
+ * try_to_wake_up - wake up a thread
+ * @p: the to-be-woken-up thread
+ * @state: the mask of task states that can be woken
+ * @sync: do a synchronous wakeup?
+ */
+int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
+{
+ Assert(p);
+ dde26_thread_data *t = lxtask_to_ddethread(p);
+
+ Assert(t);
+ Assert(SLEEP_LOCK(t));
+
+ p->state = TASK_RUNNING;
+ ddekit_sem_up(SLEEP_LOCK(t));
+
+ return 0;
+}
+
+
+static void process_timeout(unsigned long data)
+{
+ wake_up_process((struct task_struct *)data);
+}
+
+
+signed long __sched schedule_timeout(signed long timeout)
+{
+ struct timer_list timer;
+ unsigned long expire = timeout + jiffies;
+
+ setup_timer(&timer, process_timeout, (unsigned long)current);
+ timer.expires = expire;
+
+ switch(timeout)
+ {
+ /*
+ * Hah!
+ *
+ * Specifying a @timeout value of %MAX_SCHEDULE_TIMEOUT will schedule
+ * the CPU away without a bound on the timeout. In this case the return
+ * value will be %MAX_SCHEDULE_TIMEOUT.
+ */
+ case MAX_SCHEDULE_TIMEOUT:
+ schedule();
+ break;
+ default:
+ add_timer(&timer);
+ schedule();
+ del_timer(&timer);
+ break;
+ }
+
+ timeout = expire - jiffies;
+
+ return timeout < 0 ? 0 : timeout;
+}
+
+
+signed long __sched schedule_timeout_interruptible(signed long timeout)
+{
+ __set_current_state(TASK_INTERRUPTIBLE);
+ return schedule_timeout(timeout);
+}
+
+
+signed long __sched schedule_timeout_uninterruptible(signed long timeout)
+{
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ return schedule_timeout(timeout);
+}
+
+/** Tasks may be forced to run only on a certain no. of CPUs. Since
+ * we only emulate a SMP-environment for the sake of having multiple
+ * threads, we do not need to implement this.
+ */
+int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
+{
+ return 0;
+}
+
+void set_user_nice(struct task_struct *p, long nice)
+{
+ //WARN_UNIMPL;
+}
+
+void __sched io_schedule(void)
+{
+ WARN_UNIMPL;
+}
+
+long __sched io_schedule_timeout(long timeout)
+{
+ WARN_UNIMPL;
+ return -1;
+}
+
+extern int sched_setscheduler_nocheck(struct task_struct *t, int flags,
+ struct sched_param *p)
+{
+ WARN_UNIMPL;
+ return -1;
+}
+
+void ignore_signals(struct task_struct *t) { }
diff --git a/libdde_linux26/lib/src/arch/l4/signal.c b/libdde_linux26/lib/src/arch/l4/signal.c
new file mode 100644
index 00000000..bd0bc0a7
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/signal.c
@@ -0,0 +1,24 @@
+#include "local.h"
+
+/******************************************************************************
+ ** Dummy signal implementation. **
+ ** DDE does not provide its own signal implementation. To make it compile, **
+ ** we provide dummy versions of signalling functions here. If later on **
+ ** someone *REALLY* wants to use signals in the DDE context, he might **
+ ** erase this file and use something like the L4 signalling library for **
+ ** such purposes. **
+*******************************************************************************/
+
+int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
+{
+ return 0;
+}
+
+void flush_signals(struct task_struct *t)
+{
+}
+
+int do_sigaction(int sig, struct k_sigaction *act, struct k_sigaction *oact)
+{
+ return 0;
+}
diff --git a/libdde_linux26/lib/src/arch/l4/smp.c b/libdde_linux26/lib/src/arch/l4/smp.c
new file mode 100644
index 00000000..1ebf08c2
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/smp.c
@@ -0,0 +1,37 @@
+#include <linux/cpumask.h>
+
+#include "local.h"
+
+static struct cpumask _possible = CPU_MASK_ALL;
+static struct cpumask _online = CPU_MASK_CPU0;
+static struct cpumask _present = CPU_MASK_CPU0;
+static struct cpumask _active = CPU_MASK_CPU0;
+
+const struct cpumask *const cpu_possible_mask = &_possible;
+const struct cpumask *const cpu_online_mask = &_online;
+const struct cpumask *const cpu_present_mask = &_present;
+const struct cpumask *const cpu_active_mask = &_active;
+
+cpumask_t cpu_mask_all = CPU_MASK_ALL;
+int nr_cpu_ids = NR_CPUS;
+const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x) [x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x) MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x) MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x) MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+ MASK_DECLARE_8(0), MASK_DECLARE_8(8),
+ MASK_DECLARE_8(16), MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+ MASK_DECLARE_8(32), MASK_DECLARE_8(40),
+ MASK_DECLARE_8(48), MASK_DECLARE_8(56),
+#endif
+};
+
+void __smp_call_function_single(int cpuid, struct call_single_data *data)
+{
+ data->func(data->info);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/softirq.c b/libdde_linux26/lib/src/arch/l4/softirq.c
new file mode 100644
index 00000000..21b36d17
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/softirq.c
@@ -0,0 +1,267 @@
+#include "local.h"
+
+#include <linux/interrupt.h>
+
+/* There are at most 32 softirqs in Linux, but only 6 are really used. */
+#define NUM_SOFTIRQS 6
+
+DECLARE_INITVAR(dde26_softirq);
+
+/* softirq threads and their wakeup semaphores */
+ddekit_thread_t *dde_softirq_thread;
+ddekit_sem_t *dde_softirq_sem;
+
+/* struct tasklet_head is not defined in a header in Linux 2.6 */
+struct tasklet_head
+{
+ struct tasklet_struct *list;
+ ddekit_lock_t lock; /* list lock */
+};
+
+/* What to do if a softirq occurs. */
+static struct softirq_action softirq_vec[32];
+
+/* tasklet queues for each softirq thread */
+struct tasklet_head tasklet_vec;
+struct tasklet_head tasklet_hi_vec;
+
+void open_softirq(int nr, void (*action)(struct softirq_action*))
+{
+ softirq_vec[nr].action = action;
+}
+
+static void raise_softirq_irqoff_cpu(unsigned int nr, unsigned int cpu)
+{
+ CHECK_INITVAR(dde26_softirq);
+
+ /* mark softirq scheduled */
+ __raise_softirq_irqoff(nr);
+ /* wake softirq thread */
+ ddekit_sem_up(dde_softirq_sem);
+}
+
+void raise_softirq_irqoff(unsigned int nr)
+{
+ raise_softirq_irqoff_cpu(nr, 0);
+}
+
+void raise_softirq(unsigned int nr)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ raise_softirq_irqoff(nr);
+ local_irq_restore(flags);
+}
+
+/**
+ * Initialize tasklet.
+ */
+void tasklet_init(struct tasklet_struct *t,
+ void (*func)(unsigned long), unsigned long data)
+{
+ t->next = NULL;
+ t->state = 0;
+ atomic_set(&t->count, 0);
+ t->func = func;
+ t->data = data;
+}
+
+/* enqueue tasklet */
+static void __tasklet_enqueue(struct tasklet_struct *t,
+ struct tasklet_head *listhead)
+{
+ ddekit_lock_lock(&listhead->lock);
+ t->next = listhead->list;
+ listhead->list = t;
+ ddekit_lock_unlock(&listhead->lock);
+}
+
+void __tasklet_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+
+ CHECK_INITVAR(dde26_softirq);
+
+ local_irq_save(flags);
+
+ __tasklet_enqueue(t, &tasklet_vec);
+ /* raise softirq */
+ raise_softirq_irqoff_cpu(TASKLET_SOFTIRQ, 0);
+
+ local_irq_restore(flags);
+}
+
+void __tasklet_hi_schedule(struct tasklet_struct *t)
+{
+ unsigned long flags;
+
+ CHECK_INITVAR(dde26_softirq);
+
+ local_irq_save(flags);
+ __tasklet_enqueue(t, &tasklet_hi_vec);
+ raise_softirq_irqoff_cpu(HI_SOFTIRQ, 0);
+ local_irq_restore(flags);
+}
+
+/* Execute tasklets */
+static void tasklet_action(struct softirq_action *a)
+{
+ struct tasklet_struct *list;
+
+ ddekit_lock_lock(&tasklet_vec.lock);
+ list = tasklet_vec.list;
+ tasklet_vec.list = NULL;
+ ddekit_lock_unlock(&tasklet_vec.lock);
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ t->func(t->data);
+ tasklet_unlock(t);
+ continue;
+ }
+ tasklet_unlock(t);
+ }
+
+ ddekit_lock_lock(&tasklet_vec.lock);
+ t->next = tasklet_vec.list;
+ tasklet_vec.list = t;
+ raise_softirq_irqoff_cpu(TASKLET_SOFTIRQ, 0);
+ ddekit_lock_unlock(&tasklet_vec.lock);
+ }
+}
+
+
+static void tasklet_hi_action(struct softirq_action *a)
+{
+ struct tasklet_struct *list;
+
+ ddekit_lock_lock(&tasklet_hi_vec.lock);
+ list = tasklet_hi_vec.list;
+ tasklet_hi_vec.list = NULL;
+ ddekit_lock_unlock(&tasklet_hi_vec.lock);
+
+ while (list) {
+ struct tasklet_struct *t = list;
+
+ list = list->next;
+
+ if (tasklet_trylock(t)) {
+ if (!atomic_read(&t->count)) {
+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
+ BUG();
+ t->func(t->data);
+ tasklet_unlock(t);
+ continue;
+ }
+ tasklet_unlock(t);
+ }
+
+ ddekit_lock_lock(&tasklet_hi_vec.lock);
+ t->next = tasklet_hi_vec.list;
+ tasklet_hi_vec.list = t;
+ raise_softirq_irqoff_cpu(HI_SOFTIRQ, 0);
+ ddekit_lock_unlock(&tasklet_hi_vec.lock);
+ }
+}
+
+
+#define MAX_SOFTIRQ_RETRIES 10
+
+/** Run softirq handlers
+ */
+void __do_softirq(void)
+{
+ int retries = MAX_SOFTIRQ_RETRIES;
+ do {
+ struct softirq_action *h = softirq_vec;
+ unsigned long pending = local_softirq_pending();
+
+ /* reset softirq count */
+ set_softirq_pending(0);
+
+ /* While we have a softirq pending... */
+ while (pending) {
+ /* need to execute current softirq? */
+ if (pending & 1)
+ h->action(h);
+ /* try next softirq */
+ h++;
+ /* remove pending flag for last softirq */
+ pending >>= 1;
+ }
+
+ /* Somebody might have scheduled another softirq in between
+ * (e.g., an IRQ thread or another tasklet). */
+ } while (local_softirq_pending() && --retries);
+
+}
+
+
+void do_softirq(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ if (local_softirq_pending())
+ __do_softirq();
+ local_irq_restore(flags);
+}
+
+/** Softirq thread function.
+ *
+ * Once started, a softirq thread waits for tasklets to be scheduled
+ * and executes them.
+ *
+ * \param arg # of this softirq thread so that it grabs the correct lock
+ * if multiple softirq threads are running.
+ */
+void l4dde26_softirq_thread(void *arg)
+{
+ printk("Softirq daemon starting\n");
+ l4dde26_process_add_worker();
+
+ /* This thread will always be in a softirq, so set the
+ * corresponding flag right now.
+ */
+ preempt_count() |= SOFTIRQ_MASK;
+
+ while(1) {
+ ddekit_sem_down(dde_softirq_sem);
+ do_softirq();
+ }
+}
+
+/** Initialize softirq subsystem.
+ *
+ * Start NUM_SOFTIRQ_THREADS threads executing the \ref l4dde26_softirq_thread
+ * function.
+ */
+void l4dde26_softirq_init(void)
+{
+ char name[20];
+
+ dde_softirq_sem = ddekit_sem_init(0);
+
+ set_softirq_pending(0);
+
+ ddekit_lock_init_unlocked(&tasklet_vec.lock);
+ ddekit_lock_init_unlocked(&tasklet_hi_vec.lock);
+
+ snprintf(name, 20, ".softirqd");
+ dde_softirq_thread = ddekit_thread_create(
+ l4dde26_softirq_thread,
+ NULL, name);
+
+ open_softirq(TASKLET_SOFTIRQ, tasklet_action);
+ open_softirq(HI_SOFTIRQ, tasklet_hi_action);
+
+ INITIALIZE_INITVAR(dde26_softirq);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/timer.c b/libdde_linux26/lib/src/arch/l4/timer.c
new file mode 100644
index 00000000..ea04b67e
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/timer.c
@@ -0,0 +1,184 @@
+#include "local.h"
+
+#include <linux/timer.h>
+#include <linux/fs.h>
+#include <asm/delay.h>
+
+DECLARE_INITVAR(dde26_timer);
+
+/* Definitions from linux/kernel/timer.c */
+
+/*
+ * per-CPU timer vector definitions:
+ */
+#define TVN_BITS (CONFIG_BASE_SMALL ? 4 : 6)
+#define TVR_BITS (CONFIG_BASE_SMALL ? 6 : 8)
+#define TVN_SIZE (1 << TVN_BITS)
+#define TVR_SIZE (1 << TVR_BITS)
+#define TVN_MASK (TVN_SIZE - 1)
+#define TVR_MASK (TVR_SIZE - 1)
+
+typedef struct tvec_s {
+ struct list_head vec[TVN_SIZE];
+} tvec_t;
+
+typedef struct tvec_root_s {
+ struct list_head vec[TVR_SIZE];
+} tvec_root_t;
+
+struct tvec_base {
+ spinlock_t lock;
+ struct timer_list *running_timer;
+ unsigned long timer_jiffies;
+ tvec_root_t tv1;
+ tvec_t tv2;
+ tvec_t tv3;
+ tvec_t tv4;
+ tvec_t tv5;
+} ____cacheline_aligned_in_smp;
+
+typedef struct tvec_t_base_s tvec_base_t;
+
+struct tvec_base boot_tvec_bases __attribute__((unused));
+
+static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) __attribute__((unused)) = &boot_tvec_bases;
+
+void init_timer(struct timer_list *timer)
+{
+ timer->ddekit_timer_id = DDEKIT_INVALID_TIMER_ID;
+}
+
+void add_timer(struct timer_list *timer)
+{
+ CHECK_INITVAR(dde26_timer);
+ /* DDE2.6 uses jiffies and HZ as exported from L4IO. Therefore
+ * we just need to hand over the timeout to DDEKit. */
+ timer->ddekit_timer_id = ddekit_add_timer((void *)timer->function,
+ (void *)timer->data,
+ timer->expires);
+}
+
+
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+ add_timer(timer);
+}
+
+
+int del_timer(struct timer_list * timer)
+{
+ int ret;
+ CHECK_INITVAR(dde26_timer);
+ ret = ddekit_del_timer(timer->ddekit_timer_id);
+ timer->ddekit_timer_id = DDEKIT_INVALID_TIMER_ID;
+
+ return ret >= 0;
+}
+
+int del_timer_sync(struct timer_list *timer)
+{
+ return del_timer(timer);
+}
+
+
+int __mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ /* XXX: Naive implementation. If we really need to be fast with
+ * this function, we can implement a faster version inside
+ * the DDEKit. Bjoern just does not think that this is the
+ * case.
+ */
+ int r;
+
+ CHECK_INITVAR(dde26_timer);
+ r = del_timer(timer);
+
+ timer->expires = expires;
+ add_timer(timer);
+
+ return (r > 0);
+}
+
+
+int mod_timer(struct timer_list *timer, unsigned long expires)
+{
+ return __mod_timer(timer, expires);
+}
+
+
+int timer_pending(const struct timer_list *timer)
+{
+ CHECK_INITVAR(dde26_timer);
+ /* There must be a valid DDEKit timer ID in the timer field
+ * *AND* it must be pending in the DDEKit.
+ */
+ return ((timer->ddekit_timer_id != DDEKIT_INVALID_TIMER_ID)
+ && ddekit_timer_pending(timer->ddekit_timer_id));
+}
+
+
+/**
+ * msleep - sleep safely even with waitqueue interruptions
+ * @msecs: Time in milliseconds to sleep for
+ */
+void msleep(unsigned int msecs)
+{
+ ddekit_thread_msleep(msecs);
+}
+
+
+void __const_udelay(unsigned long xloops)
+{
+ ddekit_thread_usleep(xloops);
+}
+
+
+void __udelay(unsigned long usecs)
+{
+ ddekit_thread_usleep(usecs);
+}
+
+
+void __ndelay(unsigned long nsecs)
+{
+ ddekit_thread_nsleep(nsecs);
+}
+
+
+void __init l4dde26_init_timers(void)
+{
+ ddekit_init_timers();
+
+ l4dde26_process_from_ddekit(ddekit_get_timer_thread());
+
+ INITIALIZE_INITVAR(dde26_timer);
+}
+
+core_initcall(l4dde26_init_timers);
+
+extern unsigned long volatile __jiffy_data jiffies;
+
+__attribute__((weak)) void do_gettimeofday (struct timeval *tv)
+{
+ WARN_UNIMPL;
+}
+
+struct timespec current_fs_time(struct super_block *sb)
+{
+ struct timespec now = {0,0};
+ WARN_UNIMPL;
+ return now;
+}
+
+ktime_t ktime_get_real(void)
+{
+ struct timespec now = {0,0};
+ WARN_UNIMPL;
+ return timespec_to_ktime(now);
+}
+
+
+void native_io_delay(void)
+{
+ udelay(2);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/vmalloc.c b/libdde_linux26/lib/src/arch/l4/vmalloc.c
new file mode 100644
index 00000000..134b80c3
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/vmalloc.c
@@ -0,0 +1,30 @@
+/******************************************************************************
+ * Bjoern Doebel <doebel@tudos.org> *
+ * *
+ * (c) 2005 - 2007 Technische Universitaet Dresden *
+ * This file is part of DROPS, which is distributed under the terms of the *
+ * GNU General Public License 2. Please see the COPYING file for details. *
+ ******************************************************************************/
+
+/*
+ * \brief vmalloc implementation
+ * \author Bjoern Doebel
+ * \date 2007-07-30
+ */
+
+/* Linux */
+#include <linux/vmalloc.h>
+
+/* DDEKit */
+#include <l4/dde/ddekit/memory.h>
+#include <l4/dde/ddekit/lock.h>
+
+void *vmalloc(unsigned long size)
+{
+ return ddekit_simple_malloc(size);
+}
+
+void vfree(const void *addr)
+{
+ ddekit_simple_free((void*)addr);
+}
diff --git a/libdde_linux26/lib/src/arch/l4/vmstat.c b/libdde_linux26/lib/src/arch/l4/vmstat.c
new file mode 100644
index 00000000..2e87389e
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/l4/vmstat.c
@@ -0,0 +1,34 @@
+#include "local.h"
+
+#include <linux/fs.h>
+
+atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+
+
+void dec_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
+
+
+void inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
+
+
+void __inc_zone_page_state(struct page *page, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
+
+void __get_zone_counts(unsigned long *active, unsigned long *inactive,
+ unsigned long *free, struct pglist_data *pgdat)
+{
+ WARN_UNIMPL;
+}
+
+void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
+{
+ WARN_UNIMPL;
+}
diff --git a/libdde_linux26/lib/src/arch/x86/.svn/all-wcprops b/libdde_linux26/lib/src/arch/x86/.svn/all-wcprops
new file mode 100644
index 00000000..2db9a887
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/.svn/all-wcprops
@@ -0,0 +1,5 @@
+K 25
+svn:wc:ra_dav:version-url
+V 67
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/x86
+END
diff --git a/libdde_linux26/lib/src/arch/x86/.svn/entries b/libdde_linux26/lib/src/arch/x86/.svn/entries
new file mode 100644
index 00000000..cdbe1e1d
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/.svn/entries
@@ -0,0 +1,31 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/lib/src/arch/x86
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+lib
+dir
+
diff --git a/libdde_linux26/lib/src/arch/x86/.svn/format b/libdde_linux26/lib/src/arch/x86/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/lib/src/arch/x86/lib/.svn/all-wcprops b/libdde_linux26/lib/src/arch/x86/lib/.svn/all-wcprops
new file mode 100644
index 00000000..61d9e4b5
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/lib/.svn/all-wcprops
@@ -0,0 +1,11 @@
+K 25
+svn:wc:ra_dav:version-url
+V 71
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/x86/lib
+END
+semaphore_32.S
+K 25
+svn:wc:ra_dav:version-url
+V 86
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/lib/src/arch/x86/lib/semaphore_32.S
+END
diff --git a/libdde_linux26/lib/src/arch/x86/lib/.svn/entries b/libdde_linux26/lib/src/arch/x86/lib/.svn/entries
new file mode 100644
index 00000000..ee8219b2
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/lib/.svn/entries
@@ -0,0 +1,62 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/lib/src/arch/x86/lib
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+semaphore_32.S
+file
+
+
+
+
+2009-11-15T17:17:12.000000Z
+8781a421c002516577c2888bc85b51e9
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+2859
+
diff --git a/libdde_linux26/lib/src/arch/x86/lib/.svn/format b/libdde_linux26/lib/src/arch/x86/lib/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/lib/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/lib/src/arch/x86/lib/.svn/text-base/semaphore_32.S.svn-base b/libdde_linux26/lib/src/arch/x86/lib/.svn/text-base/semaphore_32.S.svn-base
new file mode 100644
index 00000000..1850ca50
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/lib/.svn/text-base/semaphore_32.S.svn-base
@@ -0,0 +1,138 @@
+/*
+ * i386 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+
+#include <linux/linkage.h>
+#include <asm/rwlock.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+/*
+ * The semaphore operations have a special calling sequence that
+ * allows us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the semaphore.
+ *
+ * %eax contains the semaphore pointer on entry. Save the C-clobbered
+ * registers (%eax, %edx and %ecx) except %eax which is either a return
+ * value or just clobbered.
+ */
+#ifndef DDE_LINUX
+ .section .sched.text, "ax"
+#endif
+
+/*
+ * rw spinlock fallbacks
+ */
+#ifdef CONFIG_SMP
+ENTRY(__write_lock_failed)
+ CFI_STARTPROC simple
+ FRAME
+2: LOCK_PREFIX
+ addl $ RW_LOCK_BIAS,(%eax)
+1: rep; nop
+ cmpl $ RW_LOCK_BIAS,(%eax)
+ jne 1b
+ LOCK_PREFIX
+ subl $ RW_LOCK_BIAS,(%eax)
+ jnz 2b
+ ENDFRAME
+ ret
+ CFI_ENDPROC
+ ENDPROC(__write_lock_failed)
+
+ENTRY(__read_lock_failed)
+ CFI_STARTPROC
+ FRAME
+2: LOCK_PREFIX
+ incl (%eax)
+1: rep; nop
+ cmpl $1,(%eax)
+ js 1b
+ LOCK_PREFIX
+ decl (%eax)
+ js 2b
+ ENDFRAME
+ ret
+ CFI_ENDPROC
+ ENDPROC(__read_lock_failed)
+
+#endif
+
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_down_read_failed)
+ CFI_STARTPROC
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ push %edx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edx,0
+ call rwsem_down_read_failed
+ pop %edx
+ CFI_ADJUST_CFA_OFFSET -4
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_read_failed)
+
+ENTRY(call_rwsem_down_write_failed)
+ CFI_STARTPROC
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ calll rwsem_down_write_failed
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_write_failed)
+
+ENTRY(call_rwsem_wake)
+ CFI_STARTPROC
+ decw %dx /* do nothing if still outstanding active readers */
+ jnz 1f
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ call rwsem_wake
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+1: ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_wake)
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_downgrade_wake)
+ CFI_STARTPROC
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ push %edx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edx,0
+ call rwsem_downgrade_wake
+ pop %edx
+ CFI_ADJUST_CFA_OFFSET -4
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_downgrade_wake)
+
+#endif
diff --git a/libdde_linux26/lib/src/arch/x86/lib/semaphore_32.S b/libdde_linux26/lib/src/arch/x86/lib/semaphore_32.S
new file mode 100644
index 00000000..1850ca50
--- /dev/null
+++ b/libdde_linux26/lib/src/arch/x86/lib/semaphore_32.S
@@ -0,0 +1,138 @@
+/*
+ * i386 semaphore implementation.
+ *
+ * (C) Copyright 1999 Linus Torvalds
+ *
+ * Portions Copyright 1999 Red Hat, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * rw semaphores implemented November 1999 by Benjamin LaHaise <bcrl@kvack.org>
+ */
+
+#include <linux/linkage.h>
+#include <asm/rwlock.h>
+#include <asm/alternative-asm.h>
+#include <asm/frame.h>
+#include <asm/dwarf2.h>
+
+/*
+ * The semaphore operations have a special calling sequence that
+ * allows us to do a simpler in-line version of them. These routines
+ * need to convert that sequence back into the C sequence when
+ * there is contention on the semaphore.
+ *
+ * %eax contains the semaphore pointer on entry. Save the C-clobbered
+ * registers (%eax, %edx and %ecx) except %eax which is either a return
+ * value or just clobbered.
+ */
+#ifndef DDE_LINUX
+ .section .sched.text, "ax"
+#endif
+
+/*
+ * rw spinlock fallbacks
+ */
+#ifdef CONFIG_SMP
+ENTRY(__write_lock_failed)
+ CFI_STARTPROC simple
+ FRAME
+2: LOCK_PREFIX
+ addl $ RW_LOCK_BIAS,(%eax)
+1: rep; nop
+ cmpl $ RW_LOCK_BIAS,(%eax)
+ jne 1b
+ LOCK_PREFIX
+ subl $ RW_LOCK_BIAS,(%eax)
+ jnz 2b
+ ENDFRAME
+ ret
+ CFI_ENDPROC
+ ENDPROC(__write_lock_failed)
+
+ENTRY(__read_lock_failed)
+ CFI_STARTPROC
+ FRAME
+2: LOCK_PREFIX
+ incl (%eax)
+1: rep; nop
+ cmpl $1,(%eax)
+ js 1b
+ LOCK_PREFIX
+ decl (%eax)
+ js 2b
+ ENDFRAME
+ ret
+ CFI_ENDPROC
+ ENDPROC(__read_lock_failed)
+
+#endif
+
+#ifdef CONFIG_RWSEM_XCHGADD_ALGORITHM
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_down_read_failed)
+ CFI_STARTPROC
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ push %edx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edx,0
+ call rwsem_down_read_failed
+ pop %edx
+ CFI_ADJUST_CFA_OFFSET -4
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_read_failed)
+
+ENTRY(call_rwsem_down_write_failed)
+ CFI_STARTPROC
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ calll rwsem_down_write_failed
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_down_write_failed)
+
+ENTRY(call_rwsem_wake)
+ CFI_STARTPROC
+ decw %dx /* do nothing if still outstanding active readers */
+ jnz 1f
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ call rwsem_wake
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+1: ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_wake)
+
+/* Fix up special calling conventions */
+ENTRY(call_rwsem_downgrade_wake)
+ CFI_STARTPROC
+ push %ecx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET ecx,0
+ push %edx
+ CFI_ADJUST_CFA_OFFSET 4
+ CFI_REL_OFFSET edx,0
+ call rwsem_downgrade_wake
+ pop %edx
+ CFI_ADJUST_CFA_OFFSET -4
+ pop %ecx
+ CFI_ADJUST_CFA_OFFSET -4
+ ret
+ CFI_ENDPROC
+ ENDPROC(call_rwsem_downgrade_wake)
+
+#endif