author     Samuel Thibault <samuel.thibault@ens-lyon.org>  2013-07-27 22:15:01 +0000
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>  2013-07-27 22:15:01 +0000
commit     7996a3d79d55b7f879dfd62e202bbfe2963718d3 (patch)
tree       8d9f6759fec4099b9be503c11c7ed174f7204980 /libdde-linux26/include/linux
parent     4fbe7358c7747a9165f776eb19addbb9baf7def2 (diff)
really properly move files
Diffstat (limited to 'libdde-linux26/include/linux')
-rw-r--r--  libdde-linux26/include/linux/autoconf-arm.h       64
-rw-r--r--  libdde-linux26/include/linux/autoconf.h           98
-rw-r--r--  libdde-linux26/include/linux/cpu.h               154
-rw-r--r--  libdde-linux26/include/linux/device.h            606
-rw-r--r--  libdde-linux26/include/linux/firmware.h           75
-rw-r--r--  libdde-linux26/include/linux/hardirq.h           183
-rw-r--r--  libdde-linux26/include/linux/ide.h              1642
-rw-r--r--  libdde-linux26/include/linux/init.h              374
-rw-r--r--  libdde-linux26/include/linux/init_task.h         193
-rw-r--r--  libdde-linux26/include/linux/ioport.h            186
-rw-r--r--  libdde-linux26/include/linux/jiffies.h           322
-rw-r--r--  libdde-linux26/include/linux/kernel.h            554
-rw-r--r--  libdde-linux26/include/linux/kmalloc_sizes.h      51
-rw-r--r--  libdde-linux26/include/linux/mm.h               1348
-rw-r--r--  libdde-linux26/include/linux/mount.h             127
-rw-r--r--  libdde-linux26/include/linux/pagemap.h           473
-rw-r--r--  libdde-linux26/include/linux/percpu.h            120
-rw-r--r--  libdde-linux26/include/linux/percpu_counter.h    149
-rw-r--r--  libdde-linux26/include/linux/preempt.h           139
-rw-r--r--  libdde-linux26/include/linux/sched.h            2384
-rw-r--r--  libdde-linux26/include/linux/slab_def.h           98
-rw-r--r--  libdde-linux26/include/linux/smp.h               190
-rw-r--r--  libdde-linux26/include/linux/spinlock.h          467
-rw-r--r--  libdde-linux26/include/linux/spinlock_types.h    124
-rw-r--r--  libdde-linux26/include/linux/stddef.h             28
-rw-r--r--  libdde-linux26/include/linux/timer.h             214
-rw-r--r--  libdde-linux26/include/linux/usb.h              1765
27 files changed, 12128 insertions, 0 deletions
diff --git a/libdde-linux26/include/linux/autoconf-arm.h b/libdde-linux26/include/linux/autoconf-arm.h
new file mode 100644
index 00000000..3ea9b3ab
--- /dev/null
+++ b/libdde-linux26/include/linux/autoconf-arm.h
@@ -0,0 +1,64 @@
+/* Include Linux' contributed autoconf file */
+#include_next <linux/autoconf.h>
+
+#undef CONFIG_MODULES
+
+/* Because we don't need INET support */
+#undef CONFIG_INET
+#undef CONFIG_XFRM
+#undef CONFIG_IP_NF_IPTABLES
+#undef CONFIG_IP_FIB_HASH
+#undef CONFIG_NETFILTER_XT_MATCH_STATE
+#undef CONFIG_INET_XFRM_MODE_TRANSPORT
+#undef CONFIG_INET_XFRM_MODE_TUNNEL
+#undef CONFIG_IP_NF_CONNTRACK
+#undef CONFIG_TCP_CONG_BIC
+#undef CONFIG_IP_NF_FILTER
+#undef CONFIG_IP_NF_FTP
+#undef CONFIG_IP_NF_TARGET_LOG
+
+/* No highmem for our drivers */
+#undef CONFIG_HIGHMEM
+#undef CONFIG_HIGHMEM4G
+#define CONFIG_NOHIGHMEM 1
+
+/* No PROC fs for us */
+#undef CONFIG_PROC_FS
+
+/* Also, no sysFS */
+#undef CONFIG_SYSFS
+
+/* No NFS support */
+#undef CONFIG_ROOT_NFS
+
+/* We don't support hotplug */
+#undef CONFIG_HOTPLUG
+
+/* No Sysctl */
+#undef CONFIG_SYSCTL
+
+/* No power management */
+#undef CONFIG_PM
+
+/* irqs assigned statically */
+#undef CONFIG_GENERIC_IRQ_PROBE
+
+/* No message-signalled interrupts for PCI. */
+#undef CONFIG_PCI_MSI
+
+/* We don't need event counters, because we don't have /proc/vmstat */
+#undef CONFIG_VM_EVENT_COUNTERS
+
+#define WANT_PAGE_VIRTUAL
+
+#define CONFIG_RWSEM_GENERIC_SPINLOCK
+#define CONFIG_CPU_SA1100
+#define CONFIG_CPU_ARM710
+#define CONFIG_CPU_V6
+#define CONFIG_CPU_TLB_V6
+#undef CONFIG_SECCOMP
+//#undef CONFIG_SMP
+#undef CONFIG_KPROBES
+#undef CONFIG_HUGETLB_PAGE
+#undef CONFIG_HUGETLBFS
+#define CONFIG_VECTORS_BASE 0
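
These wrappers rely on the GNU cpp #include_next directive: the DDE copy of <linux/autoconf.h> sits earlier on the include path, first pulls in the kernel's generated Kconfig header from the next search directory, and then force-disables the subsystems DDE does not emulate. A minimal sketch of the same pattern, with hypothetical file and option names:

    /* override/config.h -- its directory comes FIRST on the include path,
     * e.g.: cc -Ioverride -Iorig -c consumer.c
     * (#include_next is a GNU cpp extension) */
    #include_next <config.h>        /* continues the search: picks up orig/config.h */

    #undef  CONFIG_FEATURE_X        /* disable a subsystem we do not provide */
    #define CONFIG_FEATURE_Y 1      /* force an alternative on */
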
diff --git a/libdde-linux26/include/linux/autoconf.h b/libdde-linux26/include/linux/autoconf.h
new file mode 100644
index 00000000..908b8dad
--- /dev/null
+++ b/libdde-linux26/include/linux/autoconf.h
@@ -0,0 +1,98 @@
+/* Include Linux' contributed autoconf file */
+#include_next <linux/autoconf.h>
+
+#undef CONFIG_MODULES
+
+/* Because we don't need INET support */
+#undef CONFIG_INET
+#undef CONFIG_XFRM
+#undef CONFIG_IP_NF_IPTABLES
+#undef CONFIG_IP_FIB_HASH
+#undef CONFIG_NETFILTER_XT_MATCH_STATE
+#undef CONFIG_INET_XFRM_MODE_TRANSPORT
+#undef CONFIG_INET_XFRM_MODE_TUNNEL
+#undef CONFIG_IP_NF_CONNTRACK
+#undef CONFIG_TCP_CONG_BIC
+#undef CONFIG_IP_NF_FILTER
+#undef CONFIG_IP_NF_FTP
+#undef CONFIG_IP_NF_TARGET_LOG
+
+/* No highmem for our drivers */
+#undef CONFIG_HIGHMEM
+#undef CONFIG_HIGHMEM4G
+#define CONFIG_NOHIGHMEM 1
+
+/* No PROC fs for us */
+#undef CONFIG_PROC_FS
+#undef CONFIG_IDE_PROC_FS
+
+/* Also, no sysFS */
+#undef CONFIG_SYSFS
+
+/* No NFS support */
+#undef CONFIG_ROOT_NFS
+
+/* We don't support hotplug */
+#undef CONFIG_HOTPLUG
+#undef CONFIG_HOTPLUG_CPU
+
+/* No Sysctl */
+#undef CONFIG_SYSCTL
+
+/* No power management */
+#undef CONFIG_PM
+#undef CONFIG_PM_SLEEP
+
+/* irqs assigned statically */
+#undef CONFIG_GENERIC_IRQ_PROBE
+
+/* No message-signalled interrupts for PCI. */
+#undef CONFIG_PCI_MSI
+
+/* We don't need event counters, because we don't have /proc/vmstat */
+#undef CONFIG_VM_EVENT_COUNTERS
+
+/* No pnp for now */
+#undef CONFIG_PNP
+
+/* No IDE ACPI */
+#undef CONFIG_BLK_DEV_IDEACPI
+#define CONFIG_BLK_DEV_IDEDMA 1
+
+#undef CONFIG_TIMER_STATS
+#undef CONFIG_KALLSYMS
+
+/* We still use the SLAB impl, it's mapped to ddekit_slabs anyway. */
+#undef CONFIG_SLUB
+#define CONFIG_SLAB 1
+
+#define WANT_PAGE_VIRTUAL
+
+#undef CONFIG_SECURITY
+#undef CONFIG_FREEZER
+#undef CONFIG_AUDIT
+#undef CONFIG_AUDITSYSCALL
+#undef CONFIG_NETPOLL
+
+/* No wireless extensions for now */
+#undef CONFIG_WIRELESS_EXT
+
+/* We don't have IPtables in here */
+#undef CONFIG_NF_CONNTRACK
+#undef CONFIG_NF_CONNTRACK_IPV4
+#undef CONFIG_NF_CONNTRACK_IPV6
+#undef CONFIG_NF_CONNTRACK_PROC_COMPAT
+#undef CONFIG_NF_CONNTRACK_IRC
+#undef CONFIG_NF_CONNTRACK_FTP
+#undef CONFIG_NF_CONNTRACK_SIP
+#undef CONFIG_NF_CONNTRACK_SECMARK
+
+//#define DEBUG
+#undef CONFIG_DEBUG_FS
+#undef CONFIG_SCHED_MC
+#undef CONFIG_SCHED_SMT
+#undef CONFIG_BLK_DEV_IO_TRACE
+
+#undef CONFIG_FW_LOADER
+#undef CONFIG_DMI
+#undef CONFIG_PCIEAER
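
This x86 variant repeats the ARM list and additionally disables IDE proc support, PNP, kallsyms, netfilter conntrack, and the firmware loader, while pinning the allocator to SLAB (which DDE maps onto ddekit slabs). Note that enabled options are defined to 1 rather than left empty, so that both consumer idioms keep preprocessing; a small hypothetical sketch:

    /* consumer.c -- hypothetical, mirrors the two idioms used in the tree */
    #include <linux/autoconf.h>      /* resolves to the DDE wrapper above */

    #if CONFIG_NOHIGHMEM             /* "#if" needs a value: hence the 1 */
    static const int highmem_configured = 0;
    #endif

    #ifdef CONFIG_PROC_FS            /* never true here: #undef'd by the wrapper */
    static void register_proc_entries(void);
    #endif
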
diff --git a/libdde-linux26/include/linux/cpu.h b/libdde-linux26/include/linux/cpu.h
new file mode 100644
index 00000000..759674d8
--- /dev/null
+++ b/libdde-linux26/include/linux/cpu.h
@@ -0,0 +1,154 @@
+/*
+ * include/linux/cpu.h - generic cpu definition
+ *
+ * This is mainly for topological representation. We define the
+ * basic 'struct cpu' here, which can be embedded in per-arch
+ * definitions of processors.
+ *
+ * Basic handling of the devices is done in drivers/base/cpu.c
+ * and system devices are handled in drivers/base/sys.c.
+ *
+ * CPUs are exported via sysfs in the class/cpu/devices/
+ * directory.
+ *
+ * Per-cpu interfaces can be implemented using a struct device_interface.
+ * See the following for how to do this:
+ * - drivers/base/intf.c
+ * - Documentation/driver-model/interface.txt
+ */
+#ifndef _LINUX_CPU_H_
+#define _LINUX_CPU_H_
+
+#include <linux/sysdev.h>
+#include <linux/node.h>
+#include <linux/compiler.h>
+#include <linux/cpumask.h>
+#include <linux/mutex.h>
+
+struct cpu {
+ int node_id; /* The node which contains the CPU */
+ int hotpluggable; /* creates sysfs control file if hotpluggable */
+ struct sys_device sysdev;
+};
+
+extern int register_cpu(struct cpu *cpu, int num);
+extern struct sys_device *get_cpu_sysdev(unsigned cpu);
+
+extern int cpu_add_sysdev_attr(struct sysdev_attribute *attr);
+extern void cpu_remove_sysdev_attr(struct sysdev_attribute *attr);
+
+extern int cpu_add_sysdev_attr_group(struct attribute_group *attrs);
+extern void cpu_remove_sysdev_attr_group(struct attribute_group *attrs);
+
+extern int sched_create_sysfs_power_savings_entries(struct sysdev_class *cls);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void unregister_cpu(struct cpu *cpu);
+#endif
+struct notifier_block;
+
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+/* Need to know about CPUs going up/down? */
+#ifdef CONFIG_HOTPLUG_CPU
+extern int register_cpu_notifier(struct notifier_block *nb);
+extern void unregister_cpu_notifier(struct notifier_block *nb);
+#else
+
+#ifndef MODULE
+extern int register_cpu_notifier(struct notifier_block *nb);
+#else
+static inline int register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+#endif
+
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+#endif
+
+int cpu_up(unsigned int cpu);
+void notify_cpu_starting(unsigned int cpu);
+extern void cpu_hotplug_init(void);
+extern void cpu_maps_update_begin(void);
+extern void cpu_maps_update_done(void);
+
+#else /* CONFIG_SMP && !DDE_LINUX */
+
+static inline int register_cpu_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline void unregister_cpu_notifier(struct notifier_block *nb)
+{
+}
+
+static inline void cpu_hotplug_init(void)
+{
+}
+
+static inline void cpu_maps_update_begin(void)
+{
+}
+
+static inline void cpu_maps_update_done(void)
+{
+}
+
+#endif /* CONFIG_SMP && !DDE_LINUX */
+extern struct sysdev_class cpu_sysdev_class;
+
+#ifdef CONFIG_HOTPLUG_CPU
+/* Stop CPUs going up and down. */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{
+ mutex_lock(cpu_hp_mutex);
+}
+
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{
+ mutex_unlock(cpu_hp_mutex);
+}
+
+extern void get_online_cpus(void);
+extern void put_online_cpus(void);
+#define hotcpu_notifier(fn, pri) { \
+ static struct notifier_block fn##_nb __cpuinitdata = \
+ { .notifier_call = fn, .priority = pri }; \
+ register_cpu_notifier(&fn##_nb); \
+}
+#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
+#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
+int cpu_down(unsigned int cpu);
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
+{ }
+static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
+{ }
+
+#define get_online_cpus() do { } while (0)
+#define put_online_cpus() do { } while (0)
+#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
+/* These aren't inline functions due to a GCC bug. */
+#define register_hotcpu_notifier(nb) ({ (void)(nb); 0; })
+#define unregister_hotcpu_notifier(nb) ({ (void)(nb); })
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#ifdef CONFIG_PM_SLEEP_SMP
+extern int suspend_cpu_hotplug;
+
+extern int disable_nonboot_cpus(void);
+extern void enable_nonboot_cpus(void);
+#else /* !CONFIG_PM_SLEEP_SMP */
+#define suspend_cpu_hotplug 0
+
+static inline int disable_nonboot_cpus(void) { return 0; }
+static inline void enable_nonboot_cpus(void) {}
+#endif /* !CONFIG_PM_SLEEP_SMP */
+
+#endif /* _LINUX_CPU_H_ */
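
Under DDE_LINUX (or on !SMP builds) the registration functions above collapse into empty inlines, so unmodified driver code that wires up CPU notifiers still compiles and links; it simply never receives callbacks. A sketch of such a caller, assuming the CPU_ONLINE/CPU_DEAD action codes and NOTIFY_OK from <linux/notifier.h> (callback and variable names hypothetical):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    /* Runs on hotplug events with a real implementation; dead code under DDE. */
    static int my_cpu_event(struct notifier_block *nb, unsigned long action,
                            void *hcpu)
    {
            unsigned int cpu = (unsigned long)hcpu;

            (void)cpu;
            if (action == CPU_ONLINE)
                    ;       /* allocate per-CPU state for 'cpu' */
            else if (action == CPU_DEAD)
                    ;       /* and free it again */
            return NOTIFY_OK;
    }

    static struct notifier_block my_cpu_nb = {
            .notifier_call = my_cpu_event,
    };

    /* init path: register_cpu_notifier(&my_cpu_nb); -- returns 0 under DDE */
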
diff --git a/libdde-linux26/include/linux/device.h b/libdde-linux26/include/linux/device.h
new file mode 100644
index 00000000..41b4227d
--- /dev/null
+++ b/libdde-linux26/include/linux/device.h
@@ -0,0 +1,606 @@
+/*
+ * device.h - generic, centralized driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2007 Greg Kroah-Hartman <gregkh@suse.de>
+ *
+ * This file is released under the GPLv2
+ *
+ * See Documentation/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_H_
+#define _DEVICE_H_
+
+#include <linux/ioport.h>
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/list.h>
+#include <linux/lockdep.h>
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/semaphore.h>
+#include <asm/atomic.h>
+#include <asm/device.h>
+
+#define BUS_ID_SIZE 20
+
+struct device;
+struct device_driver;
+struct driver_private;
+struct class;
+struct class_private;
+struct bus_type;
+struct bus_type_private;
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct bus_type *bus, char *buf);
+ ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
+};
+
+#define BUS_ATTR(_name, _mode, _show, _store) \
+struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+extern int __must_check bus_create_file(struct bus_type *,
+ struct bus_attribute *);
+extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
+
+struct bus_type {
+ const char *name;
+ struct bus_attribute *bus_attrs;
+ struct device_attribute *dev_attrs;
+ struct driver_attribute *drv_attrs;
+
+ int (*match)(struct device *dev, struct device_driver *drv);
+ int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ int (*probe)(struct device *dev);
+ int (*remove)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*suspend_late)(struct device *dev, pm_message_t state);
+ int (*resume_early)(struct device *dev);
+ int (*resume)(struct device *dev);
+
+ struct dev_pm_ops *pm;
+
+ struct bus_type_private *p;
+};
+
+extern int __must_check bus_register(struct bus_type *bus);
+extern void bus_unregister(struct bus_type *bus);
+
+extern int __must_check bus_rescan_devices(struct bus_type *bus);
+
+/* iterator helpers for buses */
+
+int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *bus_find_device(struct bus_type *bus, struct device *start,
+ void *data,
+ int (*match)(struct device *dev, void *data));
+struct device *bus_find_device_by_name(struct bus_type *bus,
+ struct device *start,
+ const char *name);
+
+int __must_check bus_for_each_drv(struct bus_type *bus,
+ struct device_driver *start, void *data,
+ int (*fn)(struct device_driver *, void *));
+
+void bus_sort_breadthfirst(struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b));
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+extern int bus_register_notifier(struct bus_type *bus,
+ struct notifier_block *nb);
+extern int bus_unregister_notifier(struct bus_type *bus,
+ struct notifier_block *nb);
+
+/* All 4 notifiers below get called with the target struct device *
+ * as an argument. Note that those functions are likely to be called
+ * with the device semaphore held in the core, so be careful.
+ */
+#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
+#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device removed */
+#define BUS_NOTIFY_BOUND_DRIVER 0x00000003 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER 0x00000004 /* driver about to be
+ unbound */
+
+extern struct kset *bus_get_kset(struct bus_type *bus);
+extern struct klist *bus_get_device_klist(struct bus_type *bus);
+
+struct device_driver {
+ const char *name;
+ struct bus_type *bus;
+
+ struct module *owner;
+ const char *mod_name; /* used for built-in modules */
+
+ int (*probe) (struct device *dev);
+ int (*remove) (struct device *dev);
+ void (*shutdown) (struct device *dev);
+ int (*suspend) (struct device *dev, pm_message_t state);
+ int (*resume) (struct device *dev);
+ struct attribute_group **groups;
+
+ struct dev_pm_ops *pm;
+
+ struct driver_private *p;
+};
+
+
+extern int __must_check driver_register(struct device_driver *drv);
+extern void driver_unregister(struct device_driver *drv);
+
+extern struct device_driver *get_driver(struct device_driver *drv);
+extern void put_driver(struct device_driver *drv);
+extern struct device_driver *driver_find(const char *name,
+ struct bus_type *bus);
+extern int driver_probe_done(void);
+extern int wait_for_device_probe(void);
+
+
+/* sysfs interface for exporting driver attributes */
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *driver, char *buf);
+ ssize_t (*store)(struct device_driver *driver, const char *buf,
+ size_t count);
+};
+
+#define DRIVER_ATTR(_name, _mode, _show, _store) \
+struct driver_attribute driver_attr_##_name = \
+ __ATTR(_name, _mode, _show, _store)
+
+extern int __must_check driver_create_file(struct device_driver *driver,
+ struct driver_attribute *attr);
+extern void driver_remove_file(struct device_driver *driver,
+ struct driver_attribute *attr);
+
+extern int __must_check driver_add_kobj(struct device_driver *drv,
+ struct kobject *kobj,
+ const char *fmt, ...);
+
+extern int __must_check driver_for_each_device(struct device_driver *drv,
+ struct device *start,
+ void *data,
+ int (*fn)(struct device *dev,
+ void *));
+struct device *driver_find_device(struct device_driver *drv,
+ struct device *start, void *data,
+ int (*match)(struct device *dev, void *data));
+
+/*
+ * device classes
+ */
+struct class {
+ const char *name;
+ struct module *owner;
+
+ struct class_attribute *class_attrs;
+ struct device_attribute *dev_attrs;
+ struct kobject *dev_kobj;
+
+ int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+
+ void (*class_release)(struct class *class);
+ void (*dev_release)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ struct dev_pm_ops *pm;
+ struct class_private *p;
+};
+
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
+extern struct kobject *sysfs_dev_block_kobj;
+extern struct kobject *sysfs_dev_char_kobj;
+extern int __must_check __class_register(struct class *class,
+ struct lock_class_key *key);
+extern void class_unregister(struct class *class);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_register(class) \
+({ \
+ static struct lock_class_key __key; \
+ __class_register(class, &__key); \
+})
+
+extern void class_dev_iter_init(struct class_dev_iter *iter,
+ struct class *class,
+ struct device *start,
+ const struct device_type *type);
+extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
+extern void class_dev_iter_exit(struct class_dev_iter *iter);
+
+extern int class_for_each_device(struct class *class, struct device *start,
+ void *data,
+ int (*fn)(struct device *dev, void *data));
+extern struct device *class_find_device(struct class *class,
+ struct device *start, void *data,
+ int (*match)(struct device *, void *));
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct class *class, char *buf);
+ ssize_t (*store)(struct class *class, const char *buf, size_t count);
+};
+
+#define CLASS_ATTR(_name, _mode, _show, _store) \
+struct class_attribute class_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+extern int __must_check class_create_file(struct class *class,
+ const struct class_attribute *attr);
+extern void class_remove_file(struct class *class,
+ const struct class_attribute *attr);
+
+struct class_interface {
+ struct list_head node;
+ struct class *class;
+
+ int (*add_dev) (struct device *, struct class_interface *);
+ void (*remove_dev) (struct device *, struct class_interface *);
+};
+
+extern int __must_check class_interface_register(struct class_interface *);
+extern void class_interface_unregister(struct class_interface *);
+
+extern struct class * __must_check __class_create(struct module *owner,
+ const char *name,
+ struct lock_class_key *key);
+extern void class_destroy(struct class *cls);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_create(owner, name) \
+({ \
+ static struct lock_class_key __key; \
+ __class_create(owner, name, &__key); \
+})
+
+/*
+ * The type of device a "struct device" is embedded in. A class
+ * or bus can contain devices of different types
+ * like "partitions" and "disks", "mouse" and "event".
+ * This identifies the device type and carries type-specific
+ * information, equivalent to the kobj_type of a kobject.
+ * If "name" is specified, the uevent will contain it in
+ * the DEVTYPE variable.
+ */
+struct device_type {
+ const char *name;
+ struct attribute_group **groups;
+ int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ void (*release)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ struct dev_pm_ops *pm;
+};
+
+/* interface for exporting device attributes */
+struct device_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define DEVICE_ATTR(_name, _mode, _show, _store) \
+struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store)
+
+extern int __must_check device_create_file(struct device *device,
+ struct device_attribute *entry);
+extern void device_remove_file(struct device *dev,
+ struct device_attribute *attr);
+extern int __must_check device_create_bin_file(struct device *dev,
+ struct bin_attribute *attr);
+extern void device_remove_bin_file(struct device *dev,
+ struct bin_attribute *attr);
+extern int device_schedule_callback_owner(struct device *dev,
+ void (*func)(struct device *dev), struct module *owner);
+
+/* This is a macro to avoid include problems with THIS_MODULE */
+#define device_schedule_callback(dev, func) \
+ device_schedule_callback_owner(dev, func, THIS_MODULE)
+
+/* device resource management */
+typedef void (*dr_release_t)(struct device *dev, void *res);
+typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+
+#ifdef CONFIG_DEBUG_DEVRES
+extern void *__devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+ const char *name);
+#define devres_alloc(release, size, gfp) \
+ __devres_alloc(release, size, gfp, #release)
+#else
+extern void *devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+#endif
+extern void devres_free(void *res);
+extern void devres_add(struct device *dev, void *res);
+extern void *devres_find(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+extern void *devres_get(struct device *dev, void *new_res,
+ dr_match_t match, void *match_data);
+extern void *devres_remove(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+extern int devres_destroy(struct device *dev, dr_release_t release,
+ dr_match_t match, void *match_data);
+
+/* devres group */
+extern void * __must_check devres_open_group(struct device *dev, void *id,
+ gfp_t gfp);
+extern void devres_close_group(struct device *dev, void *id);
+extern void devres_remove_group(struct device *dev, void *id);
+extern int devres_release_group(struct device *dev, void *id);
+
+/* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */
+extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
+extern void devm_kfree(struct device *dev, void *p);
+
+struct device_dma_parameters {
+ /*
+ * a low level driver may set these to teach IOMMU code about
+ * sg limitations.
+ */
+ unsigned int max_segment_size;
+ unsigned long segment_boundary_mask;
+};
+
+struct device {
+ struct klist klist_children;
+ struct klist_node knode_parent; /* node in sibling list */
+ struct klist_node knode_driver;
+ struct klist_node knode_bus;
+ struct device *parent;
+
+ struct kobject kobj;
+ char bus_id[BUS_ID_SIZE]; /* position on parent bus */
+ unsigned uevent_suppress:1;
+ const char *init_name; /* initial name of the device */
+ struct device_type *type;
+
+ struct semaphore sem; /* semaphore to synchronize calls to
+ * its driver.
+ */
+
+ struct bus_type *bus; /* type of bus device is on */
+ struct device_driver *driver; /* which driver has allocated this
+ device */
+ void *driver_data; /* data private to the driver */
+ void *platform_data; /* Platform specific data, device
+ core doesn't touch it */
+ struct dev_pm_info power;
+
+#ifdef CONFIG_NUMA
+ int numa_node; /* NUMA node this device is close to */
+#endif
+ u64 *dma_mask; /* dma mask (if dma'able device) */
+ u64 coherent_dma_mask;/* Like dma_mask, but for
+ alloc_coherent mappings as
+ not all hardware supports
+ 64 bit addresses for consistent
+					     allocations such as descriptors. */
+
+ struct device_dma_parameters *dma_parms;
+
+ struct list_head dma_pools; /* dma pools (if dma'ble) */
+
+ struct dma_coherent_mem *dma_mem; /* internal for coherent mem
+ override */
+ /* arch specific additions */
+ struct dev_archdata archdata;
+
+ dev_t devt; /* dev_t, creates the sysfs "dev" */
+
+ spinlock_t devres_lock;
+ struct list_head devres_head;
+
+ struct klist_node knode_class;
+ struct class *class;
+ struct attribute_group **groups; /* optional groups */
+
+ void (*release)(struct device *dev);
+};
+
+/* Get the wakeup routines, which depend on struct device */
+#include <linux/pm_wakeup.h>
+
+static inline const char *dev_name(const struct device *dev)
+{
+ /* will be changed into kobject_name(&dev->kobj) in the near future */
+ return dev->bus_id;
+}
+
+extern int dev_set_name(struct device *dev, const char *name, ...)
+ __attribute__((format(printf, 2, 3)));
+
+#ifdef CONFIG_NUMA
+static inline int dev_to_node(struct device *dev)
+{
+ return dev->numa_node;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+ dev->numa_node = node;
+}
+#else
+static inline int dev_to_node(struct device *dev)
+{
+ return -1;
+}
+static inline void set_dev_node(struct device *dev, int node)
+{
+}
+#endif
+
+static inline void *dev_get_drvdata(const struct device *dev)
+{
+ return dev->driver_data;
+}
+
+static inline void dev_set_drvdata(struct device *dev, void *data)
+{
+ dev->driver_data = data;
+}
+
+static inline int device_is_registered(struct device *dev)
+{
+ return dev->kobj.state_in_sysfs;
+}
+
+void driver_init(void);
+
+/*
+ * High level routines for use by the bus drivers
+ */
+extern int __must_check device_register(struct device *dev);
+extern void device_unregister(struct device *dev);
+extern void device_initialize(struct device *dev);
+extern int __must_check device_add(struct device *dev);
+extern void device_del(struct device *dev);
+extern int device_for_each_child(struct device *dev, void *data,
+ int (*fn)(struct device *dev, void *data));
+extern struct device *device_find_child(struct device *dev, void *data,
+ int (*match)(struct device *dev, void *data));
+extern int device_rename(struct device *dev, char *new_name);
+extern int device_move(struct device *dev, struct device *new_parent);
+
+/*
+ * Root device objects for grouping under /sys/devices
+ */
+extern struct device *__root_device_register(const char *name,
+ struct module *owner);
+static inline struct device *root_device_register(const char *name)
+{
+ return __root_device_register(name, THIS_MODULE);
+}
+extern void root_device_unregister(struct device *root);
+
+/*
+ * Manual binding of a device to driver. See drivers/base/bus.c
+ * for information on use.
+ */
+extern int __must_check device_bind_driver(struct device *dev);
+extern void device_release_driver(struct device *dev);
+extern int __must_check device_attach(struct device *dev);
+extern int __must_check driver_attach(struct device_driver *drv);
+extern int __must_check device_reprobe(struct device *dev);
+
+/*
+ * Easy functions for dynamically creating devices on the fly
+ */
+extern struct device *device_create_vargs(struct class *cls,
+ struct device *parent,
+ dev_t devt,
+ void *drvdata,
+ const char *fmt,
+ va_list vargs);
+extern struct device *device_create(struct class *cls, struct device *parent,
+ dev_t devt, void *drvdata,
+ const char *fmt, ...)
+ __attribute__((format(printf, 5, 6)));
+extern void device_destroy(struct class *cls, dev_t devt);
+
+/*
+ * Platform "fixup" functions - allow the platform to have its say
+ * about devices and actions that the general device layer doesn't
+ * know about.
+ */
+/* Notify platform of device discovery */
+extern int (*platform_notify)(struct device *dev);
+
+extern int (*platform_notify_remove)(struct device *dev);
+
+
+/**
+ * get_device - atomically increment the reference count for the device.
+ *
+ */
+extern struct device *get_device(struct device *dev);
+extern void put_device(struct device *dev);
+
+
+/* drivers/base/power/shutdown.c */
+extern void device_shutdown(void);
+
+/* drivers/base/sys.c */
+extern void sysdev_shutdown(void);
+
+/* debugging and troubleshooting/diagnostic helpers. */
+extern const char *dev_driver_string(const struct device *dev);
+#ifndef DDE_LINUX
+#define dev_printk(level, dev, format, arg...) \
+ printk(level "%s %s: " format , dev_driver_string(dev) , \
+ dev_name(dev) , ## arg)
+#else
+#define dev_printk(level, dev, format, arg...)
+#endif
+
+#define dev_emerg(dev, format, arg...) \
+ dev_printk(KERN_EMERG , dev , format , ## arg)
+#define dev_alert(dev, format, arg...) \
+ dev_printk(KERN_ALERT , dev , format , ## arg)
+#define dev_crit(dev, format, arg...) \
+ dev_printk(KERN_CRIT , dev , format , ## arg)
+#define dev_err(dev, format, arg...) \
+ dev_printk(KERN_ERR , dev , format , ## arg)
+#define dev_warn(dev, format, arg...) \
+ dev_printk(KERN_WARNING , dev , format , ## arg)
+#define dev_notice(dev, format, arg...) \
+ dev_printk(KERN_NOTICE , dev , format , ## arg)
+#define dev_info(dev, format, arg...) \
+ dev_printk(KERN_INFO , dev , format , ## arg)
+
+#if defined(DEBUG) && !defined(DDE_LINUX)
+#define dev_dbg(dev, format, arg...) \
+ dev_printk(KERN_DEBUG , dev , format , ## arg)
+#elif defined(CONFIG_DYNAMIC_PRINTK_DEBUG)
+#define dev_dbg(dev, format, ...) do { \
+ dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
+ } while (0)
+#else
+#define dev_dbg(dev, format, arg...) do {} while (0)
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg dev_dbg
+#else
+
+#define dev_vdbg(dev, format, arg...) \
+ ({ if (0) dev_printk(KERN_DEBUG, dev, format, ##arg); 0; })
+#endif
+
+/*
+ * dev_WARN() acts like dev_printk(), but with the key difference
+ * of using a WARN/WARN_ON to get the message out, including the
+ * file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+	WARN(1, "Device: %s\n" format, dev_driver_string(dev), ## arg)
+
+/* Create alias, so I can be autoloaded. */
+#define MODULE_ALIAS_CHARDEV(major,minor) \
+ MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
+#define MODULE_ALIAS_CHARDEV_MAJOR(major) \
+ MODULE_ALIAS("char-major-" __stringify(major) "-*")
+#endif /* _DEVICE_H_ */
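
The only DDE-specific change in this header is stubbing out dev_printk() (and with it dev_err(), dev_dbg(), and friends); the driver-model plumbing itself is untouched. A minimal sketch of how the DEVICE_ATTR() helper and the drvdata accessors above compose, with all driver-side names hypothetical:

    #include <linux/device.h>
    #include <linux/kernel.h>

    struct my_priv {
            int level;
    };

    static ssize_t level_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
    {
            struct my_priv *p = dev_get_drvdata(dev);

            return sprintf(buf, "%d\n", p->level);
    }

    static ssize_t level_store(struct device *dev,
                               struct device_attribute *attr,
                               const char *buf, size_t count)
    {
            struct my_priv *p = dev_get_drvdata(dev);

            p->level = simple_strtol(buf, NULL, 0);
            return count;
    }

    static DEVICE_ATTR(level, 0644, level_show, level_store);

    /* probe():  dev_set_drvdata(dev, p);
     *           return device_create_file(dev, &dev_attr_level);
     * remove(): device_remove_file(dev, &dev_attr_level); */
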
diff --git a/libdde-linux26/include/linux/firmware.h b/libdde-linux26/include/linux/firmware.h
new file mode 100644
index 00000000..06e7a1ce
--- /dev/null
+++ b/libdde-linux26/include/linux/firmware.h
@@ -0,0 +1,75 @@
+#ifndef _LINUX_FIRMWARE_H
+#define _LINUX_FIRMWARE_H
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+#define FIRMWARE_NAME_MAX 30
+#define FW_ACTION_NOHOTPLUG 0
+#define FW_ACTION_HOTPLUG 1
+
+struct firmware {
+ size_t size;
+ const u8 *data;
+};
+
+struct device;
+
+struct builtin_fw {
+ char *name;
+ void *data;
+ unsigned long size;
+};
+
+/* We have to play tricks here much like stringify() to get the
+ __COUNTER__ macro to be expanded as we want it */
+#define __fw_concat1(x, y) x##y
+#define __fw_concat(x, y) __fw_concat1(x, y)
+
+#define DECLARE_BUILTIN_FIRMWARE(name, blob) \
+ DECLARE_BUILTIN_FIRMWARE_SIZE(name, &(blob), sizeof(blob))
+
+#define DECLARE_BUILTIN_FIRMWARE_SIZE(name, blob, size) \
+ static const struct builtin_fw __fw_concat(__builtin_fw,__COUNTER__) \
+ __used __section(.builtin_fw) = { name, blob, size }
+
+#if defined(CONFIG_FW_LOADER) || (defined(CONFIG_FW_LOADER_MODULE) && defined(MODULE))
+int request_firmware(const struct firmware **fw, const char *name,
+ struct device *device);
+int request_firmware_nowait(
+ struct module *module, int uevent,
+ const char *name, struct device *device, void *context,
+ void (*cont)(const struct firmware *fw, void *context));
+
+void release_firmware(const struct firmware *fw);
+#else
+#ifdef DDE_LINUX
+#include <ddekit/printf.h>
+#endif
+static inline int request_firmware(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+#ifdef DDE_LINUX
+ ddekit_printf("firmware %s requested, but not implemented\n", name);
+#endif
+ return -EINVAL;
+}
+static inline int request_firmware_nowait(
+ struct module *module, int uevent,
+ const char *name, struct device *device, void *context,
+ void (*cont)(const struct firmware *fw, void *context))
+{
+#ifdef DDE_LINUX
+ ddekit_printf("firmware %s requested, but not implemented\n", name);
+#endif
+ return -EINVAL;
+}
+
+static inline void release_firmware(const struct firmware *fw)
+{
+}
+#endif
+
+#endif
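
Since the autoconf.h wrapper above #undefs CONFIG_FW_LOADER, DDE builds take the #else branch here: request_firmware() logs through ddekit_printf() and fails with -EINVAL, so loader-aware drivers degrade gracefully instead of crashing. A sketch of the usual caller pattern (function and firmware file names hypothetical):

    #include <linux/firmware.h>

    static int my_upload_firmware(struct device *dev)
    {
            const struct firmware *fw;
            int err;

            err = request_firmware(&fw, "mychip.fw", dev);
            if (err)
                    return err;     /* -EINVAL under the DDE stub */

            /* push fw->data (fw->size bytes) into the hardware here */

            release_firmware(fw);
            return 0;
    }
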
diff --git a/libdde-linux26/include/linux/hardirq.h b/libdde-linux26/include/linux/hardirq.h
new file mode 100644
index 00000000..7a0082bd
--- /dev/null
+++ b/libdde-linux26/include/linux/hardirq.h
@@ -0,0 +1,183 @@
+#ifndef LINUX_HARDIRQ_H
+#define LINUX_HARDIRQ_H
+
+#include <linux/preempt.h>
+#include <linux/smp_lock.h>
+#include <linux/lockdep.h>
+#include <linux/ftrace_irq.h>
+#include <asm/hardirq.h>
+#include <asm/system.h>
+
+/*
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count can be overridden per architecture, the default is:
+ *
+ * - bits 16-27 are the hardirq count (max # of hardirqs: 4096)
+ * - ( bit 28 is the PREEMPT_ACTIVE flag. )
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0x0fff0000
+ */
+#define PREEMPT_BITS 8
+#define SOFTIRQ_BITS 8
+
+#ifndef HARDIRQ_BITS
+#define HARDIRQ_BITS 12
+
+#ifndef MAX_HARDIRQS_PER_CPU
+#define MAX_HARDIRQS_PER_CPU NR_IRQS
+#endif
+
+/*
+ * The hardirq mask has to be large enough to have space for potentially
+ * all IRQ sources in the system nesting on a single CPU.
+ */
+#if (1 << HARDIRQ_BITS) < MAX_HARDIRQS_PER_CPU
+# error HARDIRQ_BITS is too low!
+#endif
+#endif
+
+#define PREEMPT_SHIFT 0
+#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __IRQ_MASK(x) ((1UL << (x))-1)
+
+#define PREEMPT_MASK (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+
+#if PREEMPT_ACTIVE < (1 << (HARDIRQ_SHIFT + HARDIRQ_BITS))
+#error PREEMPT_ACTIVE is too low!
+#endif
+
+#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
+#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ */
+#define in_irq() (hardirq_count())
+#define in_softirq() (softirq_count())
+#define in_interrupt() (irq_count())
+
+#if defined(CONFIG_PREEMPT)
+# define PREEMPT_INATOMIC_BASE kernel_locked()
+# define PREEMPT_CHECK_OFFSET 1
+#else
+# define PREEMPT_INATOMIC_BASE 0
+# define PREEMPT_CHECK_OFFSET 0
+#endif
+
+/*
+ * Are we running in atomic context? WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels. Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_INATOMIC_BASE)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler, *after* releasing the kernel lock)
+ */
+#define in_atomic_preempt_off() \
+ ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
+
+#ifdef CONFIG_PREEMPT
+# define preemptible() (preempt_count() == 0 && !irqs_disabled())
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define preemptible() 0
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+extern void synchronize_irq(unsigned int irq);
+#else /* !SMP || DDE_LINUX */
+# define synchronize_irq(irq) barrier()
+#endif
+
+struct task_struct;
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+static inline void account_system_vtime(struct task_struct *tsk)
+{
+}
+#endif
+
+#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+extern void rcu_irq_enter(void);
+extern void rcu_irq_exit(void);
+extern void rcu_nmi_enter(void);
+extern void rcu_nmi_exit(void);
+#else
+# define rcu_irq_enter() do { } while (0)
+# define rcu_irq_exit() do { } while (0)
+# define rcu_nmi_enter() do { } while (0)
+# define rcu_nmi_exit() do { } while (0)
+#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+
+/*
+ * It is safe to do non-atomic ops on ->hardirq_context,
+ * because NMI handlers may not preempt and the ops are
+ * always balanced, so the interrupted value of ->hardirq_context
+ * will always be restored.
+ */
+#define __irq_enter() \
+ do { \
+ account_system_vtime(current); \
+ add_preempt_count(HARDIRQ_OFFSET); \
+ trace_hardirq_enter(); \
+ } while (0)
+
+/*
+ * Enter irq context (on NO_HZ, update jiffies):
+ */
+extern void irq_enter(void);
+
+/*
+ * Exit irq context without processing softirqs:
+ */
+#define __irq_exit() \
+ do { \
+ trace_hardirq_exit(); \
+ account_system_vtime(current); \
+ sub_preempt_count(HARDIRQ_OFFSET); \
+ } while (0)
+
+/*
+ * Exit irq context and process softirqs if needed:
+ */
+extern void irq_exit(void);
+
+#define nmi_enter() \
+ do { \
+ ftrace_nmi_enter(); \
+ lockdep_off(); \
+ rcu_nmi_enter(); \
+ __irq_enter(); \
+ } while (0)
+
+#define nmi_exit() \
+ do { \
+ __irq_exit(); \
+ rcu_nmi_exit(); \
+ lockdep_on(); \
+ ftrace_nmi_exit(); \
+ } while (0)
+
+#endif /* LINUX_HARDIRQ_H */
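
The counter layout documented at the top of this header can be exercised in isolation. The following stand-alone user-space program (not part of the header; the snapshot value is hypothetical) decodes a preempt_count() word with the same shifts and masks:

    #include <stdio.h>

    #define PREEMPT_BITS    8
    #define SOFTIRQ_BITS    8
    #define HARDIRQ_BITS    12

    #define PREEMPT_SHIFT   0
    #define SOFTIRQ_SHIFT   (PREEMPT_SHIFT + PREEMPT_BITS)
    #define HARDIRQ_SHIFT   (SOFTIRQ_SHIFT + SOFTIRQ_BITS)

    #define __IRQ_MASK(x)   ((1UL << (x)) - 1)

    int main(void)
    {
            /* preempt depth 2, one softirq, two nested hardirqs */
            unsigned long count = (2UL << HARDIRQ_SHIFT) |
                                  (1UL << SOFTIRQ_SHIFT) | 2UL;

            printf("hardirq=%lu softirq=%lu preempt=%lu\n",
                   (count >> HARDIRQ_SHIFT) & __IRQ_MASK(HARDIRQ_BITS),
                   (count >> SOFTIRQ_SHIFT) & __IRQ_MASK(SOFTIRQ_BITS),
                   count & __IRQ_MASK(PREEMPT_BITS));
            return 0;   /* prints: hardirq=2 softirq=1 preempt=2 */
    }
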
diff --git a/libdde-linux26/include/linux/ide.h b/libdde-linux26/include/linux/ide.h
new file mode 100644
index 00000000..511fbf3e
--- /dev/null
+++ b/libdde-linux26/include/linux/ide.h
@@ -0,0 +1,1642 @@
+#ifndef _IDE_H
+#define _IDE_H
+/*
+ * linux/include/linux/ide.h
+ *
+ * Copyright (C) 1994-2002 Linus Torvalds & authors
+ */
+
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/ata.h>
+#include <linux/blkdev.h>
+#include <linux/proc_fs.h>
+#include <linux/interrupt.h>
+#include <linux/bitops.h>
+#include <linux/bio.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/completion.h>
+#include <linux/pm.h>
+#ifdef CONFIG_BLK_DEV_IDEACPI
+#include <acpi/acpi.h>
+#endif
+#include <asm/byteorder.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/mutex.h>
+
+#if defined(CONFIG_CRIS) || defined(CONFIG_FRV)
+# define SUPPORT_VLB_SYNC 0
+#else
+# define SUPPORT_VLB_SYNC 1
+#endif
+
+/*
+ * Probably not wise to fiddle with these
+ */
+#define IDE_DEFAULT_MAX_FAILURES 1
+#define ERROR_MAX 8 /* Max read/write errors per sector */
+#define ERROR_RESET 3 /* Reset controller every 4th retry */
+#define ERROR_RECAL 1 /* Recalibrate every 2nd retry */
+
+/*
+ * Definitions for accessing IDE controller registers
+ */
+#define IDE_NR_PORTS (10)
+
+struct ide_io_ports {
+ unsigned long data_addr;
+
+ union {
+ unsigned long error_addr; /* read: error */
+ unsigned long feature_addr; /* write: feature */
+ };
+
+ unsigned long nsect_addr;
+ unsigned long lbal_addr;
+ unsigned long lbam_addr;
+ unsigned long lbah_addr;
+
+ unsigned long device_addr;
+
+ union {
+ unsigned long status_addr; /*  read: status  */
+ unsigned long command_addr; /* write: command */
+ };
+
+ unsigned long ctl_addr;
+
+ unsigned long irq_addr;
+};
+
+#define OK_STAT(stat,good,bad) (((stat)&((good)|(bad)))==(good))
+
+#define BAD_R_STAT (ATA_BUSY | ATA_ERR)
+#define BAD_W_STAT (BAD_R_STAT | ATA_DF)
+#define BAD_STAT (BAD_R_STAT | ATA_DRQ)
+#define DRIVE_READY (ATA_DRDY | ATA_DSC)
+
+#define BAD_CRC (ATA_ABORTED | ATA_ICRC)
+
+#define SATA_NR_PORTS (3) /* 16 possible ?? */
+
+#define SATA_STATUS_OFFSET (0)
+#define SATA_ERROR_OFFSET (1)
+#define SATA_CONTROL_OFFSET (2)
+
+/*
+ * Our Physical Region Descriptor (PRD) table should be large enough
+ * to handle the biggest I/O request we are likely to see. Since requests
+ * can have no more than 256 sectors, and since the typical blocksize is
+ * two or more sectors, we could get by with a limit of 128 entries here for
+ * the usual worst case. Most requests seem to include some contiguous blocks,
+ * further reducing the number of table entries required.
+ *
+ * The driver reverts to PIO mode for individual requests that exceed
+ * this limit (possible with 512 byte blocksizes, e.g. MSDOS f/s), so handling
+ * 100% of all crazy scenarios here is not necessary.
+ *
+ * As it turns out though, we must allocate a full 4KB page for this,
+ * so the two PRD tables (ide0 & ide1) will each get half of that,
+ * allowing each to have about 256 entries (8 bytes each) from this.
+ */
+#define PRD_BYTES 8
+#define PRD_ENTRIES 256
+
+/*
+ * Some more useful definitions
+ */
+#define PARTN_BITS 6 /* number of minor dev bits for partitions */
+#define MAX_DRIVES 2 /* per interface; 2 assumed by lots of code */
+#define SECTOR_SIZE 512
+
+/*
+ * Timeouts for various operations:
+ */
+#ifndef DDE_LINUX
+enum {
+ /* spec allows up to 20ms */
+ WAIT_DRQ = HZ / 10, /* 100ms */
+ /* some laptops are very slow */
+ WAIT_READY = 5 * HZ, /* 5s */
+ /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
+ WAIT_PIDENTIFY = 10 * HZ, /* 10s */
+ /* worst case when spinning up */
+ WAIT_WORSTCASE = 30 * HZ, /* 30s */
+ /* maximum wait for an IRQ to happen */
+ WAIT_CMD = 10 * HZ, /* 10s */
+ /* Some drives require a longer IRQ timeout. */
+ WAIT_FLOPPY_CMD = 50 * HZ, /* 50s */
+ /*
+ * Some drives (for example, Seagate STT3401A Travan) require a very
+ * long timeout, because they don't return an interrupt or clear their
+ * BSY bit until after the command completes (even retension commands).
+ */
+ WAIT_TAPE_CMD = 900 * HZ, /* 900s */
+ /* minimum sleep time */
+ WAIT_MIN_SLEEP = HZ / 50, /* 20ms */
+};
+#else
+/* DDE: the same timeouts hard-coded for HZ = 250, independent of the host's HZ */
+enum {
+ /* spec allows up to 20ms */
+ WAIT_DRQ = 25, /* 100ms */
+ /* some laptops are very slow */
+ WAIT_READY = 1250, /* 5s */
+ /* should be less than 3ms (?), if all ATAPI CD is closed at boot */
+ WAIT_PIDENTIFY = 2500, /* 10s */
+ /* worst case when spinning up */
+ WAIT_WORSTCASE = 7500, /* 30s */
+ /* maximum wait for an IRQ to happen */
+ WAIT_CMD = 2500, /* 10s */
+ /* Some drives require a longer IRQ timeout. */
+ WAIT_FLOPPY_CMD = 12500, /* 50s */
+ /*
+ * Some drives (for example, Seagate STT3401A Travan) require a very
+ * long timeout, because they don't return an interrupt or clear their
+ * BSY bit until after the command completes (even retension commands).
+ */
+ WAIT_TAPE_CMD = 225000, /* 900s */
+ /* minimum sleep time */
+ WAIT_MIN_SLEEP = 5, /* 20ms */
+};
+#endif
+
+/*
+ * Op codes for special requests to be handled by ide_special_rq().
+ * Values should be in the range of 0x20 to 0x3f.
+ */
+#define REQ_DRIVE_RESET 0x20
+#define REQ_DEVSET_EXEC 0x21
+#define REQ_PARK_HEADS 0x22
+#define REQ_UNPARK_HEADS 0x23
+
+/*
+ * Check for an interrupt and acknowledge the interrupt status
+ */
+struct hwif_s;
+typedef int (ide_ack_intr_t)(struct hwif_s *);
+
+/*
+ * hwif_chipset_t is used to keep track of the specific hardware
+ * chipset used by each IDE interface, if known.
+ */
+enum { ide_unknown, ide_generic, ide_pci,
+ ide_cmd640, ide_dtc2278, ide_ali14xx,
+ ide_qd65xx, ide_umc8672, ide_ht6560b,
+ ide_4drives, ide_pmac, ide_acorn,
+ ide_au1xxx, ide_palm3710
+};
+
+typedef u8 hwif_chipset_t;
+
+/*
+ * Structure to hold all information about the location of this port
+ */
+typedef struct hw_regs_s {
+ union {
+ struct ide_io_ports io_ports;
+ unsigned long io_ports_array[IDE_NR_PORTS];
+ };
+
+ int irq; /* our irq number */
+ ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
+ hwif_chipset_t chipset;
+ struct device *dev, *parent;
+ unsigned long config;
+} hw_regs_t;
+
+static inline void ide_std_init_ports(hw_regs_t *hw,
+ unsigned long io_addr,
+ unsigned long ctl_addr)
+{
+ unsigned int i;
+
+ for (i = 0; i <= 7; i++)
+ hw->io_ports_array[i] = io_addr++;
+
+ hw->io_ports.ctl_addr = ctl_addr;
+}
+
+/* for IDE PCI controllers in legacy mode, temporary */
+static inline int __ide_default_irq(unsigned long base)
+{
+ switch (base) {
+#ifdef CONFIG_IA64
+ case 0x1f0: return isa_irq_to_vector(14);
+ case 0x170: return isa_irq_to_vector(15);
+#else
+ case 0x1f0: return 14;
+ case 0x170: return 15;
+#endif
+ }
+ return 0;
+}
+
+#if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \
+ defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \
+ || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64)
+#include <asm/ide.h>
+#else
+#include <asm-generic/ide_iops.h>
+#endif
+
+#define MAX_HWIFS 10
+
+/* Currently only m68k, apus and m8xx need it */
+#ifndef IDE_ARCH_ACK_INTR
+# define ide_ack_intr(hwif) (1)
+#endif
+
+/* Currently only Atari needs it */
+#ifndef IDE_ARCH_LOCK
+# define ide_release_lock() do {} while (0)
+# define ide_get_lock(hdlr, data) do {} while (0)
+#endif /* IDE_ARCH_LOCK */
+
+/*
+ * Now for the data we need to maintain per-drive: ide_drive_t
+ */
+
+#define ide_scsi 0x21
+#define ide_disk 0x20
+#define ide_optical 0x7
+#define ide_cdrom 0x5
+#define ide_tape 0x1
+#define ide_floppy 0x0
+
+/*
+ * Special Driver Flags
+ *
+ * set_geometry : respecify drive geometry
+ * recalibrate : seek to cyl 0
+ * set_multmode : set multmode count
+ * reserved : unused
+ */
+typedef union {
+ unsigned all : 8;
+ struct {
+ unsigned set_geometry : 1;
+ unsigned recalibrate : 1;
+ unsigned set_multmode : 1;
+ unsigned reserved : 5;
+ } b;
+} special_t;
+
+/*
+ * Status returned from various ide_ functions
+ */
+typedef enum {
+ ide_stopped, /* no drive operation was started */
+ ide_started, /* a drive operation was started, handler was set */
+} ide_startstop_t;
+
+enum {
+ IDE_TFLAG_LBA48 = (1 << 0),
+ IDE_TFLAG_FLAGGED = (1 << 2),
+ IDE_TFLAG_OUT_DATA = (1 << 3),
+ IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4),
+ IDE_TFLAG_OUT_HOB_NSECT = (1 << 5),
+ IDE_TFLAG_OUT_HOB_LBAL = (1 << 6),
+ IDE_TFLAG_OUT_HOB_LBAM = (1 << 7),
+ IDE_TFLAG_OUT_HOB_LBAH = (1 << 8),
+ IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE |
+ IDE_TFLAG_OUT_HOB_NSECT |
+ IDE_TFLAG_OUT_HOB_LBAL |
+ IDE_TFLAG_OUT_HOB_LBAM |
+ IDE_TFLAG_OUT_HOB_LBAH,
+ IDE_TFLAG_OUT_FEATURE = (1 << 9),
+ IDE_TFLAG_OUT_NSECT = (1 << 10),
+ IDE_TFLAG_OUT_LBAL = (1 << 11),
+ IDE_TFLAG_OUT_LBAM = (1 << 12),
+ IDE_TFLAG_OUT_LBAH = (1 << 13),
+ IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE |
+ IDE_TFLAG_OUT_NSECT |
+ IDE_TFLAG_OUT_LBAL |
+ IDE_TFLAG_OUT_LBAM |
+ IDE_TFLAG_OUT_LBAH,
+ IDE_TFLAG_OUT_DEVICE = (1 << 14),
+ IDE_TFLAG_WRITE = (1 << 15),
+ IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16),
+ IDE_TFLAG_IN_DATA = (1 << 17),
+ IDE_TFLAG_CUSTOM_HANDLER = (1 << 18),
+ IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19),
+ IDE_TFLAG_IN_HOB_FEATURE = (1 << 20),
+ IDE_TFLAG_IN_HOB_NSECT = (1 << 21),
+ IDE_TFLAG_IN_HOB_LBAL = (1 << 22),
+ IDE_TFLAG_IN_HOB_LBAM = (1 << 23),
+ IDE_TFLAG_IN_HOB_LBAH = (1 << 24),
+ IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL |
+ IDE_TFLAG_IN_HOB_LBAM |
+ IDE_TFLAG_IN_HOB_LBAH,
+ IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE |
+ IDE_TFLAG_IN_HOB_NSECT |
+ IDE_TFLAG_IN_HOB_LBA,
+ IDE_TFLAG_IN_FEATURE = (1 << 1),
+ IDE_TFLAG_IN_NSECT = (1 << 25),
+ IDE_TFLAG_IN_LBAL = (1 << 26),
+ IDE_TFLAG_IN_LBAM = (1 << 27),
+ IDE_TFLAG_IN_LBAH = (1 << 28),
+ IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL |
+ IDE_TFLAG_IN_LBAM |
+ IDE_TFLAG_IN_LBAH,
+ IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT |
+ IDE_TFLAG_IN_LBA,
+ IDE_TFLAG_IN_DEVICE = (1 << 29),
+ IDE_TFLAG_HOB = IDE_TFLAG_OUT_HOB |
+ IDE_TFLAG_IN_HOB,
+ IDE_TFLAG_TF = IDE_TFLAG_OUT_TF |
+ IDE_TFLAG_IN_TF,
+ IDE_TFLAG_DEVICE = IDE_TFLAG_OUT_DEVICE |
+ IDE_TFLAG_IN_DEVICE,
+ /* force 16-bit I/O operations */
+ IDE_TFLAG_IO_16BIT = (1 << 30),
+ /* ide_task_t was allocated using kmalloc() */
+ IDE_TFLAG_DYN = (1 << 31),
+};
+
+struct ide_taskfile {
+ u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */
+
+ u8 hob_feature; /* 1-5: additional data to support LBA48 */
+ u8 hob_nsect;
+ u8 hob_lbal;
+ u8 hob_lbam;
+ u8 hob_lbah;
+
+ u8 data; /* 6: low data byte (for TASKFILE IOCTL) */
+
+ union { /*  7: */
+ u8 error; /* read: error */
+ u8 feature; /* write: feature */
+ };
+
+ u8 nsect; /* 8: number of sectors */
+ u8 lbal; /* 9: LBA low */
+ u8 lbam; /* 10: LBA mid */
+ u8 lbah; /* 11: LBA high */
+
+ u8 device; /* 12: device select */
+
+ union { /* 13: */
+ u8 status; /*  read: status  */
+ u8 command; /* write: command */
+ };
+};
+
+typedef struct ide_task_s {
+ union {
+ struct ide_taskfile tf;
+ u8 tf_array[14];
+ };
+ u32 tf_flags;
+ int data_phase;
+ struct request *rq; /* copy of request */
+ void *special; /* valid_t generally */
+} ide_task_t;
+
+/* ATAPI packet command flags */
+enum {
+ /* set when an error is considered normal - no retry (ide-tape) */
+ PC_FLAG_ABORT = (1 << 0),
+ PC_FLAG_SUPPRESS_ERROR = (1 << 1),
+ PC_FLAG_WAIT_FOR_DSC = (1 << 2),
+ PC_FLAG_DMA_OK = (1 << 3),
+ PC_FLAG_DMA_IN_PROGRESS = (1 << 4),
+ PC_FLAG_DMA_ERROR = (1 << 5),
+ PC_FLAG_WRITING = (1 << 6),
+ /* command timed out */
+ PC_FLAG_TIMEDOUT = (1 << 7),
+};
+
+/*
+ * With each packet command, we allocate a buffer of IDE_PC_BUFFER_SIZE bytes.
+ * This is used for several packet commands (not for READ/WRITE commands).
+ */
+#define IDE_PC_BUFFER_SIZE 256
+#define ATAPI_WAIT_PC (60 * HZ)
+
+struct ide_atapi_pc {
+ /* actual packet bytes */
+ u8 c[12];
+ /* incremented on each retry */
+ int retries;
+ int error;
+
+ /* bytes to transfer */
+ int req_xfer;
+ /* bytes actually transferred */
+ int xferred;
+
+ /* data buffer */
+ u8 *buf;
+ /* current buffer position */
+ u8 *cur_pos;
+ int buf_size;
+ /* missing/available data on the current buffer */
+ int b_count;
+
+ /* the corresponding request */
+ struct request *rq;
+
+ unsigned long flags;
+
+ /*
+ * those are more or less driver-specific and some of them are subject
+ * to change/removal later.
+ */
+ u8 pc_buf[IDE_PC_BUFFER_SIZE];
+
+ /* idetape only */
+ struct idetape_bh *bh;
+ char *b_data;
+
+ struct scatterlist *sg;
+ unsigned int sg_cnt;
+
+ unsigned long timeout;
+};
+
+struct ide_devset;
+struct ide_driver;
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+struct ide_acpi_drive_link;
+struct ide_acpi_hwif_link;
+#endif
+
+struct ide_drive_s;
+
+struct ide_disk_ops {
+ int (*check)(struct ide_drive_s *, const char *);
+ int (*get_capacity)(struct ide_drive_s *);
+ void (*setup)(struct ide_drive_s *);
+ void (*flush)(struct ide_drive_s *);
+ int (*init_media)(struct ide_drive_s *, struct gendisk *);
+ int (*set_doorlock)(struct ide_drive_s *, struct gendisk *,
+ int);
+ ide_startstop_t (*do_request)(struct ide_drive_s *, struct request *,
+ sector_t);
+ int (*end_request)(struct ide_drive_s *, int, int);
+ int (*ioctl)(struct ide_drive_s *, struct block_device *,
+ fmode_t, unsigned int, unsigned long);
+};
+
+/* ATAPI device flags */
+enum {
+ IDE_AFLAG_DRQ_INTERRUPT = (1 << 0),
+
+ /* ide-cd */
+ /* Drive cannot eject the disc. */
+ IDE_AFLAG_NO_EJECT = (1 << 1),
+ /* Drive is a pre ATAPI 1.2 drive. */
+ IDE_AFLAG_PRE_ATAPI12 = (1 << 2),
+ /* TOC addresses are in BCD. */
+ IDE_AFLAG_TOCADDR_AS_BCD = (1 << 3),
+ /* TOC track numbers are in BCD. */
+ IDE_AFLAG_TOCTRACKS_AS_BCD = (1 << 4),
+ /*
+ * Drive does not provide data in multiples of SECTOR_SIZE
+ * when more than one interrupt is needed.
+ */
+ IDE_AFLAG_LIMIT_NFRAMES = (1 << 5),
+ /* Saved TOC information is current. */
+ IDE_AFLAG_TOC_VALID = (1 << 6),
+ /* We think that the drive door is locked. */
+ IDE_AFLAG_DOOR_LOCKED = (1 << 7),
+ /* SET_CD_SPEED command is unsupported. */
+ IDE_AFLAG_NO_SPEED_SELECT = (1 << 8),
+ IDE_AFLAG_VERTOS_300_SSD = (1 << 9),
+ IDE_AFLAG_VERTOS_600_ESD = (1 << 10),
+ IDE_AFLAG_SANYO_3CD = (1 << 11),
+ IDE_AFLAG_FULL_CAPS_PAGE = (1 << 12),
+ IDE_AFLAG_PLAY_AUDIO_OK = (1 << 13),
+ IDE_AFLAG_LE_SPEED_FIELDS = (1 << 14),
+
+ /* ide-floppy */
+ /* Avoid commands not supported in Clik drive */
+ IDE_AFLAG_CLIK_DRIVE = (1 << 15),
+ /* Requires BH algorithm for packets */
+ IDE_AFLAG_ZIP_DRIVE = (1 << 16),
+ /* Supports format progress report */
+ IDE_AFLAG_SRFP = (1 << 17),
+
+ /* ide-tape */
+ IDE_AFLAG_IGNORE_DSC = (1 << 18),
+ /* 0 When the tape position is unknown */
+ IDE_AFLAG_ADDRESS_VALID = (1 << 19),
+ /* Device already opened */
+ IDE_AFLAG_BUSY = (1 << 20),
+ /* Attempt to auto-detect the current user block size */
+ IDE_AFLAG_DETECT_BS = (1 << 21),
+ /* Currently on a filemark */
+ IDE_AFLAG_FILEMARK = (1 << 22),
+ /* 0 = no tape is loaded, so we don't rewind after ejecting */
+ IDE_AFLAG_MEDIUM_PRESENT = (1 << 23),
+
+ IDE_AFLAG_NO_AUTOCLOSE = (1 << 24),
+};
+
+/* device flags */
+enum {
+ /* restore settings after device reset */
+ IDE_DFLAG_KEEP_SETTINGS = (1 << 0),
+ /* device is using DMA for read/write */
+ IDE_DFLAG_USING_DMA = (1 << 1),
+ /* okay to unmask other IRQs */
+ IDE_DFLAG_UNMASK = (1 << 2),
+ /* don't attempt flushes */
+ IDE_DFLAG_NOFLUSH = (1 << 3),
+ /* DSC overlap */
+ IDE_DFLAG_DSC_OVERLAP = (1 << 4),
+ /* give potential excess bandwidth */
+ IDE_DFLAG_NICE1 = (1 << 5),
+ /* device is physically present */
+ IDE_DFLAG_PRESENT = (1 << 6),
+ /* device ejected hint */
+ IDE_DFLAG_DEAD = (1 << 7),
+ /* id read from device (synthetic if not set) */
+ IDE_DFLAG_ID_READ = (1 << 8),
+ IDE_DFLAG_NOPROBE = (1 << 9),
+ /* need to do check_media_change() */
+ IDE_DFLAG_REMOVABLE = (1 << 10),
+ /* needed for removable devices */
+ IDE_DFLAG_ATTACH = (1 << 11),
+ IDE_DFLAG_FORCED_GEOM = (1 << 12),
+ /* disallow setting unmask bit */
+ IDE_DFLAG_NO_UNMASK = (1 << 13),
+ /* disallow enabling 32-bit I/O */
+ IDE_DFLAG_NO_IO_32BIT = (1 << 14),
+ /* for removable only: door lock/unlock works */
+ IDE_DFLAG_DOORLOCKING = (1 << 15),
+ /* disallow DMA */
+ IDE_DFLAG_NODMA = (1 << 16),
+	/* power management told us not to do anything, so sleep nicely */
+ IDE_DFLAG_BLOCKED = (1 << 17),
+ /* sleeping & sleep field valid */
+ IDE_DFLAG_SLEEPING = (1 << 18),
+ IDE_DFLAG_POST_RESET = (1 << 19),
+ IDE_DFLAG_UDMA33_WARNED = (1 << 20),
+ IDE_DFLAG_LBA48 = (1 << 21),
+ /* status of write cache */
+ IDE_DFLAG_WCACHE = (1 << 22),
+ /* used for ignoring ATA_DF */
+ IDE_DFLAG_NOWERR = (1 << 23),
+ /* retrying in PIO */
+ IDE_DFLAG_DMA_PIO_RETRY = (1 << 24),
+ IDE_DFLAG_LBA = (1 << 25),
+ /* don't unload heads */
+ IDE_DFLAG_NO_UNLOAD = (1 << 26),
+ /* heads unloaded, please don't reset port */
+ IDE_DFLAG_PARKED = (1 << 27),
+ IDE_DFLAG_MEDIA_CHANGED = (1 << 28),
+ /* write protect */
+ IDE_DFLAG_WP = (1 << 29),
+ IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
+};
+
+struct ide_drive_s {
+ char name[4]; /* drive name, such as "hda" */
+ char driver_req[10]; /* requests specific driver */
+
+ struct request_queue *queue; /* request queue */
+
+ struct request *rq; /* current request */
+ void *driver_data; /* extra driver data */
+ u16 *id; /* identification info */
+#ifdef CONFIG_IDE_PROC_FS
+ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
+ const struct ide_proc_devset *settings; /* /proc/ide/ drive settings */
+#endif
+ struct hwif_s *hwif; /* actually (ide_hwif_t *) */
+
+ const struct ide_disk_ops *disk_ops;
+
+ unsigned long dev_flags;
+
+ unsigned long sleep; /* sleep until this time */
+ unsigned long timeout; /* max time to wait for irq */
+
+ special_t special; /* special action flags */
+
+ u8 select; /* basic drive/head select reg value */
+ u8 retry_pio; /* retrying dma capable host in pio */
+ u8 waiting_for_dma; /* dma currently in progress */
+ u8 dma; /* atapi dma flag */
+
+ u8 quirk_list; /* considered quirky, set for a specific host */
+ u8 init_speed; /* transfer rate set at boot */
+ u8 current_speed; /* current transfer rate set */
+ u8 desired_speed; /* desired transfer rate set */
+	u8	dn;		/* now in widespread use */
+ u8 acoustic; /* acoustic management */
+ u8 media; /* disk, cdrom, tape, floppy, ... */
+ u8 ready_stat; /* min status value for drive ready */
+ u8 mult_count; /* current multiple sector setting */
+ u8 mult_req; /* requested multiple sector setting */
+ u8 io_32bit; /* 0=16-bit, 1=32-bit, 2/3=32bit+sync */
+ u8 bad_wstat; /* used for ignoring ATA_DF */
+ u8 head; /* "real" number of heads */
+ u8 sect; /* "real" sectors per track */
+ u8 bios_head; /* BIOS/fdisk/LILO number of heads */
+ u8 bios_sect; /* BIOS/fdisk/LILO sectors per track */
+
+ /* delay this long before sending packet command */
+ u8 pc_delay;
+
+ unsigned int bios_cyl; /* BIOS/fdisk/LILO number of cyls */
+ unsigned int cyl; /* "real" number of cyls */
+ unsigned int drive_data; /* used by set_pio_mode/selectproc */
+ unsigned int failures; /* current failure count */
+ unsigned int max_failures; /* maximum allowed failure count */
+	u64 probed_capacity; /* initial reported media capacity (currently ide-cd only) */
+
+ u64 capacity64; /* total number of sectors */
+
+ int lun; /* logical unit */
+ int crc_count; /* crc counter to reduce drive speed */
+
+ unsigned long debug_mask; /* debugging levels switch */
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+ struct ide_acpi_drive_link *acpidata;
+#endif
+ struct list_head list;
+ struct device gendev;
+ struct completion gendev_rel_comp; /* to deal with device release() */
+
+ /* current packet command */
+ struct ide_atapi_pc *pc;
+
+ /* callback for packet commands */
+ void (*pc_callback)(struct ide_drive_s *, int);
+
+ void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
+ int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
+ unsigned int, int);
+
+ ide_startstop_t (*irq_handler)(struct ide_drive_s *);
+
+ unsigned long atapi_flags;
+
+ struct ide_atapi_pc request_sense_pc;
+ struct request request_sense_rq;
+};
+
+typedef struct ide_drive_s ide_drive_t;
+
+#define to_ide_device(dev) container_of(dev, ide_drive_t, gendev)
+
+#define to_ide_drv(obj, cont_type) \
+ container_of(obj, struct cont_type, dev)
+
+#define ide_drv_g(disk, cont_type) \
+ container_of((disk)->private_data, struct cont_type, driver)
+
+struct ide_port_info;
+
+struct ide_tp_ops {
+ void (*exec_command)(struct hwif_s *, u8);
+ u8 (*read_status)(struct hwif_s *);
+ u8 (*read_altstatus)(struct hwif_s *);
+
+ void (*set_irq)(struct hwif_s *, int);
+
+ void (*tf_load)(ide_drive_t *, struct ide_task_s *);
+ void (*tf_read)(ide_drive_t *, struct ide_task_s *);
+
+ void (*input_data)(ide_drive_t *, struct request *, void *,
+ unsigned int);
+ void (*output_data)(ide_drive_t *, struct request *, void *,
+ unsigned int);
+};
+
+extern const struct ide_tp_ops default_tp_ops;
+
+/**
+ * struct ide_port_ops - IDE port operations
+ *
+ * @init_dev: host specific initialization of a device
+ * @set_pio_mode: routine to program host for PIO mode
+ * @set_dma_mode: routine to program host for DMA mode
+ * @selectproc: tweaks hardware to select drive
+ * @reset_poll: chipset polling based on hba specifics
+ * @pre_reset: chipset specific changes to default for device-hba resets
+ * @resetproc: routine to reset controller after a disk reset
+ * @maskproc: special host masking for drive selection
+ * @quirkproc: check host's drive quirk list
+ * @clear_irq: clear IRQ
+ *
+ * @mdma_filter: filter MDMA modes
+ * @udma_filter: filter UDMA modes
+ *
+ * @cable_detect: detect cable type
+ */
+struct ide_port_ops {
+ void (*init_dev)(ide_drive_t *);
+ void (*set_pio_mode)(ide_drive_t *, const u8);
+ void (*set_dma_mode)(ide_drive_t *, const u8);
+ void (*selectproc)(ide_drive_t *);
+ int (*reset_poll)(ide_drive_t *);
+ void (*pre_reset)(ide_drive_t *);
+ void (*resetproc)(ide_drive_t *);
+ void (*maskproc)(ide_drive_t *, int);
+ void (*quirkproc)(ide_drive_t *);
+ void (*clear_irq)(ide_drive_t *);
+
+ u8 (*mdma_filter)(ide_drive_t *);
+ u8 (*udma_filter)(ide_drive_t *);
+
+ u8 (*cable_detect)(struct hwif_s *);
+};
+
+struct ide_dma_ops {
+ void (*dma_host_set)(struct ide_drive_s *, int);
+ int (*dma_setup)(struct ide_drive_s *);
+ void (*dma_exec_cmd)(struct ide_drive_s *, u8);
+ void (*dma_start)(struct ide_drive_s *);
+ int (*dma_end)(struct ide_drive_s *);
+ int (*dma_test_irq)(struct ide_drive_s *);
+ void (*dma_lost_irq)(struct ide_drive_s *);
+ void (*dma_timeout)(struct ide_drive_s *);
+ /*
+	 * The following method is optional and only needs to be
+	 * implemented by SFF-8038i compatible controllers.
+ */
+ u8 (*dma_sff_read_status)(struct hwif_s *);
+};
+
+struct ide_host;
+
+typedef struct hwif_s {
+ struct hwif_s *mate; /* other hwif from same PCI chip */
+ struct proc_dir_entry *proc; /* /proc/ide/ directory entry */
+
+ struct ide_host *host;
+
+ char name[6]; /* name of interface, eg. "ide0" */
+
+ struct ide_io_ports io_ports;
+
+ unsigned long sata_scr[SATA_NR_PORTS];
+
+ ide_drive_t *devices[MAX_DRIVES + 1];
+
+ u8 major; /* our major number */
+ u8 index; /* 0 for ide0; 1 for ide1; ... */
+ u8 channel; /* for dual-port chips: 0=primary, 1=secondary */
+
+ u32 host_flags;
+
+ u8 pio_mask;
+
+ u8 ultra_mask;
+ u8 mwdma_mask;
+ u8 swdma_mask;
+
+ u8 cbl; /* cable type */
+
+ hwif_chipset_t chipset; /* sub-module for tuning.. */
+
+ struct device *dev;
+
+ ide_ack_intr_t *ack_intr;
+
+ void (*rw_disk)(ide_drive_t *, struct request *);
+
+ const struct ide_tp_ops *tp_ops;
+ const struct ide_port_ops *port_ops;
+ const struct ide_dma_ops *dma_ops;
+
+ /* dma physical region descriptor table (cpu view) */
+ unsigned int *dmatable_cpu;
+ /* dma physical region descriptor table (dma view) */
+ dma_addr_t dmatable_dma;
+
+ /* maximum number of PRD table entries */
+ int prd_max_nents;
+ /* PRD entry size in bytes */
+ int prd_ent_size;
+
+ /* Scatter-gather list used to build the above */
+ struct scatterlist *sg_table;
+ int sg_max_nents; /* Maximum number of entries in it */
+ int sg_nents; /* Current number of entries in it */
+ int orig_sg_nents;
+ int sg_dma_direction; /* dma transfer direction */
+
+ /* data phase of the active command (currently only valid for PIO/DMA) */
+ int data_phase;
+
+ struct ide_task_s task; /* current command */
+
+ unsigned int nsect;
+ unsigned int nleft;
+ struct scatterlist *cursg;
+ unsigned int cursg_ofs;
+
+ int rqsize; /* max sectors per request */
+ int irq; /* our irq number */
+
+ unsigned long dma_base; /* base addr for dma ports */
+
+ unsigned long config_data; /* for use by chipset-specific code */
+ unsigned long select_data; /* for use by chipset-specific code */
+
+ unsigned long extra_base; /* extra addr for dma ports */
+ unsigned extra_ports; /* number of extra dma ports */
+
+ unsigned present : 1; /* this interface exists */
+ unsigned busy : 1; /* serializes devices on a port */
+
+ struct device gendev;
+ struct device *portdev;
+
+ struct completion gendev_rel_comp; /* To deal with device release() */
+
+ void *hwif_data; /* extra hwif data */
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+ struct ide_acpi_hwif_link *acpidata;
+#endif
+
+ /* IRQ handler, if active */
+ ide_startstop_t (*handler)(ide_drive_t *);
+
+ /* BOOL: polling active & poll_timeout field valid */
+ unsigned int polling : 1;
+
+ /* current drive */
+ ide_drive_t *cur_dev;
+
+ /* current request */
+ struct request *rq;
+
+ /* failsafe timer */
+ struct timer_list timer;
+ /* timeout value during long polls */
+ unsigned long poll_timeout;
+ /* queried upon timeouts */
+ int (*expiry)(ide_drive_t *);
+
+ int req_gen;
+ int req_gen_timer;
+
+ spinlock_t lock;
+} ____cacheline_internodealigned_in_smp ide_hwif_t;
+
+#define MAX_HOST_PORTS 4
+
+struct ide_host {
+ ide_hwif_t *ports[MAX_HOST_PORTS + 1];
+ unsigned int n_ports;
+ struct device *dev[2];
+ unsigned int (*init_chipset)(struct pci_dev *);
+ irq_handler_t irq_handler;
+ unsigned long host_flags;
+ void *host_priv;
+ ide_hwif_t *cur_port; /* for hosts requiring serialization */
+
+ /* used for hosts requiring serialization */
+ volatile unsigned long host_busy;
+};
+
+#define IDE_HOST_BUSY 0
+
+/*
+ * internal ide interrupt handler type
+ */
+typedef ide_startstop_t (ide_handler_t)(ide_drive_t *);
+typedef int (ide_expiry_t)(ide_drive_t *);
+
+/* used by ide-cd, ide-floppy, etc. */
+typedef void (xfer_func_t)(ide_drive_t *, struct request *rq, void *, unsigned);
+
+extern struct mutex ide_setting_mtx;
+
+/*
+ * configurable drive settings
+ */
+
+#define DS_SYNC (1 << 0)
+
+struct ide_devset {
+ int (*get)(ide_drive_t *);
+ int (*set)(ide_drive_t *, int);
+ unsigned int flags;
+};
+
+#define __DEVSET(_flags, _get, _set) { \
+ .flags = _flags, \
+ .get = _get, \
+ .set = _set, \
+}
+
+#define ide_devset_get(name, field) \
+static int get_##name(ide_drive_t *drive) \
+{ \
+ return drive->field; \
+}
+
+#define ide_devset_set(name, field) \
+static int set_##name(ide_drive_t *drive, int arg) \
+{ \
+ drive->field = arg; \
+ return 0; \
+}
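+
+/*
+ * For illustration (this example is not part of the original header):
+ * ide_devset_get(acoustic, acoustic) expands to
+ *
+ *	static int get_acoustic(ide_drive_t *drive)
+ *	{
+ *		return drive->acoustic;
+ *	}
+ *
+ * and ide_devset_set(acoustic, acoustic) generates the matching
+ * set_acoustic() writer.
+ */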
+
+#define ide_devset_get_flag(name, flag) \
+static int get_##name(ide_drive_t *drive) \
+{ \
+ return !!(drive->dev_flags & flag); \
+}
+
+#define ide_devset_set_flag(name, flag) \
+static int set_##name(ide_drive_t *drive, int arg) \
+{ \
+ if (arg) \
+ drive->dev_flags |= flag; \
+ else \
+ drive->dev_flags &= ~flag; \
+ return 0; \
+}
+
+#define __IDE_DEVSET(_name, _flags, _get, _set) \
+const struct ide_devset ide_devset_##_name = \
+ __DEVSET(_flags, _get, _set)
+
+#define IDE_DEVSET(_name, _flags, _get, _set) \
+static __IDE_DEVSET(_name, _flags, _get, _set)
+
+#define ide_devset_rw(_name, _func) \
+IDE_DEVSET(_name, 0, get_##_func, set_##_func)
+
+#define ide_devset_w(_name, _func) \
+IDE_DEVSET(_name, 0, NULL, set_##_func)
+
+#define ide_ext_devset_rw(_name, _func) \
+__IDE_DEVSET(_name, 0, get_##_func, set_##_func)
+
+#define ide_ext_devset_rw_sync(_name, _func) \
+__IDE_DEVSET(_name, DS_SYNC, get_##_func, set_##_func)
+
+#define ide_decl_devset(_name) \
+extern const struct ide_devset ide_devset_##_name
+
+ide_decl_devset(io_32bit);
+ide_decl_devset(keepsettings);
+ide_decl_devset(pio_mode);
+ide_decl_devset(unmaskirq);
+ide_decl_devset(using_dma);
+
+#ifdef CONFIG_IDE_PROC_FS
+/*
+ * /proc/ide interface
+ */
+
+#define ide_devset_rw_field(_name, _field) \
+ide_devset_get(_name, _field); \
+ide_devset_set(_name, _field); \
+IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
+
+#define ide_devset_rw_flag(_name, _field) \
+ide_devset_get_flag(_name, _field); \
+ide_devset_set_flag(_name, _field); \
+IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
+
+struct ide_proc_devset {
+ const char *name;
+ const struct ide_devset *setting;
+ int min, max;
+ int (*mulf)(ide_drive_t *);
+ int (*divf)(ide_drive_t *);
+};
+
+#define __IDE_PROC_DEVSET(_name, _min, _max, _mulf, _divf) { \
+ .name = __stringify(_name), \
+ .setting = &ide_devset_##_name, \
+ .min = _min, \
+ .max = _max, \
+ .mulf = _mulf, \
+ .divf = _divf, \
+}
+
+#define IDE_PROC_DEVSET(_name, _min, _max) \
+__IDE_PROC_DEVSET(_name, _min, _max, NULL, NULL)
+
+typedef struct {
+ const char *name;
+ mode_t mode;
+ read_proc_t *read_proc;
+ write_proc_t *write_proc;
+} ide_proc_entry_t;
+
+void proc_ide_create(void);
+void proc_ide_destroy(void);
+void ide_proc_register_port(ide_hwif_t *);
+void ide_proc_port_register_devices(ide_hwif_t *);
+void ide_proc_unregister_device(ide_drive_t *);
+void ide_proc_unregister_port(ide_hwif_t *);
+void ide_proc_register_driver(ide_drive_t *, struct ide_driver *);
+void ide_proc_unregister_driver(ide_drive_t *, struct ide_driver *);
+
+read_proc_t proc_ide_read_capacity;
+read_proc_t proc_ide_read_geometry;
+
+/*
+ * Standard exit stuff:
+ */
+#define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) \
+{ \
+ len -= off; \
+ if (len < count) { \
+ *eof = 1; \
+ if (len <= 0) \
+ return 0; \
+ } else \
+ len = count; \
+ *start = page + off; \
+ return len; \
+}
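+
+/*
+ * Illustrative sketch (not from the original header) of a read_proc_t
+ * handler using the macro above; "foo" is a hypothetical entry name:
+ *
+ *	static int proc_ide_read_foo(char *page, char **start, off_t off,
+ *				     int count, int *eof, void *data)
+ *	{
+ *		ide_drive_t *drive = (ide_drive_t *)data;
+ *		int len = sprintf(page, "%u\n", drive->bios_cyl);
+ *
+ *		PROC_IDE_READ_RETURN(page, start, off, count, eof, len);
+ *	}
+ */
+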
+#else
+static inline void proc_ide_create(void) { ; }
+static inline void proc_ide_destroy(void) { ; }
+static inline void ide_proc_register_port(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_port_register_devices(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_unregister_device(ide_drive_t *drive) { ; }
+static inline void ide_proc_unregister_port(ide_hwif_t *hwif) { ; }
+static inline void ide_proc_register_driver(ide_drive_t *drive,
+ struct ide_driver *driver) { ; }
+static inline void ide_proc_unregister_driver(ide_drive_t *drive,
+ struct ide_driver *driver) { ; }
+#define PROC_IDE_READ_RETURN(page,start,off,count,eof,len) return 0;
+#endif
+
+enum {
+ /* enter/exit functions */
+ IDE_DBG_FUNC = (1 << 0),
+ /* sense key/asc handling */
+ IDE_DBG_SENSE = (1 << 1),
+ /* packet commands handling */
+ IDE_DBG_PC = (1 << 2),
+ /* request handling */
+ IDE_DBG_RQ = (1 << 3),
+ /* driver probing/setup */
+ IDE_DBG_PROBE = (1 << 4),
+};
+
+/* DRV_NAME has to be defined in the driver before using the macro below */
+#define __ide_debug_log(lvl, fmt, args...) \
+{ \
+ if (unlikely(drive->debug_mask & lvl)) \
+ printk(KERN_INFO DRV_NAME ": " fmt, ## args); \
+}
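+
+/*
+ * Illustrative usage (assuming the driver has defined DRV_NAME and that
+ * "drive" and "rq" are in scope): log request handling only when the
+ * drive's debug mask has IDE_DBG_RQ set:
+ *
+ *	__ide_debug_log(IDE_DBG_RQ, "cmd: 0x%x\n", rq->cmd[0]);
+ */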
+
+/*
+ * Power Management state machine (rq->pm->pm_step).
+ *
+ * For each step, the core calls ide_start_power_step() first.
+ * This can return:
+ * - ide_stopped : In this case, the core calls us back again unless
+ *   the step has been set to IDE_PM_COMPLETED.
+ * - ide_started : In this case, the channel is left busy until an
+ * async event (interrupt) occurs.
+ * Typically, ide_start_power_step() will issue a taskfile request with
+ * do_rw_taskfile().
+ *
+ * Upon reception of the interrupt, the core will call ide_complete_power_step()
+ * with the error code if any. This routine should update the step value
+ * and return. It should not start a new request. The core will call
+ * ide_start_power_step() for the new step value, unless the step has been
+ * set to IDE_PM_COMPLETED.
+ */
+enum {
+ IDE_PM_START_SUSPEND,
+ IDE_PM_FLUSH_CACHE = IDE_PM_START_SUSPEND,
+ IDE_PM_STANDBY,
+
+ IDE_PM_START_RESUME,
+ IDE_PM_RESTORE_PIO = IDE_PM_START_RESUME,
+ IDE_PM_IDLE,
+ IDE_PM_RESTORE_DMA,
+
+ IDE_PM_COMPLETED,
+};
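+
+/*
+ * Illustrative sketch (not part of the original header): the suspend half
+ * of ide_start_power_step() dispatches on the current step roughly like
+ *
+ *	switch (rq->pm->pm_step) {
+ *	case IDE_PM_FLUSH_CACHE:	// suspend step 1: flush write cache
+ *		return do_rw_taskfile(drive, &args);	// -> ide_started
+ *	case IDE_PM_STANDBY:		// suspend step 2: spin down
+ *		return do_rw_taskfile(drive, &args);
+ *	}
+ *
+ * with ide_complete_power_step() advancing pm_step from the interrupt
+ * path until it reaches IDE_PM_COMPLETED ("args" here stands for a
+ * taskfile prepared for the step).
+ */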
+
+int generic_ide_suspend(struct device *, pm_message_t);
+int generic_ide_resume(struct device *);
+
+void ide_complete_power_step(ide_drive_t *, struct request *);
+ide_startstop_t ide_start_power_step(ide_drive_t *, struct request *);
+void ide_complete_pm_request(ide_drive_t *, struct request *);
+void ide_check_pm_state(ide_drive_t *, struct request *);
+
+/*
+ * Subdrivers support.
+ *
+ * The gen_driver.owner field should be set to the module owner of this driver.
+ * The gen_driver.name field should be set to the name of this driver.
+ */
+struct ide_driver {
+ const char *version;
+ ide_startstop_t (*do_request)(ide_drive_t *, struct request *, sector_t);
+ int (*end_request)(ide_drive_t *, int, int);
+ struct device_driver gen_driver;
+ int (*probe)(ide_drive_t *);
+ void (*remove)(ide_drive_t *);
+ void (*resume)(ide_drive_t *);
+ void (*shutdown)(ide_drive_t *);
+#ifdef CONFIG_IDE_PROC_FS
+ ide_proc_entry_t * (*proc_entries)(ide_drive_t *);
+ const struct ide_proc_devset * (*proc_devsets)(ide_drive_t *);
+#endif
+};
+
+#define to_ide_driver(drv) container_of(drv, struct ide_driver, gen_driver)
+
+int ide_device_get(ide_drive_t *);
+void ide_device_put(ide_drive_t *);
+
+struct ide_ioctl_devset {
+ unsigned int get_ioctl;
+ unsigned int set_ioctl;
+ const struct ide_devset *setting;
+};
+
+int ide_setting_ioctl(ide_drive_t *, struct block_device *, unsigned int,
+ unsigned long, const struct ide_ioctl_devset *);
+
+int generic_ide_ioctl(ide_drive_t *, struct block_device *, unsigned, unsigned long);
+
+extern int ide_vlb_clk;
+extern int ide_pci_clk;
+
+extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
+int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq,
+ int uptodate, int nr_sectors);
+
+extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry);
+
+void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int,
+ ide_expiry_t *);
+
+void ide_execute_pkt_cmd(ide_drive_t *);
+
+void ide_pad_transfer(ide_drive_t *, int, int);
+
+ide_startstop_t ide_error(ide_drive_t *, const char *, u8);
+
+void ide_fix_driveid(u16 *);
+
+extern void ide_fixstring(u8 *, const int, const int);
+
+int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
+
+int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
+
+extern ide_startstop_t ide_do_reset (ide_drive_t *);
+
+extern int ide_devset_execute(ide_drive_t *drive,
+ const struct ide_devset *setting, int arg);
+
+extern void ide_do_drive_cmd(ide_drive_t *, struct request *);
+
+extern void ide_end_drive_cmd(ide_drive_t *, u8, u8);
+
+void ide_tf_dump(const char *, struct ide_taskfile *);
+
+void ide_exec_command(ide_hwif_t *, u8);
+u8 ide_read_status(ide_hwif_t *);
+u8 ide_read_altstatus(ide_hwif_t *);
+
+void ide_set_irq(ide_hwif_t *, int);
+
+void ide_tf_load(ide_drive_t *, ide_task_t *);
+void ide_tf_read(ide_drive_t *, ide_task_t *);
+
+void ide_input_data(ide_drive_t *, struct request *, void *, unsigned int);
+void ide_output_data(ide_drive_t *, struct request *, void *, unsigned int);
+
+int ide_io_buffers(ide_drive_t *, struct ide_atapi_pc *, unsigned int, int);
+
+extern void SELECT_DRIVE(ide_drive_t *);
+void SELECT_MASK(ide_drive_t *, int);
+
+u8 ide_read_error(ide_drive_t *);
+void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *);
+
+extern int drive_is_ready(ide_drive_t *);
+
+void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8);
+
+int ide_check_atapi_device(ide_drive_t *, const char *);
+
+void ide_init_pc(struct ide_atapi_pc *);
+
+/* Disk head parking */
+extern wait_queue_head_t ide_park_wq;
+ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
+ char *buf);
+ssize_t ide_park_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len);
+
+/*
+ * Special requests for ide-tape block device strategy routine.
+ *
+ * In order to service a character device command, we add special requests to
+ * the tail of our block device request queue and wait for their completion.
+ */
+enum {
+ REQ_IDETAPE_PC1 = (1 << 0), /* packet command (first stage) */
+ REQ_IDETAPE_PC2 = (1 << 1), /* packet command (second stage) */
+ REQ_IDETAPE_READ = (1 << 2),
+ REQ_IDETAPE_WRITE = (1 << 3),
+};
+
+int ide_queue_pc_tail(ide_drive_t *, struct gendisk *, struct ide_atapi_pc *);
+
+int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
+int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
+int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
+void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
+void ide_retry_pc(ide_drive_t *, struct gendisk *);
+
+int ide_cd_expiry(ide_drive_t *);
+
+int ide_cd_get_xferlen(struct request *);
+
+ide_startstop_t ide_issue_pc(ide_drive_t *);
+
+ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *);
+
+void task_end_request(ide_drive_t *, struct request *, u8);
+
+int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16);
+int ide_no_data_taskfile(ide_drive_t *, ide_task_t *);
+
+int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long);
+
+extern int ide_driveid_update(ide_drive_t *);
+extern int ide_config_drive_speed(ide_drive_t *, u8);
+extern u8 eighty_ninty_three (ide_drive_t *);
+extern int taskfile_lib_get_identify(ide_drive_t *drive, u8 *);
+
+extern int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout);
+
+extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
+
+extern void ide_timer_expiry(unsigned long);
+extern irqreturn_t ide_intr(int irq, void *dev_id);
+extern void do_ide_request(struct request_queue *);
+
+void ide_init_disk(struct gendisk *, ide_drive_t *);
+
+#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
+extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner, const char *mod_name);
+#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE, KBUILD_MODNAME)
+#else
+#define ide_pci_register_driver(d) pci_register_driver(d)
+#endif
+
+static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
+{
+ if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE && (dev->class & 5) != 5)
+ return 1;
+ return 0;
+}
+
+void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int,
+ hw_regs_t *, hw_regs_t **);
+void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
+int ide_pci_set_master(struct pci_dev *, const char *);
+unsigned long ide_pci_dma_base(ide_hwif_t *, const struct ide_port_info *);
+int ide_pci_check_simplex(ide_hwif_t *, const struct ide_port_info *);
+int ide_hwif_setup_dma(ide_hwif_t *, const struct ide_port_info *);
+#else
+static inline int ide_hwif_setup_dma(ide_hwif_t *hwif,
+ const struct ide_port_info *d)
+{
+ return -EINVAL;
+}
+#endif
+
+struct ide_pci_enablebit {
+ u8 reg; /* byte pci reg holding the enable-bit */
+ u8 mask; /* mask to isolate the enable-bit */
+ u8 val; /* value of masked reg when "enabled" */
+};
+
+enum {
+	/* Uses ISA control ports, not PCI ones. */
+ IDE_HFLAG_ISA_PORTS = (1 << 0),
+ /* single port device */
+ IDE_HFLAG_SINGLE = (1 << 1),
+ /* don't use legacy PIO blacklist */
+ IDE_HFLAG_PIO_NO_BLACKLIST = (1 << 2),
+ /* set for the second port of QD65xx */
+ IDE_HFLAG_QD_2ND_PORT = (1 << 3),
+ /* use PIO8/9 for prefetch off/on */
+ IDE_HFLAG_ABUSE_PREFETCH = (1 << 4),
+ /* use PIO6/7 for fast-devsel off/on */
+ IDE_HFLAG_ABUSE_FAST_DEVSEL = (1 << 5),
+ /* use 100-102 and 200-202 PIO values to set DMA modes */
+ IDE_HFLAG_ABUSE_DMA_MODES = (1 << 6),
+ /*
+ * keep DMA setting when programming PIO mode, may be used only
+	 * for hosts which have separate PIO and DMA timings (i.e. PMAC)
+ */
+ IDE_HFLAG_SET_PIO_MODE_KEEP_DMA = (1 << 7),
+ /* program host for the transfer mode after programming device */
+ IDE_HFLAG_POST_SET_MODE = (1 << 8),
+ /* don't program host/device for the transfer mode ("smart" hosts) */
+ IDE_HFLAG_NO_SET_MODE = (1 << 9),
+ /* trust BIOS for programming chipset/device for DMA */
+ IDE_HFLAG_TRUST_BIOS_FOR_DMA = (1 << 10),
+ /* host is CS5510/CS5520 */
+ IDE_HFLAG_CS5520 = (1 << 11),
+ /* ATAPI DMA is unsupported */
+ IDE_HFLAG_NO_ATAPI_DMA = (1 << 12),
+ /* set if host is a "non-bootable" controller */
+ IDE_HFLAG_NON_BOOTABLE = (1 << 13),
+ /* host doesn't support DMA */
+ IDE_HFLAG_NO_DMA = (1 << 14),
+ /* check if host is PCI IDE device before allowing DMA */
+ IDE_HFLAG_NO_AUTODMA = (1 << 15),
+ /* host uses MMIO */
+ IDE_HFLAG_MMIO = (1 << 16),
+ /* no LBA48 */
+ IDE_HFLAG_NO_LBA48 = (1 << 17),
+ /* no LBA48 DMA */
+ IDE_HFLAG_NO_LBA48_DMA = (1 << 18),
+ /* data FIFO is cleared by an error */
+ IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19),
+ /* serialize ports */
+ IDE_HFLAG_SERIALIZE = (1 << 20),
+ /* use legacy IRQs */
+ IDE_HFLAG_LEGACY_IRQS = (1 << 21),
+ /* force use of legacy IRQs */
+ IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22),
+ /* host is TRM290 */
+ IDE_HFLAG_TRM290 = (1 << 23),
+ /* use 32-bit I/O ops */
+ IDE_HFLAG_IO_32BIT = (1 << 24),
+ /* unmask IRQs */
+ IDE_HFLAG_UNMASK_IRQS = (1 << 25),
+ IDE_HFLAG_BROKEN_ALTSTATUS = (1 << 26),
+ /* serialize ports if DMA is possible (for sl82c105) */
+ IDE_HFLAG_SERIALIZE_DMA = (1 << 27),
+ /* force host out of "simplex" mode */
+ IDE_HFLAG_CLEAR_SIMPLEX = (1 << 28),
+ /* DSC overlap is unsupported */
+ IDE_HFLAG_NO_DSC = (1 << 29),
+ /* never use 32-bit I/O ops */
+ IDE_HFLAG_NO_IO_32BIT = (1 << 30),
+ /* never unmask IRQs */
+ IDE_HFLAG_NO_UNMASK_IRQS = (1 << 31),
+};
+
+#ifdef CONFIG_BLK_DEV_OFFBOARD
+# define IDE_HFLAG_OFF_BOARD 0
+#else
+# define IDE_HFLAG_OFF_BOARD IDE_HFLAG_NON_BOOTABLE
+#endif
+
+struct ide_port_info {
+ char *name;
+ unsigned int (*init_chipset)(struct pci_dev *);
+ void (*init_iops)(ide_hwif_t *);
+ void (*init_hwif)(ide_hwif_t *);
+ int (*init_dma)(ide_hwif_t *,
+ const struct ide_port_info *);
+
+ const struct ide_tp_ops *tp_ops;
+ const struct ide_port_ops *port_ops;
+ const struct ide_dma_ops *dma_ops;
+
+ struct ide_pci_enablebit enablebits[2];
+
+ hwif_chipset_t chipset;
+
+	u16			max_sectors; /* if smaller than the default */
+
+ u32 host_flags;
+ u8 pio_mask;
+ u8 swdma_mask;
+ u8 mwdma_mask;
+ u8 udma_mask;
+};
+
+int ide_pci_init_one(struct pci_dev *, const struct ide_port_info *, void *);
+int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
+ const struct ide_port_info *, void *);
+void ide_pci_remove(struct pci_dev *);
+
+#ifdef CONFIG_PM
+int ide_pci_suspend(struct pci_dev *, pm_message_t);
+int ide_pci_resume(struct pci_dev *);
+#else
+#define ide_pci_suspend NULL
+#define ide_pci_resume NULL
+#endif
+
+void ide_map_sg(ide_drive_t *, struct request *);
+void ide_init_sg_cmd(ide_drive_t *, struct request *);
+
+#define BAD_DMA_DRIVE 0
+#define GOOD_DMA_DRIVE 1
+
+struct drive_list_entry {
+ const char *id_model;
+ const char *id_firmware;
+};
+
+int ide_in_drive_list(u16 *, const struct drive_list_entry *);
+
+#ifdef CONFIG_BLK_DEV_IDEDMA
+int ide_dma_good_drive(ide_drive_t *);
+int __ide_dma_bad_drive(ide_drive_t *);
+int ide_id_dma_bug(ide_drive_t *);
+
+u8 ide_find_dma_mode(ide_drive_t *, u8);
+
+static inline u8 ide_max_dma_mode(ide_drive_t *drive)
+{
+ return ide_find_dma_mode(drive, XFER_UDMA_6);
+}
+
+void ide_dma_off_quietly(ide_drive_t *);
+void ide_dma_off(ide_drive_t *);
+void ide_dma_on(ide_drive_t *);
+int ide_set_dma(ide_drive_t *);
+void ide_check_dma_crc(ide_drive_t *);
+ide_startstop_t ide_dma_intr(ide_drive_t *);
+
+int ide_allocate_dma_engine(ide_hwif_t *);
+void ide_release_dma_engine(ide_hwif_t *);
+
+int ide_build_sglist(ide_drive_t *, struct request *);
+void ide_destroy_dmatable(ide_drive_t *);
+
+#ifdef CONFIG_BLK_DEV_IDEDMA_SFF
+int config_drive_for_dma(ide_drive_t *);
+extern int ide_build_dmatable(ide_drive_t *, struct request *);
+void ide_dma_host_set(ide_drive_t *, int);
+extern int ide_dma_setup(ide_drive_t *);
+void ide_dma_exec_cmd(ide_drive_t *, u8);
+extern void ide_dma_start(ide_drive_t *);
+int ide_dma_end(ide_drive_t *);
+int ide_dma_test_irq(ide_drive_t *);
+u8 ide_dma_sff_read_status(ide_hwif_t *);
+extern const struct ide_dma_ops sff_dma_ops;
+#else
+static inline int config_drive_for_dma(ide_drive_t *drive) { return 0; }
+#endif /* CONFIG_BLK_DEV_IDEDMA_SFF */
+
+void ide_dma_lost_irq(ide_drive_t *);
+void ide_dma_timeout(ide_drive_t *);
+
+#else
+static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; }
+static inline u8 ide_find_dma_mode(ide_drive_t *drive, u8 speed) { return 0; }
+static inline u8 ide_max_dma_mode(ide_drive_t *drive) { return 0; }
+static inline void ide_dma_off_quietly(ide_drive_t *drive) { ; }
+static inline void ide_dma_off(ide_drive_t *drive) { ; }
+static inline void ide_dma_on(ide_drive_t *drive) { ; }
+static inline void ide_dma_verbose(ide_drive_t *drive) { ; }
+static inline int ide_set_dma(ide_drive_t *drive) { return 1; }
+static inline void ide_check_dma_crc(ide_drive_t *drive) { ; }
+static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; }
+#endif /* CONFIG_BLK_DEV_IDEDMA */
+
+#ifdef CONFIG_BLK_DEV_IDEACPI
+extern int ide_acpi_exec_tfs(ide_drive_t *drive);
+extern void ide_acpi_get_timing(ide_hwif_t *hwif);
+extern void ide_acpi_push_timing(ide_hwif_t *hwif);
+extern void ide_acpi_init(ide_hwif_t *hwif);
+void ide_acpi_port_init_devices(ide_hwif_t *);
+extern void ide_acpi_set_state(ide_hwif_t *hwif, int on);
+#else
+static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; }
+static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_init(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; }
+static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
+#endif
+
+void ide_register_region(struct gendisk *);
+void ide_unregister_region(struct gendisk *);
+
+void ide_undecoded_slave(ide_drive_t *);
+
+void ide_port_apply_params(ide_hwif_t *);
+int ide_sysfs_register_port(ide_hwif_t *);
+
+struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **);
+void ide_host_free(struct ide_host *);
+int ide_host_register(struct ide_host *, const struct ide_port_info *,
+ hw_regs_t **);
+int ide_host_add(const struct ide_port_info *, hw_regs_t **,
+ struct ide_host **);
+void ide_host_remove(struct ide_host *);
+int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
+void ide_port_unregister_devices(ide_hwif_t *);
+void ide_port_scan(ide_hwif_t *);
+
+static inline void *ide_get_hwifdata (ide_hwif_t * hwif)
+{
+ return hwif->hwif_data;
+}
+
+static inline void ide_set_hwifdata (ide_hwif_t * hwif, void *data)
+{
+ hwif->hwif_data = data;
+}
+
+const char *ide_xfer_verbose(u8 mode);
+extern void ide_toggle_bounce(ide_drive_t *drive, int on);
+extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate);
+
+u64 ide_get_lba_addr(struct ide_taskfile *, int);
+u8 ide_dump_status(ide_drive_t *, const char *, u8);
+
+struct ide_timing {
+ u8 mode;
+ u8 setup; /* t1 */
+ u16 act8b; /* t2 for 8-bit io */
+ u16 rec8b; /* t2i for 8-bit io */
+ u16 cyc8b; /* t0 for 8-bit io */
+ u16 active; /* t2 or tD */
+ u16 recover; /* t2i or tK */
+ u16 cycle; /* t0 */
+ u16 udma; /* t2CYCTYP/2 */
+};
+
+enum {
+ IDE_TIMING_SETUP = (1 << 0),
+ IDE_TIMING_ACT8B = (1 << 1),
+ IDE_TIMING_REC8B = (1 << 2),
+ IDE_TIMING_CYC8B = (1 << 3),
+ IDE_TIMING_8BIT = IDE_TIMING_ACT8B | IDE_TIMING_REC8B |
+ IDE_TIMING_CYC8B,
+ IDE_TIMING_ACTIVE = (1 << 4),
+ IDE_TIMING_RECOVER = (1 << 5),
+ IDE_TIMING_CYCLE = (1 << 6),
+ IDE_TIMING_UDMA = (1 << 7),
+ IDE_TIMING_ALL = IDE_TIMING_SETUP | IDE_TIMING_8BIT |
+ IDE_TIMING_ACTIVE | IDE_TIMING_RECOVER |
+ IDE_TIMING_CYCLE | IDE_TIMING_UDMA,
+};
+
+struct ide_timing *ide_timing_find_mode(u8);
+u16 ide_pio_cycle_time(ide_drive_t *, u8);
+void ide_timing_merge(struct ide_timing *, struct ide_timing *,
+ struct ide_timing *, unsigned int);
+int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int);
+
+int ide_scan_pio_blacklist(char *);
+
+u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8);
+
+int ide_set_pio_mode(ide_drive_t *, u8);
+int ide_set_dma_mode(ide_drive_t *, u8);
+
+void ide_set_pio(ide_drive_t *, u8);
+
+static inline void ide_set_max_pio(ide_drive_t *drive)
+{
+ ide_set_pio(drive, 255);
+}
+
+char *ide_media_string(ide_drive_t *);
+
+extern struct device_attribute ide_dev_attrs[];
+extern struct bus_type ide_bus_type;
+extern struct class *ide_port_class;
+
+static inline void ide_dump_identify(u8 *id)
+{
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 2, id, 512, 0);
+}
+
+static inline int hwif_to_node(ide_hwif_t *hwif)
+{
+ return hwif->dev ? dev_to_node(hwif->dev) : -1;
+}
+
+static inline ide_drive_t *ide_get_pair_dev(ide_drive_t *drive)
+{
+ ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
+
+ return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
+}
+
+#define ide_port_for_each_dev(i, dev, port) \
+ for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++)
+
+#define ide_host_for_each_port(i, port, host) \
+ for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)
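+
+/*
+ * Illustrative usage (not part of the original header): walk the devices
+ * of a port, skipping empty and absent slots:
+ *
+ *	ide_drive_t *drive;
+ *	int i;
+ *
+ *	ide_port_for_each_dev(i, drive, hwif) {
+ *		if (drive == NULL ||
+ *		    (drive->dev_flags & IDE_DFLAG_PRESENT) == 0)
+ *			continue;
+ *		// ... operate on the present drive ...
+ *	}
+ */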
+
+#endif /* _IDE_H */
diff --git a/libdde-linux26/include/linux/init.h b/libdde-linux26/include/linux/init.h
new file mode 100644
index 00000000..770546a2
--- /dev/null
+++ b/libdde-linux26/include/linux/init.h
@@ -0,0 +1,374 @@
+#ifndef _LINUX_INIT_H
+#define _LINUX_INIT_H
+
+#ifdef DDE_LINUX
+#include <ddekit/initcall.h>
+#endif
+#include <linux/compiler.h>
+
+/* These macros are used to mark some functions or
+ * initialized data (doesn't apply to uninitialized data)
+ * as `initialization' functions. The kernel can take this
+ * as a hint that the function is used only during the initialization
+ * phase and free up the memory it uses afterwards.
+ *
+ * Usage:
+ * For functions:
+ *
+ * You should add __init immediately before the function name, like:
+ *
+ * static void __init initme(int x, int y)
+ * {
+ * extern int z; z = x * y;
+ * }
+ *
+ * If the function has a prototype somewhere, you can also add
+ * __init between the closing parenthesis of the prototype and the semicolon:
+ *
+ * extern int initialize_foobar_device(int, int, int) __init;
+ *
+ * For initialized data:
+ * You should insert __initdata between the variable name and the equals
+ * sign, followed by the value, e.g.:
+ *
+ * static int init_variable __initdata = 0;
+ * static char linux_logo[] __initdata = { 0x32, 0x36, ... };
+ *
+ * Don't forget to initialize data not at file scope, i.e. within a function,
+ * as gcc otherwise puts the data into the bss section and not into the init
+ * section.
+ *
+ * Also note that this data cannot be "const".
+ */
+
+/* These are for everybody (although not all archs will actually
+ discard it in modules) */
+#ifndef DDE_LINUX
+#define __init __section(.init.text) __cold notrace
+#else
+#define __init __used
+#endif
+#define __initdata __section(.init.data)
+#define __initconst __section(.init.rodata)
+#define __exitdata __section(.exit.data)
+#define __exit_call __used __section(.exitcall.exit)
+
+/* modpost checks for section mismatches during the kernel build.
+ * A section mismatch happens when there are references from a
+ * code or data section to an init section (both code or data).
+ * The init sections are (for most archs) discarded by the kernel
+ * when early init has completed so all such references are potential bugs.
+ * For exit sections the same issue exists.
+ * The following markers are used for the cases where the reference to
+ * the *init / *exit section (code or data) is valid and will teach
+ * modpost not to issue a warning.
+ * The markers follow the same syntax rules as __init / __initdata. */
+#define __ref __section(.ref.text) noinline
+#define __refdata __section(.ref.data)
+#define __refconst __section(.ref.rodata)
+
+/* backward compatibility note
+ * A few places hardcode the old section names:
+ * .text.init.refok
+ * .data.init.refok
+ * .exit.text.refok
+ * They should be converted to use the defines from this file
+ */
+
+/* compatibility defines */
+#define __init_refok __ref
+#define __initdata_refok __refdata
+#define __exit_refok __ref
+
+
+#ifdef MODULE
+#define __exitused
+#else
+#define __exitused __used
+#endif
+
+#define __exit __section(.exit.text) __exitused __cold
+
+/* Used for HOTPLUG */
+#define __devinit __section(.devinit.text) __cold
+#define __devinitdata __section(.devinit.data)
+#define __devinitconst __section(.devinit.rodata)
+#define __devexit __section(.devexit.text) __exitused __cold
+#define __devexitdata __section(.devexit.data)
+#define __devexitconst __section(.devexit.rodata)
+
+/* Used for HOTPLUG_CPU */
+#define __cpuinit __section(.cpuinit.text) __cold
+#define __cpuinitdata __section(.cpuinit.data)
+#define __cpuinitconst __section(.cpuinit.rodata)
+#define __cpuexit __section(.cpuexit.text) __exitused __cold
+#define __cpuexitdata __section(.cpuexit.data)
+#define __cpuexitconst __section(.cpuexit.rodata)
+
+/* Used for MEMORY_HOTPLUG */
+#define __meminit __section(.meminit.text) __cold
+#define __meminitdata __section(.meminit.data)
+#define __meminitconst __section(.meminit.rodata)
+#define __memexit __section(.memexit.text) __exitused __cold
+#define __memexitdata __section(.memexit.data)
+#define __memexitconst __section(.memexit.rodata)
+
+/* For assembly routines */
+#define __HEAD .section ".head.text","ax"
+#define __INIT .section ".init.text","ax"
+#define __FINIT .previous
+
+#define __INITDATA .section ".init.data","aw"
+#define __INITRODATA .section ".init.rodata","a"
+#define __FINITDATA .previous
+
+#define __DEVINIT .section ".devinit.text", "ax"
+#define __DEVINITDATA .section ".devinit.data", "aw"
+#define __DEVINITRODATA .section ".devinit.rodata", "a"
+
+#define __CPUINIT .section ".cpuinit.text", "ax"
+#define __CPUINITDATA .section ".cpuinit.data", "aw"
+#define __CPUINITRODATA .section ".cpuinit.rodata", "a"
+
+#define __MEMINIT .section ".meminit.text", "ax"
+#define __MEMINITDATA .section ".meminit.data", "aw"
+#define __MEMINITRODATA .section ".meminit.rodata", "a"
+
+/* silence warnings when references are OK */
+#define __REF .section ".ref.text", "ax"
+#define __REFDATA .section ".ref.data", "aw"
+#define __REFCONST .section ".ref.rodata", "a"
+
+#ifndef __ASSEMBLY__
+/*
+ * Used for initialization calls..
+ */
+typedef int (*initcall_t)(void);
+typedef void (*exitcall_t)(void);
+
+extern initcall_t __con_initcall_start[], __con_initcall_end[];
+extern initcall_t __security_initcall_start[], __security_initcall_end[];
+
+/* Defined in init/main.c */
+extern int do_one_initcall(initcall_t fn);
+extern char __initdata boot_command_line[];
+extern char *saved_command_line;
+extern unsigned int reset_devices;
+
+/* used by init/main.c */
+void setup_arch(char **);
+void prepare_namespace(void);
+
+extern void (*late_time_init)(void);
+
+#endif
+
+#ifndef MODULE
+
+#ifndef __ASSEMBLY__
+
+/* initcalls are now grouped by functionality into separate
+ * subsections. Ordering inside the subsections is determined
+ * by link order.
+ * For backwards compatibility, initcall() puts the call in
+ * the device init subsection.
+ *
+ * The `id' arg to __define_initcall() is needed so that multiple initcalls
+ * can point at the same handler without causing duplicate-symbol build errors.
+ */
+
+#ifndef DDE_LINUX
+#define __define_initcall(level,fn,id) \
+ static initcall_t __initcall_##fn##id __used \
+ __attribute__((__section__(".initcall" level ".init"))) = fn
+#else // DDE_LINUX
+// XXX: DDE CTORs are executed in reverse order compared to how Linux'
+// initcalls were run in earlier versions
+#include <ddekit/initcall.h>
+#define __define_initcall(level,fn,id) DDEKIT_CTOR(fn,level)
+#endif
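+
+/*
+ * For illustration (not part of the original header): with DDE_LINUX
+ * unset, device_initcall(foo) expands to
+ *
+ *	static initcall_t __initcall_foo6 __used
+ *		__attribute__((__section__(".initcall6.init"))) = foo;
+ *
+ * whereas under DDE_LINUX the same call becomes DDEKIT_CTOR(foo, 1005),
+ * per the level tables below.
+ */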
+
+/*
+ * Early initcalls run before initializing SMP.
+ *
+ * Only for built-in code, not modules.
+ */
+#define early_initcall(fn) __define_initcall("early",fn,early)
+
+/*
+ * A "pure" initcall has no dependencies on anything else, and purely
+ * initializes variables that couldn't be statically initialized.
+ *
+ * This only exists for built-in code, not for modules.
+ */
+#ifndef DDE_LINUX
+#define pure_initcall(fn) __define_initcall("0",fn,0)
+
+#define core_initcall(fn) __define_initcall("1",fn,1)
+#define core_initcall_sync(fn) __define_initcall("1s",fn,1s)
+#define postcore_initcall(fn) __define_initcall("2",fn,2)
+#define postcore_initcall_sync(fn) __define_initcall("2s",fn,2s)
+#define arch_initcall(fn) __define_initcall("3",fn,3)
+#define arch_initcall_sync(fn) __define_initcall("3s",fn,3s)
+#define subsys_initcall(fn) __define_initcall("4",fn,4)
+#define subsys_initcall_sync(fn) __define_initcall("4s",fn,4s)
+#define fs_initcall(fn) __define_initcall("5",fn,5)
+#define fs_initcall_sync(fn) __define_initcall("5s",fn,5s)
+#define rootfs_initcall(fn) __define_initcall("rootfs",fn,rootfs)
+#define device_initcall(fn) __define_initcall("6",fn,6)
+#define device_initcall_sync(fn) __define_initcall("6s",fn,6s)
+#define late_initcall(fn) __define_initcall("7",fn,7)
+#define late_initcall_sync(fn) __define_initcall("7s",fn,7s)
+#else /* DDE_LINUX */
+#define pure_initcall(fn) __define_initcall(1000,fn,1000)
+
+//#define dde_initcall(fn) __define_initcall(1009,fn,10)
+//#define dde_process_initcall(fn) __define_initcall(1008,fn,10)
+#define core_initcall(fn) __define_initcall(1000,fn,7)
+#define core_initcall_sync(fn) __define_initcall(1007s,fn,7s)
+#define postcore_initcall(fn) __define_initcall(1001,fn,6)
+#define postcore_initcall_sync(fn) __define_initcall(1006s,fn,6s)
+#define arch_initcall(fn) __define_initcall(1002,fn,5)
+#define arch_initcall_sync(fn) __define_initcall(1005s,fn,5s)
+#define subsys_initcall(fn) __define_initcall(1003,fn,4)
+#define subsys_initcall_sync(fn) __define_initcall(1004s,fn,4s)
+#define fs_initcall(fn) __define_initcall(1004,fn,3)
+#define fs_initcall_sync(fn) __define_initcall(1003s,fn,3s)
+//#define rootfs_initcall(fn) __define_initcall(ootfs,fn,rootfs)
+#define device_initcall(fn) __define_initcall(1005,fn,2)
+#define device_initcall_sync(fn) __define_initcall(1002s,fn,2s)
+#define late_initcall(fn) __define_initcall(1006,fn,1)
+#define late_initcall_sync(fn) __define_initcall(1001s,fn,1s)
+#endif
+
+#define __initcall(fn) device_initcall(fn)
+
+#ifndef DDE_LINUX
+#define __exitcall(fn) \
+ static exitcall_t __exitcall_##fn __exit_call = fn
+#else
+#define __exitcall(fn)
+#endif
+
+#define console_initcall(fn) \
+ static initcall_t __initcall_##fn \
+ __used __section(.con_initcall.init) = fn
+
+#define security_initcall(fn) \
+ static initcall_t __initcall_##fn \
+ __used __section(.security_initcall.init) = fn
+
+struct obs_kernel_param {
+ const char *str;
+ int (*setup_func)(char *);
+ int early;
+};
+
+/*
+ * Only for really core code. See moduleparam.h for the normal way.
+ *
+ * Force the alignment so the compiler doesn't space elements of the
+ * obs_kernel_param "array" too far apart in .init.setup.
+ */
+#define __setup_param(str, unique_id, fn, early) \
+ static char __setup_str_##unique_id[] __initdata __aligned(1) = str; \
+ static struct obs_kernel_param __setup_##unique_id \
+ __used __section(.init.setup) \
+ __attribute__((aligned((sizeof(long))))) \
+ = { __setup_str_##unique_id, fn, early }
+
+#define __setup(str, fn) \
+ __setup_param(str, fn, fn, 0)
+
+/* NOTE: fn is as per module_param, not __setup! Emits warning if fn
+ * returns non-zero. */
+#define early_param(str, fn) \
+ __setup_param(str, fn, fn, 1)
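+
+/*
+ * Illustrative usage (hypothetical "mydebug=" option, not from the
+ * original header):
+ *
+ *	static int __init mydebug_setup(char *str)
+ *	{
+ *		// parse str here
+ *		return 1;	// consumed
+ *	}
+ *	__setup("mydebug=", mydebug_setup);
+ */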
+
+/* Relies on boot_command_line being set */
+void __init parse_early_param(void);
+#endif /* __ASSEMBLY__ */
+
+/**
+ * module_init() - driver initialization entry point
+ * @x: function to be run at kernel boot time or module insertion
+ *
+ * module_init() will either be called during do_initcalls() (if
+ * builtin) or at module insertion time (if a module). There can only
+ * be one per module.
+ */
+#define module_init(x) __initcall(x);
+
+/**
+ * module_exit() - driver exit entry point
+ * @x: function to be run when driver is removed
+ *
+ * module_exit() will wrap the driver clean-up code
+ * with cleanup_module() when used with rmmod when
+ * the driver is a module. If the driver is statically
+ * compiled into the kernel, module_exit() has no effect.
+ * There can only be one per module.
+ */
+#define module_exit(x) __exitcall(x);
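+
+/*
+ * Illustrative usage (hypothetical driver, not part of the original
+ * header):
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		return 0;	// 0 = success
+ *	}
+ *
+ *	static void __exit foo_exit(void)
+ *	{
+ *	}
+ *
+ *	module_init(foo_init);
+ *	module_exit(foo_exit);
+ */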
+
+#else /* MODULE */
+
+/* Don't use these in modules, but some people do... */
+#define core_initcall(fn) module_init(fn)
+#define postcore_initcall(fn) module_init(fn)
+#define arch_initcall(fn) module_init(fn)
+#define subsys_initcall(fn) module_init(fn)
+#define fs_initcall(fn) module_init(fn)
+#define device_initcall(fn) module_init(fn)
+#define late_initcall(fn) module_init(fn)
+
+#define security_initcall(fn) module_init(fn)
+
+/* Each module must use one module_init(). */
+#define module_init(initfn) \
+ static inline initcall_t __inittest(void) \
+ { return initfn; } \
+ int init_module(void) __attribute__((alias(#initfn)));
+
+/* This is only required if you want to be unloadable. */
+#define module_exit(exitfn) \
+ static inline exitcall_t __exittest(void) \
+ { return exitfn; } \
+ void cleanup_module(void) __attribute__((alias(#exitfn)));
+
+#define __setup_param(str, unique_id, fn) /* nothing */
+#define __setup(str, func) /* nothing */
+#endif
+
+/* Data marked not to be saved by software suspend */
+#define __nosavedata __section(.data.nosave)
+
+/* This means "can be init if no module support, otherwise module load
+ may call it." */
+#ifdef CONFIG_MODULES
+#define __init_or_module
+#define __initdata_or_module
+#else
+#define __init_or_module __init
+#define __initdata_or_module __initdata
+#endif /*CONFIG_MODULES*/
+
+/* Functions marked as __devexit may be discarded at kernel link time, depending
+ on config options. Newer versions of binutils detect references from
+ retained sections to discarded sections and flag an error. Pointers to
+   __devexit functions must use __devexit_p(function_name); the wrapper will
+ insert either the function_name or NULL, depending on the config options.
+ */
+#if defined(MODULE) || defined(CONFIG_HOTPLUG)
+#define __devexit_p(x) x
+#else
+#define __devexit_p(x) NULL
+#endif
+
+#ifdef MODULE
+#define __exit_p(x) x
+#else
+#define __exit_p(x) NULL
+#endif
+
+#endif /* _LINUX_INIT_H */
diff --git a/libdde-linux26/include/linux/init_task.h b/libdde-linux26/include/linux/init_task.h
new file mode 100644
index 00000000..db5ec518
--- /dev/null
+++ b/libdde-linux26/include/linux/init_task.h
@@ -0,0 +1,193 @@
+#ifndef _LINUX__INIT_TASK_H
+#define _LINUX__INIT_TASK_H
+
+#include <linux/rcupdate.h>
+#include <linux/irqflags.h>
+#include <linux/utsname.h>
+#include <linux/lockdep.h>
+#include <linux/ipc.h>
+#include <linux/pid_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/securebits.h>
+#include <net/net_namespace.h>
+
+extern struct files_struct init_files;
+extern struct fs_struct init_fs;
+
+#define INIT_KIOCTX(name, which_mm) \
+{ \
+ .users = ATOMIC_INIT(1), \
+ .dead = 0, \
+ .mm = &which_mm, \
+ .user_id = 0, \
+ .next = NULL, \
+ .wait = __WAIT_QUEUE_HEAD_INITIALIZER(name.wait), \
+ .ctx_lock = __SPIN_LOCK_UNLOCKED(name.ctx_lock), \
+ .reqs_active = 0U, \
+ .max_reqs = ~0U, \
+}
+
+#define INIT_MM(name) \
+{ \
+ .mm_rb = RB_ROOT, \
+ .pgd = swapper_pg_dir, \
+ .mm_users = ATOMIC_INIT(2), \
+ .mm_count = ATOMIC_INIT(1), \
+ .mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem), \
+ .page_table_lock = __SPIN_LOCK_UNLOCKED(name.page_table_lock), \
+ .mmlist = LIST_HEAD_INIT(name.mmlist), \
+ .cpu_vm_mask = CPU_MASK_ALL, \
+}
+
+#define INIT_SIGNALS(sig) { \
+ .count = ATOMIC_INIT(1), \
+ .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
+ .shared_pending = { \
+ .list = LIST_HEAD_INIT(sig.shared_pending.list), \
+ .signal = {{0}}}, \
+ .posix_timers = LIST_HEAD_INIT(sig.posix_timers), \
+ .cpu_timers = INIT_CPU_TIMERS(sig.cpu_timers), \
+ .rlim = INIT_RLIMITS, \
+ .cputimer = { \
+ .cputime = INIT_CPUTIME, \
+ .running = 0, \
+ .lock = __SPIN_LOCK_UNLOCKED(sig.cputimer.lock), \
+ }, \
+}
+
+extern struct nsproxy init_nsproxy;
+#define INIT_NSPROXY(nsproxy) { \
+ .pid_ns = &init_pid_ns, \
+ .count = ATOMIC_INIT(1), \
+ .uts_ns = &init_uts_ns, \
+ .mnt_ns = NULL, \
+ INIT_NET_NS(net_ns) \
+ INIT_IPC_NS(ipc_ns) \
+}
+
+#define INIT_SIGHAND(sighand) { \
+ .count = ATOMIC_INIT(1), \
+ .action = { { { .sa_handler = NULL, } }, }, \
+ .siglock = __SPIN_LOCK_UNLOCKED(sighand.siglock), \
+ .signalfd_wqh = __WAIT_QUEUE_HEAD_INITIALIZER(sighand.signalfd_wqh), \
+}
+
+extern struct group_info init_groups;
+
+#define INIT_STRUCT_PID { \
+ .count = ATOMIC_INIT(1), \
+ .tasks = { \
+ { .first = &init_task.pids[PIDTYPE_PID].node }, \
+ { .first = &init_task.pids[PIDTYPE_PGID].node }, \
+ { .first = &init_task.pids[PIDTYPE_SID].node }, \
+ }, \
+ .rcu = RCU_HEAD_INIT, \
+ .level = 0, \
+ .numbers = { { \
+ .nr = 0, \
+ .ns = &init_pid_ns, \
+ .pid_chain = { .next = NULL, .pprev = NULL }, \
+ }, } \
+}
+
+#define INIT_PID_LINK(type) \
+{ \
+ .node = { \
+ .next = NULL, \
+ .pprev = &init_struct_pid.tasks[type].first, \
+ }, \
+ .pid = &init_struct_pid, \
+}
+
+#ifdef CONFIG_AUDITSYSCALL
+#define INIT_IDS \
+ .loginuid = -1, \
+ .sessionid = -1,
+#else
+#define INIT_IDS
+#endif
+
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+/*
+ * Because of the reduced scope of CAP_SETPCAP when filesystem
+ * capabilities are in effect, it is safe to allow CAP_SETPCAP to
+ * be available in the default configuration.
+ */
+# define CAP_INIT_BSET CAP_FULL_SET
+#else
+# define CAP_INIT_BSET CAP_INIT_EFF_SET
+#endif
+
+/*
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk! Base=0, limit=0x1fffff (=2MB)
+ */
+#define INIT_TASK(tsk) \
+{ \
+ .state = 0, \
+ .stack = &init_thread_info, \
+ .usage = ATOMIC_INIT(2), \
+ .flags = PF_KTHREAD, \
+ .lock_depth = -1, \
+ .prio = MAX_PRIO-20, \
+ .static_prio = MAX_PRIO-20, \
+ .normal_prio = MAX_PRIO-20, \
+ .policy = SCHED_NORMAL, \
+ .cpus_allowed = CPU_MASK_ALL, \
+ .mm = NULL, \
+ .active_mm = &init_mm, \
+ .se = { \
+ .group_node = LIST_HEAD_INIT(tsk.se.group_node), \
+ }, \
+ .rt = { \
+ .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
+		/* DDE uses a definition of HZ which is not a macro */	\
+ .time_slice = 250, \
+ .nr_cpus_allowed = NR_CPUS, \
+ }, \
+ .tasks = LIST_HEAD_INIT(tsk.tasks), \
+ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \
+ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \
+ .real_parent = &tsk, \
+ .parent = &tsk, \
+ .children = LIST_HEAD_INIT(tsk.children), \
+ .sibling = LIST_HEAD_INIT(tsk.sibling), \
+ .group_leader = &tsk, \
+ .comm = "swapper", \
+ .thread = INIT_THREAD, \
+ .fs = &init_fs, \
+ .files = &init_files, \
+ .signal = &init_signals, \
+ .sighand = &init_sighand, \
+ .nsproxy = &init_nsproxy, \
+ .pending = { \
+ .list = LIST_HEAD_INIT(tsk.pending.list), \
+ .signal = {{0}}}, \
+ .blocked = {{0}}, \
+ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \
+ .journal_info = NULL, \
+ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \
+ .fs_excl = ATOMIC_INIT(0), \
+ .pi_lock = __SPIN_LOCK_UNLOCKED(tsk.pi_lock), \
+ .timer_slack_ns = 50000, /* 50 usec default slack */ \
+ .pids = { \
+ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
+ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
+ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \
+ }, \
+ .dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
+ INIT_IDS \
+ INIT_TRACE_IRQFLAGS \
+ INIT_LOCKDEP \
+}
+
+
+#define INIT_CPU_TIMERS(cpu_timers) \
+{ \
+ LIST_HEAD_INIT(cpu_timers[0]), \
+ LIST_HEAD_INIT(cpu_timers[1]), \
+ LIST_HEAD_INIT(cpu_timers[2]), \
+}
+
+
+#endif
diff --git a/libdde-linux26/include/linux/ioport.h b/libdde-linux26/include/linux/ioport.h
new file mode 100644
index 00000000..32e4b2f7
--- /dev/null
+++ b/libdde-linux26/include/linux/ioport.h
@@ -0,0 +1,186 @@
+/*
+ * ioport.h Definitions of routines for detecting, reserving and
+ * allocating system resources.
+ *
+ * Authors: Linus Torvalds
+ */
+
+#ifndef _LINUX_IOPORT_H
+#define _LINUX_IOPORT_H
+
+#ifndef __ASSEMBLY__
+#include <linux/compiler.h>
+#include <linux/types.h>
+/*
+ * Resources are tree-like, allowing
+ * nesting etc..
+ */
+struct resource {
+ resource_size_t start;
+ resource_size_t end;
+ const char *name;
+ unsigned long flags;
+ struct resource *parent, *sibling, *child;
+};
+
+struct resource_list {
+ struct resource_list *next;
+ struct resource *res;
+ struct pci_dev *dev;
+};
+
+/*
+ * IO resources have these defined flags.
+ */
+#define IORESOURCE_BITS 0x000000ff /* Bus-specific bits */
+
+#define IORESOURCE_TYPE_BITS 0x00000f00 /* Resource type */
+#define IORESOURCE_IO 0x00000100
+#define IORESOURCE_MEM 0x00000200
+#define IORESOURCE_IRQ 0x00000400
+#define IORESOURCE_DMA 0x00000800
+
+#define IORESOURCE_PREFETCH 0x00001000 /* No side effects */
+#define IORESOURCE_READONLY 0x00002000
+#define IORESOURCE_CACHEABLE 0x00004000
+#define IORESOURCE_RANGELENGTH 0x00008000
+#define IORESOURCE_SHADOWABLE 0x00010000
+
+#define IORESOURCE_SIZEALIGN 0x00020000 /* size indicates alignment */
+#define IORESOURCE_STARTALIGN 0x00040000 /* start field is alignment */
+
+#define IORESOURCE_EXCLUSIVE 0x08000000 /* Userland may not map this resource */
+#define IORESOURCE_DISABLED 0x10000000
+#define IORESOURCE_UNSET 0x20000000
+#define IORESOURCE_AUTO 0x40000000
+#define IORESOURCE_BUSY 0x80000000 /* Driver has marked this resource busy */
+
+/* PnP IRQ specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IRQ_HIGHEDGE (1<<0)
+#define IORESOURCE_IRQ_LOWEDGE (1<<1)
+#define IORESOURCE_IRQ_HIGHLEVEL (1<<2)
+#define IORESOURCE_IRQ_LOWLEVEL (1<<3)
+#define IORESOURCE_IRQ_SHAREABLE (1<<4)
+#define IORESOURCE_IRQ_OPTIONAL (1<<5)
+
+/* PnP DMA specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_DMA_TYPE_MASK (3<<0)
+#define IORESOURCE_DMA_8BIT (0<<0)
+#define IORESOURCE_DMA_8AND16BIT (1<<0)
+#define IORESOURCE_DMA_16BIT (2<<0)
+
+#define IORESOURCE_DMA_MASTER (1<<2)
+#define IORESOURCE_DMA_BYTE (1<<3)
+#define IORESOURCE_DMA_WORD (1<<4)
+
+#define IORESOURCE_DMA_SPEED_MASK (3<<6)
+#define IORESOURCE_DMA_COMPATIBLE (0<<6)
+#define IORESOURCE_DMA_TYPEA (1<<6)
+#define IORESOURCE_DMA_TYPEB (2<<6)
+#define IORESOURCE_DMA_TYPEF (3<<6)
+
+/* PnP memory I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_MEM_WRITEABLE (1<<0) /* dup: IORESOURCE_READONLY */
+#define IORESOURCE_MEM_CACHEABLE (1<<1) /* dup: IORESOURCE_CACHEABLE */
+#define IORESOURCE_MEM_RANGELENGTH (1<<2) /* dup: IORESOURCE_RANGELENGTH */
+#define IORESOURCE_MEM_TYPE_MASK (3<<3)
+#define IORESOURCE_MEM_8BIT (0<<3)
+#define IORESOURCE_MEM_16BIT (1<<3)
+#define IORESOURCE_MEM_8AND16BIT (2<<3)
+#define IORESOURCE_MEM_32BIT (3<<3)
+#define IORESOURCE_MEM_SHADOWABLE (1<<5) /* dup: IORESOURCE_SHADOWABLE */
+#define IORESOURCE_MEM_EXPANSIONROM (1<<6)
+
+/* PnP I/O specific bits (IORESOURCE_BITS) */
+#define IORESOURCE_IO_16BIT_ADDR (1<<0)
+#define IORESOURCE_IO_FIXED (1<<1)
+
+/* PCI ROM control bits (IORESOURCE_BITS) */
+#define IORESOURCE_ROM_ENABLE (1<<0) /* ROM is enabled, same as PCI_ROM_ADDRESS_ENABLE */
+#define IORESOURCE_ROM_SHADOW (1<<1) /* ROM is copy at C000:0 */
+#define IORESOURCE_ROM_COPY (1<<2) /* ROM is alloc'd copy, resource field overlaid */
+#define IORESOURCE_ROM_BIOS_COPY (1<<3) /* ROM is BIOS copy, resource field overlaid */
+
+/* PCI control bits. Shares IORESOURCE_BITS with above PCI ROM. */
+#define IORESOURCE_PCI_FIXED (1<<4) /* Do not move resource */
+
+/* PC/ISA/whatever - the normal PC address spaces: IO and memory */
+extern struct resource ioport_resource;
+extern struct resource iomem_resource;
+
+extern int request_resource(struct resource *root, struct resource *new);
+extern int release_resource(struct resource *new);
+extern void reserve_region_with_split(struct resource *root,
+ resource_size_t start, resource_size_t end,
+ const char *name);
+extern int insert_resource(struct resource *parent, struct resource *new);
+extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
+extern int allocate_resource(struct resource *root, struct resource *new,
+ resource_size_t size, resource_size_t min,
+ resource_size_t max, resource_size_t align,
+ void (*alignf)(void *, struct resource *,
+ resource_size_t, resource_size_t),
+ void *alignf_data);
+int adjust_resource(struct resource *res, resource_size_t start,
+ resource_size_t size);
+resource_size_t resource_alignment(struct resource *res);
+static inline resource_size_t resource_size(struct resource *res)
+{
+ return res->end - res->start + 1;
+}
+static inline unsigned long resource_type(struct resource *res)
+{
+ return res->flags & IORESOURCE_TYPE_BITS;
+}
+
+/* Convenience shorthand with allocation */
+#define request_region(start,n,name) __request_region(&ioport_resource, (start), (n), (name), 0)
+#define __request_mem_region(start,n,name, excl) __request_region(&iomem_resource, (start), (n), (name), excl)
+#define request_mem_region(start,n,name) __request_region(&iomem_resource, (start), (n), (name), 0)
+#define request_mem_region_exclusive(start,n,name) \
+ __request_region(&iomem_resource, (start), (n), (name), IORESOURCE_EXCLUSIVE)
+#define rename_region(region, newname) do { (region)->name = (newname); } while (0)
+
+extern struct resource * __request_region(struct resource *,
+ resource_size_t start,
+ resource_size_t n,
+ const char *name, int flags);
+
+/* Compatibility cruft */
+#define release_region(start,n) __release_region(&ioport_resource, (start), (n))
+#define check_mem_region(start,n) __check_region(&iomem_resource, (start), (n))
+#define release_mem_region(start,n) __release_region(&iomem_resource, (start), (n))
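+
+/*
+ * Illustrative usage (the legacy ide0 port range, for example): reserve
+ * the range before touching the hardware, release it on teardown:
+ *
+ *	if (!request_region(0x1f0, 8, "ide0"))
+ *		return -EBUSY;
+ *	// ... access ports 0x1f0-0x1f7 ...
+ *	release_region(0x1f0, 8);
+ */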
+
+extern int __check_region(struct resource *, resource_size_t, resource_size_t);
+extern void __release_region(struct resource *, resource_size_t,
+ resource_size_t);
+
+static inline int __deprecated check_region(resource_size_t s,
+ resource_size_t n)
+{
+ return __check_region(&ioport_resource, s, n);
+}
+
+/* Wrappers for managed devices */
+struct device;
+#define devm_request_region(dev,start,n,name) \
+ __devm_request_region(dev, &ioport_resource, (start), (n), (name))
+#define devm_request_mem_region(dev,start,n,name) \
+ __devm_request_region(dev, &iomem_resource, (start), (n), (name))
+
+extern struct resource * __devm_request_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n, const char *name);
+
+#define devm_release_region(dev, start, n) \
+ __devm_release_region(dev, &ioport_resource, (start), (n))
+#define devm_release_mem_region(dev, start, n) \
+ __devm_release_region(dev, &iomem_resource, (start), (n))
+
+extern void __devm_release_region(struct device *dev, struct resource *parent,
+ resource_size_t start, resource_size_t n);
+extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size);
+extern int iomem_is_exclusive(u64 addr);
+
+#endif /* __ASSEMBLY__ */
+#endif /* _LINUX_IOPORT_H */
diff --git a/libdde-linux26/include/linux/jiffies.h b/libdde-linux26/include/linux/jiffies.h
new file mode 100644
index 00000000..93169b5a
--- /dev/null
+++ b/libdde-linux26/include/linux/jiffies.h
@@ -0,0 +1,322 @@
+#ifndef _LINUX_JIFFIES_H
+#define _LINUX_JIFFIES_H
+
+#include <linux/math64.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <asm/param.h> /* for HZ */
+
+/*
+ * The following defines establish the engineering parameters of the PLL
+ * model. The HZ variable establishes the timer interrupt frequency, 100 Hz
+ * for the SunOS kernel, 256 Hz for the Ultrix kernel and 1024 Hz for the
+ * OSF/1 kernel. The SHIFT_HZ define expresses the same value as the
+ * nearest power of two in order to avoid hardware multiply operations.
+ */
+#ifndef DDE_LINUX
+#if HZ >= 12 && HZ < 24
+# define SHIFT_HZ 4
+#elif HZ >= 24 && HZ < 48
+# define SHIFT_HZ 5
+#elif HZ >= 48 && HZ < 96
+# define SHIFT_HZ 6
+#elif HZ >= 96 && HZ < 192
+# define SHIFT_HZ 7
+#elif HZ >= 192 && HZ < 384
+# define SHIFT_HZ 8
+#elif HZ >= 384 && HZ < 768
+# define SHIFT_HZ 9
+#elif HZ >= 768 && HZ < 1536
+# define SHIFT_HZ 10
+#elif HZ >= 1536 && HZ < 3072
+# define SHIFT_HZ 11
+#elif HZ >= 3072 && HZ < 6144
+# define SHIFT_HZ 12
+#elif HZ >= 6144 && HZ < 12288
+# define SHIFT_HZ 13
+#else
+# error Invalid value of HZ.
+#endif
+
+/* LATCH is used in the interval timer and ftape setup. */
+#define LATCH ((CLOCK_TICK_RATE + HZ/2) / HZ) /* For divider */
+
+#else
+
+/* HZ is 100 in DDE */
+#define SHIFT_HZ 8
+#define LATCH ((CLOCK_TICK_RATE + 50) / 50)
+#define LATCH_HPET ((HPET_TICK_RATE + 50) / 50)
+#endif /* DDE_LINUX */
+
+/* Suppose we want to divide two numbers NOM and DEN: NOM/DEN, then we can
+ * improve accuracy by shifting LSH bits, hence calculating:
+ * (NOM << LSH) / DEN
+ * This however means trouble for large NOM, because (NOM << LSH) may no
+ * longer fit in 32 bits. The following way of calculating this gives us
+ * some slack, under the following conditions:
+ * - (NOM / DEN) fits in (32 - LSH) bits.
+ * - (NOM % DEN) fits in (32 - LSH) bits.
+ */
+#define SH_DIV(NOM,DEN,LSH) ( (((NOM) / (DEN)) << (LSH)) \
+ + ((((NOM) % (DEN)) << (LSH)) + (DEN) / 2) / (DEN))
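+/*
+ * Worked example (editor's addition): SH_DIV(40000000, 3, 8) evaluates to
+ * (13333333 << 8) + (((1 << 8) + 1) / 3) = 3413333248 + 85 = 3413333333,
+ * which still fits in 32 bits, whereas the naive (40000000 << 8) / 3 would
+ * overflow a u32 before the division even happens.
+ */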
+
+/* HZ is the requested value. ACTHZ is actual HZ ("<< 8" is for accuracy) */
+#define ACTHZ (SH_DIV (CLOCK_TICK_RATE, LATCH, 8))
+
+/* TICK_NSEC is the time between ticks in nsec assuming real ACTHZ */
+#define TICK_NSEC (SH_DIV (1000000UL * 1000, ACTHZ, 8))
+
+/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
+#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
+
+/* TICK_USEC_TO_NSEC is the time between ticks in nsec assuming real ACTHZ and */
+/* a value TUSEC for TICK_USEC (can be set by adjtimex) */
+#define TICK_USEC_TO_NSEC(TUSEC) (SH_DIV (TUSEC * USER_HZ * 1000, ACTHZ, 8))
+
+/* some arches have a small-data section that can be accessed register-relative
+ * but that can only take up to, say, 4-byte variables. jiffies being part of
+ * an 8-byte variable may not be correctly accessed unless we force the issue
+ */
+#define __jiffy_data __attribute__((section(".data")))
+
+/*
+ * The 64-bit value is not atomic - you MUST NOT read it
+ * without sampling the sequence number in xtime_lock.
+ * get_jiffies_64() will do this for you as appropriate.
+ */
+extern u64 __jiffy_data jiffies_64;
+extern unsigned long volatile __jiffy_data jiffies;
+
+#if (BITS_PER_LONG < 64)
+u64 get_jiffies_64(void);
+#else
+static inline u64 get_jiffies_64(void)
+{
+ return (u64)jiffies;
+}
+#endif
+
+/*
+ * These inlines deal with timer wrapping correctly. You are
+ * strongly encouraged to use them:
+ * 1. Because people otherwise forget
+ * 2. Because if the timer wrap changes in future you won't have to
+ * alter your driver code.
+ *
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+ ((long)(b) - (long)(a) < 0))
+#define time_before(a,b) time_after(b,a)
+
+#define time_after_eq(a,b) \
+ (typecheck(unsigned long, a) && \
+ typecheck(unsigned long, b) && \
+ ((long)(a) - (long)(b) >= 0))
+#define time_before_eq(a,b) time_after_eq(b,a)
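+/*
+ * Usage sketch (editor's illustration): a bounded polling loop.  The signed
+ * subtraction inside time_after() keeps the comparison correct even when
+ * jiffies wraps between the two samples.  device_ready() is a hypothetical
+ * driver predicate.
+ *
+ *	unsigned long timeout = jiffies + HZ / 10;	(about 100 ms)
+ *	while (!device_ready()) {
+ *		if (time_after(jiffies, timeout))
+ *			return -ETIMEDOUT;
+ *		cpu_relax();
+ *	}
+ */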
+
+/*
+ * Calculate whether a is in the range of [b, c].
+ */
+#define time_in_range(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before_eq(a,c))
+
+/*
+ * Calculate whether a is in the range of [b, c).
+ */
+#define time_in_range_open(a,b,c) \
+ (time_after_eq(a,b) && \
+ time_before(a,c))
+
+/* Same as above, but does so with platform independent 64bit types.
+ * These must be used when utilizing jiffies_64 (i.e. the return value of
+ * get_jiffies_64()) */
+#define time_after64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(b) - (__s64)(a) < 0))
+#define time_before64(a,b) time_after64(b,a)
+
+#define time_after_eq64(a,b) \
+ (typecheck(__u64, a) && \
+ typecheck(__u64, b) && \
+ ((__s64)(a) - (__s64)(b) >= 0))
+#define time_before_eq64(a,b) time_after_eq64(b,a)
+
+/*
+ * These four macros compare jiffies and 'a' for convenience.
+ */
+
+/* time_is_before_jiffies(a) returns true if a is before jiffies */
+#define time_is_before_jiffies(a) time_after(jiffies, a)
+
+/* time_is_after_jiffies(a) returns true if a is after jiffies */
+#define time_is_after_jiffies(a) time_before(jiffies, a)
+
+/* time_is_before_eq_jiffies(a) returns true if a is before or equal to jiffies */
+#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+
+/* time_is_after_eq_jiffies(a) returns true if a is after or equal to jiffies */
+#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+
+/*
+ * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * so jiffies wrap bugs show up earlier.
+ */
+#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
+
+/*
+ * Change timeval to jiffies, trying to avoid the
+ * most obvious overflows..
+ *
+ * And some not so obvious.
+ *
+ * Note that we don't want to return LONG_MAX, because
+ * for various timeout reasons we often end up having
+ * to wait "jiffies+1" in order to guarantee that we wait
+ * at _least_ "jiffies" - so "jiffies+1" had better still
+ * be positive.
+ */
+#define MAX_JIFFY_OFFSET ((LONG_MAX >> 1)-1)
+
+extern unsigned long preset_lpj;
+
+/*
+ * We want to do realistic conversions of time so we need to use the same
+ * values the update wall clock code uses as the jiffies size. This value
+ * is: TICK_NSEC (defined above in this header). This
+ * is a constant and is in nanoseconds. We will use scaled math
+ * with a set of scales defined here as SEC_JIFFIE_SC, USEC_JIFFIE_SC and
+ * NSEC_JIFFIE_SC. Note that these defines contain nothing but
+ * constants and so are computed at compile time. SHIFT_HZ (computed in
+ * timex.h) adjusts the scaling for different HZ values.
+
+ * Scaled math??? What is that?
+ *
+ * Scaled math is a way to do integer math on values that would,
+ * otherwise, either overflow, underflow, or cause undesired div
+ * instructions to appear in the execution path. In short, we "scale"
+ * up the operands so they take more bits (more precision, less
+ * underflow), do the desired operation and then "scale" the result back
+ * by the same amount. If we do the scaling by shifting we avoid the
+ * costly mpy and the dastardly div instructions.
+
+ * Suppose, for example, we want to convert from seconds to jiffies
+ * where jiffies is defined in nanoseconds as NSEC_PER_JIFFIE. The
+ * simple math is: jiff = (sec * NSEC_PER_SEC) / NSEC_PER_JIFFIE; We
+ * observe that (NSEC_PER_SEC / NSEC_PER_JIFFIE) is a constant which we
+ * might calculate at compile time, however, the result will only have
+ * about 3-4 bits of precision (less for smaller values of HZ).
+ *
+ * So, we scale as follows:
+ * jiff = (sec) * (NSEC_PER_SEC / NSEC_PER_JIFFIE);
+ * jiff = ((sec) * ((NSEC_PER_SEC * SCALE)/ NSEC_PER_JIFFIE)) / SCALE;
+ * Then we make SCALE a power of two so:
+ * jiff = ((sec) * ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE)) >> SCALE;
+ * Now we define:
+ * #define SEC_CONV = ((NSEC_PER_SEC << SCALE)/ NSEC_PER_JIFFIE))
+ * jiff = (sec * SEC_CONV) >> SCALE;
+ *
+ * Often the math we use will expand beyond 32-bits so we tell C how to
+ * do this and pass the 64-bit result of the mpy through the ">> SCALE"
+ * which should take the result back to 32-bits. We want this expansion
+ * to capture as much precision as possible. At the same time we don't
+ * want to overflow so we pick the SCALE to avoid this. In this file,
+ * that means using a different scale for each range of HZ values (as
+ * defined in timex.h).
+ *
+ * For those who want to know, gcc will give a 64-bit result from a "*"
+ * operator if the result is a long long AND at least one of the
+ * operands is cast to long long (usually just prior to the "*" so as
+ * not to confuse it into thinking it really has a 64-bit operand,
+ * which, by the way, it can do, but it takes more code and at least 2
+ * mpys).
+
+ * We also need to be aware that one second in nanoseconds is only a
+ * couple of bits away from overflowing a 32-bit word, so we MUST use
+ * 64-bits to get the full range time in nanoseconds.
+
+ */
+
+/*
+ * Here are the scales we will use. One for seconds, nanoseconds and
+ * microseconds.
+ *
+ * Within the limits of cpp we do a rough cut at the SEC_JIFFIE_SC and
+ * check if the sign bit is set. If not, we bump the shift count by 1.
+ * (Gets an extra bit of precision where we can use it.)
+ * We know it is set for HZ = 1024 and HZ = 100, not for 1000.
+ * Haven't tested others.
+
+ * Limits of cpp (for #if expressions): only long (no long long), but
+ * then we only need the most significant bit.
+ */
+
+#define SEC_JIFFIE_SC (31 - SHIFT_HZ)
+#if !((((NSEC_PER_SEC << 2) / TICK_NSEC) << (SEC_JIFFIE_SC - 2)) & 0x80000000)
+#undef SEC_JIFFIE_SC
+#define SEC_JIFFIE_SC (32 - SHIFT_HZ)
+#endif
+#define NSEC_JIFFIE_SC (SEC_JIFFIE_SC + 29)
+#define USEC_JIFFIE_SC (SEC_JIFFIE_SC + 19)
+#define SEC_CONVERSION ((unsigned long)((((u64)NSEC_PER_SEC << SEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+
+#define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
+#define USEC_CONVERSION \
+ ((unsigned long)((((u64)NSEC_PER_USEC << USEC_JIFFIE_SC) +\
+ TICK_NSEC -1) / (u64)TICK_NSEC))
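+/*
+ * Editor's sketch of how these constants are meant to be used (the real
+ * conversion helpers live in kernel/time.c; this is a simplified
+ * illustration with a hypothetical name):
+ *
+ *	static inline unsigned long secs_to_jiffies_sketch(unsigned long sec)
+ *	{
+ *		return (unsigned long)(((u64)sec * SEC_CONVERSION)
+ *						>> SEC_JIFFIE_SC);
+ *	}
+ *
+ * One 64-bit multiply by a precomputed scaled constant plus a shift,
+ * instead of a run-time divide by TICK_NSEC.
+ */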
+/*
+ * USEC_ROUND is used in the timeval to jiffie conversion. See there
+ * for more details. It is the scaled resolution rounding value. Note
+ * that it is a 64-bit value. Since, when it is applied, we are already
+ * in jiffies (albeit scaled), it is nothing but the bits we will shift
+ * off.
+ */
+#define USEC_ROUND (u64)(((u64)1 << USEC_JIFFIE_SC) - 1)
+/*
+ * The maximum jiffie value is (MAX_INT >> 1). Here we translate that
+ * into seconds. The 64-bit case will overflow if we are not careful,
+ * so use the messy SH_DIV macro to do it. Still all constants.
+ */
+#if BITS_PER_LONG < 64
+# define MAX_SEC_IN_JIFFIES \
+ (long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
+#else /* take care of overflow on 64 bits machines */
+# define MAX_SEC_IN_JIFFIES \
+ (SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
+
+#endif
+
+/*
+ * Convert various time units to each other:
+ */
+extern unsigned int jiffies_to_msecs(const unsigned long j);
+extern unsigned int jiffies_to_usecs(const unsigned long j);
+extern unsigned long msecs_to_jiffies(const unsigned int m);
+extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long timespec_to_jiffies(const struct timespec *value);
+extern void jiffies_to_timespec(const unsigned long jiffies,
+ struct timespec *value);
+extern unsigned long timeval_to_jiffies(const struct timeval *value);
+extern void jiffies_to_timeval(const unsigned long jiffies,
+ struct timeval *value);
+extern clock_t jiffies_to_clock_t(long x);
+extern unsigned long clock_t_to_jiffies(unsigned long x);
+extern u64 jiffies_64_to_clock_t(u64 x);
+extern u64 nsec_to_clock_t(u64 x);
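+/*
+ * Usage sketch (editor's illustration): arming a timer a quarter of a
+ * second from now, independent of the configured HZ.  my_timer is a
+ * hypothetical struct timer_list set up elsewhere.
+ *
+ *	mod_timer(&my_timer, jiffies + msecs_to_jiffies(250));
+ */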
+
+#define TIMESTAMP_SIZE 30
+
+#endif
diff --git a/libdde-linux26/include/linux/kernel.h b/libdde-linux26/include/linux/kernel.h
new file mode 100644
index 00000000..573ed07c
--- /dev/null
+++ b/libdde-linux26/include/linux/kernel.h
@@ -0,0 +1,554 @@
+#ifndef _LINUX_KERNEL_H
+#define _LINUX_KERNEL_H
+
+/*
+ * 'kernel.h' contains some often-used function prototypes etc
+ */
+
+#ifdef __KERNEL__
+
+#include <stdarg.h>
+#include <linux/linkage.h>
+#include <linux/stddef.h>
+#include <linux/types.h>
+#include <linux/compiler.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/typecheck.h>
+#include <linux/ratelimit.h>
+#include <linux/dynamic_printk.h>
+#include <asm/byteorder.h>
+#include <asm/bug.h>
+
+extern const char linux_banner[];
+extern const char linux_proc_banner[];
+
+#define USHORT_MAX ((u16)(~0U))
+#define SHORT_MAX ((s16)(USHORT_MAX>>1))
+#define SHORT_MIN (-SHORT_MAX - 1)
+#define INT_MAX ((int)(~0U>>1))
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL>>1))
+#define LONG_MIN (-LONG_MAX - 1)
+#define ULONG_MAX (~0UL)
+#define LLONG_MAX ((long long)(~0ULL>>1))
+#define LLONG_MIN (-LLONG_MAX - 1)
+#define ULLONG_MAX (~0ULL)
+
+#define STACK_MAGIC 0xdeadbeef
+
+#define ALIGN(x,a) __ALIGN_MASK(x,(typeof(x))(a)-1)
+#define __ALIGN_MASK(x,mask) (((x)+(mask))&~(mask))
+#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
+#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
+
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))
+#define DIV_ROUND_CLOSEST(x, divisor)( \
+{ \
+ typeof(divisor) __divisor = divisor; \
+ (((x) + ((__divisor) / 2)) / (__divisor)); \
+} \
+)
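+/*
+ * Worked example (editor's addition): with n = 10 and d = 4, plain 10/4
+ * truncates to 2, DIV_ROUND_UP(10, 4) = (10 + 3)/4 = 3,
+ * roundup(10, 4) = 12, and DIV_ROUND_CLOSEST(10, 4) = (10 + 2)/4 = 3.
+ */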
+
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+
+#ifdef CONFIG_LBD
+# include <asm/div64.h>
+# define sector_div(a, b) do_div(a, b)
+#else
+# define sector_div(n, b)( \
+{ \
+ int _res; \
+ _res = (n) % (b); \
+ (n) /= (b); \
+ _res; \
+} \
+)
+#endif
+
+/**
+ * upper_32_bits - return bits 32-63 of a number
+ * @n: the number we're accessing
+ *
+ * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
+ * the "right shift count >= width of type" warning when that quantity is
+ * 32-bits.
+ */
+#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))
+
+/**
+ * lower_32_bits - return bits 0-31 of a number
+ * @n: the number we're accessing
+ */
+#define lower_32_bits(n) ((u32)(n))
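+/*
+ * Usage sketch (editor's illustration): programming a 64-bit DMA address
+ * into a pair of 32-bit device registers.  The iomem base and register
+ * offsets are hypothetical.
+ *
+ *	writel(lower_32_bits(dma_addr), base + REG_ADDR_LO);
+ *	writel(upper_32_bits(dma_addr), base + REG_ADDR_HI);
+ */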
+
+#define KERN_EMERG "<0>" /* system is unusable */
+#define KERN_ALERT "<1>" /* action must be taken immediately */
+#define KERN_CRIT "<2>" /* critical conditions */
+#define KERN_ERR "<3>" /* error conditions */
+#define KERN_WARNING "<4>" /* warning conditions */
+#define KERN_NOTICE "<5>" /* normal but significant condition */
+#define KERN_INFO "<6>" /* informational */
+#define KERN_DEBUG "<7>" /* debug-level messages */
+
+/*
+ * Annotation for a "continued" line of log printout (only done after a
+ * line that had no enclosing \n). Only to be used by core/arch code
+ * during early bootup (a continued line is not SMP-safe otherwise).
+ */
+#define KERN_CONT ""
+
+extern int console_printk[];
+
+#define console_loglevel (console_printk[0])
+#define default_message_loglevel (console_printk[1])
+#define minimum_console_loglevel (console_printk[2])
+#define default_console_loglevel (console_printk[3])
+
+struct completion;
+struct pt_regs;
+struct user;
+
+#ifdef CONFIG_PREEMPT_VOLUNTARY
+extern int _cond_resched(void);
+# define might_resched() _cond_resched()
+#else
+# define might_resched() do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
+ void __might_sleep(char *file, int line);
+/**
+ * might_sleep - annotation for functions that can sleep
+ *
+ * this macro will print a stack trace if it is executed in an atomic
+ * context (spinlock, irq-handler, ...).
+ *
+ * This is a useful debugging help to be able to catch problems early and not
+ * be bitten later when the calling function happens to sleep when it is not
+ * supposed to.
+ */
+# define might_sleep() \
+ do { __might_sleep(__FILE__, __LINE__); might_resched(); } while (0)
+#else
+# define might_sleep() do { might_resched(); } while (0)
+#endif
+
+#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+
+#define abs(x) ({ \
+ int __x = (x); \
+ (__x < 0) ? -__x : __x; \
+ })
+
+#ifdef CONFIG_PROVE_LOCKING
+void might_fault(void);
+#else
+static inline void might_fault(void)
+{
+ might_sleep();
+}
+#endif
+
+extern struct atomic_notifier_head panic_notifier_list;
+extern long (*panic_blink)(long time);
+#ifndef DDE_LINUX
+NORET_TYPE void panic(const char * fmt, ...)
+ __attribute__ ((NORET_AND format (printf, 1, 2))) __cold;
+#else
+#include <ddekit/panic.h>
+#define panic ddekit_panic
+#endif
+extern void oops_enter(void);
+extern void oops_exit(void);
+extern int oops_may_print(void);
+NORET_TYPE void do_exit(long error_code)
+ ATTRIB_NORET;
+NORET_TYPE void complete_and_exit(struct completion *, long)
+ ATTRIB_NORET;
+extern unsigned long simple_strtoul(const char *,char **,unsigned int);
+extern long simple_strtol(const char *,char **,unsigned int);
+extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
+extern long long simple_strtoll(const char *,char **,unsigned int);
+extern int strict_strtoul(const char *, unsigned int, unsigned long *);
+extern int strict_strtol(const char *, unsigned int, long *);
+extern int strict_strtoull(const char *, unsigned int, unsigned long long *);
+extern int strict_strtoll(const char *, unsigned int, long long *);
+extern int sprintf(char * buf, const char * fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int vsprintf(char *buf, const char *, va_list)
+ __attribute__ ((format (printf, 2, 0)));
+extern int snprintf(char * buf, size_t size, const char * fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+extern int scnprintf(char * buf, size_t size, const char * fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+extern char *kasprintf(gfp_t gfp, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+
+extern int sscanf(const char *, const char *, ...)
+ __attribute__ ((format (scanf, 2, 3)));
+extern int vsscanf(const char *, const char *, va_list)
+ __attribute__ ((format (scanf, 2, 0)));
+
+extern int get_option(char **str, int *pint);
+extern char *get_options(const char *str, int nints, int *ints);
+extern unsigned long long memparse(const char *ptr, char **retptr);
+
+extern int core_kernel_text(unsigned long addr);
+extern int __kernel_text_address(unsigned long addr);
+extern int kernel_text_address(unsigned long addr);
+extern int func_ptr_is_kernel_text(void *ptr);
+
+struct pid;
+extern struct pid *session_of_pgrp(struct pid *pgrp);
+
+/*
+ * FW_BUG
+ * Add this to a message where you are sure the firmware is buggy or behaves
+ * really stupidly or out of spec. Be aware that the responsible BIOS developer
+ * should be able to fix this issue or at least get a concrete idea of the
+ * problem by reading your message without needing to look at the kernel
+ * code.
+ *
+ * Use it for definite and high priority BIOS bugs.
+ *
+ * FW_WARN
+ * Use it for less clear-cut cases (e.g. could the kernel have messed things
+ * up already?) and medium priority BIOS bugs.
+ *
+ * FW_INFO
+ * Use this one if you want to tell the user or vendor about something
+ * suspicious, but generally harmless related to the firmware.
+ *
+ * Use it for information or very low priority BIOS bugs.
+ */
+#define FW_BUG "[Firmware Bug]: "
+#define FW_WARN "[Firmware Warn]: "
+#define FW_INFO "[Firmware Info]: "
+
+#ifdef CONFIG_PRINTK
+#ifndef DDE_LINUX
+asmlinkage int vprintk(const char *fmt, va_list args)
+ __attribute__ ((format (printf, 1, 0)));
+asmlinkage int printk(const char * fmt, ...)
+ __attribute__ ((format (printf, 1, 2))) __cold;
+
+extern struct ratelimit_state printk_ratelimit_state;
+extern int printk_ratelimit(void);
+extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+ unsigned int interval_msec);
+#else // DDE_LINUX
+#include <ddekit/printf.h>
+#define printk ddekit_printf
+#endif
+#else
+static inline int vprintk(const char *s, va_list args)
+ __attribute__ ((format (printf, 1, 0)));
+static inline int vprintk(const char *s, va_list args) { return 0; }
+static inline int printk(const char *s, ...)
+ __attribute__ ((format (printf, 1, 2)));
+static inline int __cold printk(const char *s, ...) { return 0; }
+static inline int printk_ratelimit(void) { return 0; }
+static inline bool printk_timed_ratelimit(unsigned long *caller_jiffies,
+			unsigned int interval_msec)
+	{ return false; }
+#endif
+
+extern int printk_needs_cpu(int cpu);
+extern void printk_tick(void);
+
+extern void asmlinkage __attribute__((format(printf, 1, 2)))
+ early_printk(const char *fmt, ...);
+
+unsigned long int_sqrt(unsigned long);
+
+static inline void console_silent(void)
+{
+ console_loglevel = 0;
+}
+
+static inline void console_verbose(void)
+{
+ if (console_loglevel)
+ console_loglevel = 15;
+}
+
+extern void bust_spinlocks(int yes);
+extern void wake_up_klogd(void);
+extern int oops_in_progress; /* If set, an oops, panic(), BUG() or die() is in progress */
+extern int panic_timeout;
+extern int panic_on_oops;
+extern int panic_on_unrecovered_nmi;
+extern const char *print_tainted(void);
+extern void add_taint(unsigned flag);
+extern int test_taint(unsigned flag);
+extern unsigned long get_taint(void);
+extern int root_mountflags;
+
+/* Values used for system_state */
+extern enum system_states {
+ SYSTEM_BOOTING,
+ SYSTEM_RUNNING,
+ SYSTEM_HALT,
+ SYSTEM_POWER_OFF,
+ SYSTEM_RESTART,
+ SYSTEM_SUSPEND_DISK,
+} system_state;
+
+#define TAINT_PROPRIETARY_MODULE 0
+#define TAINT_FORCED_MODULE 1
+#define TAINT_UNSAFE_SMP 2
+#define TAINT_FORCED_RMMOD 3
+#define TAINT_MACHINE_CHECK 4
+#define TAINT_BAD_PAGE 5
+#define TAINT_USER 6
+#define TAINT_DIE 7
+#define TAINT_OVERRIDDEN_ACPI_TABLE 8
+#define TAINT_WARN 9
+#define TAINT_CRAP 10
+
+extern void dump_stack(void) __cold;
+
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+extern void hex_dump_to_buffer(const void *buf, size_t len,
+ int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii);
+extern void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+extern void print_hex_dump_bytes(const char *prefix_str, int prefix_type,
+ const void *buf, size_t len);
+
+extern const char hex_asc[];
+#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
+#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
+
+static inline char *pack_hex_byte(char *buf, u8 byte)
+{
+ *buf++ = hex_asc_hi(byte);
+ *buf++ = hex_asc_lo(byte);
+ return buf;
+}
+
+#ifndef pr_fmt
+#define pr_fmt(fmt) fmt
+#endif
+
+#define pr_emerg(fmt, ...) \
+ printk(KERN_EMERG pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_alert(fmt, ...) \
+ printk(KERN_ALERT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_crit(fmt, ...) \
+ printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_err(fmt, ...) \
+ printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_warning(fmt, ...) \
+ printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_notice(fmt, ...) \
+ printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_info(fmt, ...) \
+ printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
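+/*
+ * Usage sketch (editor's illustration): a driver can prefix all of its
+ * messages by defining pr_fmt before any #includes:
+ *
+ *	#define pr_fmt(fmt) "mydrv: " fmt
+ *	...
+ *	pr_err("reset failed (%d)\n", err);
+ *
+ * which expands to printk(KERN_ERR "mydrv: reset failed (%d)\n", err).
+ */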
+
+#if defined(DEBUG)
+#ifndef DDE_LINUX
+/* If you are writing a driver, please use dev_dbg instead */
+#define pr_debug(fmt, arg...) \
+ printk(KERN_DEBUG fmt, ##arg)
+#else
+#define pr_debug(fmt, ...) printk("%s: \033[34m"fmt"\033[0m", __FUNCTION__, ##__VA_ARGS__)
+#endif
+#else
+#define pr_debug(fmt, ...) \
+ ({ if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); 0; })
+#endif
+
+/*
+ * Display an IP address in readable format.
+ */
+
+#define NIPQUAD(addr) \
+ ((unsigned char *)&addr)[0], \
+ ((unsigned char *)&addr)[1], \
+ ((unsigned char *)&addr)[2], \
+ ((unsigned char *)&addr)[3]
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+
+#if defined(__LITTLE_ENDIAN)
+#define HIPQUAD(addr) \
+ ((unsigned char *)&addr)[3], \
+ ((unsigned char *)&addr)[2], \
+ ((unsigned char *)&addr)[1], \
+ ((unsigned char *)&addr)[0]
+#elif defined(__BIG_ENDIAN)
+#define HIPQUAD NIPQUAD
+#else
+#error "Please fix asm/byteorder.h"
+#endif /* __LITTLE_ENDIAN */
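+/*
+ * Usage sketch (editor's illustration): printing an IPv4 address held in a
+ * 32-bit variable in network byte order.  ipaddr is hypothetical.
+ *
+ *	printk(KERN_INFO "peer at " NIPQUAD_FMT "\n", NIPQUAD(ipaddr));
+ */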
+
+/*
+ * min()/max()/clamp() macros that also do
+ * strict type-checking.. See the
+ * "unnecessary" pointer comparison.
+ */
+#define min(x, y) ({ \
+ typeof(x) _min1 = (x); \
+ typeof(y) _min2 = (y); \
+ (void) (&_min1 == &_min2); \
+ _min1 < _min2 ? _min1 : _min2; })
+
+#define max(x, y) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ (void) (&_max1 == &_max2); \
+ _max1 > _max2 ? _max1 : _max2; })
+
+/**
+ * clamp - return a value clamped to a given range with strict typechecking
+ * @val: current value
+ * @min: minimum allowable value
+ * @max: maximum allowable value
+ *
+ * This macro does strict typechecking of min/max to make sure they are of the
+ * same type as val. See the unnecessary pointer comparisons.
+ */
+#define clamp(val, min, max) ({ \
+ typeof(val) __val = (val); \
+ typeof(min) __min = (min); \
+ typeof(max) __max = (max); \
+ (void) (&__val == &__min); \
+ (void) (&__val == &__max); \
+ __val = __val < __min ? __min: __val; \
+ __val > __max ? __max: __val; })
+
+/*
+ * ..and if you can't take the strict
+ * types, you can specify one yourself.
+ *
+ * Or not use min/max/clamp at all, of course.
+ */
+#define min_t(type, x, y) ({ \
+ type __min1 = (x); \
+ type __min2 = (y); \
+ __min1 < __min2 ? __min1: __min2; })
+
+#define max_t(type, x, y) ({ \
+ type __max1 = (x); \
+ type __max2 = (y); \
+ __max1 > __max2 ? __max1: __max2; })
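+/*
+ * Editor's illustration (not in the original): mixing types trips the
+ * pointer-comparison check in min(), while min_t() side-steps it by
+ * casting both operands explicitly:
+ *
+ *	int a = 3;
+ *	size_t b = 4;
+ *	min(a, b);		(warns: comparison of distinct pointer types)
+ *	min_t(size_t, a, b);	(fine: both operands become size_t)
+ */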
+
+/**
+ * clamp_t - return a value clamped to a given range using a given type
+ * @type: the type of variable to use
+ * @val: current value
+ * @min: minimum allowable value
+ * @max: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of type
+ * 'type' to make all the comparisons.
+ */
+#define clamp_t(type, val, min, max) ({ \
+ type __val = (val); \
+ type __min = (min); \
+ type __max = (max); \
+ __val = __val < __min ? __min: __val; \
+ __val > __max ? __max: __val; })
+
+/**
+ * clamp_val - return a value clamped to a given range using val's type
+ * @val: current value
+ * @min: minimum allowable value
+ * @max: maximum allowable value
+ *
+ * This macro does no typechecking and uses temporary variables of whatever
+ * type the input argument 'val' is. This is useful when val is an unsigned
+ * type and min and max are literals that will otherwise be assigned a signed
+ * integer type.
+ */
+#define clamp_val(val, min, max) ({ \
+ typeof(val) __val = (val); \
+ typeof(val) __min = (min); \
+ typeof(val) __max = (max); \
+ __val = __val < __min ? __min: __val; \
+ __val > __max ? __max: __val; })
+
+
+/*
+ * swap - swap value of @a and @b
+ */
+#define swap(a, b) \
+ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0)
+
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr: the pointer to the member.
+ * @type: the type of the container struct this is embedded in.
+ * @member: the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({ \
+ const typeof( ((type *)0)->member ) *__mptr = (ptr); \
+ (type *)( (char *)__mptr - offsetof(type,member) );})
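+/*
+ * Usage sketch (editor's illustration): recovering a driver structure from
+ * the address of an embedded member, e.g. in a callback that only receives
+ * the member.  The types below are hypothetical.
+ *
+ *	struct my_dev {
+ *		int unit;
+ *		struct timer_list timer;
+ *	};
+ *
+ *	struct my_dev *dev = container_of(timer_ptr, struct my_dev, timer);
+ */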
+
+struct sysinfo;
+extern int do_sysinfo(struct sysinfo *info);
+
+#endif /* __KERNEL__ */
+
+#define SI_LOAD_SHIFT 16
+struct sysinfo {
+ long uptime; /* Seconds since boot */
+ unsigned long loads[3]; /* 1, 5, and 15 minute load averages */
+ unsigned long totalram; /* Total usable main memory size */
+ unsigned long freeram; /* Available memory size */
+ unsigned long sharedram; /* Amount of shared memory */
+ unsigned long bufferram; /* Memory used by buffers */
+ unsigned long totalswap; /* Total swap space size */
+ unsigned long freeswap; /* swap space still available */
+ unsigned short procs; /* Number of current processes */
+ unsigned short pad; /* explicit padding for m68k */
+ unsigned long totalhigh; /* Total high memory size */
+ unsigned long freehigh; /* Available high memory size */
+ unsigned int mem_unit; /* Memory unit size in bytes */
+ char _f[20-2*sizeof(long)-sizeof(int)]; /* Padding: libc5 uses this.. */
+};
+
+/* Force a compilation error if condition is true */
+#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)]))
+
+/* Force a compilation error if condition is true, but also produce a
+ result (of value 0 and type size_t), so the expression can be used
+ e.g. in a structure initializer (or where-ever else comma expressions
+ aren't permitted). */
+#define BUILD_BUG_ON_ZERO(e) (sizeof(char[1 - 2 * !!(e)]) - 1)
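+/*
+ * Usage sketch (editor's illustration): failing the build if a structure
+ * no longer matches the wire format it is copied into:
+ *
+ *	BUILD_BUG_ON(sizeof(struct my_packet) != 64);
+ *
+ * The negative array size only materialises when the condition is true, so
+ * a passing check costs nothing at run time.
+ */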
+
+/* Trap pasters of __FUNCTION__ at compile-time */
+#define __FUNCTION__ (__func__)
+
+/* This helps us to avoid #ifdef CONFIG_NUMA */
+#ifdef CONFIG_NUMA
+#define NUMA_BUILD 1
+#else
+#define NUMA_BUILD 0
+#endif
+
+/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+#endif
+
+#endif
diff --git a/libdde-linux26/include/linux/kmalloc_sizes.h b/libdde-linux26/include/linux/kmalloc_sizes.h
new file mode 100644
index 00000000..d84b14d2
--- /dev/null
+++ b/libdde-linux26/include/linux/kmalloc_sizes.h
@@ -0,0 +1,51 @@
+#if (PAGE_SIZE == 4096)
+ CACHE(32)
+#endif
+ CACHE(64)
+#ifndef DDE_LINUX
+#if L1_CACHE_BYTES < 64
+ CACHE(96)
+#endif
+#endif /* DDE_LINUX */
+ CACHE(128)
+#ifndef DDE_LINUX
+#if L1_CACHE_BYTES < 128
+ CACHE(192)
+#endif
+#endif /* DDE_LINUX */
+ CACHE(256)
+ CACHE(512)
+ CACHE(1024)
+ CACHE(2048)
+#ifndef DDE_LINUX
+ CACHE(4096)
+ CACHE(8192)
+ CACHE(16384)
+ CACHE(32768)
+ CACHE(65536)
+ CACHE(131072)
+#if KMALLOC_MAX_SIZE >= 262144
+ CACHE(262144)
+#endif
+#if KMALLOC_MAX_SIZE >= 524288
+ CACHE(524288)
+#endif
+#if KMALLOC_MAX_SIZE >= 1048576
+ CACHE(1048576)
+#endif
+#if KMALLOC_MAX_SIZE >= 2097152
+ CACHE(2097152)
+#endif
+#if KMALLOC_MAX_SIZE >= 4194304
+ CACHE(4194304)
+#endif
+#if KMALLOC_MAX_SIZE >= 8388608
+ CACHE(8388608)
+#endif
+#if KMALLOC_MAX_SIZE >= 16777216
+ CACHE(16777216)
+#endif
+#if KMALLOC_MAX_SIZE >= 33554432
+ CACHE(33554432)
+#endif
+#endif /* DDE_LINUX */
diff --git a/libdde-linux26/include/linux/mm.h b/libdde-linux26/include/linux/mm.h
new file mode 100644
index 00000000..8f61963c
--- /dev/null
+++ b/libdde-linux26/include/linux/mm.h
@@ -0,0 +1,1348 @@
+#ifndef _LINUX_MM_H
+#define _LINUX_MM_H
+
+#include <linux/errno.h>
+
+#ifdef __KERNEL__
+
+#include <linux/gfp.h>
+#include <linux/list.h>
+#include <linux/mmdebug.h>
+#include <linux/mmzone.h>
+#include <linux/rbtree.h>
+#include <linux/prio_tree.h>
+#include <linux/debug_locks.h>
+#include <linux/mm_types.h>
+
+#ifdef DDE_LINUX
+#include <dde26.h>
+#endif
+
+struct mempolicy;
+struct anon_vma;
+struct file_ra_state;
+struct user_struct;
+struct writeback_control;
+
+#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
+extern unsigned long max_mapnr;
+#endif
+
+extern unsigned long num_physpages;
+extern void * high_memory;
+extern int page_cluster;
+
+#ifdef CONFIG_SYSCTL
+extern int sysctl_legacy_va_layout;
+#else
+#define sysctl_legacy_va_layout 0
+#endif
+
+extern unsigned long mmap_min_addr;
+
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+
+#define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+
+/*
+ * Linux kernel virtual memory manager primitives.
+ * The idea being to have a "virtual" mm in the same way
+ * we have a virtual fs - giving a cleaner interface to the
+ * mm details, and allowing different kinds of memory mappings
+ * (from shared memory to executable loading to arbitrary
+ * mmap() functions).
+ */
+
+extern struct kmem_cache *vm_area_cachep;
+
+#ifndef CONFIG_MMU
+extern struct rb_root nommu_region_tree;
+extern struct rw_semaphore nommu_region_sem;
+
+extern unsigned int kobjsize(const void *objp);
+#endif
+
+/*
+ * vm_flags in vm_area_struct, see mm_types.h.
+ */
+#define VM_READ 0x00000001 /* currently active flags */
+#define VM_WRITE 0x00000002
+#define VM_EXEC 0x00000004
+#define VM_SHARED 0x00000008
+
+/* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
+#define VM_MAYREAD 0x00000010 /* limits for mprotect() etc */
+#define VM_MAYWRITE 0x00000020
+#define VM_MAYEXEC 0x00000040
+#define VM_MAYSHARE 0x00000080
+
+#define VM_GROWSDOWN 0x00000100 /* general info on the segment */
+#define VM_GROWSUP 0x00000200
+#define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */
+#define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */
+
+#define VM_EXECUTABLE 0x00001000
+#define VM_LOCKED 0x00002000
+#define VM_IO 0x00004000 /* Memory mapped I/O or similar */
+
+ /* Used by sys_madvise() */
+#define VM_SEQ_READ 0x00008000 /* App will access data sequentially */
+#define VM_RAND_READ 0x00010000 /* App will not benefit from clustered reads */
+
+#define VM_DONTCOPY 0x00020000 /* Do not copy this vma on fork */
+#define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+#define VM_RESERVED 0x00080000 /* Count as reserved_vm like IO */
+#define VM_ACCOUNT 0x00100000 /* Is a VM accounted object */
+#define VM_NORESERVE 0x00200000 /* should the VM suppress accounting */
+#define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+#define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+#define VM_MAPPED_COPY 0x01000000 /* T if mapped copy of data (nommu mmap) */
+#define VM_INSERTPAGE 0x02000000 /* The vma has had "vm_insert_page()" done on it */
+#define VM_ALWAYSDUMP 0x04000000 /* Always include in core dumps */
+
+#define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+#define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+#define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
+
+#ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+#endif
+
+#ifdef CONFIG_STACK_GROWSUP
+#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#else
+#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#endif
+
+#define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
+#define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
+#define VM_NormalReadHint(v) (!((v)->vm_flags & VM_READHINTMASK))
+#define VM_SequentialReadHint(v) ((v)->vm_flags & VM_SEQ_READ)
+#define VM_RandomReadHint(v) ((v)->vm_flags & VM_RAND_READ)
+
+/*
+ * special vmas that are non-mergeable, non-mlock()able
+ */
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
+
+/*
+ * mapping from the currently active vm_flags protection bits (the
+ * low four bits) to a page protection mask..
+ */
+extern pgprot_t protection_map[16];
+
+#define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */
+#define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */
+
+/*
+ * This interface is used by x86 PAT code to identify a pfn mapping that is
+ * linear over the entire vma. This is to optimize PAT code that deals with
+ * marking the physical region with a particular prot. This is not for generic
+ * mm use. Note also that this check will not work if the pfn mapping is
+ * linear for a vma starting at physical address 0, in which case PAT code
+ * falls back to the slow path of reserving the physical range page by page.
+ */
+static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
+{
+ return ((vma->vm_flags & VM_PFNMAP) && vma->vm_pgoff);
+}
+
+static inline int is_pfn_mapping(struct vm_area_struct *vma)
+{
+ return (vma->vm_flags & VM_PFNMAP);
+}
+
+/*
+ * vm_fault is filled by the pagefault handler and passed to the vma's
+ * ->fault function. The vma's ->fault is responsible for returning a bitmask
+ * of VM_FAULT_xxx flags that give details about how the fault was handled.
+ *
+ * pgoff should be used in favour of virtual_address, if possible. If pgoff
+ * is used, one may set VM_CAN_NONLINEAR in the vma->vm_flags to get nonlinear
+ * mapping support.
+ */
+struct vm_fault {
+ unsigned int flags; /* FAULT_FLAG_xxx flags */
+ pgoff_t pgoff; /* Logical page offset based on vma */
+ void __user *virtual_address; /* Faulting virtual address */
+
+ struct page *page; /* ->fault handlers should return a
+ * page here, unless VM_FAULT_NOPAGE
+ * is set (which is also implied by
+ * VM_FAULT_ERROR).
+ */
+};
+
+/*
+ * These are the virtual MM functions - opening of an area, closing and
+ * unmapping it (needed to keep files on disk up-to-date etc), pointer
+ * to the functions called when a no-page or a wp-page exception occurs.
+ */
+struct vm_operations_struct {
+ void (*open)(struct vm_area_struct * area);
+ void (*close)(struct vm_area_struct * area);
+ int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+ /* notification that a previously read-only page is about to become
+ * writable, if an error is returned it will cause a SIGBUS */
+ int (*page_mkwrite)(struct vm_area_struct *vma, struct page *page);
+
+ /* called by access_process_vm when get_user_pages() fails, typically
+ * for use by special VMAs that can switch between memory and hardware
+ */
+ int (*access)(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+#ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
+ * to hold the policy upon return. Caller should pass NULL @new to
+ * remove a policy and fall back to surrounding context--i.e. do not
+ * install a MPOL_DEFAULT policy, nor the task or system default
+ * mempolicy.
+ */
+ int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
+
+ /*
+ * get_policy() op must add reference [mpol_get()] to any policy at
+ * (vma,addr) marked as MPOL_SHARED. The shared policy infrastructure
+ * in mm/mempolicy.c will do this automatically.
+ * get_policy() must NOT add a ref if the policy at (vma,addr) is not
+ * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
+ * If no [shared/vma] mempolicy exists at the addr, get_policy() op
+ * must return NULL--i.e., do not "fallback" to task or system default
+ * policy.
+ */
+ struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
+ unsigned long addr);
+ int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
+ const nodemask_t *to, unsigned long flags);
+#endif
+};
+
+struct mmu_gather;
+struct inode;
+
+#define page_private(page) ((page)->private)
+#define set_page_private(page, v) ((page)->private = (v))
+
+/*
+ * FIXME: take this include out, include page-flags.h in
+ * files which need it (119 of them)
+ */
+#include <linux/page-flags.h>
+
+/*
+ * Methods to modify the page usage count.
+ *
+ * What counts for a page usage:
+ * - cache mapping (page->mapping)
+ * - private data (page->private)
+ * - page mapped in a task's page tables, each mapping
+ * is counted separately
+ *
+ * Also, many kernel routines increase the page count before a critical
+ * routine so they can be sure the page doesn't go away from under them.
+ */
+
+/*
+ * Drop a ref, return true if the refcount fell to zero (the page has no users)
+ */
+static inline int put_page_testzero(struct page *page)
+{
+ VM_BUG_ON(atomic_read(&page->_count) == 0);
+ return atomic_dec_and_test(&page->_count);
+}
+
+/*
+ * Try to grab a ref unless the page has a refcount of zero, return false if
+ * that is the case.
+ */
+static inline int get_page_unless_zero(struct page *page)
+{
+ return atomic_inc_not_zero(&page->_count);
+}
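+/*
+ * Editor's sketch (not in the original): lockless lookups typically take a
+ * speculative reference and re-validate afterwards:
+ *
+ *	if (!get_page_unless_zero(page))
+ *		goto retry;	(the page was already on its way to being freed)
+ *	...use the page, then drop the reference with put_page(page)...
+ */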
+
+/* Support for virtually mapped pages */
+struct page *vmalloc_to_page(const void *addr);
+unsigned long vmalloc_to_pfn(const void *addr);
+
+/*
+ * Determine if an address is within the vmalloc range
+ *
+ * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
+ * is no special casing required.
+ */
+static inline int is_vmalloc_addr(const void *x)
+{
+#ifdef CONFIG_MMU
+ unsigned long addr = (unsigned long)x;
+
+ return addr >= VMALLOC_START && addr < VMALLOC_END;
+#else
+ return 0;
+#endif
+}
+
+static inline struct page *compound_head(struct page *page)
+{
+ if (unlikely(PageTail(page)))
+ return page->first_page;
+ return page;
+}
+
+static inline int page_count(struct page *page)
+{
+ return atomic_read(&compound_head(page)->_count);
+}
+
+static inline void get_page(struct page *page)
+{
+ page = compound_head(page);
+ VM_BUG_ON(atomic_read(&page->_count) == 0);
+ atomic_inc(&page->_count);
+}
+
+static inline struct page *virt_to_head_page(const void *x)
+{
+ struct page *page = virt_to_page(x);
+ return compound_head(page);
+}
+
+/*
+ * Setup the page count before being freed into the page allocator for
+ * the first time (boot or memory hotplug)
+ */
+static inline void init_page_count(struct page *page)
+{
+ atomic_set(&page->_count, 1);
+}
+
+void put_page(struct page *page);
+void put_pages_list(struct list_head *pages);
+
+void split_page(struct page *page, unsigned int order);
+
+/*
+ * Compound pages have a destructor function. Provide a
+ * prototype for that function and accessor functions.
+ * These are _only_ valid on the head of a PG_compound page.
+ */
+typedef void compound_page_dtor(struct page *);
+
+static inline void set_compound_page_dtor(struct page *page,
+ compound_page_dtor *dtor)
+{
+ page[1].lru.next = (void *)dtor;
+}
+
+static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
+{
+ return (compound_page_dtor *)page[1].lru.next;
+}
+
+static inline int compound_order(struct page *page)
+{
+ if (!PageHead(page))
+ return 0;
+ return (unsigned long)page[1].lru.prev;
+}
+
+static inline void set_compound_order(struct page *page, unsigned long order)
+{
+ page[1].lru.prev = (void *)order;
+}
+
+/*
+ * Multiple processes may "see" the same page. E.g. for untouched
+ * mappings of /dev/null, all processes see the same page full of
+ * zeroes, and text pages of executables and shared libraries have
+ * only one copy in memory, at most, normally.
+ *
+ * For the non-reserved pages, page_count(page) denotes a reference count.
+ * page_count() == 0 means the page is free. page->lru is then used for
+ * freelist management in the buddy allocator.
+ * page_count() > 0 means the page has been allocated.
+ *
+ * Pages are allocated by the slab allocator in order to provide memory
+ * to kmalloc and kmem_cache_alloc. In this case, the management of the
+ * page, and the fields in 'struct page' are the responsibility of mm/slab.c
+ * unless a particular usage is carefully commented. (the responsibility of
+ * freeing the kmalloc memory is the caller's, of course).
+ *
+ * A page may be used by anyone else who does a __get_free_page().
+ * In this case, page_count still tracks the references, and should only
+ * be used through the normal accessor functions. The top bits of page->flags
+ * and page->virtual store page management information, but all other fields
+ * are unused and could be used privately, carefully. The management of this
+ * page is the responsibility of the one who allocated it, and those who have
+ * subsequently been given references to it.
+ *
+ * The other pages (we may call them "pagecache pages") are completely
+ * managed by the Linux memory manager: I/O, buffers, swapping etc.
+ * The following discussion applies only to them.
+ *
+ * A pagecache page contains an opaque `private' member, which belongs to the
+ * page's address_space. Usually, this is the address of a circular list of
+ * the page's disk buffers. PG_private must be set to tell the VM to call
+ * into the filesystem to release these pages.
+ *
+ * A page may belong to an inode's memory mapping. In this case, page->mapping
+ * is the pointer to the inode, and page->index is the file offset of the page,
+ * in units of PAGE_CACHE_SIZE.
+ *
+ * If pagecache pages are not associated with an inode, they are said to be
+ * anonymous pages. These may become associated with the swapcache, and in that
+ * case PG_swapcache is set, and page->private is an offset into the swapcache.
+ *
+ * In either case (swapcache or inode backed), the pagecache itself holds one
+ * reference to the page. Setting PG_private should also increment the
+ * refcount. Each user mapping also has a reference to the page.
+ *
+ * The pagecache pages are stored in a per-mapping radix tree, which is
+ * rooted at mapping->page_tree, and indexed by offset.
+ * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
+ * lists, we instead now tag pages as dirty/writeback in the radix tree.
+ *
+ * All pagecache pages may be subject to I/O:
+ * - inode pages may need to be read from disk,
+ * - inode pages which have been modified and are MAP_SHARED may need
+ * to be written back to the inode on disk,
+ * - anonymous pages (including MAP_PRIVATE file mappings) which have been
+ * modified may need to be swapped out to swap space and (later) to be read
+ * back into memory.
+ */
+
+/*
+ * The zone field is never updated after free_area_init_core()
+ * sets it, so none of the operations on it need to be atomic.
+ */
+
+
+/*
+ * page->flags layout:
+ *
+ * There are three possibilities for how page->flags get
+ * laid out. The first is for the normal case, without
+ * sparsemem. The second is for sparsemem when there is
+ * plenty of space for node and section. The last is when
+ * we have run out of space and have to fall back to an
+ * alternate (slower) way of determining the node.
+ *
+ * No sparsemem or sparsemem vmemmap: | NODE | ZONE | ... | FLAGS |
+ * classic sparse with space for node:| SECTION | NODE | ZONE | ... | FLAGS |
+ * classic sparse no space for node: | SECTION | ZONE | ... | FLAGS |
+ */
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+#define SECTIONS_WIDTH SECTIONS_SHIFT
+#else
+#define SECTIONS_WIDTH 0
+#endif
+
+#define ZONES_WIDTH ZONES_SHIFT
+
+#if SECTIONS_WIDTH+ZONES_WIDTH+NODES_SHIFT <= BITS_PER_LONG - NR_PAGEFLAGS
+#define NODES_WIDTH NODES_SHIFT
+#else
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+#error "Vmemmap: No space for nodes field in page flags"
+#endif
+#define NODES_WIDTH 0
+#endif
+
+/* Page flags: | [SECTION] | [NODE] | ZONE | ... | FLAGS | */
+#define SECTIONS_PGOFF ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
+#define NODES_PGOFF (SECTIONS_PGOFF - NODES_WIDTH)
+#define ZONES_PGOFF (NODES_PGOFF - ZONES_WIDTH)
+
+/*
+ * We are going to use the flags for the page to node mapping if it's in
+ * there. This includes the case where there is no node, so it is implicit.
+ */
+#if !(NODES_WIDTH > 0 || NODES_SHIFT == 0)
+#define NODE_NOT_IN_PAGE_FLAGS
+#endif
+
+#ifndef PFN_SECTION_SHIFT
+#define PFN_SECTION_SHIFT 0
+#endif
+
+/*
+ * Define the bit shifts to access each section. For non-existent
+ * sections we define the shift as 0; that plus a 0 mask ensures
+ * the compiler will optimise away references to them.
+ */
+#define SECTIONS_PGSHIFT (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
+#define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0))
+#define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0))
+
+/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+#define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \
+ SECTIONS_PGOFF : ZONES_PGOFF)
+#else
+#define ZONEID_SHIFT (NODES_SHIFT + ZONES_SHIFT)
+#define ZONEID_PGOFF ((NODES_PGOFF < ZONES_PGOFF)? \
+ NODES_PGOFF : ZONES_PGOFF)
+#endif
+
+#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
+
+#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
+#endif
+
+#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
+#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
+#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
+#define ZONEID_MASK ((1UL << ZONEID_SHIFT) - 1)
+
+static inline enum zone_type page_zonenum(struct page *page)
+{
+ return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
+}
+
+/*
+ * The identification function is only used by the buddy allocator for
+ * determining if two pages could be buddies. We are not really
+ * identifying a zone since we could be using the section number
+ * id if no node id is available in page flags.
+ * We guarantee only that it will return the same value for two
+ * combinable pages in a zone.
+ */
+static inline int page_zone_id(struct page *page)
+{
+ return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
+}
+
+static inline int zone_to_nid(struct zone *zone)
+{
+#ifdef CONFIG_NUMA
+ return zone->node;
+#else
+ return 0;
+#endif
+}
+
+#ifdef NODE_NOT_IN_PAGE_FLAGS
+extern int page_to_nid(struct page *page);
+#else
+static inline int page_to_nid(struct page *page)
+{
+ return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
+}
+#endif
+
+static inline struct zone *page_zone(struct page *page)
+{
+ return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
+}
+
+#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
+static inline unsigned long page_to_section(struct page *page)
+{
+ return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
+}
+#endif
+
+static inline void set_page_zone(struct page *page, enum zone_type zone)
+{
+ page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
+ page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
+}
+
+static inline void set_page_node(struct page *page, unsigned long node)
+{
+ page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
+ page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
+}
+
+static inline void set_page_section(struct page *page, unsigned long section)
+{
+ page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
+ page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
+}
+
+static inline void set_page_links(struct page *page, enum zone_type zone,
+ unsigned long node, unsigned long pfn)
+{
+ set_page_zone(page, zone);
+ set_page_node(page, node);
+ set_page_section(page, pfn_to_section_nr(pfn));
+}
+
+/*
+ * If a hint addr is less than mmap_min_addr, change the hint to be as
+ * low as possible but still greater than mmap_min_addr.
+ */
+static inline unsigned long round_hint_to_min(unsigned long hint)
+{
+#ifdef CONFIG_SECURITY
+ hint &= PAGE_MASK;
+ if (((void *)hint != NULL) &&
+ (hint < mmap_min_addr))
+ return PAGE_ALIGN(mmap_min_addr);
+#endif
+ return hint;
+}
+
+/*
+ * Some inline functions in vmstat.h depend on page_zone()
+ */
+#include <linux/vmstat.h>
+
+static __always_inline void *lowmem_page_address(struct page *page)
+{
+ return __va(page_to_pfn(page) << PAGE_SHIFT);
+}
+
+#if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
+#define HASHED_PAGE_VIRTUAL
+#endif
+
+#if defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) ((page)->virtual)
+#define set_page_address(page, address) \
+ do { \
+ (page)->virtual = (address); \
+ } while(0)
+#define page_address_init() do { } while(0)
+#endif
+
+#if defined(HASHED_PAGE_VIRTUAL)
+void *page_address(struct page *page);
+void set_page_address(struct page *page, void *virtual);
+void page_address_init(void);
+#endif
+
+#if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
+#define page_address(page) lowmem_page_address(page)
+#define set_page_address(page, address) do { } while(0)
+#define page_address_init() do { } while(0)
+#endif
+
+#ifndef DDE_LINUX
+/*
+ * On an anonymous page mapped into a user virtual memory area,
+ * page->mapping points to its anon_vma, not to a struct address_space;
+ * with the PAGE_MAPPING_ANON bit set to distinguish it.
+ *
+ * Please note that, confusingly, "page_mapping" refers to the inode
+ * address_space which maps the page from disk; whereas "page_mapped"
+ * refers to user virtual address space into which the page is mapped.
+ */
+#define PAGE_MAPPING_ANON 1
+
+extern struct address_space swapper_space;
+static inline struct address_space *page_mapping(struct page *page)
+{
+ struct address_space *mapping = page->mapping;
+
+ VM_BUG_ON(PageSlab(page));
+#ifdef CONFIG_SWAP
+ if (unlikely(PageSwapCache(page)))
+ mapping = &swapper_space;
+ else
+#endif
+ if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
+ mapping = NULL;
+ return mapping;
+}
+
+static inline int PageAnon(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
+}
+
+/*
+ * Return the pagecache index of the passed page. Regular pagecache pages
+ * use ->index whereas swapcache pages use ->private
+ */
+static inline pgoff_t page_index(struct page *page)
+{
+ if (unlikely(PageSwapCache(page)))
+ return page_private(page);
+ return page->index;
+}
+#else
+
+static inline struct address_space *page_mapping(struct page *page)
+{
+ WARN_UNIMPL;
+ return NULL;
+}
+
+static inline int PageAnon(struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+
+static inline pgoff_t page_index(struct page *page)
+{
+ WARN_UNIMPL;
+ return 0;
+}
+#endif /* DDE_LINUX */
+
+/*
+ * The atomic page->_mapcount, like _count, starts from -1:
+ * so that transitions both from it and to it can be tracked,
+ * using atomic_inc_and_test and atomic_add_negative(-1).
+ */
+static inline void reset_page_mapcount(struct page *page)
+{
+ atomic_set(&(page)->_mapcount, -1);
+}
+
+static inline int page_mapcount(struct page *page)
+{
+ return atomic_read(&(page)->_mapcount) + 1;
+}
+
+/*
+ * Return true if this page is mapped into pagetables.
+ */
+static inline int page_mapped(struct page *page)
+{
+ return atomic_read(&(page)->_mapcount) >= 0;
+}
+
+/*
+ * Different kinds of faults, as returned by handle_mm_fault().
+ * Used to decide whether a process gets delivered SIGBUS or
+ * just gets major/minor fault counters bumped up.
+ */
+
+#define VM_FAULT_MINOR 0 /* For backwards compat. Remove me quickly. */
+
+#define VM_FAULT_OOM 0x0001
+#define VM_FAULT_SIGBUS 0x0002
+#define VM_FAULT_MAJOR 0x0004
+#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
+
+#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
+#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
+
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS)
+
+/*
+ * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
+ */
+extern void pagefault_out_of_memory(void);
+
+#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
+
+extern void show_free_areas(void);
+
+#ifdef CONFIG_SHMEM
+extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+#else
+static inline int shmem_lock(struct file *file, int lock,
+ struct user_struct *user)
+{
+ return 0;
+}
+#endif
+struct file *shmem_file_setup(char *name, loff_t size, unsigned long flags);
+
+int shmem_zero_setup(struct vm_area_struct *);
+
+#ifndef CONFIG_MMU
+extern unsigned long shmem_get_unmapped_area(struct file *file,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags);
+#endif
+
+extern int can_do_mlock(void);
+extern int user_shm_lock(size_t, struct user_struct *);
+extern void user_shm_unlock(size_t, struct user_struct *);
+
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct vm_area_struct *nonlinear_vma; /* Check page->index if set */
+ struct address_space *check_mapping; /* Check page->mapping if set */
+ pgoff_t first_index; /* Lowest page->index to unmap */
+ pgoff_t last_index; /* Highest page->index to unmap */
+ spinlock_t *i_mmap_lock; /* For unmap_mapping_range: */
+ unsigned long truncate_count; /* Compare vm_truncate_count */
+};
+
+struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
+ pte_t pte);
+
+int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size);
+unsigned long zap_page_range(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *);
+unsigned long unmap_vmas(struct mmu_gather **tlb,
+ struct vm_area_struct *start_vma, unsigned long start_addr,
+ unsigned long end_addr, unsigned long *nr_accounted,
+ struct zap_details *);
+
+/**
+ * mm_walk - callbacks for walk_page_range
+ * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
+ * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
+ * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
+ * @pte_hole: if set, called for each hole at all levels
+ *
+ * (see walk_page_range for more details)
+ */
+struct mm_walk {
+ int (*pgd_entry)(pgd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pud_entry)(pud_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pmd_entry)(pmd_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pte_entry)(pte_t *, unsigned long, unsigned long, struct mm_walk *);
+ int (*pte_hole)(unsigned long, unsigned long, struct mm_walk *);
+ struct mm_struct *mm;
+ void *private;
+};
+
+int walk_page_range(unsigned long addr, unsigned long end,
+ struct mm_walk *walk);
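+
+/*
+ * Editor's sketch (not part of the original header): a minimal
+ * walk_page_range() user that counts present PTEs in a range. The
+ * callback name and the use of ->private are illustrative only.
+ */
+#if 0 /* illustration only */
+static int count_present_pte(pte_t *pte, unsigned long addr,
+		unsigned long next, struct mm_walk *walk)
+{
+	unsigned long *count = walk->private;
+
+	if (pte_present(*pte))
+		(*count)++;
+	return 0;	/* returning non-zero aborts the walk */
+}
+
+/* struct mm_walk walk = { .pte_entry = count_present_pte,
+ *			   .mm = mm, .private = &count };
+ * walk_page_range(start, end, &walk); */
+#endif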
+void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
+ unsigned long end, unsigned long floor, unsigned long ceiling);
+int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
+ struct vm_area_struct *vma);
+void unmap_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen, int even_cows);
+int follow_phys(struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags, unsigned long *prot, resource_size_t *phys);
+int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+ loff_t const holebegin, loff_t const holelen)
+{
+ unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int vmtruncate(struct inode * inode, loff_t offset);
+extern int vmtruncate_range(struct inode * inode, loff_t offset, loff_t end);
+
+#ifdef CONFIG_MMU
+extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long address, int write_access);
+#else
+static inline int handle_mm_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, unsigned long address,
+ int write_access)
+{
+ /* should never happen if there's no MMU */
+ BUG();
+ return VM_FAULT_SIGBUS;
+}
+#endif
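+
+/*
+ * Editor's sketch (not part of the original header): how an arch fault
+ * handler typically dispatches on the VM_FAULT_* bits defined earlier;
+ * the fault-accounting fields are those of struct task_struct.
+ */
+#if 0 /* illustration only */
+	fault = handle_mm_fault(mm, vma, address, write_access);
+	if (unlikely(fault & VM_FAULT_ERROR)) {
+		if (fault & VM_FAULT_OOM)
+			pagefault_out_of_memory();
+		else	/* VM_FAULT_SIGBUS */
+			force_sig(SIGBUS, tsk);
+	} else if (fault & VM_FAULT_MAJOR)
+		tsk->maj_flt++;	/* I/O was needed to service the fault */
+	else
+		tsk->min_flt++;	/* page was already resident */
+#endif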
+
+extern int make_pages_present(unsigned long addr, unsigned long end);
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
+
+int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, unsigned long start,
+ int len, int write, int force, struct page **pages, struct vm_area_struct **vmas);
+
+extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
+extern void do_invalidatepage(struct page *page, unsigned long offset);
+
+int __set_page_dirty_nobuffers(struct page *page);
+int __set_page_dirty_no_writeback(struct page *page);
+int redirty_page_for_writepage(struct writeback_control *wbc,
+ struct page *page);
+int set_page_dirty(struct page *page);
+int set_page_dirty_lock(struct page *page);
+int clear_page_dirty_for_io(struct page *page);
+
+extern unsigned long move_page_tables(struct vm_area_struct *vma,
+ unsigned long old_addr, struct vm_area_struct *new_vma,
+ unsigned long new_addr, unsigned long len);
+extern unsigned long do_mremap(unsigned long addr,
+ unsigned long old_len, unsigned long new_len,
+ unsigned long flags, unsigned long new_addr);
+extern int mprotect_fixup(struct vm_area_struct *vma,
+ struct vm_area_struct **pprev, unsigned long start,
+ unsigned long end, unsigned long newflags);
+
+/*
+ * get_user_pages_fast provides equivalent functionality to get_user_pages,
+ * operating on current and current->mm (force=0 and doesn't return any vmas).
+ *
+ * get_user_pages_fast may take mmap_sem and page tables, so no assumptions
+ * can be made about locking. get_user_pages_fast is to be implemented in a
+ * way that is advantageous (vs get_user_pages()) when the user memory area is
+ * already faulted in and present in ptes. However, if the pages have to be
+ * faulted in, it may turn out to be slightly slower.
+ */
+int get_user_pages_fast(unsigned long start, int nr_pages, int write,
+ struct page **pages);
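+
+/*
+ * Editor's sketch (not part of the original header): pinning a user
+ * buffer for DMA-style access and dropping the references afterwards.
+ */
+#if 0 /* illustration only */
+	got = get_user_pages_fast(start, nr_pages, 1 /* write */, pages);
+	if (got != nr_pages)
+		goto release;	/* only `got' pages were pinned */
+	/* ... access the pinned pages ... */
+release:
+	while (got > 0)
+		put_page(pages[--got]);
+#endif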
+
+/*
+ * A callback you can register to apply pressure to ageable caches.
+ *
+ * 'shrink' is passed a count 'nr_to_scan' and a 'gfpmask'. It should
+ * look through the least-recently-used 'nr_to_scan' entries and
+ * attempt to free them up. It should return the number of objects
+ * which remain in the cache. If it returns -1, it means it cannot do
+ * any scanning at this time (e.g. there is a risk of deadlock).
+ *
+ * The 'gfpmask' refers to the allocation we are currently trying to
+ * fulfil.
+ *
+ * Note that 'shrink' will be passed nr_to_scan == 0 when the VM is
+ * querying the cache size, so a fastpath for that case is appropriate.
+ */
+struct shrinker {
+ int (*shrink)(int nr_to_scan, gfp_t gfp_mask);
+ int seeks; /* seeks to recreate an obj */
+
+ /* These are for internal use */
+ struct list_head list;
+ long nr; /* objs pending delete */
+};
+#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
+extern void register_shrinker(struct shrinker *);
+extern void unregister_shrinker(struct shrinker *);
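+
+/*
+ * Editor's sketch (not part of the original header) of a shrinker that
+ * obeys the contract above; my_cache_count()/my_cache_evict() are
+ * hypothetical helpers for some private cache.
+ */
+#if 0 /* illustration only */
+static int my_shrink(int nr_to_scan, gfp_t gfp_mask)
+{
+	if (!(gfp_mask & __GFP_FS))
+		return -1;	/* cannot scan now, deadlock risk */
+	if (nr_to_scan)
+		my_cache_evict(nr_to_scan);
+	return my_cache_count();	/* objects remaining */
+}
+
+static struct shrinker my_shrinker = {
+	.shrink	= my_shrink,
+	.seeks	= DEFAULT_SEEKS,
+};
+/* register_shrinker(&my_shrinker); ... unregister_shrinker(&my_shrinker); */
+#endif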
+
+int vma_wants_writenotify(struct vm_area_struct *vma);
+
+extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
+
+#ifdef __PAGETABLE_PUD_FOLDED
+static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
+ unsigned long address)
+{
+ return 0;
+}
+#else
+int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
+#endif
+
+#ifdef __PAGETABLE_PMD_FOLDED
+static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
+ unsigned long address)
+{
+ return 0;
+}
+#else
+int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
+#endif
+
+int __pte_alloc(struct mm_struct *mm, pmd_t *pmd, unsigned long address);
+int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
+
+/*
+ * The following ifdef is needed to get the 4level-fixup.h header to work.
+ * Remove it when 4level-fixup.h has been removed.
+ */
+#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
+static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
+{
+ return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
+ NULL: pud_offset(pgd, address);
+}
+
+static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
+{
+ return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
+ NULL: pmd_offset(pud, address);
+}
+#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+
+#if USE_SPLIT_PTLOCKS
+/*
+ * We tuck a spinlock to guard each pagetable page into its struct page,
+ * at page->private, with BUILD_BUG_ON to make sure that this will not
+ * overflow into the next struct page (as it might with DEBUG_SPINLOCK).
+ * When freeing, reset page->mapping so free_pages_check won't complain.
+ */
+#define __pte_lockptr(page) &((page)->ptl)
+#define pte_lock_init(_page) do { \
+ spin_lock_init(__pte_lockptr(_page)); \
+} while (0)
+#define pte_lock_deinit(page) ((page)->mapping = NULL)
+#define pte_lockptr(mm, pmd) ({(void)(mm); __pte_lockptr(pmd_page(*(pmd)));})
+#else /* !USE_SPLIT_PTLOCKS */
+/*
+ * We use mm->page_table_lock to guard all pagetable pages of the mm.
+ */
+#define pte_lock_init(page) do {} while (0)
+#define pte_lock_deinit(page) do {} while (0)
+#define pte_lockptr(mm, pmd) ({(void)(pmd); &(mm)->page_table_lock;})
+#endif /* USE_SPLIT_PTLOCKS */
+
+static inline void pgtable_page_ctor(struct page *page)
+{
+ pte_lock_init(page);
+ inc_zone_page_state(page, NR_PAGETABLE);
+}
+
+static inline void pgtable_page_dtor(struct page *page)
+{
+ pte_lock_deinit(page);
+ dec_zone_page_state(page, NR_PAGETABLE);
+}
+
+#define pte_offset_map_lock(mm, pmd, address, ptlp) \
+({ \
+ spinlock_t *__ptl = pte_lockptr(mm, pmd); \
+ pte_t *__pte = pte_offset_map(pmd, address); \
+ *(ptlp) = __ptl; \
+ spin_lock(__ptl); \
+ __pte; \
+})
+
+#define pte_unmap_unlock(pte, ptl) do { \
+ spin_unlock(ptl); \
+ pte_unmap(pte); \
+} while (0)
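+
+/*
+ * Editor's sketch (not part of the original header): the canonical
+ * pattern for examining a single PTE under its page-table lock.
+ */
+#if 0 /* illustration only */
+	spinlock_t *ptl;
+	pte_t *pte = pte_offset_map_lock(mm, pmd, address, &ptl);
+
+	if (pte_present(*pte))
+		page = vm_normal_page(vma, address, *pte);
+	pte_unmap_unlock(pte, ptl);
+#endif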
+
+#define pte_alloc_map(mm, pmd, address) \
+ ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+ NULL: pte_offset_map(pmd, address))
+
+#define pte_alloc_map_lock(mm, pmd, address, ptlp) \
+ ((unlikely(!pmd_present(*(pmd))) && __pte_alloc(mm, pmd, address))? \
+ NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
+
+#define pte_alloc_kernel(pmd, address) \
+ ((unlikely(!pmd_present(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
+ NULL: pte_offset_kernel(pmd, address))
+
+extern void free_area_init(unsigned long * zones_size);
+extern void free_area_init_node(int nid, unsigned long * zones_size,
+ unsigned long zone_start_pfn, unsigned long *zholes_size);
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+/*
+ * With CONFIG_ARCH_POPULATES_NODE_MAP set, an architecture may initialise its
+ * zones, allocate the backing mem_map and account for memory holes in a more
+ * architecture-independent manner. This is a substitute for creating the
+ * zone_sizes[] and zholes_size[] arrays and passing them to
+ * free_area_init_node()
+ *
+ * An architecture is expected to register the range of page frames backed by
+ * physical memory with add_active_range() before calling
+ * free_area_init_nodes(), passing in the PFN each zone ends at. In basic
+ * usage, an architecture is expected to do something like:
+ *
+ * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
+ * max_highmem_pfn};
+ * for_each_valid_physical_page_range()
+ * add_active_range(node_id, start_pfn, end_pfn)
+ * free_area_init_nodes(max_zone_pfns);
+ *
+ * If the architecture guarantees that there are no holes in the ranges
+ * registered with add_active_range(), free_bootmem_active_regions()
+ * will call free_bootmem_node() for each registered physical page range.
+ * Similarly sparse_memory_present_with_active_regions() calls
+ * memory_present() for each range when SPARSEMEM is enabled.
+ *
+ * See mm/page_alloc.c for more information on each function exposed by
+ * CONFIG_ARCH_POPULATES_NODE_MAP
+ */
+extern void free_area_init_nodes(unsigned long *max_zone_pfn);
+extern void add_active_range(unsigned int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
+extern void remove_all_active_ranges(void);
+extern unsigned long absent_pages_in_range(unsigned long start_pfn,
+ unsigned long end_pfn);
+extern void get_pfn_range_for_nid(unsigned int nid,
+ unsigned long *start_pfn, unsigned long *end_pfn);
+extern unsigned long find_min_pfn_with_active_regions(void);
+extern void free_bootmem_with_active_regions(int nid,
+ unsigned long max_low_pfn);
+typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
+extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
+extern void sparse_memory_present_with_active_regions(int nid);
+#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
+
+#if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
+ !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
+static inline int __early_pfn_to_nid(unsigned long pfn)
+{
+ return 0;
+}
+#else
+/* please see mm/page_alloc.c */
+extern int __meminit early_pfn_to_nid(unsigned long pfn);
+#ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
+/* there is a per-arch backend function. */
+extern int __meminit __early_pfn_to_nid(unsigned long pfn);
+#endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
+#endif
+
+extern void set_dma_reserve(unsigned long new_dma_reserve);
+extern void memmap_init_zone(unsigned long, int, unsigned long,
+ unsigned long, enum memmap_context);
+extern void setup_per_zone_pages_min(void);
+extern void mem_init(void);
+extern void __init mmap_init(void);
+extern void show_mem(void);
+extern void si_meminfo(struct sysinfo * val);
+extern void si_meminfo_node(struct sysinfo *val, int nid);
+extern int after_bootmem;
+
+#ifdef CONFIG_NUMA
+extern void setup_per_cpu_pageset(void);
+#else
+static inline void setup_per_cpu_pageset(void) {}
+#endif
+
+/* nommu.c */
+extern atomic_t mmap_pages_allocated;
+
+/* prio_tree.c */
+void vma_prio_tree_add(struct vm_area_struct *, struct vm_area_struct *old);
+void vma_prio_tree_insert(struct vm_area_struct *, struct prio_tree_root *);
+void vma_prio_tree_remove(struct vm_area_struct *, struct prio_tree_root *);
+struct vm_area_struct *vma_prio_tree_next(struct vm_area_struct *vma,
+ struct prio_tree_iter *iter);
+
+#define vma_prio_tree_foreach(vma, iter, root, begin, end) \
+ for (prio_tree_iter_init(iter, root, begin, end), vma = NULL; \
+ (vma = vma_prio_tree_next(vma, iter)); )
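+
+/*
+ * Editor's sketch (not part of the original header): visiting every VMA
+ * that maps file pages [begin, end], as unmap_mapping_range() does via
+ * the mapping's i_mmap prio tree.
+ */
+#if 0 /* illustration only */
+	struct vm_area_struct *vma;
+	struct prio_tree_iter iter;
+
+	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, begin, end) {
+		/* vma maps at least one page index in [begin, end] */
+	}
+#endif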
+
+static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
+ struct list_head *list)
+{
+ vma->shared.vm_set.parent = NULL;
+ list_add_tail(&vma->shared.vm_set.list, list);
+}
+
+/* mmap.c */
+extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
+extern void vma_adjust(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
+extern struct vm_area_struct *vma_merge(struct mm_struct *,
+ struct vm_area_struct *prev, unsigned long addr, unsigned long end,
+ unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
+ struct mempolicy *);
+extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
+extern int split_vma(struct mm_struct *,
+ struct vm_area_struct *, unsigned long addr, int new_below);
+extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
+extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
+ struct rb_node **, struct rb_node *);
+extern void unlink_file_vma(struct vm_area_struct *);
+extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
+ unsigned long addr, unsigned long len, pgoff_t pgoff);
+extern void exit_mmap(struct mm_struct *);
+
+extern int mm_take_all_locks(struct mm_struct *mm);
+extern void mm_drop_all_locks(struct mm_struct *mm);
+
+#ifdef CONFIG_PROC_FS
+/* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */
+extern void added_exe_file_vma(struct mm_struct *mm);
+extern void removed_exe_file_vma(struct mm_struct *mm);
+#else
+static inline void added_exe_file_vma(struct mm_struct *mm)
+{}
+
+static inline void removed_exe_file_vma(struct mm_struct *mm)
+{}
+#endif /* CONFIG_PROC_FS */
+
+extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
+extern int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long flags, struct page **pages);
+
+extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+
+extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+extern unsigned long mmap_region(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long flags,
+ unsigned int vm_flags, unsigned long pgoff);
+
+static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+{
+ unsigned long ret = -EINVAL;
+ if ((offset + PAGE_ALIGN(len)) < offset)
+ goto out;
+ if (!(offset & ~PAGE_MASK))
+ ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
+out:
+ return ret;
+}
+
+extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+
+extern unsigned long do_brk(unsigned long, unsigned long);
+
+/* filemap.c */
+extern unsigned long page_unuse(struct page *);
+extern void truncate_inode_pages(struct address_space *, loff_t);
+extern void truncate_inode_pages_range(struct address_space *,
+ loff_t lstart, loff_t lend);
+
+/* generic vm_area_ops exported for stackable file systems */
+extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
+
+/* mm/page-writeback.c */
+int write_one_page(struct page *page, int wait);
+void task_dirty_inc(struct task_struct *tsk);
+
+/* readahead.c */
+#define VM_MAX_READAHEAD 128 /* kbytes */
+#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
+
+int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ pgoff_t offset, unsigned long nr_to_read);
+int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
+ pgoff_t offset, unsigned long nr_to_read);
+
+void page_cache_sync_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ pgoff_t offset,
+ unsigned long size);
+
+void page_cache_async_readahead(struct address_space *mapping,
+ struct file_ra_state *ra,
+ struct file *filp,
+ struct page *pg,
+ pgoff_t offset,
+ unsigned long size);
+
+unsigned long max_sane_readahead(unsigned long nr);
+
+/* Do stack extension */
+extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
+#ifdef CONFIG_IA64
+extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
+#endif
+extern int expand_stack_downwards(struct vm_area_struct *vma,
+ unsigned long address);
+
+/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+ struct vm_area_struct **pprev);
+
+/* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+{
+ struct vm_area_struct * vma = find_vma(mm,start_addr);
+
+ if (vma && end_addr <= vma->vm_start)
+ vma = NULL;
+ return vma;
+}
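+
+/*
+ * Editor's sketch (not part of the original header): find_vma() and
+ * find_vma_intersection() are the usual way to test whether a range is
+ * already mapped, e.g. before placing a MAP_FIXED mapping.
+ */
+#if 0 /* illustration only */
+	if (find_vma_intersection(mm, addr, addr + len))
+		return -ENOMEM;	/* something is mapped in [addr, addr+len) */
+#endif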
+
+static inline unsigned long vma_pages(struct vm_area_struct *vma)
+{
+ return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+}
+
+pgprot_t vm_get_page_prot(unsigned long vm_flags);
+struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
+int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
+ unsigned long pfn, unsigned long size, pgprot_t);
+int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
+int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn);
+
+struct page *follow_page(struct vm_area_struct *, unsigned long address,
+ unsigned int foll_flags);
+#define FOLL_WRITE 0x01 /* check pte is writable */
+#define FOLL_TOUCH 0x02 /* mark page accessed */
+#define FOLL_GET 0x04 /* do get_page on page */
+#define FOLL_ANON 0x08 /* give ZERO_PAGE if no pgtable */
+
+typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
+ void *data);
+extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
+ unsigned long size, pte_fn_t fn, void *data);
+
+#ifdef CONFIG_PROC_FS
+void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
+#else
+static inline void vm_stat_account(struct mm_struct *mm,
+ unsigned long flags, struct file *file, long pages)
+{
+}
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern int debug_pagealloc_enabled;
+
+extern void kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline void enable_debug_pagealloc(void)
+{
+ debug_pagealloc_enabled = 1;
+}
+#ifdef CONFIG_HIBERNATION
+extern bool kernel_page_present(struct page *page);
+#endif /* CONFIG_HIBERNATION */
+#else
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable) {}
+static inline void enable_debug_pagealloc(void)
+{
+}
+#ifdef CONFIG_HIBERNATION
+static inline bool kernel_page_present(struct page *page) { return true; }
+#endif /* CONFIG_HIBERNATION */
+#endif
+
+extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk);
+#ifdef __HAVE_ARCH_GATE_AREA
+int in_gate_area_no_task(unsigned long addr);
+int in_gate_area(struct task_struct *task, unsigned long addr);
+#else
+int in_gate_area_no_task(unsigned long addr);
+#define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);})
+#endif /* __HAVE_ARCH_GATE_AREA */
+
+int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *,
+ void __user *, size_t *, loff_t *);
+unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+ unsigned long lru_pages);
+
+#ifndef CONFIG_MMU
+#define randomize_va_space 0
+#else
+extern int randomize_va_space;
+#endif
+
+const char * arch_vma_name(struct vm_area_struct *vma);
+void print_vma_addr(char *prefix, unsigned long rip);
+
+struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
+pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
+pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
+pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
+pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
+void *vmemmap_alloc_block(unsigned long size, int node);
+void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
+int vmemmap_populate_basepages(struct page *start_page,
+ unsigned long pages, int node);
+int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
+void vmemmap_populate_print_last(void);
+
+extern void *alloc_locked_buffer(size_t size);
+extern void free_locked_buffer(void *buffer, size_t size);
+extern void release_locked_buffer(void *buffer, size_t size);
+#endif /* __KERNEL__ */
+#endif /* _LINUX_MM_H */
diff --git a/libdde-linux26/include/linux/mount.h b/libdde-linux26/include/linux/mount.h
new file mode 100644
index 00000000..17f36e6b
--- /dev/null
+++ b/libdde-linux26/include/linux/mount.h
@@ -0,0 +1,127 @@
+/*
+ *
+ * Definitions for the mount interface. This describes the kernel's
+ * linked list of mounted filesystems.
+ *
+ * Author: Marco van Wieringen <mvw@planets.elm.net>
+ *
+ */
+#ifndef _LINUX_MOUNT_H
+#define _LINUX_MOUNT_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/nodemask.h>
+#include <linux/spinlock.h>
+#include <asm/atomic.h>
+
+struct super_block;
+struct vfsmount;
+struct dentry;
+struct mnt_namespace;
+
+#define MNT_NOSUID 0x01
+#define MNT_NODEV 0x02
+#define MNT_NOEXEC 0x04
+#define MNT_NOATIME 0x08
+#define MNT_NODIRATIME 0x10
+#define MNT_RELATIME 0x20
+#define MNT_READONLY 0x40 /* does the user want this to be r/o? */
+
+#define MNT_SHRINKABLE 0x100
+#define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */
+
+#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
+#define MNT_UNBINDABLE	0x2000	/* if the vfsmount is an unbindable mount */
+#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */
+
+struct vfsmount {
+#ifdef DDE_LINUX
+ struct file_system_type *fs_type;
+ struct list_head next;
+ char * name;
+ struct super_block *mnt_sb;
+ atomic_t mnt_count;
+ int mnt_flags;
+ struct dentry *mnt_root;
+#else
+ struct list_head mnt_hash;
+ struct vfsmount *mnt_parent; /* fs we are mounted on */
+ struct dentry *mnt_mountpoint; /* dentry of mountpoint */
+ struct dentry *mnt_root; /* root of the mounted tree */
+ struct super_block *mnt_sb; /* pointer to superblock */
+ struct list_head mnt_mounts; /* list of children, anchored here */
+ struct list_head mnt_child; /* and going through their mnt_child */
+ int mnt_flags;
+ /* 4 bytes hole on 64bits arches */
+ const char *mnt_devname; /* Name of device e.g. /dev/dsk/hda1 */
+ struct list_head mnt_list;
+ struct list_head mnt_expire; /* link in fs-specific expiry list */
+ struct list_head mnt_share; /* circular list of shared mounts */
+ struct list_head mnt_slave_list;/* list of slave mounts */
+ struct list_head mnt_slave; /* slave list entry */
+ struct vfsmount *mnt_master; /* slave is on master->mnt_slave_list */
+ struct mnt_namespace *mnt_ns; /* containing namespace */
+ int mnt_id; /* mount identifier */
+ int mnt_group_id; /* peer group identifier */
+ /*
+ * We put mnt_count & mnt_expiry_mark at the end of struct vfsmount
+ * to keep these frequently modified fields in a separate cache line
+ * (so that reads of mnt_flags won't ping-pong on SMP machines)
+ */
+ atomic_t mnt_count;
+ int mnt_expiry_mark; /* true if marked for expiry */
+ int mnt_pinned;
+ int mnt_ghosts;
+ /*
+ * This value is not stable unless all of the mnt_writers[] spinlocks
+ * are held, and all mnt_writer[]s on this mount have 0 as their ->count
+ */
+ atomic_t __mnt_writers;
+#endif
+};
+
+static inline struct vfsmount *mntget(struct vfsmount *mnt)
+{
+ if (mnt)
+ atomic_inc(&mnt->mnt_count);
+ return mnt;
+}
+
+#ifndef DDE_LINUX
+extern int mnt_want_write(struct vfsmount *mnt);
+extern void mnt_drop_write(struct vfsmount *mnt);
+extern void mntput_no_expire(struct vfsmount *mnt);
+extern void mnt_pin(struct vfsmount *mnt);
+extern void mnt_unpin(struct vfsmount *mnt);
+extern int __mnt_is_readonly(struct vfsmount *mnt);
+
+static inline void mntput(struct vfsmount *mnt)
+{
+ if (mnt) {
+ mnt->mnt_expiry_mark = 0;
+ mntput_no_expire(mnt);
+ }
+}
+#endif /* DDE_LINUX */
+
+extern struct vfsmount *do_kern_mount(const char *fstype, int flags,
+ const char *name, void *data);
+
+struct file_system_type;
+extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
+ int flags, const char *name,
+ void *data);
+
+struct nameidata;
+
+struct path;
+extern int do_add_mount(struct vfsmount *newmnt, struct path *path,
+ int mnt_flags, struct list_head *fslist);
+
+extern void mark_mounts_for_expiry(struct list_head *mounts);
+
+extern spinlock_t vfsmount_lock;
+extern dev_t name_to_dev_t(char *name);
+
+#endif /* _LINUX_MOUNT_H */
diff --git a/libdde-linux26/include/linux/pagemap.h b/libdde-linux26/include/linux/pagemap.h
new file mode 100644
index 00000000..ebff43c5
--- /dev/null
+++ b/libdde-linux26/include/linux/pagemap.h
@@ -0,0 +1,473 @@
+#ifndef _LINUX_PAGEMAP_H
+#define _LINUX_PAGEMAP_H
+
+/*
+ * Copyright 1995 Linus Torvalds
+ */
+#include <linux/mm.h>
+#include <linux/fs.h>
+#include <linux/list.h>
+#include <linux/highmem.h>
+#include <linux/compiler.h>
+#include <asm/uaccess.h>
+#include <linux/gfp.h>
+#include <linux/bitops.h>
+#include <linux/hardirq.h> /* for in_interrupt() */
+
+/*
+ * Bits in mapping->flags. The lower __GFP_BITS_SHIFT bits are the page
+ * allocation mode flags.
+ */
+#define AS_EIO (__GFP_BITS_SHIFT + 0) /* IO error on async write */
+#define AS_ENOSPC (__GFP_BITS_SHIFT + 1) /* ENOSPC on async write */
+#define AS_MM_ALL_LOCKS (__GFP_BITS_SHIFT + 2) /* under mm_take_all_locks() */
+
+static inline void mapping_set_error(struct address_space *mapping, int error)
+{
+ if (unlikely(error)) {
+ if (error == -ENOSPC)
+ set_bit(AS_ENOSPC, &mapping->flags);
+ else
+ set_bit(AS_EIO, &mapping->flags);
+ }
+}
+
+#ifdef CONFIG_UNEVICTABLE_LRU
+#define AS_UNEVICTABLE (__GFP_BITS_SHIFT + 2) /* e.g., ramdisk, SHM_LOCK */
+
+static inline void mapping_set_unevictable(struct address_space *mapping)
+{
+ set_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline void mapping_clear_unevictable(struct address_space *mapping)
+{
+ clear_bit(AS_UNEVICTABLE, &mapping->flags);
+}
+
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ if (likely(mapping))
+ return test_bit(AS_UNEVICTABLE, &mapping->flags);
+ return !!mapping;
+}
+#else
+static inline void mapping_set_unevictable(struct address_space *mapping) { }
+static inline void mapping_clear_unevictable(struct address_space *mapping) { }
+static inline int mapping_unevictable(struct address_space *mapping)
+{
+ return 0;
+}
+#endif
+
+static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
+{
+ return (__force gfp_t)mapping->flags & __GFP_BITS_MASK;
+}
+
+/*
+ * This is non-atomic. Only to be used before the mapping is activated.
+ * Probably needs a barrier...
+ */
+static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
+{
+ m->flags = (m->flags & ~(__force unsigned long)__GFP_BITS_MASK) |
+ (__force unsigned long)mask;
+}
+
+/*
+ * The page cache can be done in larger chunks than
+ * one page, because it allows for more efficient
+ * throughput (it can then be mapped into user
+ * space in smaller chunks for the same flexibility).
+ *
+ * Or rather, it _will_ be done in larger chunks.
+ */
+#define PAGE_CACHE_SHIFT PAGE_SHIFT
+#define PAGE_CACHE_SIZE PAGE_SIZE
+#define PAGE_CACHE_MASK PAGE_MASK
+#define PAGE_CACHE_ALIGN(addr) (((addr)+PAGE_CACHE_SIZE-1)&PAGE_CACHE_MASK)
+
+#define page_cache_get(page) get_page(page)
+#define page_cache_release(page) put_page(page)
+void release_pages(struct page **pages, int nr, int cold);
+
+/*
+ * speculatively take a reference to a page.
+ * If the page is free (_count == 0), then _count is untouched, and 0
+ * is returned. Otherwise, _count is incremented by 1 and 1 is returned.
+ *
+ * This function must be called inside the same rcu_read_lock() section as has
+ * been used to lookup the page in the pagecache radix-tree (or page table):
+ * this allows allocators to use a synchronize_rcu() to stabilize _count.
+ *
+ * Unless an RCU grace period has passed, the count of all pages coming out
+ * of the allocator must be considered unstable. page_count may return higher
+ * than expected, and put_page must be able to do the right thing when the
+ * page has been finished with, no matter what it is subsequently allocated
+ * for (because put_page is what is used here to drop an invalid speculative
+ * reference).
+ *
+ * This is the interesting part of the lockless pagecache (and lockless
+ * get_user_pages) locking protocol, where the lookup-side (eg. find_get_page)
+ * has the following pattern:
+ * 1. find page in radix tree
+ * 2. conditionally increment refcount
+ * 3. check the page is still in pagecache (if no, goto 1)
+ *
+ * Remove-side that cares about stability of _count (eg. reclaim) has the
+ * following (with tree_lock held for write):
+ * A. atomically check refcount is correct and set it to 0 (atomic_cmpxchg)
+ * B. remove page from pagecache
+ * C. free the page
+ *
+ * There are 2 critical interleavings that matter:
+ * - 2 runs before A: in this case, A sees elevated refcount and bails out
+ * - A runs before 2: in this case, 2 sees zero refcount and retries;
+ * subsequently, B will complete and 1 will find no page, causing the
+ * lookup to return NULL.
+ *
+ * It is possible that between 1 and 2, the page is removed then the exact same
+ * page is inserted into the same position in pagecache. That's OK: the
+ * old find_get_page using tree_lock could equally have run before or after
+ * such a re-insertion, depending on the order in which locks are granted.
+ *
+ * Lookups racing against pagecache insertion aren't a big problem: either 1
+ * will find the page or it will not. Likewise, the old find_get_page could run
+ * either before the insertion or afterwards, depending on timing.
+ */
+static inline int page_cache_get_speculative(struct page *page)
+{
+ VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+ VM_BUG_ON(!in_atomic());
+# endif
+ /*
+ * Preempt must be disabled here - we rely on rcu_read_lock doing
+ * this for us.
+ *
+ * Pagecache won't be truncated from interrupt context, so if we have
+ * found a page in the radix tree here, we have pinned its refcount by
+ * disabling preempt, and hence no need for the "speculative get" that
+ * SMP requires.
+ */
+ VM_BUG_ON(page_count(page) == 0);
+ atomic_inc(&page->_count);
+
+#else
+ if (unlikely(!get_page_unless_zero(page))) {
+ /*
+ * Either the page has been freed, or will be freed.
+ * In either case, retry here and the caller should
+ * do the right thing (see comments above).
+ */
+ return 0;
+ }
+#endif
+ VM_BUG_ON(PageTail(page));
+
+ return 1;
+}
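+
+/*
+ * Editor's sketch (not part of the original header): the lookup loop
+ * described above, numbered as in the comment (find_get_page()-style).
+ */
+#if 0 /* illustration only */
+	rcu_read_lock();
+repeat:
+	page = radix_tree_lookup(&mapping->page_tree, index);	/* 1 */
+	if (page) {
+		if (!page_cache_get_speculative(page))		/* 2 */
+			goto repeat;
+		if (page != radix_tree_lookup(&mapping->page_tree, index)) {
+			page_cache_release(page);	/* 3: raced, retry */
+			goto repeat;
+		}
+	}
+	rcu_read_unlock();
+#endif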
+
+/*
+ * Same as above, but add instead of inc (could just be merged)
+ */
+static inline int page_cache_add_speculative(struct page *page, int count)
+{
+ VM_BUG_ON(in_interrupt());
+
+#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+# ifdef CONFIG_PREEMPT
+ VM_BUG_ON(!in_atomic());
+# endif
+ VM_BUG_ON(page_count(page) == 0);
+ atomic_add(count, &page->_count);
+
+#else
+ if (unlikely(!atomic_add_unless(&page->_count, count, 0)))
+ return 0;
+#endif
+ VM_BUG_ON(PageCompound(page) && page != compound_head(page));
+
+ return 1;
+}
+
+static inline int page_freeze_refs(struct page *page, int count)
+{
+ return likely(atomic_cmpxchg(&page->_count, count, 0) == count);
+}
+
+static inline void page_unfreeze_refs(struct page *page, int count)
+{
+ VM_BUG_ON(page_count(page) != 0);
+ VM_BUG_ON(count == 0);
+
+ atomic_set(&page->_count, count);
+}
+
+#ifdef CONFIG_NUMA
+extern struct page *__page_cache_alloc(gfp_t gfp);
+#else
+static inline struct page *__page_cache_alloc(gfp_t gfp)
+{
+ return alloc_pages(gfp, 0);
+}
+#endif
+
+static inline struct page *page_cache_alloc(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x));
+}
+
+static inline struct page *page_cache_alloc_cold(struct address_space *x)
+{
+ return __page_cache_alloc(mapping_gfp_mask(x)|__GFP_COLD);
+}
+
+typedef int filler_t(void *, struct page *);
+
+extern struct page * find_get_page(struct address_space *mapping,
+ pgoff_t index);
+extern struct page * find_lock_page(struct address_space *mapping,
+ pgoff_t index);
+extern struct page * find_or_create_page(struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+unsigned find_get_pages(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
+ unsigned int nr_pages, struct page **pages);
+unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
+ int tag, unsigned int nr_pages, struct page **pages);
+
+struct page *grab_cache_page_write_begin(struct address_space *mapping,
+ pgoff_t index, unsigned flags);
+
+/*
+ * Returns locked page at given index in given cache, creating it if needed.
+ */
+static inline struct page *grab_cache_page(struct address_space *mapping,
+ pgoff_t index)
+{
+ return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
+}
+
+extern struct page * grab_cache_page_nowait(struct address_space *mapping,
+ pgoff_t index);
+extern struct page * read_cache_page_async(struct address_space *mapping,
+ pgoff_t index, filler_t *filler,
+ void *data);
+extern struct page * read_cache_page(struct address_space *mapping,
+ pgoff_t index, filler_t *filler,
+ void *data);
+extern int read_cache_pages(struct address_space *mapping,
+ struct list_head *pages, filler_t *filler, void *data);
+
+static inline struct page *read_mapping_page_async(
+ struct address_space *mapping,
+ pgoff_t index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page_async(mapping, index, filler, data);
+}
+
+static inline struct page *read_mapping_page(struct address_space *mapping,
+ pgoff_t index, void *data)
+{
+ filler_t *filler = (filler_t *)mapping->a_ops->readpage;
+ return read_cache_page(mapping, index, filler, data);
+}
+
+/*
+ * Return byte-offset into filesystem object for page.
+ */
+static inline loff_t page_offset(struct page *page)
+{
+#ifndef DDE_LINUX
+ return ((loff_t)page->index) << PAGE_CACHE_SHIFT;
+#else
+ return 0;
+#endif
+}
+
+static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
+ unsigned long address)
+{
+#ifndef DDE_LINUX
+ pgoff_t pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
+ pgoff += vma->vm_pgoff;
+ return pgoff >> (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+#else
+ return 0;
+#endif
+}
+
+extern void __lock_page(struct page *page);
+extern int __lock_page_killable(struct page *page);
+extern void __lock_page_nosync(struct page *page);
+extern void unlock_page(struct page *page);
+
+static inline void __set_page_locked(struct page *page)
+{
+ __set_bit(PG_locked, &page->flags);
+}
+
+static inline void __clear_page_locked(struct page *page)
+{
+ __clear_bit(PG_locked, &page->flags);
+}
+
+static inline int trylock_page(struct page *page)
+{
+ return (likely(!test_and_set_bit_lock(PG_locked, &page->flags)));
+}
+
+/*
+ * lock_page may only be called if we have the page's inode pinned.
+ */
+static inline void lock_page(struct page *page)
+{
+#ifndef DDE_LINUX
+ might_sleep();
+ if (!trylock_page(page))
+ __lock_page(page);
+#endif
+}
+
+
+/*
+ * lock_page_killable is like lock_page but can be interrupted by fatal
+ * signals. It returns 0 if it locked the page and -EINTR if it was
+ * killed while waiting.
+ */
+static inline int lock_page_killable(struct page *page)
+{
+ might_sleep();
+ if (!trylock_page(page))
+ return __lock_page_killable(page);
+ return 0;
+}
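+
+/*
+ * Editor's sketch (not part of the original header): typical use of the
+ * killable variant, unwinding cleanly on a fatal signal.
+ */
+#if 0 /* illustration only */
+	if (lock_page_killable(page)) {
+		page_cache_release(page);
+		return -EINTR;	/* killed while waiting for the lock */
+	}
+	/* ... operate on the locked page ... */
+	unlock_page(page);
+#endif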
+
+/*
+ * lock_page_nosync should only be used if we can't pin the page's inode.
+ * Doesn't play quite so well with block device plugging.
+ */
+static inline void lock_page_nosync(struct page *page)
+{
+#ifndef DDE_LINUX
+ might_sleep();
+ if (!trylock_page(page))
+ __lock_page_nosync(page);
+#endif
+}
+
+/*
+ * This is exported only for wait_on_page_locked/wait_on_page_writeback.
+ * Never use this directly!
+ */
+extern void wait_on_page_bit(struct page *page, int bit_nr);
+
+/*
+ * Wait for a page to be unlocked.
+ *
+ * This must be called with the caller "holding" the page,
+ * i.e. with an increased "page->count" so that the page won't
+ * go away during the wait.
+ */
+static inline void wait_on_page_locked(struct page *page)
+{
+#ifndef DDE_LINUX
+ if (PageLocked(page))
+ wait_on_page_bit(page, PG_locked);
+#endif
+}
+
+/*
+ * Wait for a page to complete writeback
+ */
+static inline void wait_on_page_writeback(struct page *page)
+{
+#ifndef DDE_LINUX
+ if (PageWriteback(page))
+ wait_on_page_bit(page, PG_writeback);
+#endif
+}
+
+extern void end_page_writeback(struct page *page);
+
+/*
+ * Fault a userspace page into pagetables. Return non-zero on a fault.
+ *
+ * This assumes that two userspace pages are always sufficient. That's
+ * not true if PAGE_CACHE_SIZE > PAGE_SIZE.
+ */
+static inline int fault_in_pages_writeable(char __user *uaddr, int size)
+{
+ int ret;
+
+ if (unlikely(size == 0))
+ return 0;
+
+ /*
+ * Writing zeroes into userspace here is OK, because we know that if
+ * the zero gets there, we'll be overwriting it.
+ */
+ ret = __put_user(0, uaddr);
+ if (ret == 0) {
+ char __user *end = uaddr + size - 1;
+
+ /*
+ * If the page was already mapped, this will get a cache miss
+ * for sure, so try to avoid doing it.
+ */
+ if (((unsigned long)uaddr & PAGE_MASK) !=
+ ((unsigned long)end & PAGE_MASK))
+ ret = __put_user(0, end);
+ }
+ return ret;
+}
+
+static inline int fault_in_pages_readable(const char __user *uaddr, int size)
+{
+ volatile char c;
+ int ret;
+
+ if (unlikely(size == 0))
+ return 0;
+
+ ret = __get_user(c, uaddr);
+ if (ret == 0) {
+ const char __user *end = uaddr + size - 1;
+
+ if (((unsigned long)uaddr & PAGE_MASK) !=
+ ((unsigned long)end & PAGE_MASK))
+ ret = __get_user(c, end);
+ }
+ return ret;
+}
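+
+/*
+ * Editor's sketch (not part of the original header): the write-path
+ * pattern these helpers exist for -- pre-fault the user buffer while no
+ * page lock is held, so the later atomic copy rarely takes a fault.
+ */
+#if 0 /* illustration only */
+	if (fault_in_pages_readable(buf, bytes))
+		return -EFAULT;
+	/* ... lock the pagecache page, copy with pagefaults disabled ... */
+#endif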
+
+int add_to_page_cache_locked(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
+ pgoff_t index, gfp_t gfp_mask);
+extern void remove_from_page_cache(struct page *page);
+extern void __remove_from_page_cache(struct page *page);
+
+/*
+ * Like add_to_page_cache_locked, but used to add newly allocated pages:
+ * the page is new, so we can just run __set_page_locked() against it.
+ */
+static inline int add_to_page_cache(struct page *page,
+ struct address_space *mapping, pgoff_t offset, gfp_t gfp_mask)
+{
+ int error;
+
+ __set_page_locked(page);
+ error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
+ if (unlikely(error))
+ __clear_page_locked(page);
+ return error;
+}
+
+#endif /* _LINUX_PAGEMAP_H */
diff --git a/libdde-linux26/include/linux/percpu.h b/libdde-linux26/include/linux/percpu.h
new file mode 100644
index 00000000..6995e4b8
--- /dev/null
+++ b/libdde-linux26/include/linux/percpu.h
@@ -0,0 +1,120 @@
+#ifndef __LINUX_PERCPU_H
+#define __LINUX_PERCPU_H
+
+#include <linux/preempt.h>
+#include <linux/slab.h> /* For kmalloc() */
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+
+#include <asm/percpu.h>
+
+#ifdef CONFIG_SMP
+#define DEFINE_PER_CPU(type, name) \
+ __attribute__((__section__(".data.percpu"))) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#ifdef MODULE
+#define SHARED_ALIGNED_SECTION ".data.percpu"
+#else
+#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#endif
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ __attribute__((__section__(SHARED_ALIGNED_SECTION))) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
+ ____cacheline_aligned_in_smp
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ __attribute__((__section__(".data.percpu.page_aligned"))) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+#else
+#define DEFINE_PER_CPU(type, name) \
+ PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+
+#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
+
+#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
+ DEFINE_PER_CPU(type, name)
+#endif
+
+#define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
+#define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
+
+/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
+#ifndef PERCPU_ENOUGH_ROOM
+#ifdef CONFIG_MODULES
+#define PERCPU_MODULE_RESERVE 8192
+#else
+#define PERCPU_MODULE_RESERVE 0
+#endif
+
+#define PERCPU_ENOUGH_ROOM \
+ (__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
+#endif /* PERCPU_ENOUGH_ROOM */
+
+/*
+ * Must be an lvalue. Since @var must be a simple identifier,
+ * we force a syntax error here if it isn't.
+ */
+#define get_cpu_var(var) (*({ \
+ extern int simple_identifier_##var(void); \
+ preempt_disable(); \
+ &__get_cpu_var(var); }))
+#define put_cpu_var(var) preempt_enable()
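+
+/*
+ * Editor's sketch (not part of the original header): get_cpu_var() pins
+ * the task to its CPU for the duration of the access.
+ */
+#if 0 /* illustration only */
+static DEFINE_PER_CPU(unsigned long, my_event_count);
+
+static void my_event(void)
+{
+	get_cpu_var(my_event_count)++;	/* preemption disabled here */
+	put_cpu_var(my_event_count);	/* preemption re-enabled */
+}
+#endif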
+
+/* DDE defines CONFIG_SMP, because our "processors" are multiple L4/DDEKit-Threads
+ * running in parallel. They need synchronization stuff. However, we don't
+ * need per-cpu variables & co. which are defined below.
+ */
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+
+struct percpu_data {
+ void *ptrs[1];
+};
+
+#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
+/*
+ * Use this to get to a cpu's version of the per-cpu object dynamically
+ * allocated. Non-atomic access to the current CPU's version should
+ * probably be combined with get_cpu()/put_cpu().
+ */
+#define percpu_ptr(ptr, cpu) \
+({ \
+ struct percpu_data *__p = __percpu_disguise(ptr); \
+ (__typeof__(ptr))__p->ptrs[(cpu)]; \
+})
+
+extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
+extern void percpu_free(void *__pdata);
+
+#else /* !CONFIG_SMP || DDE_LINUX */
+
+#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+
+static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+{
+ return kzalloc(size, gfp);
+}
+
+static inline void percpu_free(void *__pdata)
+{
+ kfree(__pdata);
+}
+
+#endif /* CONFIG_SMP */
+
+#define percpu_alloc_mask(size, gfp, mask) \
+ __percpu_alloc_mask((size), (gfp), &(mask))
+
+#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
+
+/* (legacy) interface for use without CPU hotplug handling */
+
+#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
+ cpu_possible_map)
+#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
+#define free_percpu(ptr) percpu_free((ptr))
+#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+
+#endif /* __LINUX_PERCPU_H */
diff --git a/libdde-linux26/include/linux/percpu_counter.h b/libdde-linux26/include/linux/percpu_counter.h
new file mode 100644
index 00000000..c1f9482f
--- /dev/null
+++ b/libdde-linux26/include/linux/percpu_counter.h
@@ -0,0 +1,149 @@
+#ifndef _LINUX_PERCPU_COUNTER_H
+#define _LINUX_PERCPU_COUNTER_H
+/*
+ * A simple "approximate counter" for use in ext2 and ext3 superblocks.
+ *
+ * WARNING: these things are HUGE. 4 kbytes per counter on 32-way P4.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/list.h>
+#include <linux/threads.h>
+#include <linux/percpu.h>
+#include <linux/types.h>
+
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+
+struct percpu_counter {
+ spinlock_t lock;
+ s64 count;
+#ifdef CONFIG_HOTPLUG_CPU
+ struct list_head list; /* All percpu_counters are on a list */
+#endif
+ s32 *counters;
+};
+
+extern int percpu_counter_batch;
+
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+ struct lock_class_key *key);
+
+#define percpu_counter_init(fbc, value) \
+ ({ \
+ static struct lock_class_key __key; \
+ \
+ __percpu_counter_init(fbc, value, &__key); \
+ })
+
+void percpu_counter_destroy(struct percpu_counter *fbc);
+void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
+void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
+s64 __percpu_counter_sum(struct percpu_counter *fbc);
+
+static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ __percpu_counter_add(fbc, amount, percpu_counter_batch);
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ s64 ret = __percpu_counter_sum(fbc);
+ return ret < 0 ? 0 : ret;
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return __percpu_counter_sum(fbc);
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+/*
+ * It is possible for percpu_counter_read() to return a small negative
+ * number for a counter which should never be negative.
+ */
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ s64 ret = fbc->count;
+
+ barrier(); /* Prevent reloads of fbc->count */
+ if (ret >= 0)
+ return ret;
+ return 1;
+}
+
+#else
+
+struct percpu_counter {
+ s64 count;
+};
+
+static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount)
+{
+ fbc->count = amount;
+ return 0;
+}
+
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+}
+
+static inline void percpu_counter_set(struct percpu_counter *fbc, s64 amount)
+{
+ fbc->count = amount;
+}
+
+#define __percpu_counter_add(fbc, amount, batch) \
+ percpu_counter_add(fbc, amount)
+
+static inline void
+percpu_counter_add(struct percpu_counter *fbc, s64 amount)
+{
+ preempt_disable();
+ fbc->count += amount;
+ preempt_enable();
+}
+
+static inline s64 percpu_counter_read(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc)
+{
+ return fbc->count;
+}
+
+static inline s64 percpu_counter_sum_positive(struct percpu_counter *fbc)
+{
+ return percpu_counter_read_positive(fbc);
+}
+
+static inline s64 percpu_counter_sum(struct percpu_counter *fbc)
+{
+ return percpu_counter_read(fbc);
+}
+
+#endif /* CONFIG_SMP */
+
+static inline void percpu_counter_inc(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, 1);
+}
+
+static inline void percpu_counter_dec(struct percpu_counter *fbc)
+{
+ percpu_counter_add(fbc, -1);
+}
+
+static inline void percpu_counter_sub(struct percpu_counter *fbc, s64 amount)
+{
+ percpu_counter_add(fbc, -amount);
+}
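+
+/*
+ * Editor's sketch (not part of the original header): typical
+ * percpu_counter usage -- cheap approximate reads on hot paths, an
+ * exact (but slower) sum only when precision matters.
+ */
+#if 0 /* illustration only */
+	struct percpu_counter nr_objs;
+
+	percpu_counter_init(&nr_objs, 0);
+	percpu_counter_inc(&nr_objs);
+	if (percpu_counter_read_positive(&nr_objs) > limit)
+		exact = percpu_counter_sum(&nr_objs);	/* folds all CPUs */
+	percpu_counter_destroy(&nr_objs);
+#endif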
+
+#endif /* _LINUX_PERCPU_COUNTER_H */
diff --git a/libdde-linux26/include/linux/preempt.h b/libdde-linux26/include/linux/preempt.h
new file mode 100644
index 00000000..203264ae
--- /dev/null
+++ b/libdde-linux26/include/linux/preempt.h
@@ -0,0 +1,139 @@
+#ifndef __LINUX_PREEMPT_H
+#define __LINUX_PREEMPT_H
+
+/*
+ * include/linux/preempt.h - macros for accessing and manipulating
+ * preempt_count (used for kernel preemption, interrupt count, etc.)
+ */
+
+#include <linux/thread_info.h>
+#include <linux/linkage.h>
+#include <linux/list.h>
+
+#if (defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)) && !defined(DDE_LINUX)
+ extern void add_preempt_count(int val);
+ extern void sub_preempt_count(int val);
+#else /* DDE_LINUX */
+# define add_preempt_count(val) do { preempt_count() += (val); } while (0)
+# define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
+#endif
+
+#define inc_preempt_count() add_preempt_count(1)
+#define dec_preempt_count() sub_preempt_count(1)
+
+#define preempt_count() (current_thread_info()->preempt_count)
+
+#ifdef CONFIG_PREEMPT
+
+asmlinkage void preempt_schedule(void);
+
+#define preempt_disable() \
+do { \
+ inc_preempt_count(); \
+ barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched() \
+do { \
+ barrier(); \
+ dec_preempt_count(); \
+} while (0)
+
+#define preempt_check_resched() \
+do { \
+ if (unlikely(test_thread_flag(TIF_NEED_RESCHED))) \
+ preempt_schedule(); \
+} while (0)
+
+#define preempt_enable() \
+do { \
+ preempt_enable_no_resched(); \
+ barrier(); \
+ preempt_check_resched(); \
+} while (0)
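+
+/*
+ * Editor's sketch (not part of the original header): a preemption-safe
+ * critical section; the calls nest via preempt_count().
+ */
+#if 0 /* illustration only */
+	preempt_disable();
+	/* ... per-CPU data is safe to touch, no migration or preemption ... */
+	preempt_enable();	/* may reschedule if a preemption became pending */
+#endif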
+
+/* For debugging and tracer internals only! */
+#define add_preempt_count_notrace(val) \
+ do { preempt_count() += (val); } while (0)
+#define sub_preempt_count_notrace(val) \
+ do { preempt_count() -= (val); } while (0)
+#define inc_preempt_count_notrace() add_preempt_count_notrace(1)
+#define dec_preempt_count_notrace() sub_preempt_count_notrace(1)
+
+#define preempt_disable_notrace() \
+do { \
+ inc_preempt_count_notrace(); \
+ barrier(); \
+} while (0)
+
+#define preempt_enable_no_resched_notrace() \
+do { \
+ barrier(); \
+ dec_preempt_count_notrace(); \
+} while (0)
+
+/* preempt_check_resched is OK to trace */
+#define preempt_enable_notrace() \
+do { \
+ preempt_enable_no_resched_notrace(); \
+ barrier(); \
+ preempt_check_resched(); \
+} while (0)
+
+#else
+
+#define preempt_disable() do { } while (0)
+#define preempt_enable_no_resched() do { } while (0)
+#define preempt_enable() do { } while (0)
+#define preempt_check_resched() do { } while (0)
+
+#define preempt_disable_notrace() do { } while (0)
+#define preempt_enable_no_resched_notrace() do { } while (0)
+#define preempt_enable_notrace() do { } while (0)
+
+#endif
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+
+struct preempt_notifier;
+
+/**
+ * preempt_ops - notifiers called when a task is preempted and rescheduled
+ * @sched_in: we're about to be rescheduled:
+ * notifier: struct preempt_notifier for the task being scheduled
+ * cpu: cpu we're scheduled on
+ * @sched_out: we've just been preempted
+ * notifier: struct preempt_notifier for the task being preempted
+ * next: the task that's kicking us out
+ */
+struct preempt_ops {
+ void (*sched_in)(struct preempt_notifier *notifier, int cpu);
+ void (*sched_out)(struct preempt_notifier *notifier,
+ struct task_struct *next);
+};
+
+/**
+ * preempt_notifier - key for installing preemption notifiers
+ * @link: internal use
+ * @ops: defines the notifier functions to be called
+ *
+ * Usually used in conjunction with container_of().
+ */
+struct preempt_notifier {
+ struct hlist_node link;
+ struct preempt_ops *ops;
+};
+
+void preempt_notifier_register(struct preempt_notifier *notifier);
+void preempt_notifier_unregister(struct preempt_notifier *notifier);
+
+static inline void preempt_notifier_init(struct preempt_notifier *notifier,
+ struct preempt_ops *ops)
+{
+ INIT_HLIST_NODE(&notifier->link);
+ notifier->ops = ops;
+}
+
+#endif
+
+#endif /* __LINUX_PREEMPT_H */
diff --git a/libdde-linux26/include/linux/sched.h b/libdde-linux26/include/linux/sched.h
new file mode 100644
index 00000000..e91c0757
--- /dev/null
+++ b/libdde-linux26/include/linux/sched.h
@@ -0,0 +1,2384 @@
+#ifndef _LINUX_SCHED_H
+#define _LINUX_SCHED_H
+
+/*
+ * cloning flags:
+ */
+#define CSIGNAL 0x000000ff /* signal mask to be sent at exit */
+#define CLONE_VM 0x00000100 /* set if VM shared between processes */
+#define CLONE_FS 0x00000200 /* set if fs info shared between processes */
+#define CLONE_FILES 0x00000400 /* set if open files shared between processes */
+#define CLONE_SIGHAND 0x00000800 /* set if signal handlers and blocked signals shared */
+#define CLONE_PTRACE 0x00002000 /* set if we want to let tracing continue on the child too */
+#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
+#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
+#define CLONE_THREAD 0x00010000 /* Same thread group? */
+#define CLONE_NEWNS 0x00020000 /* New namespace group? */
+#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
+#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
+#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
+#define CLONE_CHILD_CLEARTID 0x00200000 /* clear the TID in the child */
+#define CLONE_DETACHED 0x00400000 /* Unused, ignored */
+#define CLONE_UNTRACED 0x00800000 /* set if the tracing process can't force CLONE_PTRACE on this clone */
+#define CLONE_CHILD_SETTID 0x01000000 /* set the TID in the child */
+#define CLONE_STOPPED 0x02000000 /* Start in stopped state */
+#define CLONE_NEWUTS 0x04000000 /* New utsname group? */
+#define CLONE_NEWIPC 0x08000000 /* New ipcs */
+#define CLONE_NEWUSER 0x10000000 /* New user namespace */
+#define CLONE_NEWPID 0x20000000 /* New pid namespace */
+#define CLONE_NEWNET 0x40000000 /* New network namespace */
+#define CLONE_IO 0x80000000 /* Clone io context */
+
+/*
+ * Scheduling policies
+ */
+#define SCHED_NORMAL 0
+#define SCHED_FIFO 1
+#define SCHED_RR 2
+#define SCHED_BATCH 3
+/* SCHED_ISO: reserved but not implemented yet */
+#define SCHED_IDLE 5
+
+#ifdef __KERNEL__
+
+struct sched_param {
+ int sched_priority;
+};
+
+#include <asm/param.h> /* for HZ */
+
+#include <linux/capability.h>
+#include <linux/threads.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/timex.h>
+#include <linux/jiffies.h>
+#include <linux/rbtree.h>
+#include <linux/thread_info.h>
+#include <linux/cpumask.h>
+#include <linux/errno.h>
+#include <linux/nodemask.h>
+#include <linux/mm_types.h>
+
+#include <asm/system.h>
+#include <asm/page.h>
+#include <asm/ptrace.h>
+#include <asm/cputime.h>
+
+#include <linux/smp.h>
+#include <linux/sem.h>
+#include <linux/signal.h>
+#include <linux/fs_struct.h>
+#include <linux/compiler.h>
+#include <linux/completion.h>
+#include <linux/pid.h>
+#include <linux/percpu.h>
+#include <linux/topology.h>
+#include <linux/proportions.h>
+#include <linux/seccomp.h>
+#include <linux/rcupdate.h>
+#include <linux/rtmutex.h>
+
+#include <linux/time.h>
+#include <linux/param.h>
+#include <linux/resource.h>
+#include <linux/timer.h>
+#include <linux/hrtimer.h>
+#include <linux/task_io_accounting.h>
+#include <linux/kobject.h>
+#include <linux/latencytop.h>
+#include <linux/cred.h>
+
+#include <asm/processor.h>
+
+struct mem_cgroup;
+struct exec_domain;
+struct futex_pi_state;
+struct robust_list_head;
+struct bio;
+struct bts_tracer;
+
+/*
+ * List of flags we want to share for kernel threads,
+ * if only because they are not used by them anyway.
+ */
+#define CLONE_KERNEL (CLONE_FS | CLONE_FILES | CLONE_SIGHAND)
+
+/*
+ * These are the constants used to fake the fixed-point load-average
+ * counting. Some notes:
+ * - 11 bit fractions expand to 22 bits by the multiplies: this gives
+ * a load-average precision of 10 bits integer + 11 bits fractional
+ * - if you want to count load-averages more often, you need more
+ * precision, or rounding will get you. With 2-second counting freq,
+ * the EXP_n values would be 1981, 2034 and 2043 if still using only
+ * 11 bit fractions.
+ */
+extern unsigned long avenrun[]; /* Load averages */
+
+#define FSHIFT 11 /* nr of bits of precision */
+#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
+#define LOAD_FREQ (5*HZ+1) /* 5 sec intervals */
+#define EXP_1 1884 /* 1/exp(5sec/1min) as fixed-point */
+#define EXP_5 2014 /* 1/exp(5sec/5min) */
+#define EXP_15 2037 /* 1/exp(5sec/15min) */
+
+#define CALC_LOAD(load,exp,n) \
+ load *= exp; \
+ load += n*(FIXED_1-exp); \
+ load >>= FSHIFT;
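+
+/*
+ * Editor's sketch (not part of the original header): how the scheduler
+ * applies CALC_LOAD every LOAD_FREQ ticks (cf. kernel/timer.c).
+ */
+#if 0 /* illustration only */
+	unsigned long active = nr_active() * FIXED_1;
+
+	CALC_LOAD(avenrun[0], EXP_1, active);	/* 1-minute average */
+	CALC_LOAD(avenrun[1], EXP_5, active);	/* 5-minute average */
+	CALC_LOAD(avenrun[2], EXP_15, active);	/* 15-minute average */
+#endif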
+
+extern unsigned long total_forks;
+extern int nr_threads;
+DECLARE_PER_CPU(unsigned long, process_counts);
+extern int nr_processes(void);
+extern unsigned long nr_running(void);
+extern unsigned long nr_uninterruptible(void);
+extern unsigned long nr_active(void);
+extern unsigned long nr_iowait(void);
+
+struct seq_file;
+struct cfs_rq;
+struct task_group;
+#ifdef CONFIG_SCHED_DEBUG
+extern void proc_sched_show_task(struct task_struct *p, struct seq_file *m);
+extern void proc_sched_set_task(struct task_struct *p);
+extern void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
+#else
+static inline void
+proc_sched_show_task(struct task_struct *p, struct seq_file *m)
+{
+}
+static inline void proc_sched_set_task(struct task_struct *p)
+{
+}
+static inline void
+print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
+{
+}
+#endif
+
+extern unsigned long long time_sync_thresh;
+
+/*
+ * Task state bitmask. NOTE! These bits are also
+ * encoded in fs/proc/array.c: get_task_state().
+ *
+ * We have two separate sets of flags: task->state
+ * is about runnability, while task->exit_state is
+ * about the task exiting. Confusing, but this way
+ * modifying one set can't modify the other one by
+ * mistake.
+ */
+#define TASK_RUNNING 0
+#define TASK_INTERRUPTIBLE 1
+#define TASK_UNINTERRUPTIBLE 2
+#define __TASK_STOPPED 4
+#define __TASK_TRACED 8
+/* in tsk->exit_state */
+#define EXIT_ZOMBIE 16
+#define EXIT_DEAD 32
+/* in tsk->state again */
+#define TASK_DEAD 64
+#define TASK_WAKEKILL 128
+
+/* Convenience macros for the sake of set_task_state */
+#define TASK_KILLABLE (TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
+#define TASK_STOPPED (TASK_WAKEKILL | __TASK_STOPPED)
+#define TASK_TRACED (TASK_WAKEKILL | __TASK_TRACED)
+
+/* Convenience macros for the sake of wake_up */
+#define TASK_NORMAL (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
+#define TASK_ALL (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
+
+/* get_task_state() */
+#define TASK_REPORT (TASK_RUNNING | TASK_INTERRUPTIBLE | \
+ TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
+ __TASK_TRACED)
+
+#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
+#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
+#define task_is_stopped_or_traced(task) \
+ ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
+#define task_contributes_to_load(task) \
+ ((task->state & TASK_UNINTERRUPTIBLE) != 0)
+
+#define __set_task_state(tsk, state_value) \
+ do { (tsk)->state = (state_value); } while (0)
+#define set_task_state(tsk, state_value) \
+ set_mb((tsk)->state, (state_value))
+
+/*
+ * set_current_state() includes a barrier so that the write of current->state
+ * is correctly serialised wrt the caller's subsequent test of whether to
+ * actually sleep:
+ *
+ * set_current_state(TASK_UNINTERRUPTIBLE);
+ * if (do_i_need_to_sleep())
+ * schedule();
+ *
+ * If the caller does not need such serialisation then use __set_current_state()
+ */
+#define __set_current_state(state_value) \
+ do { current->state = (state_value); } while (0)
+#define set_current_state(state_value) \
+ set_mb(current->state, (state_value))
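+
+/*
+ * For reference, a sketch of the matching waker side (wake_up_process()
+ * is declared further down in this header):
+ *
+ *	condition = 1;
+ *	wake_up_process(sleeping_task);
+ *
+ * The barrier implied by set_mb() orders the sleeper's ->state write
+ * against its subsequent condition test, so a wakeup racing with the
+ * test cannot be lost.
+ */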
+
+/* Task command name length */
+#define TASK_COMM_LEN 16
+
+#include <linux/spinlock.h>
+
+/*
+ * This serializes "schedule()" and also protects
+ * the run-queue from deletions/modifications (but
+ * _adding_ to the beginning of the run-queue has
+ * a separate lock).
+ */
+extern rwlock_t tasklist_lock;
+extern spinlock_t mmlist_lock;
+
+struct task_struct;
+
+extern void sched_init(void);
+extern void sched_init_smp(void);
+extern asmlinkage void schedule_tail(struct task_struct *prev);
+extern void init_idle(struct task_struct *idle, int cpu);
+extern void init_idle_bootup_task(struct task_struct *idle);
+
+extern int runqueue_is_locked(void);
+extern void task_rq_unlock_wait(struct task_struct *p);
+
+extern cpumask_var_t nohz_cpu_mask;
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
+extern int select_nohz_load_balancer(int cpu);
+#else
+static inline int select_nohz_load_balancer(int cpu)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Only dump TASK_* tasks. (0 for all tasks)
+ */
+extern void show_state_filter(unsigned long state_filter);
+
+static inline void show_state(void)
+{
+ show_state_filter(0);
+}
+
+extern void show_regs(struct pt_regs *);
+
+/*
+ * TASK is a pointer to the task whose backtrace we want to see (or NULL
+ * for the current task), SP is the stack pointer of the first frame that
+ * should be shown in the backtrace (or NULL if the entire call-chain of
+ * the task should be shown).
+ */
+extern void show_stack(struct task_struct *task, unsigned long *sp);
+
+void io_schedule(void);
+long io_schedule_timeout(long timeout);
+
+extern void cpu_init (void);
+extern void trap_init(void);
+extern void update_process_times(int user);
+extern void scheduler_tick(void);
+
+extern void sched_show_task(struct task_struct *p);
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+extern void softlockup_tick(void);
+extern void touch_softlockup_watchdog(void);
+extern void touch_all_softlockup_watchdogs(void);
+extern int proc_dosoftlockup_thresh(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer,
+ size_t *lenp, loff_t *ppos);
+extern unsigned int softlockup_panic;
+extern unsigned long sysctl_hung_task_check_count;
+extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_warnings;
+extern int softlockup_thresh;
+#else
+static inline void softlockup_tick(void)
+{
+}
+static inline void spawn_softlockup_task(void)
+{
+}
+static inline void touch_softlockup_watchdog(void)
+{
+}
+static inline void touch_all_softlockup_watchdogs(void)
+{
+}
+#endif
+
+
+/* Attach to any functions which should be ignored in wchan output. */
+#define __sched __attribute__((__section__(".sched.text")))
+
+/* Linker adds these: start and end of __sched functions */
+extern char __sched_text_start[], __sched_text_end[];
+
+/* Is this address in the __sched functions? */
+extern int in_sched_functions(unsigned long addr);
+
+#define MAX_SCHEDULE_TIMEOUT LONG_MAX
+extern signed long schedule_timeout(signed long timeout);
+extern signed long schedule_timeout_interruptible(signed long timeout);
+extern signed long schedule_timeout_killable(signed long timeout);
+extern signed long schedule_timeout_uninterruptible(signed long timeout);
+asmlinkage void schedule(void);
+
+struct nsproxy;
+struct user_namespace;
+
+/* Maximum number of active map areas. This is a random (large) number. */
+#define DEFAULT_MAX_MAP_COUNT 65536
+
+extern int sysctl_max_map_count;
+
+#include <linux/aio.h>
+
+extern unsigned long
+arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
+ unsigned long, unsigned long);
+extern unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags);
+extern void arch_unmap_area(struct mm_struct *, unsigned long);
+extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
+
+#if USE_SPLIT_PTLOCKS
+/*
+ * The mm counters are not protected by its page_table_lock,
+ * so must be incremented atomically.
+ */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+
+#else /* !USE_SPLIT_PTLOCKS */
+/*
+ * The mm counters are protected by its page_table_lock,
+ * so can be incremented directly.
+ */
+#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
+#define get_mm_counter(mm, member) ((mm)->_##member)
+#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
+#define inc_mm_counter(mm, member) (mm)->_##member++
+#define dec_mm_counter(mm, member) (mm)->_##member--
+
+#endif /* !USE_SPLIT_PTLOCKS */
+
+#define get_mm_rss(mm) \
+ (get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
+#define update_hiwater_rss(mm) do { \
+ unsigned long _rss = get_mm_rss(mm); \
+ if ((mm)->hiwater_rss < _rss) \
+ (mm)->hiwater_rss = _rss; \
+} while (0)
+#define update_hiwater_vm(mm) do { \
+ if ((mm)->hiwater_vm < (mm)->total_vm) \
+ (mm)->hiwater_vm = (mm)->total_vm; \
+} while (0)
+
+#define get_mm_hiwater_rss(mm) max((mm)->hiwater_rss, get_mm_rss(mm))
+#define get_mm_hiwater_vm(mm) max((mm)->hiwater_vm, (mm)->total_vm)
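+
+/*
+ * Sketched fault-path usage of the counter helpers above: account a
+ * freshly mapped anonymous page, then refresh the RSS high-water mark:
+ *
+ *	inc_mm_counter(mm, anon_rss);
+ *	update_hiwater_rss(mm);
+ *
+ * With USE_SPLIT_PTLOCKS the increment is atomic, so the caller does
+ * not need mm->page_table_lock for the counter update itself.
+ */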
+
+extern void set_dumpable(struct mm_struct *mm, int value);
+extern int get_dumpable(struct mm_struct *mm);
+
+/* mm flags */
+/* dumpable bits */
+#define MMF_DUMPABLE 0 /* core dump is permitted */
+#define MMF_DUMP_SECURELY 1 /* core file is readable only by root */
+#define MMF_DUMPABLE_BITS 2
+
+/* coredump filter bits */
+#define MMF_DUMP_ANON_PRIVATE 2
+#define MMF_DUMP_ANON_SHARED 3
+#define MMF_DUMP_MAPPED_PRIVATE 4
+#define MMF_DUMP_MAPPED_SHARED 5
+#define MMF_DUMP_ELF_HEADERS 6
+#define MMF_DUMP_HUGETLB_PRIVATE 7
+#define MMF_DUMP_HUGETLB_SHARED 8
+#define MMF_DUMP_FILTER_SHIFT MMF_DUMPABLE_BITS
+#define MMF_DUMP_FILTER_BITS 7
+#define MMF_DUMP_FILTER_MASK \
+ (((1 << MMF_DUMP_FILTER_BITS) - 1) << MMF_DUMP_FILTER_SHIFT)
+#define MMF_DUMP_FILTER_DEFAULT \
+ ((1 << MMF_DUMP_ANON_PRIVATE) | (1 << MMF_DUMP_ANON_SHARED) |\
+ (1 << MMF_DUMP_HUGETLB_PRIVATE) | MMF_DUMP_MASK_DEFAULT_ELF)
+
+#ifdef CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS
+# define MMF_DUMP_MASK_DEFAULT_ELF (1 << MMF_DUMP_ELF_HEADERS)
+#else
+# define MMF_DUMP_MASK_DEFAULT_ELF 0
+#endif
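+
+/*
+ * Worked example of the layout above: the filter occupies bits 2..8 of
+ * mm->flags, so MMF_DUMP_FILTER_MASK == 0x1fc.  The default dumps
+ * private and shared anonymous plus private hugetlb mappings:
+ *
+ *	MMF_DUMP_FILTER_DEFAULT
+ *		== (1 << 2) | (1 << 3) | (1 << 7) | MMF_DUMP_MASK_DEFAULT_ELF
+ *		== 0x8c	(0xcc with CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS)
+ */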
+
+struct sighand_struct {
+ atomic_t count;
+ struct k_sigaction action[_NSIG];
+ spinlock_t siglock;
+ wait_queue_head_t signalfd_wqh;
+};
+
+struct pacct_struct {
+ int ac_flag;
+ long ac_exitcode;
+ unsigned long ac_mem;
+ cputime_t ac_utime, ac_stime;
+ unsigned long ac_minflt, ac_majflt;
+};
+
+/**
+ * struct task_cputime - collected CPU time counts
+ * @utime: time spent in user mode, in &cputime_t units
+ * @stime: time spent in kernel mode, in &cputime_t units
+ * @sum_exec_runtime: total time spent on the CPU, in nanoseconds
+ *
+ * This structure groups together three kinds of CPU time that are
+ * tracked for threads and thread groups. Most things considering
+ * CPU time want to group these counts together and treat all three
+ * of them in parallel.
+ */
+struct task_cputime {
+ cputime_t utime;
+ cputime_t stime;
+ unsigned long long sum_exec_runtime;
+};
+/* Alternate field names when used to cache expirations. */
+#define prof_exp stime
+#define virt_exp utime
+#define sched_exp sum_exec_runtime
+
+#define INIT_CPUTIME \
+ (struct task_cputime) { \
+ .utime = cputime_zero, \
+ .stime = cputime_zero, \
+ .sum_exec_runtime = 0, \
+ }
+
+/**
+ * struct thread_group_cputimer - thread group interval timer counts
+ * @cputime: thread group interval timers.
+ * @running: non-zero when there are timers running and
+ * @cputime receives updates.
+ * @lock: lock for fields in this struct.
+ *
+ * This structure contains the version of task_cputime, above, that is
+ * used for thread group CPU timer calculations.
+ */
+struct thread_group_cputimer {
+ struct task_cputime cputime;
+ int running;
+ spinlock_t lock;
+};
+
+/*
+ * NOTE! "signal_struct" does not have its own
+ * locking, because a shared signal_struct always
+ * implies a shared sighand_struct, so locking
+ * sighand_struct is always a proper superset of
+ * the locking of signal_struct.
+ */
+struct signal_struct {
+ atomic_t count;
+ atomic_t live;
+
+ wait_queue_head_t wait_chldexit; /* for wait4() */
+
+ /* current thread group signal load-balancing target: */
+ struct task_struct *curr_target;
+
+ /* shared signal handling: */
+ struct sigpending shared_pending;
+
+ /* thread group exit support */
+ int group_exit_code;
+ /* overloaded:
+ * - notify group_exit_task when ->count is equal to notify_count
+ * - everyone except group_exit_task is stopped during signal delivery
+ * of fatal signals, group_exit_task processes the signal.
+ */
+ int notify_count;
+ struct task_struct *group_exit_task;
+
+ /* thread group stop support, overloads group_exit_code too */
+ int group_stop_count;
+ unsigned int flags; /* see SIGNAL_* flags below */
+
+ /* POSIX.1b Interval Timers */
+ struct list_head posix_timers;
+
+ /* ITIMER_REAL timer for the process */
+ struct hrtimer real_timer;
+ struct pid *leader_pid;
+ ktime_t it_real_incr;
+
+ /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */
+ cputime_t it_prof_expires, it_virt_expires;
+ cputime_t it_prof_incr, it_virt_incr;
+
+ /*
+ * Thread group totals for process CPU timers.
+ * See thread_group_cputimer(), et al, for details.
+ */
+ struct thread_group_cputimer cputimer;
+
+ /* Earliest-expiration cache. */
+ struct task_cputime cputime_expires;
+
+ struct list_head cpu_timers[3];
+
+ /* job control IDs */
+
+ /*
+ * pgrp and session fields are deprecated.
+ * use the task_session_Xnr and task_pgrp_Xnr routines below
+ */
+
+ union {
+ pid_t pgrp __deprecated;
+ pid_t __pgrp;
+ };
+
+ struct pid *tty_old_pgrp;
+
+ union {
+ pid_t session __deprecated;
+ pid_t __session;
+ };
+
+ /* boolean value for session group leader */
+ int leader;
+
+ struct tty_struct *tty; /* NULL if no tty */
+
+ /*
+ * Cumulative resource counters for dead threads in the group,
+ * and for reaped dead child processes forked by this group.
+ * Live threads maintain their own counters and add to these
+ * in __exit_signal, except for the group leader.
+ */
+ cputime_t utime, stime, cutime, cstime;
+ cputime_t gtime;
+ cputime_t cgtime;
+ unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
+ unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
+ unsigned long inblock, oublock, cinblock, coublock;
+ struct task_io_accounting ioac;
+
+ /*
+	 * Cumulative ns of scheduled CPU time of dead threads in the
+	 * group, not including a zombie group leader. (This only differs
+ * from jiffies_to_ns(utime + stime) if sched_clock uses something
+ * other than jiffies.)
+ */
+ unsigned long long sum_sched_runtime;
+
+ /*
+ * We don't bother to synchronize most readers of this at all,
+ * because there is no reader checking a limit that actually needs
+ * to get both rlim_cur and rlim_max atomically, and either one
+ * alone is a single word that can safely be read normally.
+ * getrlimit/setrlimit use task_lock(current->group_leader) to
+ * protect this instead of the siglock, because they really
+ * have no need to disable irqs.
+ */
+ struct rlimit rlim[RLIM_NLIMITS];
+
+#ifdef CONFIG_BSD_PROCESS_ACCT
+ struct pacct_struct pacct; /* per-process accounting information */
+#endif
+#ifdef CONFIG_TASKSTATS
+ struct taskstats *stats;
+#endif
+#ifdef CONFIG_AUDIT
+ unsigned audit_tty;
+ struct tty_audit_buf *tty_audit_buf;
+#endif
+};
+
+/* Context switch must be unlocked if interrupts are to be enabled */
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+# define __ARCH_WANT_UNLOCKED_CTXSW
+#endif
+
+/*
+ * Bits in flags field of signal_struct.
+ */
+#define SIGNAL_STOP_STOPPED 0x00000001 /* job control stop in effect */
+#define SIGNAL_STOP_DEQUEUED 0x00000002 /* stop signal dequeued */
+#define SIGNAL_STOP_CONTINUED 0x00000004 /* SIGCONT since WCONTINUED reap */
+#define SIGNAL_GROUP_EXIT 0x00000008 /* group exit in progress */
+/*
+ * Pending notifications to parent.
+ */
+#define SIGNAL_CLD_STOPPED 0x00000010
+#define SIGNAL_CLD_CONTINUED 0x00000020
+#define SIGNAL_CLD_MASK (SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)
+
+#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+
+/* If true, all threads except ->group_exit_task have pending SIGKILL */
+static inline int signal_group_exit(const struct signal_struct *sig)
+{
+ return (sig->flags & SIGNAL_GROUP_EXIT) ||
+ (sig->group_exit_task != NULL);
+}
+
+/*
+ * Some day this will be a full-fledged user tracking system.
+ */
+struct user_struct {
+ atomic_t __count; /* reference count */
+ atomic_t processes; /* How many processes does this user have? */
+ atomic_t files; /* How many open files does this user have? */
+ atomic_t sigpending; /* How many pending signals does this user have? */
+#ifdef CONFIG_INOTIFY_USER
+ atomic_t inotify_watches; /* How many inotify watches does this user have? */
+ atomic_t inotify_devs; /* How many inotify devs does this user have opened? */
+#endif
+#ifdef CONFIG_EPOLL
+ atomic_t epoll_watches; /* The number of file descriptors currently watched */
+#endif
+#ifdef CONFIG_POSIX_MQUEUE
+ /* protected by mq_lock */
+ unsigned long mq_bytes; /* How many bytes can be allocated to mqueue? */
+#endif
+ unsigned long locked_shm; /* How many pages of mlocked shm ? */
+
+#ifdef CONFIG_KEYS
+ struct key *uid_keyring; /* UID specific keyring */
+ struct key *session_keyring; /* UID's default session keyring */
+#endif
+
+ /* Hash table maintenance information */
+ struct hlist_node uidhash_node;
+ uid_t uid;
+ struct user_namespace *user_ns;
+
+#ifdef CONFIG_USER_SCHED
+ struct task_group *tg;
+#ifdef CONFIG_SYSFS
+ struct kobject kobj;
+ struct work_struct work;
+#endif
+#endif
+};
+
+extern int uids_sysfs_init(void);
+
+extern struct user_struct *find_user(uid_t);
+
+extern struct user_struct root_user;
+#define INIT_USER (&root_user)
+
+
+struct backing_dev_info;
+struct reclaim_state;
+
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+struct sched_info {
+ /* cumulative counters */
+ unsigned long pcount; /* # of times run on this cpu */
+ unsigned long long run_delay; /* time spent waiting on a runqueue */
+
+ /* timestamps */
+ unsigned long long last_arrival,/* when we last ran on a cpu */
+ last_queued; /* when we were last queued to run */
+#ifdef CONFIG_SCHEDSTATS
+ /* BKL stats */
+ unsigned int bkl_count;
+#endif
+};
+#endif /* defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) */
+
+#ifdef CONFIG_TASK_DELAY_ACCT
+struct task_delay_info {
+ spinlock_t lock;
+ unsigned int flags; /* Private per-task flags */
+
+ /* For each stat XXX, add following, aligned appropriately
+ *
+ * struct timespec XXX_start, XXX_end;
+ * u64 XXX_delay;
+ * u32 XXX_count;
+ *
+ * Atomicity of updates to XXX_delay, XXX_count protected by
+ * single lock above (split into XXX_lock if contention is an issue).
+ */
+
+ /*
+ * XXX_count is incremented on every XXX operation, the delay
+ * associated with the operation is added to XXX_delay.
+ * XXX_delay contains the accumulated delay time in nanoseconds.
+ */
+ struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */
+ u64 blkio_delay; /* wait for sync block io completion */
+ u64 swapin_delay; /* wait for swapin block io completion */
+ u32 blkio_count; /* total count of the number of sync block */
+ /* io operations performed */
+ u32 swapin_count; /* total count of the number of swapin block */
+ /* io operations performed */
+
+ struct timespec freepages_start, freepages_end;
+ u64 freepages_delay; /* wait for memory reclaim */
+ u32 freepages_count; /* total count of memory reclaim */
+};
+#endif /* CONFIG_TASK_DELAY_ACCT */
+
+static inline int sched_info_on(void)
+{
+#ifdef CONFIG_SCHEDSTATS
+ return 1;
+#elif defined(CONFIG_TASK_DELAY_ACCT)
+ extern int delayacct_on;
+ return delayacct_on;
+#else
+ return 0;
+#endif
+}
+
+enum cpu_idle_type {
+ CPU_IDLE,
+ CPU_NOT_IDLE,
+ CPU_NEWLY_IDLE,
+ CPU_MAX_IDLE_TYPES
+};
+
+/*
+ * sched-domains (multiprocessor balancing) declarations:
+ */
+
+/*
+ * Increase resolution of nice-level calculations:
+ */
+#define SCHED_LOAD_SHIFT 10
+#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT)
+
+#define SCHED_LOAD_SCALE_FUZZ SCHED_LOAD_SCALE
+
+#ifdef CONFIG_SMP
+#define SD_LOAD_BALANCE 1 /* Do load balancing on this domain. */
+#define SD_BALANCE_NEWIDLE 2 /* Balance when about to become idle */
+#define SD_BALANCE_EXEC 4 /* Balance on exec */
+#define SD_BALANCE_FORK 8 /* Balance on fork, clone */
+#define SD_WAKE_IDLE 16 /* Wake to idle CPU on task wakeup */
+#define SD_WAKE_AFFINE 32 /* Wake task to waking CPU */
+#define SD_WAKE_BALANCE 64 /* Perform balancing at task wakeup */
+#define SD_SHARE_CPUPOWER 128 /* Domain members share cpu power */
+#define SD_POWERSAVINGS_BALANCE 256 /* Balance for power savings */
+#define SD_SHARE_PKG_RESOURCES 512 /* Domain members share cpu pkg resources */
+#define SD_SERIALIZE 1024 /* Only a single load balancing instance */
+#define SD_WAKE_IDLE_FAR 2048 /* Gain latency sacrificing cache hit */
+
+enum powersavings_balance_level {
+ POWERSAVINGS_BALANCE_NONE = 0, /* No power saving load balance */
+ POWERSAVINGS_BALANCE_BASIC, /* Fill one thread/core/package
+ * first for long running threads
+ */
+ POWERSAVINGS_BALANCE_WAKEUP, /* Also bias task wakeups to semi-idle
+ * cpu package for power savings
+ */
+ MAX_POWERSAVINGS_BALANCE_LEVELS
+};
+
+extern int sched_mc_power_savings, sched_smt_power_savings;
+
+static inline int sd_balance_for_mc_power(void)
+{
+ if (sched_smt_power_savings)
+ return SD_POWERSAVINGS_BALANCE;
+
+ return 0;
+}
+
+static inline int sd_balance_for_package_power(void)
+{
+ if (sched_mc_power_savings | sched_smt_power_savings)
+ return SD_POWERSAVINGS_BALANCE;
+
+ return 0;
+}
+
+/*
+ * Optimise SD flags for power savings:
+ * SD_BALANCE_NEWIDLE helps aggressive task consolidation and power savings.
+ * Keep default SD flags if sched_{smt,mc}_power_saving=0
+ */
+
+static inline int sd_power_saving_flags(void)
+{
+ if (sched_mc_power_savings | sched_smt_power_savings)
+ return SD_BALANCE_NEWIDLE;
+
+ return 0;
+}
+
+struct sched_group {
+ struct sched_group *next; /* Must be a circular list */
+
+ /*
+ * CPU power of this group, SCHED_LOAD_SCALE being max power for a
+ * single CPU. This is read only (except for setup, hotplug CPU).
+ * Note: Never change cpu_power without recomputing its reciprocal.
+ */
+ unsigned int __cpu_power;
+ /*
+ * reciprocal value of cpu_power to avoid expensive divides
+ * (see include/linux/reciprocal_div.h)
+ */
+ u32 reciprocal_cpu_power;
+
+ unsigned long cpumask[];
+};
+
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+ return to_cpumask(sg->cpumask);
+}
+
+enum sched_domain_level {
+ SD_LV_NONE = 0,
+ SD_LV_SIBLING,
+ SD_LV_MC,
+ SD_LV_CPU,
+ SD_LV_NODE,
+ SD_LV_ALLNODES,
+ SD_LV_MAX
+};
+
+struct sched_domain_attr {
+ int relax_domain_level;
+};
+
+#define SD_ATTR_INIT (struct sched_domain_attr) { \
+ .relax_domain_level = -1, \
+}
+
+struct sched_domain {
+ /* These fields must be setup */
+ struct sched_domain *parent; /* top domain must be null terminated */
+ struct sched_domain *child; /* bottom domain must be null terminated */
+ struct sched_group *groups; /* the balancing groups of the domain */
+ unsigned long min_interval; /* Minimum balance interval ms */
+ unsigned long max_interval; /* Maximum balance interval ms */
+ unsigned int busy_factor; /* less balancing by factor if busy */
+ unsigned int imbalance_pct; /* No balance until over watermark */
+ unsigned int cache_nice_tries; /* Leave cache hot tasks for # tries */
+ unsigned int busy_idx;
+ unsigned int idle_idx;
+ unsigned int newidle_idx;
+ unsigned int wake_idx;
+ unsigned int forkexec_idx;
+ int flags; /* See SD_* */
+ enum sched_domain_level level;
+
+ /* Runtime fields. */
+ unsigned long last_balance; /* init to jiffies. units in jiffies */
+ unsigned int balance_interval; /* initialise to 1. units in ms. */
+ unsigned int nr_balance_failed; /* initialise to 0 */
+
+ u64 last_update;
+
+#ifdef CONFIG_SCHEDSTATS
+ /* load_balance() stats */
+ unsigned int lb_count[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_failed[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_balanced[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_imbalance[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_gained[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_hot_gained[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_nobusyg[CPU_MAX_IDLE_TYPES];
+ unsigned int lb_nobusyq[CPU_MAX_IDLE_TYPES];
+
+ /* Active load balancing */
+ unsigned int alb_count;
+ unsigned int alb_failed;
+ unsigned int alb_pushed;
+
+ /* SD_BALANCE_EXEC stats */
+ unsigned int sbe_count;
+ unsigned int sbe_balanced;
+ unsigned int sbe_pushed;
+
+ /* SD_BALANCE_FORK stats */
+ unsigned int sbf_count;
+ unsigned int sbf_balanced;
+ unsigned int sbf_pushed;
+
+ /* try_to_wake_up() stats */
+ unsigned int ttwu_wake_remote;
+ unsigned int ttwu_move_affine;
+ unsigned int ttwu_move_balance;
+#endif
+#ifdef CONFIG_SCHED_DEBUG
+ char *name;
+#endif
+
+ /* span of all CPUs in this domain */
+ unsigned long span[];
+};
+
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+ return to_cpumask(sd->span);
+}
+
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+ struct sched_domain_attr *dattr_new);
+
+/* Test a flag in parent sched domain */
+static inline int test_sd_parent(struct sched_domain *sd, int flag)
+{
+ if (sd->parent && (sd->parent->flags & flag))
+ return 1;
+
+ return 0;
+}
+
+#else /* CONFIG_SMP */
+
+struct sched_domain_attr;
+
+static inline void
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
+ struct sched_domain_attr *dattr_new)
+{
+}
+#endif /* !CONFIG_SMP */
+
+struct io_context; /* See blkdev.h */
+
+
+#ifdef ARCH_HAS_PREFETCH_SWITCH_STACK
+extern void prefetch_stack(struct task_struct *t);
+#else
+static inline void prefetch_stack(struct task_struct *t) { }
+#endif
+
+struct audit_context; /* See audit.c */
+struct mempolicy;
+struct pipe_inode_info;
+struct uts_namespace;
+
+struct rq;
+struct sched_domain;
+
+struct sched_class {
+ const struct sched_class *next;
+
+ void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup);
+ void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep);
+ void (*yield_task) (struct rq *rq);
+
+ void (*check_preempt_curr) (struct rq *rq, struct task_struct *p, int sync);
+
+ struct task_struct * (*pick_next_task) (struct rq *rq);
+ void (*put_prev_task) (struct rq *rq, struct task_struct *p);
+
+#ifdef CONFIG_SMP
+ int (*select_task_rq)(struct task_struct *p, int sync);
+
+ unsigned long (*load_balance) (struct rq *this_rq, int this_cpu,
+ struct rq *busiest, unsigned long max_load_move,
+ struct sched_domain *sd, enum cpu_idle_type idle,
+ int *all_pinned, int *this_best_prio);
+
+ int (*move_one_task) (struct rq *this_rq, int this_cpu,
+ struct rq *busiest, struct sched_domain *sd,
+ enum cpu_idle_type idle);
+ void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
+ void (*post_schedule) (struct rq *this_rq);
+ void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
+
+ void (*set_cpus_allowed)(struct task_struct *p,
+ const struct cpumask *newmask);
+
+ void (*rq_online)(struct rq *rq);
+ void (*rq_offline)(struct rq *rq);
+#endif
+
+ void (*set_curr_task) (struct rq *rq);
+ void (*task_tick) (struct rq *rq, struct task_struct *p, int queued);
+ void (*task_new) (struct rq *rq, struct task_struct *p);
+
+ void (*switched_from) (struct rq *this_rq, struct task_struct *task,
+ int running);
+ void (*switched_to) (struct rq *this_rq, struct task_struct *task,
+ int running);
+ void (*prio_changed) (struct rq *this_rq, struct task_struct *task,
+ int oldprio, int running);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ void (*moved_group) (struct task_struct *p);
+#endif
+};
+
+struct load_weight {
+ unsigned long weight, inv_weight;
+};
+
+/*
+ * CFS stats for a schedulable entity (task, task-group etc)
+ *
+ * Current field usage histogram:
+ *
+ * 4 se->block_start
+ * 4 se->run_node
+ * 4 se->sleep_start
+ * 6 se->load.weight
+ */
+struct sched_entity {
+ struct load_weight load; /* for load-balancing */
+ struct rb_node run_node;
+ struct list_head group_node;
+ unsigned int on_rq;
+
+ u64 exec_start;
+ u64 sum_exec_runtime;
+ u64 vruntime;
+ u64 prev_sum_exec_runtime;
+
+ u64 last_wakeup;
+ u64 avg_overlap;
+
+#ifdef CONFIG_SCHEDSTATS
+ u64 wait_start;
+ u64 wait_max;
+ u64 wait_count;
+ u64 wait_sum;
+
+ u64 sleep_start;
+ u64 sleep_max;
+ s64 sum_sleep_runtime;
+
+ u64 block_start;
+ u64 block_max;
+ u64 exec_max;
+ u64 slice_max;
+
+ u64 nr_migrations;
+ u64 nr_migrations_cold;
+ u64 nr_failed_migrations_affine;
+ u64 nr_failed_migrations_running;
+ u64 nr_failed_migrations_hot;
+ u64 nr_forced_migrations;
+ u64 nr_forced2_migrations;
+
+ u64 nr_wakeups;
+ u64 nr_wakeups_sync;
+ u64 nr_wakeups_migrate;
+ u64 nr_wakeups_local;
+ u64 nr_wakeups_remote;
+ u64 nr_wakeups_affine;
+ u64 nr_wakeups_affine_attempts;
+ u64 nr_wakeups_passive;
+ u64 nr_wakeups_idle;
+#endif
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+ struct sched_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct cfs_rq *cfs_rq;
+ /* rq "owned" by this entity/group: */
+ struct cfs_rq *my_q;
+#endif
+};
+
+struct sched_rt_entity {
+ struct list_head run_list;
+ unsigned long timeout;
+ unsigned int time_slice;
+ int nr_cpus_allowed;
+
+ struct sched_rt_entity *back;
+#ifdef CONFIG_RT_GROUP_SCHED
+ struct sched_rt_entity *parent;
+ /* rq on which this entity is (to be) queued: */
+ struct rt_rq *rt_rq;
+ /* rq "owned" by this entity/group: */
+ struct rt_rq *my_q;
+#endif
+};
+
+struct task_struct {
+ volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */
+ void *stack;
+ atomic_t usage;
+ unsigned int flags; /* per process flags, defined below */
+ unsigned int ptrace;
+
+ int lock_depth; /* BKL lock depth */
+
+#ifdef CONFIG_SMP
+#ifdef __ARCH_WANT_UNLOCKED_CTXSW
+ int oncpu;
+#endif
+#endif
+
+ int prio, static_prio, normal_prio;
+ unsigned int rt_priority;
+ const struct sched_class *sched_class;
+ struct sched_entity se;
+ struct sched_rt_entity rt;
+
+#ifdef CONFIG_PREEMPT_NOTIFIERS
+ /* list of struct preempt_notifier: */
+ struct hlist_head preempt_notifiers;
+#endif
+
+ /*
+	 * fpu_counter contains the number of consecutive context switches
+	 * during which the FPU is used. If this is over a threshold, the
+	 * lazy FPU saving becomes unlazy to save the trap. This is an
+	 * unsigned char so that after 256 times the counter wraps and the
+	 * behavior turns lazy again; this is to deal with bursty apps that
+	 * only use the FPU for a short time.
+ */
+ unsigned char fpu_counter;
+ s8 oomkilladj; /* OOM kill score adjustment (bit shift). */
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+ unsigned int btrace_seq;
+#endif
+
+ unsigned int policy;
+ cpumask_t cpus_allowed;
+
+#ifdef CONFIG_PREEMPT_RCU
+ int rcu_read_lock_nesting;
+ int rcu_flipctr_idx;
+#endif /* #ifdef CONFIG_PREEMPT_RCU */
+
+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
+ struct sched_info sched_info;
+#endif
+
+ struct list_head tasks;
+
+ struct mm_struct *mm, *active_mm;
+
+/* task state */
+ struct linux_binfmt *binfmt;
+ int exit_state;
+ int exit_code, exit_signal;
+ int pdeath_signal; /* The signal sent when the parent dies */
+ /* ??? */
+ unsigned int personality;
+ unsigned did_exec:1;
+ pid_t pid;
+ pid_t tgid;
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+ /* Canary value for the -fstack-protector gcc feature */
+ unsigned long stack_canary;
+#endif
+ /*
+ * pointers to (original) parent process, youngest child, younger sibling,
+ * older sibling, respectively. (p->father can be replaced with
+ * p->real_parent->pid)
+ */
+ struct task_struct *real_parent; /* real parent process */
+ struct task_struct *parent; /* recipient of SIGCHLD, wait4() reports */
+ /*
+ * children/sibling forms the list of my natural children
+ */
+ struct list_head children; /* list of my children */
+ struct list_head sibling; /* linkage in my parent's children list */
+ struct task_struct *group_leader; /* threadgroup leader */
+
+ /*
+ * ptraced is the list of tasks this task is using ptrace on.
+ * This includes both natural children and PTRACE_ATTACH targets.
+ * p->ptrace_entry is p's link on the p->parent->ptraced list.
+ */
+ struct list_head ptraced;
+ struct list_head ptrace_entry;
+
+#ifdef CONFIG_X86_PTRACE_BTS
+ /*
+ * This is the tracer handle for the ptrace BTS extension.
+ * This field actually belongs to the ptracer task.
+ */
+ struct bts_tracer *bts;
+ /*
+ * The buffer to hold the BTS data.
+ */
+ void *bts_buffer;
+ size_t bts_size;
+#endif /* CONFIG_X86_PTRACE_BTS */
+
+ /* PID/PID hash table linkage. */
+ struct pid_link pids[PIDTYPE_MAX];
+ struct list_head thread_group;
+
+ struct completion *vfork_done; /* for vfork() */
+ int __user *set_child_tid; /* CLONE_CHILD_SETTID */
+ int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
+
+ cputime_t utime, stime, utimescaled, stimescaled;
+ cputime_t gtime;
+ cputime_t prev_utime, prev_stime;
+ unsigned long nvcsw, nivcsw; /* context switch counts */
+ struct timespec start_time; /* monotonic time */
+ struct timespec real_start_time; /* boot based time */
+/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
+ unsigned long min_flt, maj_flt;
+
+ struct task_cputime cputime_expires;
+ struct list_head cpu_timers[3];
+
+/* process credentials */
+ const struct cred *real_cred; /* objective and real subjective task
+ * credentials (COW) */
+ const struct cred *cred; /* effective (overridable) subjective task
+ * credentials (COW) */
+ struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
+
+ char comm[TASK_COMM_LEN]; /* executable name excluding path
+ - access with [gs]et_task_comm (which lock
+ it with task_lock())
+ - initialized normally by flush_old_exec */
+/* file system info */
+ int link_count, total_link_count;
+#ifdef CONFIG_SYSVIPC
+/* ipc stuff */
+ struct sysv_sem sysvsem;
+#endif
+#ifdef CONFIG_DETECT_SOFTLOCKUP
+/* hung task detection */
+ unsigned long last_switch_timestamp;
+ unsigned long last_switch_count;
+#endif
+/* CPU-specific state of this task */
+ struct thread_struct thread;
+/* filesystem information */
+ struct fs_struct *fs;
+/* open file information */
+ struct files_struct *files;
+/* namespaces */
+ struct nsproxy *nsproxy;
+/* signal handlers */
+ struct signal_struct *signal;
+ struct sighand_struct *sighand;
+
+ sigset_t blocked, real_blocked;
+ sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */
+ struct sigpending pending;
+
+ unsigned long sas_ss_sp;
+ size_t sas_ss_size;
+ int (*notifier)(void *priv);
+ void *notifier_data;
+ sigset_t *notifier_mask;
+ struct audit_context *audit_context;
+#ifdef CONFIG_AUDITSYSCALL
+ uid_t loginuid;
+ unsigned int sessionid;
+#endif
+ seccomp_t seccomp;
+
+/* Thread group tracking */
+ u32 parent_exec_id;
+ u32 self_exec_id;
+/* Protection of (de-)allocation: mm, files, fs, tty, keyrings */
+ spinlock_t alloc_lock;
+
+ /* Protection of the PI data structures: */
+ spinlock_t pi_lock;
+
+#ifdef CONFIG_RT_MUTEXES
+ /* PI waiters blocked on a rt_mutex held by this task */
+ struct plist_head pi_waiters;
+ /* Deadlock detection and priority inheritance handling */
+ struct rt_mutex_waiter *pi_blocked_on;
+#endif
+
+#ifdef CONFIG_DEBUG_MUTEXES
+ /* mutex deadlock detection */
+ struct mutex_waiter *blocked_on;
+#endif
+#ifdef CONFIG_TRACE_IRQFLAGS
+ unsigned int irq_events;
+ int hardirqs_enabled;
+ unsigned long hardirq_enable_ip;
+ unsigned int hardirq_enable_event;
+ unsigned long hardirq_disable_ip;
+ unsigned int hardirq_disable_event;
+ int softirqs_enabled;
+ unsigned long softirq_disable_ip;
+ unsigned int softirq_disable_event;
+ unsigned long softirq_enable_ip;
+ unsigned int softirq_enable_event;
+ int hardirq_context;
+ int softirq_context;
+#endif
+#ifdef CONFIG_LOCKDEP
+# define MAX_LOCK_DEPTH 48UL
+ u64 curr_chain_key;
+ int lockdep_depth;
+ unsigned int lockdep_recursion;
+ struct held_lock held_locks[MAX_LOCK_DEPTH];
+#endif
+
+/* journalling filesystem info */
+ void *journal_info;
+
+/* stacked block device info */
+ struct bio *bio_list, **bio_tail;
+
+/* VM state */
+ struct reclaim_state *reclaim_state;
+
+ struct backing_dev_info *backing_dev_info;
+
+ struct io_context *io_context;
+
+ unsigned long ptrace_message;
+ siginfo_t *last_siginfo; /* For ptrace use. */
+ struct task_io_accounting ioac;
+#if defined(CONFIG_TASK_XACCT)
+ u64 acct_rss_mem1; /* accumulated rss usage */
+ u64 acct_vm_mem1; /* accumulated virtual memory usage */
+ cputime_t acct_timexpd; /* stime + utime since last update */
+#endif
+#ifdef CONFIG_CPUSETS
+ nodemask_t mems_allowed;
+ int cpuset_mems_generation;
+ int cpuset_mem_spread_rotor;
+#endif
+#ifdef CONFIG_CGROUPS
+ /* Control Group info protected by css_set_lock */
+ struct css_set *cgroups;
+ /* cg_list protected by css_set_lock and tsk->alloc_lock */
+ struct list_head cg_list;
+#endif
+#ifdef CONFIG_FUTEX
+ struct robust_list_head __user *robust_list;
+#ifdef CONFIG_COMPAT
+ struct compat_robust_list_head __user *compat_robust_list;
+#endif
+ struct list_head pi_state_list;
+ struct futex_pi_state *pi_state_cache;
+#endif
+#ifdef CONFIG_NUMA
+ struct mempolicy *mempolicy;
+ short il_next;
+#endif
+ atomic_t fs_excl; /* holding fs exclusive resources */
+ struct rcu_head rcu;
+
+ /*
+ * cache last used pipe for splice
+ */
+ struct pipe_inode_info *splice_pipe;
+#ifdef CONFIG_TASK_DELAY_ACCT
+ struct task_delay_info *delays;
+#endif
+#ifdef CONFIG_FAULT_INJECTION
+ int make_it_fail;
+#endif
+ struct prop_local_single dirties;
+#ifdef CONFIG_LATENCYTOP
+ int latency_record_count;
+ struct latency_record latency_record[LT_SAVECOUNT];
+#endif
+ /*
+ * time slack values; these are used to round up poll() and
+ * select() etc timeout values. These are in nanoseconds.
+ */
+ unsigned long timer_slack_ns;
+ unsigned long default_timer_slack_ns;
+
+ struct list_head *scm_work_list;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+	/* Index of current stored address in ret_stack */
+ int curr_ret_stack;
+ /* Stack of return addresses for return function tracing */
+ struct ftrace_ret_stack *ret_stack;
+ /*
+ * Number of functions that haven't been traced
+ * because of depth overrun.
+ */
+ atomic_t trace_overrun;
+ /* Pause for the tracing */
+ atomic_t tracing_graph_pause;
+#endif
+#ifdef CONFIG_TRACING
+ /* state flags for use by tracers */
+ unsigned long trace;
+#endif
+};
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+#define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
+
+/*
+ * Priority of a process goes from 0..MAX_PRIO-1, valid RT
+ * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
+ * tasks are in the range MAX_RT_PRIO..MAX_PRIO-1. Priority
+ * values are inverted: lower p->prio value means higher priority.
+ *
+ * The MAX_USER_RT_PRIO value allows the actual maximum
+ * RT priority to be separate from the value exported to
+ * user-space. This allows kernel threads to set their
+ * priority to a value higher than any user task. Note:
+ * MAX_RT_PRIO must not be smaller than MAX_USER_RT_PRIO.
+ */
+
+#define MAX_USER_RT_PRIO 100
+#define MAX_RT_PRIO MAX_USER_RT_PRIO
+
+#define MAX_PRIO (MAX_RT_PRIO + 40)
+#define DEFAULT_PRIO (MAX_RT_PRIO + 20)
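+
+/*
+ * Concretely, with the values above: RT tasks occupy p->prio 0..99
+ * (an RT priority of N maps to p->prio == MAX_RT_PRIO-1 - N), and
+ * nice levels -20..+19 map onto p->prio 100..139, with nice 0 landing
+ * on DEFAULT_PRIO == 120.
+ */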
+
+static inline int rt_prio(int prio)
+{
+ if (unlikely(prio < MAX_RT_PRIO))
+ return 1;
+ return 0;
+}
+
+static inline int rt_task(struct task_struct *p)
+{
+ return rt_prio(p->prio);
+}
+
+static inline void set_task_session(struct task_struct *tsk, pid_t session)
+{
+ tsk->signal->__session = session;
+}
+
+static inline void set_task_pgrp(struct task_struct *tsk, pid_t pgrp)
+{
+ tsk->signal->__pgrp = pgrp;
+}
+
+static inline struct pid *task_pid(struct task_struct *task)
+{
+ return task->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PID].pid;
+}
+
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_PGID].pid;
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+ return task->group_leader->pids[PIDTYPE_SID].pid;
+}
+
+struct pid_namespace;
+
+/*
+ * the helpers to get the task's different pids as they are seen
+ * from various namespaces
+ *
+ * task_xid_nr() : global id, i.e. the id seen from the init namespace;
+ * task_xid_vnr() : virtual id, i.e. the id seen from the pid namespace of
+ * current.
+ * task_xid_nr_ns() : id seen from the ns specified;
+ *
+ * set_task_vxid() : assigns a virtual id to a task;
+ *
+ * see also pid_nr() etc in include/linux/pid.h
+ */
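+
+/*
+ * Sketched usage (illustrative): report a task's pid both globally and
+ * as the task's own pid namespace sees it:
+ *
+ *	pid_t global_pid = task_pid_nr(tsk);
+ *	pid_t local_pid = task_pid_vnr(tsk);
+ *
+ * The two differ for tasks inside a child pid namespace and coincide
+ * for tasks in the init namespace.
+ */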
+
+static inline pid_t task_pid_nr(struct task_struct *tsk)
+{
+ return tsk->pid;
+}
+
+pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+
+static inline pid_t task_pid_vnr(struct task_struct *tsk)
+{
+ return pid_vnr(task_pid(tsk));
+}
+
+
+static inline pid_t task_tgid_nr(struct task_struct *tsk)
+{
+ return tsk->tgid;
+}
+
+pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+
+static inline pid_t task_tgid_vnr(struct task_struct *tsk)
+{
+ return pid_vnr(task_tgid(tsk));
+}
+
+
+static inline pid_t task_pgrp_nr(struct task_struct *tsk)
+{
+ return tsk->signal->__pgrp;
+}
+
+pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+
+static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
+{
+ return pid_vnr(task_pgrp(tsk));
+}
+
+
+static inline pid_t task_session_nr(struct task_struct *tsk)
+{
+ return tsk->signal->__session;
+}
+
+pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns);
+
+static inline pid_t task_session_vnr(struct task_struct *tsk)
+{
+ return pid_vnr(task_session(tsk));
+}
+
+
+/**
+ * pid_alive - check that a task structure is not stale
+ * @p: Task structure to be checked.
+ *
+ * Test if a process is not yet dead (at most zombie state)
+ * If pid_alive fails, then pointers within the task structure
+ * can be stale and must not be dereferenced.
+ */
+static inline int pid_alive(struct task_struct *p)
+{
+ return p->pids[PIDTYPE_PID].pid != NULL;
+}
+
+/**
+ * is_global_init - check if a task structure is init
+ * @tsk: Task structure to be checked.
+ *
+ * Check if a task structure is the first user space task the kernel created.
+ */
+static inline int is_global_init(struct task_struct *tsk)
+{
+ return tsk->pid == 1;
+}
+
+/*
+ * is_container_init:
+ * check whether the task is init in its own pid namespace.
+ */
+extern int is_container_init(struct task_struct *tsk);
+
+extern struct pid *cad_pid;
+
+extern void free_task(struct task_struct *tsk);
+#define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
+
+#ifndef DDE_LINUX
+extern void __put_task_struct(struct task_struct *t);
+#else
+/* DDE_LINUX does not directly manage task structs, so we do not
+ * need a function to destroy them.
+ */
+static inline void __put_task_struct(struct task_struct *t) {}
+#endif /* DDE_LINUX */
+
+static inline void put_task_struct(struct task_struct *t)
+{
+ if (atomic_dec_and_test(&t->usage))
+ __put_task_struct(t);
+}
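+
+/*
+ * Sketch of the usual reference pattern around the helpers above: pin
+ * the task while a pointer to it is kept, drop the reference when done:
+ *
+ *	get_task_struct(tsk);
+ *	...			/* tsk cannot be freed underneath us */
+ *	put_task_struct(tsk);	/* last ref calls __put_task_struct() */
+ */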
+
+extern cputime_t task_utime(struct task_struct *p);
+extern cputime_t task_stime(struct task_struct *p);
+extern cputime_t task_gtime(struct task_struct *p);
+
+/*
+ * Per process flags
+ */
+#define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */
+ /* Not implemented yet, only for 486*/
+#define PF_STARTING 0x00000002 /* being created */
+#define PF_EXITING 0x00000004 /* getting shut down */
+#define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */
+#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
+#define PF_FORKNOEXEC 0x00000040 /* forked but didn't exec */
+#define PF_SUPERPRIV 0x00000100 /* used super-user privileges */
+#define PF_DUMPCORE 0x00000200 /* dumped core */
+#define PF_SIGNALED 0x00000400 /* killed by a signal */
+#define PF_MEMALLOC 0x00000800 /* Allocating memory */
+#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
+#define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
+#define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
+#define PF_FROZEN 0x00010000 /* frozen for system suspend */
+#define PF_FSTRANS 0x00020000 /* inside a filesystem transaction */
+#define PF_KSWAPD 0x00040000 /* I am kswapd */
+#define PF_SWAPOFF 0x00080000 /* I am in swapoff */
+#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
+#define PF_RANDOMIZE 0x00400000 /* randomize virtual address space */
+#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
+#define PF_SPREAD_PAGE 0x01000000 /* Spread page cache over cpuset */
+#define PF_SPREAD_SLAB 0x02000000 /* Spread some slab caches over cpuset */
+#define PF_THREAD_BOUND 0x04000000 /* Thread bound to specific cpu */
+#define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */
+#define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */
+#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */
+#define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
+
+/*
+ * Only the _current_ task can read/write to tsk->flags, but other
+ * tasks can access tsk->flags in readonly mode for example
+ * with tsk_used_math (like during threaded core dumping).
+ * There is however an exception to this rule during ptrace
+ * or during fork: the ptracer task is allowed to write to the
+ * child->flags of its traced child (same goes for fork, the parent
+ * can write to the child->flags), because we're guaranteed the
+ * child is not running and in turn not changing child->flags
+ * at the same time the parent does it.
+ */
+#define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
+#define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
+#define clear_used_math() clear_stopped_child_used_math(current)
+#define set_used_math() set_stopped_child_used_math(current)
+#define conditional_stopped_child_used_math(condition, child) \
+ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
+#define conditional_used_math(condition) \
+ conditional_stopped_child_used_math(condition, current)
+#define copy_to_stopped_child_used_math(child) \
+ do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
+/* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
+#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
+#define used_math() tsk_used_math(current)
+
+#ifdef CONFIG_SMP
+extern int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask);
+#else
+static inline int set_cpus_allowed_ptr(struct task_struct *p,
+ const struct cpumask *new_mask)
+{
+ if (!cpumask_test_cpu(0, new_mask))
+ return -EINVAL;
+ return 0;
+}
+#endif
+static inline int set_cpus_allowed(struct task_struct *p, cpumask_t new_mask)
+{
+ return set_cpus_allowed_ptr(p, &new_mask);
+}
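+
+/*
+ * E.g. pinning a task to CPU 0 (a sketch; cpumask_of() comes from
+ * <linux/cpumask.h>):
+ *
+ *	set_cpus_allowed_ptr(p, cpumask_of(0));
+ *
+ * On !CONFIG_SMP this only succeeds if the mask contains CPU 0, the
+ * sole CPU available.
+ */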
+
+extern unsigned long long sched_clock(void);
+
+extern void sched_clock_init(void);
+extern u64 sched_clock_cpu(int cpu);
+
+#ifndef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+static inline void sched_clock_tick(void)
+{
+}
+
+static inline void sched_clock_idle_sleep_event(void)
+{
+}
+
+static inline void sched_clock_idle_wakeup_event(u64 delta_ns)
+{
+}
+#else
+extern void sched_clock_tick(void);
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+#endif
+
+/*
+ * For kernel-internal use: high-speed (but slightly incorrect) per-cpu
+ * clock constructed from sched_clock():
+ */
+extern unsigned long long cpu_clock(int cpu);
+
+extern unsigned long long
+task_sched_runtime(struct task_struct *task);
+extern unsigned long long thread_group_sched_runtime(struct task_struct *task);
+
+/* sched_exec is called by processes performing an exec */
+#ifdef CONFIG_SMP
+extern void sched_exec(void);
+#else
+#define sched_exec() {}
+#endif
+
+extern void sched_clock_idle_sleep_event(void);
+extern void sched_clock_idle_wakeup_event(u64 delta_ns);
+
+#ifdef CONFIG_HOTPLUG_CPU
+extern void idle_task_exit(void);
+#else
+static inline void idle_task_exit(void) {}
+#endif
+
+extern void sched_idle_next(void);
+
+#if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
+extern void wake_up_idle_cpu(int cpu);
+#else
+static inline void wake_up_idle_cpu(int cpu) { }
+#endif
+
+extern unsigned int sysctl_sched_latency;
+extern unsigned int sysctl_sched_min_granularity;
+extern unsigned int sysctl_sched_wakeup_granularity;
+extern unsigned int sysctl_sched_shares_ratelimit;
+extern unsigned int sysctl_sched_shares_thresh;
+#ifdef CONFIG_SCHED_DEBUG
+extern unsigned int sysctl_sched_child_runs_first;
+extern unsigned int sysctl_sched_features;
+extern unsigned int sysctl_sched_migration_cost;
+extern unsigned int sysctl_sched_nr_migrate;
+
+int sched_nr_latency_handler(struct ctl_table *table, int write,
+ struct file *file, void __user *buffer, size_t *length,
+ loff_t *ppos);
+#endif
+extern unsigned int sysctl_sched_rt_period;
+extern int sysctl_sched_rt_runtime;
+
+int sched_rt_handler(struct ctl_table *table, int write,
+ struct file *filp, void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
+extern unsigned int sysctl_sched_compat_yield;
+
+#ifdef CONFIG_RT_MUTEXES
+extern int rt_mutex_getprio(struct task_struct *p);
+extern void rt_mutex_setprio(struct task_struct *p, int prio);
+extern void rt_mutex_adjust_pi(struct task_struct *p);
+#else
+static inline int rt_mutex_getprio(struct task_struct *p)
+{
+ return p->normal_prio;
+}
+# define rt_mutex_adjust_pi(p) do { } while (0)
+#endif
+
+extern void set_user_nice(struct task_struct *p, long nice);
+extern int task_prio(const struct task_struct *p);
+extern int task_nice(const struct task_struct *p);
+extern int can_nice(const struct task_struct *p, const int nice);
+extern int task_curr(const struct task_struct *p);
+extern int idle_cpu(int cpu);
+extern int sched_setscheduler(struct task_struct *, int, struct sched_param *);
+extern int sched_setscheduler_nocheck(struct task_struct *, int,
+ struct sched_param *);
+extern struct task_struct *idle_task(int cpu);
+extern struct task_struct *curr_task(int cpu);
+extern void set_curr_task(int cpu, struct task_struct *p);
+
+void yield(void);
+
+/*
+ * The default (Linux) execution domain.
+ */
+extern struct exec_domain default_exec_domain;
+
+union thread_union {
+ struct thread_info thread_info;
+ unsigned long stack[THREAD_SIZE/sizeof(long)];
+};
+
+#ifndef __HAVE_ARCH_KSTACK_END
+static inline int kstack_end(void *addr)
+{
+ /* Reliable end of stack detection:
+	 * Some APM BIOS versions misalign the stack
+ */
+ return !(((unsigned long)addr+sizeof(void*)-1) & (THREAD_SIZE-sizeof(void*)));
+}
+#endif
+
+extern union thread_union init_thread_union;
+extern struct task_struct init_task;
+
+extern struct mm_struct init_mm;
+
+extern struct pid_namespace init_pid_ns;
+
+/*
+ * find a task by one of its numerical ids
+ *
+ * find_task_by_pid_type_ns():
+ * it is the most generic call - it finds a task by all id,
+ * type and namespace specified
+ * find_task_by_pid_ns():
+ * finds a task by its pid in the specified namespace
+ * find_task_by_vpid():
+ * finds a task by its virtual pid
+ *
+ * see also find_vpid() etc in include/linux/pid.h
+ */
+
+extern struct task_struct *find_task_by_pid_type_ns(int type, int pid,
+ struct pid_namespace *ns);
+
+extern struct task_struct *find_task_by_vpid(pid_t nr);
+extern struct task_struct *find_task_by_pid_ns(pid_t nr,
+ struct pid_namespace *ns);
+
+extern void __set_special_pids(struct pid *pid);
+
+/* per-UID process charging. */
+extern struct user_struct * alloc_uid(struct user_namespace *, uid_t);
+static inline struct user_struct *get_uid(struct user_struct *u)
+{
+ atomic_inc(&u->__count);
+ return u;
+}
+extern void free_uid(struct user_struct *);
+extern void release_uids(struct user_namespace *ns);
+
+#include <asm/current.h>
+
+extern void do_timer(unsigned long ticks);
+
+extern int wake_up_state(struct task_struct *tsk, unsigned int state);
+extern int wake_up_process(struct task_struct *tsk);
+extern void wake_up_new_task(struct task_struct *tsk,
+ unsigned long clone_flags);
+#ifdef CONFIG_SMP
+ extern void kick_process(struct task_struct *tsk);
+#else
+ static inline void kick_process(struct task_struct *tsk) { }
+#endif
+extern void sched_fork(struct task_struct *p, int clone_flags);
+extern void sched_dead(struct task_struct *p);
+
+extern void proc_caches_init(void);
+extern void flush_signals(struct task_struct *);
+extern void ignore_signals(struct task_struct *);
+extern void flush_signal_handlers(struct task_struct *, int force_default);
+extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
+
+static inline int dequeue_signal_lock(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
+{
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ ret = dequeue_signal(tsk, mask, info);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+
+ return ret;
+}
+
+extern void block_all_signals(int (*notifier)(void *priv), void *priv,
+ sigset_t *mask);
+extern void unblock_all_signals(void);
+extern void release_task(struct task_struct * p);
+extern int send_sig_info(int, struct siginfo *, struct task_struct *);
+extern int force_sigsegv(int, struct task_struct *);
+extern int force_sig_info(int, struct siginfo *, struct task_struct *);
+extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
+extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
+extern int kill_pid_info_as_uid(int, struct siginfo *, struct pid *, uid_t, uid_t, u32);
+extern int kill_pgrp(struct pid *pid, int sig, int priv);
+extern int kill_pid(struct pid *pid, int sig, int priv);
+extern int kill_proc_info(int, struct siginfo *, pid_t);
+extern int do_notify_parent(struct task_struct *, int);
+extern void force_sig(int, struct task_struct *);
+extern void force_sig_specific(int, struct task_struct *);
+extern int send_sig(int, struct task_struct *, int);
+extern void zap_other_threads(struct task_struct *p);
+extern struct sigqueue *sigqueue_alloc(void);
+extern void sigqueue_free(struct sigqueue *);
+extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
+extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
+
+static inline int kill_cad_pid(int sig, int priv)
+{
+ return kill_pid(cad_pid, sig, priv);
+}
+
+/* These can be the second arg to send_sig_info/send_group_sig_info. */
+#define SEND_SIG_NOINFO ((struct siginfo *) 0)
+#define SEND_SIG_PRIV ((struct siginfo *) 1)
+#define SEND_SIG_FORCED ((struct siginfo *) 2)
+
+static inline int is_si_special(const struct siginfo *info)
+{
+ return info <= SEND_SIG_FORCED;
+}
+
+/* True if we are on the alternate signal stack. */
+
+static inline int on_sig_stack(unsigned long sp)
+{
+ return (sp - current->sas_ss_sp < current->sas_ss_size);
+}
+
+static inline int sas_ss_flags(unsigned long sp)
+{
+ return (current->sas_ss_size == 0 ? SS_DISABLE
+ : on_sig_stack(sp) ? SS_ONSTACK : 0);
+}
+
+/*
+ * Routines for handling mm_structs
+ */
+extern struct mm_struct * mm_alloc(void);
+
+/* mmdrop drops the mm and the page tables */
+extern void __mmdrop(struct mm_struct *);
+static inline void mmdrop(struct mm_struct * mm)
+{
+ if (unlikely(atomic_dec_and_test(&mm->mm_count)))
+ __mmdrop(mm);
+}
+
+/* mmput gets rid of the mappings and all user-space */
+extern void mmput(struct mm_struct *);
+/* Grab a reference to a task's mm, if it is not already going away */
+extern struct mm_struct *get_task_mm(struct task_struct *task);
+/* Remove the current tasks stale references to the old mm_struct */
+extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Allocate a new mm structure and copy contents from tsk->mm */
+extern struct mm_struct *dup_mm(struct task_struct *tsk);
+
+extern int copy_thread(int, unsigned long, unsigned long, unsigned long, struct task_struct *, struct pt_regs *);
+extern void flush_thread(void);
+extern void exit_thread(void);
+
+extern void exit_files(struct task_struct *);
+extern void __cleanup_signal(struct signal_struct *);
+extern void __cleanup_sighand(struct sighand_struct *);
+
+extern void exit_itimers(struct signal_struct *);
+extern void flush_itimer_signals(void);
+
+extern NORET_TYPE void do_group_exit(int);
+
+extern void daemonize(const char *, ...);
+extern int allow_signal(int);
+extern int disallow_signal(int);
+
+extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *);
+extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *);
+struct task_struct *fork_idle(int);
+
+extern void set_task_comm(struct task_struct *tsk, char *from);
+extern char *get_task_comm(char *to, struct task_struct *tsk);
+
+/* DDE_LINUX does not manage scheduling - this is done by Fiasco. Therefore we have no information
+ * available on whether an SMP task is active. For now, this is okay, since our SMP support is
+ * only virtual, so when a task calls wait_task_inactive(p), p is definitely not running.
+ * XXX: Fix this when doing real SMP!
+ */
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
+#else
+static inline unsigned long wait_task_inactive(struct task_struct *p,
+ long match_state)
+{
+ return 1;
+}
+#endif
+
+#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+
+#define for_each_process(p) \
+ for (p = &init_task ; (p = next_task(p)) != &init_task ; )
+
+extern bool is_single_threaded(struct task_struct *);
+
+/*
+ * Careful: do_each_thread/while_each_thread is a double loop so
+ * 'break' will not work as expected - use goto instead.
+ */
+#define do_each_thread(g, t) \
+ for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
+
+#define while_each_thread(g, t) \
+ while ((t = next_thread(t)) != g)
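+
+/*
+ * Sketched traversal of every thread in the system (the caller must
+ * hold tasklist_lock or be in an RCU read-side section; note the goto,
+ * per the warning above):
+ *
+ *	struct task_struct *g, *t;
+ *
+ *	do_each_thread(g, t) {
+ *		if (is_the_one(t))	/* is_the_one() is illustrative */
+ *			goto found;
+ *	} while_each_thread(g, t);
+ * found:
+ */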
+
+/* de_thread depends on thread_group_leader not being a pid based check */
+#define thread_group_leader(p) (p == p->group_leader)
+
+/* Due to the insanities of de_thread it is possible for a process
+ * to have the pid of the thread group leader without actually being
+ * the thread group leader. For iteration through the pids in proc
+ * all we care about is that we have a task with the appropriate
+ * pid, we don't actually care if we have the right task.
+ */
+static inline int has_group_leader_pid(struct task_struct *p)
+{
+ return p->pid == p->tgid;
+}
+
+static inline
+int same_thread_group(struct task_struct *p1, struct task_struct *p2)
+{
+ return p1->tgid == p2->tgid;
+}
+
+static inline struct task_struct *next_thread(const struct task_struct *p)
+{
+ return list_entry(rcu_dereference(p->thread_group.next),
+ struct task_struct, thread_group);
+}
+
+static inline int thread_group_empty(struct task_struct *p)
+{
+ return list_empty(&p->thread_group);
+}
+
+#define delay_group_leader(p) \
+ (thread_group_leader(p) && !thread_group_empty(p))
+
+/*
+ * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
+ * subscriptions and synchronises with wait4(). Also used in procfs. Also
+ * pins the final release of task.io_context. Also protects ->cpuset and
+ * ->cgroup.subsys[].
+ *
+ * Nests both inside and outside of read_lock(&tasklist_lock).
+ * It must not be nested with write_lock_irq(&tasklist_lock),
+ * neither inside nor outside.
+ */
+static inline void task_lock(struct task_struct *p)
+{
+ spin_lock(&p->alloc_lock);
+}
+
+static inline void task_unlock(struct task_struct *p)
+{
+ spin_unlock(&p->alloc_lock);
+}
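+
+/*
+ * Sketch (illustrative only): task_lock() is what makes a snapshot of
+ * current->comm stable, since ->comm is on the list of fields it
+ * protects (see above).
+ *
+ *	char comm[TASK_COMM_LEN];
+ *
+ *	task_lock(current);
+ *	strncpy(comm, current->comm, sizeof(comm));
+ *	task_unlock(current);
+ */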
+
+extern struct sighand_struct *lock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags);
+
+static inline void unlock_task_sighand(struct task_struct *tsk,
+ unsigned long *flags)
+{
+ spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
+}
+
+#ifndef __HAVE_THREAD_FUNCTIONS
+
+#define task_thread_info(task) ((struct thread_info *)(task)->stack)
+#define task_stack_page(task) ((task)->stack)
+
+static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
+{
+ *task_thread_info(p) = *task_thread_info(org);
+ task_thread_info(p)->task = p;
+}
+
+static inline unsigned long *end_of_stack(struct task_struct *p)
+{
+ return (unsigned long *)(task_thread_info(p) + 1);
+}
+
+#endif
+
+static inline int object_is_on_stack(void *obj)
+{
+ void *stack = task_stack_page(current);
+
+ return (obj >= stack) && (obj < (stack + THREAD_SIZE));
+}
+
+extern void thread_info_cache_init(void);
+
+/* set thread flags in other task's structures
+ * - see asm/thread_info.h for TIF_xxxx flags available
+ */
+static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ set_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ clear_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
+{
+ return test_ti_thread_flag(task_thread_info(tsk), flag);
+}
+
+static inline void set_tsk_need_resched(struct task_struct *tsk)
+{
+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+}
+
+static inline void clear_tsk_need_resched(struct task_struct *tsk)
+{
+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
+}
+
+static inline int test_tsk_need_resched(struct task_struct *tsk)
+{
+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
+}
+
+static inline int signal_pending(struct task_struct *p)
+{
+ return unlikely(test_tsk_thread_flag(p,TIF_SIGPENDING));
+}
+
+extern int __fatal_signal_pending(struct task_struct *p);
+
+static inline int fatal_signal_pending(struct task_struct *p)
+{
+#ifndef DDE_LINUX
+ return signal_pending(p) && __fatal_signal_pending(p);
+#else /* no signals in DDE... */
+ return 0;
+#endif
+}
+
+static inline int signal_pending_state(long state, struct task_struct *p)
+{
+ if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
+ return 0;
+ if (!signal_pending(p))
+ return 0;
+
+ return (state & TASK_INTERRUPTIBLE)
+#ifndef DDE_LINUX
+ || __fatal_signal_pending(p)
+#endif
+	;
+}
+
+static inline int need_resched(void)
+{
+ return unlikely(test_thread_flag(TIF_NEED_RESCHED));
+}
+
+/*
+ * cond_resched() and cond_resched_lock(): latency reduction via
+ * explicit rescheduling in places that are safe. The return
+ * value indicates whether a reschedule was in fact done.
+ * cond_resched_lock() will drop the spinlock before scheduling,
+ * cond_resched_softirq() will enable bhs before scheduling.
+ */
+extern int _cond_resched(void);
+#ifdef CONFIG_PREEMPT_BKL
+static inline int cond_resched(void)
+{
+ return 0;
+}
+#else
+static inline int cond_resched(void)
+{
+ return _cond_resched();
+}
+#endif
+extern int cond_resched_lock(spinlock_t * lock);
+extern int cond_resched_softirq(void);
+static inline int cond_resched_bkl(void)
+{
+ return _cond_resched();
+}
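+
+/*
+ * Typical use (illustrative only): breaking up a long-running loop so
+ * other tasks get a chance to run, instead of relying on involuntary
+ * preemption.
+ *
+ *	int i;
+ *
+ *	for (i = 0; i < nr_items; i++) {
+ *		process_item(i);	// hypothetical helper
+ *		cond_resched();
+ *	}
+ */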
+
+/*
+ * Does a critical section need to be broken because another task is
+ * waiting? (Technically this does not depend on CONFIG_PREEMPT, but
+ * reflects a general need for low latency.)
+ */
+static inline int spin_needbreak(spinlock_t *lock)
+{
+#ifdef CONFIG_PREEMPT
+ return spin_is_contended(lock);
+#else
+ return 0;
+#endif
+}
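+
+/*
+ * Sketch (illustrative only): combining spin_needbreak() with
+ * cond_resched_lock(), which drops the lock, reschedules if necessary
+ * and re-takes it.
+ *
+ *	spin_lock(&lock);
+ *	while (more_work()) {		// hypothetical predicate
+ *		do_one_step();		// hypothetical helper
+ *		if (spin_needbreak(&lock))
+ *			cond_resched_lock(&lock);
+ *	}
+ *	spin_unlock(&lock);
+ */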
+
+/*
+ * Thread group CPU time accounting.
+ */
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
+
+static inline void thread_group_cputime_init(struct signal_struct *sig)
+{
+ sig->cputimer.cputime = INIT_CPUTIME;
+ spin_lock_init(&sig->cputimer.lock);
+ sig->cputimer.running = 0;
+}
+
+static inline void thread_group_cputime_free(struct signal_struct *sig)
+{
+}
+
+/*
+ * Reevaluate whether the task has signals pending delivery.
+ * Wake the task if so.
+ * This is required every time the blocked sigset_t changes.
+ * callers must hold sighand->siglock.
+ */
+extern void recalc_sigpending_and_wake(struct task_struct *t);
+extern void recalc_sigpending(void);
+
+extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+
+/*
+ * Wrappers for p->thread_info->cpu access. No-op on UP.
+ */
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+
+static inline unsigned int task_cpu(const struct task_struct *p)
+{
+ return task_thread_info(p)->cpu;
+}
+
+extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
+
+#else
+
+static inline unsigned int task_cpu(const struct task_struct *p)
+{
+ return 0;
+}
+
+static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+extern void arch_pick_mmap_layout(struct mm_struct *mm);
+
+#ifdef CONFIG_TRACING
+extern void
+__trace_special(void *__tr, void *__data,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3);
+#else
+static inline void
+__trace_special(void *__tr, void *__data,
+ unsigned long arg1, unsigned long arg2, unsigned long arg3)
+{
+}
+#endif
+
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
+
+extern void normalize_rt_tasks(void);
+
+#ifdef CONFIG_GROUP_SCHED
+
+extern struct task_group init_task_group;
+#ifdef CONFIG_USER_SCHED
+extern struct task_group root_task_group;
+extern void set_tg_uid(struct user_struct *user);
+#endif
+
+extern struct task_group *sched_create_group(struct task_group *parent);
+extern void sched_destroy_group(struct task_group *tg);
+extern void sched_move_task(struct task_struct *tsk);
+#ifdef CONFIG_FAIR_GROUP_SCHED
+extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
+extern unsigned long sched_group_shares(struct task_group *tg);
+#endif
+#ifdef CONFIG_RT_GROUP_SCHED
+extern int sched_group_set_rt_runtime(struct task_group *tg,
+ long rt_runtime_us);
+extern long sched_group_rt_runtime(struct task_group *tg);
+extern int sched_group_set_rt_period(struct task_group *tg,
+ long rt_period_us);
+extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
+#endif
+#endif
+
+extern int task_can_switch_user(struct user_struct *up,
+ struct task_struct *tsk);
+
+#ifdef CONFIG_TASK_XACCT
+static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+{
+ tsk->ioac.rchar += amt;
+}
+
+static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
+{
+ tsk->ioac.wchar += amt;
+}
+
+static inline void inc_syscr(struct task_struct *tsk)
+{
+ tsk->ioac.syscr++;
+}
+
+static inline void inc_syscw(struct task_struct *tsk)
+{
+ tsk->ioac.syscw++;
+}
+#else
+static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
+{
+}
+
+static inline void add_wchar(struct task_struct *tsk, ssize_t amt)
+{
+}
+
+static inline void inc_syscr(struct task_struct *tsk)
+{
+}
+
+static inline void inc_syscw(struct task_struct *tsk)
+{
+}
+#endif
+
+#ifndef TASK_SIZE_OF
+#define TASK_SIZE_OF(tsk) TASK_SIZE
+#endif
+
+#ifdef CONFIG_MM_OWNER
+extern void mm_update_next_owner(struct mm_struct *mm);
+extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
+#else
+static inline void mm_update_next_owner(struct mm_struct *mm)
+{
+}
+
+static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
+{
+}
+#endif /* CONFIG_MM_OWNER */
+
+#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
+
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/libdde-linux26/include/linux/slab_def.h b/libdde-linux26/include/linux/slab_def.h
new file mode 100644
index 00000000..f0a8505b
--- /dev/null
+++ b/libdde-linux26/include/linux/slab_def.h
@@ -0,0 +1,98 @@
+#ifndef _LINUX_SLAB_DEF_H
+#define _LINUX_SLAB_DEF_H
+
+/*
+ * Definitions unique to the original Linux SLAB allocator.
+ *
+ * What we provide here is a way to optimize the frequent kmalloc
+ * calls in the kernel by selecting the appropriate general cache
+ * if kmalloc was called with a size that can be established at
+ * compile time.
+ */
+
+#include <linux/init.h>
+#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
+
+/* Size description struct for general caches. */
+struct cache_sizes {
+ size_t cs_size;
+ struct kmem_cache *cs_cachep;
+#ifdef CONFIG_ZONE_DMA
+ struct kmem_cache *cs_dmacachep;
+#endif
+};
+
+#ifndef DDE_LINUX
+extern struct cache_sizes malloc_sizes[];
+#endif
+void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
+void *__kmalloc(size_t size, gfp_t flags);
+
+static inline void *kmalloc(size_t size, gfp_t flags)
+{
+#ifndef DDE_LINUX
+ if (__builtin_constant_p(size)) {
+ int i = 0;
+
+ if (!size)
+ return ZERO_SIZE_PTR;
+
+#define CACHE(x) \
+ if (size <= x) \
+ goto found; \
+ else \
+ i++;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
+ return NULL;
+found:
+#ifdef CONFIG_ZONE_DMA
+ if (flags & GFP_DMA)
+ return kmem_cache_alloc(malloc_sizes[i].cs_dmacachep,
+ flags);
+#endif
+ return kmem_cache_alloc(malloc_sizes[i].cs_cachep, flags);
+ }
+#endif
+ return __kmalloc(size, flags);
+}
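+
+/*
+ * Example (illustrative only): with a compile-time constant size, the
+ * CACHE() expansion above picks the matching general cache, so on a
+ * non-DDE build this compiles down to a kmem_cache_alloc(); under
+ * DDE_LINUX it always falls through to __kmalloc().
+ *
+ *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);  // 'struct foo' is hypothetical
+ *	if (!f)
+ *		return -ENOMEM;
+ */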
+
+#ifdef CONFIG_NUMA
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+#ifndef DDE_LINUX
+ if (__builtin_constant_p(size)) {
+ int i = 0;
+
+ if (!size)
+ return ZERO_SIZE_PTR;
+
+#define CACHE(x) \
+ if (size <= x) \
+ goto found; \
+ else \
+ i++;
+#include <linux/kmalloc_sizes.h>
+#undef CACHE
+ return NULL;
+found:
+#ifdef CONFIG_ZONE_DMA
+ if (flags & GFP_DMA)
+ return kmem_cache_alloc_node(malloc_sizes[i].cs_dmacachep,
+ flags, node);
+#endif
+ return kmem_cache_alloc_node(malloc_sizes[i].cs_cachep,
+ flags, node);
+ }
+#endif
+ return __kmalloc_node(size, flags, node);
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _LINUX_SLAB_DEF_H */
diff --git a/libdde-linux26/include/linux/smp.h b/libdde-linux26/include/linux/smp.h
new file mode 100644
index 00000000..b9d71591
--- /dev/null
+++ b/libdde-linux26/include/linux/smp.h
@@ -0,0 +1,190 @@
+#ifndef __LINUX_SMP_H
+#define __LINUX_SMP_H
+
+/*
+ * Generic SMP support
+ * Alan Cox. <alan@redhat.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/cpumask.h>
+
+extern void cpu_idle(void);
+
+struct call_single_data {
+ struct list_head list;
+ void (*func) (void *info);
+ void *info;
+ u16 flags;
+ u16 priv;
+};
+
+/* total number of cpus in this system (may exceed NR_CPUS) */
+extern unsigned int total_cpus;
+
+int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
+ int wait);
+
+#if defined(CONFIG_SMP) && !defined(DDE_LINUX)
+
+#include <linux/preempt.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/smp.h>
+
+/*
+ * main cross-CPU interfaces, handles INIT, TLB flush, STOP, etc.
+ * (defined in asm header):
+ */
+
+/*
+ * stops all CPUs but the current one:
+ */
+extern void smp_send_stop(void);
+
+/*
+ * sends a 'reschedule' event to another CPU:
+ */
+extern void smp_send_reschedule(int cpu);
+
+
+/*
+ * Prepare machine for booting other CPUs.
+ */
+extern void smp_prepare_cpus(unsigned int max_cpus);
+
+/*
+ * Bring a CPU up
+ */
+extern int __cpu_up(unsigned int cpunum);
+
+/*
+ * Final polishing of CPUs
+ */
+extern void smp_cpus_done(unsigned int max_cpus);
+
+/*
+ * Call a function on all other processors
+ */
+int smp_call_function(void(*func)(void *info), void *info, int wait);
+void smp_call_function_many(const struct cpumask *mask,
+ void (*func)(void *info), void *info, bool wait);
+
+/* Deprecated: Use smp_call_function_many which takes a pointer to the mask. */
+static inline int
+smp_call_function_mask(cpumask_t mask, void(*func)(void *info), void *info,
+ int wait)
+{
+ smp_call_function_many(&mask, func, info, wait);
+ return 0;
+}
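+
+/*
+ * Usage sketch (illustrative only): run a function on all other CPUs
+ * and wait for completion. The callback executes in interrupt context
+ * and must not sleep.
+ *
+ *	static void flush_local_state(void *info)	// hypothetical callback
+ *	{
+ *		// per-CPU cleanup goes here
+ *	}
+ *
+ *	smp_call_function(flush_local_state, NULL, 1);
+ */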
+
+void __smp_call_function_single(int cpuid, struct call_single_data *data);
+
+/*
+ * Generic and arch helpers
+ */
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+void generic_smp_call_function_single_interrupt(void);
+void generic_smp_call_function_interrupt(void);
+void ipi_call_lock(void);
+void ipi_call_unlock(void);
+void ipi_call_lock_irq(void);
+void ipi_call_unlock_irq(void);
+#endif
+
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int wait);
+
+#define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPUs */
+#define MSG_ALL 0x8001
+
+#define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */
+#define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPUs
+ * when rebooting
+ */
+#define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/
+#define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */
+
+/*
+ * Mark the boot cpu "online" so that it can call console drivers in
+ * printk() and can access its per-cpu storage.
+ */
+void smp_prepare_boot_cpu(void);
+
+extern unsigned int setup_max_cpus;
+
+#else /* !SMP */
+
+/*
+ * These macros fold the SMP functionality into a single CPU system
+ */
+#define raw_smp_processor_id() 0
+static inline int up_smp_call_function(void (*func)(void *), void *info)
+{
+ return 0;
+}
+#define smp_call_function(func, info, wait) \
+ (up_smp_call_function(func, info))
+#define on_each_cpu(func,info,wait) \
+ ({ \
+ local_irq_disable(); \
+ func(info); \
+ local_irq_enable(); \
+ 0; \
+ })
+static inline void smp_send_reschedule(int cpu) { }
+#define num_booting_cpus() 1
+#define smp_prepare_boot_cpu() do {} while (0)
+#define smp_call_function_mask(mask, func, info, wait) \
+ (up_smp_call_function(func, info))
+#define smp_call_function_many(mask, func, info, wait) \
+ (up_smp_call_function(func, info))
+static inline void init_call_single_data(void)
+{
+}
+#endif /* !SMP */
+
+/*
+ * smp_processor_id(): get the current CPU ID.
+ *
+ * if DEBUG_PREEMPT is enabled then we check whether it is
+ * used in a preemption-safe way. (smp_processor_id() is safe
+ * if it's used in a preemption-off critical section, or in
+ * a thread that is bound to the current CPU.)
+ *
+ * NOTE: raw_smp_processor_id() is for internal use only
+ * (smp_processor_id() is the preferred variant), but in rare
+ * instances it might also be used to turn off false positives
+ * (i.e. a smp_processor_id() use that the debugging code reports but
+ * which is for some reason legal). Don't use this to hack around
+ * the warning message, as your code might not work under PREEMPT.
+ */
+
+/* DDE_LINUX does use CONFIG_SMP in order to synchronize its various
+ * threads. However, we only have a single CPU and therefore always
+ * return 0 in smp_processor_id().
+ */
+#ifndef DDE_LINUX
+#ifdef CONFIG_DEBUG_PREEMPT
+ extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
+#else
+# define smp_processor_id() raw_smp_processor_id()
+#endif
+#else
+#define smp_processor_id() 0
+#endif /* DDE_LINUX */
+
+#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
+#define put_cpu() preempt_enable()
+#define put_cpu_no_resched() preempt_enable_no_resched()
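+
+/*
+ * Sketch (illustrative only): get_cpu() disables preemption, so the
+ * returned CPU id stays valid until the matching put_cpu(). Under
+ * DDE_LINUX it trivially yields 0, as explained above.
+ *
+ *	int cpu = get_cpu();
+ *	use_per_cpu_data(cpu);		// hypothetical helper
+ *	put_cpu();
+ */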
+
+void smp_setup_processor_id(void);
+
+#endif /* __LINUX_SMP_H */
diff --git a/libdde-linux26/include/linux/spinlock.h b/libdde-linux26/include/linux/spinlock.h
new file mode 100644
index 00000000..ab862f99
--- /dev/null
+++ b/libdde-linux26/include/linux/spinlock.h
@@ -0,0 +1,467 @@
+#ifndef __LINUX_SPINLOCK_H
+#define __LINUX_SPINLOCK_H
+
+/*
+ * include/linux/spinlock.h - generic spinlock/rwlock declarations
+ *
+ * here's the role of the various spinlock/rwlock related include files:
+ *
+ * on SMP builds:
+ *
+ * asm/spinlock_types.h: contains the raw_spinlock_t/raw_rwlock_t and the
+ * initializers
+ *
+ * linux/spinlock_types.h:
+ * defines the generic type and initializers
+ *
+ * asm/spinlock.h: contains the __raw_spin_*()/etc. lowlevel
+ * implementations, mostly inline assembly code
+ *
+ * (also included on UP-debug builds:)
+ *
+ * linux/spinlock_api_smp.h:
+ * contains the prototypes for the _spin_*() APIs.
+ *
+ * linux/spinlock.h: builds the final spin_*() APIs.
+ *
+ * on UP builds:
+ *
+ * linux/spinlock_type_up.h:
+ * contains the generic, simplified UP spinlock type.
+ * (which is an empty structure on non-debug builds)
+ *
+ * linux/spinlock_types.h:
+ * defines the generic type and initializers
+ *
+ * linux/spinlock_up.h:
+ * contains the __raw_spin_*()/etc. version of UP
+ * builds. (which are NOPs on non-debug, non-preempt
+ * builds)
+ *
+ * (included on UP-non-debug builds:)
+ *
+ * linux/spinlock_api_up.h:
+ * builds the _spin_*() APIs.
+ *
+ * linux/spinlock.h: builds the final spin_*() APIs.
+ */
+
+#include <linux/typecheck.h>
+#include <linux/preempt.h>
+#include <linux/linkage.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <linux/kernel.h>
+#include <linux/stringify.h>
+#include <linux/bottom_half.h>
+
+#include <asm/system.h>
+
+/*
+ * Must define these before including other files, inline functions need them
+ */
+#define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
+
+#define LOCK_SECTION_START(extra) \
+ ".subsection 1\n\t" \
+ extra \
+ ".ifndef " LOCK_SECTION_NAME "\n\t" \
+ LOCK_SECTION_NAME ":\n\t" \
+ ".endif\n"
+
+#define LOCK_SECTION_END \
+ ".previous\n\t"
+
+#define __lockfunc __attribute__((section(".spinlock.text")))
+
+/*
+ * Pull the raw_spinlock_t and raw_rwlock_t definitions:
+ */
+#include <linux/spinlock_types.h>
+
+#ifndef DDE_LINUX
+extern int __lockfunc generic__raw_read_trylock(raw_rwlock_t *lock);
+
+/*
+ * Pull the __raw*() functions/declarations (UP-nondebug doesn't need them):
+ */
+#ifdef CONFIG_SMP
+# include <asm/spinlock.h>
+#else
+# include <linux/spinlock_up.h>
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __spin_lock_init(spinlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __spin_lock_init((lock), #lock, &__key); \
+} while (0)
+
+#else
+# define spin_lock_init(lock) \
+ do { *(lock) = SPIN_LOCK_UNLOCKED; } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void __rwlock_init(rwlock_t *lock, const char *name,
+ struct lock_class_key *key);
+# define rwlock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __rwlock_init((lock), #lock, &__key); \
+} while (0)
+#else
+# define rwlock_init(lock) \
+ do { *(lock) = RW_LOCK_UNLOCKED; } while (0)
+#endif
+
+#define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define spin_is_contended(lock) ((lock)->break_lock)
+#else
+
+#ifdef __raw_spin_is_contended
+#define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock)
+#else
+#define spin_is_contended(lock) (((void)(lock), 0))
+#endif /*__raw_spin_is_contended*/
+#endif
+
+/**
+ * spin_unlock_wait - wait until the spinlock gets unlocked
+ * @lock: the spinlock in question.
+ */
+#define spin_unlock_wait(lock) __raw_spin_unlock_wait(&(lock)->raw_lock)
+
+/*
+ * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+# include <linux/spinlock_api_smp.h>
+#else
+# include <linux/spinlock_api_up.h>
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ extern void _raw_spin_lock(spinlock_t *lock);
+#define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock)
+ extern int _raw_spin_trylock(spinlock_t *lock);
+ extern void _raw_spin_unlock(spinlock_t *lock);
+ extern void _raw_read_lock(rwlock_t *lock);
+ extern int _raw_read_trylock(rwlock_t *lock);
+ extern void _raw_read_unlock(rwlock_t *lock);
+ extern void _raw_write_lock(rwlock_t *lock);
+ extern int _raw_write_trylock(rwlock_t *lock);
+ extern void _raw_write_unlock(rwlock_t *lock);
+#else
+# define _raw_spin_lock(lock) __raw_spin_lock(&(lock)->raw_lock)
+# define _raw_spin_lock_flags(lock, flags) \
+ __raw_spin_lock_flags(&(lock)->raw_lock, *(flags))
+# define _raw_spin_trylock(lock) __raw_spin_trylock(&(lock)->raw_lock)
+# define _raw_spin_unlock(lock) __raw_spin_unlock(&(lock)->raw_lock)
+# define _raw_read_lock(rwlock) __raw_read_lock(&(rwlock)->raw_lock)
+# define _raw_read_trylock(rwlock) __raw_read_trylock(&(rwlock)->raw_lock)
+# define _raw_read_unlock(rwlock) __raw_read_unlock(&(rwlock)->raw_lock)
+# define _raw_write_lock(rwlock) __raw_write_lock(&(rwlock)->raw_lock)
+# define _raw_write_trylock(rwlock) __raw_write_trylock(&(rwlock)->raw_lock)
+# define _raw_write_unlock(rwlock) __raw_write_unlock(&(rwlock)->raw_lock)
+#endif
+
+#define read_can_lock(rwlock) __raw_read_can_lock(&(rwlock)->raw_lock)
+#define write_can_lock(rwlock) __raw_write_can_lock(&(rwlock)->raw_lock)
+
+/*
+ * Define the various spin_lock and rw_lock methods. Note we define these
+ * regardless of whether CONFIG_SMP or CONFIG_PREEMPT are set. The various
+ * methods are defined as nops in the case they are not required.
+ */
+#define spin_trylock(lock) __cond_lock(lock, _spin_trylock(lock))
+#define read_trylock(lock) __cond_lock(lock, _read_trylock(lock))
+#define write_trylock(lock) __cond_lock(lock, _write_trylock(lock))
+
+#define spin_lock(lock) _spin_lock(lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define spin_lock_nested(lock, subclass) _spin_lock_nested(lock, subclass)
+# define spin_lock_nest_lock(lock, nest_lock) \
+ do { \
+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
+ _spin_lock_nest_lock(lock, &(nest_lock)->dep_map); \
+ } while (0)
+#else
+# define spin_lock_nested(lock, subclass) _spin_lock(lock)
+# define spin_lock_nest_lock(lock, nest_lock) _spin_lock(lock)
+#endif
+
+#define write_lock(lock) _write_lock(lock)
+#define read_lock(lock) _read_lock(lock)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _spin_lock_irqsave(lock); \
+ } while (0)
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _read_lock_irqsave(lock); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _write_lock_irqsave(lock); \
+ } while (0)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _spin_lock_irqsave_nested(lock, subclass); \
+ } while (0)
+#else
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+ do { \
+ typecheck(unsigned long, flags); \
+ flags = _spin_lock_irqsave(lock); \
+ } while (0)
+#endif
+
+#else
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _spin_lock_irqsave(lock, flags); \
+ } while (0)
+#define read_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _read_lock_irqsave(lock, flags); \
+ } while (0)
+#define write_lock_irqsave(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _write_lock_irqsave(lock, flags); \
+ } while (0)
+#define spin_lock_irqsave_nested(lock, flags, subclass) \
+ spin_lock_irqsave(lock, flags)
+
+#endif
+
+#define spin_lock_irq(lock) _spin_lock_irq(lock)
+#define spin_lock_bh(lock) _spin_lock_bh(lock)
+
+#define read_lock_irq(lock) _read_lock_irq(lock)
+#define read_lock_bh(lock) _read_lock_bh(lock)
+
+#define write_lock_irq(lock) _write_lock_irq(lock)
+#define write_lock_bh(lock) _write_lock_bh(lock)
+
+/*
+ * We inline the unlock functions in the nondebug case:
+ */
+#if defined(CONFIG_DEBUG_SPINLOCK) || defined(CONFIG_PREEMPT) || \
+ !defined(CONFIG_SMP)
+# define spin_unlock(lock) _spin_unlock(lock)
+# define read_unlock(lock) _read_unlock(lock)
+# define write_unlock(lock) _write_unlock(lock)
+# define spin_unlock_irq(lock) _spin_unlock_irq(lock)
+# define read_unlock_irq(lock) _read_unlock_irq(lock)
+# define write_unlock_irq(lock) _write_unlock_irq(lock)
+#else
+# define spin_unlock(lock) \
+ do {__raw_spin_unlock(&(lock)->raw_lock); __release(lock); } while (0)
+# define read_unlock(lock) \
+ do {__raw_read_unlock(&(lock)->raw_lock); __release(lock); } while (0)
+# define write_unlock(lock) \
+ do {__raw_write_unlock(&(lock)->raw_lock); __release(lock); } while (0)
+# define spin_unlock_irq(lock) \
+do { \
+ __raw_spin_unlock(&(lock)->raw_lock); \
+ __release(lock); \
+ local_irq_enable(); \
+} while (0)
+# define read_unlock_irq(lock) \
+do { \
+ __raw_read_unlock(&(lock)->raw_lock); \
+ __release(lock); \
+ local_irq_enable(); \
+} while (0)
+# define write_unlock_irq(lock) \
+do { \
+ __raw_write_unlock(&(lock)->raw_lock); \
+ __release(lock); \
+ local_irq_enable(); \
+} while (0)
+#endif
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _spin_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define spin_unlock_bh(lock) _spin_unlock_bh(lock)
+
+#define read_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _read_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define read_unlock_bh(lock) _read_unlock_bh(lock)
+
+#define write_unlock_irqrestore(lock, flags) \
+ do { \
+ typecheck(unsigned long, flags); \
+ _write_unlock_irqrestore(lock, flags); \
+ } while (0)
+#define write_unlock_bh(lock) _write_unlock_bh(lock)
+
+#define spin_trylock_bh(lock) __cond_lock(lock, _spin_trylock_bh(lock))
+
+#define spin_trylock_irq(lock) \
+({ \
+ local_irq_disable(); \
+ spin_trylock(lock) ? \
+ 1 : ({ local_irq_enable(); 0; }); \
+})
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ spin_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#define write_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ write_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
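+
+/*
+ * Sketch (illustrative only): opportunistically taking a lock from a
+ * context that must not spin with interrupts disabled. On failure,
+ * spin_trylock_irqsave() has already restored the interrupt state.
+ *
+ *	unsigned long flags;
+ *
+ *	if (spin_trylock_irqsave(&lock, flags)) {
+ *		do_quick_work();	// hypothetical helper
+ *		spin_unlock_irqrestore(&lock, flags);
+ *	}
+ */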
+
+/*
+ * Pull the atomic_t declaration:
+ * (asm-mips/atomic.h needs above definitions)
+ */
+#include <asm/atomic.h>
+/**
+ * atomic_dec_and_lock - lock on reaching reference count zero
+ * @atomic: the atomic counter
+ * @lock: the spinlock in question
+ *
+ * Decrements @atomic by 1. If the result is 0, returns true and locks
+ * @lock. Returns false for all other cases.
+ */
+extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
+#define atomic_dec_and_lock(atomic, lock) \
+ __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
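+
+/*
+ * Typical use (illustrative only): the classic refcount-release
+ * pattern. The lock is taken only when the last reference is dropped,
+ * so unlinking and freeing the object happens atomically with the
+ * final decrement. 'obj' and its fields are hypothetical.
+ *
+ *	if (atomic_dec_and_lock(&obj->refcnt, &obj_list_lock)) {
+ *		list_del(&obj->node);
+ *		spin_unlock(&obj_list_lock);
+ *		kfree(obj);
+ *	}
+ */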
+
+/**
+ * spin_can_lock - would spin_trylock() succeed?
+ * @lock: the spinlock in question.
+ */
+#define spin_can_lock(lock) (!spin_is_locked(lock))
+
+#else /* DDE_LINUX */
+
+#define spin_lock_init(l) \
+ do { \
+ ddekit_lock_init(&(l)->ddekit_lock); \
+ (l)->need_init = 0; \
+ } while (0)
+
+#define rwlock_init(l) spin_lock_init(l)
+
+#define COND_INIT_LOCK(l) \
+ do { \
+ if ((l)->need_init) spin_lock_init(l); \
+ } while (0)
+
+#define spin_lock(lock) \
+ do { \
+ COND_INIT_LOCK(lock); \
+ preempt_disable(); \
+ ddekit_lock_lock(&(lock)->ddekit_lock); \
+ } while (0)
+
+#define read_lock(lock) spin_lock(lock)
+#define write_lock(lock) spin_lock(lock)
+#define spin_lock_irq(lock) do { local_irq_disable(); spin_lock(lock); } while (0)
+#define spin_lock_bh(lock) spin_lock(lock)
+#define read_lock_irq(lock) spin_lock_irq(lock)
+#define read_lock_bh(lock) spin_lock_bh(lock)
+#define write_lock_irq(lock) spin_lock_irq(lock)
+#define write_lock_bh(lock) spin_lock_bh(lock)
+
+#define spin_unlock(lock) \
+ do { \
+ COND_INIT_LOCK(lock); \
+ ddekit_lock_unlock(&(lock)->ddekit_lock); \
+ preempt_enable(); \
+ } while (0)
+
+#define read_unlock(lock) spin_unlock(lock)
+#define write_unlock(lock) spin_unlock(lock)
+
+#define spin_unlock_irq(lock) do { spin_unlock(lock); local_irq_enable(); } while (0)
+#define spin_unlock_bh(lock) spin_unlock(lock)
+#define read_unlock_irq(lock) spin_unlock_irq(lock)
+#define read_unlock_bh(lock) spin_unlock_bh(lock)
+#define write_unlock_irq(lock) spin_unlock_irq(lock)
+#define write_unlock_bh(lock) spin_unlock_bh(lock)
+
+#define spin_lock_irqsave(lock, flags) \
+ do { \
+ local_irq_save(flags); \
+ spin_lock(lock);\
+ } while (0)
+
+#define read_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
+#define write_lock_irqsave(lock, flags) spin_lock_irqsave(lock, flags)
+
+#define spin_unlock_irqrestore(lock, flags) \
+ do { \
+ spin_unlock(lock); \
+ local_irq_restore(flags); \
+ } while (0)
+
+#define read_unlock_irqrestore(lock, flags) spin_unlock_irqrestore(lock, flags)
+#define write_unlock_irqrestore(lock, flags) spin_unlock_irqrestore(lock, flags)
+
+static inline int spin_trylock(spinlock_t *lock)
+{
+ COND_INIT_LOCK(lock);
+ return ddekit_lock_try_lock(&lock->ddekit_lock) == 0;
+}
+
+#define _raw_spin_lock(l) spin_lock(l)
+#define _raw_spin_unlock(l) spin_unlock(l)
+#define _raw_spin_trylock(l) spin_trylock(l)
+
+#define spin_trylock_irqsave(lock, flags) \
+({ \
+ local_irq_save(flags); \
+ spin_trylock(lock) ? \
+ 1 : ({ local_irq_restore(flags); 0; }); \
+})
+
+#define read_trylock(l) spin_trylock(l)
+#define write_trylock(l) read_trylock(l)
+
+#define spin_is_locked(x) \
+ (ddekit_lock_owner(&(x)->ddekit_lock) != 0)
+
+#define assert_spin_locked(x) BUG_ON(!spin_is_locked(x))
+
+
+#endif /* DDE_LINUX */
+
+#endif /* __LINUX_SPINLOCK_H */
diff --git a/libdde-linux26/include/linux/spinlock_types.h b/libdde-linux26/include/linux/spinlock_types.h
new file mode 100644
index 00000000..3d4dd796
--- /dev/null
+++ b/libdde-linux26/include/linux/spinlock_types.h
@@ -0,0 +1,124 @@
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#define __LINUX_SPINLOCK_TYPES_H
+
+/*
+ * include/linux/spinlock_types.h - generic spinlock type definitions
+ * and initializers
+ *
+ * portions Copyright 2005, Red Hat, Inc., Ingo Molnar
+ * Released under the General Public License (GPL).
+ */
+
+#ifndef DDE_LINUX
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct {
+ raw_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} spinlock_t;
+
+#define SPINLOCK_MAGIC 0xdead4ead
+
+typedef struct {
+ raw_rwlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+ unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+ unsigned int magic, owner_cpu;
+ void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
+} rwlock_t;
+
+#define RWLOCK_MAGIC 0xdeaf1eed
+
+#define SPINLOCK_OWNER_INIT ((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ .magic = SPINLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ .magic = RWLOCK_MAGIC, \
+ .owner = SPINLOCK_OWNER_INIT, \
+ .owner_cpu = -1, \
+ RW_DEP_MAP_INIT(lockname) }
+#else
+# define __SPIN_LOCK_UNLOCKED(lockname) \
+ (spinlock_t) { .raw_lock = __RAW_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEP_MAP_INIT(lockname) }
+#define __RW_LOCK_UNLOCKED(lockname) \
+ (rwlock_t) { .raw_lock = __RAW_RW_LOCK_UNLOCKED, \
+ RW_DEP_MAP_INIT(lockname) }
+#endif
+
+/*
+ * SPIN_LOCK_UNLOCKED and RW_LOCK_UNLOCKED defeat lockdep state tracking and
+ * are hence deprecated.
+ * Please use DEFINE_SPINLOCK()/DEFINE_RWLOCK() or
+ * __SPIN_LOCK_UNLOCKED()/__RW_LOCK_UNLOCKED() as appropriate.
+ */
+#define SPIN_LOCK_UNLOCKED __SPIN_LOCK_UNLOCKED(old_style_spin_init)
+#define RW_LOCK_UNLOCKED __RW_LOCK_UNLOCKED(old_style_rw_init)
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#else
+
+#include <ddekit/lock.h>
+
+typedef struct {
+ ddekit_lock_t ddekit_lock;
+ char need_init;
+} spinlock_t;
+
+typedef spinlock_t rwlock_t;
+
+#define SPIN_LOCK_UNLOCKED (spinlock_t) { .ddekit_lock = NULL, .need_init = 1 }
+#define RW_LOCK_UNLOCKED (rwlock_t) { .ddekit_lock = NULL, .need_init = 1 }
+
+#define __SPIN_LOCK_UNLOCKED(name) SPIN_LOCK_UNLOCKED
+#define __RW_LOCK_UNLOCKED(name) RW_LOCK_UNLOCKED
+
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x)
+
+#endif /* DDE_LINUX */
+
+#endif /* __LINUX_SPINLOCK_TYPES_H */
diff --git a/libdde-linux26/include/linux/stddef.h b/libdde-linux26/include/linux/stddef.h
new file mode 100644
index 00000000..6a40c76b
--- /dev/null
+++ b/libdde-linux26/include/linux/stddef.h
@@ -0,0 +1,28 @@
+#ifndef _LINUX_STDDEF_H
+#define _LINUX_STDDEF_H
+
+#include <linux/compiler.h>
+
+#undef NULL
+#if defined(__cplusplus)
+#define NULL 0
+#else
+#define NULL ((void *)0)
+#endif
+
+#ifdef __KERNEL__
+
+enum {
+ false = 0,
+ true = 1
+};
+
+#undef offsetof
+#ifdef __compiler_offsetof
+#define offsetof(TYPE,MEMBER) __compiler_offsetof(TYPE,MEMBER)
+#else
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+#endif /* __KERNEL__ */
+
+#endif
diff --git a/libdde-linux26/include/linux/timer.h b/libdde-linux26/include/linux/timer.h
new file mode 100644
index 00000000..149b5207
--- /dev/null
+++ b/libdde-linux26/include/linux/timer.h
@@ -0,0 +1,214 @@
+#ifndef _LINUX_TIMER_H
+#define _LINUX_TIMER_H
+
+#include <linux/list.h>
+#include <linux/ktime.h>
+#include <linux/stddef.h>
+#include <linux/debugobjects.h>
+
+struct tvec_base;
+
+struct timer_list {
+ struct list_head entry;
+ unsigned long expires;
+
+ void (*function)(unsigned long);
+ unsigned long data;
+
+#ifndef DDE_LINUX
+ struct tvec_base *base;
+#else /* DDE_LINUX */
+ int ddekit_timer_id;
+#endif /* DDE_LINUX */
+#ifdef CONFIG_TIMER_STATS
+ void *start_site;
+ char start_comm[16];
+ int start_pid;
+#endif
+};
+
+extern struct tvec_base boot_tvec_bases;
+
+#ifndef DDE_LINUX
+#define TIMER_INITIALIZER(_function, _expires, _data) { \
+ .function = (_function), \
+ .expires = (_expires), \
+ .data = (_data), \
+ .base = &boot_tvec_bases, \
+ }
+#else
+#define TIMER_INITIALIZER(_function, _expires, _data) { \
+ .function = (_function), \
+ .expires = (_expires), \
+ .data = (_data), \
+ .ddekit_timer_id = -1, \
+ }
+#endif /* DDE_LINUX */
+
+#define DEFINE_TIMER(_name, _function, _expires, _data) \
+ struct timer_list _name = \
+ TIMER_INITIALIZER(_function, _expires, _data)
+
+void init_timer(struct timer_list *timer);
+void init_timer_deferrable(struct timer_list *timer);
+
+#ifdef CONFIG_DEBUG_OBJECTS_TIMERS
+extern void init_timer_on_stack(struct timer_list *timer);
+extern void destroy_timer_on_stack(struct timer_list *timer);
+#else
+static inline void destroy_timer_on_stack(struct timer_list *timer) { }
+static inline void init_timer_on_stack(struct timer_list *timer)
+{
+ init_timer(timer);
+}
+#endif
+
+static inline void setup_timer(struct timer_list * timer,
+ void (*function)(unsigned long),
+ unsigned long data)
+{
+ timer->function = function;
+ timer->data = data;
+ init_timer(timer);
+}
+
+#ifndef DDE_LINUX
+static inline void setup_timer_on_stack(struct timer_list *timer,
+ void (*function)(unsigned long),
+ unsigned long data)
+{
+ timer->function = function;
+ timer->data = data;
+ init_timer_on_stack(timer);
+}
+
+/**
+ * timer_pending - is a timer pending?
+ * @timer: the timer in question
+ *
+ * timer_pending will tell whether a given timer is currently pending
+ * or not. Callers must ensure serialization wrt. other operations done
+ * to this timer, e.g. interrupt contexts, or other CPUs on SMP.
+ *
+ * return value: 1 if the timer is pending, 0 if not.
+ */
+static inline int timer_pending(const struct timer_list * timer)
+{
+ return timer->entry.next != NULL;
+}
+#else
+int timer_pending(const struct timer_list *timer);
+#endif /* DDE_LINUX */
+
+extern void add_timer_on(struct timer_list *timer, int cpu);
+extern int del_timer(struct timer_list * timer);
+extern int __mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer(struct timer_list *timer, unsigned long expires);
+
+/*
+ * The jiffies value which is added to now, when there is no timer
+ * in the timer wheel:
+ */
+#define NEXT_TIMER_MAX_DELTA ((1UL << 30) - 1)
+
+/*
+ * Return when the next timer-wheel timeout occurs (in absolute jiffies),
+ * locks the timer base:
+ */
+extern unsigned long next_timer_interrupt(void);
+/*
+ * Return when the next timer-wheel timeout occurs (in absolute jiffies),
+ * locks the timer base and does the comparison against the given
+ * jiffy value.
+ */
+extern unsigned long get_next_timer_interrupt(unsigned long now);
+
+/*
+ * Timer-statistics info:
+ */
+#ifdef CONFIG_TIMER_STATS
+
+#define TIMER_STATS_FLAG_DEFERRABLE 0x1
+
+extern void init_timer_stats(void);
+
+extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
+ void *timerf, char *comm,
+ unsigned int timer_flag);
+
+extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
+ void *addr);
+
+static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+ __timer_stats_timer_set_start_info(timer, __builtin_return_address(0));
+}
+
+static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+ timer->start_site = NULL;
+}
+#else
+static inline void init_timer_stats(void)
+{
+}
+
+static inline void timer_stats_timer_set_start_info(struct timer_list *timer)
+{
+}
+
+static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
+{
+}
+#endif
+
+/**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior to calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+#ifndef DDE_LINUX
+static inline void add_timer(struct timer_list *timer)
+{
+ BUG_ON(timer_pending(timer));
+ __mod_timer(timer, timer->expires);
+}
+#else
+extern void add_timer(struct timer_list *timer);
+#endif
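+
+/*
+ * Usage sketch (illustrative only): arm a one-shot timer two seconds
+ * from now and rearm it from the handler. 'struct my_dev' and its
+ * timer field are hypothetical.
+ *
+ *	static void my_timeout(unsigned long data)
+ *	{
+ *		struct my_dev *dev = (struct my_dev *)data;
+ *		// handle the timeout, then rearm:
+ *		mod_timer(&dev->timer, jiffies + 2 * HZ);
+ *	}
+ *
+ *	setup_timer(&dev->timer, my_timeout, (unsigned long)dev);
+ *	dev->timer.expires = jiffies + 2 * HZ;
+ *	add_timer(&dev->timer);
+ */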
+
+#ifdef CONFIG_SMP
+ extern int try_to_del_timer_sync(struct timer_list *timer);
+ extern int del_timer_sync(struct timer_list *timer);
+#else
+# define try_to_del_timer_sync(t) del_timer(t)
+# define del_timer_sync(t) del_timer(t)
+#endif
+
+#define del_singleshot_timer_sync(t) del_timer_sync(t)
+
+extern void init_timers(void);
+extern void run_local_timers(void);
+struct hrtimer;
+extern enum hrtimer_restart it_real_fn(struct hrtimer *);
+
+unsigned long __round_jiffies(unsigned long j, int cpu);
+unsigned long __round_jiffies_relative(unsigned long j, int cpu);
+unsigned long round_jiffies(unsigned long j);
+unsigned long round_jiffies_relative(unsigned long j);
+
+unsigned long __round_jiffies_up(unsigned long j, int cpu);
+unsigned long __round_jiffies_up_relative(unsigned long j, int cpu);
+unsigned long round_jiffies_up(unsigned long j);
+unsigned long round_jiffies_up_relative(unsigned long j);
+
+#endif
diff --git a/libdde-linux26/include/linux/usb.h b/libdde-linux26/include/linux/usb.h
new file mode 100644
index 00000000..85e072b9
--- /dev/null
+++ b/libdde-linux26/include/linux/usb.h
@@ -0,0 +1,1765 @@
+#ifndef __LINUX_USB_H
+#define __LINUX_USB_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/usb/ch9.h>
+
+#define USB_MAJOR 180
+#define USB_DEVICE_MAJOR 189
+
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h> /* for -ENODEV */
+#include <linux/delay.h> /* for mdelay() */
+#include <linux/interrupt.h> /* for in_interrupt() */
+#include <linux/list.h> /* for struct list_head */
+#include <linux/kref.h> /* for struct kref */
+#include <linux/device.h> /* for struct device */
+#include <linux/fs.h> /* for struct file_operations */
+#include <linux/completion.h> /* for struct completion */
+#include <linux/sched.h> /* for current && schedule_timeout */
+#include <linux/mutex.h> /* for struct mutex */
+
+struct usb_device;
+struct usb_driver;
+struct wusb_dev;
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * Host-side wrappers for standard USB descriptors ... these are parsed
+ * from the data provided by devices. Parsing turns them from a flat
+ * sequence of descriptors into a hierarchy:
+ *
+ * - devices have one (usually) or more configs;
+ * - configs have one (often) or more interfaces;
+ * - interfaces have one (usually) or more settings;
+ * - each interface setting has zero or (usually) more endpoints.
+ *
+ * And there might be other descriptors mixed in with those.
+ *
+ * Devices may also have class-specific or vendor-specific descriptors.
+ */
+
+struct ep_device;
+
+/**
+ * struct usb_host_endpoint - host-side endpoint descriptor and queue
+ * @desc: descriptor for this endpoint, wMaxPacketSize in native byteorder
+ * @urb_list: urbs queued to this endpoint; maintained by usbcore
+ * @hcpriv: for use by HCD; typically holds hardware dma queue head (QH)
+ * with one or more transfer descriptors (TDs) per urb
+ * @ep_dev: ep_device for sysfs info
+ * @extra: descriptors following this endpoint in the configuration
+ * @extralen: how many bytes of "extra" are valid
+ * @enabled: URBs may be submitted to this endpoint
+ *
+ * USB requests are always queued to a given endpoint, identified by a
+ * descriptor within an active interface in a given USB configuration.
+ */
+struct usb_host_endpoint {
+ struct usb_endpoint_descriptor desc;
+ struct list_head urb_list;
+ void *hcpriv;
+ struct ep_device *ep_dev; /* For sysfs info */
+
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+ int enabled;
+};
+
+/* host-side wrapper for one interface setting's parsed descriptors */
+struct usb_host_interface {
+ struct usb_interface_descriptor desc;
+
+ /* array of desc.bNumEndpoint endpoints associated with this
+ * interface setting. these will be in no particular order.
+ */
+ struct usb_host_endpoint *endpoint;
+
+ char *string; /* iInterface string, if present */
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+};
+
+enum usb_interface_condition {
+ USB_INTERFACE_UNBOUND = 0,
+ USB_INTERFACE_BINDING,
+ USB_INTERFACE_BOUND,
+ USB_INTERFACE_UNBINDING,
+};
+
+/**
+ * struct usb_interface - what usb device drivers talk to
+ * @altsetting: array of interface structures, one for each alternate
+ * setting that may be selected. Each one includes a set of
+ * endpoint configurations. They will be in no particular order.
+ * @cur_altsetting: the current altsetting.
+ * @num_altsetting: number of altsettings defined.
+ * @intf_assoc: interface association descriptor
+ * @minor: the minor number assigned to this interface, if this
+ * interface is bound to a driver that uses the USB major number.
+ * If this interface does not use the USB major, this field should
+ * be unused. The driver should set this value in the probe()
+ * function of the driver, after it has been assigned a minor
+ * number from the USB core by calling usb_register_dev().
+ * @condition: binding state of the interface: not bound, binding
+ * (in probe()), bound to a driver, or unbinding (in disconnect())
+ * @is_active: flag set when the interface is bound and not suspended.
+ * @sysfs_files_created: sysfs attributes exist
+ * @ep_devs_created: endpoint child pseudo-devices exist
+ * @unregistering: flag set when the interface is being unregistered
+ * @needs_remote_wakeup: flag set when the driver requires remote-wakeup
+ * capability during autosuspend.
+ * @needs_altsetting0: flag set when a set-interface request for altsetting 0
+ * has been deferred.
+ * @needs_binding: flag set when the driver should be re-probed or unbound
+ * following a reset or suspend operation it doesn't support.
+ * @dev: driver model's view of this device
+ * @usb_dev: if an interface is bound to the USB major, this will point
+ * to the sysfs representation for that device.
+ * @pm_usage_cnt: PM usage counter for this interface; autosuspend is not
+ * allowed unless the counter is 0.
+ * @reset_ws: Used for scheduling resets from atomic context.
+ * @reset_running: set to 1 if the interface is currently running a
+ * queued reset so that usb_cancel_queued_reset() doesn't try to
+ * remove from the workqueue when running inside the worker
+ * thread. See __usb_queue_reset_device().
+ *
+ * USB device drivers attach to interfaces on a physical device. Each
+ * interface encapsulates a single high level function, such as feeding
+ * an audio stream to a speaker or reporting a change in a volume control.
+ * Many USB devices only have one interface. The protocol used to talk to
+ * an interface's endpoints can be defined in a usb "class" specification,
+ * or by a product's vendor. The (default) control endpoint is part of
+ * every interface, but is never listed among the interface's descriptors.
+ *
+ * The driver that is bound to the interface can use standard driver model
+ * calls such as dev_get_drvdata() on the dev member of this structure.
+ *
+ * Each interface may have alternate settings. The initial configuration
+ * of a device sets altsetting 0, but the device driver can change
+ * that setting using usb_set_interface(). Alternate settings are often
+ * used to control the use of periodic endpoints, such as by having
+ * different endpoints use different amounts of reserved USB bandwidth.
+ * All standards-conformant USB devices that use isochronous endpoints
+ * will use them in non-default settings.
+ *
+ * The USB specification says that alternate setting numbers must run from
+ * 0 to one less than the total number of alternate settings. But some
+ * devices manage to mess this up, and the structures aren't necessarily
+ * stored in numerical order anyhow. Use usb_altnum_to_altsetting() to
+ * look up an alternate setting in the altsetting array based on its number.
+ */
+struct usb_interface {
+ /* array of alternate settings for this interface,
+ * stored in no particular order */
+ struct usb_host_interface *altsetting;
+
+ struct usb_host_interface *cur_altsetting; /* the currently
+ * active alternate setting */
+ unsigned num_altsetting; /* number of alternate settings */
+
+ /* If there is an interface association descriptor then it will list
+ * the associated interfaces */
+ struct usb_interface_assoc_descriptor *intf_assoc;
+
+ int minor; /* minor number this interface is
+ * bound to */
+ enum usb_interface_condition condition; /* state of binding */
+ unsigned is_active:1; /* the interface is not suspended */
+ unsigned sysfs_files_created:1; /* the sysfs attributes exist */
+ unsigned ep_devs_created:1; /* endpoint "devices" exist */
+ unsigned unregistering:1; /* unregistration is in progress */
+ unsigned needs_remote_wakeup:1; /* driver requires remote wakeup */
+ unsigned needs_altsetting0:1; /* switch to altsetting 0 is pending */
+ unsigned needs_binding:1; /* needs delayed unbind/rebind */
+ unsigned reset_running:1;
+
+ struct device dev; /* interface specific device info */
+ struct device *usb_dev;
+ int pm_usage_cnt; /* usage counter for autosuspend */
+ struct work_struct reset_ws; /* for resets in atomic context */
+};
+#define to_usb_interface(d) container_of(d, struct usb_interface, dev)
+#define interface_to_usbdev(intf) \
+ container_of(intf->dev.parent, struct usb_device, dev)
+
+static inline void *usb_get_intfdata(struct usb_interface *intf)
+{
+ return dev_get_drvdata(&intf->dev);
+}
+
+static inline void usb_set_intfdata(struct usb_interface *intf, void *data)
+{
+ dev_set_drvdata(&intf->dev, data);
+}
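+
+/*
+ * Sketch (illustrative only): the usual way a driver attaches its
+ * private state to an interface in probe() and retrieves it again in
+ * disconnect(). 'struct my_data' is hypothetical.
+ *
+ *	static int my_probe(struct usb_interface *intf,
+ *			    const struct usb_device_id *id)
+ *	{
+ *		struct my_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
+ *		if (!data)
+ *			return -ENOMEM;
+ *		usb_set_intfdata(intf, data);
+ *		return 0;
+ *	}
+ *
+ *	static void my_disconnect(struct usb_interface *intf)
+ *	{
+ *		struct my_data *data = usb_get_intfdata(intf);
+ *		usb_set_intfdata(intf, NULL);
+ *		kfree(data);
+ *	}
+ */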
+
+struct usb_interface *usb_get_intf(struct usb_interface *intf);
+void usb_put_intf(struct usb_interface *intf);
+
+/* this maximum is arbitrary */
+#define USB_MAXINTERFACES 32
+#define USB_MAXIADS USB_MAXINTERFACES/2
+
+/**
+ * struct usb_interface_cache - long-term representation of a device interface
+ * @num_altsetting: number of altsettings defined.
+ * @ref: reference counter.
+ * @altsetting: variable-length array of interface structures, one for
+ * each alternate setting that may be selected. Each one includes a
+ * set of endpoint configurations. They will be in no particular order.
+ *
+ * These structures persist for the lifetime of a usb_device, unlike
+ * struct usb_interface (which persists only as long as its configuration
+ * is installed). The altsetting arrays can be accessed through these
+ * structures at any time, permitting comparison of configurations and
+ * providing support for the /proc/bus/usb/devices pseudo-file.
+ */
+struct usb_interface_cache {
+ unsigned num_altsetting; /* number of alternate settings */
+ struct kref ref; /* reference counter */
+
+ /* variable-length array of alternate settings for this interface,
+ * stored in no particular order */
+ struct usb_host_interface altsetting[0];
+};
+#define ref_to_usb_interface_cache(r) \
+ container_of(r, struct usb_interface_cache, ref)
+#define altsetting_to_usb_interface_cache(a) \
+ container_of(a, struct usb_interface_cache, altsetting[0])
+
+/**
+ * struct usb_host_config - representation of a device's configuration
+ * @desc: the device's configuration descriptor.
+ * @string: pointer to the cached version of the iConfiguration string, if
+ * present for this configuration.
+ * @intf_assoc: list of any interface association descriptors in this config
+ * @interface: array of pointers to usb_interface structures, one for each
+ * interface in the configuration. The number of interfaces is stored
+ * in desc.bNumInterfaces. These pointers are valid only while the
+ * configuration is active.
+ * @intf_cache: array of pointers to usb_interface_cache structures, one
+ * for each interface in the configuration. These structures exist
+ * for the entire life of the device.
+ * @extra: pointer to buffer containing all extra descriptors associated
+ * with this configuration (those preceding the first interface
+ * descriptor).
+ * @extralen: length of the extra descriptors buffer.
+ *
+ * USB devices may have multiple configurations, but only one can be active
+ * at any time. Each encapsulates a different operational environment;
+ * for example, a dual-speed device would have separate configurations for
+ * full-speed and high-speed operation. The number of configurations
+ * available is stored in the device descriptor as bNumConfigurations.
+ *
+ * A configuration can contain multiple interfaces. Each corresponds to
+ * a different function of the USB device, and all are available whenever
+ * the configuration is active. The USB standard says that interfaces
+ * are supposed to be numbered from 0 to desc.bNumInterfaces-1, but a lot
+ * of devices get this wrong. In addition, the interface array is not
+ * guaranteed to be sorted in numerical order. Use usb_ifnum_to_if() to
+ * look up an interface entry based on its number.
+ *
+ * Device drivers should not attempt to activate configurations. The choice
+ * of which configuration to install is a policy decision based on such
+ * considerations as available power, functionality provided, and the user's
+ * desires (expressed through userspace tools). However, drivers can call
+ * usb_reset_configuration() to reinitialize the current configuration and
+ * all its interfaces.
+ */
+struct usb_host_config {
+ struct usb_config_descriptor desc;
+
+ char *string; /* iConfiguration string, if present */
+
+ /* List of any Interface Association Descriptors in this
+ * configuration. */
+ struct usb_interface_assoc_descriptor *intf_assoc[USB_MAXIADS];
+
+ /* the interfaces associated with this configuration,
+ * stored in no particular order */
+ struct usb_interface *interface[USB_MAXINTERFACES];
+
+ /* Interface information available even when this is not the
+ * active configuration */
+ struct usb_interface_cache *intf_cache[USB_MAXINTERFACES];
+
+ unsigned char *extra; /* Extra descriptors */
+ int extralen;
+};
+
+int __usb_get_extra_descriptor(char *buffer, unsigned size,
+ unsigned char type, void **ptr);
+#define usb_get_extra_descriptor(ifpoint, type, ptr) \
+ __usb_get_extra_descriptor((ifpoint)->extra, \
+ (ifpoint)->extralen, \
+ type, (void **)ptr)
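+
+/*
+ * Sketch (illustrative only): fetching a class-specific descriptor
+ * that follows the interface descriptor of the current altsetting.
+ * The CDC header descriptor is just an example target.
+ *
+ *	struct usb_cdc_header_desc *hdr;
+ *
+ *	if (usb_get_extra_descriptor(intf->cur_altsetting,
+ *				     USB_DT_CS_INTERFACE, &hdr) < 0)
+ *		return -ENODEV;
+ */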
+
+/* ----------------------------------------------------------------------- */
+
+/* USB device number allocation bitmap */
+struct usb_devmap {
+ unsigned long devicemap[128 / (8*sizeof(unsigned long))];
+};
+
+/*
+ * Allocated per bus (tree of devices) we have:
+ */
+struct usb_bus {
+ struct device *controller; /* host/master side hardware */
+ int busnum; /* Bus number (in order of reg) */
+ const char *bus_name; /* stable id (PCI slot_name etc) */
+ u8 uses_dma; /* Does the host controller use DMA? */
+ u8 otg_port; /* 0, or number of OTG/HNP port */
+ unsigned is_b_host:1; /* true during some HNP roleswitches */
+ unsigned b_hnp_enable:1; /* OTG: did A-Host enable HNP? */
+
+ int devnum_next; /* Next open device number in
+ * round-robin allocation */
+
+ struct usb_devmap devmap; /* device address allocation map */
+ struct usb_device *root_hub; /* Root hub */
+ struct list_head bus_list; /* list of busses */
+
+ int bandwidth_allocated; /* on this bus: how much of the time
+ * reserved for periodic (intr/iso)
+ * requests is used, on average?
+ * Units: microseconds/frame.
+ * Limits: Full/low speed reserve 90%,
+ * while high speed reserves 80%.
+ */
+ int bandwidth_int_reqs; /* number of Interrupt requests */
+ int bandwidth_isoc_reqs; /* number of Isoc. requests */
+
+#ifdef CONFIG_USB_DEVICEFS
+ struct dentry *usbfs_dentry; /* usbfs dentry entry for the bus */
+#endif
+ struct device *dev; /* device for this bus */
+
+#if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
+ struct mon_bus *mon_bus; /* non-null when associated */
+ int monitored; /* non-zero when monitored */
+#endif
+};
+
+/* ----------------------------------------------------------------------- */
+
+/* This is arbitrary.
+ * From USB 2.0 spec Table 11-13, offset 7, a hub can
+ * have up to 255 ports. The most yet reported is 10.
+ *
+ * Current Wireless USB host hardware (Intel i1480 for example) allows
+ * up to 22 devices to connect. Upcoming hardware might raise that
+ * limit. Because the arrays need to add a bit for hub status data, we
+ * do 31, so plus one evens out to four bytes.
+ */
+#define USB_MAXCHILDREN (31)
+
+struct usb_tt;
+
+/**
+ * struct usb_device - kernel's representation of a USB device
+ * @devnum: device number; address on a USB bus
+ * @devpath: device ID string for use in messages (e.g., /port/...)
+ * @state: device state: configured, not attached, etc.
+ * @speed: device speed: high/full/low (or error)
+ * @tt: Transaction Translator info; used with low/full speed dev, highspeed hub
+ * @ttport: device port on that tt hub
+ * @toggle: one bit for each endpoint, with ([0] = IN, [1] = OUT) endpoints
+ * @parent: our hub, unless we're the root
+ * @bus: bus we're part of
+ * @ep0: endpoint 0 data (default control pipe)
+ * @dev: generic device interface
+ * @descriptor: USB device descriptor
+ * @config: all of the device's configs
+ * @actconfig: the active configuration
+ * @ep_in: array of IN endpoints
+ * @ep_out: array of OUT endpoints
+ * @rawdescriptors: raw descriptors for each config
+ * @bus_mA: Current available from the bus
+ * @portnum: parent port number (origin 1)
+ * @level: number of USB hub ancestors
+ * @can_submit: URBs may be submitted
+ * @discon_suspended: disconnected while suspended
+ * @persist_enabled: USB_PERSIST enabled for this device
+ * @have_langid: whether string_langid is valid
+ * @authorized: policy has said we can use it;
+ * (user space) policy determines if we authorize this device to be
+ * used or not. By default, wired USB devices are authorized.
+ * WUSB devices are not, until we authorize them from user space.
+ * FIXME -- complete doc
+ * @authenticated: Crypto authentication passed
+ * @wusb: device is Wireless USB
+ * @string_langid: language ID for strings
+ * @product: iProduct string, if present (static)
+ * @manufacturer: iManufacturer string, if present (static)
+ * @serial: iSerialNumber string, if present (static)
+ * @filelist: usbfs files that are open to this device
+ * @usb_classdev: USB class device that was created for usbfs device
+ * access from userspace
+ * @usbfs_dentry: usbfs dentry entry for the device
+ * @maxchild: number of ports if hub
+ * @children: child devices - USB devices that are attached to this hub
+ * @pm_usage_cnt: usage counter for autosuspend
+ * @quirks: quirks of the whole device
+ * @urbnum: number of URBs submitted for the whole device
+ * @active_duration: total time device is not suspended
+ * @autosuspend: for delayed autosuspends
+ * @autoresume: for autoresumes requested while in_interrupt
+ * @pm_mutex: protects PM operations
+ * @last_busy: time of last use
+ * @autosuspend_delay: in jiffies
+ * @connect_time: time device was first connected
+ * @auto_pm: autosuspend/resume in progress
+ * @do_remote_wakeup: remote wakeup should be enabled
+ * @reset_resume: needs reset instead of resume
+ * @autosuspend_disabled: autosuspend disabled by the user
+ * @autoresume_disabled: autoresume disabled by the user
+ * @skip_sys_resume: skip the next system resume
+ * @wusb_dev: if this is a Wireless USB device, link to the WUSB
+ * specific data for the device.
+ *
+ * Notes:
+ * Usbcore drivers should not set usbdev->state directly. Instead use
+ * usb_set_device_state().
+ */
+struct usb_device {
+ int devnum;
+ char devpath [16];
+ enum usb_device_state state;
+ enum usb_device_speed speed;
+
+ struct usb_tt *tt;
+ int ttport;
+
+ unsigned int toggle[2];
+
+ struct usb_device *parent;
+ struct usb_bus *bus;
+ struct usb_host_endpoint ep0;
+
+ struct device dev;
+
+ struct usb_device_descriptor descriptor;
+ struct usb_host_config *config;
+
+ struct usb_host_config *actconfig;
+ struct usb_host_endpoint *ep_in[16];
+ struct usb_host_endpoint *ep_out[16];
+
+ char **rawdescriptors;
+
+ unsigned short bus_mA;
+ u8 portnum;
+ u8 level;
+
+ unsigned can_submit:1;
+ unsigned discon_suspended:1;
+ unsigned persist_enabled:1;
+ unsigned have_langid:1;
+ unsigned authorized:1;
+ unsigned authenticated:1;
+ unsigned wusb:1;
+ int string_langid;
+
+ /* static strings from the device */
+ char *product;
+ char *manufacturer;
+ char *serial;
+
+ struct list_head filelist;
+#ifdef CONFIG_USB_DEVICE_CLASS
+ struct device *usb_classdev;
+#endif
+#ifdef CONFIG_USB_DEVICEFS
+ struct dentry *usbfs_dentry;
+#endif
+
+ int maxchild;
+ struct usb_device *children[USB_MAXCHILDREN];
+
+ int pm_usage_cnt;
+ u32 quirks;
+ atomic_t urbnum;
+
+ unsigned long active_duration;
+
+#ifdef CONFIG_PM
+ struct delayed_work autosuspend;
+ struct work_struct autoresume;
+ struct mutex pm_mutex;
+
+ unsigned long last_busy;
+ int autosuspend_delay;
+ unsigned long connect_time;
+
+ unsigned auto_pm:1;
+ unsigned do_remote_wakeup:1;
+ unsigned reset_resume:1;
+ unsigned autosuspend_disabled:1;
+ unsigned autoresume_disabled:1;
+ unsigned skip_sys_resume:1;
+#endif
+ struct wusb_dev *wusb_dev;
+};
+#define to_usb_device(d) container_of(d, struct usb_device, dev)
+
+extern struct usb_device *usb_get_dev(struct usb_device *dev);
+extern void usb_put_dev(struct usb_device *dev);
+
+/* USB device locking */
+#define usb_lock_device(udev) down(&(udev)->dev.sem)
+#define usb_unlock_device(udev) up(&(udev)->dev.sem)
+#define usb_trylock_device(udev) down_trylock(&(udev)->dev.sem)
+extern int usb_lock_device_for_reset(struct usb_device *udev,
+ const struct usb_interface *iface);
+
+/* USB port reset for device reinitialization */
+extern int usb_reset_device(struct usb_device *dev);
+extern void usb_queue_reset_device(struct usb_interface *dev);
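+
+/*
+ * Example: resetting a device from driver context (an illustrative
+ * sketch, assuming the convention that usb_lock_device_for_reset()
+ * returns 0 once it has taken the device lock):
+ *
+ *	retval = usb_lock_device_for_reset(udev, intf);
+ *	if (retval == 0) {
+ *		retval = usb_reset_device(udev);
+ *		usb_unlock_device(udev);
+ *	}
+ */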
+
+extern struct usb_device *usb_find_device(u16 vendor_id, u16 product_id);
+
+/* USB autosuspend and autoresume */
+#ifdef CONFIG_USB_SUSPEND
+extern int usb_autopm_set_interface(struct usb_interface *intf);
+extern int usb_autopm_get_interface(struct usb_interface *intf);
+extern void usb_autopm_put_interface(struct usb_interface *intf);
+extern int usb_autopm_get_interface_async(struct usb_interface *intf);
+extern void usb_autopm_put_interface_async(struct usb_interface *intf);
+
+static inline void usb_autopm_enable(struct usb_interface *intf)
+{
+ intf->pm_usage_cnt = 0;
+ usb_autopm_set_interface(intf);
+}
+
+static inline void usb_autopm_disable(struct usb_interface *intf)
+{
+ intf->pm_usage_cnt = 1;
+ usb_autopm_set_interface(intf);
+}
+
+static inline void usb_mark_last_busy(struct usb_device *udev)
+{
+ udev->last_busy = jiffies;
+}
+
+#else
+
+static inline int usb_autopm_set_interface(struct usb_interface *intf)
+{ return 0; }
+
+static inline int usb_autopm_get_interface(struct usb_interface *intf)
+{ return 0; }
+
+static inline int usb_autopm_get_interface_async(struct usb_interface *intf)
+{ return 0; }
+
+static inline void usb_autopm_put_interface(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_put_interface_async(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_enable(struct usb_interface *intf)
+{ }
+static inline void usb_autopm_disable(struct usb_interface *intf)
+{ }
+static inline void usb_mark_last_busy(struct usb_device *udev)
+{ }
+#endif
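+
+/*
+ * Example: the usual autosuspend usage pattern around I/O (a minimal
+ * sketch; it works the same with the stubbed-out versions above):
+ *
+ *	retval = usb_autopm_get_interface(intf);
+ *	if (retval)
+ *		return retval;
+ *	... perform I/O on the device ...
+ *	usb_autopm_put_interface(intf);
+ */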
+
+/*-------------------------------------------------------------------------*/
+
+/* for drivers using iso endpoints */
+extern int usb_get_current_frame_number(struct usb_device *usb_dev);
+
+/* use these for multi-interface device registration */
+extern int usb_driver_claim_interface(struct usb_driver *driver,
+ struct usb_interface *iface, void *priv);
+
+/**
+ * usb_interface_claimed - returns true iff an interface is claimed
+ * @iface: the interface being checked
+ *
+ * Returns true (nonzero) iff the interface is claimed, else false (zero).
+ * Callers must own the driver model's usb bus readlock. So driver
+ * probe() entries don't need extra locking, but other call contexts
+ * may need to explicitly claim that lock.
+ */
+static inline int usb_interface_claimed(struct usb_interface *iface)
+{
+ return (iface->dev.driver != NULL);
+}
+
+extern void usb_driver_release_interface(struct usb_driver *driver,
+ struct usb_interface *iface);
+const struct usb_device_id *usb_match_id(struct usb_interface *interface,
+ const struct usb_device_id *id);
+extern int usb_match_one_id(struct usb_interface *interface,
+ const struct usb_device_id *id);
+
+extern struct usb_interface *usb_find_interface(struct usb_driver *drv,
+ int minor);
+extern struct usb_interface *usb_ifnum_to_if(const struct usb_device *dev,
+ unsigned ifnum);
+extern struct usb_host_interface *usb_altnum_to_altsetting(
+ const struct usb_interface *intf, unsigned int altnum);
+
+
+/**
+ * usb_make_path - returns stable device path in the usb tree
+ * @dev: the device whose path is being constructed
+ * @buf: where to put the string
+ * @size: how big is "buf"?
+ *
+ * Returns length of the string (> 0) or negative if size was too small.
+ *
+ * This identifier is intended to be "stable", reflecting physical paths in
+ * hardware such as physical bus addresses for host controllers or ports on
+ * USB hubs. That makes it stay the same until systems are physically
+ * reconfigured, by re-cabling a tree of USB devices or by moving USB host
+ * controllers. Adding and removing devices, including virtual root hubs
+ * in host controller driver modules, does not change these path identifiers;
+ * neither does rebooting or re-enumerating. These are more useful identifiers
+ * than changeable ("unstable") ones like bus numbers or device addresses.
+ *
+ * With a partial exception for devices connected to USB 2.0 root hubs, these
+ * identifiers are also predictable. So long as the device tree isn't changed,
+ * plugging any USB device into a given hub port always gives it the same path.
+ * Because of the use of "companion" controllers, devices connected to ports on
+ * USB 2.0 root hubs (EHCI host controllers) will get one path ID if they are
+ * high speed, and a different one if they are full or low speed.
+ */
+static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
+{
+ int actual;
+ actual = snprintf(buf, size, "usb-%s-%s", dev->bus->bus_name,
+ dev->devpath);
+ return (actual >= (int)size) ? -1 : actual;
+}
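+
+/*
+ * Example: logging a device's stable topology path (a minimal sketch;
+ * "udev" is assumed to be a valid struct usb_device pointer):
+ *
+ *	char path[64];
+ *
+ *	if (usb_make_path(udev, path, sizeof(path)) > 0)
+ *		dev_info(&udev->dev, "connected at %s\n", path);
+ */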
+
+/*-------------------------------------------------------------------------*/
+
+/**
+ * usb_endpoint_num - get the endpoint's number
+ * @epd: endpoint to be checked
+ *
+ * Returns @epd's number: 0 to 15.
+ */
+static inline int usb_endpoint_num(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK;
+}
+
+/**
+ * usb_endpoint_type - get the endpoint's transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns one of USB_ENDPOINT_XFER_{CONTROL, ISOC, BULK, INT} according
+ * to @epd's transfer type.
+ */
+static inline int usb_endpoint_type(const struct usb_endpoint_descriptor *epd)
+{
+ return epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;
+}
+
+/**
+ * usb_endpoint_dir_in - check if the endpoint has IN direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type IN, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_in(const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN);
+}
+
+/**
+ * usb_endpoint_dir_out - check if the endpoint has OUT direction
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type OUT, otherwise it returns false.
+ */
+static inline int usb_endpoint_dir_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT);
+}
+
+/**
+ * usb_endpoint_xfer_bulk - check if the endpoint has bulk transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type bulk, otherwise it returns false.
+ */
+static inline int usb_endpoint_xfer_bulk(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_BULK);
+}
+
+/**
+ * usb_endpoint_xfer_control - check if the endpoint has control transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type control, otherwise it returns false.
+ */
+static inline int usb_endpoint_xfer_control(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_CONTROL);
+}
+
+/**
+ * usb_endpoint_xfer_int - check if the endpoint has interrupt transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type interrupt, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_int(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT);
+}
+
+/**
+ * usb_endpoint_xfer_isoc - check if the endpoint has isochronous transfer type
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint is of type isochronous, otherwise it returns
+ * false.
+ */
+static inline int usb_endpoint_xfer_isoc(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return ((epd->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_ISOC);
+}
+
+/**
+ * usb_endpoint_is_bulk_in - check if the endpoint is bulk IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_bulk_in(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_bulk_out - check if the endpoint is bulk OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has bulk transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_bulk_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_bulk(epd) && usb_endpoint_dir_out(epd));
+}
+
+/**
+ * usb_endpoint_is_int_in - check if the endpoint is interrupt IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_int_in(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_int_out - check if the endpoint is interrupt OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has interrupt transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_int_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_int(epd) && usb_endpoint_dir_out(epd));
+}
+
+/**
+ * usb_endpoint_is_isoc_in - check if the endpoint is isochronous IN
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and IN direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_isoc_in(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_in(epd));
+}
+
+/**
+ * usb_endpoint_is_isoc_out - check if the endpoint is isochronous OUT
+ * @epd: endpoint to be checked
+ *
+ * Returns true if the endpoint has isochronous transfer type and OUT direction,
+ * otherwise it returns false.
+ */
+static inline int usb_endpoint_is_isoc_out(
+ const struct usb_endpoint_descriptor *epd)
+{
+ return (usb_endpoint_xfer_isoc(epd) && usb_endpoint_dir_out(epd));
+}
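+
+/*
+ * Example: the usual probe()-time endpoint scan built from the helpers
+ * above (an illustrative sketch; bulk_in/bulk_out are hypothetical
+ * driver state):
+ *
+ *	struct usb_host_interface *alt = intf->cur_altsetting;
+ *	struct usb_endpoint_descriptor *epd;
+ *	struct usb_endpoint_descriptor *bulk_in = NULL, *bulk_out = NULL;
+ *	int i;
+ *
+ *	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
+ *		epd = &alt->endpoint[i].desc;
+ *		if (usb_endpoint_is_bulk_in(epd))
+ *			bulk_in = epd;
+ *		else if (usb_endpoint_is_bulk_out(epd))
+ *			bulk_out = epd;
+ *	}
+ *	if (!bulk_in || !bulk_out)
+ *		return -ENODEV;
+ */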
+
+/*-------------------------------------------------------------------------*/
+
+#define USB_DEVICE_ID_MATCH_DEVICE \
+ (USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT)
+#define USB_DEVICE_ID_MATCH_DEV_RANGE \
+ (USB_DEVICE_ID_MATCH_DEV_LO | USB_DEVICE_ID_MATCH_DEV_HI)
+#define USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION \
+ (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_RANGE)
+#define USB_DEVICE_ID_MATCH_DEV_INFO \
+ (USB_DEVICE_ID_MATCH_DEV_CLASS | \
+ USB_DEVICE_ID_MATCH_DEV_SUBCLASS | \
+ USB_DEVICE_ID_MATCH_DEV_PROTOCOL)
+#define USB_DEVICE_ID_MATCH_INT_INFO \
+ (USB_DEVICE_ID_MATCH_INT_CLASS | \
+ USB_DEVICE_ID_MATCH_INT_SUBCLASS | \
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL)
+
+/**
+ * USB_DEVICE - macro used to describe a specific usb device
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific device.
+ */
+#define USB_DEVICE(vend,prod) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod)
+/**
+ * USB_DEVICE_VER - describe a specific usb device with a version range
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @lo: the bcdDevice_lo value
+ * @hi: the bcdDevice_hi value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific device, with a version range.
+ */
+#define USB_DEVICE_VER(vend, prod, lo, hi) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bcdDevice_lo = (lo), \
+ .bcdDevice_hi = (hi)
+
+/**
+ * USB_DEVICE_INTERFACE_PROTOCOL - describe a usb device with a specific interface protocol
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific interface protocol of devices.
+ */
+#define USB_DEVICE_INTERFACE_PROTOCOL(vend, prod, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE | \
+ USB_DEVICE_ID_MATCH_INT_PROTOCOL, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceProtocol = (pr)
+
+/**
+ * USB_DEVICE_INFO - macro used to describe a class of usb devices
+ * @cl: bDeviceClass value
+ * @sc: bDeviceSubClass value
+ * @pr: bDeviceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific class of devices.
+ */
+#define USB_DEVICE_INFO(cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_DEV_INFO, \
+ .bDeviceClass = (cl), \
+ .bDeviceSubClass = (sc), \
+ .bDeviceProtocol = (pr)
+
+/**
+ * USB_INTERFACE_INFO - macro used to describe a class of usb interfaces
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific class of interfaces.
+ */
+#define USB_INTERFACE_INFO(cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO, \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
+/**
+ * USB_DEVICE_AND_INTERFACE_INFO - describe a specific usb device with a class of usb interfaces
+ * @vend: the 16 bit USB Vendor ID
+ * @prod: the 16 bit USB Product ID
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific device with a specific class of interfaces.
+ *
+ * This is especially useful when explicitly matching devices that have
+ * vendor specific bDeviceClass values, but standards-compliant interfaces.
+ */
+#define USB_DEVICE_AND_INTERFACE_INFO(vend, prod, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_DEVICE, \
+ .idVendor = (vend), \
+ .idProduct = (prod), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
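+
+/*
+ * Example: a match table built from the macros above (a sketch; the
+ * vendor/product IDs are hypothetical placeholders):
+ *
+ *	static const struct usb_device_id my_id_table[] = {
+ *		{ USB_DEVICE(0x1234, 0x5678) },
+ *		{ USB_INTERFACE_INFO(USB_CLASS_HID, 1, 1) },
+ *		{ }
+ *	};
+ *	MODULE_DEVICE_TABLE(usb, my_id_table);
+ *
+ * The empty initializer terminates the table, and MODULE_DEVICE_TABLE()
+ * exports it for hotplug matching.
+ */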
+
+/* ----------------------------------------------------------------------- */
+
+/* Stuff for dynamic usb ids */
+struct usb_dynids {
+ spinlock_t lock;
+ struct list_head list;
+};
+
+struct usb_dynid {
+ struct list_head node;
+ struct usb_device_id id;
+};
+
+extern ssize_t usb_store_new_id(struct usb_dynids *dynids,
+ struct device_driver *driver,
+ const char *buf, size_t count);
+
+/**
+ * struct usbdrv_wrap - wrapper for driver-model structure
+ * @driver: The driver-model core driver structure.
+ * @for_devices: Non-zero for device drivers, 0 for interface drivers.
+ */
+struct usbdrv_wrap {
+ struct device_driver driver;
+ int for_devices;
+};
+
+/**
+ * struct usb_driver - identifies USB interface driver to usbcore
+ * @name: The driver name should be unique among USB drivers,
+ * and should normally be the same as the module name.
+ * @probe: Called to see if the driver is willing to manage a particular
+ * interface on a device. If it is, probe returns zero and uses
+ * usb_set_intfdata() to associate driver-specific data with the
+ * interface. It may also use usb_set_interface() to specify the
+ * appropriate altsetting. If unwilling to manage the interface,
+ *	return -ENODEV; if genuine IO errors occurred, return an
+ *	appropriate negative errno value.
+ * @disconnect: Called when the interface is no longer accessible, usually
+ * because its device has been (or is being) disconnected or the
+ * driver module is being unloaded.
+ * @ioctl: Used for drivers that want to talk to userspace through
+ * the "usbfs" filesystem. This lets devices provide ways to
+ * expose information to user space regardless of where they
+ * do (or don't) show up otherwise in the filesystem.
+ * @suspend: Called when the device is going to be suspended by the system.
+ * @resume: Called when the device is being resumed by the system.
+ * @reset_resume: Called when the suspended device has been reset instead
+ * of being resumed.
+ * @pre_reset: Called by usb_reset_device() when the device
+ * is about to be reset.
+ * @post_reset: Called by usb_reset_device() after the device
+ *	has been reset.
+ * @id_table: USB drivers use the ID table to support hotplugging.
+ * Export this with MODULE_DEVICE_TABLE(usb,...). This must be set
+ * or your driver's probe function will never get called.
+ * @dynids: used internally to hold the list of dynamically added device
+ * ids for this driver.
+ * @drvwrap: Driver-model core structure wrapper.
+ * @no_dynamic_id: if set to 1, the USB core will not allow dynamic ids to be
+ * added to this driver by preventing the sysfs file from being created.
+ * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
+ * for interfaces bound to this driver.
+ * @soft_unbind: if set to 1, the USB core will not kill URBs and disable
+ * endpoints before calling the driver's disconnect method.
+ *
+ * USB interface drivers must provide a name, probe() and disconnect()
+ * methods, and an id_table. Other driver fields are optional.
+ *
+ * The id_table is used in hotplugging. It holds a set of descriptors,
+ * and specialized data may be associated with each entry. That table
+ * is used by both user and kernel mode hotplugging support.
+ *
+ * The probe() and disconnect() methods are called in a context where
+ * they can sleep, but they should avoid abusing the privilege. Most
+ * work to connect to a device should be done when the device is opened,
+ * and undone at the last close. The disconnect code needs to address
+ * concurrency issues with respect to open() and close() methods, as
+ * well as forcing all pending I/O requests to complete (by unlinking
+ * them as necessary, and blocking until the unlinks complete).
+ */
+struct usb_driver {
+ const char *name;
+
+ int (*probe) (struct usb_interface *intf,
+ const struct usb_device_id *id);
+
+ void (*disconnect) (struct usb_interface *intf);
+
+ int (*ioctl) (struct usb_interface *intf, unsigned int code,
+ void *buf);
+
+ int (*suspend) (struct usb_interface *intf, pm_message_t message);
+ int (*resume) (struct usb_interface *intf);
+ int (*reset_resume)(struct usb_interface *intf);
+
+ int (*pre_reset)(struct usb_interface *intf);
+ int (*post_reset)(struct usb_interface *intf);
+
+ const struct usb_device_id *id_table;
+
+ struct usb_dynids dynids;
+ struct usbdrv_wrap drvwrap;
+ unsigned int no_dynamic_id:1;
+ unsigned int supports_autosuspend:1;
+ unsigned int soft_unbind:1;
+};
+#define to_usb_driver(d) container_of(d, struct usb_driver, drvwrap.driver)
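+
+/*
+ * Example: a minimal interface driver definition (a sketch; my_probe,
+ * my_disconnect and my_id_table are hypothetical):
+ *
+ *	static struct usb_driver my_driver = {
+ *		.name		= "my_driver",
+ *		.probe		= my_probe,
+ *		.disconnect	= my_disconnect,
+ *		.id_table	= my_id_table,
+ *	};
+ */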
+
+/**
+ * struct usb_device_driver - identifies USB device driver to usbcore
+ * @name: The driver name should be unique among USB drivers,
+ * and should normally be the same as the module name.
+ * @probe: Called to see if the driver is willing to manage a particular
+ * device. If it is, probe returns zero and uses dev_set_drvdata()
+ * to associate driver-specific data with the device. If unwilling
+ * to manage the device, return a negative errno value.
+ * @disconnect: Called when the device is no longer accessible, usually
+ * because it has been (or is being) disconnected or the driver's
+ * module is being unloaded.
+ * @suspend: Called when the device is going to be suspended by the system.
+ * @resume: Called when the device is being resumed by the system.
+ * @drvwrap: Driver-model core structure wrapper.
+ * @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
+ * for devices bound to this driver.
+ *
+ * USB drivers must provide all the fields listed above except drvwrap.
+ */
+struct usb_device_driver {
+ const char *name;
+
+ int (*probe) (struct usb_device *udev);
+ void (*disconnect) (struct usb_device *udev);
+
+ int (*suspend) (struct usb_device *udev, pm_message_t message);
+ int (*resume) (struct usb_device *udev, pm_message_t message);
+ struct usbdrv_wrap drvwrap;
+ unsigned int supports_autosuspend:1;
+};
+#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
+ drvwrap.driver)
+
+extern struct bus_type usb_bus_type;
+
+/**
+ * struct usb_class_driver - identifies a USB driver that wants to use the USB major number
+ * @name: the usb class device name for this driver. Will show up in sysfs.
+ * @fops: pointer to the struct file_operations of this driver.
+ * @minor_base: the start of the minor range for this driver.
+ *
+ * This structure is used for the usb_register_dev() and
+ * usb_unregister_dev() functions, to consolidate a number of the
+ * parameters used for them.
+ */
+struct usb_class_driver {
+ char *name;
+ const struct file_operations *fops;
+ int minor_base;
+};
+
+/*
+ * use these in module_init()/module_exit()
+ * and don't forget MODULE_DEVICE_TABLE(usb, ...)
+ */
+extern int usb_register_driver(struct usb_driver *, struct module *,
+ const char *);
+#ifndef DDEUSB26_GADGET
+
+static inline int usb_register(struct usb_driver *driver)
+{
+ return usb_register_driver(driver, THIS_MODULE, KBUILD_MODNAME);
+}
+extern void usb_deregister(struct usb_driver *);
+
+#else /* ifndef DDEUSB26_GADGET */
+
+extern int libddeusb26_register_driver(struct usb_driver *, struct module *);
+static inline int usb_register(struct usb_driver *driver)
+{
+ return libddeusb26_register_driver(driver, THIS_MODULE);
+}
+
+extern void libddeusb26_deregister_driver(struct usb_driver *);
+#define usb_deregister(x) libddeusb26_deregister_driver(x)
+
+#endif /* DDEUSB26_GADGET */
+
+extern int usb_register_device_driver(struct usb_device_driver *,
+ struct module *);
+extern void usb_deregister_device_driver(struct usb_device_driver *);
+
+extern int usb_register_dev(struct usb_interface *intf,
+ struct usb_class_driver *class_driver);
+extern void usb_deregister_dev(struct usb_interface *intf,
+ struct usb_class_driver *class_driver);
+
+extern int usb_disabled(void);
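+
+/*
+ * Example: registering the hypothetical my_driver shown earlier from
+ * module init/exit (a minimal sketch):
+ *
+ *	static int __init my_driver_init(void)
+ *	{
+ *		return usb_register(&my_driver);
+ *	}
+ *
+ *	static void __exit my_driver_exit(void)
+ *	{
+ *		usb_deregister(&my_driver);
+ *	}
+ *
+ *	module_init(my_driver_init);
+ *	module_exit(my_driver_exit);
+ */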
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * URB support, for asynchronous request completions
+ */
+
+/*
+ * urb->transfer_flags:
+ *
+ * Note: URB_DIR_IN/OUT is automatically set in usb_submit_urb().
+ */
+#define URB_SHORT_NOT_OK 0x0001 /* report short reads as errors */
+#define URB_ISO_ASAP 0x0002 /* iso-only, urb->start_frame
+ * ignored */
+#define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */
+#define URB_NO_SETUP_DMA_MAP 0x0008 /* urb->setup_dma valid on submit */
+#define URB_NO_FSBR 0x0020 /* UHCI-specific */
+#define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */
+#define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt
+ * needed */
+#define URB_FREE_BUFFER 0x0100 /* Free transfer buffer with the URB */
+
+#define URB_DIR_IN 0x0200 /* Transfer from device to host */
+#define URB_DIR_OUT 0
+#define URB_DIR_MASK URB_DIR_IN
+
+struct usb_iso_packet_descriptor {
+ unsigned int offset;
+ unsigned int length; /* expected length */
+ unsigned int actual_length;
+ int status;
+};
+
+struct urb;
+
+struct usb_anchor {
+ struct list_head urb_list;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+ unsigned int poisoned:1;
+};
+
+static inline void init_usb_anchor(struct usb_anchor *anchor)
+{
+ INIT_LIST_HEAD(&anchor->urb_list);
+ init_waitqueue_head(&anchor->wait);
+ spin_lock_init(&anchor->lock);
+}
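+
+/*
+ * Example: anchoring in-flight URBs so they can be killed as a group
+ * (a sketch; "priv->submitted" is a hypothetical driver field of type
+ * struct usb_anchor, initialized once with init_usb_anchor()):
+ *
+ *	usb_anchor_urb(urb, &priv->submitted);
+ *	retval = usb_submit_urb(urb, GFP_KERNEL);
+ *	if (retval)
+ *		usb_unanchor_urb(urb);
+ *
+ * and later, typically in disconnect():
+ *
+ *	usb_kill_anchored_urbs(&priv->submitted);
+ */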
+
+typedef void (*usb_complete_t)(struct urb *);
+
+/**
+ * struct urb - USB Request Block
+ * @urb_list: For use by current owner of the URB.
+ * @anchor_list: membership in the list of an anchor
+ * @anchor: to anchor URBs to a common mooring
+ * @ep: Points to the endpoint's data structure. Will eventually
+ * replace @pipe.
+ * @pipe: Holds endpoint number, direction, type, and more.
+ * Create these values with the eight macros available;
+ * usb_{snd,rcv}TYPEpipe(dev,endpoint), where the TYPE is "ctrl"
+ * (control), "bulk", "int" (interrupt), or "iso" (isochronous).
+ * For example usb_sndbulkpipe() or usb_rcvintpipe(). Endpoint
+ * numbers range from zero to fifteen. Note that "in" endpoint two
+ * is a different endpoint (and pipe) from "out" endpoint two.
+ * The current configuration controls the existence, type, and
+ * maximum packet size of any given endpoint.
+ * @dev: Identifies the USB device to perform the request.
+ * @status: This is read in non-iso completion functions to get the
+ * status of the particular request. ISO requests only use it
+ * to tell whether the URB was unlinked; detailed status for
+ * each frame is in the fields of the iso_frame-desc.
+ * @transfer_flags: A variety of flags may be used to affect how URB
+ * submission, unlinking, or operation are handled. Different
+ * kinds of URB can use different flags.
+ * @transfer_buffer: This identifies the buffer to (or from) which
+ * the I/O request will be performed (unless URB_NO_TRANSFER_DMA_MAP
+ * is set). This buffer must be suitable for DMA; allocate it with
+ * kmalloc() or equivalent. For transfers to "in" endpoints, contents
+ * of this buffer will be modified. This buffer is used for the data
+ * stage of control transfers.
+ * @transfer_dma: When transfer_flags includes URB_NO_TRANSFER_DMA_MAP,
+ * the device driver is saying that it provided this DMA address,
+ * which the host controller driver should use in preference to the
+ * transfer_buffer.
+ * @transfer_buffer_length: How big is transfer_buffer. The transfer may
+ * be broken up into chunks according to the current maximum packet
+ * size for the endpoint, which is a function of the configuration
+ * and is encoded in the pipe. When the length is zero, neither
+ * transfer_buffer nor transfer_dma is used.
+ * @actual_length: This is read in non-iso completion functions, and
+ * it tells how many bytes (out of transfer_buffer_length) were
+ * transferred. It will normally be the same as requested, unless
+ * either an error was reported or a short read was performed.
+ * The URB_SHORT_NOT_OK transfer flag may be used to make such
+ * short reads be reported as errors.
+ * @setup_packet: Only used for control transfers, this points to eight bytes
+ * of setup data. Control transfers always start by sending this data
+ * to the device. Then transfer_buffer is read or written, if needed.
+ * @setup_dma: For control transfers with URB_NO_SETUP_DMA_MAP set, the
+ * device driver has provided this DMA address for the setup packet.
+ * The host controller driver should use this in preference to
+ * setup_packet.
+ * @start_frame: Returns the initial frame for isochronous transfers.
+ * @number_of_packets: Lists the number of ISO transfer buffers.
+ * @interval: Specifies the polling interval for interrupt or isochronous
+ *	transfers. The units are frames (milliseconds) for full and low
+ * speed devices, and microframes (1/8 millisecond) for highspeed ones.
+ * @error_count: Returns the number of ISO transfers that reported errors.
+ * @context: For use in completion functions. This normally points to
+ * request-specific driver context.
+ * @complete: Completion handler. This URB is passed as the parameter to the
+ * completion function. The completion function may then do what
+ * it likes with the URB, including resubmitting or freeing it.
+ * @iso_frame_desc: Used to provide arrays of ISO transfer buffers and to
+ * collect the transfer status for each buffer.
+ *
+ * This structure identifies USB transfer requests. URBs must be allocated by
+ * calling usb_alloc_urb() and freed with a call to usb_free_urb().
+ * Initialization may be done using various usb_fill_*_urb() functions. URBs
+ * are submitted using usb_submit_urb(), and pending requests may be canceled
+ * using usb_unlink_urb() or usb_kill_urb().
+ *
+ * Data Transfer Buffers:
+ *
+ * Normally drivers provide I/O buffers allocated with kmalloc() or otherwise
+ * taken from the general page pool. That is provided by transfer_buffer
+ * (control requests also use setup_packet), and host controller drivers
+ * perform a dma mapping (and unmapping) for each buffer transferred. Those
+ * mapping operations can be expensive on some platforms (perhaps using a dma
+ * bounce buffer or talking to an IOMMU),
+ * although they're cheap on commodity x86 and ppc hardware.
+ *
+ * Alternatively, drivers may pass the URB_NO_xxx_DMA_MAP transfer flags,
+ * which tell the host controller driver that no such mapping is needed since
+ * the device driver is DMA-aware. For example, a device driver might
+ * allocate a DMA buffer with usb_buffer_alloc() or call usb_buffer_map().
+ * When these transfer flags are provided, host controller drivers will
+ * attempt to use the dma addresses found in the transfer_dma and/or
+ * setup_dma fields rather than determining a dma address themselves. (Note
+ * that transfer_buffer and setup_packet must still be set because not all
+ * host controllers use DMA, nor do virtual root hubs).
+ *
+ * Initialization:
+ *
+ * All URBs submitted must initialize the dev, pipe, transfer_flags (may be
+ * zero), and complete fields. All URBs must also initialize
+ * transfer_buffer and transfer_buffer_length. They may provide the
+ * URB_SHORT_NOT_OK transfer flag, indicating that short reads are
+ * to be treated as errors; that flag is invalid for write requests.
+ *
+ * Bulk URBs may
+ * use the URB_ZERO_PACKET transfer flag, indicating that bulk OUT transfers
+ * should always terminate with a short packet, even if it means adding an
+ * extra zero length packet.
+ *
+ * Control URBs must provide a setup_packet. The setup_packet and
+ * transfer_buffer may each be mapped for DMA or not, independently of
+ * the other. The transfer_flags bits URB_NO_TRANSFER_DMA_MAP and
+ * URB_NO_SETUP_DMA_MAP indicate which buffers have already been mapped.
+ * URB_NO_SETUP_DMA_MAP is ignored for non-control URBs.
+ *
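+
+/*
+ * Example use of usb_get_extra_descriptor() (an illustrative sketch;
+ * MY_CS_DESCRIPTOR_TYPE and struct my_cs_descriptor are hypothetical):
+ *
+ *	struct usb_host_interface *alt = intf->cur_altsetting;
+ *	struct my_cs_descriptor *cs;
+ *
+ *	if (usb_get_extra_descriptor(alt, MY_CS_DESCRIPTOR_TYPE, &cs) < 0)
+ *		return -ENODEV;
+ *
+ * On success, cs points into the raw "extra" descriptor storage; that
+ * memory belongs to usbcore and must not be freed by the caller.
+ */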
+ * Interrupt URBs must provide an interval, saying how often (in milliseconds
+ * or, for highspeed devices, 125 microsecond units)
+ * to poll for transfers. After the URB has been submitted, the interval
+ * field reflects how the transfer was actually scheduled.
+ * The polling interval may be more frequent than requested.
+ * For example, some controllers have a maximum interval of 32 milliseconds,
+ * while others support intervals of up to 1024 milliseconds.
+ * Isochronous URBs also have transfer intervals. (Note that for isochronous
+ * endpoints, as well as high speed interrupt endpoints, the encoding of
+ * the transfer interval in the endpoint descriptor is logarithmic.
+ * Device drivers must convert that value to linear units themselves.)
+ *
+ * Isochronous URBs normally use the URB_ISO_ASAP transfer flag, telling
+ * the host controller to schedule the transfer as soon as bandwidth
+ * utilization allows, and then set start_frame to reflect the actual frame
+ * selected during submission. Otherwise drivers must specify the start_frame
+ * and handle the case where the transfer can't begin then. However, drivers
+ * won't know how bandwidth is currently allocated, and while they can
+ * find the current frame using usb_get_current_frame_number(), they can't
+ * know the range for that frame number. (Ranges for frame counter values
+ * are HC-specific, and can go from 256 to 65536 frames from "now".)
+ *
+ * Isochronous URBs have a different data transfer model, in part because
+ * the quality of service is only "best effort". Callers provide specially
+ * allocated URBs, with number_of_packets worth of iso_frame_desc structures
+ * at the end. Each such packet is an individual ISO transfer. Isochronous
+ * URBs are normally queued, submitted by drivers to arrange that
+ * transfers are at least double buffered, and then explicitly resubmitted
+ * in completion handlers, so
+ * that data (such as audio or video) streams at as constant a rate as the
+ * host controller scheduler can support.
+ *
+ * Completion Callbacks:
+ *
+ * The completion callback is made in_interrupt(), and one of the first
+ * things that a completion handler should do is check the status field.
+ * The status field is provided for all URBs. It is used to report
+ * unlinked URBs, and status for all non-ISO transfers. It should not
+ * be examined before the URB is returned to the completion handler.
+ *
+ * The context field is normally used to link URBs back to the relevant
+ * driver or request state.
+ *
+ * When the completion callback is invoked for non-isochronous URBs, the
+ * actual_length field tells how many bytes were transferred. This field
+ * is updated even when the URB terminated with an error or was unlinked.
+ *
+ * ISO transfer status is reported in the status and actual_length fields
+ * of the iso_frame_desc array, and the number of errors is reported in
+ * error_count. Completion callbacks for ISO transfers will normally
+ * (re)submit URBs to ensure a constant transfer rate.
+ *
+ * Note that even fields marked "public" should not be touched by the driver
+ * while the urb is owned by the hcd, that is, from the call to
+ * usb_submit_urb() until entry into the completion routine.
+ */
+struct urb {
+ /* private: usb core and host controller only fields in the urb */
+ struct kref kref; /* reference count of the URB */
+ void *hcpriv; /* private data for host controller */
+ atomic_t use_count; /* concurrent submissions counter */
+ atomic_t reject; /* submissions will fail */
+ int unlinked; /* unlink error code */
+
+ /* public: documented fields in the urb that can be used by drivers */
+ struct list_head urb_list; /* list head for use by the urb's
+ * current owner */
+ struct list_head anchor_list; /* the URB may be anchored */
+ struct usb_anchor *anchor;
+ struct usb_device *dev; /* (in) pointer to associated device */
+ struct usb_host_endpoint *ep; /* (internal) pointer to endpoint */
+ unsigned int pipe; /* (in) pipe information */
+ int status; /* (return) non-ISO status */
+ unsigned int transfer_flags; /* (in) URB_SHORT_NOT_OK | ...*/
+ void *transfer_buffer; /* (in) associated data buffer */
+ dma_addr_t transfer_dma; /* (in) dma addr for transfer_buffer */
+ int transfer_buffer_length; /* (in) data buffer length */
+ int actual_length; /* (return) actual transfer length */
+ unsigned char *setup_packet; /* (in) setup packet (control only) */
+ dma_addr_t setup_dma; /* (in) dma addr for setup_packet */
+ int start_frame; /* (modify) start frame (ISO) */
+ int number_of_packets; /* (in) number of ISO packets */
+ int interval; /* (modify) transfer interval
+ * (INT/ISO) */
+ int error_count; /* (return) number of ISO errors */
+ void *context; /* (in) context for completion */
+ usb_complete_t complete; /* (in) completion routine */
+ struct usb_iso_packet_descriptor iso_frame_desc[0];
+ /* (in) ISO ONLY */
+};
+
+/* ----------------------------------------------------------------------- */
+
+/**
+ * usb_fill_control_urb - initializes a control urb
+ * @urb: pointer to the urb to initialize.
+ * @dev: pointer to the struct usb_device for this urb.
+ * @pipe: the endpoint pipe
+ * @setup_packet: pointer to the setup_packet buffer
+ * @transfer_buffer: pointer to the transfer buffer
+ * @buffer_length: length of the transfer buffer
+ * @complete_fn: pointer to the usb_complete_t function
+ * @context: what to set the urb context to.
+ *
+ * Initializes a control urb with the proper information needed to submit
+ * it to a device.
+ */
+static inline void usb_fill_control_urb(struct urb *urb,
+ struct usb_device *dev,
+ unsigned int pipe,
+ unsigned char *setup_packet,
+ void *transfer_buffer,
+ int buffer_length,
+ usb_complete_t complete_fn,
+ void *context)
+{
+ urb->dev = dev;
+ urb->pipe = pipe;
+ urb->setup_packet = setup_packet;
+ urb->transfer_buffer = transfer_buffer;
+ urb->transfer_buffer_length = buffer_length;
+ urb->complete = complete_fn;
+ urb->context = context;
+}
+
+/**
+ * usb_fill_bulk_urb - initializes a bulk urb
+ * @urb: pointer to the urb to initialize.
+ * @dev: pointer to the struct usb_device for this urb.
+ * @pipe: the endpoint pipe
+ * @transfer_buffer: pointer to the transfer buffer
+ * @buffer_length: length of the transfer buffer
+ * @complete_fn: pointer to the usb_complete_t function
+ * @context: what to set the urb context to.
+ *
+ * Initializes a bulk urb with the proper information needed to submit it
+ * to a device.
+ */
+static inline void usb_fill_bulk_urb(struct urb *urb,
+ struct usb_device *dev,
+ unsigned int pipe,
+ void *transfer_buffer,
+ int buffer_length,
+ usb_complete_t complete_fn,
+ void *context)
+{
+ urb->dev = dev;
+ urb->pipe = pipe;
+ urb->transfer_buffer = transfer_buffer;
+ urb->transfer_buffer_length = buffer_length;
+ urb->complete = complete_fn;
+ urb->context = context;
+}
+
+/**
+ * usb_fill_int_urb - initializes an interrupt urb
+ * @urb: pointer to the urb to initialize.
+ * @dev: pointer to the struct usb_device for this urb.
+ * @pipe: the endpoint pipe
+ * @transfer_buffer: pointer to the transfer buffer
+ * @buffer_length: length of the transfer buffer
+ * @complete_fn: pointer to the usb_complete_t function
+ * @context: what to set the urb context to.
+ * @interval: what to set the urb interval to, encoded like
+ * the endpoint descriptor's bInterval value.
+ *
+ * Initializes an interrupt urb with the proper information needed to submit
+ * it to a device.
+ * Note that high speed interrupt endpoints use a logarithmic encoding of
+ * the endpoint interval, and express polling intervals in microframes
+ * (eight per millisecond) rather than in frames (one per millisecond).
+ */
+static inline void usb_fill_int_urb(struct urb *urb,
+ struct usb_device *dev,
+ unsigned int pipe,
+ void *transfer_buffer,
+ int buffer_length,
+ usb_complete_t complete_fn,
+ void *context,
+ int interval)
+{
+ urb->dev = dev;
+ urb->pipe = pipe;
+ urb->transfer_buffer = transfer_buffer;
+ urb->transfer_buffer_length = buffer_length;
+ urb->complete = complete_fn;
+ urb->context = context;
+ if (dev->speed == USB_SPEED_HIGH)
+ urb->interval = 1 << (interval - 1);
+ else
+ urb->interval = interval;
+ urb->start_frame = -1;
+}
+
+extern void usb_init_urb(struct urb *urb);
+extern struct urb *usb_alloc_urb(int iso_packets, gfp_t mem_flags);
+extern void usb_free_urb(struct urb *urb);
+#define usb_put_urb usb_free_urb
+extern struct urb *usb_get_urb(struct urb *urb);
+extern int usb_submit_urb(struct urb *urb, gfp_t mem_flags);
+extern int usb_unlink_urb(struct urb *urb);
+extern void usb_kill_urb(struct urb *urb);
+extern void usb_poison_urb(struct urb *urb);
+extern void usb_unpoison_urb(struct urb *urb);
+extern void usb_kill_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_poison_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_unpoison_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_unlink_anchored_urbs(struct usb_anchor *anchor);
+extern void usb_anchor_urb(struct urb *urb, struct usb_anchor *anchor);
+extern void usb_unanchor_urb(struct urb *urb);
+extern int usb_wait_anchor_empty_timeout(struct usb_anchor *anchor,
+ unsigned int timeout);
+extern struct urb *usb_get_from_anchor(struct usb_anchor *anchor);
+extern void usb_scuttle_anchored_urbs(struct usb_anchor *anchor);
+extern int usb_anchor_empty(struct usb_anchor *anchor);
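+
+/*
+ * Example: allocate, fill and submit a bulk OUT URB (a sketch; endpoint
+ * number 2, "buf", "len", "priv" and my_write_complete() are hypothetical):
+ *
+ *	static void my_write_complete(struct urb *urb)
+ *	{
+ *		if (urb->status)
+ *			...
+ *	}
+ *
+ *	urb = usb_alloc_urb(0, GFP_KERNEL);
+ *	if (!urb)
+ *		return -ENOMEM;
+ *	usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, 2),
+ *			  buf, len, my_write_complete, priv);
+ *	retval = usb_submit_urb(urb, GFP_KERNEL);
+ *	if (retval)
+ *		usb_free_urb(urb);
+ *
+ * On success the completion handler runs later; the driver drops its
+ * own reference with usb_free_urb() when it is done with the URB.
+ */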
+
+/**
+ * usb_urb_dir_in - check if an URB describes an IN transfer
+ * @urb: URB to be checked
+ *
+ * Returns 1 if @urb describes an IN transfer (device-to-host),
+ * otherwise 0.
+ */
+static inline int usb_urb_dir_in(struct urb *urb)
+{
+ return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_IN;
+}
+
+/**
+ * usb_urb_dir_out - check if an URB describes an OUT transfer
+ * @urb: URB to be checked
+ *
+ * Returns 1 if @urb describes an OUT transfer (host-to-device),
+ * otherwise 0.
+ */
+static inline int usb_urb_dir_out(struct urb *urb)
+{
+ return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT;
+}
+
+void *usb_buffer_alloc(struct usb_device *dev, size_t size,
+ gfp_t mem_flags, dma_addr_t *dma);
+void usb_buffer_free(struct usb_device *dev, size_t size,
+ void *addr, dma_addr_t dma);
+
+#if 0
+struct urb *usb_buffer_map(struct urb *urb);
+void usb_buffer_dmasync(struct urb *urb);
+void usb_buffer_unmap(struct urb *urb);
+#endif
+
+struct scatterlist;
+int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
+ struct scatterlist *sg, int nents);
+#if 0
+void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
+ struct scatterlist *sg, int n_hw_ents);
+#endif
+void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
+ struct scatterlist *sg, int n_hw_ents);
+
+/*-------------------------------------------------------------------*
+ * SYNCHRONOUS CALL SUPPORT *
+ *-------------------------------------------------------------------*/
+
+extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
+ __u8 request, __u8 requesttype, __u16 value, __u16 index,
+ void *data, __u16 size, int timeout);
+extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout);
+extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length,
+ int timeout);
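+
+/*
+ * Example: a simple synchronous bulk read (a sketch; endpoint 1 and
+ * "buf" are hypothetical, and the buffer must be DMA-able, e.g. from
+ * kmalloc()):
+ *
+ *	int actual = 0;
+ *	int retval;
+ *
+ *	retval = usb_bulk_msg(udev, usb_rcvbulkpipe(udev, 1),
+ *			      buf, 64, &actual, 1000);
+ *
+ * On success (retval == 0), "actual" holds the number of bytes
+ * received; the final argument is the timeout in milliseconds.
+ */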
+
+/* wrappers around usb_control_msg() for the most common standard requests */
+extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype,
+ unsigned char descindex, void *buf, int size);
+extern int usb_get_status(struct usb_device *dev,
+ int type, int target, void *data);
+extern int usb_string(struct usb_device *dev, int index,
+ char *buf, size_t size);
+
+/* wrappers that also update important state inside usbcore */
+extern int usb_clear_halt(struct usb_device *dev, int pipe);
+extern int usb_reset_configuration(struct usb_device *dev);
+extern int usb_set_interface(struct usb_device *dev, int ifnum, int alternate);
+
+/* this request isn't really synchronous, but it belongs with the others */
+extern int usb_driver_set_configuration(struct usb_device *udev, int config);
+
+/*
+ * timeouts, in milliseconds, used for sending/receiving control messages
+ * they typically complete within a few frames (msec) after they're issued
+ * USB identifies 5 second timeouts, maybe more in a few cases, and a few
+ * slow devices (like some MGE Ellipse UPSes) actually push that limit.
+ */
+#define USB_CTRL_GET_TIMEOUT 5000
+#define USB_CTRL_SET_TIMEOUT 5000
+
+
+/**
+ * struct usb_sg_request - support for scatter/gather I/O
+ * @status: zero indicates success, else negative errno
+ * @bytes: counts bytes transferred.
+ *
+ * These requests are initialized using usb_sg_init(), and then are used
+ * as request handles passed to usb_sg_wait() or usb_sg_cancel(). Most
+ * members of the request object aren't for driver access.
+ *
+ * The status and bytecount values are valid only after usb_sg_wait()
+ * returns. If the status is zero, then the bytecount matches the total
+ * from the request.
+ *
+ * After an error completion, drivers may need to clear a halt condition
+ * on the endpoint.
+ */
+struct usb_sg_request {
+ int status;
+ size_t bytes;
+
+ /*
+ * members below are private: to usbcore,
+ * and are not provided for driver access!
+ */
+ spinlock_t lock;
+
+ struct usb_device *dev;
+ int pipe;
+ struct scatterlist *sg;
+ int nents;
+
+ int entries;
+ struct urb **urbs;
+
+ int count;
+ struct completion complete;
+};
+
+int usb_sg_init(
+ struct usb_sg_request *io,
+ struct usb_device *dev,
+ unsigned pipe,
+ unsigned period,
+ struct scatterlist *sg,
+ int nents,
+ size_t length,
+ gfp_t mem_flags
+);
+void usb_sg_cancel(struct usb_sg_request *io);
+void usb_sg_wait(struct usb_sg_request *io);
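+
+/*
+ * Example: a blocking scatter/gather bulk transfer (a sketch; "sg" and
+ * "nents" describe a caller-built scatterlist, and length 0 means "use
+ * the sum of the scatterlist entries"):
+ *
+ *	struct usb_sg_request io;
+ *	int retval;
+ *
+ *	retval = usb_sg_init(&io, udev, usb_rcvbulkpipe(udev, 1), 0,
+ *			     sg, nents, 0, GFP_KERNEL);
+ *	if (retval == 0) {
+ *		usb_sg_wait(&io);
+ *		retval = io.status;
+ *	}
+ */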
+
+
+/* ----------------------------------------------------------------------- */
+
+/*
+ * For various legacy reasons, Linux has a small cookie that's paired with
+ * a struct usb_device to identify an endpoint queue. Queue characteristics
+ * are defined by the endpoint's descriptor. This cookie is called a "pipe",
+ * an unsigned int encoded as:
+ *
+ * - direction: bit 7 (0 = Host-to-Device [Out],
+ * 1 = Device-to-Host [In] ...
+ * like endpoint bEndpointAddress)
+ * - device address: bits 8-14 ... bit positions known to uhci-hcd
+ * - endpoint: bits 15-18 ... bit positions known to uhci-hcd
+ * - pipe type: bits 30-31 (00 = isochronous, 01 = interrupt,
+ * 10 = control, 11 = bulk)
+ *
+ * Given the device address and endpoint descriptor, pipes are redundant.
+ */
+
+/* NOTE: these are not the standard USB_ENDPOINT_XFER_* values!! */
+/* (yet ... they're the values used by usbfs) */
+#define PIPE_ISOCHRONOUS 0
+#define PIPE_INTERRUPT 1
+#define PIPE_CONTROL 2
+#define PIPE_BULK 3
+
+#define usb_pipein(pipe) ((pipe) & USB_DIR_IN)
+#define usb_pipeout(pipe) (!usb_pipein(pipe))
+
+#define usb_pipedevice(pipe) (((pipe) >> 8) & 0x7f)
+#define usb_pipeendpoint(pipe) (((pipe) >> 15) & 0xf)
+
+#define usb_pipetype(pipe) (((pipe) >> 30) & 3)
+#define usb_pipeisoc(pipe) (usb_pipetype((pipe)) == PIPE_ISOCHRONOUS)
+#define usb_pipeint(pipe) (usb_pipetype((pipe)) == PIPE_INTERRUPT)
+#define usb_pipecontrol(pipe) (usb_pipetype((pipe)) == PIPE_CONTROL)
+#define usb_pipebulk(pipe) (usb_pipetype((pipe)) == PIPE_BULK)
+
+/* The D0/D1 toggle bits ... USE WITH CAUTION (they're almost hcd-internal) */
+#define usb_gettoggle(dev, ep, out) (((dev)->toggle[out] >> (ep)) & 1)
+#define usb_dotoggle(dev, ep, out) ((dev)->toggle[out] ^= (1 << (ep)))
+#define usb_settoggle(dev, ep, out, bit) \
+ ((dev)->toggle[out] = ((dev)->toggle[out] & ~(1 << (ep))) | \
+ ((bit) << (ep)))
+
+
+static inline unsigned int __create_pipe(struct usb_device *dev,
+ unsigned int endpoint)
+{
+ return (dev->devnum << 8) | (endpoint << 15);
+}
+
+/* Create various pipes... */
+#define usb_sndctrlpipe(dev,endpoint) \
+ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvctrlpipe(dev,endpoint) \
+ ((PIPE_CONTROL << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndisocpipe(dev,endpoint) \
+ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvisocpipe(dev,endpoint) \
+ ((PIPE_ISOCHRONOUS << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndbulkpipe(dev,endpoint) \
+ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvbulkpipe(dev,endpoint) \
+ ((PIPE_BULK << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+#define usb_sndintpipe(dev,endpoint) \
+ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint))
+#define usb_rcvintpipe(dev,endpoint) \
+ ((PIPE_INTERRUPT << 30) | __create_pipe(dev, endpoint) | USB_DIR_IN)
+
+/*-------------------------------------------------------------------------*/
+
+static inline __u16
+usb_maxpacket(struct usb_device *udev, int pipe, int is_out)
+{
+ struct usb_host_endpoint *ep;
+ unsigned epnum = usb_pipeendpoint(pipe);
+
+ if (is_out) {
+ WARN_ON(usb_pipein(pipe));
+ ep = udev->ep_out[epnum];
+ } else {
+ WARN_ON(usb_pipeout(pipe));
+ ep = udev->ep_in[epnum];
+ }
+ if (!ep)
+ return 0;
+
+ /* NOTE: only 0x07ff bits are for packet size... */
+ return le16_to_cpu(ep->desc.wMaxPacketSize);
+}
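+
+/*
+ * Example: building a pipe and querying its maximum packet size (a
+ * sketch; endpoint 1 is a hypothetical bulk IN endpoint):
+ *
+ *	unsigned int pipe = usb_rcvbulkpipe(udev, 1);
+ *	__u16 maxp = usb_maxpacket(udev, pipe, usb_pipeout(pipe));
+ *
+ * Drivers often size transfer buffers as a multiple of maxp so that
+ * bulk reads terminate only on a short packet or a full buffer.
+ */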
+
+/* ----------------------------------------------------------------------- */
+
+/* Events from the usb core */
+#define USB_DEVICE_ADD 0x0001
+#define USB_DEVICE_REMOVE 0x0002
+#define USB_BUS_ADD 0x0003
+#define USB_BUS_REMOVE 0x0004
+extern void usb_register_notify(struct notifier_block *nb);
+extern void usb_unregister_notify(struct notifier_block *nb);
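+
+/*
+ * Example: watching device arrival and removal (a sketch; my_notify()
+ * is hypothetical; for the USB_DEVICE_* events the data argument points
+ * to the struct usb_device involved):
+ *
+ *	static int my_notify(struct notifier_block *nb,
+ *			     unsigned long action, void *data)
+ *	{
+ *		if (action == USB_DEVICE_ADD)
+ *			...
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_notify,
+ *	};
+ *
+ *	usb_register_notify(&my_nb);
+ */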
+
+#ifdef DEBUG
+#define dbg(format, arg...) printk(KERN_DEBUG "%s: " format "\n" , \
+ __FILE__ , ## arg)
+#else
+#define dbg(format, arg...) do {} while (0)
+#endif
+
+#define err(format, arg...) printk(KERN_ERR KBUILD_MODNAME ": " \
+ format "\n" , ## arg)
+
+#endif /* __KERNEL__ */
+
+#endif