author     Samuel Thibault <samuel.thibault@ens-lyon.org>  2013-07-27 22:15:01 +0000
committer  Samuel Thibault <samuel.thibault@ens-lyon.org>  2013-07-27 22:15:01 +0000
commit     7996a3d79d55b7f879dfd62e202bbfe2963718d3 (patch)
tree       8d9f6759fec4099b9be503c11c7ed174f7204980 /libdde-linux26/lib/src/drivers
parent     4fbe7358c7747a9165f776eb19addbb9baf7def2 (diff)

really properly move files
Diffstat (limited to 'libdde-linux26/lib/src/drivers')
-rw-r--r--  libdde-linux26/lib/src/drivers/base/class.c      |  505
-rw-r--r--  libdde-linux26/lib/src/drivers/base/core.c       | 1633
-rw-r--r--  libdde-linux26/lib/src/drivers/base/init.c       |   41
-rw-r--r--  libdde-linux26/lib/src/drivers/char/random.c     | 1709
-rw-r--r--  libdde-linux26/lib/src/drivers/pci/pci-driver.c  | 1008
-rw-r--r--  libdde-linux26/lib/src/drivers/pci/pci.c         | 2478
-rw-r--r--  libdde-linux26/lib/src/drivers/pci/probe.c       | 1232
7 files changed, 8606 insertions(+), 0 deletions(-)
diff --git a/libdde-linux26/lib/src/drivers/base/class.c b/libdde-linux26/lib/src/drivers/base/class.c
new file mode 100644
index 00000000..1417d80b
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/base/class.c
@@ -0,0 +1,505 @@
+/*
+ * class.c - basic device class management
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2003-2004 Greg Kroah-Hartman
+ * Copyright (c) 2003-2004 IBM Corp.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/genhd.h>
+#include <linux/mutex.h>
+#include "base.h"
+
+#define to_class_attr(_attr) container_of(_attr, struct class_attribute, attr)
+
+static ssize_t class_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct class_attribute *class_attr = to_class_attr(attr);
+ struct class_private *cp = to_class(kobj);
+ ssize_t ret = -EIO;
+
+ if (class_attr->show)
+ ret = class_attr->show(cp->class, buf);
+ return ret;
+}
+
+static ssize_t class_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct class_attribute *class_attr = to_class_attr(attr);
+ struct class_private *cp = to_class(kobj);
+ ssize_t ret = -EIO;
+
+ if (class_attr->store)
+ ret = class_attr->store(cp->class, buf, count);
+ return ret;
+}
+
+static void class_release(struct kobject *kobj)
+{
+ struct class_private *cp = to_class(kobj);
+ struct class *class = cp->class;
+
+ pr_debug("class '%s': release.\n", class->name);
+
+ if (class->class_release)
+ class->class_release(class);
+ else
+ pr_debug("class '%s' does not have a release() function, "
+ "be careful\n", class->name);
+}
+
+static struct sysfs_ops class_sysfs_ops = {
+ .show = class_attr_show,
+ .store = class_attr_store,
+};
+
+static struct kobj_type class_ktype = {
+ .sysfs_ops = &class_sysfs_ops,
+ .release = class_release,
+};
+
+/* Hotplug events for classes go to the class's class_subsys */
+static struct kset *class_kset;
+
+
+int class_create_file(struct class *cls, const struct class_attribute *attr)
+{
+ int error;
+ if (cls)
+ error = sysfs_create_file(&cls->p->class_subsys.kobj,
+ &attr->attr);
+ else
+ error = -EINVAL;
+ return error;
+}
+
+void class_remove_file(struct class *cls, const struct class_attribute *attr)
+{
+ if (cls)
+ sysfs_remove_file(&cls->p->class_subsys.kobj, &attr->attr);
+}
+
+static struct class *class_get(struct class *cls)
+{
+ if (cls)
+ kset_get(&cls->p->class_subsys);
+ return cls;
+}
+
+static void class_put(struct class *cls)
+{
+ if (cls)
+ kset_put(&cls->p->class_subsys);
+}
+
+static int add_class_attrs(struct class *cls)
+{
+ int i;
+ int error = 0;
+
+ if (cls->class_attrs) {
+ for (i = 0; attr_name(cls->class_attrs[i]); i++) {
+ error = class_create_file(cls, &cls->class_attrs[i]);
+ if (error)
+ goto error;
+ }
+ }
+done:
+ return error;
+error:
+ while (--i >= 0)
+ class_remove_file(cls, &cls->class_attrs[i]);
+ goto done;
+}
+
+static void remove_class_attrs(struct class *cls)
+{
+ int i;
+
+ if (cls->class_attrs) {
+ for (i = 0; attr_name(cls->class_attrs[i]); i++)
+ class_remove_file(cls, &cls->class_attrs[i]);
+ }
+}
+
+static void klist_class_dev_get(struct klist_node *n)
+{
+ struct device *dev = container_of(n, struct device, knode_class);
+
+ get_device(dev);
+}
+
+static void klist_class_dev_put(struct klist_node *n)
+{
+ struct device *dev = container_of(n, struct device, knode_class);
+
+ put_device(dev);
+}
+
+int __class_register(struct class *cls, struct lock_class_key *key)
+{
+ struct class_private *cp;
+ int error;
+
+ pr_debug("device class '%s': registering\n", cls->name);
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+ klist_init(&cp->class_devices, klist_class_dev_get, klist_class_dev_put);
+ INIT_LIST_HEAD(&cp->class_interfaces);
+ kset_init(&cp->class_dirs);
+ __mutex_init(&cp->class_mutex, "struct class mutex", key);
+ error = kobject_set_name(&cp->class_subsys.kobj, "%s", cls->name);
+ if (error) {
+ kfree(cp);
+ return error;
+ }
+
+ /* set the default /sys/dev directory for devices of this class */
+ if (!cls->dev_kobj)
+ cls->dev_kobj = sysfs_dev_char_kobj;
+
+#if defined(CONFIG_SYSFS_DEPRECATED) && defined(CONFIG_BLOCK) && !defined(DDE_LINUX)
+ /* let the block class directory show up in the root of sysfs */
+ if (cls != &block_class)
+ cp->class_subsys.kobj.kset = class_kset;
+#else
+ cp->class_subsys.kobj.kset = class_kset;
+#endif
+ cp->class_subsys.kobj.ktype = &class_ktype;
+ cp->class = cls;
+ cls->p = cp;
+
+ error = kset_register(&cp->class_subsys);
+ if (error) {
+ kfree(cp);
+ return error;
+ }
+ error = add_class_attrs(class_get(cls));
+ class_put(cls);
+ return error;
+}
+EXPORT_SYMBOL_GPL(__class_register);
+
+void class_unregister(struct class *cls)
+{
+ pr_debug("device class '%s': unregistering\n", cls->name);
+ remove_class_attrs(cls);
+ kset_unregister(&cls->p->class_subsys);
+}
+
+static void class_create_release(struct class *cls)
+{
+ pr_debug("%s called for %s\n", __func__, cls->name);
+ kfree(cls);
+}
+
+/**
+ * class_create - create a struct class structure
+ * @owner: pointer to the module that is to "own" this struct class
+ * @name: pointer to a string for the name of this class.
+ * @key: the lock_class_key for this class; used by mutex lock debugging
+ *
+ * This is used to create a struct class pointer that can then be used
+ * in calls to device_create().
+ *
+ * Note, the pointer created here is to be destroyed when finished by
+ * making a call to class_destroy().
+ */
+struct class *__class_create(struct module *owner, const char *name,
+ struct lock_class_key *key)
+{
+ struct class *cls;
+ int retval;
+
+ cls = kzalloc(sizeof(*cls), GFP_KERNEL);
+ if (!cls) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ cls->name = name;
+ cls->owner = owner;
+ cls->class_release = class_create_release;
+
+ retval = __class_register(cls, key);
+ if (retval)
+ goto error;
+
+ return cls;
+
+error:
+ kfree(cls);
+ return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(__class_create);
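/*
 * [Editor's sketch -- not part of this commit.] A typical consumer of the
 * class_create()/class_destroy() pair above; "foo" and the init/exit
 * function names are hypothetical.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *foo_class;

static int __init foo_init(void)
{
	foo_class = class_create(THIS_MODULE, "foo");
	if (IS_ERR(foo_class))
		return PTR_ERR(foo_class);	/* e.g. -ENOMEM */
	return 0;
}

static void __exit foo_exit(void)
{
	class_destroy(foo_class);
}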
+
+/**
+ * class_destroy - destroys a struct class structure
+ * @cls: pointer to the struct class that is to be destroyed
+ *
+ * Note, the pointer to be destroyed must have been created with a call
+ * to class_create().
+ */
+void class_destroy(struct class *cls)
+{
+ if ((cls == NULL) || (IS_ERR(cls)))
+ return;
+
+ class_unregister(cls);
+}
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+char *make_class_name(const char *name, struct kobject *kobj)
+{
+ char *class_name;
+ int size;
+
+ size = strlen(name) + strlen(kobject_name(kobj)) + 2;
+
+ class_name = kmalloc(size, GFP_KERNEL);
+ if (!class_name)
+ return NULL;
+
+ strcpy(class_name, name);
+ strcat(class_name, ":");
+ strcat(class_name, kobject_name(kobj));
+ return class_name;
+}
+#endif
+
+/**
+ * class_dev_iter_init - initialize class device iterator
+ * @iter: class iterator to initialize
+ * @class: the class we want to iterate over
+ * @start: the device to start iterating from, if any
+ * @type: device_type of the devices to iterate over, NULL for all
+ *
+ * Initialize class iterator @iter such that it iterates over devices
+ * of @class. If @start is set, the list iteration will start there,
+ * otherwise if it is NULL, the iteration starts at the beginning of
+ * the list.
+ */
+void class_dev_iter_init(struct class_dev_iter *iter, struct class *class,
+ struct device *start, const struct device_type *type)
+{
+ struct klist_node *start_knode = NULL;
+
+ if (start)
+ start_knode = &start->knode_class;
+ klist_iter_init_node(&class->p->class_devices, &iter->ki, start_knode);
+ iter->type = type;
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_init);
+
+/**
+ * class_dev_iter_next - iterate to the next device
+ * @iter: class iterator to proceed
+ *
+ * Proceed @iter to the next device and return it. Returns NULL if
+ * iteration is complete.
+ *
+ * The returned device is referenced and won't be released until the
+ * iterator is advanced to the next device or exited.  The caller is
+ * free to do whatever it wants to do with the device including
+ * calling back into class code.
+ */
+struct device *class_dev_iter_next(struct class_dev_iter *iter)
+{
+ struct klist_node *knode;
+ struct device *dev;
+
+ while (1) {
+ knode = klist_next(&iter->ki);
+ if (!knode)
+ return NULL;
+ dev = container_of(knode, struct device, knode_class);
+ if (!iter->type || iter->type == dev->type)
+ return dev;
+ }
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_next);
+
+/**
+ * class_dev_iter_exit - finish iteration
+ * @iter: class iterator to finish
+ *
+ * Finish an iteration. Always call this function after iteration is
+ * complete whether the iteration ran till the end or not.
+ */
+void class_dev_iter_exit(struct class_dev_iter *iter)
+{
+ klist_iter_exit(&iter->ki);
+}
+EXPORT_SYMBOL_GPL(class_dev_iter_exit);
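/*
 * [Editor's sketch -- not part of this commit.] The iterator protocol
 * documented above: init, walk, exit -- class_dev_iter_exit() must run
 * even if the walk stops early.  The class argument is assumed to be a
 * registered class.
 */
static void foo_list_devices(struct class *foo_class)
{
	struct class_dev_iter iter;
	struct device *dev;

	class_dev_iter_init(&iter, foo_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter)))
		dev_info(dev, "member of class '%s'\n", foo_class->name);
	class_dev_iter_exit(&iter);	/* drops the last held reference */
}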
+
+/**
+ * class_for_each_device - device iterator
+ * @class: the class we're iterating
+ * @start: the device to start with in the list, if any.
+ * @data: data for the callback
+ * @fn: function to be called for each device
+ *
+ * Iterate over @class's list of devices, and call @fn for each,
+ * passing it @data. If @start is set, the list iteration will start
+ * there, otherwise if it is NULL, the iteration starts at the
+ * beginning of the list.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ *
+ * @fn is allowed to do anything including calling back into class
+ * code. There's no locking restriction.
+ */
+int class_for_each_device(struct class *class, struct device *start,
+ void *data, int (*fn)(struct device *, void *))
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+ int error = 0;
+
+ if (!class)
+ return -EINVAL;
+ if (!class->p) {
+ WARN(1, "%s called for class '%s' before it was initialized",
+ __func__, class->name);
+ return -EINVAL;
+ }
+
+ class_dev_iter_init(&iter, class, start, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ error = fn(dev, data);
+ if (error)
+ break;
+ }
+ class_dev_iter_exit(&iter);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(class_for_each_device);
+
+/**
+ * class_find_device - device iterator for locating a particular device
+ * @class: the class we're iterating
+ * @start: Device to begin with
+ * @data: data for the match function
+ * @match: function to check device
+ *
+ * This is similar to the class_for_each_device() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero, this function will
+ * return to the caller and not iterate over any more devices.
+ *
+ * Note, you will need to drop the reference with put_device() after use.
+ *
+ * @match is allowed to do anything including calling back into class
+ * code. There's no locking restriction.
+ */
+struct device *class_find_device(struct class *class, struct device *start,
+ void *data,
+ int (*match)(struct device *, void *))
+{
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ if (!class)
+ return NULL;
+ if (!class->p) {
+ WARN(1, "%s called for class '%s' before it was initialized",
+ __func__, class->name);
+ return NULL;
+ }
+
+ class_dev_iter_init(&iter, class, start, NULL);
+ while ((dev = class_dev_iter_next(&iter))) {
+ if (match(dev, data)) {
+ get_device(dev);
+ break;
+ }
+ }
+ class_dev_iter_exit(&iter);
+
+ return dev;
+}
+EXPORT_SYMBOL_GPL(class_find_device);
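/*
 * [Editor's sketch -- not part of this commit.] A @match callback for
 * class_find_device(): look a device up by name.  The caller owns the
 * returned reference and must put_device() it.  Hypothetical names.
 */
#include <linux/string.h>

static int foo_match_name(struct device *dev, void *data)
{
	return strcmp(dev_name(dev), (const char *)data) == 0;
}

static struct device *foo_lookup(struct class *cls, const char *name)
{
	/* returns a referenced device, or NULL if nothing matched */
	return class_find_device(cls, NULL, (void *)name, foo_match_name);
}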
+
+int class_interface_register(struct class_interface *class_intf)
+{
+ struct class *parent;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ if (!class_intf || !class_intf->class)
+ return -ENODEV;
+
+ parent = class_get(class_intf->class);
+ if (!parent)
+ return -EINVAL;
+
+ mutex_lock(&parent->p->class_mutex);
+ list_add_tail(&class_intf->node, &parent->p->class_interfaces);
+ if (class_intf->add_dev) {
+ class_dev_iter_init(&iter, parent, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter)))
+ class_intf->add_dev(dev, class_intf);
+ class_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&parent->p->class_mutex);
+
+ return 0;
+}
+
+void class_interface_unregister(struct class_interface *class_intf)
+{
+ struct class *parent = class_intf->class;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ if (!parent)
+ return;
+
+ mutex_lock(&parent->p->class_mutex);
+ list_del_init(&class_intf->node);
+ if (class_intf->remove_dev) {
+ class_dev_iter_init(&iter, parent, NULL, NULL);
+ while ((dev = class_dev_iter_next(&iter)))
+ class_intf->remove_dev(dev, class_intf);
+ class_dev_iter_exit(&iter);
+ }
+ mutex_unlock(&parent->p->class_mutex);
+
+ class_put(parent);
+}
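/*
 * [Editor's sketch -- not part of this commit.] A class interface as
 * consumed by class_interface_register()/unregister() above: add_dev()
 * fires for every device already in the class and for each one added
 * later; remove_dev() mirrors it on removal.  All names are hypothetical.
 */
static int foo_add_dev(struct device *dev, struct class_interface *intf)
{
	dev_info(dev, "joined the watched class\n");
	return 0;
}

static void foo_remove_dev(struct device *dev, struct class_interface *intf)
{
	dev_info(dev, "left the watched class\n");
}

static struct class_interface foo_intf = {
	.add_dev    = foo_add_dev,
	.remove_dev = foo_remove_dev,
};

/* at init time, once foo_class is registered:       */
/*	foo_intf.class = foo_class;                   */
/*	err = class_interface_register(&foo_intf);    */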
+
+int __init classes_init(void)
+{
+ class_kset = kset_create_and_add("class", NULL, NULL);
+ if (!class_kset)
+ return -ENOMEM;
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(class_create_file);
+EXPORT_SYMBOL_GPL(class_remove_file);
+EXPORT_SYMBOL_GPL(class_unregister);
+EXPORT_SYMBOL_GPL(class_destroy);
+
+EXPORT_SYMBOL_GPL(class_interface_register);
+EXPORT_SYMBOL_GPL(class_interface_unregister);
diff --git a/libdde-linux26/lib/src/drivers/base/core.c b/libdde-linux26/lib/src/drivers/base/core.c
new file mode 100644
index 00000000..e3800714
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/base/core.c
@@ -0,0 +1,1633 @@
+/*
+ * drivers/base/core.c - core driver model code (device registration, etc)
+ *
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ * Copyright (c) 2006 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2006 Novell, Inc.
+ *
+ * This file is released under the GPLv2
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/kdev_t.h>
+#include <linux/notifier.h>
+#include <linux/genhd.h>
+#include <linux/kallsyms.h>
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+
+#include "base.h"
+#include "power/power.h"
+
+int (*platform_notify)(struct device *dev) = NULL;
+int (*platform_notify_remove)(struct device *dev) = NULL;
+static struct kobject *dev_kobj;
+struct kobject *sysfs_dev_char_kobj;
+struct kobject *sysfs_dev_block_kobj;
+
+#ifdef DDE_LINUX
+#include "local.h"
+#endif
+
+#if defined(CONFIG_BLOCK) && !defined(DDE_LINUX)
+static inline int device_is_not_partition(struct device *dev)
+{
+ return !(dev->type == &part_type);
+}
+#else
+static inline int device_is_not_partition(struct device *dev)
+{
+ return 1;
+}
+#endif
+
+/**
+ * dev_driver_string - Return a device's driver name, if at all possible
+ * @dev: struct device to get the name of
+ *
+ * Will return the device's driver's name if it is bound to a driver.  If
+ * the device is not bound to a driver, it will return the name of the bus
+ * it is attached to.  If it is not attached to a bus either, the name of
+ * its class is returned; if it has no class, an empty string is returned.
+ */
+const char *dev_driver_string(const struct device *dev)
+{
+ return dev->driver ? dev->driver->name :
+ (dev->bus ? dev->bus->name :
+ (dev->class ? dev->class->name : ""));
+}
+EXPORT_SYMBOL(dev_driver_string);
+
+#define to_dev(obj) container_of(obj, struct device, kobj)
+#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
+
+static ssize_t dev_attr_show(struct kobject *kobj, struct attribute *attr,
+ char *buf)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = to_dev(kobj);
+ ssize_t ret = -EIO;
+
+ if (dev_attr->show)
+ ret = dev_attr->show(dev, dev_attr, buf);
+ if (ret >= (ssize_t)PAGE_SIZE) {
+ print_symbol("dev_attr_show: %s returned bad count\n",
+ (unsigned long)dev_attr->show);
+ }
+ return ret;
+}
+
+static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
+ const char *buf, size_t count)
+{
+ struct device_attribute *dev_attr = to_dev_attr(attr);
+ struct device *dev = to_dev(kobj);
+ ssize_t ret = -EIO;
+
+ if (dev_attr->store)
+ ret = dev_attr->store(dev, dev_attr, buf, count);
+ return ret;
+}
+
+static struct sysfs_ops dev_sysfs_ops = {
+ .show = dev_attr_show,
+ .store = dev_attr_store,
+};
+
+
+/**
+ * device_release - free device structure.
+ * @kobj: device's kobject.
+ *
+ * This is called once the reference count for the object
+ * reaches 0. We forward the call to the device's release
+ * method, which should handle actually freeing the structure.
+ */
+static void device_release(struct kobject *kobj)
+{
+ struct device *dev = to_dev(kobj);
+
+ if (dev->release)
+ dev->release(dev);
+ else if (dev->type && dev->type->release)
+ dev->type->release(dev);
+ else if (dev->class && dev->class->dev_release)
+ dev->class->dev_release(dev);
+ else
+ WARN(1, KERN_ERR "Device '%s' does not have a release() "
+ "function, it is broken and must be fixed.\n",
+ dev_name(dev));
+}
+
+static struct kobj_type device_ktype = {
+ .release = device_release,
+ .sysfs_ops = &dev_sysfs_ops,
+};
+
+
+static int dev_uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+ struct kobj_type *ktype = get_ktype(kobj);
+
+ if (ktype == &device_ktype) {
+ struct device *dev = to_dev(kobj);
+ if (dev->uevent_suppress)
+ return 0;
+ if (dev->bus)
+ return 1;
+ if (dev->class)
+ return 1;
+ }
+ return 0;
+}
+
+static const char *dev_uevent_name(struct kset *kset, struct kobject *kobj)
+{
+ struct device *dev = to_dev(kobj);
+
+ if (dev->bus)
+ return dev->bus->name;
+ if (dev->class)
+ return dev->class->name;
+ return NULL;
+}
+
+static int dev_uevent(struct kset *kset, struct kobject *kobj,
+ struct kobj_uevent_env *env)
+{
+ struct device *dev = to_dev(kobj);
+ int retval = 0;
+
+#ifndef DDE_LINUX
+ /* add the major/minor if present */
+ if (MAJOR(dev->devt)) {
+ add_uevent_var(env, "MAJOR=%u", MAJOR(dev->devt));
+ add_uevent_var(env, "MINOR=%u", MINOR(dev->devt));
+ }
+
+ if (dev->type && dev->type->name)
+ add_uevent_var(env, "DEVTYPE=%s", dev->type->name);
+
+ if (dev->driver)
+ add_uevent_var(env, "DRIVER=%s", dev->driver->name);
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+ if (dev->class) {
+ struct device *parent = dev->parent;
+
+ /* find first bus device in parent chain */
+ while (parent && !parent->bus)
+ parent = parent->parent;
+ if (parent && parent->bus) {
+ const char *path;
+
+ path = kobject_get_path(&parent->kobj, GFP_KERNEL);
+ if (path) {
+ add_uevent_var(env, "PHYSDEVPATH=%s", path);
+ kfree(path);
+ }
+
+ add_uevent_var(env, "PHYSDEVBUS=%s", parent->bus->name);
+
+ if (parent->driver)
+ add_uevent_var(env, "PHYSDEVDRIVER=%s",
+ parent->driver->name);
+ }
+ } else if (dev->bus) {
+ add_uevent_var(env, "PHYSDEVBUS=%s", dev->bus->name);
+
+ if (dev->driver)
+ add_uevent_var(env, "PHYSDEVDRIVER=%s",
+ dev->driver->name);
+ }
+#endif
+
+ /* have the bus specific function add its stuff */
+ if (dev->bus && dev->bus->uevent) {
+ retval = dev->bus->uevent(dev, env);
+ if (retval)
+ pr_debug("device: '%s': %s: bus uevent() returned %d\n",
+ dev_name(dev), __func__, retval);
+ }
+
+ /* have the class specific function add its stuff */
+ if (dev->class && dev->class->dev_uevent) {
+ retval = dev->class->dev_uevent(dev, env);
+ if (retval)
+ pr_debug("device: '%s': %s: class uevent() "
+ "returned %d\n", dev_name(dev),
+ __func__, retval);
+ }
+
+ /* have the device type specific function add its stuff */
+ if (dev->type && dev->type->uevent) {
+ retval = dev->type->uevent(dev, env);
+ if (retval)
+ pr_debug("device: '%s': %s: dev_type uevent() "
+ "returned %d\n", dev_name(dev),
+ __func__, retval);
+ }
+#endif
+
+ return retval;
+}
+
+static struct kset_uevent_ops device_uevent_ops = {
+ .filter = dev_uevent_filter,
+ .name = dev_uevent_name,
+ .uevent = dev_uevent,
+};
+
+static ssize_t show_uevent(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct kobject *top_kobj;
+ struct kset *kset;
+ struct kobj_uevent_env *env = NULL;
+ int i;
+ size_t count = 0;
+ int retval;
+
+ /* search the kset, the device belongs to */
+ top_kobj = &dev->kobj;
+ while (!top_kobj->kset && top_kobj->parent)
+ top_kobj = top_kobj->parent;
+ if (!top_kobj->kset)
+ goto out;
+
+ kset = top_kobj->kset;
+ if (!kset->uevent_ops || !kset->uevent_ops->uevent)
+ goto out;
+
+ /* respect filter */
+ if (kset->uevent_ops && kset->uevent_ops->filter)
+ if (!kset->uevent_ops->filter(kset, &dev->kobj))
+ goto out;
+
+ env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
+ if (!env)
+ return -ENOMEM;
+
+ /* let the kset specific function add its keys */
+ retval = kset->uevent_ops->uevent(kset, &dev->kobj, env);
+ if (retval)
+ goto out;
+
+ /* copy keys to file */
+ for (i = 0; i < env->envp_idx; i++)
+ count += sprintf(&buf[count], "%s\n", env->envp[i]);
+out:
+ kfree(env);
+ return count;
+}
+
+static ssize_t store_uevent(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ enum kobject_action action;
+
+ if (kobject_action_type(buf, count, &action) == 0) {
+ kobject_uevent(&dev->kobj, action);
+ goto out;
+ }
+
+ dev_err(dev, "uevent: unsupported action-string; this will "
+ "be ignored in a future kernel version\n");
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+out:
+ return count;
+}
+
+static struct device_attribute uevent_attr =
+ __ATTR(uevent, S_IRUGO | S_IWUSR, show_uevent, store_uevent);
+
+static int device_add_attributes(struct device *dev,
+ struct device_attribute *attrs)
+{
+ int error = 0;
+ int i;
+
+ if (attrs) {
+ for (i = 0; attr_name(attrs[i]); i++) {
+ error = device_create_file(dev, &attrs[i]);
+ if (error)
+ break;
+ }
+ if (error)
+ while (--i >= 0)
+ device_remove_file(dev, &attrs[i]);
+ }
+ return error;
+}
+
+static void device_remove_attributes(struct device *dev,
+ struct device_attribute *attrs)
+{
+ int i;
+
+ if (attrs)
+ for (i = 0; attr_name(attrs[i]); i++)
+ device_remove_file(dev, &attrs[i]);
+}
+
+static int device_add_groups(struct device *dev,
+ struct attribute_group **groups)
+{
+ int error = 0;
+ int i;
+
+ if (groups) {
+ for (i = 0; groups[i]; i++) {
+ error = sysfs_create_group(&dev->kobj, groups[i]);
+ if (error) {
+ while (--i >= 0)
+ sysfs_remove_group(&dev->kobj,
+ groups[i]);
+ break;
+ }
+ }
+ }
+ return error;
+}
+
+static void device_remove_groups(struct device *dev,
+ struct attribute_group **groups)
+{
+ int i;
+
+ if (groups)
+ for (i = 0; groups[i]; i++)
+ sysfs_remove_group(&dev->kobj, groups[i]);
+}
+
+static int device_add_attrs(struct device *dev)
+{
+ struct class *class = dev->class;
+ struct device_type *type = dev->type;
+ int error;
+
+ if (class) {
+ error = device_add_attributes(dev, class->dev_attrs);
+ if (error)
+ return error;
+ }
+
+ if (type) {
+ error = device_add_groups(dev, type->groups);
+ if (error)
+ goto err_remove_class_attrs;
+ }
+
+ error = device_add_groups(dev, dev->groups);
+ if (error)
+ goto err_remove_type_groups;
+
+ return 0;
+
+ err_remove_type_groups:
+ if (type)
+ device_remove_groups(dev, type->groups);
+ err_remove_class_attrs:
+ if (class)
+ device_remove_attributes(dev, class->dev_attrs);
+
+ return error;
+}
+
+static void device_remove_attrs(struct device *dev)
+{
+ struct class *class = dev->class;
+ struct device_type *type = dev->type;
+
+ device_remove_groups(dev, dev->groups);
+
+ if (type)
+ device_remove_groups(dev, type->groups);
+
+ if (class)
+ device_remove_attributes(dev, class->dev_attrs);
+}
+
+
+static ssize_t show_dev(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ return print_dev_t(buf, dev->devt);
+}
+
+static struct device_attribute devt_attr =
+ __ATTR(dev, S_IRUGO, show_dev, NULL);
+
+/* kset to create /sys/devices/ */
+struct kset *devices_kset;
+
+/**
+ * device_create_file - create sysfs attribute file for device.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ */
+int device_create_file(struct device *dev, struct device_attribute *attr)
+{
+ int error = 0;
+ if (dev)
+ error = sysfs_create_file(&dev->kobj, &attr->attr);
+ return error;
+}
+
+/**
+ * device_remove_file - remove sysfs attribute file.
+ * @dev: device.
+ * @attr: device attribute descriptor.
+ */
+void device_remove_file(struct device *dev, struct device_attribute *attr)
+{
+ if (dev)
+ sysfs_remove_file(&dev->kobj, &attr->attr);
+}
+
+/**
+ * device_create_bin_file - create sysfs binary attribute file for device.
+ * @dev: device.
+ * @attr: device binary attribute descriptor.
+ */
+int device_create_bin_file(struct device *dev, struct bin_attribute *attr)
+{
+ int error = -EINVAL;
+ if (dev)
+ error = sysfs_create_bin_file(&dev->kobj, attr);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_create_bin_file);
+
+/**
+ * device_remove_bin_file - remove sysfs binary attribute file
+ * @dev: device.
+ * @attr: device binary attribute descriptor.
+ */
+void device_remove_bin_file(struct device *dev, struct bin_attribute *attr)
+{
+ if (dev)
+ sysfs_remove_bin_file(&dev->kobj, attr);
+}
+EXPORT_SYMBOL_GPL(device_remove_bin_file);
+
+/**
+ * device_schedule_callback_owner - helper to schedule a callback for a device
+ * @dev: device.
+ * @func: callback function to invoke later.
+ * @owner: module owning the callback routine
+ *
+ * Attribute methods must not unregister themselves or their parent device
+ * (which would amount to the same thing). Attempts to do so will deadlock,
+ * since unregistration is mutually exclusive with driver callbacks.
+ *
+ * Instead methods can call this routine, which will attempt to allocate
+ * and schedule a workqueue request to call back @func with @dev as its
+ * argument in the workqueue's process context. @dev will be pinned until
+ * @func returns.
+ *
+ * This routine is usually called via the inline device_schedule_callback(),
+ * which automatically sets @owner to THIS_MODULE.
+ *
+ * Returns 0 if the request was submitted, -ENOMEM if storage could not
+ * be allocated, -ENODEV if a reference to @owner isn't available.
+ *
+ * NOTE: This routine won't work if CONFIG_SYSFS isn't set! It uses an
+ * underlying sysfs routine (since it is intended for use by attribute
+ * methods), and if sysfs isn't available you'll get nothing but -ENOSYS.
+ */
+int device_schedule_callback_owner(struct device *dev,
+ void (*func)(struct device *), struct module *owner)
+{
+ return sysfs_schedule_callback(&dev->kobj,
+ (void (*)(void *)) func, dev, owner);
+}
+EXPORT_SYMBOL_GPL(device_schedule_callback_owner);
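/*
 * [Editor's sketch -- not part of this commit.] The deferral pattern the
 * comment above describes: a sysfs store method must not unregister its
 * own device, so it schedules the work instead via the
 * device_schedule_callback() wrapper.  Hypothetical names.
 */
static void foo_delete(struct device *dev)
{
	device_unregister(dev);		/* runs later, in workqueue context */
}

static ssize_t foo_store_delete(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	if (device_schedule_callback(dev, foo_delete))
		dev_err(dev, "cannot schedule deletion\n");
	return count;
}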
+
+static void klist_children_get(struct klist_node *n)
+{
+ struct device *dev = container_of(n, struct device, knode_parent);
+
+ get_device(dev);
+}
+
+static void klist_children_put(struct klist_node *n)
+{
+ struct device *dev = container_of(n, struct device, knode_parent);
+
+ put_device(dev);
+}
+
+/**
+ * device_initialize - init device structure.
+ * @dev: device.
+ *
+ * This prepares the device for use by other layers by initializing
+ * its fields.
+ * It is the first half of device_register(), if called by
+ * that function, though it can also be called separately, so one
+ * may use @dev's fields. In particular, get_device()/put_device()
+ * may be used for reference counting of @dev after calling this
+ * function.
+ *
+ * NOTE: Use put_device() to give up your reference instead of freeing
+ * @dev directly once you have called this function.
+ */
+void device_initialize(struct device *dev)
+{
+ dev->kobj.kset = devices_kset;
+ kobject_init(&dev->kobj, &device_ktype);
+ klist_init(&dev->klist_children, klist_children_get,
+ klist_children_put);
+ INIT_LIST_HEAD(&dev->dma_pools);
+ init_MUTEX(&dev->sem);
+ spin_lock_init(&dev->devres_lock);
+ INIT_LIST_HEAD(&dev->devres_head);
+ device_init_wakeup(dev, 0);
+ device_pm_init(dev);
+ set_dev_node(dev, -1);
+}
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
+ /* class devices without a parent live in /sys/class/<classname>/ */
+ if (dev->class && (!parent || parent->class != dev->class))
+ return &dev->class->p->class_subsys.kobj;
+ /* all other devices keep their parent */
+ else if (parent)
+ return &parent->kobj;
+
+ return NULL;
+}
+
+static inline void cleanup_device_parent(struct device *dev) {}
+static inline void cleanup_glue_dir(struct device *dev,
+ struct kobject *glue_dir) {}
+#else
+static struct kobject *virtual_device_parent(struct device *dev)
+{
+ static struct kobject *virtual_dir = NULL;
+
+ if (!virtual_dir)
+ virtual_dir = kobject_create_and_add("virtual",
+ &devices_kset->kobj);
+
+ return virtual_dir;
+}
+
+static struct kobject *get_device_parent(struct device *dev,
+ struct device *parent)
+{
+ int retval;
+
+ if (dev->class) {
+ struct kobject *kobj = NULL;
+ struct kobject *parent_kobj;
+ struct kobject *k;
+
+ /*
+ * If we have no parent, we live in "virtual".
+ * Class-devices with a non class-device as parent, live
+ * in a "glue" directory to prevent namespace collisions.
+ */
+ if (parent == NULL)
+ parent_kobj = virtual_device_parent(dev);
+ else if (parent->class)
+ return &parent->kobj;
+ else
+ parent_kobj = &parent->kobj;
+
+ /* find our class-directory at the parent and reference it */
+ spin_lock(&dev->class->p->class_dirs.list_lock);
+ list_for_each_entry(k, &dev->class->p->class_dirs.list, entry)
+ if (k->parent == parent_kobj) {
+ kobj = kobject_get(k);
+ break;
+ }
+ spin_unlock(&dev->class->p->class_dirs.list_lock);
+ if (kobj)
+ return kobj;
+
+ /* or create a new class-directory at the parent device */
+ k = kobject_create();
+ if (!k)
+ return NULL;
+ k->kset = &dev->class->p->class_dirs;
+ retval = kobject_add(k, parent_kobj, "%s", dev->class->name);
+ if (retval < 0) {
+ kobject_put(k);
+ return NULL;
+ }
+ /* do not emit an uevent for this simple "glue" directory */
+ return k;
+ }
+
+ if (parent)
+ return &parent->kobj;
+ return NULL;
+}
+
+static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
+{
+ /* see if we live in a "glue" directory */
+ if (!glue_dir || !dev->class ||
+ glue_dir->kset != &dev->class->p->class_dirs)
+ return;
+
+ kobject_put(glue_dir);
+}
+
+static void cleanup_device_parent(struct device *dev)
+{
+ cleanup_glue_dir(dev, dev->kobj.parent);
+}
+#endif
+
+static void setup_parent(struct device *dev, struct device *parent)
+{
+ struct kobject *kobj;
+ kobj = get_device_parent(dev, parent);
+ if (kobj)
+ dev->kobj.parent = kobj;
+}
+
+static int device_add_class_symlinks(struct device *dev)
+{
+ int error;
+
+ if (!dev->class)
+ return 0;
+
+ error = sysfs_create_link(&dev->kobj,
+ &dev->class->p->class_subsys.kobj,
+ "subsystem");
+ if (error)
+ goto out;
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+ /* stacked class devices need a symlink in the class directory */
+ if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
+ device_is_not_partition(dev)) {
+ error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_subsys;
+ }
+
+ if (dev->parent && device_is_not_partition(dev)) {
+ struct device *parent = dev->parent;
+ char *class_name;
+
+ /*
+ * stacked class devices have the 'device' link
+ * pointing to the bus device instead of the parent
+ */
+ while (parent->class && !parent->bus && parent->parent)
+ parent = parent->parent;
+
+ error = sysfs_create_link(&dev->kobj,
+ &parent->kobj,
+ "device");
+ if (error)
+ goto out_busid;
+
+ class_name = make_class_name(dev->class->name,
+ &dev->kobj);
+ if (class_name)
+ error = sysfs_create_link(&dev->parent->kobj,
+ &dev->kobj, class_name);
+ kfree(class_name);
+ if (error)
+ goto out_device;
+ }
+ return 0;
+
+out_device:
+ if (dev->parent && device_is_not_partition(dev))
+ sysfs_remove_link(&dev->kobj, "device");
+out_busid:
+ if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
+ device_is_not_partition(dev))
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ dev_name(dev));
+#else
+ /* link in the class directory pointing to the device */
+ error = sysfs_create_link(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ goto out_subsys;
+
+ if (dev->parent && device_is_not_partition(dev)) {
+ error = sysfs_create_link(&dev->kobj, &dev->parent->kobj,
+ "device");
+ if (error)
+ goto out_busid;
+ }
+ return 0;
+
+out_busid:
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+#endif
+
+out_subsys:
+ sysfs_remove_link(&dev->kobj, "subsystem");
+out:
+ return error;
+}
+
+static void device_remove_class_symlinks(struct device *dev)
+{
+ if (!dev->class)
+ return;
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+ if (dev->parent && device_is_not_partition(dev)) {
+ char *class_name;
+
+ class_name = make_class_name(dev->class->name, &dev->kobj);
+ if (class_name) {
+ sysfs_remove_link(&dev->parent->kobj, class_name);
+ kfree(class_name);
+ }
+ sysfs_remove_link(&dev->kobj, "device");
+ }
+
+ if (dev->kobj.parent != &dev->class->p->class_subsys.kobj &&
+ device_is_not_partition(dev))
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ dev_name(dev));
+#else
+ if (dev->parent && device_is_not_partition(dev))
+ sysfs_remove_link(&dev->kobj, "device");
+
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj, dev_name(dev));
+#endif
+
+ sysfs_remove_link(&dev->kobj, "subsystem");
+}
+
+/**
+ * dev_set_name - set a device name
+ * @dev: device
+ * @fmt: format string for the device's name
+ */
+int dev_set_name(struct device *dev, const char *fmt, ...)
+{
+ va_list vargs;
+ char *s;
+
+ va_start(vargs, fmt);
+ vsnprintf(dev->bus_id, sizeof(dev->bus_id), fmt, vargs);
+ va_end(vargs);
+
+ /* ewww... some of these buggers have / in the name... */
+ while ((s = strchr(dev->bus_id, '/')))
+ *s = '!';
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dev_set_name);
+
+/**
+ * device_to_dev_kobj - select a /sys/dev/ directory for the device
+ * @dev: device
+ *
+ * By default we select char/ for new entries.  Setting class->dev_kobj
+ * to NULL prevents an entry from being created.  class->dev_kobj must
+ * be set (or cleared) before any devices are registered to the class
+ * otherwise device_create_sys_dev_entry() and
+ * device_remove_sys_dev_entry() will disagree about the presence
+ * of the link.
+ */
+static struct kobject *device_to_dev_kobj(struct device *dev)
+{
+ struct kobject *kobj;
+
+ if (dev->class)
+ kobj = dev->class->dev_kobj;
+ else
+ kobj = sysfs_dev_char_kobj;
+
+ return kobj;
+}
+
+static int device_create_sys_dev_entry(struct device *dev)
+{
+ struct kobject *kobj = device_to_dev_kobj(dev);
+ int error = 0;
+ char devt_str[15];
+
+ if (kobj) {
+ format_dev_t(devt_str, dev->devt);
+ error = sysfs_create_link(kobj, &dev->kobj, devt_str);
+ }
+
+ return error;
+}
+
+static void device_remove_sys_dev_entry(struct device *dev)
+{
+ struct kobject *kobj = device_to_dev_kobj(dev);
+ char devt_str[15];
+
+ if (kobj) {
+ format_dev_t(devt_str, dev->devt);
+ sysfs_remove_link(kobj, devt_str);
+ }
+}
+
+/**
+ * device_add - add device to device hierarchy.
+ * @dev: device.
+ *
+ * This is part 2 of device_register(), though may be called
+ * separately _iff_ device_initialize() has been called separately.
+ *
+ * This adds @dev to the kobject hierarchy via kobject_add(), adds it
+ * to the global and sibling lists for the device, then
+ * adds it to the other relevant subsystems of the driver model.
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use put_device() to give up your
+ * reference instead.
+ */
+int device_add(struct device *dev)
+{
+ struct device *parent = NULL;
+ struct class_interface *class_intf;
+ int error = -EINVAL;
+
+ dev = get_device(dev);
+ if (!dev)
+ goto done;
+
+ /* Temporarily support init_name if it is set.
+ * It will override bus_id for now */
+ if (dev->init_name)
+ dev_set_name(dev, "%s", dev->init_name);
+
+ if (!strlen(dev->bus_id))
+ goto done;
+
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+
+ parent = get_device(dev->parent);
+ setup_parent(dev, parent);
+
+ /* use parent numa_node */
+ if (parent)
+ set_dev_node(dev, dev_to_node(parent));
+
+ /* first, register with generic layer. */
+ error = kobject_add(&dev->kobj, dev->kobj.parent, "%s", dev_name(dev));
+ if (error)
+ goto Error;
+
+ /* notify platform of device entry */
+ if (platform_notify)
+ platform_notify(dev);
+
+ error = device_create_file(dev, &uevent_attr);
+ if (error)
+ goto attrError;
+
+ if (MAJOR(dev->devt)) {
+ error = device_create_file(dev, &devt_attr);
+ if (error)
+ goto ueventattrError;
+
+ error = device_create_sys_dev_entry(dev);
+ if (error)
+ goto devtattrError;
+ }
+
+ error = device_add_class_symlinks(dev);
+ if (error)
+ goto SymlinkError;
+ error = device_add_attrs(dev);
+ if (error)
+ goto AttrsError;
+ error = bus_add_device(dev);
+ if (error)
+ goto BusError;
+ error = dpm_sysfs_add(dev);
+ if (error)
+ goto DPMError;
+ device_pm_add(dev);
+
+ /* Notify clients of device addition. This call must come
+ * after dpm_sysfs_add() and before kobject_uevent().
+ */
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_ADD_DEVICE, dev);
+
+ kobject_uevent(&dev->kobj, KOBJ_ADD);
+ bus_attach_device(dev);
+ if (parent)
+ klist_add_tail(&dev->knode_parent, &parent->klist_children);
+
+ if (dev->class) {
+ mutex_lock(&dev->class->p->class_mutex);
+ /* tie the class to the device */
+ klist_add_tail(&dev->knode_class,
+ &dev->class->p->class_devices);
+
+ /* notify any interfaces that the device is here */
+ list_for_each_entry(class_intf,
+ &dev->class->p->class_interfaces, node)
+ if (class_intf->add_dev)
+ class_intf->add_dev(dev, class_intf);
+ mutex_unlock(&dev->class->p->class_mutex);
+ }
+done:
+ put_device(dev);
+ return error;
+ DPMError:
+ bus_remove_device(dev);
+ BusError:
+ device_remove_attrs(dev);
+ AttrsError:
+ device_remove_class_symlinks(dev);
+ SymlinkError:
+ if (MAJOR(dev->devt))
+ device_remove_sys_dev_entry(dev);
+ devtattrError:
+ if (MAJOR(dev->devt))
+ device_remove_file(dev, &devt_attr);
+ ueventattrError:
+ device_remove_file(dev, &uevent_attr);
+ attrError:
+ kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ kobject_del(&dev->kobj);
+ Error:
+ cleanup_device_parent(dev);
+ if (parent)
+ put_device(parent);
+ goto done;
+}
+
+/**
+ * device_register - register a device with the system.
+ * @dev: pointer to the device structure
+ *
+ * This happens in two clean steps - initialize the device
+ * and add it to the system. The two steps can be called
+ * separately, but this is the easiest and most common.
+ * I.e. you should only call the two helpers separately if you have a
+ * clearly defined need to use and refcount the device
+ * before it is added to the hierarchy.
+ *
+ * NOTE: _Never_ directly free @dev after calling this function, even
+ * if it returned an error! Always use put_device() to give up the
+ * reference initialized in this function instead.
+ */
+int device_register(struct device *dev)
+{
+ device_initialize(dev);
+ return device_add(dev);
+}
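/*
 * [Editor's sketch -- not part of this commit.] Registering an embedded
 * struct device: the release() method is mandatory, as device_release()
 * warns above, and a failed device_register() still requires put_device()
 * rather than kfree().  Hypothetical names.
 */
#include <linux/slab.h>

struct foo_device {
	struct device dev;
	int id;
};

static void foo_dev_release(struct device *dev)
{
	kfree(container_of(dev, struct foo_device, dev));
}

static struct foo_device *foo_new(struct device *parent, int id)
{
	struct foo_device *foo = kzalloc(sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return NULL;
	foo->id = id;
	foo->dev.parent = parent;
	foo->dev.release = foo_dev_release;
	dev_set_name(&foo->dev, "foo%d", id);
	if (device_register(&foo->dev)) {
		put_device(&foo->dev);	/* never kfree() after this point */
		return NULL;
	}
	return foo;
}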
+
+/**
+ * get_device - increment reference count for device.
+ * @dev: device.
+ *
+ * This simply forwards the call to kobject_get(), though
+ * we do take care to provide for the case that we get a NULL
+ * pointer passed in.
+ */
+struct device *get_device(struct device *dev)
+{
+ return dev ? to_dev(kobject_get(&dev->kobj)) : NULL;
+}
+
+/**
+ * put_device - decrement reference count.
+ * @dev: device in question.
+ */
+void put_device(struct device *dev)
+{
+ /* might_sleep(); */
+ if (dev)
+ kobject_put(&dev->kobj);
+}
+
+/**
+ * device_del - delete device from system.
+ * @dev: device.
+ *
+ * This is the first part of the device unregistration
+ * sequence. This removes the device from the lists we control
+ * from here, has it removed from the other driver model
+ * subsystems it was added to in device_add(), and removes it
+ * from the kobject hierarchy.
+ *
+ * NOTE: this should be called manually _iff_ device_add() was
+ * also called manually.
+ */
+void device_del(struct device *dev)
+{
+ struct device *parent = dev->parent;
+ struct class_interface *class_intf;
+
+ /* Notify clients of device removal. This call must come
+ * before dpm_sysfs_remove().
+ */
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_DEL_DEVICE, dev);
+ device_pm_remove(dev);
+ dpm_sysfs_remove(dev);
+ if (parent)
+ klist_del(&dev->knode_parent);
+ if (MAJOR(dev->devt)) {
+ device_remove_sys_dev_entry(dev);
+ device_remove_file(dev, &devt_attr);
+ }
+ if (dev->class) {
+ device_remove_class_symlinks(dev);
+
+ mutex_lock(&dev->class->p->class_mutex);
+ /* notify any interfaces that the device is now gone */
+ list_for_each_entry(class_intf,
+ &dev->class->p->class_interfaces, node)
+ if (class_intf->remove_dev)
+ class_intf->remove_dev(dev, class_intf);
+ /* remove the device from the class list */
+ klist_del(&dev->knode_class);
+ mutex_unlock(&dev->class->p->class_mutex);
+ }
+ device_remove_file(dev, &uevent_attr);
+ device_remove_attrs(dev);
+ bus_remove_device(dev);
+
+ /*
+ * Some platform devices are driven without driver attached
+ * and managed resources may have been acquired. Make sure
+ * all resources are released.
+ */
+ devres_release_all(dev);
+
+ /* Notify the platform of the removal, in case they
+ * need to do anything...
+ */
+ if (platform_notify_remove)
+ platform_notify_remove(dev);
+ kobject_uevent(&dev->kobj, KOBJ_REMOVE);
+ cleanup_device_parent(dev);
+ kobject_del(&dev->kobj);
+ put_device(parent);
+}
+
+/**
+ * device_unregister - unregister device from system.
+ * @dev: device going away.
+ *
+ * We do this in two parts, like we do device_register(). First,
+ * we remove it from all the subsystems with device_del(), then
+ * we decrement the reference count via put_device(). If that
+ * is the final reference count, the device will be cleaned up
+ * via device_release() above. Otherwise, the structure will
+ * stick around until the final reference to the device is dropped.
+ */
+void device_unregister(struct device *dev)
+{
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ device_del(dev);
+ put_device(dev);
+}
+
+static struct device *next_device(struct klist_iter *i)
+{
+ struct klist_node *n = klist_next(i);
+ return n ? container_of(n, struct device, knode_parent) : NULL;
+}
+
+/**
+ * device_for_each_child - device child iterator.
+ * @parent: parent struct device.
+ * @data: data for the callback.
+ * @fn: function to be called for each device.
+ *
+ * Iterate over @parent's child devices, and call @fn for each,
+ * passing it @data.
+ *
+ * We check the return of @fn each time. If it returns anything
+ * other than 0, we break out and return that value.
+ */
+int device_for_each_child(struct device *parent, void *data,
+ int (*fn)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+ int error = 0;
+
+ klist_iter_init(&parent->klist_children, &i);
+ while ((child = next_device(&i)) && !error)
+ error = fn(child, data);
+ klist_iter_exit(&i);
+ return error;
+}
+
+/**
+ * device_find_child - device iterator for locating a particular device.
+ * @parent: parent struct device
+ * @data: Data to pass to match function
+ * @match: Callback function to check device
+ *
+ * This is similar to the device_for_each_child() function above, but it
+ * returns a reference to a device that is 'found' for later use, as
+ * determined by the @match callback.
+ *
+ * The callback should return 0 if the device doesn't match and non-zero
+ * if it does. If the callback returns non-zero and a reference to the
+ * current device can be obtained, this function will return to the caller
+ * and not iterate over any more devices.
+ */
+struct device *device_find_child(struct device *parent, void *data,
+ int (*match)(struct device *dev, void *data))
+{
+ struct klist_iter i;
+ struct device *child;
+
+ if (!parent)
+ return NULL;
+
+ klist_iter_init(&parent->klist_children, &i);
+ while ((child = next_device(&i)))
+ if (match(child, data) && get_device(child))
+ break;
+ klist_iter_exit(&i);
+ return child;
+}
+
+int __init devices_init(void)
+{
+ devices_kset = kset_create_and_add("devices", &device_uevent_ops, NULL);
+ if (!devices_kset)
+ return -ENOMEM;
+ dev_kobj = kobject_create_and_add("dev", NULL);
+ if (!dev_kobj)
+ goto dev_kobj_err;
+ sysfs_dev_block_kobj = kobject_create_and_add("block", dev_kobj);
+ if (!sysfs_dev_block_kobj)
+ goto block_kobj_err;
+ sysfs_dev_char_kobj = kobject_create_and_add("char", dev_kobj);
+ if (!sysfs_dev_char_kobj)
+ goto char_kobj_err;
+
+ return 0;
+
+ char_kobj_err:
+ kobject_put(sysfs_dev_block_kobj);
+ block_kobj_err:
+ kobject_put(dev_kobj);
+ dev_kobj_err:
+ kset_unregister(devices_kset);
+ return -ENOMEM;
+}
+
+EXPORT_SYMBOL_GPL(device_for_each_child);
+EXPORT_SYMBOL_GPL(device_find_child);
+
+EXPORT_SYMBOL_GPL(device_initialize);
+EXPORT_SYMBOL_GPL(device_add);
+EXPORT_SYMBOL_GPL(device_register);
+
+EXPORT_SYMBOL_GPL(device_del);
+EXPORT_SYMBOL_GPL(device_unregister);
+EXPORT_SYMBOL_GPL(get_device);
+EXPORT_SYMBOL_GPL(put_device);
+
+EXPORT_SYMBOL_GPL(device_create_file);
+EXPORT_SYMBOL_GPL(device_remove_file);
+
+struct root_device
+{
+ struct device dev;
+ struct module *owner;
+};
+
+#define to_root_device(dev) container_of(dev, struct root_device, dev)
+
+static void root_device_release(struct device *dev)
+{
+ kfree(to_root_device(dev));
+}
+
+/**
+ * __root_device_register - allocate and register a root device
+ * @name: root device name
+ * @owner: owner module of the root device, usually THIS_MODULE
+ *
+ * This function allocates a root device and registers it
+ * using device_register(). In order to free the returned
+ * device, use root_device_unregister().
+ *
+ * Root devices are dummy devices which allow other devices
+ * to be grouped under /sys/devices. Use this function to
+ * allocate a root device and then use it as the parent of
+ * any device which should appear under /sys/devices/{name}
+ *
+ * The /sys/devices/{name} directory will also contain a
+ * 'module' symlink which points to the @owner directory
+ * in sysfs.
+ *
+ * Note: You probably want to use root_device_register().
+ */
+struct device *__root_device_register(const char *name, struct module *owner)
+{
+ struct root_device *root;
+ int err = -ENOMEM;
+
+ root = kzalloc(sizeof(struct root_device), GFP_KERNEL);
+ if (!root)
+ return ERR_PTR(err);
+
+ err = dev_set_name(&root->dev, name);
+ if (err) {
+ kfree(root);
+ return ERR_PTR(err);
+ }
+
+ root->dev.release = root_device_release;
+
+ err = device_register(&root->dev);
+ if (err) {
+ put_device(&root->dev);
+ return ERR_PTR(err);
+ }
+
+#ifdef CONFIG_MODULES /* gotta find a "cleaner" way to do this */
+ if (owner) {
+ struct module_kobject *mk = &owner->mkobj;
+
+ err = sysfs_create_link(&root->dev.kobj, &mk->kobj, "module");
+ if (err) {
+ device_unregister(&root->dev);
+ return ERR_PTR(err);
+ }
+ root->owner = owner;
+ }
+#endif
+
+ return &root->dev;
+}
+EXPORT_SYMBOL_GPL(__root_device_register);
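/*
 * [Editor's sketch -- not part of this commit.] Grouping devices under
 * /sys/devices/foo by parenting them to a root device, using the
 * root_device_register() wrapper.  Hypothetical names.
 */
static struct device *foo_root;

static int __init foo_root_init(void)
{
	foo_root = root_device_register("foo");
	if (IS_ERR(foo_root))
		return PTR_ERR(foo_root);
	/* children registered with .parent = foo_root appear below it */
	return 0;
}

static void __exit foo_root_exit(void)
{
	root_device_unregister(foo_root);
}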
+
+/**
+ * root_device_unregister - unregister and free a root device
+ * @dev: device going away
+ *
+ * This function unregisters and cleans up a device that was created by
+ * root_device_register().
+ */
+void root_device_unregister(struct device *dev)
+{
+ struct root_device *root = to_root_device(dev);
+
+ if (root->owner)
+ sysfs_remove_link(&root->dev.kobj, "module");
+
+ device_unregister(dev);
+}
+EXPORT_SYMBOL_GPL(root_device_unregister);
+
+
+static void device_create_release(struct device *dev)
+{
+ pr_debug("device: '%s': %s\n", dev_name(dev), __func__);
+ kfree(dev);
+}
+
+/**
+ * device_create_vargs - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @fmt: string for the device's name
+ * @args: va_list for the device's name
+ *
+ * This function can be used by char device classes. A struct device
+ * will be created in sysfs, registered to the specified class.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create_vargs(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const char *fmt,
+ va_list args)
+{
+ struct device *dev = NULL;
+ int retval = -ENODEV;
+
+ if (class == NULL || IS_ERR(class))
+ goto error;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ retval = -ENOMEM;
+ goto error;
+ }
+
+ dev->devt = devt;
+ dev->class = class;
+ dev->parent = parent;
+ dev->release = device_create_release;
+ dev_set_drvdata(dev, drvdata);
+
+ vsnprintf(dev->bus_id, BUS_ID_SIZE, fmt, args);
+ retval = device_register(dev);
+ if (retval)
+ goto error;
+
+ return dev;
+
+error:
+ put_device(dev);
+ return ERR_PTR(retval);
+}
+EXPORT_SYMBOL_GPL(device_create_vargs);
+
+/**
+ * device_create - creates a device and registers it with sysfs
+ * @class: pointer to the struct class that this device should be registered to
+ * @parent: pointer to the parent struct device of this new device, if any
+ * @devt: the dev_t for the char device to be added
+ * @drvdata: the data to be added to the device for callbacks
+ * @fmt: string for the device's name
+ *
+ * This function can be used by char device classes. A struct device
+ * will be created in sysfs, registered to the specified class.
+ *
+ * A "dev" file will be created, showing the dev_t for the device, if
+ * the dev_t is not 0,0.
+ * If a pointer to a parent struct device is passed in, the newly created
+ * struct device will be a child of that device in sysfs.
+ * The pointer to the struct device will be returned from the call.
+ * Any further sysfs files that might be required can be created using this
+ * pointer.
+ *
+ * Note: the struct class passed to this function must have previously
+ * been created with a call to class_create().
+ */
+struct device *device_create(struct class *class, struct device *parent,
+ dev_t devt, void *drvdata, const char *fmt, ...)
+{
+ va_list vargs;
+ struct device *dev;
+
+ va_start(vargs, fmt);
+ dev = device_create_vargs(class, parent, devt, drvdata, fmt, vargs);
+ va_end(vargs);
+ return dev;
+}
+EXPORT_SYMBOL_GPL(device_create);
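/*
 * [Editor's sketch -- not part of this commit.] The char-device pattern
 * device_create() serves: one sysfs node per minor under a class, so
 * userspace can create /dev entries.  foo_class, foo_major and the minor
 * count are hypothetical.
 */
#include <linux/kdev_t.h>

static int foo_create_nodes(struct class *foo_class, int foo_major)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct device *dev = device_create(foo_class, NULL,
						   MKDEV(foo_major, i),
						   NULL, "foo%d", i);
		if (IS_ERR(dev))
			return PTR_ERR(dev);
	}
	return 0;
}

/* teardown, per minor: device_destroy(foo_class, MKDEV(foo_major, i)); */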
+
+static int __match_devt(struct device *dev, void *data)
+{
+ dev_t *devt = data;
+
+ return dev->devt == *devt;
+}
+
+/**
+ * device_destroy - removes a device that was created with device_create()
+ * @class: pointer to the struct class that this device was registered with
+ * @devt: the dev_t of the device that was previously registered
+ *
+ * This call unregisters and cleans up a device that was created with a
+ * call to device_create().
+ */
+void device_destroy(struct class *class, dev_t devt)
+{
+ struct device *dev;
+
+ dev = class_find_device(class, NULL, &devt, __match_devt);
+ if (dev) {
+ put_device(dev);
+ device_unregister(dev);
+ }
+}
+EXPORT_SYMBOL_GPL(device_destroy);
+
+/**
+ * device_rename - renames a device
+ * @dev: the pointer to the struct device to be renamed
+ * @new_name: the new name of the device
+ *
+ * It is the responsibility of the caller to provide mutual
+ * exclusion between two different calls of device_rename
+ * on the same device to ensure that new_name is valid and
+ * won't conflict with other devices.
+ */
+int device_rename(struct device *dev, char *new_name)
+{
+ char *old_class_name = NULL;
+ char *new_class_name = NULL;
+ char *old_device_name = NULL;
+ int error;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ pr_debug("device: '%s': %s: renaming to '%s'\n", dev_name(dev),
+ __func__, new_name);
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+ if ((dev->class) && (dev->parent))
+ old_class_name = make_class_name(dev->class->name, &dev->kobj);
+#endif
+
+ old_device_name = kmalloc(BUS_ID_SIZE, GFP_KERNEL);
+ if (!old_device_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ strlcpy(old_device_name, dev->bus_id, BUS_ID_SIZE);
+ strlcpy(dev->bus_id, new_name, BUS_ID_SIZE);
+
+ error = kobject_rename(&dev->kobj, new_name);
+ if (error) {
+ strlcpy(dev->bus_id, old_device_name, BUS_ID_SIZE);
+ goto out;
+ }
+
+#ifdef CONFIG_SYSFS_DEPRECATED
+ if (old_class_name) {
+ new_class_name = make_class_name(dev->class->name, &dev->kobj);
+ if (new_class_name) {
+ error = sysfs_create_link_nowarn(&dev->parent->kobj,
+ &dev->kobj,
+ new_class_name);
+ if (error)
+ goto out;
+ sysfs_remove_link(&dev->parent->kobj, old_class_name);
+ }
+ }
+#else
+ if (dev->class) {
+ error = sysfs_create_link_nowarn(&dev->class->p->class_subsys.kobj,
+ &dev->kobj, dev_name(dev));
+ if (error)
+ goto out;
+ sysfs_remove_link(&dev->class->p->class_subsys.kobj,
+ old_device_name);
+ }
+#endif
+
+out:
+ put_device(dev);
+
+ kfree(new_class_name);
+ kfree(old_class_name);
+ kfree(old_device_name);
+
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_rename);
+
+static int device_move_class_links(struct device *dev,
+ struct device *old_parent,
+ struct device *new_parent)
+{
+ int error = 0;
+#ifdef CONFIG_SYSFS_DEPRECATED
+ char *class_name;
+
+ class_name = make_class_name(dev->class->name, &dev->kobj);
+ if (!class_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (old_parent) {
+ sysfs_remove_link(&dev->kobj, "device");
+ sysfs_remove_link(&old_parent->kobj, class_name);
+ }
+ if (new_parent) {
+ error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
+ "device");
+ if (error)
+ goto out;
+ error = sysfs_create_link(&new_parent->kobj, &dev->kobj,
+ class_name);
+ if (error)
+ sysfs_remove_link(&dev->kobj, "device");
+ } else
+ error = 0;
+out:
+ kfree(class_name);
+ return error;
+#else
+ if (old_parent)
+ sysfs_remove_link(&dev->kobj, "device");
+ if (new_parent)
+ error = sysfs_create_link(&dev->kobj, &new_parent->kobj,
+ "device");
+ return error;
+#endif
+}
+
+/**
+ * device_move - moves a device to a new parent
+ * @dev: the pointer to the struct device to be moved
+ * @new_parent: the new parent of the device (can be NULL)
+ */
+int device_move(struct device *dev, struct device *new_parent)
+{
+ int error;
+ struct device *old_parent;
+ struct kobject *new_parent_kobj;
+
+ dev = get_device(dev);
+ if (!dev)
+ return -EINVAL;
+
+ new_parent = get_device(new_parent);
+ new_parent_kobj = get_device_parent(dev, new_parent);
+
+ pr_debug("device: '%s': %s: moving to '%s'\n", dev_name(dev),
+ __func__, new_parent ? dev_name(new_parent) : "<NULL>");
+ error = kobject_move(&dev->kobj, new_parent_kobj);
+ if (error) {
+ cleanup_glue_dir(dev, new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
+ old_parent = dev->parent;
+ dev->parent = new_parent;
+ if (old_parent)
+ klist_remove(&dev->knode_parent);
+ if (new_parent) {
+ klist_add_tail(&dev->knode_parent, &new_parent->klist_children);
+ set_dev_node(dev, dev_to_node(new_parent));
+ }
+
+ if (!dev->class)
+ goto out_put;
+ error = device_move_class_links(dev, old_parent, new_parent);
+ if (error) {
+ /* We ignore errors on cleanup since we're hosed anyway... */
+ device_move_class_links(dev, new_parent, old_parent);
+ if (!kobject_move(&dev->kobj, &old_parent->kobj)) {
+ if (new_parent)
+ klist_remove(&dev->knode_parent);
+ dev->parent = old_parent;
+ if (old_parent) {
+ klist_add_tail(&dev->knode_parent,
+ &old_parent->klist_children);
+ set_dev_node(dev, dev_to_node(old_parent));
+ }
+ }
+ cleanup_glue_dir(dev, new_parent_kobj);
+ put_device(new_parent);
+ goto out;
+ }
+out_put:
+ put_device(old_parent);
+out:
+ put_device(dev);
+ return error;
+}
+EXPORT_SYMBOL_GPL(device_move);
+
+/**
+ * device_shutdown - call ->shutdown() on each device to shutdown.
+ */
+void device_shutdown(void)
+{
+ struct device *dev, *devn;
+
+ list_for_each_entry_safe_reverse(dev, devn, &devices_kset->list,
+ kobj.entry) {
+ if (dev->bus && dev->bus->shutdown) {
+ dev_dbg(dev, "shutdown\n");
+ dev->bus->shutdown(dev);
+ } else if (dev->driver && dev->driver->shutdown) {
+ dev_dbg(dev, "shutdown\n");
+ dev->driver->shutdown(dev);
+ }
+ }
+ kobject_put(sysfs_dev_char_kobj);
+ kobject_put(sysfs_dev_block_kobj);
+ kobject_put(dev_kobj);
+}
diff --git a/libdde-linux26/lib/src/drivers/base/init.c b/libdde-linux26/lib/src/drivers/base/init.c
new file mode 100644
index 00000000..ca5ac986
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/base/init.c
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2002-3 Patrick Mochel
+ * Copyright (c) 2002-3 Open Source Development Labs
+ *
+ * This file is released under the GPLv2
+ */
+
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/memory.h>
+
+#include "base.h"
+
+/**
+ * driver_init - initialize driver model.
+ *
+ * Call the driver model init functions to initialize their
+ * subsystems. Called early from init/main.c.
+ */
+void __init driver_init(void)
+{
+ /* These are the core pieces */
+ devices_init();
+ buses_init();
+ classes_init();
+#ifndef DDE_LINUX
+ firmware_init();
+ hypervisor_init();
+#endif
+
+ /* These are also core pieces, but must come after the
+ * core core pieces.
+ */
+ platform_bus_init();
+#ifndef DDE_LINUX
+ system_bus_init();
+ cpu_dev_init();
+ memory_dev_init();
+ attribute_container_init();
+#endif
+}
diff --git a/libdde-linux26/lib/src/drivers/char/random.c b/libdde-linux26/lib/src/drivers/char/random.c
new file mode 100644
index 00000000..0430c9d0
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/char/random.c
@@ -0,0 +1,1709 @@
+/*
+ * random.c -- A strong random number generator
+ *
+ * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
+ *
+ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
+ * rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, and the entire permission notice in its entirety,
+ * including the disclaimer of warranties.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the author may not be used to endorse or promote
+ * products derived from this software without specific prior
+ * written permission.
+ *
+ * ALTERNATIVELY, this product may be distributed under the terms of
+ * the GNU General Public License, in which case the provisions of the GPL are
+ * required INSTEAD OF the above restrictions. (This clause is
+ * necessary due to a potential bad interaction between the GPL and
+ * the restrictions contained in a BSD-style copyright.)
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
+ * WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+ * USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
+ */
+
+/*
+ * (now, with legal B.S. out of the way.....)
+ *
+ * This routine gathers environmental noise from device drivers, etc.,
+ * and returns good random numbers, suitable for cryptographic use.
+ * Besides the obvious cryptographic uses, these numbers are also good
+ * for seeding TCP sequence numbers, and other places where it is
+ * desirable to have numbers which are not only random, but hard to
+ * predict by an attacker.
+ *
+ * Theory of operation
+ * ===================
+ *
+ * Computers are very predictable devices. Hence it is extremely hard
+ * to produce truly random numbers on a computer --- as opposed to
+ * pseudo-random numbers, which can easily be generated by using an
+ * algorithm. Unfortunately, it is very easy for attackers to guess
+ * the sequence produced by a pseudo-random number generator, and for some
+ * applications this is not acceptable. So instead, we must try to
+ * gather "environmental noise" from the computer's environment, which
+ * must be hard for outside attackers to observe, and use that to
+ * generate random numbers. In a Unix environment, this is best done
+ * from inside the kernel.
+ *
+ * Sources of randomness from the environment include inter-keyboard
+ * timings, inter-interrupt timings from some interrupts, and other
+ * events which are both (a) non-deterministic and (b) hard for an
+ * outside observer to measure. Randomness from these sources is
+ * added to an "entropy pool", which is mixed using a CRC-like function.
+ * This is not cryptographically strong, but it is adequate assuming
+ * the randomness is not chosen maliciously, and it is fast enough that
+ * the overhead of doing it on every interrupt is very reasonable.
+ * As random bytes are mixed into the entropy pool, the routines keep
+ * an *estimate* of how many bits of randomness have been stored into
+ * the random number generator's internal state.
+ *
+ * When random bytes are desired, they are obtained by taking the SHA
+ * hash of the contents of the "entropy pool". The SHA hash avoids
+ * exposing the internal state of the entropy pool. It is believed to
+ * be computationally infeasible to derive any useful information
+ * about the input of SHA from its output. Even if it is possible to
+ * analyze SHA in some clever way, as long as the amount of data
+ * returned from the generator is less than the inherent entropy in
+ * the pool, the output data is totally unpredictable. For this
+ * reason, the routine decreases its internal estimate of how many
+ * bits of "true randomness" are contained in the entropy pool as it
+ * outputs random numbers.
+ *
+ * If this estimate goes to zero, the routine can still generate
+ * random numbers; however, an attacker may (at least in theory) be
+ * able to infer the future output of the generator from prior
+ * outputs. This requires successful cryptanalysis of SHA, which is
+ * not believed to be feasible, but there is a remote possibility.
+ * Nonetheless, these numbers should be useful for the vast majority
+ * of purposes.
+ *
+ * Exported interfaces ---- output
+ * ===============================
+ *
+ * There are three exported interfaces; the first is one designed to
+ * be used from within the kernel:
+ *
+ * void get_random_bytes(void *buf, int nbytes);
+ *
+ * This interface will return the requested number of random bytes,
+ * and place it in the requested buffer.
+ *
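+ * As a usage sketch (the buffer name and size here are hypothetical),
+ * a driver needing a random 128-bit key could do:
+ *
+ *	u8 key[16];
+ *
+ *	get_random_bytes(key, sizeof(key));
+ *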
+ * The two other interfaces are two character devices /dev/random and
+ * /dev/urandom. /dev/random is suitable for use when very high
+ * quality randomness is desired (for example, for key generation or
+ * one-time pads), as it will only return a maximum of the number of
+ * bits of randomness (as estimated by the random number generator)
+ * contained in the entropy pool.
+ *
+ * The /dev/urandom device does not have this limit, and will return
+ * as many bytes as are requested. As more and more random bytes are
+ * requested without giving time for the entropy pool to recharge,
+ * this will result in random numbers that are merely cryptographically
+ * strong. For many applications, however, this is acceptable.
+ *
+ * Exported interfaces ---- input
+ * ==============================
+ *
+ * The current exported interfaces for gathering environmental noise
+ * from the devices are:
+ *
+ * void add_input_randomness(unsigned int type, unsigned int code,
+ * unsigned int value);
+ * void add_interrupt_randomness(int irq);
+ *
+ * add_input_randomness() uses the input layer interrupt timing, as well as
+ * the event type information from the hardware.
+ *
+ * add_interrupt_randomness() uses the inter-interrupt timing as random
+ * inputs to the entropy pool. Note that not all interrupts are good
+ * sources of randomness! For example, the timer interrupt is not a
+ * good choice, because the periodicity of the interrupts is too
+ * regular, and hence predictable to an attacker. Disk interrupts are
+ * a better measure, since the timing of disk interrupts is more
+ * unpredictable.
+ *
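+ * As an illustrative sketch only (the handler and names below are
+ * hypothetical; in-tree drivers normally arrange for this call by
+ * passing IRQF_SAMPLE_RANDOM to request_irq() rather than invoking it
+ * by hand), an interrupt handler could feed the pool like this:
+ *
+ *	static irqreturn_t foo_interrupt(int irq, void *dev_id)
+ *	{
+ *		...
+ *		add_interrupt_randomness(irq);
+ *		return IRQ_HANDLED;
+ *	}
+ *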
+ * All of these routines try to estimate how many bits of randomness a
+ * particular randomness source contributes. They do this by keeping
+ * track of the first and second order deltas of the event timings.
+ *
+ * Ensuring unpredictability at system startup
+ * ============================================
+ *
+ * When any operating system starts up, it will go through a sequence
+ * of actions that are fairly predictable by an adversary, especially
+ * if the start-up does not involve interaction with a human operator.
+ * This reduces the actual number of bits of unpredictability in the
+ * entropy pool below the value in entropy_count. In order to
+ * counteract this effect, it helps to carry information in the
+ * entropy pool across shut-downs and start-ups. To do this, put the
+ * following lines in an appropriate script which is run during the boot
+ * sequence:
+ *
+ * echo "Initializing random number generator..."
+ * random_seed=/var/run/random-seed
+ * # Carry a random seed from start-up to start-up
+ * # Load and then save the whole entropy pool
+ * if [ -f $random_seed ]; then
+ * cat $random_seed >/dev/urandom
+ * else
+ * touch $random_seed
+ * fi
+ * chmod 600 $random_seed
+ * dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * and the following lines in an appropriate script which is run as
+ * the system is shut down:
+ *
+ * # Carry a random seed from shut-down to start-up
+ * # Save the whole entropy pool
+ * echo "Saving random seed..."
+ * random_seed=/var/run/random-seed
+ * touch $random_seed
+ * chmod 600 $random_seed
+ * dd if=/dev/urandom of=$random_seed count=1 bs=512
+ *
+ * For example, on most modern systems using the System V init
+ * scripts, such code fragments would be found in
+ * /etc/rc.d/init.d/random. On older Linux systems, the correct script
+ * location might be in /etc/rc.d/rc.local or /etc/rc.d/rc.0.
+ *
+ * Effectively, these commands cause the contents of the entropy pool
+ * to be saved at shut-down time and reloaded into the entropy pool at
+ * start-up. (The 'dd' addition to the bootup script is to
+ * make sure that /etc/random-seed is different for every start-up,
+ * even if the system crashes without executing rc.0.) Even with
+ * complete knowledge of the start-up activities, predicting the state
+ * of the entropy pool requires knowledge of the previous history of
+ * the system.
+ *
+ * Configuring the /dev/random driver under Linux
+ * ==============================================
+ *
+ * The /dev/random driver under Linux uses minor numbers 8 and 9 of
+ * the /dev/mem major number (#1). So if your system does not have
+ * /dev/random and /dev/urandom created already, they can be created
+ * by using the commands:
+ *
+ * mknod /dev/random c 1 8
+ * mknod /dev/urandom c 1 9
+ *
+ * Acknowledgements:
+ * =================
+ *
+ * Ideas for constructing this random number generator were derived
+ * from Pretty Good Privacy's random number generator, and from private
+ * discussions with Phil Karn. Colin Plumb provided a faster random
+ * number generator, which speeds up the mixing function of the entropy
+ * pool, taken from PGPfone. Dale Worley has also contributed many
+ * useful ideas and suggestions to improve this driver.
+ *
+ * Any flaws in the design are solely my responsibility, and should
+ * not be attributed to Phil, Colin, or any of the authors of PGP.
+ *
+ * Further background information on this topic may be obtained from
+ * RFC 1750, "Randomness Recommendations for Security", by Donald
+ * Eastlake, Steve Crocker, and Jeff Schiller.
+ */
+
+#ifdef DDE_LINUX
+#include <ddekit/resources.h>
+#else
+
+#include <linux/utsname.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/major.h>
+#include <linux/string.h>
+#include <linux/fcntl.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/poll.h>
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/percpu.h>
+#include <linux/cryptohash.h>
+
+#include <asm/processor.h>
+#include <asm/uaccess.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+/*
+ * Configuration information
+ */
+#define INPUT_POOL_WORDS 128
+#define OUTPUT_POOL_WORDS 32
+#define SEC_XFER_SIZE 512
+
+/*
+ * The minimum number of bits of entropy before we wake up a read on
+ * /dev/random. Should be enough to do a significant reseed.
+ */
+static int random_read_wakeup_thresh = 64;
+
+/*
+ * If the entropy count falls under this number of bits, then we
+ * should wake up processes which are selecting or polling on write
+ * access to /dev/random.
+ */
+static int random_write_wakeup_thresh = 128;
+
+/*
+ * When the input pool goes over trickle_thresh, start dropping most
+ * samples to avoid wasting CPU time and reduce lock contention.
+ */
+
+static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28;
+
+static DEFINE_PER_CPU(int, trickle_count);
+
+/*
+ * A pool of size .poolwords is stirred with a primitive polynomial
+ * of degree .poolwords over GF(2). The taps for various sizes are
+ * defined below. They are chosen to be evenly spaced (minimum RMS
+ * distance from evenly spaced; the numbers in the comments are a
+ * scaled squared error sum) except for the last tap, which is 1 to
+ * get the twisting happening as fast as possible.
+ */
+static struct poolinfo {
+ int poolwords;
+ int tap1, tap2, tap3, tap4, tap5;
+} poolinfo_table[] = {
+ /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
+ { 128, 103, 76, 51, 25, 1 },
+ /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
+ { 32, 26, 20, 14, 7, 1 },
+#if 0
+ /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
+ { 2048, 1638, 1231, 819, 411, 1 },
+
+ /* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 -- 290 */
+ { 1024, 817, 615, 412, 204, 1 },
+
+ /* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 -- 115 */
+ { 1024, 819, 616, 410, 207, 2 },
+
+ /* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 -- 225 */
+ { 512, 411, 308, 208, 104, 1 },
+
+ /* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 -- 95 */
+ { 512, 409, 307, 206, 102, 2 },
+ /* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 -- 95 */
+ { 512, 409, 309, 205, 103, 2 },
+
+ /* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 -- 125 */
+ { 256, 205, 155, 101, 52, 1 },
+
+ /* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 -- 70 */
+ { 128, 103, 78, 51, 27, 2 },
+
+ /* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 -- 15 */
+ { 64, 52, 39, 26, 14, 1 },
+#endif
+};
+
+#define POOLBITS poolwords*32
+#define POOLBYTES poolwords*4
+
+/*
+ * For the purposes of better mixing, we use the CRC-32 polynomial as
+ * well to make a twisted Generalized Feedback Shift Register
+ *
+ * (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR generators. ACM
+ * Transactions on Modeling and Computer Simulation 2(3):179-194.
+ * Also see M. Matsumoto & Y. Kurita, 1994. Twisted GFSR generators
+ * II. ACM Transactions on Modeling and Computer Simulation 4:254-266)
+ *
+ * Thanks to Colin Plumb for suggesting this.
+ *
+ * We have not analyzed the resultant polynomial to prove it primitive;
+ * in fact it almost certainly isn't. Nonetheless, the irreducible factors
+ * of a random large-degree polynomial over GF(2) are more than large enough
+ * that periodicity is not a concern.
+ *
+ * The input hash is much less sensitive than the output hash. All
+ * that we want of it is that it be a good non-cryptographic hash;
+ * i.e. it not produce collisions when fed "random" data of the sort
+ * we expect to see. As long as the pool state differs for different
+ * inputs, we have preserved the input entropy and done a good job.
+ * The fact that an intelligent attacker can construct inputs that
+ * will produce controlled alterations to the pool's state is not
+ * important because we don't consider such inputs to contribute any
+ * randomness. The only property we need with respect to them is that
+ * the attacker can't increase his/her knowledge of the pool's state.
+ * Since all additions are reversible (knowing the final state and the
+ * input, you can reconstruct the initial state), if an attacker has
+ * any uncertainty about the initial state, he/she can only shuffle
+ * that uncertainty about, but never cause any collisions (which would
+ * decrease the uncertainty).
+ *
+ * The chosen system lets the state of the pool be (essentially) the input
+ * modulo the generator polynomial. Now, for random primitive polynomials,
+ * this is a universal class of hash functions, meaning that the chance
+ * of a collision is limited by the attacker's knowledge of the generator
+ * polynomial, so if it is chosen at random, an attacker can never force
+ * a collision. Here, we use a fixed polynomial, but we *can* assume that
+ * ###--> it is unknown to the processes generating the input entropy. <-###
+ * Because of this important property, this is a good, collision-resistant
+ * hash; hash collisions will occur no more often than chance.
+ */
+
+/*
+ * Static global variables
+ */
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
+static struct fasync_struct *fasync;
+
+#if 0
+static int debug;
+module_param(debug, bool, 0644);
+#define DEBUG_ENT(fmt, arg...) do { \
+ if (debug) \
+ printk(KERN_DEBUG "random %04d %04d %04d: " \
+ fmt,\
+ input_pool.entropy_count,\
+ blocking_pool.entropy_count,\
+ nonblocking_pool.entropy_count,\
+ ## arg); } while (0)
+#else
+#define DEBUG_ENT(fmt, arg...) do {} while (0)
+#endif
+
+/**********************************************************************
+ *
+ * OS independent entropy store. Here are the functions which handle
+ * storing entropy in an entropy pool.
+ *
+ **********************************************************************/
+
+struct entropy_store;
+struct entropy_store {
+ /* read-only data: */
+ struct poolinfo *poolinfo;
+ __u32 *pool;
+ const char *name;
+ int limit;
+ struct entropy_store *pull;
+
+ /* read-write data: */
+ spinlock_t lock;
+ unsigned add_ptr;
+ int entropy_count;
+ int input_rotate;
+};
+
+static __u32 input_pool_data[INPUT_POOL_WORDS];
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 nonblocking_pool_data[OUTPUT_POOL_WORDS];
+
+static struct entropy_store input_pool = {
+ .poolinfo = &poolinfo_table[0],
+ .name = "input",
+ .limit = 1,
+ .lock = __SPIN_LOCK_UNLOCKED(&input_pool.lock),
+ .pool = input_pool_data
+};
+
+static struct entropy_store blocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "blocking",
+ .limit = 1,
+ .pull = &input_pool,
+ .lock = __SPIN_LOCK_UNLOCKED(&blocking_pool.lock),
+ .pool = blocking_pool_data
+};
+
+static struct entropy_store nonblocking_pool = {
+ .poolinfo = &poolinfo_table[1],
+ .name = "nonblocking",
+ .pull = &input_pool,
+ .lock = __SPIN_LOCK_UNLOCKED(&nonblocking_pool.lock),
+ .pool = nonblocking_pool_data
+};
+
+/*
+ * This function adds bytes into the entropy "pool". It does not
+ * update the entropy estimate. The caller should call
+ * credit_entropy_bits if this is appropriate.
+ *
+ * The pool is stirred with a primitive polynomial of the appropriate
+ * degree, and then twisted. We twist by three bits at a time because
+ * it's cheap to do so and helps slightly in the expected case where
+ * the entropy is concentrated in the low-order bits.
+ */
+static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+ unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
+ int input_rotate;
+ int wordmask = r->poolinfo->poolwords - 1;
+ const char *bytes = in;
+ __u32 w;
+ unsigned long flags;
+
+ /* Taps are constant, so we can load them without holding r->lock. */
+ tap1 = r->poolinfo->tap1;
+ tap2 = r->poolinfo->tap2;
+ tap3 = r->poolinfo->tap3;
+ tap4 = r->poolinfo->tap4;
+ tap5 = r->poolinfo->tap5;
+
+ spin_lock_irqsave(&r->lock, flags);
+ input_rotate = r->input_rotate;
+ i = r->add_ptr;
+
+ /* mix one byte at a time to simplify size handling and churn faster */
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31);
+ i = (i - 1) & wordmask;
+
+ /* XOR in the various taps */
+ w ^= r->pool[i];
+ w ^= r->pool[(i + tap1) & wordmask];
+ w ^= r->pool[(i + tap2) & wordmask];
+ w ^= r->pool[(i + tap3) & wordmask];
+ w ^= r->pool[(i + tap4) & wordmask];
+ w ^= r->pool[(i + tap5) & wordmask];
+
+ /* Mix the result back in with a twist */
+ r->pool[i] = (w >> 3) ^ twist_table[w & 7];
+
+ /*
+ * Normally, we add 7 bits of rotation to the pool.
+ * At the beginning of the pool, add an extra 7 bits
+ * rotation, so that successive passes spread the
+ * input bits across the pool evenly.
+ */
+ input_rotate += i ? 7 : 14;
+ }
+
+ r->input_rotate = input_rotate;
+ r->add_ptr = i;
+
+ if (out)
+ for (j = 0; j < 16; j++)
+ ((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+
+ spin_unlock_irqrestore(&r->lock, flags);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+{
+ mix_pool_bytes_extract(r, in, bytes, NULL);
+}
+
+/*
+ * Credit (or debit) the entropy store with n bits of entropy
+ */
+static void credit_entropy_bits(struct entropy_store *r, int nbits)
+{
+ unsigned long flags;
+ int entropy_count;
+
+ if (!nbits)
+ return;
+
+ spin_lock_irqsave(&r->lock, flags);
+
+ DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
+ entropy_count = r->entropy_count;
+ entropy_count += nbits;
+ if (entropy_count < 0) {
+ DEBUG_ENT("negative entropy/overflow\n");
+ entropy_count = 0;
+ } else if (entropy_count > r->poolinfo->POOLBITS)
+ entropy_count = r->poolinfo->POOLBITS;
+ r->entropy_count = entropy_count;
+
+ /* should we wake readers? */
+ if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
+ wake_up_interruptible(&random_read_wait);
+ kill_fasync(&fasync, SIGIO, POLL_IN);
+ }
+ spin_unlock_irqrestore(&r->lock, flags);
+}
+
+/*********************************************************************
+ *
+ * Entropy input management
+ *
+ *********************************************************************/
+
+/* There is one of these per entropy source */
+struct timer_rand_state {
+ cycles_t last_time;
+ long last_delta, last_delta2;
+ unsigned dont_count_entropy:1;
+};
+
+#ifndef CONFIG_SPARSE_IRQ
+
+static struct timer_rand_state *irq_timer_state[NR_IRQS];
+
+static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ return irq_timer_state[irq];
+}
+
+static void set_timer_rand_state(unsigned int irq,
+ struct timer_rand_state *state)
+{
+ irq_timer_state[irq] = state;
+}
+
+#else
+
+static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ return desc->timer_rand_state;
+}
+
+static void set_timer_rand_state(unsigned int irq,
+ struct timer_rand_state *state)
+{
+ struct irq_desc *desc;
+
+ desc = irq_to_desc(irq);
+
+ desc->timer_rand_state = state;
+}
+#endif
+
+static struct timer_rand_state input_timer_state;
+
+/*
+ * This function adds entropy to the entropy "pool" by using timing
+ * delays. It uses the timer_rand_state structure to make an estimate
+ * of how many bits of entropy this call has added to the pool.
+ *
+ * The number "num" is also added to the pool - it should somehow describe
+ * the type of event which just happened. This is currently 0-255 for
+ * keyboard scan codes, and 256 upwards for interrupts.
+ *
+ */
+static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+{
+ struct {
+ cycles_t cycles;
+ long jiffies;
+ unsigned num;
+ } sample;
+ long delta, delta2, delta3;
+
+ preempt_disable();
+ /* if over the trickle threshold, use only 1 in 4096 samples */
+ if (input_pool.entropy_count > trickle_thresh &&
+ (__get_cpu_var(trickle_count)++ & 0xfff))
+ goto out;
+
+ sample.jiffies = jiffies;
+ sample.cycles = get_cycles();
+ sample.num = num;
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+
+ /*
+ * Calculate number of bits of randomness we probably added.
+ * We take into account the first, second and third-order deltas
+ * in order to make our estimate.
+ */
+
+ if (!state->dont_count_entropy) {
+ delta = sample.jiffies - state->last_time;
+ state->last_time = sample.jiffies;
+
+ delta2 = delta - state->last_delta;
+ state->last_delta = delta;
+
+ delta3 = delta2 - state->last_delta2;
+ state->last_delta2 = delta2;
+
+ if (delta < 0)
+ delta = -delta;
+ if (delta2 < 0)
+ delta2 = -delta2;
+ if (delta3 < 0)
+ delta3 = -delta3;
+ if (delta > delta2)
+ delta = delta2;
+ if (delta > delta3)
+ delta = delta3;
+
+ /*
+ * delta is now minimum absolute delta.
+ * Round down by 1 bit on general principles,
+		 * and limit entropy estimate to 12 bits.
+ */
+ credit_entropy_bits(&input_pool,
+ min_t(int, fls(delta>>1), 11));
+ }
+out:
+ preempt_enable();
+}
+
+void add_input_randomness(unsigned int type, unsigned int code,
+ unsigned int value)
+{
+ static unsigned char last_value;
+
+ /* ignore autorepeat and the like */
+ if (value == last_value)
+ return;
+
+ DEBUG_ENT("input event\n");
+ last_value = value;
+ add_timer_randomness(&input_timer_state,
+ (type << 4) ^ code ^ (code >> 4) ^ value);
+}
+EXPORT_SYMBOL_GPL(add_input_randomness);
+
+void add_interrupt_randomness(int irq)
+{
+ struct timer_rand_state *state;
+
+ state = get_timer_rand_state(irq);
+
+ if (state == NULL)
+ return;
+
+ DEBUG_ENT("irq event %d\n", irq);
+ add_timer_randomness(state, 0x100 + irq);
+}
+
+#ifdef CONFIG_BLOCK
+void add_disk_randomness(struct gendisk *disk)
+{
+ if (!disk || !disk->random)
+ return;
+ /* first major is 1, so we get >= 0x200 here */
+ DEBUG_ENT("disk event %d:%d\n",
+ MAJOR(disk_devt(disk)), MINOR(disk_devt(disk)));
+
+ add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
+}
+#endif
+
+#define EXTRACT_SIZE 10
+
+/*********************************************************************
+ *
+ * Entropy extraction routines
+ *
+ *********************************************************************/
+
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int rsvd);
+
+/*
+ * This utility function is responsible for transferring entropy
+ * from the primary pool to the secondary extraction pool. We make
+ * sure we pull enough for a 'catastrophic reseed'.
+ */
+static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
+{
+ __u32 tmp[OUTPUT_POOL_WORDS];
+
+ if (r->pull && r->entropy_count < nbytes * 8 &&
+ r->entropy_count < r->poolinfo->POOLBITS) {
+		/* If we're unlimited, reserve two wakeup-worths of
+		 * bytes in the pull pool for the limited pool */
+ int rsvd = r->limit ? 0 : random_read_wakeup_thresh/4;
+ int bytes = nbytes;
+
+		/* pull at least as many bytes as one wakeup's worth of bits */
+ bytes = max_t(int, bytes, random_read_wakeup_thresh / 8);
+ /* but never more than the buffer size */
+ bytes = min_t(int, bytes, sizeof(tmp));
+
+ DEBUG_ENT("going to reseed %s with %d bits "
+ "(%d of %d requested)\n",
+ r->name, bytes * 8, nbytes * 8, r->entropy_count);
+
+ bytes = extract_entropy(r->pull, tmp, bytes,
+ random_read_wakeup_thresh / 8, rsvd);
+ mix_pool_bytes(r, tmp, bytes);
+ credit_entropy_bits(r, bytes*8);
+ }
+}
+
+/*
+ * These functions extract randomness from the "entropy pool", and
+ * return it in a buffer.
+ *
+ * The min parameter specifies the minimum amount we can pull before
+ * failing to avoid races that defeat catastrophic reseeding while the
+ * reserved parameter indicates how much entropy we must leave in the
+ * pool after each pull to avoid starving other readers.
+ *
+ * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words.
+ */
+
+static size_t account(struct entropy_store *r, size_t nbytes, int min,
+ int reserved)
+{
+ unsigned long flags;
+
+ /* Hold lock while accounting */
+ spin_lock_irqsave(&r->lock, flags);
+
+ BUG_ON(r->entropy_count > r->poolinfo->POOLBITS);
+ DEBUG_ENT("trying to extract %d bits from %s\n",
+ nbytes * 8, r->name);
+
+ /* Can we pull enough? */
+ if (r->entropy_count / 8 < min + reserved) {
+ nbytes = 0;
+ } else {
+ /* If limited, never pull more than available */
+ if (r->limit && nbytes + reserved >= r->entropy_count / 8)
+ nbytes = r->entropy_count/8 - reserved;
+
+ if (r->entropy_count / 8 >= nbytes + reserved)
+ r->entropy_count -= nbytes*8;
+ else
+ r->entropy_count = reserved;
+
+ if (r->entropy_count < random_write_wakeup_thresh) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
+ }
+
+ DEBUG_ENT("debiting %d entropy credits from %s%s\n",
+ nbytes * 8, r->name, r->limit ? "" : " (unlimited)");
+
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ return nbytes;
+}
+
+static void extract_buf(struct entropy_store *r, __u8 *out)
+{
+ int i;
+ __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ __u8 extract[64];
+
+ /* Generate a hash across the pool, 16 words (512 bits) at a time */
+ sha_init(hash);
+ for (i = 0; i < r->poolinfo->poolwords; i += 16)
+ sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+
+ /*
+ * We mix the hash back into the pool to prevent backtracking
+ * attacks (where the attacker knows the state of the pool
+ * plus the current outputs, and attempts to find previous
+	 * outputs), unless the hash function can be inverted. By
+ * mixing at least a SHA1 worth of hash data back, we make
+ * brute-forcing the feedback as hard as brute-forcing the
+ * hash.
+ */
+ mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+
+ /*
+ * To avoid duplicates, we atomically extract a portion of the
+ * pool while mixing, and hash one final time.
+ */
+ sha_transform(hash, extract, workspace);
+ memset(extract, 0, sizeof(extract));
+ memset(workspace, 0, sizeof(workspace));
+
+ /*
+ * In case the hash function has some recognizable output
+ * pattern, we fold it in half. Thus, we always feed back
+ * twice as much data as we output.
+ */
+ hash[0] ^= hash[3];
+ hash[1] ^= hash[4];
+ hash[2] ^= rol32(hash[2], 16);
+ memcpy(out, hash, EXTRACT_SIZE);
+ memset(hash, 0, sizeof(hash));
+}
+
+static ssize_t extract_entropy(struct entropy_store *r, void *buf,
+ size_t nbytes, int min, int reserved)
+{
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, min, reserved);
+
+ while (nbytes) {
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+ memcpy(buf, tmp, i);
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ /* Wipe data just returned from memory */
+ memset(tmp, 0, sizeof(tmp));
+
+ return ret;
+}
+
+static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
+ size_t nbytes)
+{
+ ssize_t ret = 0, i;
+ __u8 tmp[EXTRACT_SIZE];
+
+ xfer_secondary_pool(r, nbytes);
+ nbytes = account(r, nbytes, 0, 0);
+
+ while (nbytes) {
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+ schedule();
+ }
+
+ extract_buf(r, tmp);
+ i = min_t(int, nbytes, EXTRACT_SIZE);
+ if (copy_to_user(buf, tmp, i)) {
+ ret = -EFAULT;
+ break;
+ }
+
+ nbytes -= i;
+ buf += i;
+ ret += i;
+ }
+
+ /* Wipe data just returned from memory */
+ memset(tmp, 0, sizeof(tmp));
+
+ return ret;
+}
+
+#endif
+
+/*
+ * This function is the exported kernel interface. It returns some
+ * number of good random numbers, suitable for seeding TCP sequence
+ * numbers, etc.
+ */
+void get_random_bytes(void *buf, int nbytes)
+{
+#ifndef DDE_LINUX
+ extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0);
+#else
+ int i;
+ int nlwords = nbytes / sizeof (long);
+ for (i = 0; i < nlwords; i++)
+ ((long *) buf)[i] = ddekit_random ();
+ for (i = nlwords * sizeof (long); i < nbytes; i++)
+ ((char *) buf)[i] = (char) ddekit_random ();
+#endif
+}
+EXPORT_SYMBOL(get_random_bytes);
+
+#ifndef DDE_LINUX
+/*
+ * init_std_data - initialize pool with system data
+ *
+ * @r: pool to initialize
+ *
+ * This function clears the pool's entropy count and mixes some system
+ * data into the pool to prepare it for use. The pool is not cleared
+ * as that can only decrease the entropy in the pool.
+ */
+static void init_std_data(struct entropy_store *r)
+{
+ ktime_t now;
+ unsigned long flags;
+
+ spin_lock_irqsave(&r->lock, flags);
+ r->entropy_count = 0;
+ spin_unlock_irqrestore(&r->lock, flags);
+
+ now = ktime_get_real();
+ mix_pool_bytes(r, &now, sizeof(now));
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+}
+
+static int rand_initialize(void)
+{
+ init_std_data(&input_pool);
+ init_std_data(&blocking_pool);
+ init_std_data(&nonblocking_pool);
+ return 0;
+}
+module_init(rand_initialize);
+
+void rand_initialize_irq(int irq)
+{
+ struct timer_rand_state *state;
+
+ state = get_timer_rand_state(irq);
+
+ if (state)
+ return;
+
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state)
+ set_timer_rand_state(irq, state);
+}
+
+#ifdef CONFIG_BLOCK
+void rand_initialize_disk(struct gendisk *disk)
+{
+ struct timer_rand_state *state;
+
+ /*
+ * If kzalloc returns null, we just won't use that entropy
+ * source.
+ */
+ state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
+ if (state)
+ disk->random = state;
+}
+#endif
+
+static ssize_t
+random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ ssize_t n, retval = 0, count = 0;
+
+ if (nbytes == 0)
+ return 0;
+
+ while (nbytes > 0) {
+ n = nbytes;
+ if (n > SEC_XFER_SIZE)
+ n = SEC_XFER_SIZE;
+
+ DEBUG_ENT("reading %d bits\n", n*8);
+
+ n = extract_entropy_user(&blocking_pool, buf, n);
+
+ DEBUG_ENT("read got %d bits (%d still needed)\n",
+ n*8, (nbytes-n)*8);
+
+ if (n == 0) {
+ if (file->f_flags & O_NONBLOCK) {
+ retval = -EAGAIN;
+ break;
+ }
+
+ DEBUG_ENT("sleeping?\n");
+
+ wait_event_interruptible(random_read_wait,
+ input_pool.entropy_count >=
+ random_read_wakeup_thresh);
+
+ DEBUG_ENT("awake\n");
+
+ if (signal_pending(current)) {
+ retval = -ERESTARTSYS;
+ break;
+ }
+
+ continue;
+ }
+
+ if (n < 0) {
+ retval = n;
+ break;
+ }
+ count += n;
+ buf += n;
+ nbytes -= n;
+ break; /* This break makes the device work */
+ /* like a named pipe */
+ }
+
+ /*
+ * If we gave the user some bytes, update the access time.
+ */
+ if (count)
+ file_accessed(file);
+
+ return (count ? count : retval);
+}
+
+static ssize_t
+urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
+{
+ return extract_entropy_user(&nonblocking_pool, buf, nbytes);
+}
+
+static unsigned int
+random_poll(struct file *file, poll_table * wait)
+{
+ unsigned int mask;
+
+ poll_wait(file, &random_read_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
+ if (input_pool.entropy_count >= random_read_wakeup_thresh)
+ mask |= POLLIN | POLLRDNORM;
+ if (input_pool.entropy_count < random_write_wakeup_thresh)
+ mask |= POLLOUT | POLLWRNORM;
+ return mask;
+}
+
+static int
+write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
+{
+ size_t bytes;
+ __u32 buf[16];
+ const char __user *p = buffer;
+
+ while (count > 0) {
+ bytes = min(count, sizeof(buf));
+ if (copy_from_user(&buf, p, bytes))
+ return -EFAULT;
+
+ count -= bytes;
+ p += bytes;
+
+ mix_pool_bytes(r, buf, bytes);
+ cond_resched();
+ }
+
+ return 0;
+}
+
+static ssize_t random_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+	ssize_t ret;
+ struct inode *inode = file->f_path.dentry->d_inode;
+
+ ret = write_pool(&blocking_pool, buffer, count);
+ if (ret)
+ return ret;
+ ret = write_pool(&nonblocking_pool, buffer, count);
+ if (ret)
+ return ret;
+
+ inode->i_mtime = current_fs_time(inode->i_sb);
+ mark_inode_dirty(inode);
+ return (ssize_t)count;
+}
+
+static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int size, ent_count;
+ int __user *p = (int __user *)arg;
+ int retval;
+
+ switch (cmd) {
+ case RNDGETENTCNT:
+ /* inherently racy, no point locking */
+ if (put_user(input_pool.entropy_count, p))
+ return -EFAULT;
+ return 0;
+ case RNDADDTOENTCNT:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p))
+ return -EFAULT;
+ credit_entropy_bits(&input_pool, ent_count);
+ return 0;
+ case RNDADDENTROPY:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (get_user(ent_count, p++))
+ return -EFAULT;
+ if (ent_count < 0)
+ return -EINVAL;
+ if (get_user(size, p++))
+ return -EFAULT;
+ retval = write_pool(&input_pool, (const char __user *)p,
+ size);
+ if (retval < 0)
+ return retval;
+ credit_entropy_bits(&input_pool, ent_count);
+ return 0;
+ case RNDZAPENTCNT:
+ case RNDCLEARPOOL:
+ /* Clear the entropy pool counters. */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ rand_initialize();
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
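+
+/*
+ * Illustrative userspace sketch (not part of this driver) of the
+ * RNDADDENTROPY layout handled above; the descriptor fd, the source
+ * of the 64 sample bytes, and the full-entropy credit are all
+ * hypothetical, and the caller needs CAP_SYS_ADMIN:
+ *
+ *	struct rand_pool_info *info = malloc(sizeof(*info) + 64);
+ *
+ *	info->entropy_count = 64 * 8;
+ *	info->buf_size = 64;
+ *	memcpy(info->buf, samples, 64);
+ *	ioctl(fd, RNDADDENTROPY, info);
+ */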
+
+static int random_fasync(int fd, struct file *filp, int on)
+{
+ return fasync_helper(fd, filp, on, &fasync);
+}
+
+const struct file_operations random_fops = {
+ .read = random_read,
+ .write = random_write,
+ .poll = random_poll,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+};
+
+const struct file_operations urandom_fops = {
+ .read = urandom_read,
+ .write = random_write,
+ .unlocked_ioctl = random_ioctl,
+ .fasync = random_fasync,
+};
+
+/***************************************************************
+ * Random UUID interface
+ *
+ * Used here for a Boot ID, but can be useful for other kernel
+ * drivers.
+ ***************************************************************/
+
+/*
+ * Generate random UUID
+ */
+void generate_random_uuid(unsigned char uuid_out[16])
+{
+ get_random_bytes(uuid_out, 16);
+	/* Set UUID version to 4 --- truly random generation */
+ uuid_out[6] = (uuid_out[6] & 0x0F) | 0x40;
+ /* Set the UUID variant to DCE */
+ uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80;
+}
+EXPORT_SYMBOL(generate_random_uuid);
+
+/********************************************************************
+ *
+ * Sysctl interface
+ *
+ ********************************************************************/
+
+#ifdef CONFIG_SYSCTL
+
+#include <linux/sysctl.h>
+
+static int min_read_thresh = 8, min_write_thresh;
+static int max_read_thresh = INPUT_POOL_WORDS * 32;
+static int max_write_thresh = INPUT_POOL_WORDS * 32;
+static char sysctl_bootid[16];
+
+/*
+ * These functions are used to return both the bootid UUID and a
+ * random UUID. The difference is in whether table->data is NULL; if it is,
+ * then a new UUID is generated and returned to the user.
+ *
+ * If the user accesses this via the proc interface, it will be returned
+ * as an ASCII string in the standard UUID format. If accessed via the
+ * sysctl system call, it is returned as 16 bytes of binary data.
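+ *
+ * For example, via the proc interface (the output is of course
+ * random; the value shown is purely illustrative):
+ *
+ *	$ cat /proc/sys/kernel/random/uuid
+ *	4bf1ad23-8b62-4f8b-a527-6c0e0a4b9e61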
+ */
+static int proc_do_uuid(ctl_table *table, int write, struct file *filp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ ctl_table fake_table;
+ unsigned char buf[64], tmp_uuid[16], *uuid;
+
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+ uuid[8] = 0;
+ }
+ if (uuid[8] == 0)
+ generate_random_uuid(uuid);
+
+ sprintf(buf, "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
+ "%02x%02x%02x%02x%02x%02x",
+ uuid[0], uuid[1], uuid[2], uuid[3],
+ uuid[4], uuid[5], uuid[6], uuid[7],
+ uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+ fake_table.data = buf;
+ fake_table.maxlen = sizeof(buf);
+
+ return proc_dostring(&fake_table, write, filp, buffer, lenp, ppos);
+}
+
+static int uuid_strategy(ctl_table *table,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen)
+{
+ unsigned char tmp_uuid[16], *uuid;
+ unsigned int len;
+
+ if (!oldval || !oldlenp)
+ return 1;
+
+ uuid = table->data;
+ if (!uuid) {
+ uuid = tmp_uuid;
+ uuid[8] = 0;
+ }
+ if (uuid[8] == 0)
+ generate_random_uuid(uuid);
+
+ if (get_user(len, oldlenp))
+ return -EFAULT;
+ if (len) {
+ if (len > 16)
+ len = 16;
+ if (copy_to_user(oldval, uuid, len) ||
+ put_user(len, oldlenp))
+ return -EFAULT;
+ }
+ return 1;
+}
+
+static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
+ctl_table random_table[] = {
+ {
+ .ctl_name = RANDOM_POOLSIZE,
+ .procname = "poolsize",
+ .data = &sysctl_poolsize,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = RANDOM_ENTROPY_COUNT,
+ .procname = "entropy_avail",
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ .data = &input_pool.entropy_count,
+ },
+ {
+ .ctl_name = RANDOM_READ_THRESH,
+ .procname = "read_wakeup_threshold",
+ .data = &random_read_wakeup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_read_thresh,
+ .extra2 = &max_read_thresh,
+ },
+ {
+ .ctl_name = RANDOM_WRITE_THRESH,
+ .procname = "write_wakeup_threshold",
+ .data = &random_write_wakeup_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &min_write_thresh,
+ .extra2 = &max_write_thresh,
+ },
+ {
+ .ctl_name = RANDOM_BOOT_ID,
+ .procname = "boot_id",
+ .data = &sysctl_bootid,
+ .maxlen = 16,
+ .mode = 0444,
+ .proc_handler = &proc_do_uuid,
+ .strategy = &uuid_strategy,
+ },
+ {
+ .ctl_name = RANDOM_UUID,
+ .procname = "uuid",
+ .maxlen = 16,
+ .mode = 0444,
+ .proc_handler = &proc_do_uuid,
+ .strategy = &uuid_strategy,
+ },
+ { .ctl_name = 0 }
+};
+#endif /* CONFIG_SYSCTL */
+
+/********************************************************************
+ *
+ * Random functions for networking
+ *
+ ********************************************************************/
+
+/*
+ * TCP initial sequence number picking. This uses the random number
+ * generator to pick an initial secret value. This value is hashed
+ * along with the TCP endpoint information to provide a unique
+ * starting point for each pair of TCP endpoints. This defeats
+ * attacks which rely on guessing the initial TCP sequence number.
+ * This algorithm was suggested by Steve Bellovin.
+ *
+ * Using a very strong hash was taking an appreciable amount of the total
+ * TCP connection establishment time, so this is a weaker hash,
+ * compensated for by changing the secret periodically.
+ */
+
+/* F, G and H are basic MD4 functions: selection, majority, parity */
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+
+/*
+ * The generic round function. The application is so specific that
+ * we don't bother protecting all the arguments with parens, as is generally
+ * good macro practice, in favor of extra legibility.
+ * Rotation is separate from addition to prevent recomputation
+ */
+#define ROUND(f, a, b, c, d, x, s) \
+ (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
+#define K1 0
+#define K2 013240474631UL
+#define K3 015666365641UL
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+
+static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
+{
+ __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
+
+ /* Round 1 */
+ ROUND(F, a, b, c, d, in[ 0] + K1, 3);
+ ROUND(F, d, a, b, c, in[ 1] + K1, 7);
+ ROUND(F, c, d, a, b, in[ 2] + K1, 11);
+ ROUND(F, b, c, d, a, in[ 3] + K1, 19);
+ ROUND(F, a, b, c, d, in[ 4] + K1, 3);
+ ROUND(F, d, a, b, c, in[ 5] + K1, 7);
+ ROUND(F, c, d, a, b, in[ 6] + K1, 11);
+ ROUND(F, b, c, d, a, in[ 7] + K1, 19);
+ ROUND(F, a, b, c, d, in[ 8] + K1, 3);
+ ROUND(F, d, a, b, c, in[ 9] + K1, 7);
+ ROUND(F, c, d, a, b, in[10] + K1, 11);
+ ROUND(F, b, c, d, a, in[11] + K1, 19);
+
+ /* Round 2 */
+ ROUND(G, a, b, c, d, in[ 1] + K2, 3);
+ ROUND(G, d, a, b, c, in[ 3] + K2, 5);
+ ROUND(G, c, d, a, b, in[ 5] + K2, 9);
+ ROUND(G, b, c, d, a, in[ 7] + K2, 13);
+ ROUND(G, a, b, c, d, in[ 9] + K2, 3);
+ ROUND(G, d, a, b, c, in[11] + K2, 5);
+ ROUND(G, c, d, a, b, in[ 0] + K2, 9);
+ ROUND(G, b, c, d, a, in[ 2] + K2, 13);
+ ROUND(G, a, b, c, d, in[ 4] + K2, 3);
+ ROUND(G, d, a, b, c, in[ 6] + K2, 5);
+ ROUND(G, c, d, a, b, in[ 8] + K2, 9);
+ ROUND(G, b, c, d, a, in[10] + K2, 13);
+
+ /* Round 3 */
+ ROUND(H, a, b, c, d, in[ 3] + K3, 3);
+ ROUND(H, d, a, b, c, in[ 7] + K3, 9);
+ ROUND(H, c, d, a, b, in[11] + K3, 11);
+ ROUND(H, b, c, d, a, in[ 2] + K3, 15);
+ ROUND(H, a, b, c, d, in[ 6] + K3, 3);
+ ROUND(H, d, a, b, c, in[10] + K3, 9);
+ ROUND(H, c, d, a, b, in[ 1] + K3, 11);
+ ROUND(H, b, c, d, a, in[ 5] + K3, 15);
+ ROUND(H, a, b, c, d, in[ 9] + K3, 3);
+ ROUND(H, d, a, b, c, in[ 0] + K3, 9);
+ ROUND(H, c, d, a, b, in[ 4] + K3, 11);
+ ROUND(H, b, c, d, a, in[ 8] + K3, 15);
+
+ return buf[1] + b; /* "most hashed" word */
+ /* Alternative: return sum of all words? */
+}
+#endif
+
+#undef ROUND
+#undef F
+#undef G
+#undef H
+#undef K1
+#undef K2
+#undef K3
+
+/* This should not be decreased so low that ISNs wrap too fast. */
+#define REKEY_INTERVAL (300 * HZ)
+/*
+ * Bit layout of the tcp sequence numbers (before adding current time):
+ * bit 24-31: increased after every key exchange
+ * bit 0-23: hash(source,dest)
+ *
+ * The implementation is similar to the algorithm described
+ * in the Appendix of RFC 1185, except that
+ * - it uses a 1 MHz clock instead of a 250 kHz clock
+ * - it performs a rekey every 5 minutes, which is equivalent
+ *   to a (source,dest) tuple dependent forward jump of the
+ * clock by 0..2^(HASH_BITS+1)
+ *
+ * Thus the average ISN wraparound time is 68 minutes instead of
+ * 4.55 hours.
+ *
+ * SMP cleanup and lock avoidance with poor man's RCU.
+ * Manfred Spraul <manfred@colorfullife.com>
+ *
+ */
+#define COUNT_BITS 8
+#define COUNT_MASK ((1 << COUNT_BITS) - 1)
+#define HASH_BITS 24
+#define HASH_MASK ((1 << HASH_BITS) - 1)
+
+static struct keydata {
+ __u32 count; /* already shifted to the final position */
+ __u32 secret[12];
+} ____cacheline_aligned ip_keydata[2];
+
+static unsigned int ip_cnt;
+
+static void rekey_seq_generator(struct work_struct *work);
+
+static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
+
+/*
+ * Lock avoidance:
+ * The ISN generation runs lockless - it's just a hash over random data.
+ * State changes happen every 5 minutes when the random key is replaced.
+ * Synchronization is performed by having two copies of the hash function
+ * state and rekey_seq_generator always updates the inactive copy.
+ * The copy is then activated by updating ip_cnt.
+ * The implementation breaks down if someone blocks the thread
+ * that processes SYN requests for more than 5 minutes. This should never
+ * happen, and even if it does, only a not perfectly compliant
+ * ISN is generated; nothing fatal.
+ */
+static void rekey_seq_generator(struct work_struct *work)
+{
+ struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
+
+ get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
+ keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
+ smp_wmb();
+ ip_cnt++;
+ schedule_delayed_work(&rekey_work, REKEY_INTERVAL);
+}
+
+static inline struct keydata *get_keyptr(void)
+{
+ struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
+
+ smp_rmb();
+
+ return keyptr;
+}
+
+static __init int seqgen_init(void)
+{
+ rekey_seq_generator(NULL);
+ return 0;
+}
+late_initcall(seqgen_init);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
+ __be16 sport, __be16 dport)
+{
+ __u32 seq;
+ __u32 hash[12];
+ struct keydata *keyptr = get_keyptr();
+
+ /* The procedure is the same as for IPv4, but addresses are longer.
+ * Thus we must use twothirdsMD4Transform.
+ */
+
+ memcpy(hash, saddr, 16);
+ hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
+ memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
+
+ seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
+ seq += keyptr->count;
+
+ seq += ktime_to_ns(ktime_get_real());
+
+ return seq;
+}
+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
+#endif
+
+/* The code below is shamelessly stolen from secure_tcp_sequence_number().
+ * All blames to Andrey V. Savochkin <saw@msu.ru>.
+ */
+__u32 secure_ip_id(__be32 daddr)
+{
+ struct keydata *keyptr;
+ __u32 hash[4];
+
+ keyptr = get_keyptr();
+
+ /*
+ * Pick a unique starting offset for each IP destination.
+ * The dest ip address is placed in the starting vector,
+ * which is then hashed with random data.
+ */
+ hash[0] = (__force __u32)daddr;
+ hash[1] = keyptr->secret[9];
+ hash[2] = keyptr->secret[10];
+ hash[3] = keyptr->secret[11];
+
+ return half_md4_transform(hash, keyptr->secret);
+}
+
+#ifdef CONFIG_INET
+
+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ __u32 seq;
+ __u32 hash[4];
+ struct keydata *keyptr = get_keyptr();
+
+ /*
+ * Pick a unique starting offset for each TCP connection endpoints
+ * (saddr, daddr, sport, dport).
+ * Note that the words are placed into the starting vector, which is
+ * then mixed with a partial MD4 over random data.
+ */
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = keyptr->secret[11];
+
+ seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
+ seq += keyptr->count;
+ /*
+ * As close as possible to RFC 793, which
+ * suggests using a 250 kHz clock.
+ * Further reading shows this assumes 2 Mb/s networks.
+ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
+ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
+ * we also need to limit the resolution so that the u32 seq
+ * overlaps less than one time per MSL (2 minutes).
+ * Choosing a clock of 64 ns period is OK. (period of 274 s)
+ */
+ seq += ktime_to_ns(ktime_get_real()) >> 6;
+
+ return seq;
+}
+
+/* Generate secure starting point for ephemeral IPV4 transport port search */
+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
+{
+ struct keydata *keyptr = get_keyptr();
+ u32 hash[4];
+
+ /*
+ * Pick a unique starting offset for each ephemeral port search
+ * (saddr, daddr, dport) and 48bits of random data.
+ */
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = (__force u32)dport ^ keyptr->secret[10];
+ hash[3] = keyptr->secret[11];
+
+ return half_md4_transform(hash, keyptr->secret);
+}
+EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
+
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
+ __be16 dport)
+{
+ struct keydata *keyptr = get_keyptr();
+ u32 hash[12];
+
+ memcpy(hash, saddr, 16);
+ hash[4] = (__force u32)dport;
+ memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
+
+ return twothirdsMD4Transform((const __u32 *)daddr, hash);
+}
+#endif
+
+#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
+/* Similar to secure_tcp_sequence_number but generates a 48 bit value;
+ * bits 32-47 increase every key exchange
+ * 0-31 hash(source, dest)
+ */
+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport)
+{
+ u64 seq;
+ __u32 hash[4];
+ struct keydata *keyptr = get_keyptr();
+
+ hash[0] = (__force u32)saddr;
+ hash[1] = (__force u32)daddr;
+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
+ hash[3] = keyptr->secret[11];
+
+ seq = half_md4_transform(hash, keyptr->secret);
+ seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
+
+ seq += ktime_to_ns(ktime_get_real());
+ seq &= (1ull << 48) - 1;
+
+ return seq;
+}
+EXPORT_SYMBOL(secure_dccp_sequence_number);
+#endif
+
+#endif /* CONFIG_INET */
+
+
+/*
+ * Get a random word for internal kernel use only. Similar to urandom but
+ * with the goal of minimal entropy pool depletion. As a result, the random
+ * value is not cryptographically secure but for several uses the cost of
+ * depleting entropy is too high
+ */
+unsigned int get_random_int(void)
+{
+ /*
+	 * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
+	 * every REKEY_INTERVAL (5 minutes) from the entropy pool (and thus
+	 * creates a limited drain on it), and uses half_md4_transform between
+	 * rekeys. We
+ * also mix it with jiffies and the PID:
+ */
+ return secure_ip_id((__force __be32)(current->pid + jiffies));
+}
+
+/*
+ * randomize_range() returns a start address such that
+ *
+ * [...... <range> .....]
+ * start end
+ *
+ * a <range> with size "len" starting at the return value is inside the
+ * area defined by [start, end], but is otherwise randomized.
+ */
+unsigned long
+randomize_range(unsigned long start, unsigned long end, unsigned long len)
+{
+ unsigned long range = end - len - start;
+
+ if (end <= start + len)
+ return 0;
+ return PAGE_ALIGN(get_random_int() % range + start);
+}
+
+#endif
diff --git a/libdde-linux26/lib/src/drivers/pci/pci-driver.c b/libdde-linux26/lib/src/drivers/pci/pci-driver.c
new file mode 100644
index 00000000..199ec8a7
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/pci/pci-driver.c
@@ -0,0 +1,1008 @@
+/*
+ * drivers/pci/pci-driver.c
+ *
+ * (C) Copyright 2002-2004, 2007 Greg Kroah-Hartman <greg@kroah.com>
+ * (C) Copyright 2007 Novell Inc.
+ *
+ * Released under the GPL v2 only.
+ *
+ */
+
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/mempolicy.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/cpu.h>
+#include "pci.h"
+
+#ifdef DDE_LINUX
+#include "local.h"
+#endif /* DDE_LINUX */
+
+/*
+ * Dynamic device IDs are disabled for !CONFIG_HOTPLUG
+ */
+
+struct pci_dynid {
+ struct list_head node;
+ struct pci_device_id id;
+};
+
+#ifdef CONFIG_HOTPLUG
+
+/**
+ * store_new_id - add a new PCI device ID to this driver and re-probe devices
+ * @driver: target device driver
+ * @buf: buffer for scanning device ID data
+ * @count: input size
+ *
+ * Adds a new dynamic pci device ID to this driver,
+ * and causes the driver to probe for all devices again.
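+ *
+ * For example (the driver name and IDs below are purely illustrative),
+ * from userspace:
+ *
+ *	echo "8086 10f5" > /sys/bus/pci/drivers/foo/new_id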
+ */
+static ssize_t
+store_new_id(struct device_driver *driver, const char *buf, size_t count)
+{
+ struct pci_dynid *dynid;
+ struct pci_driver *pdrv = to_pci_driver(driver);
+ const struct pci_device_id *ids = pdrv->id_table;
+ __u32 vendor, device, subvendor=PCI_ANY_ID,
+ subdevice=PCI_ANY_ID, class=0, class_mask=0;
+ unsigned long driver_data=0;
+ int fields=0;
+ int retval=0;
+
+ fields = sscanf(buf, "%x %x %x %x %x %x %lx",
+ &vendor, &device, &subvendor, &subdevice,
+ &class, &class_mask, &driver_data);
+ if (fields < 2)
+ return -EINVAL;
+
+ /* Only accept driver_data values that match an existing id_table
+ entry */
+ if (ids) {
+ retval = -EINVAL;
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ if (driver_data == ids->driver_data) {
+ retval = 0;
+ break;
+ }
+ ids++;
+ }
+ if (retval) /* No match */
+ return retval;
+ }
+
+ dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
+ if (!dynid)
+ return -ENOMEM;
+
+ dynid->id.vendor = vendor;
+ dynid->id.device = device;
+ dynid->id.subvendor = subvendor;
+ dynid->id.subdevice = subdevice;
+ dynid->id.class = class;
+ dynid->id.class_mask = class_mask;
+ dynid->id.driver_data = driver_data;
+
+ spin_lock(&pdrv->dynids.lock);
+ list_add_tail(&dynid->node, &pdrv->dynids.list);
+ spin_unlock(&pdrv->dynids.lock);
+
+ if (get_driver(&pdrv->driver)) {
+ retval = driver_attach(&pdrv->driver);
+ put_driver(&pdrv->driver);
+ }
+
+ if (retval)
+ return retval;
+ return count;
+}
+static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
+
+static void
+pci_free_dynids(struct pci_driver *drv)
+{
+ struct pci_dynid *dynid, *n;
+
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
+ list_del(&dynid->node);
+ kfree(dynid);
+ }
+ spin_unlock(&drv->dynids.lock);
+}
+
+static int
+pci_create_newid_file(struct pci_driver *drv)
+{
+ int error = 0;
+ if (drv->probe != NULL)
+ error = driver_create_file(&drv->driver, &driver_attr_new_id);
+ return error;
+}
+
+static void pci_remove_newid_file(struct pci_driver *drv)
+{
+ driver_remove_file(&drv->driver, &driver_attr_new_id);
+}
+#else /* !CONFIG_HOTPLUG */
+static inline void pci_free_dynids(struct pci_driver *drv) {}
+static inline int pci_create_newid_file(struct pci_driver *drv)
+{
+ return 0;
+}
+static inline void pci_remove_newid_file(struct pci_driver *drv) {}
+#endif
+
+/**
+ * pci_match_id - See if a pci device matches a given pci_id table
+ * @ids: array of PCI device id structures to search in
+ * @dev: the PCI device structure to match against.
+ *
+ * Used by a driver to check whether a PCI device present in the
+ * system is in its list of supported devices. Returns the matching
+ * pci_device_id structure or %NULL if there is no match.
+ *
+ * Deprecated, don't use this as it will not catch any dynamic ids
+ * that a driver might want to check for.
+ */
+const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
+ struct pci_dev *dev)
+{
+ if (ids) {
+ while (ids->vendor || ids->subvendor || ids->class_mask) {
+ if (pci_match_one_device(ids, dev))
+ return ids;
+ ids++;
+ }
+ }
+ return NULL;
+}
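+
+/*
+ * Illustrative sketch (not part of the original file): the id_table
+ * walked above is typically declared in the driver like this (names
+ * and IDs hypothetical); the all-zero sentinel terminates the walk:
+ *
+ *   static const struct pci_device_id foo_ids[] = {
+ *           { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x10f5) },
+ *           { 0, }
+ *   };
+ */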
+
+/**
+ * pci_match_device - Tell if a PCI device structure has a matching PCI device id structure
+ * @drv: the PCI driver to match against
+ * @dev: the PCI device structure to match against
+ *
+ * Used by a driver to check whether a PCI device present in the
+ * system is in its list of supported devices. Returns the matching
+ * pci_device_id structure or %NULL if there is no match.
+ */
+static const struct pci_device_id *pci_match_device(struct pci_driver *drv,
+ struct pci_dev *dev)
+{
+ struct pci_dynid *dynid;
+
+ /* Look at the dynamic ids first, before the static ones */
+ spin_lock(&drv->dynids.lock);
+ list_for_each_entry(dynid, &drv->dynids.list, node) {
+ if (pci_match_one_device(&dynid->id, dev)) {
+ spin_unlock(&drv->dynids.lock);
+ return &dynid->id;
+ }
+ }
+ spin_unlock(&drv->dynids.lock);
+
+ return pci_match_id(drv->id_table, dev);
+}
+
+struct drv_dev_and_id {
+ struct pci_driver *drv;
+ struct pci_dev *dev;
+ const struct pci_device_id *id;
+};
+
+static long local_pci_probe(void *_ddi)
+{
+ struct drv_dev_and_id *ddi = _ddi;
+
+ return ddi->drv->probe(ddi->dev, ddi->id);
+}
+
+static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ int error, node;
+ struct drv_dev_and_id ddi = { drv, dev, id };
+
+	/* Execute driver initialization on the node to which the device's
+	   bus is attached.  This way the driver likely allocates
+	   its local memory on the right node without any need to
+	   change it. */
+ node = dev_to_node(&dev->dev);
+ if (node >= 0) {
+ int cpu;
+ node_to_cpumask_ptr(nodecpumask, node);
+
+ get_online_cpus();
+ cpu = cpumask_any_and(nodecpumask, cpu_online_mask);
+ if (cpu < nr_cpu_ids)
+ error = work_on_cpu(cpu, local_pci_probe, &ddi);
+ else
+ error = local_pci_probe(&ddi);
+ put_online_cpus();
+ } else
+ error = local_pci_probe(&ddi);
+ return error;
+}
+
+/**
+ * __pci_device_probe - check if a driver wants to claim a specific PCI device
+ * @drv: driver to call to check if it wants the PCI device
+ * @pci_dev: PCI device being probed
+ *
+ * Returns 0 on success, else error.
+ * Side-effect: pci_dev->driver is set to drv when drv claims pci_dev.
+ */
+static int
+__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
+{
+ const struct pci_device_id *id;
+ int error = 0;
+
+ if (!pci_dev->driver && drv->probe) {
+ error = -ENODEV;
+
+ id = pci_match_device(drv, pci_dev);
+ if (id)
+ error = pci_call_probe(drv, pci_dev, id);
+ if (error >= 0) {
+ pci_dev->driver = drv;
+ error = 0;
+ }
+ }
+ return error;
+}
+
+static int pci_device_probe(struct device * dev)
+{
+ int error = 0;
+ struct pci_driver *drv;
+ struct pci_dev *pci_dev;
+
+ drv = to_pci_driver(dev->driver);
+ pci_dev = to_pci_dev(dev);
+ pci_dev_get(pci_dev);
+ error = __pci_device_probe(drv, pci_dev);
+ if (error)
+ pci_dev_put(pci_dev);
+
+ return error;
+}
+
+static int pci_device_remove(struct device * dev)
+{
+ struct pci_dev * pci_dev = to_pci_dev(dev);
+ struct pci_driver * drv = pci_dev->driver;
+
+ if (drv) {
+ if (drv->remove)
+ drv->remove(pci_dev);
+ pci_dev->driver = NULL;
+ }
+
+ /*
+ * If the device is still on, set the power state as "unknown",
+ * since it might change by the next time we load the driver.
+ */
+ if (pci_dev->current_state == PCI_D0)
+ pci_dev->current_state = PCI_UNKNOWN;
+
+ /*
+ * We would love to complain here if pci_dev->is_enabled is set, that
+ * the driver should have called pci_disable_device(), but the
+ * unfortunate fact is there are too many odd BIOS and bridge setups
+ * that don't like drivers doing that all of the time.
+ * Oh well, we can dream of sane hardware when we sleep, no matter how
+ * horrible the crap we have to deal with is when we are awake...
+ */
+
+ pci_dev_put(pci_dev);
+ return 0;
+}
+
+static void pci_device_shutdown(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *drv = pci_dev->driver;
+
+ if (drv && drv->shutdown)
+ drv->shutdown(pci_dev);
+ pci_msi_shutdown(pci_dev);
+ pci_msix_shutdown(pci_dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+/*
+ * Default "suspend" method for devices that have no driver provided suspend,
+ * or not even a driver at all (second part).
+ */
+static void pci_pm_set_unknown_state(struct pci_dev *pci_dev)
+{
+ /*
+ * mark its power state as "unknown", since we don't know if
+ * e.g. the BIOS will change its device state when we suspend.
+ */
+ if (pci_dev->current_state == PCI_D0)
+ pci_dev->current_state = PCI_UNKNOWN;
+}
+
+/*
+ * Default "resume" method for devices that have no driver provided resume,
+ * or not even a driver at all (second part).
+ */
+static int pci_pm_reenable_device(struct pci_dev *pci_dev)
+{
+ int retval;
+
+ /* if the device was enabled before suspend, reenable */
+ retval = pci_reenable_device(pci_dev);
+ /*
+ * if the device was busmaster before the suspend, make it busmaster
+ * again
+ */
+ if (pci_dev->is_busmaster)
+ pci_set_master(pci_dev);
+
+ return retval;
+}
+
+static int pci_legacy_suspend(struct device *dev, pm_message_t state)
+{
+#ifndef DDE_LINUX
+ struct pci_dev * pci_dev = to_pci_dev(dev);
+ struct pci_driver * drv = pci_dev->driver;
+ int i = 0;
+
+ if (drv && drv->suspend) {
+ pci_power_t prev = pci_dev->current_state;
+
+ pci_dev->state_saved = false;
+
+ i = drv->suspend(pci_dev, state);
+ suspend_report_result(drv->suspend, i);
+ if (i)
+ return i;
+
+ if (pci_dev->state_saved)
+ goto Fixup;
+
+ if (pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: Device state not saved by %pF\n",
+ drv->suspend);
+ goto Fixup;
+ }
+ }
+
+ pci_save_state(pci_dev);
+ /*
+ * This is for compatibility with existing code with legacy PM support.
+ */
+ pci_pm_set_unknown_state(pci_dev);
+
+ Fixup:
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ return i;
+#else
+ WARN_UNIMPL;
+ return 0;
+#endif /* DDE_LINUX */
+}
+
+static int pci_legacy_suspend_late(struct device *dev, pm_message_t state)
+{
+#ifndef DDE_LINUX
+ struct pci_dev * pci_dev = to_pci_dev(dev);
+ struct pci_driver * drv = pci_dev->driver;
+ int i = 0;
+
+ if (drv && drv->suspend_late) {
+ i = drv->suspend_late(pci_dev, state);
+ suspend_report_result(drv->suspend_late, i);
+ }
+ return i;
+#else
+ WARN_UNIMPL;
+ return 0;
+#endif
+}
+
+static int pci_legacy_resume_early(struct device *dev)
+{
+ struct pci_dev * pci_dev = to_pci_dev(dev);
+ struct pci_driver * drv = pci_dev->driver;
+
+ return drv && drv->resume_early ?
+ drv->resume_early(pci_dev) : 0;
+}
+
+static int pci_legacy_resume(struct device *dev)
+{
+ struct pci_dev * pci_dev = to_pci_dev(dev);
+ struct pci_driver * drv = pci_dev->driver;
+
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+
+ return drv && drv->resume ?
+ drv->resume(pci_dev) : pci_pm_reenable_device(pci_dev);
+}
+
+/* Auxiliary functions used by the new power management framework */
+
+static void pci_pm_default_resume_noirq(struct pci_dev *pci_dev)
+{
+ pci_restore_standard_config(pci_dev);
+ pci_dev->state_saved = false;
+ pci_fixup_device(pci_fixup_resume_early, pci_dev);
+}
+
+static void pci_pm_default_resume(struct pci_dev *pci_dev)
+{
+ pci_fixup_device(pci_fixup_resume, pci_dev);
+
+ if (!pci_is_bridge(pci_dev))
+ pci_enable_wake(pci_dev, PCI_D0, false);
+}
+
+static void pci_pm_default_suspend(struct pci_dev *pci_dev)
+{
+ /* Disable non-bridge devices without PM support */
+ if (!pci_is_bridge(pci_dev))
+ pci_disable_enabled_device(pci_dev);
+ pci_save_state(pci_dev);
+}
+
+static bool pci_has_legacy_pm_support(struct pci_dev *pci_dev)
+{
+ struct pci_driver *drv = pci_dev->driver;
+ bool ret = drv && (drv->suspend || drv->suspend_late || drv->resume
+ || drv->resume_early);
+
+ /*
+ * Legacy PM support is used by default, so warn if the new framework is
+ * supported as well. Drivers are supposed to support either the
+ * former, or the latter, but not both at the same time.
+ */
+ WARN_ON(ret && drv->driver.pm);
+
+ return ret;
+}
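+
+/*
+ * Illustrative sketch (not part of the original file): a driver opting
+ * into the new framework publishes a dev_pm_ops table instead of the
+ * legacy suspend/resume members (names hypothetical):
+ *
+ *   static struct dev_pm_ops foo_pm_ops = {
+ *           .suspend = foo_suspend,
+ *           .resume  = foo_resume,
+ *   };
+ *
+ *   static struct pci_driver foo_driver = {
+ *           .name      = "foo",
+ *           .driver.pm = &foo_pm_ops,
+ *   };
+ */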
+
+/* New power management framework */
+
+static int pci_pm_prepare(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (drv && drv->pm && drv->pm->prepare)
+ error = drv->pm->prepare(dev);
+
+ return error;
+}
+
+static void pci_pm_complete(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+
+ if (drv && drv->pm && drv->pm->complete)
+ drv->pm->complete(dev);
+}
+
+#ifdef CONFIG_SUSPEND
+
+static int pci_pm_suspend(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend(dev, PMSG_SUSPEND);
+
+ if (!pm) {
+ pci_pm_default_suspend(pci_dev);
+ goto Fixup;
+ }
+
+ pci_dev->state_saved = false;
+
+ if (pm->suspend) {
+ pci_power_t prev = pci_dev->current_state;
+ int error;
+
+ error = pm->suspend(dev);
+ suspend_report_result(pm->suspend, error);
+ if (error)
+ return error;
+
+ if (pci_dev->state_saved)
+ goto Fixup;
+
+ if (pci_dev->current_state != PCI_D0
+ && pci_dev->current_state != PCI_UNKNOWN) {
+ WARN_ONCE(pci_dev->current_state != prev,
+ "PCI PM: State of device not saved by %pF\n",
+ pm->suspend);
+ goto Fixup;
+ }
+ }
+
+ if (!pci_dev->state_saved) {
+ pci_save_state(pci_dev);
+ if (!pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+ }
+
+ Fixup:
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ return 0;
+}
+
+static int pci_pm_suspend_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend_late(dev, PMSG_SUSPEND);
+
+ if (drv && drv->pm && drv->pm->suspend_noirq) {
+ error = drv->pm->suspend_noirq(dev);
+ suspend_report_result(drv->pm->suspend_noirq, error);
+ }
+
+ if (!error)
+ pci_pm_set_unknown_state(pci_dev);
+
+ return error;
+}
+
+static int pci_pm_resume_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ pci_pm_default_resume_noirq(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+ if (drv && drv->pm && drv->pm->resume_noirq)
+ error = drv->pm->resume_noirq(dev);
+
+ return error;
+}
+
+static int pci_pm_resume(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ /*
+ * This is necessary for the suspend error path in which resume is
+ * called without restoring the standard config registers of the device.
+ */
+ if (pci_dev->state_saved)
+ pci_restore_standard_config(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume(dev);
+
+ pci_pm_default_resume(pci_dev);
+
+ if (pm) {
+ if (pm->resume)
+ error = pm->resume(dev);
+ } else {
+ pci_pm_reenable_device(pci_dev);
+ }
+
+	return error;
+}
+
+#else /* !CONFIG_SUSPEND */
+
+#define pci_pm_suspend NULL
+#define pci_pm_suspend_noirq NULL
+#define pci_pm_resume NULL
+#define pci_pm_resume_noirq NULL
+
+#endif /* !CONFIG_SUSPEND */
+
+#ifdef CONFIG_HIBERNATION
+
+static int pci_pm_freeze(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend(dev, PMSG_FREEZE);
+
+ if (!pm) {
+ pci_pm_default_suspend(pci_dev);
+ return 0;
+ }
+
+ pci_dev->state_saved = false;
+
+ if (pm->freeze) {
+ int error;
+
+ error = pm->freeze(dev);
+ suspend_report_result(pm->freeze, error);
+ if (error)
+ return error;
+ }
+
+ if (!pci_dev->state_saved)
+ pci_save_state(pci_dev);
+
+ return 0;
+}
+
+static int pci_pm_freeze_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend_late(dev, PMSG_FREEZE);
+
+ if (drv && drv->pm && drv->pm->freeze_noirq) {
+ error = drv->pm->freeze_noirq(dev);
+ suspend_report_result(drv->pm->freeze_noirq, error);
+ }
+
+ if (!error)
+ pci_pm_set_unknown_state(pci_dev);
+
+ return error;
+}
+
+static int pci_pm_thaw_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+ pci_update_current_state(pci_dev, PCI_D0);
+
+ if (drv && drv->pm && drv->pm->thaw_noirq)
+ error = drv->pm->thaw_noirq(dev);
+
+ return error;
+}
+
+static int pci_pm_thaw(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume(dev);
+
+ if (pm) {
+ if (pm->thaw)
+ error = pm->thaw(dev);
+ } else {
+ pci_pm_reenable_device(pci_dev);
+ }
+
+ return error;
+}
+
+static int pci_pm_poweroff(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_suspend(dev, PMSG_HIBERNATE);
+
+ if (!pm) {
+ pci_pm_default_suspend(pci_dev);
+ goto Fixup;
+ }
+
+ pci_dev->state_saved = false;
+
+ if (pm->poweroff) {
+ error = pm->poweroff(dev);
+ suspend_report_result(pm->poweroff, error);
+ }
+
+ if (!pci_dev->state_saved && !pci_is_bridge(pci_dev))
+ pci_prepare_to_sleep(pci_dev);
+
+ Fixup:
+ pci_fixup_device(pci_fixup_suspend, pci_dev);
+
+ return error;
+}
+
+static int pci_pm_poweroff_noirq(struct device *dev)
+{
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ if (pci_has_legacy_pm_support(to_pci_dev(dev)))
+ return pci_legacy_suspend_late(dev, PMSG_HIBERNATE);
+
+ if (drv && drv->pm && drv->pm->poweroff_noirq) {
+ error = drv->pm->poweroff_noirq(dev);
+ suspend_report_result(drv->pm->poweroff_noirq, error);
+ }
+
+ return error;
+}
+
+static int pci_pm_restore_noirq(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct device_driver *drv = dev->driver;
+ int error = 0;
+
+ pci_pm_default_resume_noirq(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume_early(dev);
+
+ if (drv && drv->pm && drv->pm->restore_noirq)
+ error = drv->pm->restore_noirq(dev);
+
+ return error;
+}
+
+static int pci_pm_restore(struct device *dev)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
+ int error = 0;
+
+ /*
+ * This is necessary for the hibernation error path in which restore is
+ * called without restoring the standard config registers of the device.
+ */
+ if (pci_dev->state_saved)
+ pci_restore_standard_config(pci_dev);
+
+ if (pci_has_legacy_pm_support(pci_dev))
+ return pci_legacy_resume(dev);
+
+ pci_pm_default_resume(pci_dev);
+
+ if (pm) {
+ if (pm->restore)
+ error = pm->restore(dev);
+ } else {
+ pci_pm_reenable_device(pci_dev);
+ }
+
+ return error;
+}
+
+#else /* !CONFIG_HIBERNATION */
+
+#define pci_pm_freeze NULL
+#define pci_pm_freeze_noirq NULL
+#define pci_pm_thaw NULL
+#define pci_pm_thaw_noirq NULL
+#define pci_pm_poweroff NULL
+#define pci_pm_poweroff_noirq NULL
+#define pci_pm_restore NULL
+#define pci_pm_restore_noirq NULL
+
+#endif /* !CONFIG_HIBERNATION */
+
+struct dev_pm_ops pci_dev_pm_ops = {
+ .prepare = pci_pm_prepare,
+ .complete = pci_pm_complete,
+ .suspend = pci_pm_suspend,
+ .resume = pci_pm_resume,
+ .freeze = pci_pm_freeze,
+ .thaw = pci_pm_thaw,
+ .poweroff = pci_pm_poweroff,
+ .restore = pci_pm_restore,
+ .suspend_noirq = pci_pm_suspend_noirq,
+ .resume_noirq = pci_pm_resume_noirq,
+ .freeze_noirq = pci_pm_freeze_noirq,
+ .thaw_noirq = pci_pm_thaw_noirq,
+ .poweroff_noirq = pci_pm_poweroff_noirq,
+ .restore_noirq = pci_pm_restore_noirq,
+};
+
+#define PCI_PM_OPS_PTR (&pci_dev_pm_ops)
+
+#else /* !CONFIG_PM_SLEEP */
+
+#define PCI_PM_OPS_PTR NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
+/**
+ * __pci_register_driver - register a new pci driver
+ * @drv: the driver structure to register
+ * @owner: owner module of drv
+ * @mod_name: module name string
+ *
+ * Adds the driver structure to the list of registered drivers.
+ * Returns a negative value on error, otherwise 0.
+ * If no error occurred, the driver remains registered even if
+ * no device was claimed during registration.
+ */
+int __pci_register_driver(struct pci_driver *drv, struct module *owner,
+ const char *mod_name)
+{
+ int error;
+
+ /* initialize common driver fields */
+ drv->driver.name = drv->name;
+ drv->driver.bus = &pci_bus_type;
+ drv->driver.owner = owner;
+ drv->driver.mod_name = mod_name;
+
+ spin_lock_init(&drv->dynids.lock);
+ INIT_LIST_HEAD(&drv->dynids.list);
+
+ /* register with core */
+ error = driver_register(&drv->driver);
+ if (error)
+ return error;
+
+ error = pci_create_newid_file(drv);
+ if (error)
+ driver_unregister(&drv->driver);
+
+ return error;
+}
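+
+/*
+ * Illustrative sketch (not part of the original file): drivers normally
+ * reach this function through the pci_register_driver() wrapper, which
+ * supplies @owner and @mod_name (driver name hypothetical):
+ *
+ *   static int __init foo_init(void)
+ *   {
+ *           return pci_register_driver(&foo_driver);
+ *   }
+ *
+ *   static void __exit foo_exit(void)
+ *   {
+ *           pci_unregister_driver(&foo_driver);
+ *   }
+ */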
+
+/**
+ * pci_unregister_driver - unregister a pci driver
+ * @drv: the driver structure to unregister
+ *
+ * Deletes the driver structure from the list of registered PCI drivers,
+ * gives it a chance to clean up by calling its remove() function for
+ * each device it was responsible for, and marks those devices as
+ * driverless.
+ */
+
+void
+pci_unregister_driver(struct pci_driver *drv)
+{
+ pci_remove_newid_file(drv);
+ driver_unregister(&drv->driver);
+ pci_free_dynids(drv);
+}
+
+static struct pci_driver pci_compat_driver = {
+ .name = "compat"
+};
+
+/**
+ * pci_dev_driver - get the pci_driver of a device
+ * @dev: the device to query
+ *
+ * Returns the appropriate pci_driver structure or %NULL if there is no
+ * registered driver for the device.
+ */
+struct pci_driver *
+pci_dev_driver(const struct pci_dev *dev)
+{
+ if (dev->driver)
+ return dev->driver;
+ else {
+ int i;
+ for(i=0; i<=PCI_ROM_RESOURCE; i++)
+ if (dev->resource[i].flags & IORESOURCE_BUSY)
+ return &pci_compat_driver;
+ }
+ return NULL;
+}
+
+/**
+ * pci_bus_match - Tell if a PCI device structure has a matching PCI device id structure
+ * @dev: the PCI device structure to match against
+ * @drv: the device driver to search for matching PCI device id structures
+ *
+ * Used by the driver core to check whether a PCI device present in the
+ * system is in a driver's list of supported devices.  Returns 1 if a
+ * matching pci_device_id is found, 0 otherwise.
+ */
+static int pci_bus_match(struct device *dev, struct device_driver *drv)
+{
+ struct pci_dev *pci_dev = to_pci_dev(dev);
+ struct pci_driver *pci_drv = to_pci_driver(drv);
+ const struct pci_device_id *found_id;
+
+ found_id = pci_match_device(pci_drv, pci_dev);
+ if (found_id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * pci_dev_get - increments the reference count of the pci device structure
+ * @dev: the device being referenced
+ *
+ * Each live reference to a device should be refcounted.
+ *
+ * Drivers for PCI devices should normally record such references in
+ * their probe() methods, when they bind to a device, and release
+ * them by calling pci_dev_put(), in their remove() methods.
+ *
+ * A pointer to the device with the incremented reference counter is returned.
+ */
+struct pci_dev *pci_dev_get(struct pci_dev *dev)
+{
+ if (dev)
+ get_device(&dev->dev);
+ return dev;
+}
+
+/**
+ * pci_dev_put - release a use of the pci device structure
+ * @dev: device that's been disconnected
+ *
+ * Must be called when a user of a device is finished with it. When the last
+ * user of the device calls this function, the memory of the device is freed.
+ */
+void pci_dev_put(struct pci_dev *dev)
+{
+ if (dev)
+ put_device(&dev->dev);
+}
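+
+/*
+ * Illustrative sketch (not part of the original file): a typical
+ * get/put pairing around temporary use of a looked-up device:
+ *
+ *   struct pci_dev *pdev = pci_get_device(vendor, device, NULL);
+ *
+ *   if (pdev) {
+ *           ... use pdev ...
+ *           pci_dev_put(pdev);
+ *   }
+ *
+ * pci_get_device() already returns the device with its reference
+ * count incremented, so only the final put is needed here.
+ */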
+
+#ifndef CONFIG_HOTPLUG
+int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ return -ENODEV;
+}
+#endif
+
+struct bus_type pci_bus_type = {
+ .name = "pci",
+ .match = pci_bus_match,
+ .uevent = pci_uevent,
+ .probe = pci_device_probe,
+ .remove = pci_device_remove,
+ .shutdown = pci_device_shutdown,
+#ifndef DDE_LINUX
+ .dev_attrs = pci_dev_attrs,
+#endif
+ .pm = PCI_PM_OPS_PTR,
+};
+
+static int __init pci_driver_init(void)
+{
+ return bus_register(&pci_bus_type);
+}
+
+postcore_initcall(pci_driver_init);
+
+EXPORT_SYMBOL(pci_match_id);
+EXPORT_SYMBOL(__pci_register_driver);
+EXPORT_SYMBOL(pci_unregister_driver);
+EXPORT_SYMBOL(pci_dev_driver);
+EXPORT_SYMBOL(pci_bus_type);
+EXPORT_SYMBOL(pci_dev_get);
+EXPORT_SYMBOL(pci_dev_put);
diff --git a/libdde-linux26/lib/src/drivers/pci/pci.c b/libdde-linux26/lib/src/drivers/pci/pci.c
new file mode 100644
index 00000000..9350ffed
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/pci/pci.c
@@ -0,0 +1,2478 @@
+/*
+ * PCI Bus Services, see include/linux/pci.h for further explanation.
+ *
+ * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
+ * David Mosberger-Tang
+ *
+ * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pm.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+#include <linux/pci-aspm.h>
+#include <linux/pm_wakeup.h>
+#include <linux/interrupt.h>
+#include <asm/dma.h> /* isa_dma_bridge_buggy */
+#include "pci.h"
+
+#ifdef DDE_LINUX
+#include "local.h"
+#endif
+
+unsigned int pci_pm_d3_delay = PCI_PM_D3_WAIT;
+
+#ifdef CONFIG_PCI_DOMAINS
+int pci_domains_supported = 1;
+#endif
+
+#define DEFAULT_CARDBUS_IO_SIZE (256)
+#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
+/* pci=cbmemsize=nnM,cbiosize=nn can override this */
+unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
+unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
+
+/**
+ * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
+ * @bus: pointer to PCI bus structure to search
+ *
+ * Given a PCI bus, returns the highest PCI bus number present in the set
+ * including the given PCI bus and its list of child PCI buses.
+ */
+unsigned char pci_bus_max_busnr(struct pci_bus* bus)
+{
+ struct list_head *tmp;
+ unsigned char max, n;
+
+ max = bus->subordinate;
+ list_for_each(tmp, &bus->children) {
+ n = pci_bus_max_busnr(pci_bus_b(tmp));
+ if(n > max)
+ max = n;
+ }
+ return max;
+}
+EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
+
+#ifdef CONFIG_HAS_IOMEM
+void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
+{
+ /*
+ * Make sure the BAR is actually a memory resource, not an IO resource
+ */
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
+ WARN_ON(1);
+ return NULL;
+ }
+ return ioremap_nocache(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+}
+EXPORT_SYMBOL_GPL(pci_ioremap_bar);
+#endif
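+
+/*
+ * Illustrative sketch (not part of the original file): mapping BAR 0
+ * from a driver's probe() routine after the region has been claimed:
+ *
+ *   void __iomem *regs = pci_ioremap_bar(pdev, 0);
+ *
+ *   if (!regs)
+ *           return -ENOMEM;
+ *   ...
+ *   iounmap(regs);
+ */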
+
+#if 0
+/**
+ * pci_max_busnr - returns maximum PCI bus number
+ *
+ * Returns the highest PCI bus number present in the system global list of
+ * PCI buses.
+ */
+unsigned char __devinit
+pci_max_busnr(void)
+{
+ struct pci_bus *bus = NULL;
+ unsigned char max, n;
+
+ max = 0;
+ while ((bus = pci_find_next_bus(bus)) != NULL) {
+ n = pci_bus_max_busnr(bus);
+ if(n > max)
+ max = n;
+ }
+ return max;
+}
+
+#endif /* 0 */
+
+#define PCI_FIND_CAP_TTL 48
+
+static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap, int *ttl)
+{
+ u8 id;
+
+ while ((*ttl)--) {
+ pci_bus_read_config_byte(bus, devfn, pos, &pos);
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
+ &id);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos += PCI_CAP_LIST_NEXT;
+ }
+ return 0;
+}
+
+static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
+ u8 pos, int cap)
+{
+ int ttl = PCI_FIND_CAP_TTL;
+
+ return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
+}
+
+int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
+{
+ return __pci_find_next_cap(dev->bus, dev->devfn,
+ pos + PCI_CAP_LIST_NEXT, cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_capability);
+
+static int __pci_bus_find_cap_start(struct pci_bus *bus,
+ unsigned int devfn, u8 hdr_type)
+{
+ u16 status;
+
+ pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ switch (hdr_type) {
+ case PCI_HEADER_TYPE_NORMAL:
+ case PCI_HEADER_TYPE_BRIDGE:
+ return PCI_CAPABILITY_LIST;
+ case PCI_HEADER_TYPE_CARDBUS:
+ return PCI_CB_CAPABILITY_LIST;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
+/**
+ * pci_find_capability - query for devices' capabilities
+ * @dev: PCI device to query
+ * @cap: capability code
+ *
+ * Tell if a device supports a given PCI capability.
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it. Possible values for @cap:
+ *
+ * %PCI_CAP_ID_PM Power Management
+ * %PCI_CAP_ID_AGP Accelerated Graphics Port
+ * %PCI_CAP_ID_VPD Vital Product Data
+ * %PCI_CAP_ID_SLOTID Slot Identification
+ * %PCI_CAP_ID_MSI Message Signalled Interrupts
+ * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
+ * %PCI_CAP_ID_PCIX PCI-X
+ * %PCI_CAP_ID_EXP PCI Express
+ */
+int pci_find_capability(struct pci_dev *dev, int cap)
+{
+ int pos;
+
+ pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
+ if (pos)
+ pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
+
+ return pos;
+}
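+
+/*
+ * Illustrative sketch (not part of the original file): reading the PM
+ * capability registers through the offset returned here, much as
+ * pci_pm_init() below does:
+ *
+ *   u16 pmc;
+ *   int pos = pci_find_capability(dev, PCI_CAP_ID_PM);
+ *
+ *   if (pos)
+ *           pci_read_config_word(dev, pos + PCI_PM_PMC, &pmc);
+ */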
+
+/**
+ * pci_bus_find_capability - query for devices' capabilities
+ * @bus: the PCI bus to query
+ * @devfn: PCI device to query
+ * @cap: capability code
+ *
+ * Like pci_find_capability() but works for pci devices that do not have a
+ * pci_dev structure set up yet.
+ *
+ * Returns the address of the requested capability structure within the
+ * device's PCI configuration space or 0 in case the device does not
+ * support it.
+ */
+int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
+{
+ int pos;
+ u8 hdr_type;
+
+ pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
+
+ pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
+ if (pos)
+ pos = __pci_find_next_cap(bus, devfn, pos, cap);
+
+ return pos;
+}
+
+/**
+ * pci_find_ext_capability - Find an extended capability
+ * @dev: PCI device to query
+ * @cap: capability code
+ *
+ * Returns the address of the requested extended capability structure
+ * within the device's PCI configuration space or 0 if the device does
+ * not support it. Possible values for @cap:
+ *
+ * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
+ * %PCI_EXT_CAP_ID_VC Virtual Channel
+ * %PCI_EXT_CAP_ID_DSN Device Serial Number
+ * %PCI_EXT_CAP_ID_PWR Power Budgeting
+ */
+int pci_find_ext_capability(struct pci_dev *dev, int cap)
+{
+ u32 header;
+ int ttl;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ /* minimum 8 bytes per capability */
+ ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
+
+ if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
+ return 0;
+
+ if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+
+ /*
+ * If we have no capabilities, this is indicated by cap ID,
+ * cap version and next pointer all being 0.
+ */
+ if (header == 0)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < PCI_CFG_SPACE_SIZE)
+ break;
+
+ if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pci_find_ext_capability);
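+
+/*
+ * Illustrative sketch (not part of the original file): locating the
+ * Advanced Error Reporting block in extended config space and reading
+ * one of its registers:
+ *
+ *   u32 status;
+ *   int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+ *
+ *   if (aer)
+ *           pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_STATUS, &status);
+ */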
+
+static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
+{
+ int rc, ttl = PCI_FIND_CAP_TTL;
+ u8 cap, mask;
+
+ if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
+ mask = HT_3BIT_CAP_MASK;
+ else
+ mask = HT_5BIT_CAP_MASK;
+
+ pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
+ PCI_CAP_ID_HT, &ttl);
+ while (pos) {
+ rc = pci_read_config_byte(dev, pos + 3, &cap);
+ if (rc != PCIBIOS_SUCCESSFUL)
+ return 0;
+
+ if ((cap & mask) == ht_cap)
+ return pos;
+
+ pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
+ pos + PCI_CAP_LIST_NEXT,
+ PCI_CAP_ID_HT, &ttl);
+ }
+
+ return 0;
+}
+/**
+ * pci_find_next_ht_capability - query a device's Hypertransport capabilities
+ * @dev: PCI device to query
+ * @pos: Position from which to continue searching
+ * @ht_cap: Hypertransport capability code
+ *
+ * To be used in conjunction with pci_find_ht_capability() to search for
+ * all capabilities matching @ht_cap. @pos should always be a value returned
+ * from pci_find_ht_capability().
+ *
+ * NB. To be 100% safe against broken PCI devices, the caller should take
+ * steps to avoid an infinite loop.
+ */
+int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
+{
+ return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
+
+/**
+ * pci_find_ht_capability - query a device's Hypertransport capabilities
+ * @dev: PCI device to query
+ * @ht_cap: Hypertransport capability code
+ *
+ * Tell if a device supports a given Hypertransport capability.
+ * Returns an address within the device's PCI configuration space
+ * or 0 in case the device does not support the requested capability.
+ * The address points to the PCI capability, of type PCI_CAP_ID_HT,
+ * which has a Hypertransport capability matching @ht_cap.
+ */
+int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
+{
+ int pos;
+
+ pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
+ if (pos)
+ pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
+
+ return pos;
+}
+EXPORT_SYMBOL_GPL(pci_find_ht_capability);
+
+/**
+ * pci_find_parent_resource - return resource region of parent bus of given region
+ * @dev: PCI device structure containing the resources to be searched
+ * @res: child resource record for which parent is sought
+ *
+ * For given resource region of given device, return the resource
+ * region of parent bus the given region is contained in or where
+ * it should be allocated from.
+ */
+struct resource *
+pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
+{
+ const struct pci_bus *bus = dev->bus;
+ int i;
+ struct resource *best = NULL;
+
+ for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
+ struct resource *r = bus->resource[i];
+ if (!r)
+ continue;
+ if (res->start && !(res->start >= r->start && res->end <= r->end))
+ continue; /* Not contained */
+ if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
+ continue; /* Wrong type */
+ if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
+ return r; /* Exact match */
+ if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
+ best = r; /* Approximating prefetchable by non-prefetchable */
+ }
+ return best;
+}
+
+/**
+ * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
+ * @dev: PCI device to have its BARs restored
+ *
+ * Restore the BAR values for a given device, so as to make it
+ * accessible by its driver.
+ */
+static void
+pci_restore_bars(struct pci_dev *dev)
+{
+ int i;
+
+ for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
+ pci_update_resource(dev, i);
+}
+
+static struct pci_platform_pm_ops *pci_platform_pm;
+
+int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
+{
+ if (!ops->is_manageable || !ops->set_state || !ops->choose_state
+ || !ops->sleep_wake || !ops->can_wakeup)
+ return -EINVAL;
+ pci_platform_pm = ops;
+ return 0;
+}
+
+static inline bool platform_pci_power_manageable(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
+}
+
+static inline int platform_pci_set_power_state(struct pci_dev *dev,
+ pci_power_t t)
+{
+ return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
+}
+
+static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
+}
+
+static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
+{
+ return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
+}
+
+static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
+{
+ return pci_platform_pm ?
+ pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
+}
+
+/**
+ * pci_raw_set_power_state - Use PCI PM registers to set the power state of
+ * given PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ * @wait: If 'true', wait for the device to change its power state
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+static int
+pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state, bool wait)
+{
+ u16 pmcsr;
+ bool need_restore = false;
+
+ if (!dev->pm_cap)
+ return -EIO;
+
+ if (state < PCI_D0 || state > PCI_D3hot)
+ return -EINVAL;
+
+ /* Validate current state:
+	 * Can enter D0 from any state; we can only go deeper
+	 * to sleep if we're already in a low power state.
+ */
+ if (dev->current_state == state) {
+ /* we're already there */
+ return 0;
+ } else if (state != PCI_D0 && dev->current_state <= PCI_D3cold
+ && dev->current_state > state) {
+ dev_err(&dev->dev, "invalid power transition "
+ "(from state %d to %d)\n", dev->current_state, state);
+ return -EINVAL;
+ }
+
+ /* check if this device supports the desired state */
+ if ((state == PCI_D1 && !dev->d1_support)
+ || (state == PCI_D2 && !dev->d2_support))
+ return -EIO;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+
+ /* If we're (effectively) in D3, force entire word to 0.
+ * This doesn't affect PME_Status, disables PME_En, and
+ * sets PowerState to 0.
+ */
+ switch (dev->current_state) {
+ case PCI_D0:
+ case PCI_D1:
+ case PCI_D2:
+ pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+ pmcsr |= state;
+ break;
+ case PCI_UNKNOWN: /* Boot-up */
+ if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
+ && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET)) {
+ need_restore = true;
+ wait = true;
+ }
+ /* Fall-through: force to D0 */
+ default:
+ pmcsr = 0;
+ break;
+ }
+
+ /* enter specified state */
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+
+ if (!wait)
+ return 0;
+
+ /* Mandatory power management transition delays */
+ /* see PCI PM 1.1 5.6.1 table 18 */
+ if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
+ msleep(pci_pm_d3_delay);
+ else if (state == PCI_D2 || dev->current_state == PCI_D2)
+ udelay(PCI_PM_D2_DELAY);
+
+ dev->current_state = state;
+
+ /* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
+ * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
+ * from D3hot to D0 _may_ perform an internal reset, thereby
+ * going to "D0 Uninitialized" rather than "D0 Initialized".
+ * For example, at least some versions of the 3c905B and the
+ * 3c556B exhibit this behaviour.
+ *
+ * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
+ * devices in a D3hot state at boot. Consequently, we need to
+ * restore at least the BARs so that the device will be
+ * accessible to its driver.
+ */
+ if (need_restore)
+ pci_restore_bars(dev);
+
+ if (wait && dev->bus->self)
+ pcie_aspm_pm_state_change(dev->bus->self);
+
+ return 0;
+}
+
+/**
+ * pci_update_current_state - Read PCI power state of given device from its
+ * PCI PM registers and cache it
+ * @dev: PCI device to handle.
+ * @state: State to cache in case the device doesn't have the PM capability
+ */
+void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
+{
+ if (dev->pm_cap) {
+ u16 pmcsr;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
+ } else {
+ dev->current_state = state;
+ }
+}
+
+/**
+ * pci_set_power_state - Set the power state of a PCI device
+ * @dev: PCI device to handle.
+ * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
+ *
+ * Transition a device to a new power state, using the platform firmware and/or
+ * the device's PCI PM registers.
+ *
+ * RETURN VALUE:
+ * -EINVAL if the requested state is invalid.
+ * -EIO if device does not support PCI PM or its PM capabilities register has a
+ * wrong version, or device doesn't support the requested state.
+ * 0 if device already is in the requested state.
+ * 0 if device's power state has been successfully changed.
+ */
+int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
+{
+ int error;
+
+ /* bound the state we're entering */
+ if (state > PCI_D3hot)
+ state = PCI_D3hot;
+ else if (state < PCI_D0)
+ state = PCI_D0;
+ else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
+ /*
+ * If the device or the parent bridge do not support PCI PM,
+ * ignore the request if we're doing anything other than putting
+ * it into D0 (which would only happen on boot).
+ */
+ return 0;
+
+ if (state == PCI_D0 && platform_pci_power_manageable(dev)) {
+ /*
+ * Allow the platform to change the state, for example via ACPI
+ * _PR0, _PS0 and some such, but do not trust it.
+ */
+ int ret = platform_pci_set_power_state(dev, PCI_D0);
+ if (!ret)
+ pci_update_current_state(dev, PCI_D0);
+ }
+	/* If the device is quirked not to be put into D3, don't put it
+	   there */
+ if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
+ return 0;
+
+ error = pci_raw_set_power_state(dev, state, true);
+
+ if (state > PCI_D0 && platform_pci_power_manageable(dev)) {
+ /* Allow the platform to finalize the transition */
+ int ret = platform_pci_set_power_state(dev, state);
+ if (!ret) {
+ pci_update_current_state(dev, state);
+ error = 0;
+ }
+ }
+
+ return error;
+}
+
+/**
+ * pci_choose_state - Choose the power state of a PCI device
+ * @dev: PCI device to be suspended
+ * @state: target sleep state for the whole system. This is the value
+ * that is passed to suspend() function.
+ *
+ * Returns PCI power state suitable for given device and given system
+ * message.
+ */
+
+pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
+{
+ pci_power_t ret;
+
+ if (!pci_find_capability(dev, PCI_CAP_ID_PM))
+ return PCI_D0;
+
+ ret = platform_pci_choose_state(dev);
+ if (ret != PCI_POWER_ERROR)
+ return ret;
+
+ switch (state.event) {
+ case PM_EVENT_ON:
+ return PCI_D0;
+ case PM_EVENT_FREEZE:
+ case PM_EVENT_PRETHAW:
+ /* REVISIT both freeze and pre-thaw "should" use D0 */
+ case PM_EVENT_SUSPEND:
+ case PM_EVENT_HIBERNATE:
+ return PCI_D3hot;
+ default:
+ dev_info(&dev->dev, "unrecognized suspend event %d\n",
+ state.event);
+ BUG();
+ }
+ return PCI_D0;
+}
+
+EXPORT_SYMBOL(pci_choose_state);
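+
+/*
+ * Illustrative sketch (not part of the original file): a legacy suspend
+ * handler typically combines this with pci_save_state() and
+ * pci_set_power_state() (handler name hypothetical):
+ *
+ *   static int foo_suspend(struct pci_dev *pdev, pm_message_t state)
+ *   {
+ *           pci_save_state(pdev);
+ *           pci_set_power_state(pdev, pci_choose_state(pdev, state));
+ *           return 0;
+ *   }
+ */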
+
+static int pci_save_pcie_state(struct pci_dev *dev)
+{
+ int pos, i = 0;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (pos <= 0)
+ return 0;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ if (!save_state) {
+ dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+ cap = (u16 *)&save_state->data[0];
+
+ pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
+ pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
+
+ return 0;
+}
+
+static void pci_restore_pcie_state(struct pci_dev *dev)
+{
+ int i = 0, pos;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!save_state || pos <= 0)
+ return;
+ cap = (u16 *)&save_state->data[0];
+
+ pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
+ pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
+ pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
+ pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
+}
+
+
+static int pci_save_pcix_state(struct pci_dev *dev)
+{
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (pos <= 0)
+ return 0;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
+ if (!save_state) {
+ dev_err(&dev->dev, "buffer not found in %s\n", __FUNCTION__);
+ return -ENOMEM;
+ }
+
+ pci_read_config_word(dev, pos + PCI_X_CMD, (u16 *)save_state->data);
+
+ return 0;
+}
+
+static void pci_restore_pcix_state(struct pci_dev *dev)
+{
+ int i = 0, pos;
+ struct pci_cap_saved_state *save_state;
+ u16 *cap;
+
+ save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!save_state || pos <= 0)
+ return;
+ cap = (u16 *)&save_state->data[0];
+
+ pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
+}
+
+
+/**
+ * pci_save_state - save the PCI configuration space of a device before suspending
+ * @dev: PCI device that we're dealing with
+ */
+int
+pci_save_state(struct pci_dev *dev)
+{
+ int i;
+ /* XXX: 100% dword access ok here? */
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(dev, i * 4,&dev->saved_config_space[i]);
+ dev->state_saved = true;
+ if ((i = pci_save_pcie_state(dev)) != 0)
+ return i;
+ if ((i = pci_save_pcix_state(dev)) != 0)
+ return i;
+ return 0;
+}
+
+/**
+ * pci_restore_state - Restore the saved state of a PCI device
+ * @dev: PCI device that we're dealing with
+ */
+int
+pci_restore_state(struct pci_dev *dev)
+{
+ int i;
+ u32 val;
+
+ /* PCI Express register must be restored first */
+ pci_restore_pcie_state(dev);
+
+ /*
+ * The Base Address register should be programmed before the command
+ * register(s)
+ */
+ for (i = 15; i >= 0; i--) {
+ pci_read_config_dword(dev, i * 4, &val);
+ if (val != dev->saved_config_space[i]) {
+ dev_printk(KERN_DEBUG, &dev->dev, "restoring config "
+ "space at offset %#x (was %#x, writing %#x)\n",
+ i, val, (int)dev->saved_config_space[i]);
+ pci_write_config_dword(dev,i * 4,
+ dev->saved_config_space[i]);
+ }
+ }
+ pci_restore_pcix_state(dev);
+ pci_restore_msi_state(dev);
+
+ return 0;
+}
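+
+/*
+ * Illustrative sketch (not part of the original file): the resume
+ * counterpart of the legacy save/choose/set pattern shown earlier
+ * (handler name hypothetical):
+ *
+ *   static int foo_resume(struct pci_dev *pdev)
+ *   {
+ *           pci_set_power_state(pdev, PCI_D0);
+ *           pci_restore_state(pdev);
+ *           return pci_enable_device(pdev);
+ *   }
+ */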
+
+static int do_pci_enable_device(struct pci_dev *dev, int bars)
+{
+ int err;
+
+ err = pci_set_power_state(dev, PCI_D0);
+ if (err < 0 && err != -EIO)
+ return err;
+ err = pcibios_enable_device(dev, bars);
+ if (err < 0)
+ return err;
+ pci_fixup_device(pci_fixup_enable, dev);
+
+ return 0;
+}
+
+/**
+ * pci_reenable_device - Resume abandoned device
+ * @dev: PCI device to be resumed
+ *
+ * Note this function is a backend of pci_default_resume and is not supposed
+ * to be called by normal code; write a proper resume handler and use it
+ * instead.
+ */
+int pci_reenable_device(struct pci_dev *dev)
+{
+ if (atomic_read(&dev->enable_cnt))
+ return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
+ return 0;
+}
+
+static int __pci_enable_device_flags(struct pci_dev *dev,
+ resource_size_t flags)
+{
+ int err;
+ int i, bars = 0;
+
+ if (atomic_add_return(1, &dev->enable_cnt) > 1)
+ return 0; /* already enabled */
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (dev->resource[i].flags & flags)
+ bars |= (1 << i);
+
+ err = do_pci_enable_device(dev, bars);
+ if (err < 0)
+ atomic_dec(&dev->enable_cnt);
+ return err;
+}
+
+/**
+ * pci_enable_device_io - Initialize a device for use with IO space
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ */
+int pci_enable_device_io(struct pci_dev *dev)
+{
+ return __pci_enable_device_flags(dev, IORESOURCE_IO);
+}
+
+/**
+ * pci_enable_device_mem - Initialize a device for use with Memory space
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable Memory resources. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ */
+int pci_enable_device_mem(struct pci_dev *dev)
+{
+ return __pci_enable_device_flags(dev, IORESOURCE_MEM);
+}
+
+/** pci_enable_device() is implemented by the DDE. */
+#ifndef DDE_LINUX
+/**
+ * pci_enable_device - Initialize device before it's used by a driver.
+ * @dev: PCI device to be initialized
+ *
+ * Initialize device before it's used by a driver. Ask low-level code
+ * to enable I/O and memory. Wake up the device if it was suspended.
+ * Beware, this function can fail.
+ *
+ * Note we don't actually enable the device many times if we call
+ * this function repeatedly (we just increment the count).
+ */
+int pci_enable_device(struct pci_dev *dev)
+{
+ return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
+}
+#endif
+
+/*
+ * Managed PCI resources. This manages device on/off, intx/msi/msix
+ * on/off and BAR regions. pci_dev itself records msi/msix status, so
+ * there's no need to track it separately. pci_devres is initialized
+ * when a device is enabled using managed PCI device enable interface.
+ */
+struct pci_devres {
+ unsigned int enabled:1;
+ unsigned int pinned:1;
+ unsigned int orig_intx:1;
+ unsigned int restore_intx:1;
+ u32 region_mask;
+};
+
+static void pcim_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+ struct pci_devres *this = res;
+ int i;
+
+ if (dev->msi_enabled)
+ pci_disable_msi(dev);
+ if (dev->msix_enabled)
+ pci_disable_msix(dev);
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+ if (this->region_mask & (1 << i))
+ pci_release_region(dev, i);
+
+ if (this->restore_intx)
+ pci_intx(dev, this->orig_intx);
+
+ if (this->enabled && !this->pinned)
+ pci_disable_device(dev);
+}
+
+static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
+{
+ struct pci_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ if (dr)
+ return dr;
+
+ new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ return devres_get(&pdev->dev, new_dr, NULL, NULL);
+}
+
+static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
+{
+ if (pci_is_managed(pdev))
+ return devres_find(&pdev->dev, pcim_release, NULL, NULL);
+ return NULL;
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Managed pci_enable_device(): the device is disabled automatically on
+ * driver detach unless it has been pinned with pcim_pin_device().
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+ int rc;
+
+ dr = get_pci_dr(pdev);
+ if (unlikely(!dr))
+ return -ENOMEM;
+ if (dr->enabled)
+ return 0;
+
+ rc = pci_enable_device(pdev);
+ if (!rc) {
+ pdev->is_managed = 1;
+ dr->enabled = 1;
+ }
+ return rc;
+}
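+
+/*
+ * Illustrative sketch (not part of the original file): with the managed
+ * variant, a probe() error path needs no explicit pci_disable_device(),
+ * since pcim_release() above disables the device on driver detach
+ * (names hypothetical):
+ *
+ *   static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+ *   {
+ *           int rc = pcim_enable_device(pdev);
+ *
+ *           if (rc)
+ *                   return rc;
+ *           return foo_hw_init(pdev);
+ *   }
+ */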
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on
+ * driver detach. @pdev must have been enabled with
+ * pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(pdev);
+ WARN_ON(!dr || !dr->enabled);
+ if (dr)
+ dr->pinned = 1;
+}
+
+#ifndef DDE_LINUX
+/**
+ * pcibios_disable_device - disable arch specific PCI resources for device dev
+ * @dev: the PCI device to disable
+ *
+ * Disables architecture specific PCI resources for the device. This
+ * is the default implementation. Architecture implementations can
+ * override this.
+ */
+void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
+
+static void do_pci_disable_device(struct pci_dev *dev)
+{
+ u16 pci_command;
+
+ pci_read_config_word(dev, PCI_COMMAND, &pci_command);
+ if (pci_command & PCI_COMMAND_MASTER) {
+ pci_command &= ~PCI_COMMAND_MASTER;
+ pci_write_config_word(dev, PCI_COMMAND, pci_command);
+ }
+
+ pcibios_disable_device(dev);
+}
+
+/**
+ * pci_disable_enabled_device - Disable device without updating enable_cnt
+ * @dev: PCI device to disable
+ *
+ * NOTE: This function is a backend of PCI power management routines and is
+ * not supposed to be called by drivers.
+ */
+void pci_disable_enabled_device(struct pci_dev *dev)
+{
+ if (atomic_read(&dev->enable_cnt))
+ do_pci_disable_device(dev);
+}
+
+/**
+ * pci_disable_device - Disable PCI device after use
+ * @dev: PCI device to be disabled
+ *
+ * Signal to the system that the PCI device is not in use by the system
+ * anymore. This only involves disabling PCI bus-mastering, if active.
+ *
+ * Note we don't actually disable the device until all callers of
+ * pci_enable_device() have called pci_disable_device().
+ */
+void
+pci_disable_device(struct pci_dev *dev)
+{
+ struct pci_devres *dr;
+
+ dr = find_pci_dr(dev);
+ if (dr)
+ dr->enabled = 0;
+
+ if (atomic_sub_return(1, &dev->enable_cnt) != 0)
+ return;
+
+ do_pci_disable_device(dev);
+
+ dev->is_busmaster = 0;
+}
+
+/**
+ * pcibios_set_pcie_reset_state - set reset state for device dev
+ * @dev: the PCI-E device whose reset state to set
+ * @state: Reset state to enter into
+ *
+ * Sets the PCI-E reset state for the device. This is the default
+ * implementation. Architecture implementations can override this.
+ */
+int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
+ enum pcie_reset_state state)
+{
+ return -EINVAL;
+}
+#endif
+
+/**
+ * pci_set_pcie_reset_state - set reset state for device dev
+ * @dev: the PCI-E device whose reset state to set
+ * @state: Reset state to enter into
+ *
+ * Sets the PCI reset state for the device.
+ */
+int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
+{
+ return pcibios_set_pcie_reset_state(dev, state);
+}
+
+/**
+ * pci_pme_capable - check the capability of PCI device to generate PME#
+ * @dev: PCI device to handle.
+ * @state: PCI state from which device will issue PME#.
+ */
+bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
+{
+ if (!dev->pm_cap)
+ return false;
+
+ return !!(dev->pme_support & (1 << state));
+}
+
+/**
+ * pci_pme_active - enable or disable PCI device's PME# function
+ * @dev: PCI device to handle.
+ * @enable: 'true' to enable PME# generation; 'false' to disable it.
+ *
+ * The caller must verify that the device is capable of generating PME# before
+ * calling this function with @enable equal to 'true'.
+ */
+void pci_pme_active(struct pci_dev *dev, bool enable)
+{
+ u16 pmcsr;
+
+ if (!dev->pm_cap)
+ return;
+
+ pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
+ /* Clear PME_Status by writing 1 to it and enable PME# */
+ pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
+ if (!enable)
+ pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
+
+ pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
+
+ dev_printk(KERN_INFO, &dev->dev, "PME# %s\n",
+ enable ? "enabled" : "disabled");
+}
+
+/**
+ * pci_enable_wake - enable PCI device as wakeup event source
+ * @dev: PCI device affected
+ * @state: PCI state from which device will issue wakeup events
+ * @enable: True to enable event generation; false to disable
+ *
+ * This enables the device as a wakeup event source, or disables it.
+ * When such events involve platform-specific hooks, those hooks are
+ * called automatically by this routine.
+ *
+ * Devices with legacy power management (no standard PCI PM capabilities)
+ * always require such platform hooks.
+ *
+ * RETURN VALUE:
+ * 0 is returned on success
+ * -EINVAL is returned if device is not supposed to wake up the system
+ * A platform-dependent error code is returned if both the platform and
+ * the native mechanism fail to enable the generation of wake-up events
+ */
+int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
+{
+ int error = 0;
+ bool pme_done = false;
+
+ if (enable && !device_may_wakeup(&dev->dev))
+ return -EINVAL;
+
+ /*
+ * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
+ * Anderson we should be doing PME# wake enable followed by ACPI wake
+ * enable. To disable wake-up we call the platform first, for symmetry.
+ */
+
+ if (!enable && platform_pci_can_wakeup(dev))
+ error = platform_pci_sleep_wake(dev, false);
+
+ if (!enable || pci_pme_capable(dev, state)) {
+ pci_pme_active(dev, enable);
+ pme_done = true;
+ }
+
+ if (enable && platform_pci_can_wakeup(dev))
+ error = platform_pci_sleep_wake(dev, true);
+
+ return pme_done ? 0 : error;
+}
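+
+/*
+ * Illustrative sketch (not part of the original file): a suspend handler
+ * arming wake-up only when user space has enabled it for the device:
+ *
+ *   if (device_may_wakeup(&pdev->dev))
+ *           pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
+ *   else
+ *           pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
+ */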
+
+/**
+ * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
+ * @dev: PCI device to prepare
+ * @enable: True to enable wake-up event generation; false to disable
+ *
+ * Many drivers want the device to wake up the system from D3_hot or D3_cold
+ * and this function allows them to set that up cleanly - pci_enable_wake()
+ * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
+ * ordering constraints.
+ *
+ * This function only returns error code if the device is not capable of
+ * generating PME# from both D3_hot and D3_cold, and the platform is unable to
+ * enable wake-up power for it.
+ */
+int pci_wake_from_d3(struct pci_dev *dev, bool enable)
+{
+ return pci_pme_capable(dev, PCI_D3cold) ?
+ pci_enable_wake(dev, PCI_D3cold, enable) :
+ pci_enable_wake(dev, PCI_D3hot, enable);
+}
+
+/**
+ * pci_target_state - find an appropriate low power state for a given PCI dev
+ * @dev: PCI device
+ *
+ * Use underlying platform code to find a supported low power state for @dev.
+ * If the platform can't manage @dev, return the deepest state from which it
+ * can generate wake events, based on any available PME info.
+ */
+pci_power_t pci_target_state(struct pci_dev *dev)
+{
+ pci_power_t target_state = PCI_D3hot;
+
+ if (platform_pci_power_manageable(dev)) {
+ /*
+ * Call the platform to choose the target state of the device
+ * and enable wake-up from this state if supported.
+ */
+ pci_power_t state = platform_pci_choose_state(dev);
+
+ switch (state) {
+ case PCI_POWER_ERROR:
+ case PCI_UNKNOWN:
+ break;
+ case PCI_D1:
+ case PCI_D2:
+ if (pci_no_d1d2(dev))
+ break;
+ default:
+ target_state = state;
+ }
+ } else if (device_may_wakeup(&dev->dev)) {
+ /*
+ * Find the deepest state from which the device can generate
+ * wake-up events, make it the target state and enable device
+ * to generate PME#.
+ */
+ if (!dev->pm_cap)
+ return PCI_POWER_ERROR;
+
+ if (dev->pme_support) {
+ while (target_state
+ && !(dev->pme_support & (1 << target_state)))
+ target_state--;
+ }
+ }
+
+ return target_state;
+}
+
+/**
+ * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
+ * @dev: Device to handle.
+ *
+ * Choose the power state appropriate for the device depending on whether
+ * it can wake up the system and/or is power manageable by the platform
+ * (PCI_D3hot is the default) and put the device into that state.
+ */
+int pci_prepare_to_sleep(struct pci_dev *dev)
+{
+ pci_power_t target_state = pci_target_state(dev);
+ int error;
+
+ if (target_state == PCI_POWER_ERROR)
+ return -EIO;
+
+ pci_enable_wake(dev, target_state, true);
+
+ error = pci_set_power_state(dev, target_state);
+
+ if (error)
+ pci_enable_wake(dev, target_state, false);
+
+ return error;
+}
+
+/**
+ * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
+ * @dev: Device to handle.
+ *
+ * Disable the device's system wake-up capability and put it into D0.
+ */
+int pci_back_from_sleep(struct pci_dev *dev)
+{
+ pci_enable_wake(dev, PCI_D0, false);
+ return pci_set_power_state(dev, PCI_D0);
+}
+
+/**
+ * pci_pm_init - Initialize PM functions of given PCI device
+ * @dev: PCI device to handle.
+ */
+void pci_pm_init(struct pci_dev *dev)
+{
+ int pm;
+ u16 pmc;
+
+ dev->pm_cap = 0;
+
+ /* find PCI PM capability in list */
+ pm = pci_find_capability(dev, PCI_CAP_ID_PM);
+ if (!pm)
+ return;
+ /* Check device's ability to generate PME# */
+ pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
+
+ if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
+ dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
+ pmc & PCI_PM_CAP_VER_MASK);
+ return;
+ }
+
+ dev->pm_cap = pm;
+
+ dev->d1_support = false;
+ dev->d2_support = false;
+ if (!pci_no_d1d2(dev)) {
+ if (pmc & PCI_PM_CAP_D1)
+ dev->d1_support = true;
+ if (pmc & PCI_PM_CAP_D2)
+ dev->d2_support = true;
+
+ if (dev->d1_support || dev->d2_support)
+ dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
+ dev->d1_support ? " D1" : "",
+ dev->d2_support ? " D2" : "");
+ }
+
+ pmc &= PCI_PM_CAP_PME_MASK;
+ if (pmc) {
+ dev_info(&dev->dev, "PME# supported from%s%s%s%s%s\n",
+ (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
+ (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
+ (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
+ (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
+ (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
+ dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
+ /*
+ * Make device's PM flags reflect the wake-up capability, but
+ * let the user space enable it to wake up the system as needed.
+ */
+ device_set_wakeup_capable(&dev->dev, true);
+ device_set_wakeup_enable(&dev->dev, false);
+ /* Disable the PME# generation functionality */
+ pci_pme_active(dev, false);
+ } else {
+ dev->pme_support = 0;
+ }
+}
+
+/**
+ * platform_pci_wakeup_init - init platform wakeup if present
+ * @dev: PCI device
+ *
+ * Some devices don't have PCI PM caps but can still generate wakeup
+ * events through platform methods (like ACPI events). If @dev supports
+ * platform wakeup events, set the device flag to indicate as much. This
+ * may be redundant if the device also supports PCI PM caps, but double
+ * initialization should be safe in that case.
+ */
+void platform_pci_wakeup_init(struct pci_dev *dev)
+{
+ if (!platform_pci_can_wakeup(dev))
+ return;
+
+ device_set_wakeup_capable(&dev->dev, true);
+ device_set_wakeup_enable(&dev->dev, false);
+ platform_pci_sleep_wake(dev, false);
+}
+
+
+/**
+ * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
+ * @dev: the PCI device
+ * @cap: the capability to allocate the buffer for
+ * @size: requested size of the buffer
+ */
+static int pci_add_cap_save_buffer(struct pci_dev *dev, char cap,
+				   unsigned int size)
+{
+ int pos;
+ struct pci_cap_saved_state *save_state;
+
+ pos = pci_find_capability(dev, cap);
+ if (pos <= 0)
+ return 0;
+
+ save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
+ if (!save_state)
+ return -ENOMEM;
+
+ save_state->cap_nr = cap;
+ pci_add_saved_cap(dev, save_state);
+
+ return 0;
+}
+
+/**
+ * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
+ * @dev: the PCI device
+ */
+void pci_allocate_cap_save_buffers(struct pci_dev *dev)
+{
+ int error;
+
+ error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP, 4 * sizeof(u16));
+ if (error)
+ dev_err(&dev->dev,
+ "unable to preallocate PCI Express save buffer\n");
+
+ error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
+ if (error)
+ dev_err(&dev->dev,
+ "unable to preallocate PCI-X save buffer\n");
+}
+
+/**
+ * pci_restore_standard_config - restore standard config registers of PCI device
+ * @dev: PCI device to handle
+ *
+ * This function assumes that the device's configuration space is accessible.
+ * If the device needs to be powered up, the function will wait for the
+ * transition into D0 to complete before restoring its registers.
+ */
+int pci_restore_standard_config(struct pci_dev *dev)
+{
+ pci_power_t prev_state;
+ int error;
+
+ pci_update_current_state(dev, PCI_D0);
+
+ prev_state = dev->current_state;
+ if (prev_state == PCI_D0)
+ goto Restore;
+
+ error = pci_raw_set_power_state(dev, PCI_D0, false);
+ if (error)
+ return error;
+
+ /*
+ * This assumes that we won't get a bus in B2 or B3 from the BIOS, but
+ * we've made this assumption forever and it appears to be universally
+ * satisfied.
+ */
+ switch(prev_state) {
+ case PCI_D3cold:
+ case PCI_D3hot:
+ mdelay(pci_pm_d3_delay);
+ break;
+ case PCI_D2:
+ udelay(PCI_PM_D2_DELAY);
+ break;
+ }
+
+ pci_update_current_state(dev, PCI_D0);
+
+ Restore:
+ return dev->state_saved ? pci_restore_state(dev) : 0;
+}
+
+/**
+ * pci_enable_ari - enable ARI forwarding if hardware supports it
+ * @dev: the PCI device
+ */
+void pci_enable_ari(struct pci_dev *dev)
+{
+ int pos;
+ u32 cap;
+ u16 ctrl;
+ struct pci_dev *bridge;
+
+ if (!dev->is_pcie || dev->devfn)
+ return;
+
+ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
+ if (!pos)
+ return;
+
+ bridge = dev->bus->self;
+ if (!bridge || !bridge->is_pcie)
+ return;
+
+ pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+
+ pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
+ if (!(cap & PCI_EXP_DEVCAP2_ARI))
+ return;
+
+ pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
+ ctrl |= PCI_EXP_DEVCTL2_ARI;
+ pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);
+
+ bridge->ari_enabled = 1;
+}
+
+/**
+ * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
+ * @dev: the PCI device
+ * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device behind one level of bridge. This is
+ * required by section 9.1 of the PCI-to-PCI bridge specification for devices
+ * behind bridges on add-in cards.
+ */
+u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin)
+{
+ return (((pin - 1) + PCI_SLOT(dev->devfn)) % 4) + 1;
+}
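+
+/*
+ * Worked example: a device in slot 2 asserting INTB (pin == 2) behind a
+ * single bridge is seen on the primary side as pin
+ * ((2 - 1) + 2) % 4 + 1 == 4, i.e. INTD.
+ */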
+
+int
+pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
+{
+ u8 pin;
+
+ pin = dev->pin;
+ if (!pin)
+ return -1;
+
+ while (dev->bus->self) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *bridge = dev;
+ return pin;
+}
+
+/**
+ * pci_common_swizzle - swizzle INTx all the way to root bridge
+ * @dev: the PCI device
+ * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
+ *
+ * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
+ * bridges all the way up to a PCI root bus.
+ */
+u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (dev->bus->self) {
+ pin = pci_swizzle_interrupt_pin(dev, pin);
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+ return PCI_SLOT(dev->devfn);
+}
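+
+/*
+ * Illustrative use: architecture interrupt-routing code typically feeds
+ * the result into its routing table lookup:
+ *
+ *	u8 pin = dev->pin;
+ *	u8 slot = pci_common_swizzle(dev, &pin);
+ *
+ * and then maps the (slot, pin) pair on the root bus to an IRQ.
+ */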
+
+/**
+ * pci_release_region - Release a PCI BAR
+ * @pdev: PCI device whose resources were previously reserved by pci_request_region
+ * @bar: BAR to release
+ *
+ * Releases the PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_region. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+void pci_release_region(struct pci_dev *pdev, int bar)
+{
+ struct pci_devres *dr;
+
+ if (pci_resource_len(pdev, bar) == 0)
+ return;
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
+ release_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+ else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
+ release_mem_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar));
+
+ dr = find_pci_dr(pdev);
+ if (dr)
+ dr->region_mask &= ~(1 << bar);
+}
+
+/**
+ * __pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource.
+ * @exclusive: whether the region access is exclusive or not
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * If @exclusive is set, then the region is marked so that userspace
+ * is explicitly not allowed to map the resource via /dev/mem or
+ * sysfs MMIO access.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
+ int exclusive)
+{
+ struct pci_devres *dr;
+
+ if (pci_resource_len(pdev, bar) == 0)
+ return 0;
+
+ if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
+ if (!request_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar), res_name))
+ goto err_out;
+ }
+ else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
+ if (!__request_mem_region(pci_resource_start(pdev, bar),
+ pci_resource_len(pdev, bar), res_name,
+ exclusive))
+ goto err_out;
+ }
+
+ dr = find_pci_dr(pdev);
+ if (dr)
+ dr->region_mask |= 1 << bar;
+
+ return 0;
+
+err_out:
+ dev_warn(&pdev->dev, "BAR %d: can't reserve %s region %pR\n",
+ bar,
+ pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
+ &pdev->resource[bar]);
+ return -EBUSY;
+}
+
+/**
+ * pci_request_region - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
+{
+ return __pci_request_region(pdev, bar, res_name, 0);
+}
+
+/**
+ * pci_request_region_exclusive - Reserve PCI I/O and memory resource
+ * @pdev: PCI device whose resources are to be reserved
+ * @bar: BAR to be reserved
+ * @res_name: Name to be associated with resource.
+ *
+ * Mark the PCI region associated with PCI device @pdev BAR @bar as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ *
+ * The key difference _exclusive makes is that userspace is
+ * explicitly not allowed to map the resource via /dev/mem or
+ * sysfs.
+ */
+int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
+{
+ return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
+}
+
+/**
+ * pci_release_selected_regions - Release selected PCI I/O and memory resources
+ * @pdev: PCI device whose resources were previously reserved
+ * @bars: Bitmask of BARs to be released
+ *
+ * Release selected PCI I/O and memory resources previously reserved.
+ * Call this function only after all use of the PCI regions has ceased.
+ */
+void pci_release_selected_regions(struct pci_dev *pdev, int bars)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (bars & (1 << i))
+ pci_release_region(pdev, i);
+}
+
+int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
+ const char *res_name, int excl)
+{
+ int i;
+
+ for (i = 0; i < 6; i++)
+ if (bars & (1 << i))
+ if (__pci_request_region(pdev, i, res_name, excl))
+ goto err_out;
+ return 0;
+
+err_out:
+ while(--i >= 0)
+ if (bars & (1 << i))
+ pci_release_region(pdev, i);
+
+ return -EBUSY;
+}
+
+
+/**
+ * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @bars: Bitmask of BARs to be requested
+ * @res_name: Name to be associated with resource
+ */
+int pci_request_selected_regions(struct pci_dev *pdev, int bars,
+ const char *res_name)
+{
+ return __pci_request_selected_regions(pdev, bars, res_name, 0);
+}
+
+int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
+ int bars, const char *res_name)
+{
+ return __pci_request_selected_regions(pdev, bars, res_name,
+ IORESOURCE_EXCLUSIVE);
+}
+
+/**
+ * pci_release_regions - Release reserved PCI I/O and memory resources
+ * @pdev: PCI device whose resources were previously reserved by pci_request_regions
+ *
+ * Releases all PCI I/O and memory resources previously reserved by a
+ * successful call to pci_request_regions. Call this function only
+ * after all use of the PCI regions has ceased.
+ */
+void pci_release_regions(struct pci_dev *pdev)
+{
+ pci_release_selected_regions(pdev, (1 << 6) - 1);
+}
+
+/**
+ * pci_request_regions - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
+ *
+ * Mark all PCI regions associated with PCI device @pdev as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+int pci_request_regions(struct pci_dev *pdev, const char *res_name)
+{
+ return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
+}
+
+/**
+ * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
+ * @pdev: PCI device whose resources are to be reserved
+ * @res_name: Name to be associated with resource.
+ *
+ * Mark all PCI regions associated with PCI device @pdev as
+ * being reserved by owner @res_name. Do not access any
+ * address inside the PCI regions unless this call returns
+ * successfully.
+ *
+ * pci_request_regions_exclusive() will mark the region so that
+ * /dev/mem and the sysfs MMIO access will not be allowed.
+ *
+ * Returns 0 on success, or %EBUSY on error. A warning
+ * message is also printed on failure.
+ */
+int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
+{
+ return pci_request_selected_regions_exclusive(pdev,
+ ((1 << 6) - 1), res_name);
+}
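+
+/*
+ * Illustrative sketch (the foo names are hypothetical): the usual probe()
+ * pattern is to enable the device, claim its regions, and only then map
+ * a BAR:
+ *
+ *	static int __devinit foo_probe(struct pci_dev *pdev,
+ *				       const struct pci_device_id *id)
+ *	{
+ *		void __iomem *regs;
+ *		int err;
+ *
+ *		err = pci_enable_device(pdev);
+ *		if (err)
+ *			return err;
+ *		err = pci_request_regions(pdev, "foo");
+ *		if (err) {
+ *			pci_disable_device(pdev);
+ *			return err;
+ *		}
+ *		regs = pci_iomap(pdev, 0, 0);
+ *		... device-specific setup using regs ...
+ *		return 0;
+ *	}
+ */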
+
+static void __pci_set_master(struct pci_dev *dev, bool enable)
+{
+ u16 old_cmd, cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
+ if (enable)
+ cmd = old_cmd | PCI_COMMAND_MASTER;
+ else
+ cmd = old_cmd & ~PCI_COMMAND_MASTER;
+ if (cmd != old_cmd) {
+ dev_dbg(&dev->dev, "%s bus mastering\n",
+ enable ? "enabling" : "disabling");
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ dev->is_busmaster = enable;
+}
+
+/**
+ * pci_set_master - enables bus-mastering for device dev
+ * @dev: the PCI device to enable
+ *
+ * Enables bus-mastering on the device and calls pcibios_set_master()
+ * to do the needed arch-specific settings.
+ */
+void pci_set_master(struct pci_dev *dev)
+{
+ __pci_set_master(dev, true);
+ pcibios_set_master(dev);
+}
+
+/**
+ * pci_clear_master - disables bus-mastering for device dev
+ * @dev: the PCI device to disable
+ */
+void pci_clear_master(struct pci_dev *dev)
+{
+ __pci_set_master(dev, false);
+}
+
+#ifdef PCI_DISABLE_MWI
+int pci_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ return 0;
+}
+
+void pci_clear_mwi(struct pci_dev *dev)
+{
+}
+
+#else
+
+#ifndef PCI_CACHE_LINE_BYTES
+#define PCI_CACHE_LINE_BYTES L1_CACHE_BYTES
+#endif
+
+/* This can be overridden by arch code. */
+/* Don't forget this is measured in 32-bit words, not bytes */
+u8 pci_cache_line_size = PCI_CACHE_LINE_BYTES / 4;
+
+/**
+ * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
+ * @dev: the PCI device for which MWI is to be enabled
+ *
+ * Helper function for pci_set_mwi.
+ * Originally copied from drivers/net/acenic.c.
+ * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+static int
+pci_set_cacheline_size(struct pci_dev *dev)
+{
+ u8 cacheline_size;
+
+ if (!pci_cache_line_size)
+ return -EINVAL; /* The system doesn't support MWI. */
+
+	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
+	   equal to or a multiple of the right value. */
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
+ if (cacheline_size >= pci_cache_line_size &&
+ (cacheline_size % pci_cache_line_size) == 0)
+ return 0;
+
+ /* Write the correct value. */
+ pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
+ /* Read it back. */
+ pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
+ if (cacheline_size == pci_cache_line_size)
+ return 0;
+
+ dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
+ "supported\n", pci_cache_line_size << 2);
+
+ return -EINVAL;
+}
+
+/**
+ * pci_set_mwi - enables memory-write-invalidate PCI transaction
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int
+pci_set_mwi(struct pci_dev *dev)
+{
+ int rc;
+ u16 cmd;
+
+ rc = pci_set_cacheline_size(dev);
+ if (rc)
+ return rc;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (! (cmd & PCI_COMMAND_INVALIDATE)) {
+ dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
+ cmd |= PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+
+ return 0;
+}
+
+/**
+ * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
+ * @dev: the PCI device for which MWI is enabled
+ *
+ * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
+ * Callers are not required to check the return value.
+ *
+ * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
+ */
+int pci_try_set_mwi(struct pci_dev *dev)
+{
+ int rc = pci_set_mwi(dev);
+ return rc;
+}
+
+/**
+ * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
+ * @dev: the PCI device to disable
+ *
+ * Disables PCI Memory-Write-Invalidate transaction on the device
+ */
+void
+pci_clear_mwi(struct pci_dev *dev)
+{
+ u16 cmd;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INVALIDATE) {
+ cmd &= ~PCI_COMMAND_INVALIDATE;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+}
+#endif /* ! PCI_DISABLE_MWI */
+
+/**
+ * pci_intx - enables/disables PCI INTx for device dev
+ * @pdev: the PCI device to operate on
+ * @enable: boolean: whether to enable or disable PCI INTx
+ *
+ * Enables/disables PCI INTx for device dev
+ */
+void
+pci_intx(struct pci_dev *pdev, int enable)
+{
+ u16 pci_command, new;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+
+ if (enable) {
+ new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
+ } else {
+ new = pci_command | PCI_COMMAND_INTX_DISABLE;
+ }
+
+ if (new != pci_command) {
+ struct pci_devres *dr;
+
+ pci_write_config_word(pdev, PCI_COMMAND, new);
+
+ dr = find_pci_dr(pdev);
+ if (dr && !dr->restore_intx) {
+ dr->restore_intx = 1;
+ dr->orig_intx = !enable;
+ }
+ }
+}
+
+/**
+ * pci_msi_off - disables any MSI or MSI-X capabilities
+ * @dev: the PCI device to operate on
+ *
+ * If you want to use MSI, see pci_enable_msi() and friends.
+ * This is a lower-level primitive that allows us to disable
+ * MSI operation at the device level.
+ */
+void pci_msi_off(struct pci_dev *dev)
+{
+ int pos;
+ u16 control;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
+ control &= ~PCI_MSI_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
+ }
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ if (pos) {
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+ }
+}
+
+#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
+/*
+ * These can be overridden by arch-specific implementations
+ */
+int
+pci_set_dma_mask(struct pci_dev *dev, u64 mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+
+ dev->dma_mask = mask;
+
+ return 0;
+}
+
+int
+pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
+{
+ if (!pci_dma_supported(dev, mask))
+ return -EIO;
+
+ dev->dev.coherent_dma_mask = mask;
+
+ return 0;
+}
+#endif
+
+#ifndef HAVE_ARCH_PCI_SET_DMA_MAX_SEGMENT_SIZE
+int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
+{
+ return dma_set_max_seg_size(&dev->dev, size);
+}
+EXPORT_SYMBOL(pci_set_dma_max_seg_size);
+#endif
+
+#ifndef HAVE_ARCH_PCI_SET_DMA_SEGMENT_BOUNDARY
+int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
+{
+ return dma_set_seg_boundary(&dev->dev, mask);
+}
+EXPORT_SYMBOL(pci_set_dma_seg_boundary);
+#endif
+
+static int __pcie_flr(struct pci_dev *dev, int probe)
+{
+ u16 status;
+ u32 cap;
+ int exppos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+
+ if (!exppos)
+ return -ENOTTY;
+ pci_read_config_dword(dev, exppos + PCI_EXP_DEVCAP, &cap);
+ if (!(cap & PCI_EXP_DEVCAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_block_user_cfg_access(dev);
+
+	/* Wait for the Transaction Pending bit to clear */
+ msleep(100);
+ pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
+ if (status & PCI_EXP_DEVSTA_TRPND) {
+ dev_info(&dev->dev, "Busy after 100ms while trying to reset; "
+ "sleeping for 1 second\n");
+ ssleep(1);
+ pci_read_config_word(dev, exppos + PCI_EXP_DEVSTA, &status);
+ if (status & PCI_EXP_DEVSTA_TRPND)
+ dev_info(&dev->dev, "Still busy after 1s; "
+ "proceeding with reset anyway\n");
+ }
+
+ pci_write_config_word(dev, exppos + PCI_EXP_DEVCTL,
+ PCI_EXP_DEVCTL_BCR_FLR);
+ mdelay(100);
+
+ pci_unblock_user_cfg_access(dev);
+ return 0;
+}
+
+static int __pci_af_flr(struct pci_dev *dev, int probe)
+{
+ int cappos = pci_find_capability(dev, PCI_CAP_ID_AF);
+ u8 status;
+ u8 cap;
+
+ if (!cappos)
+ return -ENOTTY;
+ pci_read_config_byte(dev, cappos + PCI_AF_CAP, &cap);
+ if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
+ return -ENOTTY;
+
+ if (probe)
+ return 0;
+
+ pci_block_user_cfg_access(dev);
+
+	/* Wait for the Transaction Pending bit to clear */
+ msleep(100);
+ pci_read_config_byte(dev, cappos + PCI_AF_STATUS, &status);
+ if (status & PCI_AF_STATUS_TP) {
+ dev_info(&dev->dev, "Busy after 100ms while trying to"
+ " reset; sleeping for 1 second\n");
+ ssleep(1);
+ pci_read_config_byte(dev,
+ cappos + PCI_AF_STATUS, &status);
+ if (status & PCI_AF_STATUS_TP)
+ dev_info(&dev->dev, "Still busy after 1s; "
+ "proceeding with reset anyway\n");
+ }
+ pci_write_config_byte(dev, cappos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
+ mdelay(100);
+
+ pci_unblock_user_cfg_access(dev);
+ return 0;
+}
+
+static int __pci_reset_function(struct pci_dev *pdev, int probe)
+{
+ int res;
+
+ res = __pcie_flr(pdev, probe);
+ if (res != -ENOTTY)
+ return res;
+
+ res = __pci_af_flr(pdev, probe);
+ if (res != -ENOTTY)
+ return res;
+
+ return res;
+}
+
+/**
+ * pci_execute_reset_function() - Reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * The device function is presumed to be unused when this function is called.
+ * Resetting the device will make the contents of PCI configuration space
+ * random, so any caller of this must be prepared to reinitialise the
+ * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
+ * etc.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_execute_reset_function(struct pci_dev *dev)
+{
+ return __pci_reset_function(dev, 0);
+}
+EXPORT_SYMBOL_GPL(pci_execute_reset_function);
+
+/**
+ * pci_reset_function() - quiesce and reset a PCI device function
+ * @dev: Device function to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * This function does not just reset the PCI portion of a device, but
+ * clears all the state associated with the device. This function differs
+ * from pci_execute_reset_function in that it saves and restores device state
+ * over the reset.
+ *
+ * Returns 0 if the device function was successfully reset or -ENOTTY if the
+ * device doesn't support resetting a single function.
+ */
+int pci_reset_function(struct pci_dev *dev)
+{
+ int r = __pci_reset_function(dev, 1);
+
+ if (r < 0)
+ return r;
+
+ if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
+ disable_irq(dev->irq);
+ pci_save_state(dev);
+
+ pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ r = pci_execute_reset_function(dev);
+
+ pci_restore_state(dev);
+ if (!dev->msi_enabled && !dev->msix_enabled && dev->irq != 0)
+ enable_irq(dev->irq);
+
+ return r;
+}
+EXPORT_SYMBOL_GPL(pci_reset_function);
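+
+/*
+ * Illustrative sketch: a recovery path that owns an otherwise quiesced
+ * function can simply do
+ *
+ *	if (pci_reset_function(pdev))
+ *		dev_err(&pdev->dev, "function-level reset not supported\n");
+ */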
+
+/**
+ * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
+ * @dev: PCI device to query
+ *
+ * Returns mmrbc: maximum designed memory read count in bytes
+ * or appropriate error value.
+ */
+int pcix_get_max_mmrbc(struct pci_dev *dev)
+{
+ int err, cap;
+ u32 stat;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ return -EINVAL;
+
+ err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
+ if (err)
+ return -EINVAL;
+
+	/* The field lives in bits 21-22 of PCI_X_STATUS; decode to bytes. */
+	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
+}
+EXPORT_SYMBOL(pcix_get_max_mmrbc);
+
+/**
+ * pcix_get_mmrbc - get PCI-X maximum memory read byte count
+ * @dev: PCI device to query
+ *
+ * Returns mmrbc: maximum memory read count in bytes
+ * or appropriate error value.
+ */
+int pcix_get_mmrbc(struct pci_dev *dev)
+{
+	int cap;
+	u16 cmd;
+
+	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+	if (!cap)
+		return -EINVAL;
+
+	/* PCI_X_CMD is a 16-bit register at a non-dword-aligned offset,
+	   so it must be read with a word-sized config access. */
+	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+		return -EINVAL;
+
+	return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+}
+EXPORT_SYMBOL(pcix_get_mmrbc);
+
+/**
+ * pcix_set_mmrbc - set PCI-X maximum memory read byte count
+ * @dev: PCI device to query
+ * @mmrbc: maximum memory read count in bytes
+ * valid values are 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum memory read byte count; some bridges have
+ * errata that prevent this.
+ */
+int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
+{
+ int cap, err = -EINVAL;
+	u32 stat, v, o;
+	u16 cmd;
+
+ if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
+ goto out;
+
+ v = ffs(mmrbc) - 10;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!cap)
+ goto out;
+
+ err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
+ if (err)
+ goto out;
+
+ if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
+ return -E2BIG;
+
+	/* PCI_X_CMD is a 16-bit register; use word-sized accesses. */
+	err = pci_read_config_word(dev, cap + PCI_X_CMD, &cmd);
+ if (err)
+ goto out;
+
+ o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
+ if (o != v) {
+ if (v > o && dev->bus &&
+ (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
+ return -EIO;
+
+ cmd &= ~PCI_X_CMD_MAX_READ;
+ cmd |= v << 2;
+		err = pci_write_config_word(dev, cap + PCI_X_CMD, cmd);
+ }
+out:
+ return err;
+}
+EXPORT_SYMBOL(pcix_set_mmrbc);
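+
+/*
+ * Worked example: mmrbc == 2048 gives ffs(2048) == 12, hence v == 2,
+ * which is exactly the PCI-X command encoding (512 << 2 == 2048).
+ */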
+
+/**
+ * pcie_get_readrq - get PCI Express read request size
+ * @dev: PCI device to query
+ *
+ * Returns maximum memory read request in bytes
+ * or appropriate error value.
+ */
+int pcie_get_readrq(struct pci_dev *dev)
+{
+ int ret, cap;
+ u16 ctl;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!cap)
+ return -EINVAL;
+
+ ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (!ret)
+ ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
+
+ return ret;
+}
+EXPORT_SYMBOL(pcie_get_readrq);
+
+/**
+ * pcie_set_readrq - set PCI Express maximum memory read request
+ * @dev: PCI device to query
+ * @rq: maximum memory read count in bytes
+ * valid values are 128, 256, 512, 1024, 2048, 4096
+ *
+ * If possible, sets the maximum memory read request size.
+ */
+int pcie_set_readrq(struct pci_dev *dev, int rq)
+{
+ int cap, err = -EINVAL;
+ u16 ctl, v;
+
+ if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
+ goto out;
+
+ v = (ffs(rq) - 8) << 12;
+
+ cap = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!cap)
+ goto out;
+
+ err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
+ if (err)
+ goto out;
+
+ if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
+ ctl &= ~PCI_EXP_DEVCTL_READRQ;
+ ctl |= v;
+		/* PCI_EXP_DEVCTL is a 16-bit register; a dword write would
+		   also clobber the adjacent Device Status register. */
+		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
+ }
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(pcie_set_readrq);
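+
+/*
+ * Worked example: rq == 4096 gives ffs(4096) == 13, so v == (13 - 8) << 12,
+ * i.e. the value 5 in DEVCTL bits 14:12 (128 << 5 == 4096). A driver just
+ * calls:
+ *
+ *	pcie_set_readrq(pdev, 4096);
+ */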
+
+/**
+ * pci_select_bars - Make BAR mask from the type of resource
+ * @dev: the PCI device for which BAR mask is made
+ * @flags: resource type mask to be selected
+ *
+ * This helper routine makes a BAR mask from the given resource type.
+ */
+int pci_select_bars(struct pci_dev *dev, unsigned long flags)
+{
+ int i, bars = 0;
+ for (i = 0; i < PCI_NUM_RESOURCES; i++)
+ if (pci_resource_flags(dev, i) & flags)
+ bars |= (1 << i);
+ return bars;
+}
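+
+/*
+ * Illustrative sketch: pci_select_bars() pairs naturally with the
+ * selected-region helpers above, e.g. to claim only the memory BARs of
+ * a device ("foo" is a placeholder owner name):
+ *
+ *	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+ *	int err = pci_request_selected_regions(pdev, bars, "foo");
+ */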
+
+/**
+ * pci_resource_bar - get position of the BAR associated with a resource
+ * @dev: the PCI device
+ * @resno: the resource number
+ * @type: the BAR type to be filled in
+ *
+ * Returns BAR position in config space, or 0 if the BAR is invalid.
+ */
+int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
+{
+ if (resno < PCI_ROM_RESOURCE) {
+ *type = pci_bar_unknown;
+ return PCI_BASE_ADDRESS_0 + 4 * resno;
+ } else if (resno == PCI_ROM_RESOURCE) {
+ *type = pci_bar_mem32;
+ return dev->rom_base_reg;
+ }
+
+ dev_err(&dev->dev, "BAR: invalid resource #%d\n", resno);
+ return 0;
+}
+
+static void __devinit pci_no_domains(void)
+{
+#ifdef CONFIG_PCI_DOMAINS
+ pci_domains_supported = 0;
+#endif
+}
+
+/**
+ * pci_ext_cfg_avail - can we access extended PCI config space?
+ * @dev: The PCI device of the root bridge.
+ *
+ * Returns 1 if we can access PCI extended config space (offsets
+ * greater than 0xff). This is the default implementation. Architecture
+ * implementations can override this.
+ */
+int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
+{
+ return 1;
+}
+
+#ifndef DDE_LINUX
+static
+#endif
+int __devinit pci_init(void)
+{
+ struct pci_dev *dev = NULL;
+
+ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+ pci_fixup_device(pci_fixup_final, dev);
+ }
+
+ return 0;
+}
+
+static int __init pci_setup(char *str)
+{
+#ifndef DDE_LINUX
+ while (str) {
+ char *k = strchr(str, ',');
+ if (k)
+ *k++ = 0;
+ if (*str && (str = pcibios_setup(str)) && *str) {
+ if (!strcmp(str, "nomsi")) {
+ pci_no_msi();
+ } else if (!strcmp(str, "noaer")) {
+ pci_no_aer();
+ } else if (!strcmp(str, "nodomains")) {
+ pci_no_domains();
+ } else if (!strncmp(str, "cbiosize=", 9)) {
+ pci_cardbus_io_size = memparse(str + 9, &str);
+ } else if (!strncmp(str, "cbmemsize=", 10)) {
+ pci_cardbus_mem_size = memparse(str + 10, &str);
+ } else {
+ printk(KERN_ERR "PCI: Unknown option `%s'\n",
+ str);
+ }
+ }
+ str = k;
+ }
+#endif
+ return 0;
+}
+early_param("pci", pci_setup);
+
+device_initcall(pci_init);
+
+EXPORT_SYMBOL(pci_reenable_device);
+EXPORT_SYMBOL(pci_enable_device_io);
+EXPORT_SYMBOL(pci_enable_device_mem);
+EXPORT_SYMBOL(pci_enable_device);
+EXPORT_SYMBOL(pcim_enable_device);
+EXPORT_SYMBOL(pcim_pin_device);
+EXPORT_SYMBOL(pci_disable_device);
+EXPORT_SYMBOL(pci_find_capability);
+EXPORT_SYMBOL(pci_bus_find_capability);
+EXPORT_SYMBOL(pci_release_regions);
+EXPORT_SYMBOL(pci_request_regions);
+EXPORT_SYMBOL(pci_request_regions_exclusive);
+EXPORT_SYMBOL(pci_release_region);
+EXPORT_SYMBOL(pci_request_region);
+EXPORT_SYMBOL(pci_request_region_exclusive);
+EXPORT_SYMBOL(pci_release_selected_regions);
+EXPORT_SYMBOL(pci_request_selected_regions);
+EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
+EXPORT_SYMBOL(pci_set_master);
+EXPORT_SYMBOL(pci_clear_master);
+EXPORT_SYMBOL(pci_set_mwi);
+EXPORT_SYMBOL(pci_try_set_mwi);
+EXPORT_SYMBOL(pci_clear_mwi);
+EXPORT_SYMBOL_GPL(pci_intx);
+EXPORT_SYMBOL(pci_set_dma_mask);
+EXPORT_SYMBOL(pci_set_consistent_dma_mask);
+EXPORT_SYMBOL(pci_assign_resource);
+EXPORT_SYMBOL(pci_find_parent_resource);
+EXPORT_SYMBOL(pci_select_bars);
+
+EXPORT_SYMBOL(pci_set_power_state);
+EXPORT_SYMBOL(pci_save_state);
+EXPORT_SYMBOL(pci_restore_state);
+EXPORT_SYMBOL(pci_pme_capable);
+EXPORT_SYMBOL(pci_pme_active);
+EXPORT_SYMBOL(pci_enable_wake);
+EXPORT_SYMBOL(pci_wake_from_d3);
+EXPORT_SYMBOL(pci_target_state);
+EXPORT_SYMBOL(pci_prepare_to_sleep);
+EXPORT_SYMBOL(pci_back_from_sleep);
+EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);
+
diff --git a/libdde-linux26/lib/src/drivers/pci/probe.c b/libdde-linux26/lib/src/drivers/pci/probe.c
new file mode 100644
index 00000000..32da5108
--- /dev/null
+++ b/libdde-linux26/lib/src/drivers/pci/probe.c
@@ -0,0 +1,1232 @@
+/*
+ * probe.c - PCI detection and setup code
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/cpumask.h>
+#include <linux/pci-aspm.h>
+#include "pci.h"
+
+#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
+#define CARDBUS_RESERVE_BUSNR 3
+
+/* Ugh. Need to stop exporting this to modules. */
+LIST_HEAD(pci_root_buses);
+EXPORT_SYMBOL(pci_root_buses);
+
+#ifdef DDE_LINUX
+#include "local.h"
+#endif
+
+static int find_anything(struct device *dev, void *data)
+{
+ return 1;
+}
+
+/*
+ * Some device drivers need to know if PCI has been initialized.
+ * Basically, we consider PCI uninitialized when no device can be
+ * found on the pci_bus_type bus.
+ */
+int no_pci_devices(void)
+{
+ struct device *dev;
+ int no_devices;
+
+ dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
+ no_devices = (dev == NULL);
+ put_device(dev);
+ return no_devices;
+}
+EXPORT_SYMBOL(no_pci_devices);
+
+/*
+ * PCI Bus Class Devices
+ */
+static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
+ int type,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ const struct cpumask *cpumask;
+
+ cpumask = cpumask_of_pcibus(to_pci_bus(dev));
+ ret = type?
+ cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
+ cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
+ buf[ret++] = '\n';
+ buf[ret] = '\0';
+ return ret;
+}
+
+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
+}
+
+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
+}
+
+DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL);
+DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL);
+
+/*
+ * PCI Bus Class
+ */
+static void release_pcibus_dev(struct device *dev)
+{
+ struct pci_bus *pci_bus = to_pci_bus(dev);
+
+ if (pci_bus->bridge)
+ put_device(pci_bus->bridge);
+ kfree(pci_bus);
+}
+
+static struct class pcibus_class = {
+ .name = "pci_bus",
+ .dev_release = &release_pcibus_dev,
+};
+
+static int __init pcibus_class_init(void)
+{
+ return class_register(&pcibus_class);
+}
+postcore_initcall(pcibus_class_init);
+
+/*
+ * Translate the low bits of the PCI base
+ * to the resource type
+ */
+static inline unsigned int pci_calc_resource_flags(unsigned int flags)
+{
+ if (flags & PCI_BASE_ADDRESS_SPACE_IO)
+ return IORESOURCE_IO;
+
+ if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
+ return IORESOURCE_MEM | IORESOURCE_PREFETCH;
+
+ return IORESOURCE_MEM;
+}
+
+static u64 pci_size(u64 base, u64 maxbase, u64 mask)
+{
+ u64 size = mask & maxbase; /* Find the significant bits */
+ if (!size)
+ return 0;
+
+ /* Get the lowest of them to find the decode size, and
+ from that the extent. */
+ size = (size & ~(size-1)) - 1;
+
+ /* base == maxbase can be valid only if the BAR has
+ already been programmed with all 1s. */
+ if (base == maxbase && ((base | size) & mask) != mask)
+ return 0;
+
+ return size;
+}
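+
+/*
+ * Worked example: sizing a 4 KiB 32-bit memory BAR. After the all-ones
+ * write the device returns maxbase == 0xfffff000; with mask ==
+ * PCI_BASE_ADDRESS_MEM_MASK, size == 0xfffff000, its lowest set bit is
+ * 0x1000, and (size & ~(size - 1)) - 1 == 0xfff, the decode extent.
+ */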
+
+static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
+{
+ if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
+ res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
+ return pci_bar_io;
+ }
+
+ res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
+
+ if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
+ return pci_bar_mem64;
+ return pci_bar_mem32;
+}
+
+/**
+ * pci_read_base - read a PCI BAR
+ * @dev: the PCI device
+ * @type: type of the BAR
+ * @res: resource buffer to be filled in
+ * @pos: BAR position in the config space
+ *
+ * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
+ */
+int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
+ struct resource *res, unsigned int pos)
+{
+ u32 l, sz, mask;
+
+ mask = type ? ~PCI_ROM_ADDRESS_ENABLE : ~0;
+
+ res->name = pci_name(dev);
+
+ pci_read_config_dword(dev, pos, &l);
+ pci_write_config_dword(dev, pos, mask);
+ pci_read_config_dword(dev, pos, &sz);
+ pci_write_config_dword(dev, pos, l);
+
+ /*
+ * All bits set in sz means the device isn't working properly.
+ * If the BAR isn't implemented, all bits must be 0. If it's a
+ * memory BAR or a ROM, bit 0 must be clear; if it's an I/O BAR, bit
+ * 1 must be clear.
+ */
+ if (!sz || sz == 0xffffffff)
+ goto fail;
+
+ /*
+ * I don't know how l can have all bits set. Copied from old code.
+ * Maybe it fixes a bug on some ancient platform.
+ */
+ if (l == 0xffffffff)
+ l = 0;
+
+ if (type == pci_bar_unknown) {
+ type = decode_bar(res, l);
+ res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
+ if (type == pci_bar_io) {
+ l &= PCI_BASE_ADDRESS_IO_MASK;
+ mask = PCI_BASE_ADDRESS_IO_MASK & 0xffff;
+ } else {
+ l &= PCI_BASE_ADDRESS_MEM_MASK;
+ mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
+ }
+ } else {
+ res->flags |= (l & IORESOURCE_ROM_ENABLE);
+ l &= PCI_ROM_ADDRESS_MASK;
+ mask = (u32)PCI_ROM_ADDRESS_MASK;
+ }
+
+ if (type == pci_bar_mem64) {
+ u64 l64 = l;
+ u64 sz64 = sz;
+ u64 mask64 = mask | (u64)~0 << 32;
+
+ pci_read_config_dword(dev, pos + 4, &l);
+ pci_write_config_dword(dev, pos + 4, ~0);
+ pci_read_config_dword(dev, pos + 4, &sz);
+ pci_write_config_dword(dev, pos + 4, l);
+
+ l64 |= ((u64)l << 32);
+ sz64 |= ((u64)sz << 32);
+
+ sz64 = pci_size(l64, sz64, mask64);
+
+ if (!sz64)
+ goto fail;
+
+ if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
+ dev_err(&dev->dev, "can't handle 64-bit BAR\n");
+ goto fail;
+ } else if ((sizeof(resource_size_t) < 8) && l) {
+ /* Address above 32-bit boundary; disable the BAR */
+ pci_write_config_dword(dev, pos, 0);
+ pci_write_config_dword(dev, pos + 4, 0);
+ res->start = 0;
+ res->end = sz64;
+ } else {
+ res->start = l64;
+ res->end = l64 + sz64;
+ dev_printk(KERN_DEBUG, &dev->dev,
+ "reg %x 64bit mmio: %pR\n", pos, res);
+ }
+ } else {
+ sz = pci_size(l, sz, mask);
+
+ if (!sz)
+ goto fail;
+
+ res->start = l;
+ res->end = l + sz;
+
+ dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
+ (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
+ res);
+ }
+
+ out:
+ return (type == pci_bar_mem64) ? 1 : 0;
+ fail:
+ res->flags = 0;
+ goto out;
+}
+
+static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
+{
+ unsigned int pos, reg;
+
+ for (pos = 0; pos < howmany; pos++) {
+ struct resource *res = &dev->resource[pos];
+ reg = PCI_BASE_ADDRESS_0 + (pos << 2);
+ pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
+ }
+
+ if (rom) {
+ struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
+ dev->rom_base_reg = rom;
+ res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
+ IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
+ IORESOURCE_SIZEALIGN;
+ __pci_read_base(dev, pci_bar_mem32, res, rom);
+ }
+}
+
+void __devinit pci_read_bridge_bases(struct pci_bus *child)
+{
+ struct pci_dev *dev = child->self;
+ u8 io_base_lo, io_limit_lo;
+ u16 mem_base_lo, mem_limit_lo;
+ unsigned long base, limit;
+ struct resource *res;
+ int i;
+
+ if (!dev) /* It's a host bus, nothing to read */
+ return;
+
+ if (dev->transparent) {
+ dev_info(&dev->dev, "transparent bridge\n");
+ for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
+ child->resource[i] = child->parent->resource[i - 3];
+ }
+
+ res = child->resource[0];
+ pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
+ pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
+ base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
+ limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
+
+ if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
+ u16 io_base_hi, io_limit_hi;
+ pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
+ pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
+ base |= (io_base_hi << 16);
+ limit |= (io_limit_hi << 16);
+ }
+
+ if (base <= limit) {
+ res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
+ if (!res->start)
+ res->start = base;
+ if (!res->end)
+ res->end = limit + 0xfff;
+ dev_printk(KERN_DEBUG, &dev->dev, "bridge io port: %pR\n", res);
+ }
+
+ res = child->resource[1];
+ pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
+ pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
+ base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
+ if (base <= limit) {
+ res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
+ res->start = base;
+ res->end = limit + 0xfffff;
+ dev_printk(KERN_DEBUG, &dev->dev, "bridge 32bit mmio: %pR\n",
+ res);
+ }
+
+ res = child->resource[2];
+ pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
+ pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
+ base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
+ limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
+
+ if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
+ u32 mem_base_hi, mem_limit_hi;
+ pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
+ pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
+
+ /*
+ * Some bridges set the base > limit by default, and some
+ * (broken) BIOSes do not initialize them. If we find
+ * this, just assume they are not being used.
+ */
+ if (mem_base_hi <= mem_limit_hi) {
+#if BITS_PER_LONG == 64
+ base |= ((long) mem_base_hi) << 32;
+ limit |= ((long) mem_limit_hi) << 32;
+#else
+ if (mem_base_hi || mem_limit_hi) {
+ dev_err(&dev->dev, "can't handle 64-bit "
+ "address space for bridge\n");
+ return;
+ }
+#endif
+ }
+ }
+ if (base <= limit) {
+ res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM | IORESOURCE_PREFETCH;
+ res->start = base;
+ res->end = limit + 0xfffff;
+ dev_printk(KERN_DEBUG, &dev->dev, "bridge %sbit mmio pref: %pR\n",
+ (res->flags & PCI_PREF_RANGE_TYPE_64) ? "64" : "32",
+ res);
+ }
+}
+
+static struct pci_bus *pci_alloc_bus(void)
+{
+ struct pci_bus *b;
+
+ b = kzalloc(sizeof(*b), GFP_KERNEL);
+ if (b) {
+ INIT_LIST_HEAD(&b->node);
+ INIT_LIST_HEAD(&b->children);
+ INIT_LIST_HEAD(&b->devices);
+ INIT_LIST_HEAD(&b->slots);
+ }
+ return b;
+}
+
+static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
+ struct pci_dev *bridge, int busnr)
+{
+ struct pci_bus *child;
+ int i;
+
+ /*
+	 * Allocate a new bus, and inherit stuff from the parent.
+ */
+ child = pci_alloc_bus();
+ if (!child)
+ return NULL;
+
+ child->parent = parent;
+ child->ops = parent->ops;
+ child->sysdata = parent->sysdata;
+ child->bus_flags = parent->bus_flags;
+
+ /* initialize some portions of the bus device, but don't register it
+ * now as the parent is not properly set up yet. This device will get
+ * registered later in pci_bus_add_devices()
+ */
+ child->dev.class = &pcibus_class;
+ dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
+
+ /*
+ * Set up the primary, secondary and subordinate
+ * bus numbers.
+ */
+ child->number = child->secondary = busnr;
+ child->primary = parent->secondary;
+ child->subordinate = 0xff;
+
+ if (!bridge)
+ return child;
+
+ child->self = bridge;
+ child->bridge = get_device(&bridge->dev);
+
+	/* Set up default resource pointers and names. */
+ for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+ child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
+ child->resource[i]->name = child->name;
+ }
+ bridge->subordinate = child;
+
+ return child;
+}
+
+struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
+{
+ struct pci_bus *child;
+
+ child = pci_alloc_child_bus(parent, dev, busnr);
+ if (child) {
+ down_write(&pci_bus_sem);
+ list_add_tail(&child->node, &parent->children);
+ up_write(&pci_bus_sem);
+ }
+ return child;
+}
+
+static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
+{
+ struct pci_bus *parent = child->parent;
+
+#ifndef DDE_LINUX
+ /* Attempts to fix that up are really dangerous unless
+ we're going to re-assign all bus numbers. */
+ if (!pcibios_assign_all_busses())
+ return;
+#endif
+
+ while (parent->parent && parent->subordinate < max) {
+ parent->subordinate = max;
+ pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
+ parent = parent->parent;
+ }
+}
+
+/*
+ * If it's a bridge, configure it and scan the bus behind it.
+ * For CardBus bridges, we don't scan behind as the devices will
+ * be handled by the bridge driver itself.
+ *
+ * We need to process bridges in two passes -- first we scan those
+ * already configured by the BIOS and after we are done with all of
+ * them, we proceed to assigning numbers to the remaining buses in
+ * order to avoid overlaps between old and new bus numbers.
+ */
+int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
+{
+ struct pci_bus *child;
+ int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
+ u32 buses, i, j = 0;
+ u16 bctl;
+ int broken = 0;
+
+ pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
+
+ dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
+ buses & 0xffffff, pass);
+
+ /* Check if setup is sensible at all */
+ if (!pass &&
+ ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
+ dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
+ broken = 1;
+ }
+
+ /* Disable MasterAbortMode during probing to avoid reporting
+ of bus errors (in some architectures) */
+ pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
+ bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
+
+ if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
+ unsigned int cmax, busnr;
+ /*
+ * Bus already configured by firmware, process it in the first
+ * pass and just note the configuration.
+ */
+ if (pass)
+ goto out;
+ busnr = (buses >> 8) & 0xFF;
+
+ /*
+ * If we already got to this bus through a different bridge,
+ * ignore it. This can happen with the i450NX chipset.
+ */
+ if (pci_find_bus(pci_domain_nr(bus), busnr)) {
+ dev_info(&dev->dev, "bus %04x:%02x already known\n",
+ pci_domain_nr(bus), busnr);
+ goto out;
+ }
+
+ child = pci_add_new_bus(bus, dev, busnr);
+ if (!child)
+ goto out;
+ child->primary = buses & 0xFF;
+ child->subordinate = (buses >> 16) & 0xFF;
+ child->bridge_ctl = bctl;
+
+ cmax = pci_scan_child_bus(child);
+ if (cmax > max)
+ max = cmax;
+ if (child->subordinate > max)
+ max = child->subordinate;
+ } else {
+#ifndef DDE_LINUX
+ /*
+ * We need to assign a number to this bus which we always
+ * do in the second pass.
+ */
+ if (!pass) {
+ if (pcibios_assign_all_busses() || broken)
+ /* Temporarily disable forwarding of the
+ configuration cycles on all bridges in
+ this bus segment to avoid possible
+ conflicts in the second pass between two
+ bridges programmed with overlapping
+ bus ranges. */
+ pci_write_config_dword(dev, PCI_PRIMARY_BUS,
+ buses & ~0xffffff);
+ goto out;
+ }
+#endif /* DDE_LINUX */
+
+ /* Clear errors */
+ pci_write_config_word(dev, PCI_STATUS, 0xffff);
+
+ /* Prevent assigning a bus number that already exists.
+ * This can happen when a bridge is hot-plugged */
+ if (pci_find_bus(pci_domain_nr(bus), max+1))
+ goto out;
+ child = pci_add_new_bus(bus, dev, ++max);
+ buses = (buses & 0xff000000)
+ | ((unsigned int)(child->primary) << 0)
+ | ((unsigned int)(child->secondary) << 8)
+ | ((unsigned int)(child->subordinate) << 16);
+
+ /*
+ * yenta.c forces a secondary latency timer of 176.
+ * Copy that behaviour here.
+ */
+ if (is_cardbus) {
+ buses &= ~0xff000000;
+ buses |= CARDBUS_LATENCY_TIMER << 24;
+ }
+
+ /*
+ * We need to blast all three values with a single write.
+ */
+ pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
+
+ if (!is_cardbus) {
+ child->bridge_ctl = bctl;
+ /*
+ * Adjust subordinate busnr in parent buses.
+ * We do this before scanning for children because
+			 * some devices may not be detected if the BIOS
+ * was lazy.
+ */
+ pci_fixup_parent_subordinate_busnr(child, max);
+ /* Now we can scan all subordinate buses... */
+ max = pci_scan_child_bus(child);
+ /*
+ * now fix it up again since we have found
+ * the real value of max.
+ */
+ pci_fixup_parent_subordinate_busnr(child, max);
+ } else {
+ /*
+ * For CardBus bridges, we leave 4 bus numbers
+ * as cards with a PCI-to-PCI bridge can be
+ * inserted later.
+ */
+ for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
+ struct pci_bus *parent = bus;
+ if (pci_find_bus(pci_domain_nr(bus),
+ max+i+1))
+ break;
+ while (parent->parent) {
+ if ((!pcibios_assign_all_busses()) &&
+ (parent->subordinate > max) &&
+ (parent->subordinate <= max+i)) {
+ j = 1;
+ }
+ parent = parent->parent;
+ }
+ if (j) {
+ /*
+ * Often, there are two cardbus bridges
+ * -- try to leave one valid bus number
+ * for each one.
+ */
+ i /= 2;
+ break;
+ }
+ }
+ max += i;
+ pci_fixup_parent_subordinate_busnr(child, max);
+ }
+ /*
+ * Set the subordinate bus number to its real value.
+ */
+ child->subordinate = max;
+ pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
+ }
+
+ sprintf(child->name,
+ (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
+ pci_domain_nr(bus), child->number);
+
+ /* Has only triggered on CardBus, fixup is in yenta_socket */
+ while (bus->parent) {
+ if ((child->subordinate > bus->subordinate) ||
+ (child->number > bus->subordinate) ||
+ (child->number < bus->number) ||
+ (child->subordinate < bus->number)) {
+ pr_debug("PCI: Bus #%02x (-#%02x) is %s "
+ "hidden behind%s bridge #%02x (-#%02x)\n",
+ child->number, child->subordinate,
+ (bus->number > child->subordinate &&
+ bus->subordinate < child->number) ?
+ "wholly" : "partially",
+ bus->self->transparent ? " transparent" : "",
+ bus->number, bus->subordinate);
+ }
+ bus = bus->parent;
+ }
+
+out:
+ pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
+
+ return max;
+}
+
+/*
+ * Read interrupt line and base address registers.
+ * The architecture-dependent code can tweak these, of course.
+ */
+static void pci_read_irq(struct pci_dev *dev)
+{
+ unsigned char irq;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
+ dev->pin = irq;
+ if (irq)
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
+ dev->irq = irq;
+}
+
+#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
+
+/**
+ * pci_setup_device - fill in class and map information of a device
+ * @dev: the device structure to fill
+ *
+ * Initialize the device structure with information about the device's
+ * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
+ * Called at initialisation of the PCI subsystem and by CardBus services.
+ * Returns 0 on success and -1 if the device is of an unknown type (not
+ * normal, bridge or CardBus).
+ */
+static int pci_setup_device(struct pci_dev *dev)
+{
+ u32 class;
+
+ dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
+ dev->bus->number, PCI_SLOT(dev->devfn),
+ PCI_FUNC(dev->devfn));
+
+ pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
+ dev->revision = class & 0xff;
+ class >>= 8; /* upper 3 bytes */
+ dev->class = class;
+ class >>= 8;
+
+ dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
+ dev->vendor, dev->device, class, dev->hdr_type);
+
+ /* "Unknown power state" */
+ dev->current_state = PCI_UNKNOWN;
+
+ /* Early fixups, before probing the BARs */
+ pci_fixup_device(pci_fixup_early, dev);
+ class = dev->class >> 8;
+
+ switch (dev->hdr_type) { /* header type */
+ case PCI_HEADER_TYPE_NORMAL: /* standard header */
+ if (class == PCI_CLASS_BRIDGE_PCI)
+ goto bad;
+ pci_read_irq(dev);
+ pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
+ pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
+ pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
+
+ /*
+ * Do the ugly legacy mode stuff here rather than broken chip
+ * quirk code. Legacy mode ATA controllers have fixed
+ * addresses. These are not always echoed in BAR0-3, and
+ * BAR0-3 in a few cases contain junk!
+ */
+ if (class == PCI_CLASS_STORAGE_IDE) {
+ u8 progif;
+ pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
+ if ((progif & 1) == 0) {
+ dev->resource[0].start = 0x1F0;
+ dev->resource[0].end = 0x1F7;
+ dev->resource[0].flags = LEGACY_IO_RESOURCE;
+ dev->resource[1].start = 0x3F6;
+ dev->resource[1].end = 0x3F6;
+ dev->resource[1].flags = LEGACY_IO_RESOURCE;
+ }
+ if ((progif & 4) == 0) {
+ dev->resource[2].start = 0x170;
+ dev->resource[2].end = 0x177;
+ dev->resource[2].flags = LEGACY_IO_RESOURCE;
+ dev->resource[3].start = 0x376;
+ dev->resource[3].end = 0x376;
+ dev->resource[3].flags = LEGACY_IO_RESOURCE;
+ }
+ }
+ break;
+
+ case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
+ if (class != PCI_CLASS_BRIDGE_PCI)
+ goto bad;
+		/* The PCI-to-PCI bridge spec requires that a subtractive
+		   decoding (i.e. transparent) bridge have a programming
+		   interface code of 0x01. */
+ pci_read_irq(dev);
+ dev->transparent = ((dev->class & 0xff) == 1);
+ pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
+ break;
+
+ case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
+ if (class != PCI_CLASS_BRIDGE_CARDBUS)
+ goto bad;
+ pci_read_irq(dev);
+ pci_read_bases(dev, 1, 0);
+ pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
+ pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
+ break;
+
+ default: /* unknown header */
+ dev_err(&dev->dev, "unknown header type %02x, "
+ "ignoring device\n", dev->hdr_type);
+ return -1;
+
+ bad:
+ dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
+ "type %02x)\n", class, dev->hdr_type);
+ dev->class = PCI_CLASS_NOT_DEFINED;
+ }
+
+ /* We found a fine healthy device, go go go... */
+ return 0;
+}
+
+static void pci_release_capabilities(struct pci_dev *dev)
+{
+ pci_vpd_release(dev);
+}
+
+/**
+ * pci_release_dev - free a PCI device structure when all users of it are finished.
+ * @dev: device that's been disconnected
+ *
+ * Will be called only by the device core when all users of this PCI device are
+ * done.
+ */
+static void pci_release_dev(struct device *dev)
+{
+ struct pci_dev *pci_dev;
+
+ pci_dev = to_pci_dev(dev);
+ pci_release_capabilities(pci_dev);
+ kfree(pci_dev);
+}
+
+static void set_pcie_port_type(struct pci_dev *pdev)
+{
+ int pos;
+ u16 reg16;
+
+ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!pos)
+ return;
+ pdev->is_pcie = 1;
+ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
+ pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
+}
+
+/**
+ * pci_cfg_space_size - get the configuration space size of the PCI device.
+ * @dev: PCI device
+ *
+ * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
+ * have 4096 bytes. Even if the device is capable, that doesn't mean we can
+ * access it. Maybe we don't have a way to generate extended config space
+ * accesses, or the device is behind a reverse Express bridge. So we try
+ * reading the dword at 0x100 which must either be 0 or a valid extended
+ * capability header.
+ */
+int pci_cfg_space_size_ext(struct pci_dev *dev)
+{
+ u32 status;
+ int pos = PCI_CFG_SPACE_SIZE;
+
+ if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
+ goto fail;
+ if (status == 0xffffffff)
+ goto fail;
+
+ return PCI_CFG_SPACE_EXP_SIZE;
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+int pci_cfg_space_size(struct pci_dev *dev)
+{
+ int pos;
+ u32 status;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
+ if (!pos) {
+ pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
+ if (!pos)
+ goto fail;
+
+ pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
+ if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
+ goto fail;
+ }
+
+ return pci_cfg_space_size_ext(dev);
+
+ fail:
+ return PCI_CFG_SPACE_SIZE;
+}
+
+static void pci_release_bus_bridge_dev(struct device *dev)
+{
+ kfree(dev);
+}
+
+struct pci_dev *alloc_pci_dev(void)
+{
+ struct pci_dev *dev;
+
+ dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ INIT_LIST_HEAD(&dev->bus_list);
+
+ return dev;
+}
+EXPORT_SYMBOL(alloc_pci_dev);
+
+/*
+ * Read the config data for a PCI device, sanity-check it
+ * and fill in the dev structure...
+ */
+static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_dev *dev;
+ struct pci_slot *slot;
+ u32 l;
+ u8 hdr_type;
+ int delay = 1;
+
+ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
+ return NULL;
+
+ /* some broken boards return 0 or ~0 if a slot is empty: */
+ if (l == 0xffffffff || l == 0x00000000 ||
+ l == 0x0000ffff || l == 0xffff0000)
+ return NULL;
+
+	/* Configuration Request Retry Status */
+ while (l == 0xffff0001) {
+ msleep(delay);
+ delay *= 2;
+ if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
+ return NULL;
+ /* Card hasn't responded in 60 seconds? Must be stuck. */
+ if (delay > 60 * 1000) {
+ printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
+ "responding\n", pci_domain_nr(bus),
+ bus->number, PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
+ return NULL;
+ }
+ }
+
+ if (pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type))
+ return NULL;
+
+ dev = alloc_pci_dev();
+ if (!dev)
+ return NULL;
+
+ dev->bus = bus;
+ dev->sysdata = bus->sysdata;
+ dev->dev.parent = bus->bridge;
+ dev->dev.bus = &pci_bus_type;
+ dev->devfn = devfn;
+ dev->hdr_type = hdr_type & 0x7f;
+ dev->multifunction = !!(hdr_type & 0x80);
+ dev->vendor = l & 0xffff;
+ dev->device = (l >> 16) & 0xffff;
+ dev->cfg_size = pci_cfg_space_size(dev);
+ dev->error_state = pci_channel_io_normal;
+ set_pcie_port_type(dev);
+
+ list_for_each_entry(slot, &bus->slots, list)
+ if (PCI_SLOT(devfn) == slot->number)
+ dev->slot = slot;
+
+ /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
+ set this higher, assuming the system even supports it. */
+ dev->dma_mask = 0xffffffff;
+ if (pci_setup_device(dev) < 0) {
+ kfree(dev);
+ return NULL;
+ }
+
+ return dev;
+}
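+/*
+ * Worked example for the retry loop above (not in the original): the delay
+ * doubles after every Configuration Request Retry Status completion, so the
+ * sleeps run 1, 2, 4, ... 32768 ms -- 65535 ms in total -- before the next
+ * doubling exceeds 60 * 1000 and the device is declared stuck.
+ */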
+
+static void pci_init_capabilities(struct pci_dev *dev)
+{
+ /* MSI/MSI-X list */
+ pci_msi_init_pci_dev(dev);
+
+ /* Buffers for saving PCIe and PCI-X capabilities */
+ pci_allocate_cap_save_buffers(dev);
+
+ /* Power Management */
+ pci_pm_init(dev);
+ platform_pci_wakeup_init(dev);
+
+ /* Vital Product Data */
+ pci_vpd_pci22_init(dev);
+
+ /* Alternative Routing-ID Forwarding */
+ pci_enable_ari(dev);
+}
+
+void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
+{
+ device_initialize(&dev->dev);
+ dev->dev.release = pci_release_dev;
+ pci_dev_get(dev);
+
+ dev->dev.dma_mask = &dev->dma_mask;
+ dev->dev.dma_parms = &dev->dma_parms;
+ dev->dev.coherent_dma_mask = 0xffffffffull;
+
+ pci_set_dma_max_seg_size(dev, 65536);
+ pci_set_dma_seg_boundary(dev, 0xffffffff);
+
+ /* Fix up broken headers */
+ pci_fixup_device(pci_fixup_header, dev);
+
+ /* Initialize various capabilities */
+ pci_init_capabilities(dev);
+
+ /*
+ * Add the device to our list of discovered devices
+ * and the bus list for fixup functions, etc.
+ */
+ down_write(&pci_bus_sem);
+ list_add_tail(&dev->bus_list, &bus->devices);
+ up_write(&pci_bus_sem);
+}
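+/*
+ * Illustrative note (not in the original): device_initialize() plus the
+ * pci_dev_get() above only hooks the device into driver-core refcounting
+ * and the bus list; it becomes visible in sysfs and eligible for driver
+ * binding when device_add() runs later, e.g.:
+ *
+ *	pci_device_add(dev, bus);	// refcounted, on bus->devices
+ *	pci_bus_add_device(dev);	// device_add() + sysfs entries
+ */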
+
+struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
+{
+ struct pci_dev *dev;
+
+ dev = pci_scan_device(bus, devfn);
+ if (!dev)
+ return NULL;
+
+ pci_device_add(dev, bus);
+
+ return dev;
+}
+EXPORT_SYMBOL(pci_scan_single_device);
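+/*
+ * Hedged usage sketch (not part of this file): a hotplug driver that knows
+ * exactly which function appeared can scan it directly; "slot" here is a
+ * hypothetical slot number:
+ *
+ *	struct pci_dev *dev = pci_scan_single_device(bus, PCI_DEVFN(slot, 0));
+ *	if (dev)
+ *		pci_bus_add_device(dev);
+ */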
+
+/**
+ * pci_scan_slot - scan a PCI slot on a bus for devices.
+ * @bus: PCI bus to scan
+ * @devfn: slot number to scan (function number must be zero)
+ *
+ * Scan a PCI slot on the specified PCI bus for devices, adding
+ * discovered devices to the @bus->devices list. New devices
+ * will not have is_added set.
+ */
+int pci_scan_slot(struct pci_bus *bus, int devfn)
+{
+ int func, nr = 0;
+ int scan_all_fns;
+
+ scan_all_fns = pcibios_scan_all_fns(bus, devfn);
+
+ for (func = 0; func < 8; func++, devfn++) {
+ struct pci_dev *dev;
+
+ dev = pci_scan_single_device(bus, devfn);
+ if (dev) {
+ nr++;
+
+			/*
+			 * If this is a single function device,
+			 * don't scan past the first function.
+			 * A device that claims to be single
+			 * function but still responded at
+			 * func > 0 is marked multifunction so
+			 * it is handled consistently later.
+			 */
+ if (!dev->multifunction) {
+ if (func > 0) {
+ dev->multifunction = 1;
+ } else {
+ break;
+ }
+ }
+ } else {
+ if (func == 0 && !scan_all_fns)
+ break;
+ }
+ }
+
+	/* If the slot yielded any devices, initialize ASPM on the upstream link */
+ if (bus->self && nr)
+ pcie_aspm_init_link_state(bus->self);
+
+ return nr;
+}
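+/*
+ * Illustration (not in the original): devfn packs a 5-bit slot number and
+ * a 3-bit function number, which is why the loop above covers a slot by
+ * stepping devfn one function at a time:
+ *
+ *	unsigned int devfn = PCI_DEVFN(3, 0);	// slot 3, function 0
+ *	PCI_SLOT(devfn);			// == 3
+ *	PCI_FUNC(devfn + 1);			// == 1, still slot 3
+ */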
+
+unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
+{
+	unsigned int devfn, max = bus->secondary;
+#ifndef DDE_LINUX
+	unsigned int pass;
+	struct pci_dev *dev;
+#endif
+
+ pr_debug("PCI: Scanning bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
+
+ /* Go find them, Rover! */
+ for (devfn = 0; devfn < 0x100; devfn += 8)
+ pci_scan_slot(bus, devfn);
+
+#ifndef DDE_LINUX
+ /*
+ * After performing arch-dependent fixup of the bus, look behind
+ * all PCI-to-PCI bridges on this bus.
+ */
+ pr_debug("PCI: Fixups for bus %04x:%02x\n", pci_domain_nr(bus), bus->number);
+ pcibios_fixup_bus(bus);
+	for (pass = 0; pass < 2; pass++)
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ max = pci_scan_bridge(bus, dev, max, pass);
+ }
+#endif
+
+	/*
+	 * We've scanned the bus, so we know all about what's on the other
+	 * side of any bridges that may be on this bus, plus any devices.
+	 *
+	 * Return how far we've got finding sub-buses.
+	 */
+ pr_debug("PCI: Bus scan for %04x:%02x returning with max=%02x\n",
+ pci_domain_nr(bus), bus->number, max);
+ return max;
+}
+
+void __attribute__((weak)) set_pci_bus_resources_arch_default(struct pci_bus *b)
+{
+}
+
+struct pci_bus *pci_create_bus(struct device *parent,
+		int bus, struct pci_ops *ops, void *sysdata)
+{
+ int error;
+ struct pci_bus *b;
+ struct device *dev;
+
+ b = pci_alloc_bus();
+ if (!b)
+ return NULL;
+
+	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		kfree(b);
+		return NULL;
+	}
+
+ b->sysdata = sysdata;
+ b->ops = ops;
+
+ if (pci_find_bus(pci_domain_nr(b), bus)) {
+ /* If we already got to this bus through a different bridge, ignore it */
+ pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
+ goto err_out;
+ }
+
+ down_write(&pci_bus_sem);
+ list_add_tail(&b->node, &pci_root_buses);
+ up_write(&pci_bus_sem);
+
+ dev->parent = parent;
+ dev->release = pci_release_bus_bridge_dev;
+ dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
+ error = device_register(dev);
+ if (error)
+ goto dev_reg_err;
+ b->bridge = get_device(dev);
+
+ if (!parent)
+ set_dev_node(b->bridge, pcibus_to_node(b));
+
+ b->dev.class = &pcibus_class;
+ b->dev.parent = b->bridge;
+ dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
+ error = device_register(&b->dev);
+ if (error)
+ goto class_dev_reg_err;
+ error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
+ if (error)
+ goto dev_create_file_err;
+
+ /* Create legacy_io and legacy_mem files for this bus */
+ pci_create_legacy_files(b);
+
+ b->number = b->secondary = bus;
+ b->resource[0] = &ioport_resource;
+ b->resource[1] = &iomem_resource;
+
+ set_pci_bus_resources_arch_default(b);
+
+ return b;
+
+dev_create_file_err:
+ device_unregister(&b->dev);
+class_dev_reg_err:
+ device_unregister(dev);
+dev_reg_err:
+ down_write(&pci_bus_sem);
+ list_del(&b->node);
+ up_write(&pci_bus_sem);
+err_out:
+ kfree(dev);
+ kfree(b);
+ return NULL;
+}
+
+struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
+ int bus, struct pci_ops *ops, void *sysdata)
+{
+ struct pci_bus *b;
+
+ b = pci_create_bus(parent, bus, ops, sysdata);
+ if (b)
+ b->subordinate = pci_scan_child_bus(b);
+ return b;
+}
+EXPORT_SYMBOL(pci_scan_bus_parented);
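+/*
+ * Hedged usage sketch (not part of this file): a host-bridge driver would
+ * typically create and scan its root bus in one call and then register the
+ * discovered devices; my_pci_ops is a hypothetical pci_ops instance:
+ *
+ *	struct pci_bus *b = pci_scan_bus_parented(NULL, 0, &my_pci_ops, NULL);
+ *	if (b)
+ *		pci_bus_add_devices(b);
+ */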
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pci_add_new_bus);
+EXPORT_SYMBOL(pci_scan_slot);
+EXPORT_SYMBOL(pci_scan_bridge);
+EXPORT_SYMBOL_GPL(pci_scan_child_bus);
+#endif
+
+static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
+{
+ const struct pci_dev *a = to_pci_dev(d_a);
+ const struct pci_dev *b = to_pci_dev(d_b);
+
+ if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
+ else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
+
+ if (a->bus->number < b->bus->number) return -1;
+ else if (a->bus->number > b->bus->number) return 1;
+
+ if (a->devfn < b->devfn) return -1;
+ else if (a->devfn > b->devfn) return 1;
+
+ return 0;
+}
+
+void __init pci_sort_breadthfirst(void)
+{
+ bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
+}
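+/*
+ * Illustration of the resulting order (not in the original): the comparator
+ * sorts by domain, then bus, then devfn, so after pci_sort_breadthfirst()
+ * the device list reads like
+ *
+ *	0000:00:01.0 < 0000:00:1f.2 < 0000:01:00.0 < 0001:00:00.0
+ */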