author    Zheng Da <zhengda1936@gmail.com>    2009-12-06 05:26:23 +0100
committer Zheng Da <zhengda1936@gmail.com>    2009-12-06 05:26:23 +0100
commit    8a6d48c0542876eb3acfc0970c0ab7872db08d5f (patch)
tree      496e78bc728317ea779781b92f897d16936ee231 /libdde_linux26/contrib/net
parent    b4bffcfcdf3ab7a55d664e9aa5907f88da503f38 (diff)
check in the original version of dde linux26.
Diffstat (limited to 'libdde_linux26/contrib/net')
-rw-r--r--  libdde_linux26/contrib/net/.svn/all-wcprops  11
-rw-r--r--  libdde_linux26/contrib/net/.svn/entries  74
-rw-r--r--  libdde_linux26/contrib/net/.svn/format  1
-rw-r--r--  libdde_linux26/contrib/net/.svn/text-base/socket.c.svn-base  2406
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/all-wcprops  77
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/entries  436
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/format  1
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/dev.c.svn-base  5277
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/dev_mcast.c.svn-base  229
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/ethtool.c.svn-base  1091
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/filter.c.svn-base  541
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/kmap_skb.h.svn-base  19
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/neighbour.c.svn-base  2824
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.c.svn-base  540
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.h.svn-base  8
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/netevent.c.svn-base  70
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/rtnetlink.c.svn-base  1430
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/skb_dma_map.c.svn-base  66
-rw-r--r--  libdde_linux26/contrib/net/core/.svn/text-base/sock.c.svn-base  2297
-rw-r--r--  libdde_linux26/contrib/net/core/dev.c  5277
-rw-r--r--  libdde_linux26/contrib/net/core/dev_mcast.c  229
-rw-r--r--  libdde_linux26/contrib/net/core/ethtool.c  1091
-rw-r--r--  libdde_linux26/contrib/net/core/filter.c  541
-rw-r--r--  libdde_linux26/contrib/net/core/kmap_skb.h  19
-rw-r--r--  libdde_linux26/contrib/net/core/neighbour.c  2824
-rw-r--r--  libdde_linux26/contrib/net/core/net-sysfs.c  540
-rw-r--r--  libdde_linux26/contrib/net/core/net-sysfs.h  8
-rw-r--r--  libdde_linux26/contrib/net/core/netevent.c  70
-rw-r--r--  libdde_linux26/contrib/net/core/rtnetlink.c  1430
-rw-r--r--  libdde_linux26/contrib/net/core/skb_dma_map.c  66
-rw-r--r--  libdde_linux26/contrib/net/core/sock.c  2297
-rw-r--r--  libdde_linux26/contrib/net/ethernet/.svn/all-wcprops  11
-rw-r--r--  libdde_linux26/contrib/net/ethernet/.svn/entries  62
-rw-r--r--  libdde_linux26/contrib/net/ethernet/.svn/format  1
-rw-r--r--  libdde_linux26/contrib/net/ethernet/.svn/text-base/eth.c.svn-base  407
-rw-r--r--  libdde_linux26/contrib/net/ethernet/eth.c  407
-rw-r--r--  libdde_linux26/contrib/net/netlink/.svn/all-wcprops  11
-rw-r--r--  libdde_linux26/contrib/net/netlink/.svn/entries  62
-rw-r--r--  libdde_linux26/contrib/net/netlink/.svn/format  1
-rw-r--r--  libdde_linux26/contrib/net/netlink/.svn/text-base/attr.c.svn-base  473
-rw-r--r--  libdde_linux26/contrib/net/netlink/attr.c  473
-rw-r--r--  libdde_linux26/contrib/net/sched/.svn/all-wcprops  5
-rw-r--r--  libdde_linux26/contrib/net/sched/.svn/entries  28
-rw-r--r--  libdde_linux26/contrib/net/sched/.svn/format  1
-rw-r--r--  libdde_linux26/contrib/net/socket.c  2406
45 files changed, 36138 insertions, 0 deletions
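
The bulk of this import is a verbatim copy of net/socket.c from the upstream 2.6 kernel, which provides the in-kernel socket entry points (sock_create_kern(), kernel_sendmsg(), kernel_recvmsg(), sock_release()) that DDE glue code builds on. For orientation only — this sketch is not part of the commit below — the following minimal example shows how kernel-side code would use those entry points with the signatures defined in the imported socket.c; the function name and the loopback/echo destination are made up for illustration.

/* Illustrative sketch, not part of this commit: send one UDP datagram
 * through the in-kernel socket API imported below. */
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/uio.h>
#include <asm/byteorder.h>
#include <net/sock.h>

static int dde_socket_smoke_test(void)
{
	struct socket *sock;
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(7),	/* echo port, purely illustrative */
		.sin_addr   = { .s_addr = htonl(INADDR_LOOPBACK) },
	};
	struct kvec vec = { .iov_base = "hello", .iov_len = 5 };
	struct msghdr msg = {
		.msg_name    = &addr,
		.msg_namelen = sizeof(addr),
		.msg_flags   = MSG_DONTWAIT,
	};
	int err;

	/* sock_create_kern() and kernel_sendmsg() are defined in the
	 * socket.c checked in by this commit. */
	err = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
	if (err < 0)
		return err;

	err = kernel_sendmsg(sock, &msg, &vec, 1, vec.iov_len);

	sock_release(sock);
	return err < 0 ? err : 0;
}
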
diff --git a/libdde_linux26/contrib/net/.svn/all-wcprops b/libdde_linux26/contrib/net/.svn/all-wcprops
new file mode 100644
index 00000000..5af71256
--- /dev/null
+++ b/libdde_linux26/contrib/net/.svn/all-wcprops
@@ -0,0 +1,11 @@
+K 25
+svn:wc:ra_dav:version-url
+V 62
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net
+END
+socket.c
+K 25
+svn:wc:ra_dav:version-url
+V 71
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/socket.c
+END
diff --git a/libdde_linux26/contrib/net/.svn/entries b/libdde_linux26/contrib/net/.svn/entries
new file mode 100644
index 00000000..042f5985
--- /dev/null
+++ b/libdde_linux26/contrib/net/.svn/entries
@@ -0,0 +1,74 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/contrib/net
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+netlink
+dir
+
+sched
+dir
+
+core
+dir
+
+socket.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+03baa764b9a13ce360545b14ee578b06
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+58332
+
+ethernet
+dir
+
diff --git a/libdde_linux26/contrib/net/.svn/format b/libdde_linux26/contrib/net/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/contrib/net/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/contrib/net/.svn/text-base/socket.c.svn-base b/libdde_linux26/contrib/net/.svn/text-base/socket.c.svn-base
new file mode 100644
index 00000000..35dd7371
--- /dev/null
+++ b/libdde_linux26/contrib/net/.svn/text-base/socket.c.svn-base
@@ -0,0 +1,2406 @@
+/*
+ * NET An implementation of the SOCKET network access protocol.
+ *
+ * Version: @(#)socket.c 1.1.93 18/02/95
+ *
+ * Authors: Orest Zborowski, <obz@Kodak.COM>
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Anonymous : NOTSOCK/BADF cleanup. Error fix in
+ * shutdown()
+ * Alan Cox : verify_area() fixes
+ * Alan Cox : Removed DDI
+ * Jonathan Kamens : SOCK_DGRAM reconnect bug
+ * Alan Cox : Moved a load of checks to the very
+ * top level.
+ * Alan Cox : Move address structures to/from user
+ * mode above the protocol layers.
+ * Rob Janssen : Allow 0 length sends.
+ * Alan Cox : Asynchronous I/O support (cribbed from the
+ * tty drivers).
+ * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style)
+ * Jeff Uphoff : Made max number of sockets command-line
+ * configurable.
+ * Matti Aarnio : Made the number of sockets dynamic,
+ * to be allocated when needed, and mr.
+ * Uphoff's max is used as max to be
+ * allowed to allocate.
+ * Linus : Argh. removed all the socket allocation
+ * altogether: it's in the inode now.
+ * Alan Cox : Made sock_alloc()/sock_release() public
+ * for NetROM and future kernel nfsd type
+ * stuff.
+ * Alan Cox : sendmsg/recvmsg basics.
+ * Tom Dyas : Export net symbols.
+ * Marcin Dalecki : Fixed problems with CONFIG_NET="n".
+ * Alan Cox : Added thread locking to sys_* calls
+ * for sockets. May have errors at the
+ * moment.
+ * Kevin Buhr : Fixed the dumb errors in the above.
+ * Andi Kleen : Some small cleanups, optimizations,
+ * and fixed a copy_from_user() bug.
+ * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0)
+ * Tigran Aivazian : Made listen(2) backlog sanity checks
+ * protocol-independent
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * This module is effectively the top level interface to the BSD socket
+ * paradigm.
+ *
+ * Based upon Swansea University Computer Society NET3.039
+ */
+
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <linux/net.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/rcupdate.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/wanrouter.h>
+#include <linux/if_bridge.h>
+#include <linux/if_frad.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include <linux/kmod.h>
+#include <linux/audit.h>
+#include <linux/wireless.h>
+#include <linux/nsproxy.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+#include <net/compat.h>
+#include <net/wext.h>
+
+#include <net/sock.h>
+#include <linux/netfilter.h>
+
+static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+static int sock_mmap(struct file *file, struct vm_area_struct *vma);
+
+static int sock_close(struct inode *inode, struct file *file);
+static unsigned int sock_poll(struct file *file,
+ struct poll_table_struct *wait);
+static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long compat_sock_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
+static int sock_fasync(int fd, struct file *filp, int on);
+static ssize_t sock_sendpage(struct file *file, struct page *page,
+ int offset, size_t size, loff_t *ppos, int more);
+static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+
+/*
+ * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
+ * in the operation structures but are done directly via the socketcall() multiplexor.
+ */
+
+static const struct file_operations socket_file_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .aio_read = sock_aio_read,
+ .aio_write = sock_aio_write,
+ .poll = sock_poll,
+ .unlocked_ioctl = sock_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_sock_ioctl,
+#endif
+ .mmap = sock_mmap,
+ .open = sock_no_open, /* special open code to disallow open via /proc */
+ .release = sock_close,
+ .fasync = sock_fasync,
+ .sendpage = sock_sendpage,
+ .splice_write = generic_splice_sendpage,
+ .splice_read = sock_splice_read,
+};
+
+/*
+ * The protocol list. Each protocol is registered in here.
+ */
+
+static DEFINE_SPINLOCK(net_family_lock);
+static const struct net_proto_family *net_families[NPROTO] __read_mostly;
+
+/*
+ * Statistics counters of the socket lists
+ */
+
+static DEFINE_PER_CPU(int, sockets_in_use) = 0;
+
+/*
+ * Support routines.
+ * Move socket addresses back and forth across the kernel/user
+ * divide and look after the messy bits.
+ */
+
+#define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
+ 16 for IP, 16 for IPX,
+ 24 for IPv6,
+ about 80 for AX.25
+ must be at least one bigger than
+ the AF_UNIX size (see net/unix/af_unix.c
+ :unix_mkname()).
+ */
+
+/**
+ * move_addr_to_kernel - copy a socket address into kernel space
+ * @uaddr: Address in user space
+ * @kaddr: Address in kernel space
+ * @ulen: Length in user space
+ *
+ * The address is copied into kernel space. If the provided address is
+ * too long an error code of -EINVAL is returned. If the copy gives
+ * invalid addresses -EFAULT is returned. On a success 0 is returned.
+ */
+
+int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
+{
+ if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
+ return -EINVAL;
+ if (ulen == 0)
+ return 0;
+ if (copy_from_user(kaddr, uaddr, ulen))
+ return -EFAULT;
+ return audit_sockaddr(ulen, kaddr);
+}
+
+/**
+ * move_addr_to_user - copy an address to user space
+ * @kaddr: kernel space address
+ * @klen: length of address in kernel
+ * @uaddr: user space address
+ * @ulen: pointer to user length field
+ *
+ * The value pointed to by ulen on entry is the buffer length available.
+ * This is overwritten with the buffer space used. -EINVAL is returned
+ * if an overlong buffer is specified or a negative buffer size. -EFAULT
+ * is returned if either the buffer or the length field are not
+ * accessible.
+ * After copying the data up to the limit the user specifies, the true
+ * length of the data is written over the length limit the user
+ * specified. Zero is returned for a success.
+ */
+
+int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr,
+ int __user *ulen)
+{
+ int err;
+ int len;
+
+ err = get_user(len, ulen);
+ if (err)
+ return err;
+ if (len > klen)
+ len = klen;
+ if (len < 0 || len > sizeof(struct sockaddr_storage))
+ return -EINVAL;
+ if (len) {
+ if (audit_sockaddr(klen, kaddr))
+ return -ENOMEM;
+ if (copy_to_user(uaddr, kaddr, len))
+ return -EFAULT;
+ }
+ /*
+ * "fromlen shall refer to the value before truncation.."
+ * 1003.1g
+ */
+ return __put_user(klen, ulen);
+}
+
+#define SOCKFS_MAGIC 0x534F434B
+
+static struct kmem_cache *sock_inode_cachep __read_mostly;
+
+static struct inode *sock_alloc_inode(struct super_block *sb)
+{
+ struct socket_alloc *ei;
+
+ ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
+ if (!ei)
+ return NULL;
+ init_waitqueue_head(&ei->socket.wait);
+
+ ei->socket.fasync_list = NULL;
+ ei->socket.state = SS_UNCONNECTED;
+ ei->socket.flags = 0;
+ ei->socket.ops = NULL;
+ ei->socket.sk = NULL;
+ ei->socket.file = NULL;
+
+ return &ei->vfs_inode;
+}
+
+static void sock_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(sock_inode_cachep,
+ container_of(inode, struct socket_alloc, vfs_inode));
+}
+
+static void init_once(void *foo)
+{
+ struct socket_alloc *ei = (struct socket_alloc *)foo;
+
+ inode_init_once(&ei->vfs_inode);
+}
+
+static int init_inodecache(void)
+{
+ sock_inode_cachep = kmem_cache_create("sock_inode_cache",
+ sizeof(struct socket_alloc),
+ 0,
+ (SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (sock_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static struct super_operations sockfs_ops = {
+ .alloc_inode = sock_alloc_inode,
+ .destroy_inode =sock_destroy_inode,
+ .statfs = simple_statfs,
+};
+
+static int sockfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
+ mnt);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+ .get_sb = sockfs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
+static int sockfs_delete_dentry(struct dentry *dentry)
+{
+ /*
+ * At creation time, we pretended this dentry was hashed
+ * (by clearing DCACHE_UNHASHED bit in d_flags)
+ * At delete time, we restore the truth : not hashed.
+ * (so that dput() can proceed correctly)
+ */
+ dentry->d_flags |= DCACHE_UNHASHED;
+ return 0;
+}
+
+/*
+ * sockfs_dname() is called from d_path().
+ */
+static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
+ dentry->d_inode->i_ino);
+}
+
+static struct dentry_operations sockfs_dentry_operations = {
+ .d_delete = sockfs_delete_dentry,
+ .d_dname = sockfs_dname,
+};
+
+/*
+ * Obtains the first available file descriptor and sets it up for use.
+ *
+ * These functions create file structures and maps them to fd space
+ * of the current process. On success it returns file descriptor
+ * and file struct implicitly stored in sock->file.
+ * Note that another thread may close file descriptor before we return
+ * from this function. We use the fact that now we do not refer
+ * to socket after mapping. If one day we will need it, this
+ * function will increment ref. count on file by 1.
+ *
+ * In any case returned fd MAY BE not valid!
+ * This race condition is unavoidable
+ * with shared fd spaces, we cannot solve it inside kernel,
+ * but we take care of internal coherence yet.
+ */
+
+static int sock_alloc_fd(struct file **filep, int flags)
+{
+ int fd;
+
+ fd = get_unused_fd_flags(flags);
+ if (likely(fd >= 0)) {
+ struct file *file = get_empty_filp();
+
+ *filep = file;
+ if (unlikely(!file)) {
+ put_unused_fd(fd);
+ return -ENFILE;
+ }
+ } else
+ *filep = NULL;
+ return fd;
+}
+
+static int sock_attach_fd(struct socket *sock, struct file *file, int flags)
+{
+ struct dentry *dentry;
+ struct qstr name = { .name = "" };
+
+ dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
+ if (unlikely(!dentry))
+ return -ENOMEM;
+
+ dentry->d_op = &sockfs_dentry_operations;
+ /*
+ * We dont want to push this dentry into global dentry hash table.
+ * We pretend dentry is already hashed, by unsetting DCACHE_UNHASHED
+ * This permits a working /proc/$pid/fd/XXX on sockets
+ */
+ dentry->d_flags &= ~DCACHE_UNHASHED;
+ d_instantiate(dentry, SOCK_INODE(sock));
+
+ sock->file = file;
+ init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
+ &socket_file_ops);
+ SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ file->f_flags = O_RDWR | (flags & O_NONBLOCK);
+ file->f_pos = 0;
+ file->private_data = sock;
+
+ return 0;
+}
+
+int sock_map_fd(struct socket *sock, int flags)
+{
+ struct file *newfile;
+ int fd = sock_alloc_fd(&newfile, flags);
+
+ if (likely(fd >= 0)) {
+ int err = sock_attach_fd(sock, newfile, flags);
+
+ if (unlikely(err < 0)) {
+ put_filp(newfile);
+ put_unused_fd(fd);
+ return err;
+ }
+ fd_install(fd, newfile);
+ }
+ return fd;
+}
+
+static struct socket *sock_from_file(struct file *file, int *err)
+{
+ if (file->f_op == &socket_file_ops)
+ return file->private_data; /* set in sock_map_fd */
+
+ *err = -ENOTSOCK;
+ return NULL;
+}
+
+/**
+ * sockfd_lookup - Go from a file number to its socket slot
+ * @fd: file handle
+ * @err: pointer to an error code return
+ *
+ * The file handle passed in is locked and the socket it is bound
+ * too is returned. If an error occurs the err pointer is overwritten
+ * with a negative errno code and NULL is returned. The function checks
+ * for both invalid handles and passing a handle which is not a socket.
+ *
+ * On a success the socket object pointer is returned.
+ */
+
+struct socket *sockfd_lookup(int fd, int *err)
+{
+ struct file *file;
+ struct socket *sock;
+
+ file = fget(fd);
+ if (!file) {
+ *err = -EBADF;
+ return NULL;
+ }
+
+ sock = sock_from_file(file, err);
+ if (!sock)
+ fput(file);
+ return sock;
+}
+
+static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
+{
+ struct file *file;
+ struct socket *sock;
+
+ *err = -EBADF;
+ file = fget_light(fd, fput_needed);
+ if (file) {
+ sock = sock_from_file(file, err);
+ if (sock)
+ return sock;
+ fput_light(file, *fput_needed);
+ }
+ return NULL;
+}
+
+/**
+ * sock_alloc - allocate a socket
+ *
+ * Allocate a new inode and socket object. The two are bound together
+ * and initialised. The socket is then returned. If we are out of inodes
+ * NULL is returned.
+ */
+
+static struct socket *sock_alloc(void)
+{
+ struct inode *inode;
+ struct socket *sock;
+
+ inode = new_inode(sock_mnt->mnt_sb);
+ if (!inode)
+ return NULL;
+
+ sock = SOCKET_I(inode);
+
+ inode->i_mode = S_IFSOCK | S_IRWXUGO;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+
+ get_cpu_var(sockets_in_use)++;
+ put_cpu_var(sockets_in_use);
+ return sock;
+}
+
+/*
+ * In theory you can't get an open on this inode, but /proc provides
+ * a back door. Remember to keep it shut otherwise you'll let the
+ * creepy crawlies in.
+ */
+
+static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
+{
+ return -ENXIO;
+}
+
+const struct file_operations bad_sock_fops = {
+ .owner = THIS_MODULE,
+ .open = sock_no_open,
+};
+
+/**
+ * sock_release - close a socket
+ * @sock: socket to close
+ *
+ * The socket is released from the protocol stack if it has a release
+ * callback, and the inode is then released if the socket is bound to
+ * an inode not a file.
+ */
+
+void sock_release(struct socket *sock)
+{
+ if (sock->ops) {
+ struct module *owner = sock->ops->owner;
+
+ sock->ops->release(sock);
+ sock->ops = NULL;
+ module_put(owner);
+ }
+
+ if (sock->fasync_list)
+ printk(KERN_ERR "sock_release: fasync list not empty!\n");
+
+ get_cpu_var(sockets_in_use)--;
+ put_cpu_var(sockets_in_use);
+ if (!sock->file) {
+ iput(SOCK_INODE(sock));
+ return;
+ }
+ sock->file = NULL;
+}
+
+static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size)
+{
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
+ int err;
+
+ si->sock = sock;
+ si->scm = NULL;
+ si->msg = msg;
+ si->size = size;
+
+ err = security_socket_sendmsg(sock, msg, size);
+ if (err)
+ return err;
+
+ return sock->ops->sendmsg(iocb, sock, msg, size);
+}
+
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+{
+ struct kiocb iocb;
+ struct sock_iocb siocb;
+ int ret;
+
+ init_sync_kiocb(&iocb, NULL);
+ iocb.private = &siocb;
+ ret = __sock_sendmsg(&iocb, sock, msg, size);
+ if (-EIOCBQUEUED == ret)
+ ret = wait_on_sync_kiocb(&iocb);
+ return ret;
+}
+
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int result;
+
+ set_fs(KERNEL_DS);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+ msg->msg_iov = (struct iovec *)vec;
+ msg->msg_iovlen = num;
+ result = sock_sendmsg(sock, msg, size);
+ set_fs(oldfs);
+ return result;
+}
+
+/*
+ * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
+ */
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ ktime_t kt = skb->tstamp;
+
+ if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
+ struct timeval tv;
+ /* Race occurred between timestamp enabling and packet
+ receiving. Fill in the current time for now. */
+ if (kt.tv64 == 0)
+ kt = ktime_get_real();
+ skb->tstamp = kt;
+ tv = ktime_to_timeval(kt);
+ put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv);
+ } else {
+ struct timespec ts;
+ /* Race occurred between timestamp enabling and packet
+ receiving. Fill in the current time for now. */
+ if (kt.tv64 == 0)
+ kt = ktime_get_real();
+ skb->tstamp = kt;
+ ts = ktime_to_timespec(kt);
+ put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts), &ts);
+ }
+}
+
+EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
+
+static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+{
+ int err;
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
+
+ si->sock = sock;
+ si->scm = NULL;
+ si->msg = msg;
+ si->size = size;
+ si->flags = flags;
+
+ err = security_socket_recvmsg(sock, msg, size, flags);
+ if (err)
+ return err;
+
+ return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+}
+
+int sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct kiocb iocb;
+ struct sock_iocb siocb;
+ int ret;
+
+ init_sync_kiocb(&iocb, NULL);
+ iocb.private = &siocb;
+ ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
+ if (-EIOCBQUEUED == ret)
+ ret = wait_on_sync_kiocb(&iocb);
+ return ret;
+}
+
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t size, int flags)
+{
+ mm_segment_t oldfs = get_fs();
+ int result;
+
+ set_fs(KERNEL_DS);
+ /*
+ * the following is safe, since for compiler definitions of kvec and
+ * iovec are identical, yielding the same in-core layout and alignment
+ */
+ msg->msg_iov = (struct iovec *)vec, msg->msg_iovlen = num;
+ result = sock_recvmsg(sock, msg, size, flags);
+ set_fs(oldfs);
+ return result;
+}
+
+static void sock_aio_dtor(struct kiocb *iocb)
+{
+ kfree(iocb->private);
+}
+
+static ssize_t sock_sendpage(struct file *file, struct page *page,
+ int offset, size_t size, loff_t *ppos, int more)
+{
+ struct socket *sock;
+ int flags;
+
+ sock = file->private_data;
+
+ flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
+ if (more)
+ flags |= MSG_MORE;
+
+ return sock->ops->sendpage(sock, page, offset, size, flags);
+}
+
+static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ struct socket *sock = file->private_data;
+
+ if (unlikely(!sock->ops->splice_read))
+ return -EINVAL;
+
+ return sock->ops->splice_read(sock, ppos, pipe, len, flags);
+}
+
+static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
+ struct sock_iocb *siocb)
+{
+ if (!is_sync_kiocb(iocb)) {
+ siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
+ if (!siocb)
+ return NULL;
+ iocb->ki_dtor = sock_aio_dtor;
+ }
+
+ siocb->kiocb = iocb;
+ iocb->private = siocb;
+ return siocb;
+}
+
+static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
+ struct file *file, const struct iovec *iov,
+ unsigned long nr_segs)
+{
+ struct socket *sock = file->private_data;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < nr_segs; i++)
+ size += iov[i].iov_len;
+
+ msg->msg_name = NULL;
+ msg->msg_namelen = 0;
+ msg->msg_control = NULL;
+ msg->msg_controllen = 0;
+ msg->msg_iov = (struct iovec *)iov;
+ msg->msg_iovlen = nr_segs;
+ msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+
+ return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
+}
+
+static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct sock_iocb siocb, *x;
+
+ if (pos != 0)
+ return -ESPIPE;
+
+ if (iocb->ki_left == 0) /* Match SYS5 behaviour */
+ return 0;
+
+
+ x = alloc_sock_iocb(iocb, &siocb);
+ if (!x)
+ return -ENOMEM;
+ return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+}
+
+static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
+ struct file *file, const struct iovec *iov,
+ unsigned long nr_segs)
+{
+ struct socket *sock = file->private_data;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < nr_segs; i++)
+ size += iov[i].iov_len;
+
+ msg->msg_name = NULL;
+ msg->msg_namelen = 0;
+ msg->msg_control = NULL;
+ msg->msg_controllen = 0;
+ msg->msg_iov = (struct iovec *)iov;
+ msg->msg_iovlen = nr_segs;
+ msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+ if (sock->type == SOCK_SEQPACKET)
+ msg->msg_flags |= MSG_EOR;
+
+ return __sock_sendmsg(iocb, sock, msg, size);
+}
+
+static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct sock_iocb siocb, *x;
+
+ if (pos != 0)
+ return -ESPIPE;
+
+ x = alloc_sock_iocb(iocb, &siocb);
+ if (!x)
+ return -ENOMEM;
+
+ return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+}
+
+/*
+ * Atomic setting of ioctl hooks to avoid race
+ * with module unload.
+ */
+
+static DEFINE_MUTEX(br_ioctl_mutex);
+static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
+
+void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
+{
+ mutex_lock(&br_ioctl_mutex);
+ br_ioctl_hook = hook;
+ mutex_unlock(&br_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(brioctl_set);
+
+static DEFINE_MUTEX(vlan_ioctl_mutex);
+static int (*vlan_ioctl_hook) (struct net *, void __user *arg);
+
+void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
+{
+ mutex_lock(&vlan_ioctl_mutex);
+ vlan_ioctl_hook = hook;
+ mutex_unlock(&vlan_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(vlan_ioctl_set);
+
+static DEFINE_MUTEX(dlci_ioctl_mutex);
+static int (*dlci_ioctl_hook) (unsigned int, void __user *);
+
+void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
+{
+ mutex_lock(&dlci_ioctl_mutex);
+ dlci_ioctl_hook = hook;
+ mutex_unlock(&dlci_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(dlci_ioctl_set);
+
+/*
+ * With an ioctl, arg may well be a user mode pointer, but we don't know
+ * what to do with it - that's up to the protocol still.
+ */
+
+static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ struct socket *sock;
+ struct sock *sk;
+ void __user *argp = (void __user *)arg;
+ int pid, err;
+ struct net *net;
+
+ sock = file->private_data;
+ sk = sock->sk;
+ net = sock_net(sk);
+ if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
+ err = dev_ioctl(net, cmd, argp);
+ } else
+#ifdef CONFIG_WIRELESS_EXT
+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+ err = dev_ioctl(net, cmd, argp);
+ } else
+#endif /* CONFIG_WIRELESS_EXT */
+ switch (cmd) {
+ case FIOSETOWN:
+ case SIOCSPGRP:
+ err = -EFAULT;
+ if (get_user(pid, (int __user *)argp))
+ break;
+ err = f_setown(sock->file, pid, 1);
+ break;
+ case FIOGETOWN:
+ case SIOCGPGRP:
+ err = put_user(f_getown(sock->file),
+ (int __user *)argp);
+ break;
+ case SIOCGIFBR:
+ case SIOCSIFBR:
+ case SIOCBRADDBR:
+ case SIOCBRDELBR:
+ err = -ENOPKG;
+ if (!br_ioctl_hook)
+ request_module("bridge");
+
+ mutex_lock(&br_ioctl_mutex);
+ if (br_ioctl_hook)
+ err = br_ioctl_hook(net, cmd, argp);
+ mutex_unlock(&br_ioctl_mutex);
+ break;
+ case SIOCGIFVLAN:
+ case SIOCSIFVLAN:
+ err = -ENOPKG;
+ if (!vlan_ioctl_hook)
+ request_module("8021q");
+
+ mutex_lock(&vlan_ioctl_mutex);
+ if (vlan_ioctl_hook)
+ err = vlan_ioctl_hook(net, argp);
+ mutex_unlock(&vlan_ioctl_mutex);
+ break;
+ case SIOCADDDLCI:
+ case SIOCDELDLCI:
+ err = -ENOPKG;
+ if (!dlci_ioctl_hook)
+ request_module("dlci");
+
+ mutex_lock(&dlci_ioctl_mutex);
+ if (dlci_ioctl_hook)
+ err = dlci_ioctl_hook(cmd, argp);
+ mutex_unlock(&dlci_ioctl_mutex);
+ break;
+ default:
+ err = sock->ops->ioctl(sock, cmd, arg);
+
+ /*
+ * If this ioctl is unknown try to hand it down
+ * to the NIC driver.
+ */
+ if (err == -ENOIOCTLCMD)
+ err = dev_ioctl(net, cmd, argp);
+ break;
+ }
+ return err;
+}
+
+int sock_create_lite(int family, int type, int protocol, struct socket **res)
+{
+ int err;
+ struct socket *sock = NULL;
+
+ err = security_socket_create(family, type, protocol, 1);
+ if (err)
+ goto out;
+
+ sock = sock_alloc();
+ if (!sock) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sock->type = type;
+ err = security_socket_post_create(sock, family, type, protocol, 1);
+ if (err)
+ goto out_release;
+
+out:
+ *res = sock;
+ return err;
+out_release:
+ sock_release(sock);
+ sock = NULL;
+ goto out;
+}
+
+/* No kernel lock held - perfect */
+static unsigned int sock_poll(struct file *file, poll_table *wait)
+{
+ struct socket *sock;
+
+ /*
+ * We can't return errors to poll, so it's either yes or no.
+ */
+ sock = file->private_data;
+ return sock->ops->poll(file, sock, wait);
+}
+
+static int sock_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct socket *sock = file->private_data;
+
+ return sock->ops->mmap(file, sock, vma);
+}
+
+static int sock_close(struct inode *inode, struct file *filp)
+{
+ /*
+ * It was possible the inode is NULL we were
+ * closing an unfinished socket.
+ */
+
+ if (!inode) {
+ printk(KERN_DEBUG "sock_close: NULL inode\n");
+ return 0;
+ }
+ sock_release(SOCKET_I(inode));
+ return 0;
+}
+
+/*
+ * Update the socket async list
+ *
+ * Fasync_list locking strategy.
+ *
+ * 1. fasync_list is modified only under process context socket lock
+ * i.e. under semaphore.
+ * 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
+ * or under socket lock.
+ * 3. fasync_list can be used from softirq context, so that
+ * modification under socket lock have to be enhanced with
+ * write_lock_bh(&sk->sk_callback_lock).
+ * --ANK (990710)
+ */
+
+static int sock_fasync(int fd, struct file *filp, int on)
+{
+ struct fasync_struct *fa, *fna = NULL, **prev;
+ struct socket *sock;
+ struct sock *sk;
+
+ if (on) {
+ fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
+ if (fna == NULL)
+ return -ENOMEM;
+ }
+
+ sock = filp->private_data;
+
+ sk = sock->sk;
+ if (sk == NULL) {
+ kfree(fna);
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+
+ prev = &(sock->fasync_list);
+
+ for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
+ if (fa->fa_file == filp)
+ break;
+
+ if (on) {
+ if (fa != NULL) {
+ write_lock_bh(&sk->sk_callback_lock);
+ fa->fa_fd = fd;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ kfree(fna);
+ goto out;
+ }
+ fna->fa_file = filp;
+ fna->fa_fd = fd;
+ fna->magic = FASYNC_MAGIC;
+ fna->fa_next = sock->fasync_list;
+ write_lock_bh(&sk->sk_callback_lock);
+ sock->fasync_list = fna;
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ if (fa != NULL) {
+ write_lock_bh(&sk->sk_callback_lock);
+ *prev = fa->fa_next;
+ write_unlock_bh(&sk->sk_callback_lock);
+ kfree(fa);
+ }
+ }
+
+out:
+ release_sock(sock->sk);
+ return 0;
+}
+
+/* This function may be called only under socket lock or callback_lock */
+
+int sock_wake_async(struct socket *sock, int how, int band)
+{
+ if (!sock || !sock->fasync_list)
+ return -1;
+ switch (how) {
+ case SOCK_WAKE_WAITD:
+ if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+ break;
+ goto call_kill;
+ case SOCK_WAKE_SPACE:
+ if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
+ break;
+ /* fall through */
+ case SOCK_WAKE_IO:
+call_kill:
+ __kill_fasync(sock->fasync_list, SIGIO, band);
+ break;
+ case SOCK_WAKE_URG:
+ __kill_fasync(sock->fasync_list, SIGURG, band);
+ }
+ return 0;
+}
+
+static int __sock_create(struct net *net, int family, int type, int protocol,
+ struct socket **res, int kern)
+{
+ int err;
+ struct socket *sock;
+ const struct net_proto_family *pf;
+
+ /*
+ * Check protocol is in range
+ */
+ if (family < 0 || family >= NPROTO)
+ return -EAFNOSUPPORT;
+ if (type < 0 || type >= SOCK_MAX)
+ return -EINVAL;
+
+ /* Compatibility.
+
+ This uglymoron is moved from INET layer to here to avoid
+ deadlock in module load.
+ */
+ if (family == PF_INET && type == SOCK_PACKET) {
+ static int warned;
+ if (!warned) {
+ warned = 1;
+ printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
+ current->comm);
+ }
+ family = PF_PACKET;
+ }
+
+ err = security_socket_create(family, type, protocol, kern);
+ if (err)
+ return err;
+
+ /*
+ * Allocate the socket and allow the family to set things up. if
+ * the protocol is 0, the family is instructed to select an appropriate
+ * default.
+ */
+ sock = sock_alloc();
+ if (!sock) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "socket: no more sockets\n");
+ return -ENFILE; /* Not exactly a match, but its the
+ closest posix thing */
+ }
+
+ sock->type = type;
+
+#ifdef CONFIG_MODULES
+ /* Attempt to load a protocol module if the find failed.
+ *
+ * 12/09/1996 Marcin: But! this makes REALLY only sense, if the user
+ * requested real, full-featured networking support upon configuration.
+ * Otherwise module support will break!
+ */
+ if (net_families[family] == NULL)
+ request_module("net-pf-%d", family);
+#endif
+
+ rcu_read_lock();
+ pf = rcu_dereference(net_families[family]);
+ err = -EAFNOSUPPORT;
+ if (!pf)
+ goto out_release;
+
+ /*
+ * We will call the ->create function, that possibly is in a loadable
+ * module, so we have to bump that loadable module refcnt first.
+ */
+ if (!try_module_get(pf->owner))
+ goto out_release;
+
+ /* Now protected by module ref count */
+ rcu_read_unlock();
+
+ err = pf->create(net, sock, protocol);
+ if (err < 0)
+ goto out_module_put;
+
+ /*
+ * Now to bump the refcnt of the [loadable] module that owns this
+ * socket at sock_release time we decrement its refcnt.
+ */
+ if (!try_module_get(sock->ops->owner))
+ goto out_module_busy;
+
+ /*
+ * Now that we're done with the ->create function, the [loadable]
+ * module can have its refcnt decremented
+ */
+ module_put(pf->owner);
+ err = security_socket_post_create(sock, family, type, protocol, kern);
+ if (err)
+ goto out_sock_release;
+ *res = sock;
+
+ return 0;
+
+out_module_busy:
+ err = -EAFNOSUPPORT;
+out_module_put:
+ sock->ops = NULL;
+ module_put(pf->owner);
+out_sock_release:
+ sock_release(sock);
+ return err;
+
+out_release:
+ rcu_read_unlock();
+ goto out_sock_release;
+}
+
+int sock_create(int family, int type, int protocol, struct socket **res)
+{
+ return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
+}
+
+int sock_create_kern(int family, int type, int protocol, struct socket **res)
+{
+ return __sock_create(&init_net, family, type, protocol, res, 1);
+}
+
+SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
+{
+ int retval;
+ struct socket *sock;
+ int flags;
+
+ /* Check the SOCK_* constants for consistency. */
+ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
+ BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
+ BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
+ BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
+
+ flags = type & ~SOCK_TYPE_MASK;
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+ type &= SOCK_TYPE_MASK;
+
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+ retval = sock_create(family, type, protocol, &sock);
+ if (retval < 0)
+ goto out;
+
+ retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
+ if (retval < 0)
+ goto out_release;
+
+out:
+ /* It may be already another descriptor 8) Not kernel problem. */
+ return retval;
+
+out_release:
+ sock_release(sock);
+ return retval;
+}
+
+/*
+ * Create a pair of connected sockets.
+ */
+
+SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ int __user *, usockvec)
+{
+ struct socket *sock1, *sock2;
+ int fd1, fd2, err;
+ struct file *newfile1, *newfile2;
+ int flags;
+
+ flags = type & ~SOCK_TYPE_MASK;
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+ type &= SOCK_TYPE_MASK;
+
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+ /*
+ * Obtain the first socket and check if the underlying protocol
+ * supports the socketpair call.
+ */
+
+ err = sock_create(family, type, protocol, &sock1);
+ if (err < 0)
+ goto out;
+
+ err = sock_create(family, type, protocol, &sock2);
+ if (err < 0)
+ goto out_release_1;
+
+ err = sock1->ops->socketpair(sock1, sock2);
+ if (err < 0)
+ goto out_release_both;
+
+ fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC);
+ if (unlikely(fd1 < 0)) {
+ err = fd1;
+ goto out_release_both;
+ }
+
+ fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC);
+ if (unlikely(fd2 < 0)) {
+ err = fd2;
+ put_filp(newfile1);
+ put_unused_fd(fd1);
+ goto out_release_both;
+ }
+
+ err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
+ if (unlikely(err < 0)) {
+ goto out_fd2;
+ }
+
+ err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
+ if (unlikely(err < 0)) {
+ fput(newfile1);
+ goto out_fd1;
+ }
+
+ audit_fd_pair(fd1, fd2);
+ fd_install(fd1, newfile1);
+ fd_install(fd2, newfile2);
+ /* fd1 and fd2 may be already another descriptors.
+ * Not kernel problem.
+ */
+
+ err = put_user(fd1, &usockvec[0]);
+ if (!err)
+ err = put_user(fd2, &usockvec[1]);
+ if (!err)
+ return 0;
+
+ sys_close(fd2);
+ sys_close(fd1);
+ return err;
+
+out_release_both:
+ sock_release(sock2);
+out_release_1:
+ sock_release(sock1);
+out:
+ return err;
+
+out_fd2:
+ put_filp(newfile1);
+ sock_release(sock1);
+out_fd1:
+ put_filp(newfile2);
+ sock_release(sock2);
+ put_unused_fd(fd1);
+ put_unused_fd(fd2);
+ goto out;
+}
+
+/*
+ * Bind a name to a socket. Nothing much to do here since it's
+ * the protocol's responsibility to handle the local address.
+ *
+ * We move the socket address to kernel space before we call
+ * the protocol layer (having also checked the address is ok).
+ */
+
+SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+ err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
+ if (err >= 0) {
+ err = security_socket_bind(sock,
+ (struct sockaddr *)&address,
+ addrlen);
+ if (!err)
+ err = sock->ops->bind(sock,
+ (struct sockaddr *)
+ &address, addrlen);
+ }
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Perform a listen. Basically, we allow the protocol to do anything
+ * necessary for a listen, and if that works, we mark the socket as
+ * ready for listening.
+ */
+
+SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+{
+ struct socket *sock;
+ int err, fput_needed;
+ int somaxconn;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+ somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+ if ((unsigned)backlog > somaxconn)
+ backlog = somaxconn;
+
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = sock->ops->listen(sock, backlog);
+
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * For accept, we attempt to create a new socket, set up the link
+ * with the client, wake up the client, then return the new
+ * connected fd. We collect the address of the connector in kernel
+ * space and move it to user at the very end. This is unclean because
+ * we open the socket then return an error.
+ *
+ * 1003.1g adds the ability to recvmsg() to query connection pending
+ * status to recvmsg. We need to add that support in a way thats
+ * clean when we restucture accept also.
+ */
+
+SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen, int, flags)
+{
+ struct socket *sock, *newsock;
+ struct file *newfile;
+ int err, len, newfd, fput_needed;
+ struct sockaddr_storage address;
+
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ err = -ENFILE;
+ if (!(newsock = sock_alloc()))
+ goto out_put;
+
+ newsock->type = sock->type;
+ newsock->ops = sock->ops;
+
+ /*
+ * We don't need try_module_get here, as the listening socket (sock)
+ * has the protocol module (sock->ops->owner) held.
+ */
+ __module_get(newsock->ops->owner);
+
+ newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC);
+ if (unlikely(newfd < 0)) {
+ err = newfd;
+ sock_release(newsock);
+ goto out_put;
+ }
+
+ err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
+ if (err < 0)
+ goto out_fd_simple;
+
+ err = security_socket_accept(sock, newsock);
+ if (err)
+ goto out_fd;
+
+ err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+ if (err < 0)
+ goto out_fd;
+
+ if (upeer_sockaddr) {
+ if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
+ &len, 2) < 0) {
+ err = -ECONNABORTED;
+ goto out_fd;
+ }
+ err = move_addr_to_user((struct sockaddr *)&address,
+ len, upeer_sockaddr, upeer_addrlen);
+ if (err < 0)
+ goto out_fd;
+ }
+
+ /* File flags are not inherited via accept() unlike another OSes. */
+
+ fd_install(newfd, newfile);
+ err = newfd;
+
+ security_socket_post_accept(sock, newsock);
+
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+out_fd_simple:
+ sock_release(newsock);
+ put_filp(newfile);
+ put_unused_fd(newfd);
+ goto out_put;
+out_fd:
+ fput(newfile);
+ put_unused_fd(newfd);
+ goto out_put;
+}
+
+SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen)
+{
+ return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
+}
+
+/*
+ * Attempt to connect to a socket with the server address. The address
+ * is in user space so we verify it is OK and move it to kernel space.
+ *
+ * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
+ * break bindings
+ *
+ * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
+ * other SEQPACKET protocols that take time to connect() as it doesn't
+ * include the -EINPROGRESS status for such sockets.
+ */
+
+SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ int, addrlen)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+ err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address);
+ if (err < 0)
+ goto out_put;
+
+ err =
+ security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
+ if (err)
+ goto out_put;
+
+ err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
+ sock->file->f_flags);
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Get the local address ('name') of a socket object. Move the obtained
+ * name to user space.
+ */
+
+SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int len, err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ err = security_socket_getsockname(sock);
+ if (err)
+ goto out_put;
+
+ err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
+ if (err)
+ goto out_put;
+ err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);
+
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Get the remote address ('name') of a socket object. Move the obtained
+ * name to user space.
+ */
+
+SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int len, err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_getpeername(sock);
+ if (err) {
+ fput_light(sock->file, fput_needed);
+ return err;
+ }
+
+ err =
+ sock->ops->getname(sock, (struct sockaddr *)&address, &len,
+ 1);
+ if (!err)
+ err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
+ usockaddr_len);
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Send a datagram to a given address. We move the address into kernel
+ * space and check the user space data area is readable before invoking
+ * the protocol.
+ */
+
+SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int, addr_len)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int err;
+ struct msghdr msg;
+ struct iovec iov;
+ int fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ iov.iov_base = buff;
+ iov.iov_len = len;
+ msg.msg_name = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+ if (addr) {
+ err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
+ if (err < 0)
+ goto out_put;
+ msg.msg_name = (struct sockaddr *)&address;
+ msg.msg_namelen = addr_len;
+ }
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ msg.msg_flags = flags;
+ err = sock_sendmsg(sock, &msg, len);
+
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Send a datagram down a socket.
+ */
+
+SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags)
+{
+ return sys_sendto(fd, buff, len, flags, NULL, 0);
+}
+
+/*
+ * Receive a frame from the socket and optionally record the address of the
+ * sender. We verify the buffers are writable and if needed move the
+ * sender address from kernel to user space.
+ */
+
+SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int __user *, addr_len)
+{
+ struct socket *sock;
+ struct iovec iov;
+ struct msghdr msg;
+ struct sockaddr_storage address;
+ int err, err2;
+ int fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iov;
+ iov.iov_len = size;
+ iov.iov_base = ubuf;
+ msg.msg_name = (struct sockaddr *)&address;
+ msg.msg_namelen = sizeof(address);
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, size, flags);
+
+ if (err >= 0 && addr != NULL) {
+ err2 = move_addr_to_user((struct sockaddr *)&address,
+ msg.msg_namelen, addr, addr_len);
+ if (err2 < 0)
+ err = err2;
+ }
+
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Receive a datagram from a socket.
+ */
+
+asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
+ unsigned flags)
+{
+ return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
+}
+
+/*
+ * Set a socket option. Because we don't know the option lengths we have
+ * to pass the user mode parameter for the protocols to sort out.
+ */
+
+SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int, optlen)
+{
+ int err, fput_needed;
+ struct socket *sock;
+
+ if (optlen < 0)
+ return -EINVAL;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_setsockopt(sock, level, optname);
+ if (err)
+ goto out_put;
+
+ if (level == SOL_SOCKET)
+ err =
+ sock_setsockopt(sock, level, optname, optval,
+ optlen);
+ else
+ err =
+ sock->ops->setsockopt(sock, level, optname, optval,
+ optlen);
+out_put:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Get a socket option. Because we don't know the option lengths we have
+ * to pass a user mode parameter for the protocols to sort out.
+ */
+
+SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int __user *, optlen)
+{
+ int err, fput_needed;
+ struct socket *sock;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_getsockopt(sock, level, optname);
+ if (err)
+ goto out_put;
+
+ if (level == SOL_SOCKET)
+ err =
+ sock_getsockopt(sock, level, optname, optval,
+ optlen);
+ else
+ err =
+ sock->ops->getsockopt(sock, level, optname, optval,
+ optlen);
+out_put:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Shutdown a socket.
+ */
+
+SYSCALL_DEFINE2(shutdown, int, fd, int, how)
+{
+ int err, fput_needed;
+ struct socket *sock;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_shutdown(sock, how);
+ if (!err)
+ err = sock->ops->shutdown(sock, how);
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/* A couple of helpful macros for getting the address of the 32/64 bit
+ * fields which are the same type (int / unsigned) on our platforms.
+ */
+#define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
+#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
+#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
+
+/*
+ * BSD sendmsg interface
+ */
+
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+{
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+ struct socket *sock;
+ struct sockaddr_storage address;
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ unsigned char ctl[sizeof(struct cmsghdr) + 20]
+ __attribute__ ((aligned(sizeof(__kernel_size_t))));
+ /* 20 is size of ipv6_pktinfo */
+ unsigned char *ctl_buf = ctl;
+ struct msghdr msg_sys;
+ int err, ctl_len, iov_size, total_len;
+ int fput_needed;
+
+ err = -EFAULT;
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(&msg_sys, msg_compat))
+ return -EFAULT;
+ }
+ else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+ return -EFAULT;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ /* do not move before msg_sys is valid */
+ err = -EMSGSIZE;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area */
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /* This will also move the address data into kernel space */
+ if (MSG_CMSG_COMPAT & flags) {
+ err = verify_compat_iovec(&msg_sys, iov,
+ (struct sockaddr *)&address,
+ VERIFY_READ);
+ } else
+ err = verify_iovec(&msg_sys, iov,
+ (struct sockaddr *)&address,
+ VERIFY_READ);
+ if (err < 0)
+ goto out_freeiov;
+ total_len = err;
+
+ err = -ENOBUFS;
+
+ if (msg_sys.msg_controllen > INT_MAX)
+ goto out_freeiov;
+ ctl_len = msg_sys.msg_controllen;
+ if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
+ err =
+ cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl,
+ sizeof(ctl));
+ if (err)
+ goto out_freeiov;
+ ctl_buf = msg_sys.msg_control;
+ ctl_len = msg_sys.msg_controllen;
+ } else if (ctl_len) {
+ if (ctl_len > sizeof(ctl)) {
+ ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
+ if (ctl_buf == NULL)
+ goto out_freeiov;
+ }
+ err = -EFAULT;
+ /*
+ * Careful! Before this, msg_sys.msg_control contains a user pointer.
+ * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
+ * checking falls down on this.
+ */
+ if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
+ ctl_len))
+ goto out_freectl;
+ msg_sys.msg_control = ctl_buf;
+ }
+ msg_sys.msg_flags = flags;
+
+ if (sock->file->f_flags & O_NONBLOCK)
+ msg_sys.msg_flags |= MSG_DONTWAIT;
+ err = sock_sendmsg(sock, &msg_sys, total_len);
+
+out_freectl:
+ if (ctl_buf != ctl)
+ sock_kfree_s(sock->sk, ctl_buf, ctl_len);
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * BSD recvmsg interface
+ */
+
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
+{
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+ struct socket *sock;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct msghdr msg_sys;
+ unsigned long cmsg_ptr;
+ int err, iov_size, total_len, len;
+ int fput_needed;
+
+ /* kernel mode address */
+ struct sockaddr_storage addr;
+
+ /* user mode address pointers */
+ struct sockaddr __user *uaddr;
+ int __user *uaddr_len;
+
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(&msg_sys, msg_compat))
+ return -EFAULT;
+ }
+ else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+ return -EFAULT;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ err = -EMSGSIZE;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area */
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /*
+ * Save the user-mode address (verify_iovec will change the
+ * kernel msghdr to use the kernel address space)
+ */
+
+ uaddr = (__force void __user *)msg_sys.msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+ if (MSG_CMSG_COMPAT & flags) {
+ err = verify_compat_iovec(&msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+ } else
+ err = verify_iovec(&msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+ if (err < 0)
+ goto out_freeiov;
+ total_len = err;
+
+ cmsg_ptr = (unsigned long)msg_sys.msg_control;
+ msg_sys.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
+
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg_sys, total_len, flags);
+ if (err < 0)
+ goto out_freeiov;
+ len = err;
+
+ if (uaddr != NULL) {
+ err = move_addr_to_user((struct sockaddr *)&addr,
+ msg_sys.msg_namelen, uaddr,
+ uaddr_len);
+ if (err < 0)
+ goto out_freeiov;
+ }
+ err = __put_user((msg_sys.msg_flags & ~MSG_CMSG_COMPAT),
+ COMPAT_FLAGS(msg));
+ if (err)
+ goto out_freeiov;
+ if (MSG_CMSG_COMPAT & flags)
+ err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
+ &msg_compat->msg_controllen);
+ else
+ err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
+ &msg->msg_controllen);
+ if (err)
+ goto out_freeiov;
+ err = len;
+
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+#ifdef __ARCH_WANT_SYS_SOCKETCALL
+
+/* Argument list sizes for sys_socketcall */
+#define AL(x) ((x) * sizeof(unsigned long))
+static const unsigned char nargs[19]={
+ AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
+ AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
+ AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
+ AL(4)
+};
+
+#undef AL
+
+/*
+ * System call vectors.
+ *
+ * Argument checking cleaned up. Saved 20% in size.
+ * This function doesn't need to set the kernel lock because
+ * it is set by the callees.
+ */
+
+SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
+{
+ unsigned long a[6];
+ unsigned long a0, a1;
+ int err;
+
+ if (call < 1 || call > SYS_ACCEPT4)
+ return -EINVAL;
+
+ /* copy_from_user should be SMP safe. */
+ if (copy_from_user(a, args, nargs[call]))
+ return -EFAULT;
+
+ audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+
+ a0 = a[0];
+ a1 = a[1];
+
+ switch (call) {
+ case SYS_SOCKET:
+ err = sys_socket(a0, a1, a[2]);
+ break;
+ case SYS_BIND:
+ err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
+ break;
+ case SYS_CONNECT:
+ err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
+ break;
+ case SYS_LISTEN:
+ err = sys_listen(a0, a1);
+ break;
+ case SYS_ACCEPT:
+ err = sys_accept4(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2], 0);
+ break;
+ case SYS_GETSOCKNAME:
+ err =
+ sys_getsockname(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2]);
+ break;
+ case SYS_GETPEERNAME:
+ err =
+ sys_getpeername(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2]);
+ break;
+ case SYS_SOCKETPAIR:
+ err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
+ break;
+ case SYS_SEND:
+ err = sys_send(a0, (void __user *)a1, a[2], a[3]);
+ break;
+ case SYS_SENDTO:
+ err = sys_sendto(a0, (void __user *)a1, a[2], a[3],
+ (struct sockaddr __user *)a[4], a[5]);
+ break;
+ case SYS_RECV:
+ err = sys_recv(a0, (void __user *)a1, a[2], a[3]);
+ break;
+ case SYS_RECVFROM:
+ err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
+ (struct sockaddr __user *)a[4],
+ (int __user *)a[5]);
+ break;
+ case SYS_SHUTDOWN:
+ err = sys_shutdown(a0, a1);
+ break;
+ case SYS_SETSOCKOPT:
+ err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]);
+ break;
+ case SYS_GETSOCKOPT:
+ err =
+ sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
+ (int __user *)a[4]);
+ break;
+ case SYS_SENDMSG:
+ err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
+ break;
+ case SYS_RECVMSG:
+ err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
+ break;
+ case SYS_ACCEPT4:
+ err = sys_accept4(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2], a[3]);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+#endif /* __ARCH_WANT_SYS_SOCKETCALL */
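As an illustrative sketch (not taken from this tree), on an architecture that defines __NR_socketcall user space packs the arguments into an array of unsigned longs and passes the call number; the nargs[] table above tells the kernel how many longs to copy back in. Assuming a libc that exposes syscall(2) and the SYS_* constants from <linux/net.h>:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/net.h>          /* SYS_SOCKET and friends */

/* Userspace equivalent of socket(domain, type, protocol) on a socketcall arch. */
long example_socket(int domain, int type, int protocol)
{
        unsigned long args[3] = { domain, type, protocol };

        return syscall(__NR_socketcall, SYS_SOCKET, args);
}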
+
+/**
+ * sock_register - add a socket protocol handler
+ * @ops: description of protocol
+ *
+ * This function is called by a protocol handler that wants to
+ * advertise its address family, and have it linked into the
+ * socket interface. The value ops->family corresponds to the
+ * socket system call protocol family.
+ */
+int sock_register(const struct net_proto_family *ops)
+{
+ int err;
+
+ if (ops->family >= NPROTO) {
+ printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
+ NPROTO);
+ return -ENOBUFS;
+ }
+
+ spin_lock(&net_family_lock);
+ if (net_families[ops->family])
+ err = -EEXIST;
+ else {
+ net_families[ops->family] = ops;
+ err = 0;
+ }
+ spin_unlock(&net_family_lock);
+
+ printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
+ return err;
+}
+
+/**
+ * sock_unregister - remove a protocol handler
+ * @family: protocol family to remove
+ *
+ * This function is called by a protocol handler that wants to
+ * remove its address family, and have it unlinked from the
+ * new socket creation.
+ *
+ * If the protocol handler is a module, then it can use module reference
+ * counts to protect against new references. If the protocol handler is
+ * not a module then it needs to provide its own protection in
+ * the ops->create routine.
+ */
+void sock_unregister(int family)
+{
+ BUG_ON(family < 0 || family >= NPROTO);
+
+ spin_lock(&net_family_lock);
+ net_families[family] = NULL;
+ spin_unlock(&net_family_lock);
+
+ synchronize_rcu();
+
+ printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
+}
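A protocol module typically pairs these two calls in its init/exit paths. A minimal sketch (not taken from this tree; AF_EXAMPLE and the example_* symbols are placeholders for a real address family and its socket-creation routine):

static int example_create(struct net *net, struct socket *sock, int protocol);

static struct net_proto_family example_family_ops = {
        .family = AF_EXAMPLE,           /* placeholder family number */
        .create = example_create,
        .owner  = THIS_MODULE,
};

static int __init example_proto_init(void)
{
        return sock_register(&example_family_ops);      /* -EEXIST if the slot is taken */
}

static void __exit example_proto_exit(void)
{
        sock_unregister(AF_EXAMPLE);    /* waits for readers via synchronize_rcu() */
}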
+
+static int __init sock_init(void)
+{
+ /*
+ * Initialize sock SLAB cache.
+ */
+
+ sk_init();
+
+ /*
+ * Initialize skbuff SLAB cache
+ */
+ skb_init();
+
+ /*
+ * Initialize the protocols module.
+ */
+
+ init_inodecache();
+ register_filesystem(&sock_fs_type);
+ sock_mnt = kern_mount(&sock_fs_type);
+
+ /* The real protocol initialization is performed in later initcalls.
+ */
+
+#ifdef CONFIG_NETFILTER
+ netfilter_init();
+#endif
+
+ return 0;
+}
+
+core_initcall(sock_init); /* early initcall */
+
+#ifdef CONFIG_PROC_FS
+void socket_seq_show(struct seq_file *seq)
+{
+ int cpu;
+ int counter = 0;
+
+ for_each_possible_cpu(cpu)
+ counter += per_cpu(sockets_in_use, cpu);
+
+ /* It can be negative, by the way. 8) */
+ if (counter < 0)
+ counter = 0;
+
+ seq_printf(seq, "sockets: used %d\n", counter);
+}
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_COMPAT
+static long compat_sock_ioctl(struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ struct socket *sock = file->private_data;
+ int ret = -ENOIOCTLCMD;
+ struct sock *sk;
+ struct net *net;
+
+ sk = sock->sk;
+ net = sock_net(sk);
+
+ if (sock->ops->compat_ioctl)
+ ret = sock->ops->compat_ioctl(sock, cmd, arg);
+
+ if (ret == -ENOIOCTLCMD &&
+ (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
+ ret = compat_wext_handle_ioctl(net, cmd, arg);
+
+ return ret;
+}
+#endif
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
+{
+ return sock->ops->bind(sock, addr, addrlen);
+}
+
+int kernel_listen(struct socket *sock, int backlog)
+{
+ return sock->ops->listen(sock, backlog);
+}
+
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
+ newsock);
+ if (err < 0)
+ goto done;
+
+ err = sock->ops->accept(sock, *newsock, flags);
+ if (err < 0) {
+ sock_release(*newsock);
+ *newsock = NULL;
+ goto done;
+ }
+
+ (*newsock)->ops = sock->ops;
+ __module_get((*newsock)->ops->owner);
+
+done:
+ return err;
+}
+
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ int flags)
+{
+ return sock->ops->connect(sock, addr, addrlen, flags);
+}
+
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+ int *addrlen)
+{
+ return sock->ops->getname(sock, addr, addrlen, 0);
+}
+
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+ int *addrlen)
+{
+ return sock->ops->getname(sock, addr, addrlen, 1);
+}
+
+int kernel_getsockopt(struct socket *sock, int level, int optname,
+ char *optval, int *optlen)
+{
+ mm_segment_t oldfs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+ err = sock_getsockopt(sock, level, optname, optval, optlen);
+ else
+ err = sock->ops->getsockopt(sock, level, optname, optval,
+ optlen);
+ set_fs(oldfs);
+ return err;
+}
+
+int kernel_setsockopt(struct socket *sock, int level, int optname,
+ char *optval, int optlen)
+{
+ mm_segment_t oldfs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+ err = sock_setsockopt(sock, level, optname, optval, optlen);
+ else
+ err = sock->ops->setsockopt(sock, level, optname, optval,
+ optlen);
+ set_fs(oldfs);
+ return err;
+}
+
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags)
+{
+ if (sock->ops->sendpage)
+ return sock->ops->sendpage(sock, page, offset, size, flags);
+
+ return sock_no_sendpage(sock, page, offset, size, flags);
+}
+
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
+{
+ mm_segment_t oldfs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sock->ops->ioctl(sock, cmd, arg);
+ set_fs(oldfs);
+
+ return err;
+}
+
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
+{
+ return sock->ops->shutdown(sock, how);
+}
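Taken together, the kernel_* wrappers let kernel code drive a socket without going through file descriptors. A minimal in-kernel TCP listener sketch (illustrative only, not taken from this tree; the port number is arbitrary):

static int example_start_listener(struct socket **listener)
{
        struct sockaddr_in sin = {
                .sin_family      = AF_INET,
                .sin_addr.s_addr = htonl(INADDR_ANY),
                .sin_port        = htons(5000),         /* example port */
        };
        int err;

        err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, listener);
        if (err < 0)
                return err;

        err = kernel_bind(*listener, (struct sockaddr *)&sin, sizeof(sin));
        if (!err)
                err = kernel_listen(*listener, 5);
        if (err < 0) {
                sock_release(*listener);
                *listener = NULL;
        }
        return err;
}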
+
+EXPORT_SYMBOL(sock_create);
+EXPORT_SYMBOL(sock_create_kern);
+EXPORT_SYMBOL(sock_create_lite);
+EXPORT_SYMBOL(sock_map_fd);
+EXPORT_SYMBOL(sock_recvmsg);
+EXPORT_SYMBOL(sock_register);
+EXPORT_SYMBOL(sock_release);
+EXPORT_SYMBOL(sock_sendmsg);
+EXPORT_SYMBOL(sock_unregister);
+EXPORT_SYMBOL(sock_wake_async);
+EXPORT_SYMBOL(sockfd_lookup);
+EXPORT_SYMBOL(kernel_sendmsg);
+EXPORT_SYMBOL(kernel_recvmsg);
+EXPORT_SYMBOL(kernel_bind);
+EXPORT_SYMBOL(kernel_listen);
+EXPORT_SYMBOL(kernel_accept);
+EXPORT_SYMBOL(kernel_connect);
+EXPORT_SYMBOL(kernel_getsockname);
+EXPORT_SYMBOL(kernel_getpeername);
+EXPORT_SYMBOL(kernel_getsockopt);
+EXPORT_SYMBOL(kernel_setsockopt);
+EXPORT_SYMBOL(kernel_sendpage);
+EXPORT_SYMBOL(kernel_sock_ioctl);
+EXPORT_SYMBOL(kernel_sock_shutdown);
diff --git a/libdde_linux26/contrib/net/core/.svn/all-wcprops b/libdde_linux26/contrib/net/core/.svn/all-wcprops
new file mode 100644
index 00000000..0b670d94
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/all-wcprops
@@ -0,0 +1,77 @@
+K 25
+svn:wc:ra_dav:version-url
+V 67
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core
+END
+dev.c
+K 25
+svn:wc:ra_dav:version-url
+V 73
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/dev.c
+END
+ethtool.c
+K 25
+svn:wc:ra_dav:version-url
+V 77
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/ethtool.c
+END
+sock.c
+K 25
+svn:wc:ra_dav:version-url
+V 74
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/sock.c
+END
+net-sysfs.h
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/net-sysfs.h
+END
+neighbour.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/neighbour.c
+END
+dev_mcast.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/dev_mcast.c
+END
+filter.c
+K 25
+svn:wc:ra_dav:version-url
+V 76
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/filter.c
+END
+netevent.c
+K 25
+svn:wc:ra_dav:version-url
+V 78
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/netevent.c
+END
+rtnetlink.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/rtnetlink.c
+END
+net-sysfs.c
+K 25
+svn:wc:ra_dav:version-url
+V 79
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/net-sysfs.c
+END
+kmap_skb.h
+K 25
+svn:wc:ra_dav:version-url
+V 78
+/repos/tudos/!svn/ver/188/trunk/l4/pkg/dde/linux26/contrib/net/core/kmap_skb.h
+END
+skb_dma_map.c
+K 25
+svn:wc:ra_dav:version-url
+V 81
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/core/skb_dma_map.c
+END
diff --git a/libdde_linux26/contrib/net/core/.svn/entries b/libdde_linux26/contrib/net/core/.svn/entries
new file mode 100644
index 00000000..963fd146
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/entries
@@ -0,0 +1,436 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/contrib/net/core
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+dev.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+9a23c563416176602c530d37fe7bf369
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+129785
+
+ethtool.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+266e5aa6b127622f7a3b00574800a69b
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+25199
+
+sock.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+d7fcc1125f9f19eb15f9662d4ce6df75
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+57794
+
+net-sysfs.h
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+8c3cd9452327fa177f9b8150d13eb2ad
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+243
+
+neighbour.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+a923815456fb10bef441bd8bbaa2b58f
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+68158
+
+dev_mcast.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+6c796069f8cf28d49132eed3dc5d8110
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+5650
+
+filter.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+890f8ddba6b64959215aa4baf156ee44
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+12589
+
+netevent.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+bde97ba216631dd98321adc454f80d53
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+2078
+
+rtnetlink.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+01f4f3b7252455460ce9c46f34f1351a
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+34276
+
+net-sysfs.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+54928f891b5e2e40336074eca26a2b2b
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+14072
+
+kmap_skb.h
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+dd4f4f93c1ab135f2d31314d41b1c373
+2007-09-13T02:53:43.697541Z
+188
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+360
+
+skb_dma_map.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+dd4a1adb13e56cb4ecf75c1f4b7d6aa9
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+1468
+
diff --git a/libdde_linux26/contrib/net/core/.svn/format b/libdde_linux26/contrib/net/core/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/dev.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/dev.c.svn-base
new file mode 100644
index 00000000..e3fe5c70
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/dev.c.svn-base
@@ -0,0 +1,5277 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dahinds@users.sourceforge.net>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ * Adam Sulmicki <adam@cfar.umd.edu>
+ * Pekka Riikonen <priikone@poesidon.pspt.fi>
+ *
+ * Changes:
+ * D.J. Barrow : Fixed bug where dev->refcnt gets set
+ * to 2 if register_netdev gets called
+ * before net_dev_init & also removed a
+ * few lines of code in the process.
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant
+ * stunts to keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into
+ * drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before
+ * calling netif_rx. Saves a function
+ * call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close
+ * changes.
+ * Rudi Cilibrasi : Pass the right thing to
+ * set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to
+ * make it work out on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Andi Kleen : Fix error reporting for SIOCGIFCONF
+ * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
+ * Cyrus Durgin : Cleaned for KMOD
+ * Adam Sulmicki : Bug Fix : Network Device Unload
+ * A network device unload needs to purge
+ * the backlog queue.
+ * Paul Rusty Russell : SIOCSIFNAME
+ * Pekka Riikonen : Netdev boot-time settings code
+ * Andrew Morton : Make unregister_netdevice wait
+ * indefinitely on dev->refcnt
+ * J Hadi Salim : - Backlog queue sampling
+ * - netif_rx() feedback
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/capability.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <linux/rtnetlink.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
+#include <net/dst.h>
+#include <net/pkt_sched.h>
+#include <net/checksum.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/netpoll.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+#include <net/wext.h>
+#include <net/iw_handler.h>
+#include <asm/current.h>
+#include <linux/audit.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+
+#include "net-sysfs.h"
+
+/* Instead of increasing this, you should create a hash table. */
+#define MAX_GRO_SKBS 8
+
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ *
+ * Why 16. Because with 16 the only overlap we get on a hash of the
+ * low nibble of the protocol value is RARP/SNAP/X.25.
+ *
+ * NOTE: That is no longer true with the addition of VLAN tags. Not
+ * sure which should go first, but I bet it won't make much
+ * difference if we are running VLANs. The good news is that
+ * this protocol won't be in the list unless compiled in, so
+ * the average user (w/out VLANs) will not be adversely affected.
+ * --BLG
+ *
+ * 0800 IP
+ * 8100 802.1Q VLAN
+ * 0001 802.3
+ * 0002 AX.25
+ * 0004 802.2
+ * 8035 RARP
+ * 0005 SNAP
+ * 0805 X.25
+ * 0806 ARP
+ * 8137 IPX
+ * 0009 Localtalk
+ * 86DD IPv6
+ */
+
+#define PTYPE_HASH_SIZE (16)
+#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
+
+static DEFINE_SPINLOCK(ptype_lock);
+static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+static struct list_head ptype_all __read_mostly; /* Taps */
+
+/*
+ * The @dev_base_head list is protected by @dev_base_lock and the rtnl
+ * semaphore.
+ *
+ * Pure readers hold dev_base_lock for reading.
+ *
+ * Writers must hold the rtnl semaphore while they loop through the
+ * dev_base_head list, and hold dev_base_lock for writing when they do the
+ * actual updates. This allows pure readers to access the list even
+ * while a writer is preparing to update it.
+ *
+ * To put it another way, dev_base_lock is held for writing only to
+ * protect against pure readers; the rtnl semaphore provides the
+ * protection against other writers.
+ *
+ * See, for example usages, register_netdevice() and
+ * unregister_netdevice(), which must be called with the rtnl
+ * semaphore held.
+ */
+DEFINE_RWLOCK(dev_base_lock);
+
+EXPORT_SYMBOL(dev_base_lock);
+
+#define NETDEV_HASHBITS 8
+#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
+
+static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
+{
+ unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+ return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
+}
+
+static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
+{
+ return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
+}
+
+/* Device list insertion */
+static int list_netdevice(struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+
+ ASSERT_RTNL();
+
+ write_lock_bh(&dev_base_lock);
+ list_add_tail(&dev->dev_list, &net->dev_base_head);
+ hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+ hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
+ write_unlock_bh(&dev_base_lock);
+ return 0;
+}
+
+/* Device list removal */
+static void unlist_netdevice(struct net_device *dev)
+{
+ ASSERT_RTNL();
+
+ /* Unlink dev from the device chain */
+ write_lock_bh(&dev_base_lock);
+ list_del(&dev->dev_list);
+ hlist_del(&dev->name_hlist);
+ hlist_del(&dev->index_hlist);
+ write_unlock_bh(&dev_base_lock);
+}
+
+/*
+ * Our notifier list
+ */
+
+static RAW_NOTIFIER_HEAD(netdev_chain);
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the local softnet handler.
+ */
+
+DEFINE_PER_CPU(struct softnet_data, softnet_data);
+
+#ifdef CONFIG_LOCKDEP
+/*
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
+ * according to dev->type
+ */
+static const unsigned short netdev_lock_type[] =
+ {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
+ ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
+ ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
+ ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
+ ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
+ ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
+ ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
+ ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
+ ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
+ ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
+ ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
+ ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
+ ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
+ ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
+ ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+
+static const char *netdev_lock_name[] =
+ {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
+ "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
+ "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
+ "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
+ "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
+ "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
+ "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
+ "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
+ "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
+ "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
+ "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
+ "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
+ "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
+ "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
+ "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
+
+static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
+
+static inline unsigned short netdev_lock_pos(unsigned short dev_type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
+ if (netdev_lock_type[i] == dev_type)
+ return i;
+ /* the last key is used by default */
+ return ARRAY_SIZE(netdev_lock_type) - 1;
+}
+
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ unsigned short dev_type)
+{
+ int i;
+
+ i = netdev_lock_pos(dev_type);
+ lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
+ netdev_lock_name[i]);
+}
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+ int i;
+
+ i = netdev_lock_pos(dev->type);
+ lockdep_set_class_and_name(&dev->addr_list_lock,
+ &netdev_addr_lock_key[i],
+ netdev_lock_name[i]);
+}
+#else
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+}
+#endif
+
+/*******************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************/
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ *
+ * BEWARE!!! Protocol handlers, mangling input packets,
+ * MUST BE last in hash buckets and checking protocol handlers
+ * MUST start from promiscuous ptype_all chain in net_bh.
+ * It is true now, do not change it.
+ * Explanation follows: if protocol handler, mangling packet, will
+ * be the first on list, it is not able to sense, that packet
+ * is cloned and should be copied-on-write, so that it will
+ * change it and subsequent readers will get broken packet.
+ * --ANK (980803)
+ */
+
+/**
+ * dev_add_pack - add packet handler
+ * @pt: packet type declaration
+ *
+ * Add a protocol handler to the networking stack. The passed &packet_type
+ * is linked into kernel lists and may not be freed until it has been
+ * removed from the kernel lists.
+ *
+ * This call does not sleep, therefore it cannot
+ * guarantee that all CPUs that are in the middle of receiving packets
+ * will see the new packet type (until the next received packet).
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+
+ spin_lock_bh(&ptype_lock);
+ if (pt->type == htons(ETH_P_ALL))
+ list_add_rcu(&pt->list, &ptype_all);
+ else {
+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
+ list_add_rcu(&pt->list, &ptype_base[hash]);
+ }
+ spin_unlock_bh(&ptype_lock);
+}
+
+/**
+ * __dev_remove_pack - remove packet handler
+ * @pt: packet type declaration
+ *
+ * Remove a protocol handler that was previously added to the kernel
+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
+ * from the kernel lists and can be freed or reused once this function
+ * returns.
+ *
+ * The packet type might still be in use by receivers
+ * and must not be freed until after all the CPUs have gone
+ * through a quiescent state.
+ */
+void __dev_remove_pack(struct packet_type *pt)
+{
+ struct list_head *head;
+ struct packet_type *pt1;
+
+ spin_lock_bh(&ptype_lock);
+
+ if (pt->type == htons(ETH_P_ALL))
+ head = &ptype_all;
+ else
+ head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+
+ list_for_each_entry(pt1, head, list) {
+ if (pt == pt1) {
+ list_del_rcu(&pt->list);
+ goto out;
+ }
+ }
+
+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+out:
+ spin_unlock_bh(&ptype_lock);
+}
+/**
+ * dev_remove_pack - remove packet handler
+ * @pt: packet type declaration
+ *
+ * Remove a protocol handler that was previously added to the kernel
+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
+ * from the kernel lists and can be freed or reused once this function
+ * returns.
+ *
+ * This call sleeps to guarantee that no CPU is looking at the packet
+ * type after return.
+ */
+void dev_remove_pack(struct packet_type *pt)
+{
+ __dev_remove_pack(pt);
+
+ synchronize_net();
+}
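A receive-side protocol hooks itself in through this interface. An illustrative sketch (not taken from this tree; ETH_P_EXAMPLE and example_rcv are placeholders for a real ethertype and handler):

static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* consume the frame; a real handler would parse it here */
        kfree_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_packet_type = {
        .type = __constant_htons(ETH_P_EXAMPLE),        /* placeholder ethertype */
        .func = example_rcv,
};

/* dev_add_pack(&example_packet_type) on load,
 * dev_remove_pack(&example_packet_type) on unload. */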
+
+/******************************************************************************
+
+ Device Boot-time Settings Routines
+
+*******************************************************************************/
+
+/* Boot time configuration table */
+static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
+
+/**
+ * netdev_boot_setup_add - add new setup entry
+ * @name: name of the device
+ * @map: configured settings for the device
+ *
+ * Adds new setup entry to the dev_boot_setup list. The function
+ * returns 0 on error and 1 on success. This is a generic routine to
+ * all netdevices.
+ */
+static int netdev_boot_setup_add(char *name, struct ifmap *map)
+{
+ struct netdev_boot_setup *s;
+ int i;
+
+ s = dev_boot_setup;
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+ if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
+ memset(s[i].name, 0, sizeof(s[i].name));
+ strlcpy(s[i].name, name, IFNAMSIZ);
+ memcpy(&s[i].map, map, sizeof(s[i].map));
+ break;
+ }
+ }
+
+ return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
+}
+
+/**
+ * netdev_boot_setup_check - check boot time settings
+ * @dev: the netdevice
+ *
+ * Check boot time settings for the device.
+ * The found settings are set for the device to be used
+ * later in the device probing.
+ * Returns 0 if no settings found, 1 if they are.
+ */
+int netdev_boot_setup_check(struct net_device *dev)
+{
+ struct netdev_boot_setup *s = dev_boot_setup;
+ int i;
+
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+ if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
+ !strcmp(dev->name, s[i].name)) {
+ dev->irq = s[i].map.irq;
+ dev->base_addr = s[i].map.base_addr;
+ dev->mem_start = s[i].map.mem_start;
+ dev->mem_end = s[i].map.mem_end;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * netdev_boot_base - get address from boot time settings
+ * @prefix: prefix for network device
+ * @unit: id for network device
+ *
+ * Check boot time settings for the base address of device.
+ * The found settings are set for the device to be used
+ * later in the device probing.
+ * Returns 0 if no settings found.
+ */
+unsigned long netdev_boot_base(const char *prefix, int unit)
+{
+ const struct netdev_boot_setup *s = dev_boot_setup;
+ char name[IFNAMSIZ];
+ int i;
+
+ sprintf(name, "%s%d", prefix, unit);
+
+ /*
+ * If device already registered then return base of 1
+ * to indicate not to probe for this interface
+ */
+ if (__dev_get_by_name(&init_net, name))
+ return 1;
+
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
+ if (!strcmp(name, s[i].name))
+ return s[i].map.base_addr;
+ return 0;
+}
+
+/*
+ * Saves at boot time configured settings for any netdevice.
+ */
+int __init netdev_boot_setup(char *str)
+{
+ int ints[5];
+ struct ifmap map;
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+ if (!str || !*str)
+ return 0;
+
+ /* Save settings */
+ memset(&map, 0, sizeof(map));
+ if (ints[0] > 0)
+ map.irq = ints[1];
+ if (ints[0] > 1)
+ map.base_addr = ints[2];
+ if (ints[0] > 2)
+ map.mem_start = ints[3];
+ if (ints[0] > 3)
+ map.mem_end = ints[4];
+
+ /* Add new entry to the list */
+ return netdev_boot_setup_add(str, &map);
+}
+
+__setup("netdev=", netdev_boot_setup);
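For reference, this parses the historical command-line form netdev=<irq>,<io>,<mem_start>,<mem_end>,<name>. With illustrative values, a line such as

    netdev=9,0x300,0,0,eth1

records IRQ 9 and I/O base 0x300 for the device that will later identify itself as "eth1"; a driver then picks the values up with netdev_boot_setup_check().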
+
+/*******************************************************************************
+
+ Device Interface Subroutines
+
+*******************************************************************************/
+
+/**
+ * __dev_get_by_name - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
+ *
+ * Find an interface by name. Must be called under RTNL semaphore
+ * or @dev_base_lock. If the name is found a pointer to the device
+ * is returned. If the name is not found then %NULL is returned. The
+ * reference counters are not incremented so the caller must be
+ * careful with locks.
+ */
+
+struct net_device *__dev_get_by_name(struct net *net, const char *name)
+{
+ struct hlist_node *p;
+
+ hlist_for_each(p, dev_name_hash(net, name)) {
+ struct net_device *dev
+ = hlist_entry(p, struct net_device, name_hlist);
+ if (!strncmp(dev->name, name, IFNAMSIZ))
+ return dev;
+ }
+ return NULL;
+}
+
+/**
+ * dev_get_by_name - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
+ *
+ * Find an interface by name. This can be called from any
+ * context and does its own locking. The returned handle has
+ * the usage count incremented and the caller must use dev_put() to
+ * release it when it is no longer needed. %NULL is returned if no
+ * matching device is found.
+ */
+
+struct net_device *dev_get_by_name(struct net *net, const char *name)
+{
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_name(net, name);
+ if (dev)
+ dev_hold(dev);
+ read_unlock(&dev_base_lock);
+ return dev;
+}
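A caller that only needs a short-lived reference pairs the lookup with dev_put(). A small sketch (illustrative only, not taken from this tree; "eth0" and init_net are just examples):

static void example_dump_mtu(void)
{
        struct net_device *dev = dev_get_by_name(&init_net, "eth0");

        if (!dev)
                return;
        printk(KERN_DEBUG "%s: mtu %d\n", dev->name, dev->mtu);
        dev_put(dev);                   /* drop the reference taken by the lookup */
}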
+
+/**
+ * __dev_get_by_index - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. Returns %NULL if the device
+ * is not found or a pointer to the device. The device has not
+ * had its reference counter increased so the caller must be careful
+ * about locking. The caller must hold either the RTNL semaphore
+ * or @dev_base_lock.
+ */
+
+struct net_device *__dev_get_by_index(struct net *net, int ifindex)
+{
+ struct hlist_node *p;
+
+ hlist_for_each(p, dev_index_hash(net, ifindex)) {
+ struct net_device *dev
+ = hlist_entry(p, struct net_device, index_hlist);
+ if (dev->ifindex == ifindex)
+ return dev;
+ }
+ return NULL;
+}
+
+
+/**
+ * dev_get_by_index - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. Returns NULL if the device
+ * is not found or a pointer to the device. The device returned has
+ * had a reference added and the pointer is safe until the user calls
+ * dev_put to indicate they have finished with it.
+ */
+
+struct net_device *dev_get_by_index(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_index(net, ifindex);
+ if (dev)
+ dev_hold(dev);
+ read_unlock(&dev_base_lock);
+ return dev;
+}
+
+/**
+ * dev_getbyhwaddr - find a device by its hardware address
+ * @net: the applicable net namespace
+ * @type: media type of device
+ * @ha: hardware address
+ *
+ * Search for an interface by MAC address. Returns NULL if the device
+ * is not found or a pointer to the device. The caller must hold the
+ * rtnl semaphore. The returned device has not had its ref count increased
+ * and the caller must therefore be careful about locking
+ *
+ * BUGS:
+ * If the API was consistent this would be __dev_get_by_hwaddr
+ */
+
+struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+
+ for_each_netdev(net, dev)
+ if (dev->type == type &&
+ !memcmp(dev->dev_addr, ha, dev->addr_len))
+ return dev;
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(dev_getbyhwaddr);
+
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+ for_each_netdev(net, dev)
+ if (dev->type == type)
+ return dev;
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(__dev_getfirstbyhwtype);
+
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
+{
+ struct net_device *dev;
+
+ rtnl_lock();
+ dev = __dev_getfirstbyhwtype(net, type);
+ if (dev)
+ dev_hold(dev);
+ rtnl_unlock();
+ return dev;
+}
+
+EXPORT_SYMBOL(dev_getfirstbyhwtype);
+
+/**
+ * dev_get_by_flags - find any device with given flags
+ * @net: the applicable net namespace
+ * @if_flags: IFF_* values
+ * @mask: bitmask of bits in if_flags to check
+ *
+ * Search for any interface with the given flags. Returns NULL if a device
+ * is not found or a pointer to the device. The device returned has
+ * had a reference added and the pointer is safe until the user calls
+ * dev_put to indicate they have finished with it.
+ */
+
+struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
+{
+ struct net_device *dev, *ret;
+
+ ret = NULL;
+ read_lock(&dev_base_lock);
+ for_each_netdev(net, dev) {
+ if (((dev->flags ^ if_flags) & mask) == 0) {
+ dev_hold(dev);
+ ret = dev;
+ break;
+ }
+ }
+ read_unlock(&dev_base_lock);
+ return ret;
+}
+
+/**
+ * dev_valid_name - check if name is okay for network device
+ * @name: name string
+ *
+ * Network device names need to be valid file names to
+ * allow sysfs to work. We also disallow any kind of
+ * whitespace.
+ */
+int dev_valid_name(const char *name)
+{
+ if (*name == '\0')
+ return 0;
+ if (strlen(name) >= IFNAMSIZ)
+ return 0;
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ return 0;
+
+ while (*name) {
+ if (*name == '/' || isspace(*name))
+ return 0;
+ name++;
+ }
+ return 1;
+}
+
+/**
+ * __dev_alloc_name - allocate a name for a device
+ * @net: network namespace to allocate the device name in
+ * @name: name format string
+ * @buf: scratch buffer and result name string
+ *
+ * Passed a format string - eg "lt%d" - it will try and find a suitable
+ * id. It scans list of devices to build up a free map, then chooses
+ * the first empty slot. The caller must hold the dev_base or rtnl lock
+ * while allocating the name and adding the device in order to avoid
+ * duplicates.
+ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
+ * Returns the number of the unit assigned or a negative errno code.
+ */
+
+static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+{
+ int i = 0;
+ const char *p;
+ const int max_netdevices = 8*PAGE_SIZE;
+ unsigned long *inuse;
+ struct net_device *d;
+
+ p = strnchr(name, IFNAMSIZ-1, '%');
+ if (p) {
+ /*
+ * Verify the string as this thing may have come from
+ * the user. There must be either one "%d" and no other "%"
+ * characters.
+ */
+ if (p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
+ /* Use one page as a bit array of possible slots */
+ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
+ if (!inuse)
+ return -ENOMEM;
+
+ for_each_netdev(net, d) {
+ if (!sscanf(d->name, name, &i))
+ continue;
+ if (i < 0 || i >= max_netdevices)
+ continue;
+
+ /* avoid cases where sscanf is not exact inverse of printf */
+ snprintf(buf, IFNAMSIZ, name, i);
+ if (!strncmp(buf, d->name, IFNAMSIZ))
+ set_bit(i, inuse);
+ }
+
+ i = find_first_zero_bit(inuse, max_netdevices);
+ free_page((unsigned long) inuse);
+ }
+
+ snprintf(buf, IFNAMSIZ, name, i);
+ if (!__dev_get_by_name(net, buf))
+ return i;
+
+ /* It is possible to run out of possible slots
+ * when the name is long and there isn't enough space left
+ * for the digits, or if all bits are used.
+ */
+ return -ENFILE;
+}
+
+/**
+ * dev_alloc_name - allocate a name for a device
+ * @dev: device
+ * @name: name format string
+ *
+ * Passed a format string - eg "lt%d" - it will try and find a suitable
+ * id. It scans list of devices to build up a free map, then chooses
+ * the first empty slot. The caller must hold the dev_base or rtnl lock
+ * while allocating the name and adding the device in order to avoid
+ * duplicates.
+ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
+ * Returns the number of the unit assigned or a negative errno code.
+ */
+
+int dev_alloc_name(struct net_device *dev, const char *name)
+{
+ char buf[IFNAMSIZ];
+ struct net *net;
+ int ret;
+
+ BUG_ON(!dev_net(dev));
+ net = dev_net(dev);
+ ret = __dev_alloc_name(net, name, buf);
+ if (ret >= 0)
+ strlcpy(dev->name, buf, IFNAMSIZ);
+ return ret;
+}
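Drivers usually let the allocation path apply the "eth%d" template for them, but the helper can also be called directly on a not-yet-registered device. A sketch (illustrative only, not taken from this tree):

static int example_pick_name(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "eth%d");

        if (unit < 0)
                return unit;            /* -EINVAL, -ENOMEM or -ENFILE */
        /* dev->name now holds e.g. "eth2"; unit is the assigned number */
        return 0;
}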
+
+
+/**
+ * dev_change_name - change name of a device
+ * @dev: device
+ * @newname: name (or format string) must be at least IFNAMSIZ
+ *
+ * Change name of a device; format strings such as "eth%d"
+ * can be passed for wildcarding.
+ */
+int dev_change_name(struct net_device *dev, const char *newname)
+{
+ char oldname[IFNAMSIZ];
+ int err = 0;
+ int ret;
+ struct net *net;
+
+ ASSERT_RTNL();
+ BUG_ON(!dev_net(dev));
+
+ net = dev_net(dev);
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (!dev_valid_name(newname))
+ return -EINVAL;
+
+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
+ return 0;
+
+ memcpy(oldname, dev->name, IFNAMSIZ);
+
+ if (strchr(newname, '%')) {
+ err = dev_alloc_name(dev, newname);
+ if (err < 0)
+ return err;
+ }
+ else if (__dev_get_by_name(net, newname))
+ return -EEXIST;
+ else
+ strlcpy(dev->name, newname, IFNAMSIZ);
+
+rollback:
+ /* For now only devices in the initial network namespace
+ * are in sysfs.
+ */
+ if (net == &init_net) {
+ ret = device_rename(&dev->dev, dev->name);
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ return ret;
+ }
+ }
+
+ write_lock_bh(&dev_base_lock);
+ hlist_del(&dev->name_hlist);
+ hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+ write_unlock_bh(&dev_base_lock);
+
+ ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
+ ret = notifier_to_errno(ret);
+
+ if (ret) {
+ if (err) {
+ printk(KERN_ERR
+ "%s: name change rollback failed: %d.\n",
+ dev->name, ret);
+ } else {
+ err = ret;
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ goto rollback;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * dev_set_alias - change ifalias of a device
+ * @dev: device
+ * @alias: name up to IFALIASZ
+ * @len: limit of bytes to copy from info
+ *
+ * Set the ifalias for a device.
+ */
+int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+{
+ ASSERT_RTNL();
+
+ if (len >= IFALIASZ)
+ return -EINVAL;
+
+ if (!len) {
+ if (dev->ifalias) {
+ kfree(dev->ifalias);
+ dev->ifalias = NULL;
+ }
+ return 0;
+ }
+
+ dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
+ if (!dev->ifalias)
+ return -ENOMEM;
+
+ strlcpy(dev->ifalias, alias, len+1);
+ return len;
+}
+
+
+/**
+ * netdev_features_change - device changes features
+ * @dev: device to cause notification
+ *
+ * Called to indicate a device has changed features.
+ */
+void netdev_features_change(struct net_device *dev)
+{
+ call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL(netdev_features_change);
+
+/**
+ * netdev_state_change - device changes state
+ * @dev: device to cause notification
+ *
+ * Called to indicate a device has changed state. This function calls
+ * the notifier chains for netdev_chain and sends a NEWLINK message
+ * to the routing socket.
+ */
+void netdev_state_change(struct net_device *dev)
+{
+ if (dev->flags & IFF_UP) {
+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ }
+}
+
+void netdev_bonding_change(struct net_device *dev)
+{
+ call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
+}
+EXPORT_SYMBOL(netdev_bonding_change);
+
+/**
+ * dev_load - load a network module
+ * @net: the applicable net namespace
+ * @name: name of interface
+ *
+ * If a network interface is not present and the process has suitable
+ * privileges this function loads the module. If module loading is not
+ * available in this kernel then it becomes a nop.
+ */
+
+void dev_load(struct net *net, const char *name)
+{
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_name(net, name);
+ read_unlock(&dev_base_lock);
+
+ if (!dev && capable(CAP_SYS_MODULE))
+ request_module("%s", name);
+}
+
+/**
+ * dev_open - prepare an interface for use.
+ * @dev: device to open
+ *
+ * Takes a device from down to up state. The device's private open
+ * function is invoked and then the multicast lists are loaded. Finally
+ * the device is moved into the up state and a %NETDEV_UP message is
+ * sent to the netdev notifier chain.
+ *
+ * Calling this function on an active interface is a nop. On a failure
+ * a negative errno code is returned.
+ */
+int dev_open(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int ret = 0;
+
+ ASSERT_RTNL();
+
+ /*
+ * Is it already up?
+ */
+
+ if (dev->flags & IFF_UP)
+ return 0;
+
+ /*
+ * Is it even present?
+ */
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+ /*
+ * Call device private open method
+ */
+ set_bit(__LINK_STATE_START, &dev->state);
+
+ if (ops->ndo_validate_addr)
+ ret = ops->ndo_validate_addr(dev);
+
+ if (!ret && ops->ndo_open)
+ ret = ops->ndo_open(dev);
+
+ /*
+ * If it went open OK then:
+ */
+
+ if (ret)
+ clear_bit(__LINK_STATE_START, &dev->state);
+ else {
+ /*
+ * Set the flags.
+ */
+ dev->flags |= IFF_UP;
+
+ /*
+ * Enable NET_DMA
+ */
+ net_dmaengine_get();
+
+ /*
+ * Initialize multicasting status
+ */
+ dev_set_rx_mode(dev);
+
+ /*
+ * Wakeup transmit queue engine
+ */
+ dev_activate(dev);
+
+ /*
+ * ... and announce new interface.
+ */
+ call_netdevice_notifiers(NETDEV_UP, dev);
+ }
+
+ return ret;
+}
+
+/**
+ * dev_close - shutdown an interface.
+ * @dev: device to shutdown
+ *
+ * This function moves an active device into down state. A
+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ * chain.
+ */
+int dev_close(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ ASSERT_RTNL();
+
+ might_sleep();
+
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
+ /*
+ * Tell people we are going down, so that they can
+ * prepare for it while the device is still operating.
+ */
+ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+
+ clear_bit(__LINK_STATE_START, &dev->state);
+
+ /* Synchronize to scheduled poll. We cannot touch the poll list;
+ * it can even be on a different cpu. So just clear netif_running().
+ *
+ * dev->stop() will invoke napi_disable() on all of its
+ * napi_struct instances on this device.
+ */
+ smp_mb__after_clear_bit(); /* Commit netif_running(). */
+
+ dev_deactivate(dev);
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ *
+ * We allow it to be called even after a DETACH hot-plug
+ * event.
+ */
+ if (ops->ndo_stop)
+ ops->ndo_stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags &= ~IFF_UP;
+
+ /*
+ * Tell people we are down
+ */
+ call_netdevice_notifiers(NETDEV_DOWN, dev);
+
+ /*
+ * Shutdown NET_DMA
+ */
+ net_dmaengine_put();
+
+ return 0;
+}
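Both transitions must be performed under the RTNL lock, as the ASSERT_RTNL() calls above enforce. A sketch of toggling an interface from kernel code (illustrative only, not taken from this tree):

static int example_cycle_interface(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_open(dev);            /* nop if the device is already up */
        if (!err)
                err = dev_close(dev);   /* nop if the device is not up */
        rtnl_unlock();
        return err;
}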
+
+
+/**
+ * dev_disable_lro - disable Large Receive Offload on a device
+ * @dev: device
+ *
+ * Disable Large Receive Offload (LRO) on a net device. Must be
+ * called under RTNL. This is needed if received packets may be
+ * forwarded to another interface.
+ */
+void dev_disable_lro(struct net_device *dev)
+{
+ if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
+ dev->ethtool_ops->set_flags) {
+ u32 flags = dev->ethtool_ops->get_flags(dev);
+ if (flags & ETH_FLAG_LRO) {
+ flags &= ~ETH_FLAG_LRO;
+ dev->ethtool_ops->set_flags(dev, flags);
+ }
+ }
+ WARN_ON(dev->features & NETIF_F_LRO);
+}
+EXPORT_SYMBOL(dev_disable_lro);
+
+
+static int dev_boot_phase = 1;
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+/**
+ * register_netdevice_notifier - register a network notifier block
+ * @nb: notifier
+ *
+ * Register a notifier to be called when network device events occur.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
+ *
+ * When registered, all registration and up events are replayed
+ * to the new notifier to allow it to have a race-free
+ * view of the network device list.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ struct net_device *dev;
+ struct net_device *last;
+ struct net *net;
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_register(&netdev_chain, nb);
+ if (err)
+ goto unlock;
+ if (dev_boot_phase)
+ goto unlock;
+ for_each_net(net) {
+ for_each_netdev(net, dev) {
+ err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
+ err = notifier_to_errno(err);
+ if (err)
+ goto rollback;
+
+ if (!(dev->flags & IFF_UP))
+ continue;
+
+ nb->notifier_call(nb, NETDEV_UP, dev);
+ }
+ }
+
+unlock:
+ rtnl_unlock();
+ return err;
+
+rollback:
+ last = dev;
+ for_each_net(net) {
+ for_each_netdev(net, dev) {
+ if (dev == last)
+ break;
+
+ if (dev->flags & IFF_UP) {
+ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+ nb->notifier_call(nb, NETDEV_DOWN, dev);
+ }
+ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+ }
+ }
+
+ raw_notifier_chain_unregister(&netdev_chain, nb);
+ goto unlock;
+}
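A subscriber provides a notifier_block whose callback receives the event and, in this kernel generation, the affected net_device pointer directly. A minimal sketch (illustrative only, not taken from this tree):

static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = ptr;

        if (event == NETDEV_UP)
                printk(KERN_DEBUG "example: %s is up\n", dev->name);
        return NOTIFY_DONE;
}

static struct notifier_block example_netdev_notifier = {
        .notifier_call = example_netdev_event,
};

/* register_netdevice_notifier(&example_netdev_notifier) replays
 * NETDEV_REGISTER/NETDEV_UP for devices that already exist. */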
+
+/**
+ * unregister_netdevice_notifier - unregister a network notifier block
+ * @nb: notifier
+ *
+ * Unregister a notifier previously registered by
+ * register_netdevice_notifier(). The notifier is unlinked from the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
+ */
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_unregister(&netdev_chain, nb);
+ rtnl_unlock();
+ return err;
+}
+
+/**
+ * call_netdevice_notifiers - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
+{
+ return raw_notifier_call_chain(&netdev_chain, val, dev);
+}
+
+/* When > 0 there are consumers of rx skb time stamps */
+static atomic_t netstamp_needed = ATOMIC_INIT(0);
+
+void net_enable_timestamp(void)
+{
+ atomic_inc(&netstamp_needed);
+}
+
+void net_disable_timestamp(void)
+{
+ atomic_dec(&netstamp_needed);
+}
+
+static inline void net_timestamp(struct sk_buff *skb)
+{
+ if (atomic_read(&netstamp_needed))
+ __net_timestamp(skb);
+ else
+ skb->tstamp.tv64 = 0;
+}
+
+/*
+ * Support routine. Sends outgoing frames to any network
+ * taps currently in use.
+ */
+
+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct packet_type *ptype;
+
+ net_timestamp(skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ (ptype->af_packet_priv == NULL ||
+ (struct sock *)ptype->af_packet_priv != skb->sk)) {
+ struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ break;
+
+ /* skb->nh should be correctly
+ set by sender, so that the second statement is
+ just protection against buggy protocols.
+ */
+ skb_reset_mac_header(skb2);
+
+ if (skb_network_header(skb2) < skb2->data ||
+ skb2->network_header > skb2->tail) {
+ if (net_ratelimit())
+ printk(KERN_CRIT "protocol %04x is "
+ "buggy, dev %s\n",
+ skb2->protocol, dev->name);
+ skb_reset_network_header(skb2);
+ }
+
+ skb2->transport_header = skb2->network_header;
+ skb2->pkt_type = PACKET_OUTGOING;
+ ptype->func(skb2, skb->dev, ptype, skb->dev);
+ }
+ }
+ rcu_read_unlock();
+}
+
+
+static inline void __netif_reschedule(struct Qdisc *q)
+{
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
+}
+EXPORT_SYMBOL(__netif_schedule);
+
+void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ if (atomic_dec_and_test(&skb->users)) {
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ skb->next = sd->completion_queue;
+ sd->completion_queue = skb;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(dev_kfree_skb_irq);
+
+void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ if (in_irq() || irqs_disabled())
+ dev_kfree_skb_irq(skb);
+ else
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(dev_kfree_skb_any);
+
+
+/**
+ * netif_device_detach - mark device as removed
+ * @dev: network device
+ *
+ * Mark device as removed from system and therefore no longer available.
+ */
+void netif_device_detach(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_stop_queue(dev);
+ }
+}
+EXPORT_SYMBOL(netif_device_detach);
+
+/**
+ * netif_device_attach - mark device as attached
+ * @dev: network device
+ *
+ * Mark device as attached to the system and restart if needed.
+ */
+void netif_device_attach(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_wake_queue(dev);
+ __netdev_watchdog_up(dev);
+ }
+}
+EXPORT_SYMBOL(netif_device_attach);
+
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+ return ((features & NETIF_F_GEN_CSUM) ||
+ ((features & NETIF_F_IP_CSUM) &&
+ protocol == htons(ETH_P_IP)) ||
+ ((features & NETIF_F_IPV6_CSUM) &&
+ protocol == htons(ETH_P_IPV6)));
+}
+
+static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
+{
+ if (can_checksum_protocol(dev->features, skb->protocol))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ if (can_checksum_protocol(dev->features & dev->vlan_features,
+ veh->h_vlan_encapsulated_proto))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Invalidate hardware checksum when packet is to be mangled, and
+ * complete checksum manually on outgoing path.
+ */
+int skb_checksum_help(struct sk_buff *skb)
+{
+ __wsum csum;
+ int ret = 0, offset;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ goto out_set_summed;
+
+ if (unlikely(skb_shinfo(skb)->gso_size)) {
+ /* Let GSO fix up the checksum. */
+ goto out_set_summed;
+ }
+
+ offset = skb->csum_start - skb_headroom(skb);
+ BUG_ON(offset >= skb_headlen(skb));
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+ offset += skb->csum_offset;
+ BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
+
+ if (skb_cloned(skb) &&
+ !skb_clone_writable(skb, offset + sizeof(__sum16))) {
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (ret)
+ goto out;
+ }
+
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+out_set_summed:
+ skb->ip_summed = CHECKSUM_NONE;
+out:
+ return ret;
+}
+
+/**
+ * skb_gso_segment - Perform segmentation on skb.
+ * @skb: buffer to segment
+ * @features: features for the output path (see dev->features)
+ *
+ * This function segments the given skb and returns a list of segments.
+ *
+ * It may return NULL if the skb requires no segmentation. This is
+ * only possible when GSO is used for verifying header integrity.
+ */
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+ struct packet_type *ptype;
+ __be16 type = skb->protocol;
+ int err;
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+ __skb_pull(skb, skb->mac_len);
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ struct net_device *dev = skb->dev;
+ struct ethtool_drvinfo info = {};
+
+ if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+ dev->ethtool_ops->get_drvinfo(dev, &info);
+
+ WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
+ "ip_summed=%d",
+ info.driver, dev ? dev->features : 0L,
+ skb->sk ? skb->sk->sk_route_caps : 0L,
+ skb->len, skb->data_len, skb->ip_summed);
+
+ if (skb_header_cloned(skb) &&
+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return ERR_PTR(err);
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype,
+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
+ if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ err = ptype->gso_send_check(skb);
+ segs = ERR_PTR(err);
+ if (err || skb_gso_ok(skb, features))
+ break;
+ __skb_push(skb, (skb->data -
+ skb_network_header(skb)));
+ }
+ segs = ptype->gso_segment(skb, features);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+
+ return segs;
+}
+
+EXPORT_SYMBOL(skb_gso_segment);
+
+/* Take action when hardware reception checksum errors are detected. */
+#ifdef CONFIG_BUG
+void netdev_rx_csum_fault(struct net_device *dev)
+{
+ if (net_ratelimit()) {
+ printk(KERN_ERR "%s: hw csum failure.\n",
+ dev ? dev->name : "<unknown>");
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(netdev_rx_csum_fault);
+#endif
+
+/* Actually, we should eliminate this check as soon as we know, that:
+ * 1. IOMMU is present and allows to map all the memory.
+ * 2. No high memory really exists on this machine.
+ */
+
+static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_HIGHMEM
+ int i;
+
+ if (dev->features & NETIF_F_HIGHDMA)
+ return 0;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+ return 1;
+
+#endif
+ return 0;
+}
+
+struct dev_gso_cb {
+ void (*destructor)(struct sk_buff *skb);
+};
+
+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+
+static void dev_gso_skb_destructor(struct sk_buff *skb)
+{
+ struct dev_gso_cb *cb;
+
+ do {
+ struct sk_buff *nskb = skb->next;
+
+ skb->next = nskb->next;
+ nskb->next = NULL;
+ kfree_skb(nskb);
+ } while (skb->next);
+
+ cb = DEV_GSO_CB(skb);
+ if (cb->destructor)
+ cb->destructor(skb);
+}
+
+/**
+ * dev_gso_segment - Perform emulated hardware segmentation on skb.
+ * @skb: buffer to segment
+ *
+ * This function segments the given skb and stores the list of segments
+ * in skb->next.
+ */
+static int dev_gso_segment(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct sk_buff *segs;
+ int features = dev->features & ~(illegal_highdma(dev, skb) ?
+ NETIF_F_SG : 0);
+
+ segs = skb_gso_segment(skb, features);
+
+ /* Verifying header integrity only. */
+ if (!segs)
+ return 0;
+
+ if (IS_ERR(segs))
+ return PTR_ERR(segs);
+
+ skb->next = segs;
+ DEV_GSO_CB(skb)->destructor = skb->destructor;
+ skb->destructor = dev_gso_skb_destructor;
+
+ return 0;
+}
+
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ prefetch(&dev->netdev_ops->ndo_start_xmit);
+ if (likely(!skb->next)) {
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
+ if (netif_needs_gso(dev, skb)) {
+ if (unlikely(dev_gso_segment(skb)))
+ goto out_kfree_skb;
+ if (skb->next)
+ goto gso;
+ }
+
+ return ops->ndo_start_xmit(skb, dev);
+ }
+
+gso:
+ do {
+ struct sk_buff *nskb = skb->next;
+ int rc;
+
+ skb->next = nskb->next;
+ nskb->next = NULL;
+ rc = ops->ndo_start_xmit(nskb, dev);
+ if (unlikely(rc)) {
+ nskb->next = skb->next;
+ skb->next = nskb;
+ return rc;
+ }
+ if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+ return NETDEV_TX_BUSY;
+ } while (skb->next);
+
+ skb->destructor = DEV_GSO_CB(skb)->destructor;
+
+out_kfree_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static u32 simple_tx_hashrnd;
+static int simple_tx_hashrnd_initialized = 0;
+
+static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
+{
+ u32 addr1, addr2, ports;
+ u32 hash, ihl;
+ u8 ip_proto = 0;
+
+ if (unlikely(!simple_tx_hashrnd_initialized)) {
+ get_random_bytes(&simple_tx_hashrnd, 4);
+ simple_tx_hashrnd_initialized = 1;
+ }
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+ ip_proto = ip_hdr(skb)->protocol;
+ addr1 = ip_hdr(skb)->saddr;
+ addr2 = ip_hdr(skb)->daddr;
+ ihl = ip_hdr(skb)->ihl;
+ break;
+ case htons(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
+ addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
+ ihl = (40 >> 2);
+ break;
+ default:
+ return 0;
+ }
+
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
+ break;
+
+ default:
+ ports = 0;
+ break;
+ }
+
+ hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+
+ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+}
+
+static struct netdev_queue *dev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ u16 queue_index = 0;
+
+ if (ops->ndo_select_queue)
+ queue_index = ops->ndo_select_queue(dev, skb);
+ else if (dev->real_num_tx_queues > 1)
+ queue_index = simple_tx_hash(dev, skb);
+
+ skb_set_queue_mapping(skb, queue_index);
+ return netdev_get_tx_queue(dev, queue_index);
+}
+
+/**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+ *
+ * Queue a buffer for transmission to a network device. The caller must
+ * have set the device and priority and built the buffer before calling
+ * this function. The function can be called from an interrupt.
+ *
+ * A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ * I notice this method can also return errors from the queue disciplines,
+ * including NET_XMIT_DROP, which is a positive value. So, errors can also
+ * be positive.
+ *
+ * Regardless of the return value, the skb is consumed, so it is currently
+ * difficult to retry a send to this method. (You can bump the ref count
+ * before sending to hold a reference for retry if you are careful.)
+ *
+ * When calling this method, interrupts MUST be enabled. This is because
+ * the BH enable code must have IRQs enabled so that it will not deadlock.
+ * --BLG
+ */
+int dev_queue_xmit(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct netdev_queue *txq;
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+
+ if (skb_shinfo(skb)->frag_list &&
+ !(dev->features & NETIF_F_FRAGLIST) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* Fragmented skb is linearized if device does not support SG,
+ * or if at least one of fragments is in highmem and device
+ * does not support DMA from it.
+ */
+ if (skb_shinfo(skb)->nr_frags &&
+ (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* If packet is not checksummed and device does not support
+ * checksumming for this protocol, complete checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+ if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
+ goto out_kfree_skb;
+ }
+
+gso:
+ /* Disable soft irqs for various locks below. Also
+ * stops preemption for RCU.
+ */
+ rcu_read_lock_bh();
+
+ txq = dev_pick_tx(dev, skb);
+ q = rcu_dereference(txq->qdisc);
+
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
+#endif
+ if (q->enqueue) {
+ spinlock_t *root_lock = qdisc_lock(q);
+
+ spin_lock(root_lock);
+
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
+ spin_unlock(root_lock);
+
+ goto out;
+ }
+
+ /* The device has no queue. This is the common case for software
+ devices: loopback, all sorts of tunnels...
+
+ Really, it is unlikely that netif_tx_lock protection is necessary
+ here. (E.g. loopback and IP tunnels are clean, ignoring statistics
+ counters.) However, it is possible that they rely on the
+ protection we provide here.
+
+ Check this and take the lock; it is not prone to deadlocks.
+ Or just shoot the noqueue qdisc, which is even simpler 8)
+ */
+ if (dev->flags & IFF_UP) {
+ int cpu = smp_processor_id(); /* ok because BHs are off */
+
+ if (txq->xmit_lock_owner != cpu) {
+
+ HARD_TX_LOCK(dev, txq, cpu);
+
+ if (!netif_tx_queue_stopped(txq)) {
+ rc = 0;
+ if (!dev_hard_start_xmit(skb, dev, txq)) {
+ HARD_TX_UNLOCK(dev, txq);
+ goto out;
+ }
+ }
+ HARD_TX_UNLOCK(dev, txq);
+ if (net_ratelimit())
+ printk(KERN_CRIT "Virtual device %s asks to "
+ "queue packet!\n", dev->name);
+ } else {
+ /* Recursion is detected! It is possible,
+ * unfortunately */
+ if (net_ratelimit())
+ printk(KERN_CRIT "Dead loop on virtual device "
+ "%s, fix it urgently!\n", dev->name);
+ }
+ }
+
+ rc = -ENETDOWN;
+ rcu_read_unlock_bh();
+
+out_kfree_skb:
+ kfree_skb(skb);
+ return rc;
+out:
+ rcu_read_unlock_bh();
+ return rc;
+}
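+
+/*
+ * A minimal transmit-path sketch, assuming the caller has already built
+ * the frame and resolved the output device (names are illustrative):
+ *
+ *	skb->dev = dev;
+ *	skb->priority = sk->sk_priority;
+ *	rc = dev_queue_xmit(skb);
+ *
+ * Whatever rc says, the skb has been consumed and must not be freed or
+ * reused by the caller.
+ */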
+
+
+/*=======================================================================
+ Receiver routines
+ =======================================================================*/
+
+int netdev_max_backlog __read_mostly = 1000;
+int netdev_budget __read_mostly = 300;
+int weight_p __read_mostly = 64; /* old backlog weight */
+
+DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
+
+
+/**
+ * netif_rx - post buffer to the network code
+ * @skb: buffer to post
+ *
+ * This function receives a packet from a device driver and queues it for
+ * the upper (protocol) levels to process. It always succeeds. The buffer
+ * may be dropped during processing for congestion control or by the
+ * protocol layers.
+ *
+ * return values:
+ * NET_RX_SUCCESS (no congestion)
+ * NET_RX_DROP (packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+ struct softnet_data *queue;
+ unsigned long flags;
+
+ /* if netpoll wants it, pretend we never saw it */
+ if (netpoll_rx(skb))
+ return NET_RX_DROP;
+
+ if (!skb->tstamp.tv64)
+ net_timestamp(skb);
+
+ /*
+ * The code is arranged so that the path is as short as
+ * possible when the CPU is congested but still operating.
+ */
+ local_irq_save(flags);
+ queue = &__get_cpu_var(softnet_data);
+
+ __get_cpu_var(netdev_rx_stat).total++;
+ if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
+ if (queue->input_pkt_queue.qlen) {
+enqueue:
+ __skb_queue_tail(&queue->input_pkt_queue, skb);
+ local_irq_restore(flags);
+ return NET_RX_SUCCESS;
+ }
+
+ napi_schedule(&queue->backlog);
+ goto enqueue;
+ }
+
+ __get_cpu_var(netdev_rx_stat).dropped++;
+ local_irq_restore(flags);
+
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
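+
+/*
+ * A minimal sketch of a non-NAPI driver receive path, assuming a
+ * hypothetical device that copies each frame into a fresh skb from its
+ * interrupt handler (pkt_len and rx_buf are illustrative):
+ *
+ *	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
+ *	if (!skb)
+ *		return;			(frame is dropped)
+ *	skb_reserve(skb, NET_IP_ALIGN);
+ *	memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
+ *	skb->protocol = eth_type_trans(skb, dev);
+ *	netif_rx(skb);
+ */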
+
+int netif_rx_ni(struct sk_buff *skb)
+{
+ int err;
+
+ preempt_disable();
+ err = netif_rx(skb);
+ if (local_softirq_pending())
+ do_softirq();
+ preempt_enable();
+
+ return err;
+}
+
+EXPORT_SYMBOL(netif_rx_ni);
+
+static void net_tx_action(struct softirq_action *h)
+{
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+
+ if (sd->completion_queue) {
+ struct sk_buff *clist;
+
+ local_irq_disable();
+ clist = sd->completion_queue;
+ sd->completion_queue = NULL;
+ local_irq_enable();
+
+ while (clist) {
+ struct sk_buff *skb = clist;
+ clist = clist->next;
+
+ WARN_ON(atomic_read(&skb->users));
+ __kfree_skb(skb);
+ }
+ }
+
+ if (sd->output_queue) {
+ struct Qdisc *head;
+
+ local_irq_disable();
+ head = sd->output_queue;
+ sd->output_queue = NULL;
+ local_irq_enable();
+
+ while (head) {
+ struct Qdisc *q = head;
+ spinlock_t *root_lock;
+
+ head = head->next_sched;
+
+ root_lock = qdisc_lock(q);
+ if (spin_trylock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+ qdisc_run(q);
+ spin_unlock(root_lock);
+ } else {
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state)) {
+ __netif_reschedule(q);
+ } else {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+ }
+ }
+ }
+ }
+}
+
+static inline int deliver_skb(struct sk_buff *skb,
+ struct packet_type *pt_prev,
+ struct net_device *orig_dev)
+{
+ atomic_inc(&skb->users);
+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
+/* These hooks defined here for ATM */
+struct net_bridge;
+struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
+ unsigned char *addr);
+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+/*
+ * If bridge module is loaded call bridging hook.
+ * returns NULL if packet was consumed.
+ */
+struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
+ struct sk_buff *skb) __read_mostly;
+static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
+ struct packet_type **pt_prev, int *ret,
+ struct net_device *orig_dev)
+{
+ struct net_bridge_port *port;
+
+ if (skb->pkt_type == PACKET_LOOPBACK ||
+ (port = rcu_dereference(skb->dev->br_port)) == NULL)
+ return skb;
+
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ }
+
+ return br_handle_frame_hook(port, skb);
+}
+#else
+#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
+#endif
+
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
+
+static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
+ struct packet_type **pt_prev,
+ int *ret,
+ struct net_device *orig_dev)
+{
+ if (skb->dev->macvlan_port == NULL)
+ return skb;
+
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ }
+ return macvlan_handle_frame_hook(skb);
+}
+#else
+#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
+#endif
+
+#ifdef CONFIG_NET_CLS_ACT
+/* TODO: Maybe we should just force sch_ingress to be compiled in
+ * whenever CONFIG_NET_CLS_ACT is. Otherwise we execute a few useless
+ * instructions (a compare and two extra stores) when it is not
+ * enabled but CONFIG_NET_CLS_ACT is.
+ * NOTE: This doesn't remove any functionality; if you don't have
+ * the ingress scheduler, you just can't add policies on ingress.
+ */
+static int ing_filter(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ u32 ttl = G_TC_RTTL(skb->tc_verd);
+ struct netdev_queue *rxq;
+ int result = TC_ACT_OK;
+ struct Qdisc *q;
+
+ if (MAX_RED_LOOP < ttl++) {
+ printk(KERN_WARNING
+ "Redir loop detected Dropping packet (%d->%d)\n",
+ skb->iif, dev->ifindex);
+ return TC_ACT_SHOT;
+ }
+
+ skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
+ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
+
+ rxq = &dev->rx_queue;
+
+ q = rxq->qdisc;
+ if (q != &noop_qdisc) {
+ spin_lock(qdisc_lock(q));
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
+ spin_unlock(qdisc_lock(q));
+ }
+
+ return result;
+}
+
+static inline struct sk_buff *handle_ing(struct sk_buff *skb,
+ struct packet_type **pt_prev,
+ int *ret, struct net_device *orig_dev)
+{
+ if (skb->dev->rx_queue.qdisc == &noop_qdisc)
+ goto out;
+
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ } else {
+ /* Huh? Why does turning on AF_PACKET affect this? */
+ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
+ }
+
+ switch (ing_filter(skb)) {
+ case TC_ACT_SHOT:
+ case TC_ACT_STOLEN:
+ kfree_skb(skb);
+ return NULL;
+ }
+
+out:
+ skb->tc_verd = 0;
+ return skb;
+}
+#endif
+
+/*
+ * netif_nit_deliver - deliver received packets to network taps
+ * @skb: buffer
+ *
+ * This function is used to deliver incoming packets to network
+ * taps. It should be used when the normal netif_receive_skb path
+ * is bypassed, for example because of VLAN acceleration.
+ */
+void netif_nit_deliver(struct sk_buff *skb)
+{
+ struct packet_type *ptype;
+
+ if (list_empty(&ptype_all))
+ return;
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (!ptype->dev || ptype->dev == skb->dev)
+ deliver_skb(skb, ptype, skb->dev);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+ struct packet_type *ptype, *pt_prev;
+ struct net_device *orig_dev;
+ struct net_device *null_or_orig;
+ int ret = NET_RX_DROP;
+ __be16 type;
+
+ if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+ return NET_RX_SUCCESS;
+
+ /* if we've gotten here through NAPI, check netpoll */
+ if (netpoll_receive_skb(skb))
+ return NET_RX_DROP;
+
+ if (!skb->tstamp.tv64)
+ net_timestamp(skb);
+
+ if (!skb->iif)
+ skb->iif = skb->dev->ifindex;
+
+ null_or_orig = NULL;
+ orig_dev = skb->dev;
+ if (orig_dev->master) {
+ if (skb_bond_should_drop(skb))
+ null_or_orig = orig_dev; /* deliver only exact match */
+ else
+ skb->dev = orig_dev->master;
+ }
+
+ __get_cpu_var(netdev_rx_stat).total++;
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+
+ pt_prev = NULL;
+
+ rcu_read_lock();
+
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_verd & TC_NCLS) {
+ skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
+ goto ncls;
+ }
+#endif
+
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev) {
+ if (pt_prev)
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = ptype;
+ }
+ }
+
+#ifdef CONFIG_NET_CLS_ACT
+ skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto out;
+ncls:
+#endif
+
+ skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto out;
+ skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto out;
+
+ type = skb->protocol;
+ list_for_each_entry_rcu(ptype,
+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
+ if (ptype->type == type &&
+ (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev)) {
+ if (pt_prev)
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = ptype;
+ }
+ }
+
+ if (pt_prev) {
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ } else {
+ kfree_skb(skb);
+ /* Jamal, now you will not be able to escape explaining
+ * to me how you were going to use this. :-)
+ */
+ ret = NET_RX_DROP;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
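+
+/*
+ * A minimal sketch of how a NAPI driver's ->poll() callback feeds frames
+ * in here; fetch_next_rx_skb() and the adapter layout are hypothetical:
+ *
+ *	while (work < budget && (skb = fetch_next_rx_skb(adapter))) {
+ *		skb->protocol = eth_type_trans(skb, dev);
+ *		netif_receive_skb(skb);
+ *		work++;
+ *	}
+ *	if (work < budget)
+ *		napi_complete(napi);
+ */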
+
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+ struct net_device *dev = arg;
+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &queue->input_pkt_queue);
+ kfree_skb(skb);
+ }
+}
+
+static int napi_gro_complete(struct sk_buff *skb)
+{
+ struct packet_type *ptype;
+ __be16 type = skb->protocol;
+ struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+ int err = -ENOENT;
+
+ if (NAPI_GRO_CB(skb)->count == 1)
+ goto out;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ if (ptype->type != type || ptype->dev || !ptype->gro_complete)
+ continue;
+
+ err = ptype->gro_complete(skb);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (err) {
+ WARN_ON(&ptype->list == head);
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+out:
+ skb_shinfo(skb)->gso_size = 0;
+ __skb_push(skb, -skb_network_offset(skb));
+ return netif_receive_skb(skb);
+}
+
+void napi_gro_flush(struct napi_struct *napi)
+{
+ struct sk_buff *skb, *next;
+
+ for (skb = napi->gro_list; skb; skb = next) {
+ next = skb->next;
+ skb->next = NULL;
+ napi_gro_complete(skb);
+ }
+
+ napi->gro_list = NULL;
+}
+EXPORT_SYMBOL(napi_gro_flush);
+
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct sk_buff **pp = NULL;
+ struct packet_type *ptype;
+ __be16 type = skb->protocol;
+ struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+ int count = 0;
+ int same_flow;
+ int mac_len;
+ int free;
+
+ if (!(skb->dev->features & NETIF_F_GRO))
+ goto normal;
+
+ if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+ goto normal;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ struct sk_buff *p;
+
+ if (ptype->type != type || ptype->dev || !ptype->gro_receive)
+ continue;
+
+ skb_reset_network_header(skb);
+ mac_len = skb->network_header - skb->mac_header;
+ skb->mac_len = mac_len;
+ NAPI_GRO_CB(skb)->same_flow = 0;
+ NAPI_GRO_CB(skb)->flush = 0;
+ NAPI_GRO_CB(skb)->free = 0;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ count++;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ if (p->mac_len != mac_len ||
+ memcmp(skb_mac_header(p), skb_mac_header(skb),
+ mac_len))
+ NAPI_GRO_CB(p)->same_flow = 0;
+ }
+
+ pp = ptype->gro_receive(&napi->gro_list, skb);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (&ptype->list == head)
+ goto normal;
+
+ same_flow = NAPI_GRO_CB(skb)->same_flow;
+ free = NAPI_GRO_CB(skb)->free;
+
+ if (pp) {
+ struct sk_buff *nskb = *pp;
+
+ *pp = nskb->next;
+ nskb->next = NULL;
+ napi_gro_complete(nskb);
+ count--;
+ }
+
+ if (same_flow)
+ goto ok;
+
+ if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
+ __skb_push(skb, -skb_network_offset(skb));
+ goto normal;
+ }
+
+ NAPI_GRO_CB(skb)->count = 1;
+ skb_shinfo(skb)->gso_size = skb->len;
+ skb->next = napi->gro_list;
+ napi->gro_list = skb;
+
+ok:
+ return free;
+
+normal:
+ return -1;
+}
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow = 1;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+
+ return dev_gro_receive(napi, skb);
+}
+
+int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ if (netpoll_receive_skb(skb))
+ return NET_RX_DROP;
+
+ switch (__napi_gro_receive(napi, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 1:
+ kfree_skb(skb);
+ break;
+ }
+
+ return NET_RX_SUCCESS;
+}
+EXPORT_SYMBOL(napi_gro_receive);
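+
+/*
+ * A GRO-capable driver calls napi_gro_receive() from its ->poll() routine
+ * in place of netif_receive_skb(); a minimal sketch, with the adapter
+ * structure hypothetical:
+ *
+ *	skb->protocol = eth_type_trans(skb, netdev);
+ *	napi_gro_receive(&adapter->napi, skb);
+ */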
+
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+ __skb_pull(skb, skb_headlen(skb));
+ skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+ napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+ struct napi_gro_fraginfo *info)
+{
+ struct net_device *dev = napi->dev;
+ struct sk_buff *skb = napi->skb;
+
+ napi->skb = NULL;
+
+ if (!skb) {
+ skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+
+ BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = info->nr_frags;
+ memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
+
+ skb->data_len = info->len;
+ skb->len += info->len;
+ skb->truesize += info->len;
+
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ napi_reuse_skb(napi, skb);
+ skb = NULL;
+ goto out;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb->ip_summed = info->ip_summed;
+ skb->csum = info->csum;
+
+out:
+ return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+ struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ int err = NET_RX_DROP;
+
+ if (!skb)
+ goto out;
+
+ if (netpoll_receive_skb(skb))
+ goto out;
+
+ err = NET_RX_SUCCESS;
+
+ switch (__napi_gro_receive(napi, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 0:
+ goto out;
+ }
+
+ napi_reuse_skb(napi, skb);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(napi_gro_frags);
+
+static int process_backlog(struct napi_struct *napi, int quota)
+{
+ int work = 0;
+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ unsigned long start_time = jiffies;
+
+ napi->weight = weight_p;
+ do {
+ struct sk_buff *skb;
+
+ local_irq_disable();
+ skb = __skb_dequeue(&queue->input_pkt_queue);
+ if (!skb) {
+ local_irq_enable();
+ napi_complete(napi);
+ goto out;
+ }
+ local_irq_enable();
+
+ napi_gro_receive(napi, skb);
+ } while (++work < quota && jiffies == start_time);
+
+ napi_gro_flush(napi);
+
+out:
+ return work;
+}
+
+/**
+ * __napi_schedule - schedule for receive
+ * @n: entry to schedule
+ *
+ * The entry's receive function will be scheduled to run
+ */
+void __napi_schedule(struct napi_struct *n)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__napi_schedule);
+
+void __napi_complete(struct napi_struct *n)
+{
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+ BUG_ON(n->gro_list);
+
+ list_del(&n->poll_list);
+ smp_mb__before_clear_bit();
+ clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+EXPORT_SYMBOL(__napi_complete);
+
+void napi_complete(struct napi_struct *n)
+{
+ unsigned long flags;
+
+ /*
+ * don't let napi dequeue from the cpu poll list
+ * just in case it's running on a different cpu
+ */
+ if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
+ return;
+
+ napi_gro_flush(n);
+ local_irq_save(flags);
+ __napi_complete(n);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(napi_complete);
+
+void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ INIT_LIST_HEAD(&napi->poll_list);
+ napi->gro_list = NULL;
+ napi->skb = NULL;
+ napi->poll = poll;
+ napi->weight = weight;
+ list_add(&napi->dev_list, &dev->napi_list);
+ napi->dev = dev;
+#ifdef CONFIG_NETPOLL
+ spin_lock_init(&napi->poll_lock);
+ napi->poll_owner = -1;
+#endif
+ set_bit(NAPI_STATE_SCHED, &napi->state);
+}
+EXPORT_SYMBOL(netif_napi_add);
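+
+/*
+ * A minimal sketch of driver-side NAPI wiring, assuming hypothetical
+ * foo_poll(), foo_clean_rx() and IRQ enable helpers:
+ *
+ *	static int foo_poll(struct napi_struct *napi, int budget)
+ *	{
+ *		int work = foo_clean_rx(napi, budget);
+ *
+ *		if (work < budget) {
+ *			napi_complete(napi);
+ *			foo_enable_irq(adapter);
+ *		}
+ *		return work;
+ *	}
+ *
+ * In the probe routine:
+ *	netif_napi_add(netdev, &adapter->napi, foo_poll, 64);
+ * In the interrupt handler, after masking the device interrupt:
+ *	napi_schedule(&adapter->napi);
+ */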
+
+void netif_napi_del(struct napi_struct *napi)
+{
+ struct sk_buff *skb, *next;
+
+ list_del_init(&napi->dev_list);
+ kfree_skb(napi->skb);
+
+ for (skb = napi->gro_list; skb; skb = next) {
+ next = skb->next;
+ skb->next = NULL;
+ kfree_skb(skb);
+ }
+
+ napi->gro_list = NULL;
+}
+EXPORT_SYMBOL(netif_napi_del);
+
+
+static void net_rx_action(struct softirq_action *h)
+{
+ struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
+ unsigned long time_limit = jiffies + 2;
+ int budget = netdev_budget;
+ void *have;
+
+ local_irq_disable();
+
+ while (!list_empty(list)) {
+ struct napi_struct *n;
+ int work, weight;
+
+ /* If the softirq window is exhausted then punt.
+ * Allow this to run for 2 jiffies, which allows
+ * an average latency of 1.5/HZ.
+ */
+ if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
+ goto softnet_break;
+
+ local_irq_enable();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+ * entries to the tail of this list, and only ->poll()
+ * calls can remove this head entry from the list.
+ */
+ n = list_entry(list->next, struct napi_struct, poll_list);
+
+ have = netpoll_poll_lock(n);
+
+ weight = n->weight;
+
+ /* This NAPI_STATE_SCHED test is for avoiding a race
+ * with netpoll's poll_napi(). Only the entity which
+ * obtains the lock and sees NAPI_STATE_SCHED set will
+ * actually make the ->poll() call. Therefore we avoid
+ * accidentally calling ->poll() when NAPI is not scheduled.
+ */
+ work = 0;
+ if (test_bit(NAPI_STATE_SCHED, &n->state))
+ work = n->poll(n, weight);
+
+ WARN_ON_ONCE(work > weight);
+
+ budget -= work;
+
+ local_irq_disable();
+
+ /* Drivers must not modify the NAPI state if they
+ * consume the entire weight. In such cases this code
+ * still "owns" the NAPI instance and therefore can
+ * move the instance around on the list at-will.
+ */
+ if (unlikely(work == weight)) {
+ if (unlikely(napi_disable_pending(n)))
+ __napi_complete(n);
+ else
+ list_move_tail(&n->poll_list, list);
+ }
+
+ netpoll_poll_unlock(have);
+ }
+out:
+ local_irq_enable();
+
+#ifdef CONFIG_NET_DMA
+ /*
+ * There may not be any more sk_buffs coming right now, so push
+ * any pending DMA copies to hardware
+ */
+ dma_issue_pending_all();
+#endif
+
+ return;
+
+softnet_break:
+ __get_cpu_var(netdev_rx_stat).time_squeeze++;
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ goto out;
+}
+
+static gifconf_func_t * gifconf_list [NPROTO];
+
+/**
+ * register_gifconf - register a SIOCGIF handler
+ * @family: Address family
+ * @gifconf: Function handler
+ *
+ * Register protocol dependent address dumping routines. The handler
+ * that is passed must not be freed or reused until it has been replaced
+ * by another handler.
+ */
+int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
+{
+ if (family >= NPROTO)
+ return -EINVAL;
+ gifconf_list[family] = gifconf;
+ return 0;
+}
+
+
+/*
+ * Map an interface index to its name (SIOCGIFNAME)
+ */
+
+/*
+ * We need this ioctl for efficient implementation of the
+ * if_indextoname() function required by the IPv6 API. Without
+ * it, we would have to search all the interfaces to find a
+ * match. --pb
+ */
+
+static int dev_ifname(struct net *net, struct ifreq __user *arg)
+{
+ struct net_device *dev;
+ struct ifreq ifr;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ return -EFAULT;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_index(net, ifr.ifr_ifindex);
+ if (!dev) {
+ read_unlock(&dev_base_lock);
+ return -ENODEV;
+ }
+
+ strcpy(ifr.ifr_name, dev->name);
+ read_unlock(&dev_base_lock);
+
+ if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size eventually, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(struct net *net, char __user *arg)
+{
+ struct ifconf ifc;
+ struct net_device *dev;
+ char __user *pos;
+ int len;
+ int total;
+ int i;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
+ return -EFAULT;
+
+ pos = ifc.ifc_buf;
+ len = ifc.ifc_len;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ total = 0;
+ for_each_netdev(net, dev) {
+ for (i = 0; i < NPROTO; i++) {
+ if (gifconf_list[i]) {
+ int done;
+ if (!pos)
+ done = gifconf_list[i](dev, NULL, 0);
+ else
+ done = gifconf_list[i](dev, pos + total,
+ len - total);
+ if (done < 0)
+ return -EFAULT;
+ total += done;
+ }
+ }
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+ ifc.ifc_len = total;
+
+ /*
+ * Both BSD and Solaris return 0 here, so we do too.
+ */
+ return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+void *dev_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
+{
+ struct net *net = seq_file_net(seq);
+ loff_t off;
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ if (!*pos)
+ return SEQ_START_TOKEN;
+
+ off = 1;
+ for_each_netdev(net, dev)
+ if (off++ == *pos)
+ return dev;
+
+ return NULL;
+}
+
+void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct net *net = seq_file_net(seq);
+ ++*pos;
+ return v == SEQ_START_TOKEN ?
+ first_net_device(net) : next_net_device((struct net_device *)v);
+}
+
+void dev_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
+{
+ read_unlock(&dev_base_lock);
+}
+
+static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
+{
+ const struct net_device_stats *stats = dev_get_stats(dev);
+
+ seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+ "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+ dev->name, stats->rx_bytes, stats->rx_packets,
+ stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors +
+ stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->rx_compressed, stats->multicast,
+ stats->tx_bytes, stats->tx_packets,
+ stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors +
+ stats->tx_aborted_errors +
+ stats->tx_window_errors +
+ stats->tx_heartbeat_errors,
+ stats->tx_compressed);
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary sized
+ * /proc/net interface to create /proc/net/dev
+ */
+static int dev_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Inter-| Receive "
+ " | Transmit\n"
+ " face |bytes packets errs drop fifo frame "
+ "compressed multicast|bytes packets errs "
+ "drop fifo colls carrier compressed\n");
+ else
+ dev_seq_printf_stats(seq, v);
+ return 0;
+}
+
+static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+{
+ struct netif_rx_stats *rc = NULL;
+
+ while (*pos < nr_cpu_ids)
+ if (cpu_online(*pos)) {
+ rc = &per_cpu(netdev_rx_stat, *pos);
+ break;
+ } else
+ ++*pos;
+ return rc;
+}
+
+static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return softnet_get_online(pos);
+}
+
+static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return softnet_get_online(pos);
+}
+
+static void softnet_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int softnet_seq_show(struct seq_file *seq, void *v)
+{
+ struct netif_rx_stats *s = v;
+
+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ s->total, s->dropped, s->time_squeeze, 0,
+ 0, 0, 0, 0, /* was fastroute */
+ s->cpu_collision );
+ return 0;
+}
+
+static const struct seq_operations dev_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = dev_seq_show,
+};
+
+static int dev_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &dev_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations dev_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = dev_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+static const struct seq_operations softnet_seq_ops = {
+ .start = softnet_seq_start,
+ .next = softnet_seq_next,
+ .stop = softnet_seq_stop,
+ .show = softnet_seq_show,
+};
+
+static int softnet_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &softnet_seq_ops);
+}
+
+static const struct file_operations softnet_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = softnet_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static void *ptype_get_idx(loff_t pos)
+{
+ struct packet_type *pt = NULL;
+ loff_t i = 0;
+ int t;
+
+ list_for_each_entry_rcu(pt, &ptype_all, list) {
+ if (i == pos)
+ return pt;
+ ++i;
+ }
+
+ for (t = 0; t < PTYPE_HASH_SIZE; t++) {
+ list_for_each_entry_rcu(pt, &ptype_base[t], list) {
+ if (i == pos)
+ return pt;
+ ++i;
+ }
+ }
+ return NULL;
+}
+
+static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+{
+ rcu_read_lock();
+ return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct packet_type *pt;
+ struct list_head *nxt;
+ int hash;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN)
+ return ptype_get_idx(0);
+
+ pt = v;
+ nxt = pt->list.next;
+ if (pt->type == htons(ETH_P_ALL)) {
+ if (nxt != &ptype_all)
+ goto found;
+ hash = 0;
+ nxt = ptype_base[0].next;
+ } else
+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
+
+ while (nxt == &ptype_base[hash]) {
+ if (++hash >= PTYPE_HASH_SIZE)
+ return NULL;
+ nxt = ptype_base[hash].next;
+ }
+found:
+ return list_entry(nxt, struct packet_type, list);
+}
+
+static void ptype_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int ptype_seq_show(struct seq_file *seq, void *v)
+{
+ struct packet_type *pt = v;
+
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Type Device Function\n");
+ else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
+ if (pt->type == htons(ETH_P_ALL))
+ seq_puts(seq, "ALL ");
+ else
+ seq_printf(seq, "%04x", ntohs(pt->type));
+
+ seq_printf(seq, " %-8s %pF\n",
+ pt->dev ? pt->dev->name : "", pt->func);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations ptype_seq_ops = {
+ .start = ptype_seq_start,
+ .next = ptype_seq_next,
+ .stop = ptype_seq_stop,
+ .show = ptype_seq_show,
+};
+
+static int ptype_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &ptype_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations ptype_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = ptype_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+
+static int __net_init dev_proc_net_init(struct net *net)
+{
+ int rc = -ENOMEM;
+
+ if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
+ goto out;
+ if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
+ goto out_dev;
+ if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
+ goto out_softnet;
+
+ if (wext_proc_init(net))
+ goto out_ptype;
+ rc = 0;
+out:
+ return rc;
+out_ptype:
+ proc_net_remove(net, "ptype");
+out_softnet:
+ proc_net_remove(net, "softnet_stat");
+out_dev:
+ proc_net_remove(net, "dev");
+ goto out;
+}
+
+static void __net_exit dev_proc_net_exit(struct net *net)
+{
+ wext_proc_exit(net);
+
+ proc_net_remove(net, "ptype");
+ proc_net_remove(net, "softnet_stat");
+ proc_net_remove(net, "dev");
+}
+
+static struct pernet_operations __net_initdata dev_proc_ops = {
+ .init = dev_proc_net_init,
+ .exit = dev_proc_net_exit,
+};
+
+static int __init dev_proc_init(void)
+{
+ return register_pernet_subsys(&dev_proc_ops);
+}
+#else
+#define dev_proc_init() 0
+#endif /* CONFIG_PROC_FS */
+
+
+/**
+ * netdev_set_master - set up master/slave pair
+ * @slave: slave device
+ * @master: new master device
+ *
+ * Changes the master device of the slave. Pass %NULL to break the
+ * bonding. The caller must hold the RTNL semaphore. On a failure
+ * a negative errno code is returned. On success the reference counts
+ * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
+ * function returns zero.
+ */
+int netdev_set_master(struct net_device *slave, struct net_device *master)
+{
+ struct net_device *old = slave->master;
+
+ ASSERT_RTNL();
+
+ if (master) {
+ if (old)
+ return -EBUSY;
+ dev_hold(master);
+ }
+
+ slave->master = master;
+
+ synchronize_net();
+
+ if (old)
+ dev_put(old);
+
+ if (master)
+ slave->flags |= IFF_SLAVE;
+ else
+ slave->flags &= ~IFF_SLAVE;
+
+ rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
+ return 0;
+}
+
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
+ ops->ndo_change_rx_flags(dev, flags);
+}
+
+static int __dev_set_promiscuity(struct net_device *dev, int inc)
+{
+ unsigned short old_flags = dev->flags;
+ uid_t uid;
+ gid_t gid;
+
+ ASSERT_RTNL();
+
+ dev->flags |= IFF_PROMISC;
+ dev->promiscuity += inc;
+ if (dev->promiscuity == 0) {
+ /*
+ * Avoid overflow.
+ * If inc causes overflow, leave promiscuity untouched and return an error.
+ */
+ if (inc < 0)
+ dev->flags &= ~IFF_PROMISC;
+ else {
+ dev->promiscuity -= inc;
+ printk(KERN_WARNING "%s: promiscuity touches roof, "
+ "set promiscuity failed, promiscuity feature "
+ "of device might be broken.\n", dev->name);
+ return -EOVERFLOW;
+ }
+ }
+ if (dev->flags != old_flags) {
+ printk(KERN_INFO "device %s %s promiscuous mode\n",
+ dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
+ "left");
+ if (audit_enabled) {
+ current_uid_gid(&uid, &gid);
+ audit_log(current->audit_context, GFP_ATOMIC,
+ AUDIT_ANOM_PROMISCUOUS,
+ "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
+ dev->name, (dev->flags & IFF_PROMISC),
+ (old_flags & IFF_PROMISC),
+ audit_get_loginuid(current),
+ uid, gid,
+ audit_get_sessionid(current));
+ }
+
+ dev_change_rx_flags(dev, IFF_PROMISC);
+ }
+ return 0;
+}
+
+/**
+ * dev_set_promiscuity - update promiscuity count on a device
+ * @dev: device
+ * @inc: modifier
+ *
+ * Add or remove promiscuity from a device. While the count in the device
+ * remains above zero the interface remains promiscuous. Once it hits zero
+ * the device reverts back to normal filtering operation. A negative inc
+ * value is used to drop promiscuity on the device.
+ * Return 0 if successful or a negative errno code on error.
+ */
+int dev_set_promiscuity(struct net_device *dev, int inc)
+{
+ unsigned short old_flags = dev->flags;
+ int err;
+
+ err = __dev_set_promiscuity(dev, inc);
+ if (err < 0)
+ return err;
+ if (dev->flags != old_flags)
+ dev_set_rx_mode(dev);
+ return err;
+}
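+
+/*
+ * A minimal usage sketch: a packet-capture style caller bumps the count
+ * while sniffing and drops it again when done (RTNL held around both
+ * calls):
+ *
+ *	err = dev_set_promiscuity(dev, 1);	(start capturing)
+ *	...
+ *	dev_set_promiscuity(dev, -1);		(stop capturing)
+ */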
+
+/**
+ * dev_set_allmulti - update allmulti count on a device
+ * @dev: device
+ * @inc: modifier
+ *
+ * Add or remove reception of all multicast frames to a device. While the
+ * count in the device remains above zero the interface remains listening
+ * to all multicast frames. Once it hits zero the device reverts back to
+ * normal filtering operation. A negative @inc value is used to drop the
+ * counter when releasing a resource needing all multicasts.
+ * Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+ unsigned short old_flags = dev->flags;
+
+ ASSERT_RTNL();
+
+ dev->flags |= IFF_ALLMULTI;
+ dev->allmulti += inc;
+ if (dev->allmulti == 0) {
+ /*
+ * Avoid overflow.
+ * If inc causes overflow, leave allmulti untouched and return an error.
+ */
+ if (inc < 0)
+ dev->flags &= ~IFF_ALLMULTI;
+ else {
+ dev->allmulti -= inc;
+ printk(KERN_WARNING "%s: allmulti touches roof, "
+ "set allmulti failed, allmulti feature of "
+ "device might be broken.\n", dev->name);
+ return -EOVERFLOW;
+ }
+ }
+ if (dev->flags ^ old_flags) {
+ dev_change_rx_flags(dev, IFF_ALLMULTI);
+ dev_set_rx_mode(dev);
+ }
+ return 0;
+}
+
+/*
+ * Upload unicast and multicast address lists to device and
+ * configure RX filtering. When the device doesn't support unicast
+ * filtering it is put in promiscuous mode while unicast addresses
+ * are present.
+ */
+void __dev_set_rx_mode(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ /* dev_open will call this function so the list will stay sane. */
+ if (!(dev->flags&IFF_UP))
+ return;
+
+ if (!netif_device_present(dev))
+ return;
+
+ if (ops->ndo_set_rx_mode)
+ ops->ndo_set_rx_mode(dev);
+ else {
+ /* Unicast address changes may only happen under the rtnl,
+ * therefore calling __dev_set_promiscuity here is safe.
+ */
+ if (dev->uc_count > 0 && !dev->uc_promisc) {
+ __dev_set_promiscuity(dev, 1);
+ dev->uc_promisc = 1;
+ } else if (dev->uc_count == 0 && dev->uc_promisc) {
+ __dev_set_promiscuity(dev, -1);
+ dev->uc_promisc = 0;
+ }
+
+ if (ops->ndo_set_multicast_list)
+ ops->ndo_set_multicast_list(dev);
+ }
+}
+
+void dev_set_rx_mode(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+}
+
+int __dev_addr_delete(struct dev_addr_list **list, int *count,
+ void *addr, int alen, int glbl)
+{
+ struct dev_addr_list *da;
+
+ for (; (da = *list) != NULL; list = &da->next) {
+ if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
+ alen == da->da_addrlen) {
+ if (glbl) {
+ int old_glbl = da->da_gusers;
+ da->da_gusers = 0;
+ if (old_glbl == 0)
+ break;
+ }
+ if (--da->da_users)
+ return 0;
+
+ *list = da->next;
+ kfree(da);
+ (*count)--;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int __dev_addr_add(struct dev_addr_list **list, int *count,
+ void *addr, int alen, int glbl)
+{
+ struct dev_addr_list *da;
+
+ for (da = *list; da != NULL; da = da->next) {
+ if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
+ da->da_addrlen == alen) {
+ if (glbl) {
+ int old_glbl = da->da_gusers;
+ da->da_gusers = 1;
+ if (old_glbl)
+ return 0;
+ }
+ da->da_users++;
+ return 0;
+ }
+ }
+
+ da = kzalloc(sizeof(*da), GFP_ATOMIC);
+ if (da == NULL)
+ return -ENOMEM;
+ memcpy(da->da_addr, addr, alen);
+ da->da_addrlen = alen;
+ da->da_users = 1;
+ da->da_gusers = glbl ? 1 : 0;
+ da->next = *list;
+ *list = da;
+ (*count)++;
+ return 0;
+}
+
+/**
+ * dev_unicast_delete - Release secondary unicast address.
+ * @dev: device
+ * @addr: address to delete
+ * @alen: length of @addr
+ *
+ * Release reference to a secondary unicast address and remove it
+ * from the device if the reference count drops to zero.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_unicast_delete);
+
+/**
+ * dev_unicast_add - add a secondary unicast address
+ * @dev: device
+ * @addr: address to add
+ * @alen: length of @addr
+ *
+ * Add a secondary unicast address to the device or increase
+ * the reference count if it already exists.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_unicast_add);
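+
+/*
+ * A minimal sketch: a stacked driver adds a secondary unicast address and
+ * releases it again later (secondary_mac is a hypothetical ETH_ALEN
+ * buffer; the rtnl_mutex is held around both calls):
+ *
+ *	err = dev_unicast_add(dev, secondary_mac, ETH_ALEN);
+ *	...
+ *	dev_unicast_delete(dev, secondary_mac, ETH_ALEN);
+ */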
+
+int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
+ struct dev_addr_list **from, int *from_count)
+{
+ struct dev_addr_list *da, *next;
+ int err = 0;
+
+ da = *from;
+ while (da != NULL) {
+ next = da->next;
+ if (!da->da_synced) {
+ err = __dev_addr_add(to, to_count,
+ da->da_addr, da->da_addrlen, 0);
+ if (err < 0)
+ break;
+ da->da_synced = 1;
+ da->da_users++;
+ } else if (da->da_users == 1) {
+ __dev_addr_delete(to, to_count,
+ da->da_addr, da->da_addrlen, 0);
+ __dev_addr_delete(from, from_count,
+ da->da_addr, da->da_addrlen, 0);
+ }
+ da = next;
+ }
+ return err;
+}
+
+void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
+ struct dev_addr_list **from, int *from_count)
+{
+ struct dev_addr_list *da, *next;
+
+ da = *from;
+ while (da != NULL) {
+ next = da->next;
+ if (da->da_synced) {
+ __dev_addr_delete(to, to_count,
+ da->da_addr, da->da_addrlen, 0);
+ da->da_synced = 0;
+ __dev_addr_delete(from, from_count,
+ da->da_addr, da->da_addrlen, 0);
+ }
+ da = next;
+ }
+}
+
+/**
+ * dev_unicast_sync - Synchronize device's unicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_rx_mode
+ * function of layered software devices.
+ */
+int dev_unicast_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ netif_addr_lock_bh(to);
+ err = __dev_addr_sync(&to->uc_list, &to->uc_count,
+ &from->uc_list, &from->uc_count);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+ return err;
+}
+EXPORT_SYMBOL(dev_unicast_sync);
+
+/**
+ * dev_unicast_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_unicast_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_unicast_unsync(struct net_device *to, struct net_device *from)
+{
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+
+ __dev_addr_unsync(&to->uc_list, &to->uc_count,
+ &from->uc_list, &from->uc_count);
+ __dev_set_rx_mode(to);
+
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_unicast_unsync);
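+
+/*
+ * A minimal sketch of how a layered device keeps its lower device's
+ * unicast filter in sync; foo_set_rx_mode(), foo_stop() and
+ * foo_lowerdev() are hypothetical:
+ *
+ *	static void foo_set_rx_mode(struct net_device *dev)
+ *	{
+ *		dev_unicast_sync(foo_lowerdev(dev), dev);
+ *	}
+ *
+ *	static int foo_stop(struct net_device *dev)
+ *	{
+ *		dev_unicast_unsync(foo_lowerdev(dev), dev);
+ *		return 0;
+ *	}
+ */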
+
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+ struct dev_addr_list *tmp;
+
+ while (*list != NULL) {
+ tmp = *list;
+ *list = tmp->next;
+ if (tmp->da_users > tmp->da_gusers)
+ printk("__dev_addr_discard: address leakage! "
+ "da_users=%d\n", tmp->da_users);
+ kfree(tmp);
+ }
+}
+
+static void dev_addr_discard(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+
+ __dev_addr_discard(&dev->uc_list);
+ dev->uc_count = 0;
+
+ __dev_addr_discard(&dev->mc_list);
+ dev->mc_count = 0;
+
+ netif_addr_unlock_bh(dev);
+}
+
+/**
+ * dev_get_flags - get flags reported to userspace
+ * @dev: device
+ *
+ * Get the combination of flag bits exported through APIs to userspace.
+ */
+unsigned dev_get_flags(const struct net_device *dev)
+{
+ unsigned flags;
+
+ flags = (dev->flags & ~(IFF_PROMISC |
+ IFF_ALLMULTI |
+ IFF_RUNNING |
+ IFF_LOWER_UP |
+ IFF_DORMANT)) |
+ (dev->gflags & (IFF_PROMISC |
+ IFF_ALLMULTI));
+
+ if (netif_running(dev)) {
+ if (netif_oper_up(dev))
+ flags |= IFF_RUNNING;
+ if (netif_carrier_ok(dev))
+ flags |= IFF_LOWER_UP;
+ if (netif_dormant(dev))
+ flags |= IFF_DORMANT;
+ }
+
+ return flags;
+}
+
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on the device based on the supplied state flags.
+ * The flags are in the format exported to userspace.
+ */
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+ int ret, changes;
+ int old_flags = dev->flags;
+
+ ASSERT_RTNL();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
+ IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
+ IFF_AUTOMEDIA)) |
+ (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
+ IFF_ALLMULTI));
+
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ if ((old_flags ^ flags) & IFF_MULTICAST)
+ dev_change_rx_flags(dev, IFF_MULTICAST);
+
+ dev_set_rx_mode(dev);
+
+ /*
+ * Have we downed the interface? We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ ret = 0;
+ if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
+ ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+
+ if (!ret)
+ dev_set_rx_mode(dev);
+ }
+
+ if (dev->flags & IFF_UP &&
+ ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_VOLATILE)))
+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
+
+ if ((flags ^ dev->gflags) & IFF_PROMISC) {
+ int inc = (flags & IFF_PROMISC) ? +1 : -1;
+ dev->gflags ^= IFF_PROMISC;
+ dev_set_promiscuity(dev, inc);
+ }
+
+ /* NOTE: the order of synchronization of IFF_PROMISC and IFF_ALLMULTI
+ is important. Some (broken) drivers set IFF_PROMISC when
+ IFF_ALLMULTI is requested, without asking us and without
+ reporting it.
+ */
+ if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
+ int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
+ dev->gflags ^= IFF_ALLMULTI;
+ dev_set_allmulti(dev, inc);
+ }
+
+ /* Exclude state transition flags, already notified */
+ changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
+ if (changes)
+ rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
+
+ return ret;
+}
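+
+/*
+ * A minimal sketch of bringing an interface administratively up, which is
+ * effectively what the SIOCSIFFLAGS path does (RTNL must be held):
+ *
+ *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
+ */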
+
+/**
+ * dev_set_mtu - Change maximum transfer unit
+ * @dev: device
+ * @new_mtu: new transfer unit
+ *
+ * Change the maximum transfer size of the network device.
+ */
+int dev_set_mtu(struct net_device *dev, int new_mtu)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
+
+ if (new_mtu == dev->mtu)
+ return 0;
+
+ /* MTU must be positive. */
+ if (new_mtu < 0)
+ return -EINVAL;
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+ err = 0;
+ if (ops->ndo_change_mtu)
+ err = ops->ndo_change_mtu(dev, new_mtu);
+ else
+ dev->mtu = new_mtu;
+
+ if (!err && dev->flags & IFF_UP)
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ return err;
+}
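+
+/*
+ * A minimal usage sketch, mirroring the SIOCSIFMTU path (RTNL held; the
+ * device name and MTU value are illustrative):
+ *
+ *	dev = __dev_get_by_name(net, "eth0");
+ *	err = dev ? dev_set_mtu(dev, 9000) : -ENODEV;
+ */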
+
+/**
+ * dev_set_mac_address - Change Media Access Control Address
+ * @dev: device
+ * @sa: new address
+ *
+ * Change the hardware (MAC) address of the device
+ */
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
+
+ if (!ops->ndo_set_mac_address)
+ return -EOPNOTSUPP;
+ if (sa->sa_family != dev->type)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ err = ops->ndo_set_mac_address(dev, sa);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
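+
+/*
+ * A minimal usage sketch, mirroring the SIOCSIFHWADDR handling below
+ * (RTNL held; new_mac is a hypothetical ETH_ALEN buffer):
+ *
+ *	struct sockaddr sa;
+ *
+ *	sa.sa_family = dev->type;
+ *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
+ *	err = dev_set_mac_address(dev, &sa);
+ */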
+
+/*
+ * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
+ */
+static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
+{
+ int err;
+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+
+ if (!dev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr->ifr_flags = dev_get_flags(dev);
+ return 0;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface
+ (currently unused) */
+ ifr->ifr_metric = 0;
+ return 0;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr->ifr_mtu = dev->mtu;
+ return 0;
+
+ case SIOCGIFHWADDR:
+ if (!dev->addr_len)
+ memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+ else
+ memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ ifr->ifr_hwaddr.sa_family = dev->type;
+ return 0;
+
+ case SIOCGIFSLAVE:
+ err = -EINVAL;
+ break;
+
+ case SIOCGIFMAP:
+ ifr->ifr_map.mem_start = dev->mem_start;
+ ifr->ifr_map.mem_end = dev->mem_end;
+ ifr->ifr_map.base_addr = dev->base_addr;
+ ifr->ifr_map.irq = dev->irq;
+ ifr->ifr_map.dma = dev->dma;
+ ifr->ifr_map.port = dev->if_port;
+ return 0;
+
+ case SIOCGIFINDEX:
+ ifr->ifr_ifindex = dev->ifindex;
+ return 0;
+
+ case SIOCGIFTXQLEN:
+ ifr->ifr_qlen = dev->tx_queue_len;
+ return 0;
+
+ default:
+ /* dev_ioctl() should ensure this case
+ * is never reached
+ */
+ WARN_ON(1);
+ err = -EINVAL;
+ break;
+
+ }
+ return err;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls, inside rtnl_lock()
+ */
+static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
+{
+ int err;
+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+ const struct net_device_ops *ops;
+
+ if (!dev)
+ return -ENODEV;
+
+ ops = dev->netdev_ops;
+
+ switch (cmd) {
+ case SIOCSIFFLAGS: /* Set interface flags */
+ return dev_change_flags(dev, ifr->ifr_flags);
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface
+ (currently unused) */
+ return -EOPNOTSUPP;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+ return dev_set_mtu(dev, ifr->ifr_mtu);
+
+ case SIOCSIFHWADDR:
+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
+
+ case SIOCSIFHWBROADCAST:
+ if (ifr->ifr_hwaddr.sa_family != dev->type)
+ return -EINVAL;
+ memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return 0;
+
+ case SIOCSIFMAP:
+ if (ops->ndo_set_config) {
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return ops->ndo_set_config(dev, &ifr->ifr_map);
+ }
+ return -EOPNOTSUPP;
+
+ case SIOCADDMULTI:
+ if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
+ dev->addr_len, 1);
+
+ case SIOCDELMULTI:
+ if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
+ dev->addr_len, 1);
+
+ case SIOCSIFTXQLEN:
+ if (ifr->ifr_qlen < 0)
+ return -EINVAL;
+ dev->tx_queue_len = ifr->ifr_qlen;
+ return 0;
+
+ case SIOCSIFNAME:
+ ifr->ifr_newname[IFNAMSIZ-1] = '\0';
+ return dev_change_name(dev, ifr->ifr_newname);
+
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if ((cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15) ||
+ cmd == SIOCBONDENSLAVE ||
+ cmd == SIOCBONDRELEASE ||
+ cmd == SIOCBONDSETHWADDR ||
+ cmd == SIOCBONDSLAVEINFOQUERY ||
+ cmd == SIOCBONDINFOQUERY ||
+ cmd == SIOCBONDCHANGEACTIVE ||
+ cmd == SIOCGMIIPHY ||
+ cmd == SIOCGMIIREG ||
+ cmd == SIOCSMIIREG ||
+ cmd == SIOCBRADDIF ||
+ cmd == SIOCBRDELIF ||
+ cmd == SIOCWANDEV) {
+ err = -EOPNOTSUPP;
+ if (ops->ndo_do_ioctl) {
+ if (netif_device_present(dev))
+ err = ops->ndo_do_ioctl(dev, ifr, cmd);
+ else
+ err = -ENODEV;
+ }
+ } else
+ err = -EINVAL;
+
+ }
+ return err;
+}
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+/**
+ * dev_ioctl - network device ioctl
+ * @net: the applicable net namespace
+ * @cmd: command to issue
+ * @arg: pointer to a struct ifreq in user space
+ *
+ * Issue ioctl functions to devices. This is normally called by the
+ * user space syscall interfaces but can sometimes be useful for
+ * other purposes. The return value is the return from the syscall if
+ * positive or a negative errno code on error.
+ */
+
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+{
+ struct ifreq ifr;
+ int ret;
+ char *colon;
+
+ /* One special case: SIOCGIFCONF takes ifconf argument
+ and requires shared lock, because it sleeps writing
+ to user space.
+ */
+
+ if (cmd == SIOCGIFCONF) {
+ rtnl_lock();
+ ret = dev_ifconf(net, (char __user *) arg);
+ rtnl_unlock();
+ return ret;
+ }
+ if (cmd == SIOCGIFNAME)
+ return dev_ifname(net, (struct ifreq __user *)arg);
+
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ return -EFAULT;
+
+ ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+ colon = strchr(ifr.ifr_name, ':');
+ if (colon)
+ *colon = 0;
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ switch (cmd) {
+ /*
+ * These ioctl calls:
+ * - can be done by all.
+ * - atomic and do not require locking.
+ * - return a value
+ */
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ case SIOCGIFINDEX:
+ case SIOCGIFTXQLEN:
+ dev_load(net, ifr.ifr_name);
+ read_lock(&dev_base_lock);
+ ret = dev_ifsioc_locked(net, &ifr, cmd);
+ read_unlock(&dev_base_lock);
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
+
+ case SIOCETHTOOL:
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ethtool(net, &ifr);
+ rtnl_unlock();
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
+
+ /*
+ * These ioctl calls:
+ * - require superuser power.
+ * - require strict serialization.
+ * - return a value
+ */
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSIFNAME:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
+
+ /*
+ * These ioctl calls:
+ * - require superuser power.
+ * - require strict serialization.
+ * - do not return a value
+ */
+ case SIOCSIFFLAGS:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMAP:
+ case SIOCSIFHWADDR:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ case SIOCSIFHWBROADCAST:
+ case SIOCSIFTXQLEN:
+ case SIOCSMIIREG:
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ case SIOCBRADDIF:
+ case SIOCBRDELIF:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* fall through */
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCGIFMEM:
+ /* Get the per device memory space. We can add this but
+ * currently do not support it */
+ case SIOCSIFMEM:
+ /* Set the per device memory buffer space.
+ * Not applicable in our case */
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+ default:
+ if (cmd == SIOCWANDEV ||
+ (cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15)) {
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ if (!ret && copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ return ret;
+ }
+ /* Take care of Wireless Extensions */
+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
+ return wext_handle_ioctl(net, &ifr, cmd, arg);
+ return -EINVAL;
+ }
+}
+
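+/*
+ * Illustrative sketch (editor's note, not part of the original file): how the
+ * dev_ioctl() path above is normally reached from user space, via an ioctl on
+ * any socket with a struct ifreq argument. The interface name and socket type
+ * are arbitrary examples; SIOCGIFFLAGS ends up in dev_ifsioc_locked() above.
+ *
+ *	#include <sys/ioctl.h>
+ *	#include <sys/socket.h>
+ *	#include <net/if.h>
+ *	#include <string.h>
+ *	#include <unistd.h>
+ *
+ *	static int get_flags_example(void)
+ *	{
+ *		struct ifreq ifr;
+ *		int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *		int flags;
+ *
+ *		if (fd < 0)
+ *			return -1;
+ *		memset(&ifr, 0, sizeof(ifr));
+ *		strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *		if (ioctl(fd, SIOCGIFFLAGS, &ifr) < 0) {
+ *			close(fd);
+ *			return -1;
+ *		}
+ *		flags = ifr.ifr_flags;
+ *		close(fd);
+ *		return flags;
+ *	}
+ */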
+
+/**
+ * dev_new_index - allocate an ifindex
+ * @net: the applicable net namespace
+ *
+ * Returns a suitable unique value for a new device interface
+ * number. The caller must hold the rtnl semaphore or the
+ * dev_base_lock to be sure it remains unique.
+ */
+static int dev_new_index(struct net *net)
+{
+ static int ifindex;
+ for (;;) {
+ if (++ifindex <= 0)
+ ifindex = 1;
+ if (!__dev_get_by_index(net, ifindex))
+ return ifindex;
+ }
+}
+
+/* Delayed registration/unregistration */
+static LIST_HEAD(net_todo_list);
+
+static void net_set_todo(struct net_device *dev)
+{
+ list_add_tail(&dev->todo_list, &net_todo_list);
+}
+
+static void rollback_registered(struct net_device *dev)
+{
+ BUG_ON(dev_boot_phase);
+ ASSERT_RTNL();
+
+ /* Some devices call this without ever having registered, to unwind a failed initialization. */
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
+ "was registered\n", dev->name, dev);
+
+ WARN_ON(1);
+ return;
+ }
+
+ BUG_ON(dev->reg_state != NETREG_REGISTERED);
+
+ /* If device is running, close it first. */
+ dev_close(dev);
+
+ /* And unlink it from device chain. */
+ unlist_netdevice(dev);
+
+ dev->reg_state = NETREG_UNREGISTERING;
+
+ synchronize_net();
+
+ /* Shutdown queueing discipline. */
+ dev_shutdown(dev);
+
+
+ /* Notify protocols that we are about to destroy
+ this device; they should clean up all of their state.
+ */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ /*
+ * Flush the unicast and multicast chains
+ */
+ dev_addr_discard(dev);
+
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+
+ /* Notifier chain MUST detach us from master device. */
+ WARN_ON(dev->master);
+
+ /* Remove entries from kobject tree */
+ netdev_unregister_kobject(dev);
+
+ synchronize_net();
+
+ dev_put(dev);
+}
+
+static void __netdev_init_queue_locks_one(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ spin_lock_init(&dev_queue->_xmit_lock);
+ netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+ dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
+ __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
+}
+
+unsigned long netdev_fix_features(unsigned long features, const char *name)
+{
+ /* Fix illegal SG+CSUM combinations. */
+ if ((features & NETIF_F_SG) &&
+ !(features & NETIF_F_ALL_CSUM)) {
+ if (name)
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
+ "checksum feature.\n", name);
+ features &= ~NETIF_F_SG;
+ }
+
+ /* TSO requires that SG is present as well. */
+ if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
+ if (name)
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
+ "SG feature.\n", name);
+ features &= ~NETIF_F_TSO;
+ }
+
+ if (features & NETIF_F_UFO) {
+ if (!(features & NETIF_F_GEN_CSUM)) {
+ if (name)
+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
+ "since no NETIF_F_HW_CSUM feature.\n",
+ name);
+ features &= ~NETIF_F_UFO;
+ }
+
+ if (!(features & NETIF_F_SG)) {
+ if (name)
+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
+ "since no NETIF_F_SG feature.\n", name);
+ features &= ~NETIF_F_UFO;
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(netdev_fix_features);
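+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): a driver
+ * may run the feature flags it intends to advertise through
+ * netdev_fix_features() before registration, just as register_netdevice()
+ * does below, so that illegal combinations such as TSO without SG are dropped
+ * with a notice in the log:
+ *
+ *	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ *	dev->features = netdev_fix_features(dev->features, dev->name);
+ */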
+
+/* Some devices need to (re-)set their netdev_ops inside
+ * ->init() or similar. If that happens, we have to set up
+ * the compat pointers again.
+ */
+void netdev_resync_ops(struct net_device *dev)
+{
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ dev->init = ops->ndo_init;
+ dev->uninit = ops->ndo_uninit;
+ dev->open = ops->ndo_open;
+ dev->change_rx_flags = ops->ndo_change_rx_flags;
+ dev->set_rx_mode = ops->ndo_set_rx_mode;
+ dev->set_multicast_list = ops->ndo_set_multicast_list;
+ dev->set_mac_address = ops->ndo_set_mac_address;
+ dev->validate_addr = ops->ndo_validate_addr;
+ dev->do_ioctl = ops->ndo_do_ioctl;
+ dev->set_config = ops->ndo_set_config;
+ dev->change_mtu = ops->ndo_change_mtu;
+ dev->neigh_setup = ops->ndo_neigh_setup;
+ dev->tx_timeout = ops->ndo_tx_timeout;
+ dev->get_stats = ops->ndo_get_stats;
+ dev->vlan_rx_register = ops->ndo_vlan_rx_register;
+ dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ops->ndo_poll_controller;
+#endif
+#endif
+}
+EXPORT_SYMBOL(netdev_resync_ops);
+
+/**
+ * register_netdevice - register a network device
+ * @dev: device to register
+ *
+ * Take a completed network device structure and add it to the kernel
+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
+ * chain. 0 is returned on success. A negative errno code is returned
+ * on a failure to set up the device, or if the name is a duplicate.
+ *
+ * Callers must hold the rtnl semaphore. You may want
+ * register_netdev() instead of this.
+ *
+ * BUGS:
+ * The locking appears insufficient to guarantee two parallel registers
+ * will not get the same name.
+ */
+
+int register_netdevice(struct net_device *dev)
+{
+ struct hlist_head *head;
+ struct hlist_node *p;
+ int ret;
+ struct net *net = dev_net(dev);
+
+ BUG_ON(dev_boot_phase);
+ ASSERT_RTNL();
+
+ might_sleep();
+
+ /* When net_device's are persistent, this will be fatal. */
+ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
+ BUG_ON(!net);
+
+ spin_lock_init(&dev->addr_list_lock);
+ netdev_set_addr_lockdep_class(dev);
+ netdev_init_queue_locks(dev);
+
+ dev->iflink = -1;
+
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ /* Netdevice_ops API compatibility support.
+ * This is temporary until all network devices are converted.
+ */
+ if (dev->netdev_ops) {
+ netdev_resync_ops(dev);
+ } else {
+ char drivername[64];
+ pr_info("%s (%s): not using net_device_ops yet\n",
+ dev->name, netdev_drivername(dev, drivername, 64));
+
+ /* This works only because net_device_ops and the
+ compatibility structure are laid out identically. */
+ dev->netdev_ops = (void *) &(dev->init);
+ }
+#endif
+
+ /* Init, if this function is available */
+ if (dev->netdev_ops->ndo_init) {
+ ret = dev->netdev_ops->ndo_init(dev);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (!dev_valid_name(dev->name)) {
+ ret = -EINVAL;
+ goto err_uninit;
+ }
+
+ dev->ifindex = dev_new_index(net);
+ if (dev->iflink == -1)
+ dev->iflink = dev->ifindex;
+
+ /* Check for existence of name */
+ head = dev_name_hash(net, dev->name);
+ hlist_for_each(p, head) {
+ struct net_device *d
+ = hlist_entry(p, struct net_device, name_hlist);
+ if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
+ ret = -EEXIST;
+ goto err_uninit;
+ }
+ }
+
+ /* Fix illegal checksum combinations */
+ if ((dev->features & NETIF_F_HW_CSUM) &&
+ (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
+ dev->name);
+ dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
+
+ if ((dev->features & NETIF_F_NO_CSUM) &&
+ (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
+ dev->name);
+ dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+ }
+
+ dev->features = netdev_fix_features(dev->features, dev->name);
+
+ /* Enable software GSO if SG is supported. */
+ if (dev->features & NETIF_F_SG)
+ dev->features |= NETIF_F_GSO;
+
+ netdev_initialize_kobject(dev);
+ ret = netdev_register_kobject(dev);
+ if (ret)
+ goto err_uninit;
+ dev->reg_state = NETREG_REGISTERED;
+
+ /*
+ * Default initial state at registration is that the
+ * device is present.
+ */
+
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+ dev_init_scheduler(dev);
+ dev_hold(dev);
+ list_netdevice(dev);
+
+ /* Notify protocols that a new device appeared. */
+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ ret = notifier_to_errno(ret);
+ if (ret) {
+ rollback_registered(dev);
+ dev->reg_state = NETREG_UNREGISTERED;
+ }
+
+out:
+ return ret;
+
+err_uninit:
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+ goto out;
+}
+
+/**
+ * init_dummy_netdev - init a dummy network device for NAPI
+ * @dev: device to init
+ *
+ * This takes a network device structure and initializes the minimum
+ * set of fields so it can be used to schedule NAPI polls without
+ * registering a full blown interface. This is to be used by drivers
+ * that need to tie several hardware interfaces to a single NAPI
+ * poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+ /* Clear everything. Note we don't initialize spinlocks
+ * as they aren't supposed to be taken by any of the
+ * NAPI code and this dummy netdev is supposed to be
+ * only ever used for NAPI polls
+ */
+ memset(dev, 0, sizeof(struct net_device));
+
+ /* make sure we BUG if trying to hit standard
+ * register/unregister code path
+ */
+ dev->reg_state = NETREG_DUMMY;
+
+ /* initialize the ref count */
+ atomic_set(&dev->refcnt, 1);
+
+ /* NAPI wants this */
+ INIT_LIST_HEAD(&dev->napi_list);
+
+ /* a dummy interface is started by default */
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+ set_bit(__LINK_STATE_START, &dev->state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
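+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): a driver
+ * with several hardware ports behind one interrupt could back them all with a
+ * single NAPI context hung off a dummy netdev. All names here are
+ * hypothetical.
+ *
+ *	static struct net_device dummy_dev;
+ *	static struct napi_struct shared_napi;
+ *
+ *	static int my_probe(void)
+ *	{
+ *		init_dummy_netdev(&dummy_dev);
+ *		netif_napi_add(&dummy_dev, &shared_napi, my_poll, 64);
+ *		napi_enable(&shared_napi);
+ *		return 0;
+ *	}
+ *
+ * The interrupt handler then only needs to call napi_schedule(&shared_napi),
+ * and my_poll() runs from softirq context as usual.
+ */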
+
+
+/**
+ * register_netdev - register a network device
+ * @dev: device to register
+ *
+ * Take a completed network device structure and add it to the kernel
+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
+ * chain. 0 is returned on success. A negative errno code is returned
+ * on a failure to set up the device, or if the name is a duplicate.
+ *
+ * This is a wrapper around register_netdevice that takes the rtnl semaphore
+ * and expands the device name if you passed a format string to
+ * alloc_netdev.
+ */
+int register_netdev(struct net_device *dev)
+{
+ int err;
+
+ rtnl_lock();
+
+ /*
+ * If the name is a format string the caller wants us to do a
+ * name allocation.
+ */
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto out;
+ }
+
+ err = register_netdevice(dev);
+out:
+ rtnl_unlock();
+ return err;
+}
+EXPORT_SYMBOL(register_netdev);
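+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): the
+ * usual driver-side lifecycle around register_netdev(), which takes the rtnl
+ * lock itself and expands the "%d" in the name. The "my%d" template,
+ * my_setup() and struct my_priv are hypothetical; an Ethernet driver would
+ * typically use alloc_etherdev() instead.
+ *
+ *	struct net_device *dev;
+ *	int err;
+ *
+ *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
+ *	if (!dev)
+ *		return -ENOMEM;
+ *
+ *	err = register_netdev(dev);
+ *	if (err) {
+ *		free_netdev(dev);
+ *		return err;
+ *	}
+ *
+ *	unregister_netdev(dev);
+ *	free_netdev(dev);
+ */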
+
+/*
+ * netdev_wait_allrefs - wait until all references are gone.
+ *
+ * This is called when unregistering network devices.
+ *
+ * Any protocol or device that holds a reference should register
+ * for netdevice notification, and clean up and put back the
+ * reference if it receives an UNREGISTER event.
+ * We can get stuck here if buggy protocols don't correctly
+ * call dev_put.
+ */
+static void netdev_wait_allrefs(struct net_device *dev)
+{
+ unsigned long rebroadcast_time, warning_time;
+
+ rebroadcast_time = warning_time = jiffies;
+ while (atomic_read(&dev->refcnt) != 0) {
+ if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
+ rtnl_lock();
+
+ /* Rebroadcast unregister notification */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &dev->state)) {
+ /* We must not have linkwatch events
+ * pending on unregister. If this
+ * happens, we simply run the queue
+ * unscheduled, resulting in a noop
+ * for this device.
+ */
+ linkwatch_run_queue();
+ }
+
+ __rtnl_unlock();
+
+ rebroadcast_time = jiffies;
+ }
+
+ msleep(250);
+
+ if (time_after(jiffies, warning_time + 10 * HZ)) {
+ printk(KERN_EMERG "unregister_netdevice: "
+ "waiting for %s to become free. Usage "
+ "count = %d\n",
+ dev->name, atomic_read(&dev->refcnt));
+ warning_time = jiffies;
+ }
+ }
+}
+
+/* The sequence is:
+ *
+ * rtnl_lock();
+ * ...
+ * register_netdevice(x1);
+ * register_netdevice(x2);
+ * ...
+ * unregister_netdevice(y1);
+ * unregister_netdevice(y2);
+ * ...
+ * rtnl_unlock();
+ * free_netdev(y1);
+ * free_netdev(y2);
+ *
+ * We are invoked by rtnl_unlock().
+ * This allows us to deal with problems:
+ * 1) We can delete sysfs objects which invoke hotplug
+ * without deadlocking with linkwatch via keventd.
+ * 2) Since we run with the RTNL semaphore not held, we can sleep
+ * safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
+ */
+void netdev_run_todo(void)
+{
+ struct list_head list;
+
+ /* Snapshot list, allow later requests */
+ list_replace_init(&net_todo_list, &list);
+
+ __rtnl_unlock();
+
+ while (!list_empty(&list)) {
+ struct net_device *dev
+ = list_entry(list.next, struct net_device, todo_list);
+ list_del(&dev->todo_list);
+
+ if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
+ printk(KERN_ERR "network todo '%s' but state %d\n",
+ dev->name, dev->reg_state);
+ dump_stack();
+ continue;
+ }
+
+ dev->reg_state = NETREG_UNREGISTERED;
+
+ on_each_cpu(flush_backlog, dev, 1);
+
+ netdev_wait_allrefs(dev);
+
+ /* paranoia */
+ BUG_ON(atomic_read(&dev->refcnt));
+ WARN_ON(dev->ip_ptr);
+ WARN_ON(dev->ip6_ptr);
+ WARN_ON(dev->dn_ptr);
+
+ if (dev->destructor)
+ dev->destructor(dev);
+
+ /* Free network device */
+ kobject_put(&dev->dev.kobj);
+ }
+}
+
+/**
+ * dev_get_stats - get network device statistics
+ * @dev: device to get statistics from
+ *
+ * Get network statistics from device. The device driver may provide
+ * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
+ * the internal statistics structure is used.
+ */
+const struct net_device_stats *dev_get_stats(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (ops->ndo_get_stats)
+ return ops->ndo_get_stats(dev);
+ else
+ return &dev->stats;
+}
+EXPORT_SYMBOL(dev_get_stats);
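+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): a driver
+ * that wants more than the default dev->stats can provide ndo_get_stats and
+ * refresh the structure from hardware counters before handing it back. The
+ * my_* names are hypothetical.
+ *
+ *	static struct net_device_stats *my_get_stats(struct net_device *dev)
+ *	{
+ *		struct my_priv *priv = netdev_priv(dev);
+ *
+ *		dev->stats.rx_packets = my_read_counter(priv, MY_RX_PKTS);
+ *		dev->stats.tx_packets = my_read_counter(priv, MY_TX_PKTS);
+ *		return &dev->stats;
+ *	}
+ *
+ *	static const struct net_device_ops my_netdev_ops = {
+ *		.ndo_get_stats = my_get_stats,
+ *	};
+ */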
+
+static void netdev_init_one_queue(struct net_device *dev,
+ struct netdev_queue *queue,
+ void *_unused)
+{
+ queue->dev = dev;
+}
+
+static void netdev_init_queues(struct net_device *dev)
+{
+ netdev_init_one_queue(dev, &dev->rx_queue, NULL);
+ netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+ spin_lock_init(&dev->tx_global_lock);
+}
+
+/**
+ * alloc_netdev_mq - allocate network device
+ * @sizeof_priv: size of private data to allocate space for
+ * @name: device name format string
+ * @setup: callback to initialize device
+ * @queue_count: the number of subqueues to allocate
+ *
+ * Allocates a struct net_device with private data area for driver use
+ * and performs basic initialization. Also allocates subqueue structs
+ * for each queue on the device at the end of the netdevice.
+ */
+struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+ void (*setup)(struct net_device *), unsigned int queue_count)
+{
+ struct netdev_queue *tx;
+ struct net_device *dev;
+ size_t alloc_size;
+ void *p;
+
+ BUG_ON(strlen(name) >= sizeof(dev->name));
+
+ alloc_size = sizeof(struct net_device);
+ if (sizeof_priv) {
+ /* ensure 32-byte alignment of private area */
+ alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+ alloc_size += sizeof_priv;
+ }
+ /* ensure 32-byte alignment of whole construct */
+ alloc_size += NETDEV_ALIGN_CONST;
+
+ p = kzalloc(alloc_size, GFP_KERNEL);
+ if (!p) {
+ printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
+ return NULL;
+ }
+
+ tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
+ if (!tx) {
+ printk(KERN_ERR "alloc_netdev: Unable to allocate "
+ "tx qdiscs.\n");
+ kfree(p);
+ return NULL;
+ }
+
+ dev = (struct net_device *)
+ (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+ dev->padded = (char *)dev - (char *)p;
+ dev_net_set(dev, &init_net);
+
+ dev->_tx = tx;
+ dev->num_tx_queues = queue_count;
+ dev->real_num_tx_queues = queue_count;
+
+ dev->gso_max_size = GSO_MAX_SIZE;
+
+ netdev_init_queues(dev);
+
+ INIT_LIST_HEAD(&dev->napi_list);
+ setup(dev);
+ strcpy(dev->name, name);
+ return dev;
+}
+EXPORT_SYMBOL(alloc_netdev_mq);
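+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): a
+ * multiqueue-capable Ethernet driver passes its transmit queue count here;
+ * the queue count and private struct are examples.
+ *
+ *	dev = alloc_netdev_mq(sizeof(struct my_priv), "eth%d", ether_setup, 4);
+ *	if (!dev)
+ *		return -ENOMEM;
+ */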
+
+/**
+ * free_netdev - free network device
+ * @dev: device
+ *
+ * This function does the last stage of destroying an allocated device
+ * interface. The reference to the device object is released.
+ * If this is the last reference then it will be freed.
+ */
+void free_netdev(struct net_device *dev)
+{
+ struct napi_struct *p, *n;
+
+ release_net(dev_net(dev));
+
+ kfree(dev->_tx);
+
+ list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
+ netif_napi_del(p);
+
+ /* Compatibility with error handling in drivers */
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ kfree((char *)dev - dev->padded);
+ return;
+ }
+
+ BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
+ dev->reg_state = NETREG_RELEASED;
+
+ /* will free via device release */
+ put_device(&dev->dev);
+}
+
+/**
+ * synchronize_net - Synchronize with packet receive processing
+ *
+ * Wait for packets currently being received to be done.
+ * Does not block later packets from starting.
+ */
+void synchronize_net(void)
+{
+ might_sleep();
+ synchronize_rcu();
+}
+
+/**
+ * unregister_netdevice - remove device from the kernel
+ * @dev: device
+ *
+ * This function shuts down a device interface and removes it
+ * from the kernel tables.
+ *
+ * Callers must hold the rtnl semaphore. You may want
+ * unregister_netdev() instead of this.
+ */
+
+void unregister_netdevice(struct net_device *dev)
+{
+ ASSERT_RTNL();
+
+ rollback_registered(dev);
+ /* Finish processing unregister after unlock */
+ net_set_todo(dev);
+}
+
+/**
+ * unregister_netdev - remove device from the kernel
+ * @dev: device
+ *
+ * This function shuts down a device interface and removes it
+ * from the kernel tables.
+ *
+ * This is just a wrapper for unregister_netdevice that takes
+ * the rtnl semaphore. In general you want to use this and not
+ * unregister_netdevice.
+ */
+void unregister_netdev(struct net_device *dev)
+{
+ rtnl_lock();
+ unregister_netdevice(dev);
+ rtnl_unlock();
+}
+
+EXPORT_SYMBOL(unregister_netdev);
+
+/**
+ * dev_change_net_namespace - move device to a different network namespace
+ * @dev: device
+ * @net: network namespace
+ * @pat: If not NULL name pattern to try if the current device name
+ * is already taken in the destination network namespace.
+ *
+ * This function shuts down a device interface and moves it
+ * to a new network namespace. On success 0 is returned, on
+ * a failure a negative errno code is returned.
+ *
+ * Callers must hold the rtnl semaphore.
+ */
+
+int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
+{
+ char buf[IFNAMSIZ];
+ const char *destname;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* Don't allow namespace local devices to be moved. */
+ err = -EINVAL;
+ if (dev->features & NETIF_F_NETNS_LOCAL)
+ goto out;
+
+#ifdef CONFIG_SYSFS
+ /* Don't allow real devices to be moved when sysfs
+ * is enabled.
+ */
+ err = -EINVAL;
+ if (dev->dev.parent)
+ goto out;
+#endif
+
+ /* Ensure the device has been registered */
+ err = -EINVAL;
+ if (dev->reg_state != NETREG_REGISTERED)
+ goto out;
+
+ /* Get out if there is nothing to do */
+ err = 0;
+ if (net_eq(dev_net(dev), net))
+ goto out;
+
+ /* Pick the destination device name, and ensure
+ * we can use it in the destination network namespace.
+ */
+ err = -EEXIST;
+ destname = dev->name;
+ if (__dev_get_by_name(net, destname)) {
+ /* We get here if we can't use the current device name */
+ if (!pat)
+ goto out;
+ if (!dev_valid_name(pat))
+ goto out;
+ if (strchr(pat, '%')) {
+ if (__dev_alloc_name(net, pat, buf) < 0)
+ goto out;
+ destname = buf;
+ } else
+ destname = pat;
+ if (__dev_get_by_name(net, destname))
+ goto out;
+ }
+
+ /*
+ * And now a mini version of register_netdevice and unregister_netdevice.
+ */
+
+ /* If device is running close it first. */
+ dev_close(dev);
+
+ /* And unlink it from device chain */
+ err = -ENODEV;
+ unlist_netdevice(dev);
+
+ synchronize_net();
+
+ /* Shutdown queueing discipline. */
+ dev_shutdown(dev);
+
+ /* Notify protocols that we are about to destroy
+ this device; they should clean up all of their state.
+ */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ /*
+ * Flush the unicast and multicast chains
+ */
+ dev_addr_discard(dev);
+
+ netdev_unregister_kobject(dev);
+
+ /* Actually switch the network namespace */
+ dev_net_set(dev, net);
+
+ /* Assign the new device name */
+ if (destname != dev->name)
+ strcpy(dev->name, destname);
+
+ /* If there is an ifindex conflict assign a new one */
+ if (__dev_get_by_index(net, dev->ifindex)) {
+ int iflink = (dev->iflink == dev->ifindex);
+ dev->ifindex = dev_new_index(net);
+ if (iflink)
+ dev->iflink = dev->ifindex;
+ }
+
+ /* Fixup kobjects */
+ err = netdev_register_kobject(dev);
+ WARN_ON(err);
+
+ /* Add the device back in the hashes */
+ list_netdevice(dev);
+
+ /* Notify protocols that a new device appeared. */
+ call_netdevice_notifiers(NETDEV_REGISTER, dev);
+
+ synchronize_net();
+ err = 0;
+out:
+ return err;
+}
+
+static int dev_cpu_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ocpu)
+{
+ struct sk_buff **list_skb;
+ struct Qdisc **list_net;
+ struct sk_buff *skb;
+ unsigned int cpu, oldcpu = (unsigned long)ocpu;
+ struct softnet_data *sd, *oldsd;
+
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return NOTIFY_OK;
+
+ local_irq_disable();
+ cpu = smp_processor_id();
+ sd = &per_cpu(softnet_data, cpu);
+ oldsd = &per_cpu(softnet_data, oldcpu);
+
+ /* Find end of our completion_queue. */
+ list_skb = &sd->completion_queue;
+ while (*list_skb)
+ list_skb = &(*list_skb)->next;
+ /* Append completion queue from offline CPU. */
+ *list_skb = oldsd->completion_queue;
+ oldsd->completion_queue = NULL;
+
+ /* Find end of our output_queue. */
+ list_net = &sd->output_queue;
+ while (*list_net)
+ list_net = &(*list_net)->next_sched;
+ /* Append output queue from offline CPU. */
+ *list_net = oldsd->output_queue;
+ oldsd->output_queue = NULL;
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
+ netif_rx(skb);
+
+ return NOTIFY_OK;
+}
+
+
+/**
+ * netdev_increment_features - increment feature set by one
+ * @all: current feature set
+ * @one: new feature set
+ * @mask: mask feature set
+ *
+ * Computes a new feature set after adding a device with feature set
+ * @one to the master device with current feature set @all. Will not
+ * enable anything that is off in @mask. Returns the new feature set.
+ */
+unsigned long netdev_increment_features(unsigned long all, unsigned long one,
+ unsigned long mask)
+{
+ /* If device needs checksumming, downgrade to it. */
+ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
+ all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
+ else if (mask & NETIF_F_ALL_CSUM) {
+ /* If one device supports v4/v6 checksumming, set for all. */
+ if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
+ !(all & NETIF_F_GEN_CSUM)) {
+ all &= ~NETIF_F_ALL_CSUM;
+ all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
+
+ /* If one device supports hw checksumming, set for all. */
+ if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
+ all &= ~NETIF_F_ALL_CSUM;
+ all |= NETIF_F_HW_CSUM;
+ }
+ }
+
+ one |= NETIF_F_ALL_CSUM;
+
+ one |= all & NETIF_F_ONE_FOR_ALL;
+ all &= one | NETIF_F_LLTX | NETIF_F_GSO;
+ all |= one & mask & NETIF_F_ONE_FOR_ALL;
+
+ return all;
+}
+EXPORT_SYMBOL(netdev_increment_features);
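+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): roughly
+ * the pattern a master device (bridge, bonding) can use to fold its slaves'
+ * feature sets together; the slave list and mask names are hypothetical.
+ *
+ *	unsigned long features, mask;
+ *
+ *	features = mask = master_feature_mask;
+ *	features &= ~NETIF_F_ONE_FOR_ALL;
+ *	list_for_each_entry(slave, &slave_list, list)
+ *		features = netdev_increment_features(features,
+ *						     slave->dev->features, mask);
+ *	master->features = netdev_fix_features(features, master->name);
+ */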
+
+static struct hlist_head *netdev_create_hash(void)
+{
+ int i;
+ struct hlist_head *hash;
+
+ hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
+ if (hash != NULL)
+ for (i = 0; i < NETDEV_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&hash[i]);
+
+ return hash;
+}
+
+/* Initialize per network namespace state */
+static int __net_init netdev_init(struct net *net)
+{
+ INIT_LIST_HEAD(&net->dev_base_head);
+
+ net->dev_name_head = netdev_create_hash();
+ if (net->dev_name_head == NULL)
+ goto err_name;
+
+ net->dev_index_head = netdev_create_hash();
+ if (net->dev_index_head == NULL)
+ goto err_idx;
+
+ return 0;
+
+err_idx:
+ kfree(net->dev_name_head);
+err_name:
+ return -ENOMEM;
+}
+
+/**
+ * netdev_drivername - network driver for the device
+ * @dev: network device
+ * @buffer: buffer for resulting name
+ * @len: size of buffer
+ *
+ * Determine network driver for device.
+ */
+char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
+{
+ const struct device_driver *driver;
+ const struct device *parent;
+
+ if (len <= 0 || !buffer)
+ return buffer;
+ buffer[0] = 0;
+
+ parent = dev->dev.parent;
+
+ if (!parent)
+ return buffer;
+
+ driver = parent->driver;
+ if (driver && driver->name)
+ strlcpy(buffer, driver->name, len);
+ return buffer;
+}
+
+static void __net_exit netdev_exit(struct net *net)
+{
+ kfree(net->dev_name_head);
+ kfree(net->dev_index_head);
+}
+
+static struct pernet_operations __net_initdata netdev_net_ops = {
+ .init = netdev_init,
+ .exit = netdev_exit,
+};
+
+static void __net_exit default_device_exit(struct net *net)
+{
+ struct net_device *dev;
+ /*
+ * Push all migratable network devices back to the
+ * initial network namespace
+ */
+ rtnl_lock();
+restart:
+ for_each_netdev(net, dev) {
+ int err;
+ char fb_name[IFNAMSIZ];
+
+ /* Ignore unmovable devices (e.g. loopback) */
+ if (dev->features & NETIF_F_NETNS_LOCAL)
+ continue;
+
+ /* Delete virtual devices */
+ if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
+ dev->rtnl_link_ops->dellink(dev);
+ goto restart;
+ }
+
+ /* Push remaining network devices to init_net */
+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ err = dev_change_net_namespace(dev, &init_net, fb_name);
+ if (err) {
+ printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
+ __func__, dev->name, err);
+ BUG();
+ }
+ goto restart;
+ }
+ rtnl_unlock();
+}
+
+static struct pernet_operations __net_initdata default_device_ops = {
+ .exit = default_device_exit,
+};
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+
+/*
+ * This is called single threaded during boot, so no need
+ * to take the rtnl semaphore.
+ */
+static int __init net_dev_init(void)
+{
+ int i, rc = -ENOMEM;
+
+ BUG_ON(!dev_boot_phase);
+
+ if (dev_proc_init())
+ goto out;
+
+ if (netdev_kobject_init())
+ goto out;
+
+ INIT_LIST_HEAD(&ptype_all);
+ for (i = 0; i < PTYPE_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&ptype_base[i]);
+
+ if (register_pernet_subsys(&netdev_net_ops))
+ goto out;
+
+ /*
+ * Initialise the packet receive queues.
+ */
+
+ for_each_possible_cpu(i) {
+ struct softnet_data *queue;
+
+ queue = &per_cpu(softnet_data, i);
+ skb_queue_head_init(&queue->input_pkt_queue);
+ queue->completion_queue = NULL;
+ INIT_LIST_HEAD(&queue->poll_list);
+
+ queue->backlog.poll = process_backlog;
+ queue->backlog.weight = weight_p;
+ queue->backlog.gro_list = NULL;
+ }
+
+ dev_boot_phase = 0;
+
+ /* The loopback device is special: if any other network device
+ * is present in a network namespace, the loopback device must
+ * be present too. Since we now dynamically allocate and free the
+ * loopback device, ensure this invariant is maintained by
+ * keeping the loopback device as the first device on the
+ * list of network devices, so that it is the first device that
+ * appears and the last network device that disappears.
+ */
+ if (register_pernet_device(&loopback_net_ops))
+ goto out;
+
+ if (register_pernet_device(&default_device_ops))
+ goto out;
+
+ open_softirq(NET_TX_SOFTIRQ, net_tx_action);
+ open_softirq(NET_RX_SOFTIRQ, net_rx_action);
+
+ hotcpu_notifier(dev_cpu_callback, 0);
+ dst_init();
+ dev_mcast_init();
+ rc = 0;
+out:
+ return rc;
+}
+
+subsys_initcall(net_dev_init);
+
+EXPORT_SYMBOL(__dev_get_by_index);
+EXPORT_SYMBOL(__dev_get_by_name);
+EXPORT_SYMBOL(__dev_remove_pack);
+EXPORT_SYMBOL(dev_valid_name);
+EXPORT_SYMBOL(dev_add_pack);
+EXPORT_SYMBOL(dev_alloc_name);
+EXPORT_SYMBOL(dev_close);
+EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_index);
+EXPORT_SYMBOL(dev_get_by_name);
+EXPORT_SYMBOL(dev_open);
+EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_remove_pack);
+EXPORT_SYMBOL(dev_set_allmulti);
+EXPORT_SYMBOL(dev_set_promiscuity);
+EXPORT_SYMBOL(dev_change_flags);
+EXPORT_SYMBOL(dev_set_mtu);
+EXPORT_SYMBOL(dev_set_mac_address);
+EXPORT_SYMBOL(free_netdev);
+EXPORT_SYMBOL(netdev_boot_setup_check);
+EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_state_change);
+EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_rx);
+EXPORT_SYMBOL(register_gifconf);
+EXPORT_SYMBOL(register_netdevice);
+EXPORT_SYMBOL(register_netdevice_notifier);
+EXPORT_SYMBOL(skb_checksum_help);
+EXPORT_SYMBOL(synchronize_net);
+EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(unregister_netdevice_notifier);
+EXPORT_SYMBOL(net_enable_timestamp);
+EXPORT_SYMBOL(net_disable_timestamp);
+EXPORT_SYMBOL(dev_get_flags);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+EXPORT_SYMBOL(br_handle_frame_hook);
+EXPORT_SYMBOL(br_fdb_get_hook);
+EXPORT_SYMBOL(br_fdb_put_hook);
+#endif
+
+EXPORT_SYMBOL(dev_load);
+
+EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/dev_mcast.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/dev_mcast.c.svn-base
new file mode 100644
index 00000000..9e2fa39f
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/dev_mcast.c.svn-base
@@ -0,0 +1,229 @@
+/*
+ * Linux NET3: Multicast List maintenance.
+ *
+ * Authors:
+ * Tim Kordas <tjk@nostromo.eeap.cwru.edu>
+ * Richard Underwood <richard@wuzz.demon.co.uk>
+ *
+ * Stir fried together from the IP multicast and CAP patches above
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes:
+ * Alan Cox : Update the device on a real delete
+ * rather than any time but...
+ * Alan Cox : IFF_ALLMULTI support.
+ * Alan Cox : New format set_multicast_list() calls.
+ * Gleb Natapov : Remove dev_mc_lock.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+
+
+/*
+ * Device multicast list maintenance.
+ *
+ * This is used both by IP and by the user level maintenance functions.
+ * Unlike BSD we maintain a usage count on a given multicast address so
+ * that a casual user application can add/delete multicasts used by
+ * protocols without doing damage to the protocols when it deletes the
+ * entries. It also helps IP as it tracks overlapping maps.
+ *
+ * Device mc lists are changed by bh at least if IPv6 is enabled,
+ * so they must be bh protected.
+ *
+ * We block accesses to device mc filters with netif_tx_lock.
+ */
+
+/*
+ * Delete a device level multicast
+ */
+
+int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
+ addr, alen, glbl);
+ if (!err) {
+ /*
+ * We have altered the list, so the card
+ * loaded filter is now wrong. Fix it
+ */
+
+ __dev_set_rx_mode(dev);
+ }
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+
+/*
+ * Add a device level multicast
+ */
+
+int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+
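+/*
+ * Illustrative sketch (editor's note, not part of the original file): a
+ * protocol joining a link-layer multicast group pins the address with
+ * dev_mc_add() and releases it with dev_mc_delete(); the usage count keeps
+ * overlapping users from disturbing one another. The address is an example.
+ *
+ *	static const u8 grp[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
+ *	int err;
+ *
+ *	err = dev_mc_add(dev, (void *)grp, ETH_ALEN, 0);
+ *	...
+ *	dev_mc_delete(dev, (void *)grp, ETH_ALEN, 0);
+ */
+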
+/**
+ * dev_mc_sync - Synchronize device's multicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_tx_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_multicast_list
+ * or dev->set_rx_mode function of layered software devices.
+ */
+int dev_mc_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ netif_addr_lock_bh(to);
+ err = __dev_addr_sync(&to->mc_list, &to->mc_count,
+ &from->mc_list, &from->mc_count);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+
+ return err;
+}
+EXPORT_SYMBOL(dev_mc_sync);
+
+
+/**
+ * dev_mc_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_mc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_mc_unsync(struct net_device *to, struct net_device *from)
+{
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+
+ __dev_addr_unsync(&to->mc_list, &to->mc_count,
+ &from->mc_list, &from->mc_count);
+ __dev_set_rx_mode(to);
+
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_mc_unsync);
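+
+/*
+ * Illustrative sketch (editor's note, not part of the original file): a
+ * stacked device (VLAN-style) mirrors its own multicast list onto the
+ * underlying device from its rx-mode hook and tears it down on stop. The
+ * my_* names are hypothetical.
+ *
+ *	static void my_set_rx_mode(struct net_device *dev)
+ *	{
+ *		struct net_device *lower = my_priv(dev)->real_dev;
+ *
+ *		dev_mc_sync(lower, dev);
+ *	}
+ *
+ *	static int my_stop(struct net_device *dev)
+ *	{
+ *		dev_mc_unsync(my_priv(dev)->real_dev, dev);
+ *		return 0;
+ *	}
+ */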
+
+#ifdef CONFIG_PROC_FS
+static int dev_mc_seq_show(struct seq_file *seq, void *v)
+{
+ struct dev_addr_list *m;
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ return 0;
+
+ netif_addr_lock_bh(dev);
+ for (m = dev->mc_list; m; m = m->next) {
+ int i;
+
+ seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
+ dev->name, m->dmi_users, m->dmi_gusers);
+
+ for (i = 0; i < m->dmi_addrlen; i++)
+ seq_printf(seq, "%02x", m->dmi_addr[i]);
+
+ seq_putc(seq, '\n');
+ }
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static const struct seq_operations dev_mc_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = dev_mc_seq_show,
+};
+
+static int dev_mc_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &dev_mc_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations dev_mc_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = dev_mc_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+#endif
+
+static int __net_init dev_mc_net_init(struct net *net)
+{
+ if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+static void __net_exit dev_mc_net_exit(struct net *net)
+{
+ proc_net_remove(net, "dev_mcast");
+}
+
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
+ .init = dev_mc_net_init,
+ .exit = dev_mc_net_exit,
+};
+
+void __init dev_mcast_init(void)
+{
+ register_pernet_subsys(&dev_mc_net_ops);
+}
+
+EXPORT_SYMBOL(dev_mc_add);
+EXPORT_SYMBOL(dev_mc_delete);
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/ethtool.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/ethtool.c.svn-base
new file mode 100644
index 00000000..947710a3
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/ethtool.c.svn-base
@@ -0,0 +1,1091 @@
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+/*
+ * Some useful ethtool_ops methods that are device-independent.
+ * If we find that all drivers want to do the same thing here,
+ * we can turn these into dev_() function calls.
+ */
+
+u32 ethtool_op_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+u32 ethtool_op_get_tx_csum(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_ALL_CSUM) != 0;
+}
+
+int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
+int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ return 0;
+}
+
+u32 ethtool_op_get_sg(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_SG) != 0;
+}
+
+int ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_SG;
+ else
+ dev->features &= ~NETIF_F_SG;
+
+ return 0;
+}
+
+u32 ethtool_op_get_tso(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+
+int ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_TSO;
+ else
+ dev->features &= ~NETIF_F_TSO;
+
+ return 0;
+}
+
+u32 ethtool_op_get_ufo(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_UFO) != 0;
+}
+
+int ethtool_op_set_ufo(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_UFO;
+ else
+ dev->features &= ~NETIF_F_UFO;
+ return 0;
+}
+
+/* the following list of flags is the same as their associated
+ * NETIF_F_xxx values in include/linux/netdevice.h
+ */
+static const u32 flags_dup_features =
+ ETH_FLAG_LRO;
+
+u32 ethtool_op_get_flags(struct net_device *dev)
+{
+ /* in the future, this function will probably contain additional
+ * handling for flags which are not so easily handled
+ * by a simple masking operation
+ */
+
+ return dev->features & flags_dup_features;
+}
+
+int ethtool_op_set_flags(struct net_device *dev, u32 data)
+{
+ if (data & ETH_FLAG_LRO)
+ dev->features |= NETIF_F_LRO;
+ else
+ dev->features &= ~NETIF_F_LRO;
+
+ return 0;
+}
+
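+/*
+ * Illustrative sketch (editor's note, not part of the original file): a driver
+ * with no special requirements can wire these generic helpers straight into
+ * its ethtool_ops instead of duplicating the feature-flag bookkeeping; the
+ * structure name is hypothetical.
+ *
+ *	static const struct ethtool_ops my_ethtool_ops = {
+ *		.get_link	= ethtool_op_get_link,
+ *		.get_tx_csum	= ethtool_op_get_tx_csum,
+ *		.set_tx_csum	= ethtool_op_set_tx_csum,
+ *		.get_sg		= ethtool_op_get_sg,
+ *		.set_sg		= ethtool_op_set_sg,
+ *		.get_tso	= ethtool_op_get_tso,
+ *		.set_tso	= ethtool_op_set_tso,
+ *	};
+ *
+ * The driver then installs it at probe time with
+ * SET_ETHTOOL_OPS(netdev, &my_ethtool_ops).
+ */
+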
+/* Handlers for each ethtool command */
+
+static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ int err;
+
+ if (!dev->ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+
+ err = dev->ethtool_ops->get_settings(dev, &cmd);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_cmd cmd;
+
+ if (!dev->ethtool_ops->set_settings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_settings(dev, &cmd);
+}
+
+static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_drvinfo info;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_drvinfo)
+ return -EOPNOTSUPP;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GDRVINFO;
+ ops->get_drvinfo(dev, &info);
+
+ if (ops->get_sset_count) {
+ int rc;
+
+ rc = ops->get_sset_count(dev, ETH_SS_TEST);
+ if (rc >= 0)
+ info.testinfo_len = rc;
+ rc = ops->get_sset_count(dev, ETH_SS_STATS);
+ if (rc >= 0)
+ info.n_stats = rc;
+ rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
+ if (rc >= 0)
+ info.n_priv_flags = rc;
+ } else {
+ /* code path for obsolete hooks */
+
+ if (ops->self_test_count)
+ info.testinfo_len = ops->self_test_count(dev);
+ if (ops->get_stats_count)
+ info.n_stats = ops->get_stats_count(dev);
+ }
+ if (ops->get_regs_len)
+ info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_eeprom_len)
+ info.eedump_len = ops->get_eeprom_len(dev);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_rxhash(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rxnfc cmd;
+
+ if (!dev->ethtool_ops->set_rxhash)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_rxhash(dev, &cmd);
+}
+
+static int ethtool_get_rxhash(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rxnfc info;
+
+ if (!dev->ethtool_ops->get_rxhash)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&info, useraddr, sizeof(info)))
+ return -EFAULT;
+
+ dev->ethtool_ops->get_rxhash(dev, &info);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_regs regs;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void *regbuf;
+ int reglen, ret;
+
+ if (!ops->get_regs || !ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&regs, useraddr, sizeof(regs)))
+ return -EFAULT;
+
+ reglen = ops->get_regs_len(dev);
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+ regbuf = kmalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
+ ops->get_regs(dev, &regs, regbuf);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &regs, sizeof(regs)))
+ goto out;
+ useraddr += offsetof(struct ethtool_regs, data);
+ if (copy_to_user(useraddr, regbuf, regs.len))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(regbuf);
+ return ret;
+}
+
+static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+ if (!dev->ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_wolinfo wol;
+
+ if (!dev->ethtool_ops->set_wol)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&wol, useraddr, sizeof(wol)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_wol(dev, &wol);
+}
+
+static int ethtool_nway_reset(struct net_device *dev)
+{
+ if (!dev->ethtool_ops->nway_reset)
+ return -EOPNOTSUPP;
+
+ return dev->ethtool_ops->nway_reset(dev);
+}
+
+static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void __user *userbuf = useraddr + sizeof(eeprom);
+ u32 bytes_remaining;
+ u8 *data;
+ int ret = 0;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(PAGE_SIZE, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ bytes_remaining = eeprom.len;
+ while (bytes_remaining > 0) {
+ eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
+
+ ret = ops->get_eeprom(dev, &eeprom, data);
+ if (ret)
+ break;
+ if (copy_to_user(userbuf, data, eeprom.len)) {
+ ret = -EFAULT;
+ break;
+ }
+ userbuf += eeprom.len;
+ eeprom.offset += eeprom.len;
+ bytes_remaining -= eeprom.len;
+ }
+
+ eeprom.len = userbuf - (useraddr + sizeof(eeprom));
+ eeprom.offset -= eeprom.len;
+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+ ret = -EFAULT;
+
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void __user *userbuf = useraddr + sizeof(eeprom);
+ u32 bytes_remaining;
+ u8 *data;
+ int ret = 0;
+
+ if (!ops->set_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(PAGE_SIZE, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ bytes_remaining = eeprom.len;
+ while (bytes_remaining > 0) {
+ eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
+
+ if (copy_from_user(data, userbuf, eeprom.len)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = ops->set_eeprom(dev, &eeprom, data);
+ if (ret)
+ break;
+ userbuf += eeprom.len;
+ eeprom.offset += eeprom.len;
+ bytes_remaining -= eeprom.len;
+ }
+
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+ if (!dev->ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_coalesce(dev, &coalesce);
+
+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_coalesce coalesce;
+
+ if (!dev->ethtool_ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+ if (!dev->ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_ringparam(dev, &ringparam);
+
+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_ringparam ringparam;
+
+ if (!dev->ethtool_ops->set_ringparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+ if (!dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_pauseparam pauseparam;
+
+ if (!dev->ethtool_ops->set_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+static int __ethtool_set_sg(struct net_device *dev, u32 data)
+{
+ int err;
+
+ if (!data && dev->ethtool_ops->set_tso) {
+ err = dev->ethtool_ops->set_tso(dev, 0);
+ if (err)
+ return err;
+ }
+
+ if (!data && dev->ethtool_ops->set_ufo) {
+ err = dev->ethtool_ops->set_ufo(dev, 0);
+ if (err)
+ return err;
+ }
+ return dev->ethtool_ops->set_sg(dev, data);
+}
+
+static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+ int err;
+
+ if (!dev->ethtool_ops->set_tx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (!edata.data && dev->ethtool_ops->set_sg) {
+ err = __ethtool_set_sg(dev, 0);
+ if (err)
+ return err;
+ }
+
+ return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_rx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (!edata.data && dev->ethtool_ops->set_sg)
+ dev->features &= ~NETIF_F_GRO;
+
+ return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+}
+
+static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_sg)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data &&
+ !(dev->features & NETIF_F_ALL_CSUM))
+ return -EINVAL;
+
+ return __ethtool_set_sg(dev, edata.data);
+}
+
+static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_tso)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data && !(dev->features & NETIF_F_SG))
+ return -EINVAL;
+
+ return dev->ethtool_ops->set_tso(dev, edata.data);
+}
+
+static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_ufo)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.data && !(dev->features & NETIF_F_SG))
+ return -EINVAL;
+ if (edata.data && !(dev->features & NETIF_F_HW_CSUM))
+ return -EINVAL;
+ return dev->ethtool_ops->set_ufo(dev, edata.data);
+}
+
+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GGSO };
+
+ edata.data = dev->features & NETIF_F_GSO;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.data)
+ dev->features |= NETIF_F_GSO;
+ else
+ dev->features &= ~NETIF_F_GSO;
+ return 0;
+}
+
+static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GGRO };
+
+ edata.data = dev->features & NETIF_F_GRO;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data) {
+ if (!dev->ethtool_ops->get_rx_csum ||
+ !dev->ethtool_ops->get_rx_csum(dev))
+ return -EINVAL;
+ dev->features |= NETIF_F_GRO;
+ } else
+ dev->features &= ~NETIF_F_GRO;
+
+ return 0;
+}
+
+static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_test test;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u64 *data;
+ int ret, test_len;
+
+ if (!ops->self_test)
+ return -EOPNOTSUPP;
+ if (!ops->get_sset_count && !ops->self_test_count)
+ return -EOPNOTSUPP;
+
+ if (ops->get_sset_count)
+ test_len = ops->get_sset_count(dev, ETH_SS_TEST);
+ else
+ /* code path for obsolete hook */
+ test_len = ops->self_test_count(dev);
+ if (test_len < 0)
+ return test_len;
+ WARN_ON(test_len == 0);
+
+ if (copy_from_user(&test, useraddr, sizeof(test)))
+ return -EFAULT;
+
+ test.len = test_len;
+ data = kmalloc(test_len * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->self_test(dev, &test, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &test, sizeof(test)))
+ goto out;
+ useraddr += sizeof(test);
+ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_strings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ if (ops->get_sset_count) {
+ ret = ops->get_sset_count(dev, gstrings.string_set);
+ if (ret < 0)
+ return ret;
+
+ gstrings.len = ret;
+ } else {
+ /* code path for obsolete hooks */
+
+ switch (gstrings.string_set) {
+ case ETH_SS_TEST:
+ if (!ops->self_test_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->self_test_count(dev);
+ break;
+ case ETH_SS_STATS:
+ if (!ops->get_stats_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->get_stats_count(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_strings(dev, gstrings.string_set, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_value id;
+
+ if (!dev->ethtool_ops->phys_id)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&id, useraddr, sizeof(id)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_stats stats;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u64 *data;
+ int ret, n_stats;
+
+ if (!ops->get_ethtool_stats)
+ return -EOPNOTSUPP;
+ if (!ops->get_sset_count && !ops->get_stats_count)
+ return -EOPNOTSUPP;
+
+ if (ops->get_sset_count)
+ n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
+ else
+ /* code path for obsolete hook */
+ n_stats = ops->get_stats_count(dev);
+ if (n_stats < 0)
+ return n_stats;
+ WARN_ON(n_stats == 0);
+
+ if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ return -EFAULT;
+
+ stats.n_stats = n_stats;
+ data = kmalloc(n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_ethtool_stats(dev, &stats, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ goto out;
+ useraddr += sizeof(stats);
+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_perm_addr epaddr;
+
+ if (copy_from_user(&epaddr, useraddr, sizeof(epaddr)))
+ return -EFAULT;
+
+ if (epaddr.size < dev->addr_len)
+ return -ETOOSMALL;
+ epaddr.size = dev->addr_len;
+
+ if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
+ return -EFAULT;
+ useraddr += sizeof(epaddr);
+ if (copy_to_user(useraddr, dev->perm_addr, epaddr.size))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
+ u32 cmd, u32 (*actor)(struct net_device *))
+{
+ struct ethtool_value edata = { cmd };
+
+ if (!actor)
+ return -EOPNOTSUPP;
+
+ edata.data = actor(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr,
+ void (*actor)(struct net_device *, u32))
+{
+ struct ethtool_value edata;
+
+ if (!actor)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ actor(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
+ int (*actor)(struct net_device *, u32))
+{
+ struct ethtool_value edata;
+
+ if (!actor)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return actor(dev, edata.data);
+}
+
+/* The main entry point in this file. Called from net/core/dev.c */
+
+int dev_ethtool(struct net *net, struct ifreq *ifr)
+{
+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+ void __user *useraddr = ifr->ifr_data;
+ u32 ethcmd;
+ int rc;
+ unsigned long old_features;
+
+ if (!dev || !netif_device_present(dev))
+ return -ENODEV;
+
+ if (!dev->ethtool_ops)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ return -EFAULT;
+
+ /* Allow some commands to be done by anyone */
+ switch(ethcmd) {
+ case ETHTOOL_GDRVINFO:
+ case ETHTOOL_GMSGLVL:
+ case ETHTOOL_GCOALESCE:
+ case ETHTOOL_GRINGPARAM:
+ case ETHTOOL_GPAUSEPARAM:
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_GSG:
+ case ETHTOOL_GSTRINGS:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GPERMADDR:
+ case ETHTOOL_GUFO:
+ case ETHTOOL_GGSO:
+ case ETHTOOL_GFLAGS:
+ case ETHTOOL_GPFLAGS:
+ case ETHTOOL_GRXFH:
+ break;
+ default:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if (dev->ethtool_ops->begin)
+ if ((rc = dev->ethtool_ops->begin(dev)) < 0)
+ return rc;
+
+ old_features = dev->features;
+
+ switch (ethcmd) {
+ case ETHTOOL_GSET:
+ rc = ethtool_get_settings(dev, useraddr);
+ break;
+ case ETHTOOL_SSET:
+ rc = ethtool_set_settings(dev, useraddr);
+ break;
+ case ETHTOOL_GDRVINFO:
+ rc = ethtool_get_drvinfo(dev, useraddr);
+ break;
+ case ETHTOOL_GREGS:
+ rc = ethtool_get_regs(dev, useraddr);
+ break;
+ case ETHTOOL_GWOL:
+ rc = ethtool_get_wol(dev, useraddr);
+ break;
+ case ETHTOOL_SWOL:
+ rc = ethtool_set_wol(dev, useraddr);
+ break;
+ case ETHTOOL_GMSGLVL:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_msglevel);
+ break;
+ case ETHTOOL_SMSGLVL:
+ rc = ethtool_set_value_void(dev, useraddr,
+ dev->ethtool_ops->set_msglevel);
+ break;
+ case ETHTOOL_NWAY_RST:
+ rc = ethtool_nway_reset(dev);
+ break;
+ case ETHTOOL_GLINK:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_link);
+ break;
+ case ETHTOOL_GEEPROM:
+ rc = ethtool_get_eeprom(dev, useraddr);
+ break;
+ case ETHTOOL_SEEPROM:
+ rc = ethtool_set_eeprom(dev, useraddr);
+ break;
+ case ETHTOOL_GCOALESCE:
+ rc = ethtool_get_coalesce(dev, useraddr);
+ break;
+ case ETHTOOL_SCOALESCE:
+ rc = ethtool_set_coalesce(dev, useraddr);
+ break;
+ case ETHTOOL_GRINGPARAM:
+ rc = ethtool_get_ringparam(dev, useraddr);
+ break;
+ case ETHTOOL_SRINGPARAM:
+ rc = ethtool_set_ringparam(dev, useraddr);
+ break;
+ case ETHTOOL_GPAUSEPARAM:
+ rc = ethtool_get_pauseparam(dev, useraddr);
+ break;
+ case ETHTOOL_SPAUSEPARAM:
+ rc = ethtool_set_pauseparam(dev, useraddr);
+ break;
+ case ETHTOOL_GRXCSUM:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_rx_csum);
+ break;
+ case ETHTOOL_SRXCSUM:
+ rc = ethtool_set_rx_csum(dev, useraddr);
+ break;
+ case ETHTOOL_GTXCSUM:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_tx_csum ?
+ dev->ethtool_ops->get_tx_csum :
+ ethtool_op_get_tx_csum));
+ break;
+ case ETHTOOL_STXCSUM:
+ rc = ethtool_set_tx_csum(dev, useraddr);
+ break;
+ case ETHTOOL_GSG:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_sg ?
+ dev->ethtool_ops->get_sg :
+ ethtool_op_get_sg));
+ break;
+ case ETHTOOL_SSG:
+ rc = ethtool_set_sg(dev, useraddr);
+ break;
+ case ETHTOOL_GTSO:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_tso ?
+ dev->ethtool_ops->get_tso :
+ ethtool_op_get_tso));
+ break;
+ case ETHTOOL_STSO:
+ rc = ethtool_set_tso(dev, useraddr);
+ break;
+ case ETHTOOL_TEST:
+ rc = ethtool_self_test(dev, useraddr);
+ break;
+ case ETHTOOL_GSTRINGS:
+ rc = ethtool_get_strings(dev, useraddr);
+ break;
+ case ETHTOOL_PHYS_ID:
+ rc = ethtool_phys_id(dev, useraddr);
+ break;
+ case ETHTOOL_GSTATS:
+ rc = ethtool_get_stats(dev, useraddr);
+ break;
+ case ETHTOOL_GPERMADDR:
+ rc = ethtool_get_perm_addr(dev, useraddr);
+ break;
+ case ETHTOOL_GUFO:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_ufo ?
+ dev->ethtool_ops->get_ufo :
+ ethtool_op_get_ufo));
+ break;
+ case ETHTOOL_SUFO:
+ rc = ethtool_set_ufo(dev, useraddr);
+ break;
+ case ETHTOOL_GGSO:
+ rc = ethtool_get_gso(dev, useraddr);
+ break;
+ case ETHTOOL_SGSO:
+ rc = ethtool_set_gso(dev, useraddr);
+ break;
+ case ETHTOOL_GFLAGS:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_flags);
+ break;
+ case ETHTOOL_SFLAGS:
+ rc = ethtool_set_value(dev, useraddr,
+ dev->ethtool_ops->set_flags);
+ break;
+ case ETHTOOL_GPFLAGS:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_priv_flags);
+ break;
+ case ETHTOOL_SPFLAGS:
+ rc = ethtool_set_value(dev, useraddr,
+ dev->ethtool_ops->set_priv_flags);
+ break;
+ case ETHTOOL_GRXFH:
+ rc = ethtool_get_rxhash(dev, useraddr);
+ break;
+ case ETHTOOL_SRXFH:
+ rc = ethtool_set_rxhash(dev, useraddr);
+ break;
+ case ETHTOOL_GGRO:
+ rc = ethtool_get_gro(dev, useraddr);
+ break;
+ case ETHTOOL_SGRO:
+ rc = ethtool_set_gro(dev, useraddr);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ if (dev->ethtool_ops->complete)
+ dev->ethtool_ops->complete(dev);
+
+ if (old_features != dev->features)
+ netdev_features_change(dev);
+
+ return rc;
+}
+
+EXPORT_SYMBOL(ethtool_op_get_link);
+EXPORT_SYMBOL(ethtool_op_get_sg);
+EXPORT_SYMBOL(ethtool_op_get_tso);
+EXPORT_SYMBOL(ethtool_op_get_tx_csum);
+EXPORT_SYMBOL(ethtool_op_set_sg);
+EXPORT_SYMBOL(ethtool_op_set_tso);
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
+EXPORT_SYMBOL(ethtool_op_set_ufo);
+EXPORT_SYMBOL(ethtool_op_get_ufo);
+EXPORT_SYMBOL(ethtool_op_set_flags);
+EXPORT_SYMBOL(ethtool_op_get_flags);
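
A note on the dispatch above: dev_ethtool() resolves each ETHTOOL_* command either to a driver hook in dev->ethtool_ops or, for a few "get" commands (GTXCSUM, GSG, GTSO, GUFO), to the generic ethtool_op_* fallbacks exported at the end of this file. The sketch below shows how a hypothetical driver might wire those generic helpers into its ethtool_ops; the structure fields and helper functions are the real ones exported above, but the "foo" driver and its get_drvinfo body are illustrative only.

/* Hypothetical driver glue, for illustration only. */
static void foo_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "foo", sizeof(info->driver));	/* hypothetical name */
	strlcpy(info->version, "0.1", sizeof(info->version));
}

static const struct ethtool_ops foo_ethtool_ops = {
	.get_drvinfo	= foo_get_drvinfo,
	.get_link	= ethtool_op_get_link,	/* generic: netif_carrier_ok() */
	.get_sg		= ethtool_op_get_sg,
	.set_sg		= ethtool_op_set_sg,
	.get_tx_csum	= ethtool_op_get_tx_csum,
	.set_tx_csum	= ethtool_op_set_tx_csum,
	.get_tso	= ethtool_op_get_tso,
	.set_tso	= ethtool_op_set_tso,
};

/* In the driver's probe routine:  dev->ethtool_ops = &foo_ethtool_ops; */
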
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/filter.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/filter.c.svn-base
new file mode 100644
index 00000000..d1d779ca
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/filter.c.svn-base
@@ -0,0 +1,541 @@
+/*
+ * Linux Socket Filter - Kernel level socket filtering
+ *
+ * Author:
+ * Jay Schulist <jschlst@samba.org>
+ *
+ * Based on the design of:
+ * - The Berkeley Packet Filter
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Andi Kleen - Fix a few bad bugs and races.
+ * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fcntl.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/if_packet.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/netlink.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <linux/filter.h>
+
+/* No hurry in this branch */
+static void *__load_pointer(struct sk_buff *skb, int k)
+{
+ u8 *ptr = NULL;
+
+ if (k >= SKF_NET_OFF)
+ ptr = skb_network_header(skb) + k - SKF_NET_OFF;
+ else if (k >= SKF_LL_OFF)
+ ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+
+ if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
+ return ptr;
+ return NULL;
+}
+
+static inline void *load_pointer(struct sk_buff *skb, int k,
+ unsigned int size, void *buffer)
+{
+ if (k >= 0)
+ return skb_header_pointer(skb, k, size, buffer);
+ else {
+ if (k >= SKF_AD_OFF)
+ return NULL;
+ return __load_pointer(skb, k);
+ }
+}
+
+/**
+ * sk_filter - run a packet through a socket filter
+ * @sk: sock associated with &sk_buff
+ * @skb: buffer to filter
+ *
+ * Run the filter code and then cut skb->data to the correct size returned by
+ * sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is smaller
+ * than pkt_len we keep the whole skb->data. This is the socket-level
+ * wrapper to sk_run_filter. It returns 0 if the packet should
+ * be accepted or -EPERM if the packet should be tossed.
+ *
+ */
+int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+ struct sk_filter *filter;
+
+ err = security_sock_rcv_skb(sk, skb);
+ if (err)
+ return err;
+
+ rcu_read_lock_bh();
+ filter = rcu_dereference(sk->sk_filter);
+ if (filter) {
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns,
+ filter->len);
+ err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ }
+ rcu_read_unlock_bh();
+
+ return err;
+}
+EXPORT_SYMBOL(sk_filter);
+
+/**
+ * sk_run_filter - run a filter on a socket
+ * @skb: buffer to run the filter on
+ * @filter: filter to apply
+ * @flen: length of filter
+ *
+ * Decode and apply filter instructions to the skb->data.
+ * Return length to keep, 0 for none. skb is the data we are
+ * filtering, filter is the array of filter instructions, and
+ * len is the number of filter blocks in the array.
+ */
+unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+{
+ struct sock_filter *fentry; /* We walk down these */
+ void *ptr;
+ u32 A = 0; /* Accumulator */
+ u32 X = 0; /* Index Register */
+ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
+ u32 tmp;
+ int k;
+ int pc;
+
+ /*
+ * Process array of filter instructions.
+ */
+ for (pc = 0; pc < flen; pc++) {
+ fentry = &filter[pc];
+
+ switch (fentry->code) {
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += fentry->k;
+ continue;
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= fentry->k;
+ continue;
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= fentry->k;
+ continue;
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return 0;
+ A /= X;
+ continue;
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= fentry->k;
+ continue;
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= fentry->k;
+ continue;
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= fentry->k;
+ continue;
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= fentry->k;
+ continue;
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= fentry->k;
+ continue;
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+ case BPF_JMP|BPF_JA:
+ pc += fentry->k;
+ continue;
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = fentry->k;
+load_w:
+ ptr = load_pointer(skb, k, 4, &tmp);
+ if (ptr != NULL) {
+ A = get_unaligned_be32(ptr);
+ continue;
+ }
+ break;
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = fentry->k;
+load_h:
+ ptr = load_pointer(skb, k, 2, &tmp);
+ if (ptr != NULL) {
+ A = get_unaligned_be16(ptr);
+ continue;
+ }
+ break;
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = fentry->k;
+load_b:
+ ptr = load_pointer(skb, k, 1, &tmp);
+ if (ptr != NULL) {
+ A = *(u8 *)ptr;
+ continue;
+ }
+ break;
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = skb->len;
+ continue;
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = skb->len;
+ continue;
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + fentry->k;
+ goto load_w;
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + fentry->k;
+ goto load_h;
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + fentry->k;
+ goto load_b;
+ case BPF_LDX|BPF_B|BPF_MSH:
+ ptr = load_pointer(skb, fentry->k, 1, &tmp);
+ if (ptr != NULL) {
+ X = (*(u8 *)ptr & 0xf) << 2;
+ continue;
+ }
+ return 0;
+ case BPF_LD|BPF_IMM:
+ A = fentry->k;
+ continue;
+ case BPF_LDX|BPF_IMM:
+ X = fentry->k;
+ continue;
+ case BPF_LD|BPF_MEM:
+ A = mem[fentry->k];
+ continue;
+ case BPF_LDX|BPF_MEM:
+ X = mem[fentry->k];
+ continue;
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ case BPF_RET|BPF_K:
+ return fentry->k;
+ case BPF_RET|BPF_A:
+ return A;
+ case BPF_ST:
+ mem[fentry->k] = A;
+ continue;
+ case BPF_STX:
+ mem[fentry->k] = X;
+ continue;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ /*
+ * Handle ancillary data, which are impossible
+		 * (or very difficult) to obtain by parsing the packet contents.
+ */
+ switch (k-SKF_AD_OFF) {
+ case SKF_AD_PROTOCOL:
+ A = ntohs(skb->protocol);
+ continue;
+ case SKF_AD_PKTTYPE:
+ A = skb->pkt_type;
+ continue;
+ case SKF_AD_IFINDEX:
+ A = skb->dev->ifindex;
+ continue;
+ case SKF_AD_NLATTR: {
+ struct nlattr *nla;
+
+ if (skb_is_nonlinear(skb))
+ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = nla_find((struct nlattr *)&skb->data[A],
+ skb->len - A, X);
+ if (nla)
+ A = (void *)nla - (void *)skb->data;
+ else
+ A = 0;
+ continue;
+ }
+ case SKF_AD_NLATTR_NEST: {
+ struct nlattr *nla;
+
+ if (skb_is_nonlinear(skb))
+ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = (struct nlattr *)&skb->data[A];
+			/* the nested attribute must lie within the packet */
+			if (nla->nla_len > skb->len - A)
+ return 0;
+
+ nla = nla_find_nested(nla, X);
+ if (nla)
+ A = (void *)nla - (void *)skb->data;
+ else
+ A = 0;
+ continue;
+ }
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sk_run_filter);
+
+/**
+ * sk_chk_filter - verify socket filter code
+ * @filter: filter to verify
+ * @flen: length of filter
+ *
+ * Check the user's filter code. If we let some ugly
+ * filter code slip through, kaboom! The filter must contain
+ * no references or jumps that are out of range, no illegal
+ * instructions, and must end with a RET instruction.
+ *
+ * All jumps are forward as they are not signed.
+ *
+ * Returns 0 if the rule set is legal or -EINVAL if not.
+ */
+int sk_chk_filter(struct sock_filter *filter, int flen)
+{
+ struct sock_filter *ftest;
+ int pc;
+
+ if (flen == 0 || flen > BPF_MAXINSNS)
+ return -EINVAL;
+
+ /* check the filter code now */
+ for (pc = 0; pc < flen; pc++) {
+ ftest = &filter[pc];
+
+ /* Only allow valid instructions */
+ switch (ftest->code) {
+ case BPF_ALU|BPF_ADD|BPF_K:
+ case BPF_ALU|BPF_ADD|BPF_X:
+ case BPF_ALU|BPF_SUB|BPF_K:
+ case BPF_ALU|BPF_SUB|BPF_X:
+ case BPF_ALU|BPF_MUL|BPF_K:
+ case BPF_ALU|BPF_MUL|BPF_X:
+ case BPF_ALU|BPF_DIV|BPF_X:
+ case BPF_ALU|BPF_AND|BPF_K:
+ case BPF_ALU|BPF_AND|BPF_X:
+ case BPF_ALU|BPF_OR|BPF_K:
+ case BPF_ALU|BPF_OR|BPF_X:
+ case BPF_ALU|BPF_LSH|BPF_K:
+ case BPF_ALU|BPF_LSH|BPF_X:
+ case BPF_ALU|BPF_RSH|BPF_K:
+ case BPF_ALU|BPF_RSH|BPF_X:
+ case BPF_ALU|BPF_NEG:
+ case BPF_LD|BPF_W|BPF_ABS:
+ case BPF_LD|BPF_H|BPF_ABS:
+ case BPF_LD|BPF_B|BPF_ABS:
+ case BPF_LD|BPF_W|BPF_LEN:
+ case BPF_LD|BPF_W|BPF_IND:
+ case BPF_LD|BPF_H|BPF_IND:
+ case BPF_LD|BPF_B|BPF_IND:
+ case BPF_LD|BPF_IMM:
+ case BPF_LDX|BPF_W|BPF_LEN:
+ case BPF_LDX|BPF_B|BPF_MSH:
+ case BPF_LDX|BPF_IMM:
+ case BPF_MISC|BPF_TAX:
+ case BPF_MISC|BPF_TXA:
+ case BPF_RET|BPF_K:
+ case BPF_RET|BPF_A:
+ break;
+
+ /* Some instructions need special checks */
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ /* check for division by zero */
+ if (ftest->k == 0)
+ return -EINVAL;
+ break;
+
+ case BPF_LD|BPF_MEM:
+ case BPF_LDX|BPF_MEM:
+ case BPF_ST:
+ case BPF_STX:
+ /* check for invalid memory addresses */
+ if (ftest->k >= BPF_MEMWORDS)
+ return -EINVAL;
+ break;
+
+ case BPF_JMP|BPF_JA:
+ /*
+ * Note, the large ftest->k might cause loops.
+ * Compare this with conditional jumps below,
+ * where offsets are limited. --ANK (981016)
+ */
+ if (ftest->k >= (unsigned)(flen-pc-1))
+ return -EINVAL;
+ break;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ case BPF_JMP|BPF_JGE|BPF_K:
+ case BPF_JMP|BPF_JGE|BPF_X:
+ case BPF_JMP|BPF_JGT|BPF_K:
+ case BPF_JMP|BPF_JGT|BPF_X:
+ case BPF_JMP|BPF_JSET|BPF_K:
+ case BPF_JMP|BPF_JSET|BPF_X:
+ /* for conditionals both must be safe */
+ if (pc + ftest->jt + 1 >= flen ||
+ pc + ftest->jf + 1 >= flen)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(sk_chk_filter);
+
+/**
+ * sk_filter_rcu_release: Release a socket filter by rcu_head
+ * @rcu: rcu_head that contains the sk_filter to free
+ */
+static void sk_filter_rcu_release(struct rcu_head *rcu)
+{
+ struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
+
+ sk_filter_release(fp);
+}
+
+static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
+{
+ unsigned int size = sk_filter_len(fp);
+
+ atomic_sub(size, &sk->sk_omem_alloc);
+ call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+}
+
+/**
+ * sk_attach_filter - attach a socket filter
+ * @fprog: the filter program
+ * @sk: the socket to use
+ *
+ * Attach the user's filter code. We first run some sanity checks on
+ * it to make sure it does not explode on us later. If an error
+ * occurs or there is insufficient memory for the filter a negative
+ * errno code is returned. On success the return is zero.
+ */
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+{
+ struct sk_filter *fp, *old_fp;
+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+ int err;
+
+ /* Make sure new filter is there and in the right amounts. */
+ if (fprog->filter == NULL)
+ return -EINVAL;
+
+ fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ return -ENOMEM;
+ if (copy_from_user(fp->insns, fprog->filter, fsize)) {
+ sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+ return -EFAULT;
+ }
+
+ atomic_set(&fp->refcnt, 1);
+ fp->len = fprog->len;
+
+ err = sk_chk_filter(fp->insns, fp->len);
+ if (err) {
+ sk_filter_uncharge(sk, fp);
+ return err;
+ }
+
+ rcu_read_lock_bh();
+ old_fp = rcu_dereference(sk->sk_filter);
+ rcu_assign_pointer(sk->sk_filter, fp);
+ rcu_read_unlock_bh();
+
+ if (old_fp)
+ sk_filter_delayed_uncharge(sk, old_fp);
+ return 0;
+}
+
+int sk_detach_filter(struct sock *sk)
+{
+ int ret = -ENOENT;
+ struct sk_filter *filter;
+
+ rcu_read_lock_bh();
+ filter = rcu_dereference(sk->sk_filter);
+ if (filter) {
+ rcu_assign_pointer(sk->sk_filter, NULL);
+ sk_filter_delayed_uncharge(sk, filter);
+ ret = 0;
+ }
+ rcu_read_unlock_bh();
+ return ret;
+}
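
For context on how the code above is reached: user space hands a classic BPF program to the kernel with setsockopt(SO_ATTACH_FILTER); sk_attach_filter() copies it in, validates it with sk_chk_filter() and publishes it under RCU, after which sk_filter()/sk_run_filter() interpret it for packets queued to the socket. A minimal user-space sketch follows; it assumes an AF_PACKET socket bound to an Ethernet device, and the filter simply keeps ARP frames and drops everything else.

/* User-space sketch (assumptions: AF_PACKET socket, Ethernet framing). */
#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

static struct sock_filter arp_only[] = {
	BPF_STMT(BPF_LD | BPF_H | BPF_ABS, 12),			/* A = EtherType     */
	BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_ARP, 0, 1),	/* ARP?              */
	BPF_STMT(BPF_RET | BPF_K, 0xffff),			/* yes: keep frame   */
	BPF_STMT(BPF_RET | BPF_K, 0),				/* no: drop frame    */
};

static int attach_arp_filter(int fd)
{
	struct sock_fprog prog = {
		.len	= sizeof(arp_only) / sizeof(arp_only[0]),
		.filter	= arp_only,
	};

	/* Ends up in sk_attach_filter(), which runs sk_chk_filter()
	 * before swapping the new program in under RCU. */
	return setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
}
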
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/kmap_skb.h.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/kmap_skb.h.svn-base
new file mode 100644
index 00000000..283c2b99
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/kmap_skb.h.svn-base
@@ -0,0 +1,19 @@
+#include <linux/highmem.h>
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+ BUG_ON(in_irq());
+
+ local_bh_disable();
+#endif
+ return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+ local_bh_enable();
+#endif
+}
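
The small header above exists because paged skb fragments may live in highmem: kmap_skb_frag() maps a fragment's page through the KM_SKB_DATA_SOFTIRQ atomic-kmap slot (on highmem configurations it disables bottom halves first, since softirq code uses the same per-CPU slot), and kunmap_skb_frag() undoes both steps. The sketch below shows the usual calling pattern, essentially what skb_copy_bits() does for paged data; the helper name and its bounds assumption (off + len within the fragment) are illustrative, not part of the tree.

/* Illustrative only: copy 'len' bytes starting at 'off' within one paged
 * fragment into 'dst'.  Caller guarantees off + len <= frag->size. */
static void copy_from_skb_frag(const skb_frag_t *frag, void *dst,
			       unsigned int off, unsigned int len)
{
	u8 *vaddr = kmap_skb_frag(frag);	/* maps the page, BHs disabled */

	memcpy(dst, vaddr + frag->page_offset + off, len);
	kunmap_skb_frag(vaddr);			/* unmap and re-enable BHs */
}
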
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/neighbour.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/neighbour.c.svn-base
new file mode 100644
index 00000000..278a142d
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/neighbour.c.svn-base
@@ -0,0 +1,2824 @@
+/*
+ * Generic address resolution entity
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Fixes:
+ * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
+ * Harald Welte Add neighbour cache statistics like rtstat
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+#include <linux/times.h>
+#include <net/net_namespace.h>
+#include <net/neighbour.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/netevent.h>
+#include <net/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+
+#define NEIGH_DEBUG 1
+
+#define NEIGH_PRINTK(x...) printk(x)
+#define NEIGH_NOPRINTK(x...) do { ; } while(0)
+#define NEIGH_PRINTK0 NEIGH_PRINTK
+#define NEIGH_PRINTK1 NEIGH_NOPRINTK
+#define NEIGH_PRINTK2 NEIGH_NOPRINTK
+
+#if NEIGH_DEBUG >= 1
+#undef NEIGH_PRINTK1
+#define NEIGH_PRINTK1 NEIGH_PRINTK
+#endif
+#if NEIGH_DEBUG >= 2
+#undef NEIGH_PRINTK2
+#define NEIGH_PRINTK2 NEIGH_PRINTK
+#endif
+
+#define PNEIGH_HASHMASK 0xF
+
+static void neigh_timer_handler(unsigned long arg);
+static void __neigh_notify(struct neighbour *n, int type, int flags);
+static void neigh_update_notify(struct neighbour *neigh);
+static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+
+static struct neigh_table *neigh_tables;
+#ifdef CONFIG_PROC_FS
+static const struct file_operations neigh_stat_seq_fops;
+#endif
+
+/*
+   Neighbour hash table buckets are protected with the rwlock tbl->lock.
+
+   - All scans of and updates to the hash buckets MUST be made under this lock.
+   - NOTHING clever should be done under this lock: no callbacks
+     to protocol backends, no attempts to send anything to the network.
+     That would result in deadlocks if the backend/driver wants to use the
+     neighbour cache.
+   - If the entry requires some non-trivial actions, increase
+     its reference count and release the table lock.
+
+   Neighbour entries are protected:
+   - with a reference count.
+   - with the rwlock neigh->lock
+
+   The reference count prevents destruction.
+
+   neigh->lock mainly serializes the ll address data and its validity state.
+   However, the same lock is also used to protect other entry fields:
+    - timer
+    - resolution queue
+
+   Again, nothing clever shall be done under neigh->lock;
+   the most complicated procedure we allow is dev->hard_header.
+   It is assumed that dev->hard_header is simplistic and does
+   not make callbacks into the neighbour tables.
+
+   The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
+   the list of neighbour tables. This list is used only in process context.
+ */
+
+static DEFINE_RWLOCK(neigh_tbl_lock);
+
+static int neigh_blackhole(struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return -ENETDOWN;
+}
+
+static void neigh_cleanup_and_release(struct neighbour *neigh)
+{
+ if (neigh->parms->neigh_cleanup)
+ neigh->parms->neigh_cleanup(neigh);
+
+ __neigh_notify(neigh, RTM_DELNEIGH, 0);
+ neigh_release(neigh);
+}
+
+/*
+ * It is a random distribution in the interval (1/2)*base...(3/2)*base.
+ * It corresponds to the default IPv6 settings and is not overridable,
+ * because it is a really reasonable choice.
+ */
+
+unsigned long neigh_rand_reach_time(unsigned long base)
+{
+ return (base ? (net_random() % base) + (base >> 1) : 0);
+}
+EXPORT_SYMBOL(neigh_rand_reach_time);
+
+
+static int neigh_forced_gc(struct neigh_table *tbl)
+{
+ int shrunk = 0;
+ int i;
+
+ NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
+
+ write_lock_bh(&tbl->lock);
+ for (i = 0; i <= tbl->hash_mask; i++) {
+ struct neighbour *n, **np;
+
+ np = &tbl->hash_buckets[i];
+ while ((n = *np) != NULL) {
+ /* Neighbour record may be discarded if:
+ * - nobody refers to it.
+ * - it is not permanent
+ */
+ write_lock(&n->lock);
+ if (atomic_read(&n->refcnt) == 1 &&
+ !(n->nud_state & NUD_PERMANENT)) {
+ *np = n->next;
+ n->dead = 1;
+ shrunk = 1;
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ continue;
+ }
+ write_unlock(&n->lock);
+ np = &n->next;
+ }
+ }
+
+ tbl->last_flush = jiffies;
+
+ write_unlock_bh(&tbl->lock);
+
+ return shrunk;
+}
+
+static void neigh_add_timer(struct neighbour *n, unsigned long when)
+{
+ neigh_hold(n);
+ if (unlikely(mod_timer(&n->timer, when))) {
+ printk("NEIGH: BUG, double timer add, state is %x\n",
+ n->nud_state);
+ dump_stack();
+ }
+}
+
+static int neigh_del_timer(struct neighbour *n)
+{
+ if ((n->nud_state & NUD_IN_TIMER) &&
+ del_timer(&n->timer)) {
+ neigh_release(n);
+ return 1;
+ }
+ return 0;
+}
+
+static void pneigh_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
+}
+
+static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i <= tbl->hash_mask; i++) {
+ struct neighbour *n, **np = &tbl->hash_buckets[i];
+
+ while ((n = *np) != NULL) {
+ if (dev && n->dev != dev) {
+ np = &n->next;
+ continue;
+ }
+ *np = n->next;
+ write_lock(&n->lock);
+ neigh_del_timer(n);
+ n->dead = 1;
+
+ if (atomic_read(&n->refcnt) != 1) {
+				/* The most unpleasant situation.
+				   We must destroy the neighbour entry,
+				   but someone still uses it.
+
+				   The destruction will be delayed until
+				   the last user releases it, but
+				   we must kill the timers etc. and move
+				   it to a safe state.
+				 */
+ skb_queue_purge(&n->arp_queue);
+ n->output = neigh_blackhole;
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+ else
+ n->nud_state = NUD_NONE;
+ NEIGH_PRINTK2("neigh %p is stray.\n", n);
+ }
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ }
+ }
+}
+
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
+{
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
+ write_unlock_bh(&tbl->lock);
+}
+EXPORT_SYMBOL(neigh_changeaddr);
+
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
+ pneigh_ifdown(tbl, dev);
+ write_unlock_bh(&tbl->lock);
+
+ del_timer_sync(&tbl->proxy_timer);
+ pneigh_queue_purge(&tbl->proxy_queue);
+ return 0;
+}
+EXPORT_SYMBOL(neigh_ifdown);
+
+static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+{
+ struct neighbour *n = NULL;
+ unsigned long now = jiffies;
+ int entries;
+
+ entries = atomic_inc_return(&tbl->entries) - 1;
+ if (entries >= tbl->gc_thresh3 ||
+ (entries >= tbl->gc_thresh2 &&
+ time_after(now, tbl->last_flush + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) &&
+ entries >= tbl->gc_thresh3)
+ goto out_entries;
+ }
+
+ n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+ if (!n)
+ goto out_entries;
+
+ skb_queue_head_init(&n->arp_queue);
+ rwlock_init(&n->lock);
+ n->updated = n->used = now;
+ n->nud_state = NUD_NONE;
+ n->output = neigh_blackhole;
+ n->parms = neigh_parms_clone(&tbl->parms);
+ setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
+
+ NEIGH_CACHE_STAT_INC(tbl, allocs);
+ n->tbl = tbl;
+ atomic_set(&n->refcnt, 1);
+ n->dead = 1;
+out:
+ return n;
+
+out_entries:
+ atomic_dec(&tbl->entries);
+ goto out;
+}
+
+static struct neighbour **neigh_hash_alloc(unsigned int entries)
+{
+ unsigned long size = entries * sizeof(struct neighbour *);
+ struct neighbour **ret;
+
+ if (size <= PAGE_SIZE) {
+ ret = kzalloc(size, GFP_ATOMIC);
+ } else {
+ ret = (struct neighbour **)
+ __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
+ }
+ return ret;
+}
+
+static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
+{
+ unsigned long size = entries * sizeof(struct neighbour *);
+
+ if (size <= PAGE_SIZE)
+ kfree(hash);
+ else
+ free_pages((unsigned long)hash, get_order(size));
+}
+
+static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
+{
+ struct neighbour **new_hash, **old_hash;
+ unsigned int i, new_hash_mask, old_entries;
+
+ NEIGH_CACHE_STAT_INC(tbl, hash_grows);
+
+ BUG_ON(!is_power_of_2(new_entries));
+ new_hash = neigh_hash_alloc(new_entries);
+ if (!new_hash)
+ return;
+
+ old_entries = tbl->hash_mask + 1;
+ new_hash_mask = new_entries - 1;
+ old_hash = tbl->hash_buckets;
+
+ get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+ for (i = 0; i < old_entries; i++) {
+ struct neighbour *n, *next;
+
+ for (n = old_hash[i]; n; n = next) {
+ unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
+
+ hash_val &= new_hash_mask;
+ next = n->next;
+
+ n->next = new_hash[hash_val];
+ new_hash[hash_val] = n;
+ }
+ }
+ tbl->hash_buckets = new_hash;
+ tbl->hash_mask = new_hash_mask;
+
+ neigh_hash_free(old_hash, old_entries);
+}
+
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev)
+{
+ struct neighbour *n;
+ int key_len = tbl->key_len;
+ u32 hash_val;
+
+ NEIGH_CACHE_STAT_INC(tbl, lookups);
+
+ read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, dev);
+ for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+ if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
+ neigh_hold(n);
+ NEIGH_CACHE_STAT_INC(tbl, hits);
+ break;
+ }
+ }
+ read_unlock_bh(&tbl->lock);
+ return n;
+}
+EXPORT_SYMBOL(neigh_lookup);
+
+struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+ const void *pkey)
+{
+ struct neighbour *n;
+ int key_len = tbl->key_len;
+ u32 hash_val;
+
+ NEIGH_CACHE_STAT_INC(tbl, lookups);
+
+ read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, NULL);
+ for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+ if (!memcmp(n->primary_key, pkey, key_len) &&
+ net_eq(dev_net(n->dev), net)) {
+ neigh_hold(n);
+ NEIGH_CACHE_STAT_INC(tbl, hits);
+ break;
+ }
+ }
+ read_unlock_bh(&tbl->lock);
+ return n;
+}
+EXPORT_SYMBOL(neigh_lookup_nodev);
+
+struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev)
+{
+ u32 hash_val;
+ int key_len = tbl->key_len;
+ int error;
+ struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+
+ if (!n) {
+ rc = ERR_PTR(-ENOBUFS);
+ goto out;
+ }
+
+ memcpy(n->primary_key, pkey, key_len);
+ n->dev = dev;
+ dev_hold(dev);
+
+ /* Protocol specific setup. */
+ if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
+ rc = ERR_PTR(error);
+ goto out_neigh_release;
+ }
+
+ /* Device specific setup. */
+ if (n->parms->neigh_setup &&
+ (error = n->parms->neigh_setup(n)) < 0) {
+ rc = ERR_PTR(error);
+ goto out_neigh_release;
+ }
+
+ n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
+
+ write_lock_bh(&tbl->lock);
+
+ if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
+ neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
+
+ hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
+
+ if (n->parms->dead) {
+ rc = ERR_PTR(-EINVAL);
+ goto out_tbl_unlock;
+ }
+
+ for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+ if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
+ neigh_hold(n1);
+ rc = n1;
+ goto out_tbl_unlock;
+ }
+ }
+
+ n->next = tbl->hash_buckets[hash_val];
+ tbl->hash_buckets[hash_val] = n;
+ n->dead = 0;
+ neigh_hold(n);
+ write_unlock_bh(&tbl->lock);
+ NEIGH_PRINTK2("neigh %p is created.\n", n);
+ rc = n;
+out:
+ return rc;
+out_tbl_unlock:
+ write_unlock_bh(&tbl->lock);
+out_neigh_release:
+ neigh_release(n);
+ goto out;
+}
+EXPORT_SYMBOL(neigh_create);
+
+static u32 pneigh_hash(const void *pkey, int key_len)
+{
+ u32 hash_val = *(u32 *)(pkey + key_len - 4);
+ hash_val ^= (hash_val >> 16);
+ hash_val ^= hash_val >> 8;
+ hash_val ^= hash_val >> 4;
+ hash_val &= PNEIGH_HASHMASK;
+ return hash_val;
+}
+
+static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
+ struct net *net,
+ const void *pkey,
+ int key_len,
+ struct net_device *dev)
+{
+ while (n) {
+ if (!memcmp(n->key, pkey, key_len) &&
+ net_eq(pneigh_net(n), net) &&
+ (n->dev == dev || !n->dev))
+ return n;
+ n = n->next;
+ }
+ return NULL;
+}
+
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
+ struct net *net, const void *pkey, struct net_device *dev)
+{
+ int key_len = tbl->key_len;
+ u32 hash_val = pneigh_hash(pkey, key_len);
+
+ return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
+ net, pkey, key_len, dev);
+}
+EXPORT_SYMBOL_GPL(__pneigh_lookup);
+
+struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
+ struct net *net, const void *pkey,
+ struct net_device *dev, int creat)
+{
+ struct pneigh_entry *n;
+ int key_len = tbl->key_len;
+ u32 hash_val = pneigh_hash(pkey, key_len);
+
+ read_lock_bh(&tbl->lock);
+ n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
+ net, pkey, key_len, dev);
+ read_unlock_bh(&tbl->lock);
+
+ if (n || !creat)
+ goto out;
+
+ ASSERT_RTNL();
+
+ n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ if (!n)
+ goto out;
+
+ write_pnet(&n->net, hold_net(net));
+ memcpy(n->key, pkey, key_len);
+ n->dev = dev;
+ if (dev)
+ dev_hold(dev);
+
+ if (tbl->pconstructor && tbl->pconstructor(n)) {
+ if (dev)
+ dev_put(dev);
+ release_net(net);
+ kfree(n);
+ n = NULL;
+ goto out;
+ }
+
+ write_lock_bh(&tbl->lock);
+ n->next = tbl->phash_buckets[hash_val];
+ tbl->phash_buckets[hash_val] = n;
+ write_unlock_bh(&tbl->lock);
+out:
+ return n;
+}
+EXPORT_SYMBOL(pneigh_lookup);
+
+
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+ struct net_device *dev)
+{
+ struct pneigh_entry *n, **np;
+ int key_len = tbl->key_len;
+ u32 hash_val = pneigh_hash(pkey, key_len);
+
+ write_lock_bh(&tbl->lock);
+ for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
+ np = &n->next) {
+ if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
+ net_eq(pneigh_net(n), net)) {
+ *np = n->next;
+ write_unlock_bh(&tbl->lock);
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ release_net(pneigh_net(n));
+ kfree(n);
+ return 0;
+ }
+ }
+ write_unlock_bh(&tbl->lock);
+ return -ENOENT;
+}
+
+static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ struct pneigh_entry *n, **np;
+ u32 h;
+
+ for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+ np = &tbl->phash_buckets[h];
+ while ((n = *np) != NULL) {
+ if (!dev || n->dev == dev) {
+ *np = n->next;
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ release_net(pneigh_net(n));
+ kfree(n);
+ continue;
+ }
+ np = &n->next;
+ }
+ }
+ return -ENOENT;
+}
+
+static void neigh_parms_destroy(struct neigh_parms *parms);
+
+static inline void neigh_parms_put(struct neigh_parms *parms)
+{
+ if (atomic_dec_and_test(&parms->refcnt))
+ neigh_parms_destroy(parms);
+}
+
+/*
+ * The neighbour must already be out of the table.
+ */
+void neigh_destroy(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
+
+ if (!neigh->dead) {
+ printk(KERN_WARNING
+ "Destroying alive neighbour %p\n", neigh);
+ dump_stack();
+ return;
+ }
+
+ if (neigh_del_timer(neigh))
+ printk(KERN_WARNING "Impossible event.\n");
+
+ while ((hh = neigh->hh) != NULL) {
+ neigh->hh = hh->hh_next;
+ hh->hh_next = NULL;
+
+ write_seqlock_bh(&hh->hh_lock);
+ hh->hh_output = neigh_blackhole;
+ write_sequnlock_bh(&hh->hh_lock);
+ if (atomic_dec_and_test(&hh->hh_refcnt))
+ kfree(hh);
+ }
+
+ skb_queue_purge(&neigh->arp_queue);
+
+ dev_put(neigh->dev);
+ neigh_parms_put(neigh->parms);
+
+ NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
+
+ atomic_dec(&neigh->tbl->entries);
+ kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+}
+EXPORT_SYMBOL(neigh_destroy);
+
+/* Neighbour state is suspicious;
+ disable fast path.
+
+ Called with write_locked neigh.
+ */
+static void neigh_suspect(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+
+ neigh->output = neigh->ops->output;
+
+ for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh->hh_output = neigh->ops->output;
+}
+
+/* Neighbour state is OK;
+ enable fast path.
+
+ Called with write_locked neigh.
+ */
+static void neigh_connect(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
+
+ neigh->output = neigh->ops->connected_output;
+
+ for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh->hh_output = neigh->ops->hh_output;
+}
+
+static void neigh_periodic_timer(unsigned long arg)
+{
+ struct neigh_table *tbl = (struct neigh_table *)arg;
+ struct neighbour *n, **np;
+ unsigned long expire, now = jiffies;
+
+ NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
+
+ write_lock(&tbl->lock);
+
+ /*
+ * periodically recompute ReachableTime from random function
+ */
+
+ if (time_after(now, tbl->last_rand + 300 * HZ)) {
+ struct neigh_parms *p;
+ tbl->last_rand = now;
+ for (p = &tbl->parms; p; p = p->next)
+ p->reachable_time =
+ neigh_rand_reach_time(p->base_reachable_time);
+ }
+
+ np = &tbl->hash_buckets[tbl->hash_chain_gc];
+ tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
+
+ while ((n = *np) != NULL) {
+ unsigned int state;
+
+ write_lock(&n->lock);
+
+ state = n->nud_state;
+ if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
+ write_unlock(&n->lock);
+ goto next_elt;
+ }
+
+ if (time_before(n->used, n->confirmed))
+ n->used = n->confirmed;
+
+ if (atomic_read(&n->refcnt) == 1 &&
+ (state == NUD_FAILED ||
+ time_after(now, n->used + n->parms->gc_staletime))) {
+ *np = n->next;
+ n->dead = 1;
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ continue;
+ }
+ write_unlock(&n->lock);
+
+next_elt:
+ np = &n->next;
+ }
+
+ /* Cycle through all hash buckets every base_reachable_time/2 ticks.
+ * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
+ * base_reachable_time.
+ */
+ expire = tbl->parms.base_reachable_time >> 1;
+ expire /= (tbl->hash_mask + 1);
+ if (!expire)
+ expire = 1;
+
+ if (expire>HZ)
+ mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
+ else
+ mod_timer(&tbl->gc_timer, now + expire);
+
+ write_unlock(&tbl->lock);
+}
+
+static __inline__ int neigh_max_probes(struct neighbour *n)
+{
+ struct neigh_parms *p = n->parms;
+ return (n->nud_state & NUD_PROBE ?
+ p->ucast_probes :
+ p->ucast_probes + p->app_probes + p->mcast_probes);
+}
+
+/* Called when a timer expires for a neighbour entry. */
+
+static void neigh_timer_handler(unsigned long arg)
+{
+ unsigned long now, next;
+ struct neighbour *neigh = (struct neighbour *)arg;
+ unsigned state;
+ int notify = 0;
+
+ write_lock(&neigh->lock);
+
+ state = neigh->nud_state;
+ now = jiffies;
+ next = now + HZ;
+
+ if (!(state & NUD_IN_TIMER)) {
+#ifndef CONFIG_SMP
+ printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
+#endif
+ goto out;
+ }
+
+ if (state & NUD_REACHABLE) {
+ if (time_before_eq(now,
+ neigh->confirmed + neigh->parms->reachable_time)) {
+ NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
+ next = neigh->confirmed + neigh->parms->reachable_time;
+ } else if (time_before_eq(now,
+ neigh->used + neigh->parms->delay_probe_time)) {
+ NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+ neigh->nud_state = NUD_DELAY;
+ neigh->updated = jiffies;
+ neigh_suspect(neigh);
+ next = now + neigh->parms->delay_probe_time;
+ } else {
+ NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+ neigh->nud_state = NUD_STALE;
+ neigh->updated = jiffies;
+ neigh_suspect(neigh);
+ notify = 1;
+ }
+ } else if (state & NUD_DELAY) {
+ if (time_before_eq(now,
+ neigh->confirmed + neigh->parms->delay_probe_time)) {
+ NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
+ neigh->nud_state = NUD_REACHABLE;
+ neigh->updated = jiffies;
+ neigh_connect(neigh);
+ notify = 1;
+ next = neigh->confirmed + neigh->parms->reachable_time;
+ } else {
+ NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
+ neigh->nud_state = NUD_PROBE;
+ neigh->updated = jiffies;
+ atomic_set(&neigh->probes, 0);
+ next = now + neigh->parms->retrans_time;
+ }
+ } else {
+ /* NUD_PROBE|NUD_INCOMPLETE */
+ next = now + neigh->parms->retrans_time;
+ }
+
+ if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+ atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
+ struct sk_buff *skb;
+
+ neigh->nud_state = NUD_FAILED;
+ neigh->updated = jiffies;
+ notify = 1;
+ NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
+ NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
+
+		/* This is a very delicate place. report_unreachable is a very
+		   complicated routine. In particular, it can hit the same
+		   neighbour entry!
+
+		   So we try to be careful and avoid an endless loop. --ANK
+		 */
+ while (neigh->nud_state == NUD_FAILED &&
+ (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+ write_unlock(&neigh->lock);
+ neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
+ skb_queue_purge(&neigh->arp_queue);
+ }
+
+ if (neigh->nud_state & NUD_IN_TIMER) {
+ if (time_before(next, jiffies + HZ/2))
+ next = jiffies + HZ/2;
+ if (!mod_timer(&neigh->timer, next))
+ neigh_hold(neigh);
+ }
+ if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
+ struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+ /* keep skb alive even if arp_queue overflows */
+ if (skb)
+ skb = skb_copy(skb, GFP_ATOMIC);
+ write_unlock(&neigh->lock);
+ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ if (skb)
+ kfree_skb(skb);
+ } else {
+out:
+ write_unlock(&neigh->lock);
+ }
+
+ if (notify)
+ neigh_update_notify(neigh);
+
+ neigh_release(neigh);
+}
+
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ int rc;
+ unsigned long now;
+
+ write_lock_bh(&neigh->lock);
+
+ rc = 0;
+ if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+ goto out_unlock_bh;
+
+ now = jiffies;
+
+ if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+ if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ atomic_set(&neigh->probes, neigh->parms->ucast_probes);
+ neigh->nud_state = NUD_INCOMPLETE;
+ neigh->updated = jiffies;
+ neigh_add_timer(neigh, now + 1);
+ } else {
+ neigh->nud_state = NUD_FAILED;
+ neigh->updated = jiffies;
+ write_unlock_bh(&neigh->lock);
+
+ if (skb)
+ kfree_skb(skb);
+ return 1;
+ }
+ } else if (neigh->nud_state & NUD_STALE) {
+ NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+ neigh->nud_state = NUD_DELAY;
+ neigh->updated = jiffies;
+ neigh_add_timer(neigh,
+ jiffies + neigh->parms->delay_probe_time);
+ }
+
+ if (neigh->nud_state == NUD_INCOMPLETE) {
+ if (skb) {
+ if (skb_queue_len(&neigh->arp_queue) >=
+ neigh->parms->queue_len) {
+ struct sk_buff *buff;
+ buff = __skb_dequeue(&neigh->arp_queue);
+ kfree_skb(buff);
+ NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
+ }
+ __skb_queue_tail(&neigh->arp_queue, skb);
+ }
+ rc = 1;
+ }
+out_unlock_bh:
+ write_unlock_bh(&neigh->lock);
+ return rc;
+}
+EXPORT_SYMBOL(__neigh_event_send);
+
+static void neigh_update_hhs(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+ void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
+ = neigh->dev->header_ops->cache_update;
+
+ if (update) {
+ for (hh = neigh->hh; hh; hh = hh->hh_next) {
+ write_seqlock_bh(&hh->hh_lock);
+ update(hh, neigh->dev, neigh->ha);
+ write_sequnlock_bh(&hh->hh_lock);
+ }
+ }
+}
+
+
+
+/* Generic update routine.
+   -- lladdr is the new lladdr, or NULL if it is not supplied.
+   -- new is the new state.
+   -- flags
+	NEIGH_UPDATE_F_OVERRIDE allows the existing lladdr to be overridden,
+				if it is different.
+	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
+				lladdr instead of overriding it
+				if it is different.
+				It also allows the current state to be
+				retained if lladdr is unchanged.
+	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
+
+	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows the existing NTF_ROUTER
+				flag to be overridden.
+	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
+				to be a router.
+
+   Caller MUST hold a reference count on the entry.
+ */
+
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ u32 flags)
+{
+ u8 old;
+ int err;
+ int notify = 0;
+ struct net_device *dev;
+ int update_isrouter = 0;
+
+ write_lock_bh(&neigh->lock);
+
+ dev = neigh->dev;
+ old = neigh->nud_state;
+ err = -EPERM;
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ (old & (NUD_NOARP | NUD_PERMANENT)))
+ goto out;
+
+ if (!(new & NUD_VALID)) {
+ neigh_del_timer(neigh);
+ if (old & NUD_CONNECTED)
+ neigh_suspect(neigh);
+ neigh->nud_state = new;
+ err = 0;
+ notify = old & NUD_VALID;
+ goto out;
+ }
+
+ /* Compare new lladdr with cached one */
+ if (!dev->addr_len) {
+ /* First case: device needs no address. */
+ lladdr = neigh->ha;
+ } else if (lladdr) {
+ /* The second case: if something is already cached
+ and a new address is proposed:
+ - compare new & old
+ - if they are different, check override flag
+ */
+ if ((old & NUD_VALID) &&
+ !memcmp(lladdr, neigh->ha, dev->addr_len))
+ lladdr = neigh->ha;
+ } else {
+ /* No address is supplied; if we know something,
+ use it, otherwise discard the request.
+ */
+ err = -EINVAL;
+ if (!(old & NUD_VALID))
+ goto out;
+ lladdr = neigh->ha;
+ }
+
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+ neigh->updated = jiffies;
+
+	/* If the entry was valid and the address has not changed,
+	   do not change the entry state if the new one is STALE.
+	 */
+ err = 0;
+ update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
+ if (old & NUD_VALID) {
+ if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
+ update_isrouter = 0;
+ if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
+ (old & NUD_CONNECTED)) {
+ lladdr = neigh->ha;
+ new = NUD_STALE;
+ } else
+ goto out;
+ } else {
+ if (lladdr == neigh->ha && new == NUD_STALE &&
+ ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
+ (old & NUD_CONNECTED))
+ )
+ new = old;
+ }
+ }
+
+ if (new != old) {
+ neigh_del_timer(neigh);
+ if (new & NUD_IN_TIMER)
+ neigh_add_timer(neigh, (jiffies +
+ ((new & NUD_REACHABLE) ?
+ neigh->parms->reachable_time :
+ 0)));
+ neigh->nud_state = new;
+ }
+
+ if (lladdr != neigh->ha) {
+ memcpy(&neigh->ha, lladdr, dev->addr_len);
+ neigh_update_hhs(neigh);
+ if (!(new & NUD_CONNECTED))
+ neigh->confirmed = jiffies -
+ (neigh->parms->base_reachable_time << 1);
+ notify = 1;
+ }
+ if (new == old)
+ goto out;
+ if (new & NUD_CONNECTED)
+ neigh_connect(neigh);
+ else
+ neigh_suspect(neigh);
+ if (!(old & NUD_VALID)) {
+ struct sk_buff *skb;
+
+ /* Again: avoid dead loop if something went wrong */
+
+ while (neigh->nud_state & NUD_VALID &&
+ (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+ struct neighbour *n1 = neigh;
+ write_unlock_bh(&neigh->lock);
+ /* On shaper/eql skb->dst->neighbour != neigh :( */
+ if (skb->dst && skb->dst->neighbour)
+ n1 = skb->dst->neighbour;
+ n1->output(skb);
+ write_lock_bh(&neigh->lock);
+ }
+ skb_queue_purge(&neigh->arp_queue);
+ }
+out:
+ if (update_isrouter) {
+ neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
+ (neigh->flags | NTF_ROUTER) :
+ (neigh->flags & ~NTF_ROUTER);
+ }
+ write_unlock_bh(&neigh->lock);
+
+ if (notify)
+ neigh_update_notify(neigh);
+
+ return err;
+}
+EXPORT_SYMBOL(neigh_update);
+
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+ u8 *lladdr, void *saddr,
+ struct net_device *dev)
+{
+ struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
+ lladdr || !dev->addr_len);
+ if (neigh)
+ neigh_update(neigh, lladdr, NUD_STALE,
+ NEIGH_UPDATE_F_OVERRIDE);
+ return neigh;
+}
+EXPORT_SYMBOL(neigh_event_ns);
+
+static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
+ __be16 protocol)
+{
+ struct hh_cache *hh;
+ struct net_device *dev = dst->dev;
+
+ for (hh = n->hh; hh; hh = hh->hh_next)
+ if (hh->hh_type == protocol)
+ break;
+
+ if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
+ seqlock_init(&hh->hh_lock);
+ hh->hh_type = protocol;
+ atomic_set(&hh->hh_refcnt, 0);
+ hh->hh_next = NULL;
+
+ if (dev->header_ops->cache(n, hh)) {
+ kfree(hh);
+ hh = NULL;
+ } else {
+ atomic_inc(&hh->hh_refcnt);
+ hh->hh_next = n->hh;
+ n->hh = hh;
+ if (n->nud_state & NUD_CONNECTED)
+ hh->hh_output = n->ops->hh_output;
+ else
+ hh->hh_output = n->ops->output;
+ }
+ }
+ if (hh) {
+ atomic_inc(&hh->hh_refcnt);
+ dst->hh = hh;
+ }
+}
+
+/* This function can be used in contexts where only the old dev_queue_xmit
+   worked, e.g. if you want to override the normal output path (eql, shaper),
+   but resolution has not been made yet.
+ */
+
+int neigh_compat_output(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+
+ __skb_pull(skb, skb_network_offset(skb));
+
+ if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
+ skb->len) < 0 &&
+ dev->header_ops->rebuild(skb))
+ return 0;
+
+ return dev_queue_xmit(skb);
+}
+EXPORT_SYMBOL(neigh_compat_output);
+
+/* Slow and careful. */
+
+int neigh_resolve_output(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct neighbour *neigh;
+ int rc = 0;
+
+ if (!dst || !(neigh = dst->neighbour))
+ goto discard;
+
+ __skb_pull(skb, skb_network_offset(skb));
+
+ if (!neigh_event_send(neigh, skb)) {
+ int err;
+ struct net_device *dev = neigh->dev;
+ if (dev->header_ops->cache && !dst->hh) {
+ write_lock_bh(&neigh->lock);
+ if (!dst->hh)
+ neigh_hh_init(neigh, dst, dst->ops->protocol);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+ write_unlock_bh(&neigh->lock);
+ } else {
+ read_lock_bh(&neigh->lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+ read_unlock_bh(&neigh->lock);
+ }
+ if (err >= 0)
+ rc = neigh->ops->queue_xmit(skb);
+ else
+ goto out_kfree_skb;
+ }
+out:
+ return rc;
+discard:
+ NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
+ dst, dst ? dst->neighbour : NULL);
+out_kfree_skb:
+ rc = -EINVAL;
+ kfree_skb(skb);
+ goto out;
+}
+EXPORT_SYMBOL(neigh_resolve_output);
+
+/* As fast as possible without hh cache */
+
+int neigh_connected_output(struct sk_buff *skb)
+{
+ int err;
+ struct dst_entry *dst = skb->dst;
+ struct neighbour *neigh = dst->neighbour;
+ struct net_device *dev = neigh->dev;
+
+ __skb_pull(skb, skb_network_offset(skb));
+
+ read_lock_bh(&neigh->lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+ read_unlock_bh(&neigh->lock);
+ if (err >= 0)
+ err = neigh->ops->queue_xmit(skb);
+ else {
+ err = -EINVAL;
+ kfree_skb(skb);
+ }
+ return err;
+}
+EXPORT_SYMBOL(neigh_connected_output);
+
+static void neigh_proxy_process(unsigned long arg)
+{
+ struct neigh_table *tbl = (struct neigh_table *)arg;
+ long sched_next = 0;
+ unsigned long now = jiffies;
+ struct sk_buff *skb, *n;
+
+ spin_lock(&tbl->proxy_queue.lock);
+
+ skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
+ long tdif = NEIGH_CB(skb)->sched_next - now;
+
+ if (tdif <= 0) {
+ struct net_device *dev = skb->dev;
+ __skb_unlink(skb, &tbl->proxy_queue);
+ if (tbl->proxy_redo && netif_running(dev))
+ tbl->proxy_redo(skb);
+ else
+ kfree_skb(skb);
+
+ dev_put(dev);
+ } else if (!sched_next || tdif < sched_next)
+ sched_next = tdif;
+ }
+ del_timer(&tbl->proxy_timer);
+ if (sched_next)
+ mod_timer(&tbl->proxy_timer, jiffies + sched_next);
+ spin_unlock(&tbl->proxy_queue.lock);
+}
+
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb)
+{
+ unsigned long now = jiffies;
+ unsigned long sched_next = now + (net_random() % p->proxy_delay);
+
+ if (tbl->proxy_queue.qlen > p->proxy_qlen) {
+ kfree_skb(skb);
+ return;
+ }
+
+ NEIGH_CB(skb)->sched_next = sched_next;
+ NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
+
+ spin_lock(&tbl->proxy_queue.lock);
+ if (del_timer(&tbl->proxy_timer)) {
+ if (time_before(tbl->proxy_timer.expires, sched_next))
+ sched_next = tbl->proxy_timer.expires;
+ }
+ dst_release(skb->dst);
+ skb->dst = NULL;
+ dev_hold(skb->dev);
+ __skb_queue_tail(&tbl->proxy_queue, skb);
+ mod_timer(&tbl->proxy_timer, sched_next);
+ spin_unlock(&tbl->proxy_queue.lock);
+}
+EXPORT_SYMBOL(pneigh_enqueue);
+
+static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
+ struct net *net, int ifindex)
+{
+ struct neigh_parms *p;
+
+ for (p = &tbl->parms; p; p = p->next) {
+ if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
+ (!p->dev && !ifindex))
+ return p;
+ }
+
+ return NULL;
+}
+
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+ struct neigh_table *tbl)
+{
+ struct neigh_parms *p, *ref;
+ struct net *net = dev_net(dev);
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ ref = lookup_neigh_params(tbl, net, 0);
+ if (!ref)
+ return NULL;
+
+ p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
+ if (p) {
+ p->tbl = tbl;
+ atomic_set(&p->refcnt, 1);
+ p->reachable_time =
+ neigh_rand_reach_time(p->base_reachable_time);
+
+ if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ kfree(p);
+ return NULL;
+ }
+
+ dev_hold(dev);
+ p->dev = dev;
+ write_pnet(&p->net, hold_net(net));
+ p->sysctl_table = NULL;
+ write_lock_bh(&tbl->lock);
+ p->next = tbl->parms.next;
+ tbl->parms.next = p;
+ write_unlock_bh(&tbl->lock);
+ }
+ return p;
+}
+EXPORT_SYMBOL(neigh_parms_alloc);
+
+static void neigh_rcu_free_parms(struct rcu_head *head)
+{
+ struct neigh_parms *parms =
+ container_of(head, struct neigh_parms, rcu_head);
+
+ neigh_parms_put(parms);
+}
+
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
+{
+ struct neigh_parms **p;
+
+ if (!parms || parms == &tbl->parms)
+ return;
+ write_lock_bh(&tbl->lock);
+ for (p = &tbl->parms.next; *p; p = &(*p)->next) {
+ if (*p == parms) {
+ *p = parms->next;
+ parms->dead = 1;
+ write_unlock_bh(&tbl->lock);
+ if (parms->dev)
+ dev_put(parms->dev);
+ call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
+ return;
+ }
+ }
+ write_unlock_bh(&tbl->lock);
+ NEIGH_PRINTK1("neigh_parms_release: not found\n");
+}
+EXPORT_SYMBOL(neigh_parms_release);
+
+static void neigh_parms_destroy(struct neigh_parms *parms)
+{
+ release_net(neigh_parms_net(parms));
+ kfree(parms);
+}
+
+static struct lock_class_key neigh_table_proxy_queue_class;
+
+void neigh_table_init_no_netlink(struct neigh_table *tbl)
+{
+ unsigned long now = jiffies;
+ unsigned long phsize;
+
+ write_pnet(&tbl->parms.net, &init_net);
+ atomic_set(&tbl->parms.refcnt, 1);
+ tbl->parms.reachable_time =
+ neigh_rand_reach_time(tbl->parms.base_reachable_time);
+
+ if (!tbl->kmem_cachep)
+ tbl->kmem_cachep =
+ kmem_cache_create(tbl->id, tbl->entry_size, 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
+ tbl->stats = alloc_percpu(struct neigh_statistics);
+ if (!tbl->stats)
+ panic("cannot create neighbour cache statistics");
+
+#ifdef CONFIG_PROC_FS
+ if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
+ &neigh_stat_seq_fops, tbl))
+ panic("cannot create neighbour proc dir entry");
+#endif
+
+ tbl->hash_mask = 1;
+ tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
+
+ phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
+ tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
+
+ if (!tbl->hash_buckets || !tbl->phash_buckets)
+ panic("cannot allocate neighbour cache hashes");
+
+ get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+
+ rwlock_init(&tbl->lock);
+ setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
+ tbl->gc_timer.expires = now + 1;
+ add_timer(&tbl->gc_timer);
+
+ setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
+ skb_queue_head_init_class(&tbl->proxy_queue,
+ &neigh_table_proxy_queue_class);
+
+ tbl->last_flush = now;
+ tbl->last_rand = now + tbl->parms.reachable_time * 20;
+}
+EXPORT_SYMBOL(neigh_table_init_no_netlink);
+
+void neigh_table_init(struct neigh_table *tbl)
+{
+ struct neigh_table *tmp;
+
+ neigh_table_init_no_netlink(tbl);
+ write_lock(&neigh_tbl_lock);
+ for (tmp = neigh_tables; tmp; tmp = tmp->next) {
+ if (tmp->family == tbl->family)
+ break;
+ }
+ tbl->next = neigh_tables;
+ neigh_tables = tbl;
+ write_unlock(&neigh_tbl_lock);
+
+ if (unlikely(tmp)) {
+ printk(KERN_ERR "NEIGH: Registering multiple tables for "
+ "family %d\n", tbl->family);
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(neigh_table_init);
+
+int neigh_table_clear(struct neigh_table *tbl)
+{
+ struct neigh_table **tp;
+
+ /* It is not clean... Fix it to unload IPv6 module safely */
+ del_timer_sync(&tbl->gc_timer);
+ del_timer_sync(&tbl->proxy_timer);
+ pneigh_queue_purge(&tbl->proxy_queue);
+ neigh_ifdown(tbl, NULL);
+ if (atomic_read(&tbl->entries))
+ printk(KERN_CRIT "neighbour leakage\n");
+ write_lock(&neigh_tbl_lock);
+ for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
+ if (*tp == tbl) {
+ *tp = tbl->next;
+ break;
+ }
+ }
+ write_unlock(&neigh_tbl_lock);
+
+ neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
+ tbl->hash_buckets = NULL;
+
+ kfree(tbl->phash_buckets);
+ tbl->phash_buckets = NULL;
+
+ remove_proc_entry(tbl->id, init_net.proc_net_stat);
+
+ free_percpu(tbl->stats);
+ tbl->stats = NULL;
+
+ kmem_cache_destroy(tbl->kmem_cachep);
+ tbl->kmem_cachep = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(neigh_table_clear);
+
+static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ndmsg *ndm;
+ struct nlattr *dst_attr;
+ struct neigh_table *tbl;
+ struct net_device *dev = NULL;
+ int err = -EINVAL;
+
+ if (nlmsg_len(nlh) < sizeof(*ndm))
+ goto out;
+
+ dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
+ if (dst_attr == NULL)
+ goto out;
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_ifindex) {
+ dev = dev_get_by_index(net, ndm->ndm_ifindex);
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto out;
+ }
+ }
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ struct neighbour *neigh;
+
+ if (tbl->family != ndm->ndm_family)
+ continue;
+ read_unlock(&neigh_tbl_lock);
+
+ if (nla_len(dst_attr) < tbl->key_len)
+ goto out_dev_put;
+
+ if (ndm->ndm_flags & NTF_PROXY) {
+ err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
+ goto out_dev_put;
+ }
+
+ if (dev == NULL)
+ goto out_dev_put;
+
+ neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
+ if (neigh == NULL) {
+ err = -ENOENT;
+ goto out_dev_put;
+ }
+
+ err = neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE |
+ NEIGH_UPDATE_F_ADMIN);
+ neigh_release(neigh);
+ goto out_dev_put;
+ }
+ read_unlock(&neigh_tbl_lock);
+ err = -EAFNOSUPPORT;
+
+out_dev_put:
+ if (dev)
+ dev_put(dev);
+out:
+ return err;
+}
+
+static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ndmsg *ndm;
+ struct nlattr *tb[NDA_MAX+1];
+ struct neigh_table *tbl;
+ struct net_device *dev = NULL;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+ if (err < 0)
+ goto out;
+
+ err = -EINVAL;
+ if (tb[NDA_DST] == NULL)
+ goto out;
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_ifindex) {
+ dev = dev_get_by_index(net, ndm->ndm_ifindex);
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
+ goto out_dev_put;
+ }
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
+ struct neighbour *neigh;
+ void *dst, *lladdr;
+
+ if (tbl->family != ndm->ndm_family)
+ continue;
+ read_unlock(&neigh_tbl_lock);
+
+ if (nla_len(tb[NDA_DST]) < tbl->key_len)
+ goto out_dev_put;
+ dst = nla_data(tb[NDA_DST]);
+ lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
+
+ if (ndm->ndm_flags & NTF_PROXY) {
+ struct pneigh_entry *pn;
+
+ err = -ENOBUFS;
+ pn = pneigh_lookup(tbl, net, dst, dev, 1);
+ if (pn) {
+ pn->flags = ndm->ndm_flags;
+ err = 0;
+ }
+ goto out_dev_put;
+ }
+
+ if (dev == NULL)
+ goto out_dev_put;
+
+ neigh = neigh_lookup(tbl, dst, dev);
+ if (neigh == NULL) {
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ err = -ENOENT;
+ goto out_dev_put;
+ }
+
+ neigh = __neigh_lookup_errno(tbl, dst, dev);
+ if (IS_ERR(neigh)) {
+ err = PTR_ERR(neigh);
+ goto out_dev_put;
+ }
+ } else {
+ if (nlh->nlmsg_flags & NLM_F_EXCL) {
+ err = -EEXIST;
+ neigh_release(neigh);
+ goto out_dev_put;
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
+ flags &= ~NEIGH_UPDATE_F_OVERRIDE;
+ }
+
+ err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
+ neigh_release(neigh);
+ goto out_dev_put;
+ }
+
+ read_unlock(&neigh_tbl_lock);
+ err = -EAFNOSUPPORT;
+
+out_dev_put:
+ if (dev)
+ dev_put(dev);
+out:
+ return err;
+}
+
+static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, NDTA_PARMS);
+ if (nest == NULL)
+ return -ENOBUFS;
+
+ if (parms->dev)
+ NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
+
+ NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
+ NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+ NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
+ NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
+ NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
+ NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
+ NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
+ NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
+ parms->base_reachable_time);
+ NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
+ NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
+ NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
+ NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
+ NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
+ NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
+
+ return nla_nest_end(skb, nest);
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ u32 pid, u32 seq, int type, int flags)
+{
+ struct nlmsghdr *nlh;
+ struct ndtmsg *ndtmsg;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndtmsg = nlmsg_data(nlh);
+
+ read_lock_bh(&tbl->lock);
+ ndtmsg->ndtm_family = tbl->family;
+ ndtmsg->ndtm_pad1 = 0;
+ ndtmsg->ndtm_pad2 = 0;
+
+ NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
+ NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
+ NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
+ NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
+ NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
+
+ {
+ unsigned long now = jiffies;
+ unsigned int flush_delta = now - tbl->last_flush;
+ unsigned int rand_delta = now - tbl->last_rand;
+
+ struct ndt_config ndc = {
+ .ndtc_key_len = tbl->key_len,
+ .ndtc_entry_size = tbl->entry_size,
+ .ndtc_entries = atomic_read(&tbl->entries),
+ .ndtc_last_flush = jiffies_to_msecs(flush_delta),
+ .ndtc_last_rand = jiffies_to_msecs(rand_delta),
+ .ndtc_hash_rnd = tbl->hash_rnd,
+ .ndtc_hash_mask = tbl->hash_mask,
+ .ndtc_hash_chain_gc = tbl->hash_chain_gc,
+ .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ };
+
+ NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+ }
+
+ {
+ int cpu;
+ struct ndt_stats ndst;
+
+ memset(&ndst, 0, sizeof(ndst));
+
+ for_each_possible_cpu(cpu) {
+ struct neigh_statistics *st;
+
+ st = per_cpu_ptr(tbl->stats, cpu);
+ ndst.ndts_allocs += st->allocs;
+ ndst.ndts_destroys += st->destroys;
+ ndst.ndts_hash_grows += st->hash_grows;
+ ndst.ndts_res_failed += st->res_failed;
+ ndst.ndts_lookups += st->lookups;
+ ndst.ndts_hits += st->hits;
+ ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
+ ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
+ ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
+ ndst.ndts_forced_gc_runs += st->forced_gc_runs;
+ }
+
+ NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+ }
+
+ BUG_ON(tbl->parms.dev);
+ if (neightbl_fill_parms(skb, &tbl->parms) < 0)
+ goto nla_put_failure;
+
+ read_unlock_bh(&tbl->lock);
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ read_unlock_bh(&tbl->lock);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int neightbl_fill_param_info(struct sk_buff *skb,
+ struct neigh_table *tbl,
+ struct neigh_parms *parms,
+ u32 pid, u32 seq, int type,
+ unsigned int flags)
+{
+ struct ndtmsg *ndtmsg;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndtmsg = nlmsg_data(nlh);
+
+ read_lock_bh(&tbl->lock);
+ ndtmsg->ndtm_family = tbl->family;
+ ndtmsg->ndtm_pad1 = 0;
+ ndtmsg->ndtm_pad2 = 0;
+
+ if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
+ neightbl_fill_parms(skb, parms) < 0)
+ goto errout;
+
+ read_unlock_bh(&tbl->lock);
+ return nlmsg_end(skb, nlh);
+errout:
+ read_unlock_bh(&tbl->lock);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
+ [NDTA_NAME] = { .type = NLA_STRING },
+ [NDTA_THRESH1] = { .type = NLA_U32 },
+ [NDTA_THRESH2] = { .type = NLA_U32 },
+ [NDTA_THRESH3] = { .type = NLA_U32 },
+ [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
+ [NDTA_PARMS] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
+ [NDTPA_IFINDEX] = { .type = NLA_U32 },
+ [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
+ [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
+ [NDTPA_APP_PROBES] = { .type = NLA_U32 },
+ [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
+ [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
+ [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
+ [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
+ [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
+ [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
+ [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
+ [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
+ [NDTPA_LOCKTIME] = { .type = NLA_U64 },
+};
+
+static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct neigh_table *tbl;
+ struct ndtmsg *ndtmsg;
+ struct nlattr *tb[NDTA_MAX+1];
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
+ nl_neightbl_policy);
+ if (err < 0)
+ goto errout;
+
+ if (tb[NDTA_NAME] == NULL) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ ndtmsg = nlmsg_data(nlh);
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
+ continue;
+
+ if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
+ break;
+ }
+
+ if (tbl == NULL) {
+ err = -ENOENT;
+ goto errout_locked;
+ }
+
+ /*
+ * We acquire tbl->lock to be nice to the periodic timers and
+ * make sure they always see a consistent set of values.
+ */
+ write_lock_bh(&tbl->lock);
+
+ if (tb[NDTA_PARMS]) {
+ struct nlattr *tbp[NDTPA_MAX+1];
+ struct neigh_parms *p;
+ int i, ifindex = 0;
+
+ err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
+ nl_ntbl_parm_policy);
+ if (err < 0)
+ goto errout_tbl_lock;
+
+ if (tbp[NDTPA_IFINDEX])
+ ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
+
+ p = lookup_neigh_params(tbl, net, ifindex);
+ if (p == NULL) {
+ err = -ENOENT;
+ goto errout_tbl_lock;
+ }
+
+ for (i = 1; i <= NDTPA_MAX; i++) {
+ if (tbp[i] == NULL)
+ continue;
+
+ switch (i) {
+ case NDTPA_QUEUE_LEN:
+ p->queue_len = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_PROXY_QLEN:
+ p->proxy_qlen = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_APP_PROBES:
+ p->app_probes = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_UCAST_PROBES:
+ p->ucast_probes = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_MCAST_PROBES:
+ p->mcast_probes = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_BASE_REACHABLE_TIME:
+ p->base_reachable_time = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_GC_STALETIME:
+ p->gc_staletime = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_DELAY_PROBE_TIME:
+ p->delay_probe_time = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_RETRANS_TIME:
+ p->retrans_time = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_ANYCAST_DELAY:
+ p->anycast_delay = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_PROXY_DELAY:
+ p->proxy_delay = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_LOCKTIME:
+ p->locktime = nla_get_msecs(tbp[i]);
+ break;
+ }
+ }
+ }
+
+ if (tb[NDTA_THRESH1])
+ tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+
+ if (tb[NDTA_THRESH2])
+ tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+
+ if (tb[NDTA_THRESH3])
+ tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+
+ if (tb[NDTA_GC_INTERVAL])
+ tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+
+ err = 0;
+
+errout_tbl_lock:
+ write_unlock_bh(&tbl->lock);
+errout_locked:
+ read_unlock(&neigh_tbl_lock);
+errout:
+ return err;
+}
+
+static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int family, tidx, nidx = 0;
+ int tbl_skip = cb->args[0];
+ int neigh_skip = cb->args[1];
+ struct neigh_table *tbl;
+
+ family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
+ struct neigh_parms *p;
+
+ if (tidx < tbl_skip || (family && tbl->family != family))
+ continue;
+
+ if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
+ NLM_F_MULTI) <= 0)
+ break;
+
+ for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
+ if (!net_eq(neigh_parms_net(p), net))
+ continue;
+
+ if (nidx < neigh_skip)
+ goto next;
+
+ if (neightbl_fill_param_info(skb, tbl, p,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGHTBL,
+ NLM_F_MULTI) <= 0)
+ goto out;
+ next:
+ nidx++;
+ }
+
+ neigh_skip = 0;
+ }
+out:
+ read_unlock(&neigh_tbl_lock);
+ cb->args[0] = tidx;
+ cb->args[1] = nidx;
+
+ return skb->len;
+}
+
+static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
+ u32 pid, u32 seq, int type, unsigned int flags)
+{
+ unsigned long now = jiffies;
+ struct nda_cacheinfo ci;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = neigh->ops->family;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = neigh->flags;
+ ndm->ndm_type = neigh->type;
+ ndm->ndm_ifindex = neigh->dev->ifindex;
+
+ NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
+
+ read_lock_bh(&neigh->lock);
+ ndm->ndm_state = neigh->nud_state;
+ if ((neigh->nud_state & NUD_VALID) &&
+ nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
+ read_unlock_bh(&neigh->lock);
+ goto nla_put_failure;
+ }
+
+ ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
+ ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
+ ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
+ ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
+ read_unlock_bh(&neigh->lock);
+
+ NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
+ NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static void neigh_update_notify(struct neighbour *neigh)
+{
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
+ __neigh_notify(neigh, RTM_NEWNEIGH, 0);
+}
+
+static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct net * net = sock_net(skb->sk);
+ struct neighbour *n;
+ int rc, h, s_h = cb->args[1];
+ int idx, s_idx = idx = cb->args[2];
+
+ read_lock_bh(&tbl->lock);
+ for (h = 0; h <= tbl->hash_mask; h++) {
+ if (h < s_h)
+ continue;
+ if (h > s_h)
+ s_idx = 0;
+ for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
+ if (dev_net(n->dev) != net)
+ continue;
+ if (idx < s_idx)
+ goto next;
+ if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI) <= 0) {
+ read_unlock_bh(&tbl->lock);
+ rc = -1;
+ goto out;
+ }
+ next:
+ idx++;
+ }
+ }
+ read_unlock_bh(&tbl->lock);
+ rc = skb->len;
+out:
+ cb->args[1] = h;
+ cb->args[2] = idx;
+ return rc;
+}
+
+static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct neigh_table *tbl;
+ int t, family, s_t;
+
+ read_lock(&neigh_tbl_lock);
+ family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+ s_t = cb->args[0];
+
+ for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
+ if (t < s_t || (family && tbl->family != family))
+ continue;
+ if (t > s_t)
+ memset(&cb->args[1], 0, sizeof(cb->args) -
+ sizeof(cb->args[0]));
+ if (neigh_dump_table(tbl, skb, cb) < 0)
+ break;
+ }
+ read_unlock(&neigh_tbl_lock);
+
+ cb->args[0] = t;
+ return skb->len;
+}
+
+void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
+{
+ int chain;
+
+ read_lock_bh(&tbl->lock);
+ for (chain = 0; chain <= tbl->hash_mask; chain++) {
+ struct neighbour *n;
+
+ for (n = tbl->hash_buckets[chain]; n; n = n->next)
+ cb(n, cookie);
+ }
+ read_unlock_bh(&tbl->lock);
+}
+EXPORT_SYMBOL(neigh_for_each);
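+
+/*
+ * Illustrative usage sketch (hypothetical names): neigh_for_each() walks
+ * every entry under tbl->lock and invokes the callback with the caller's
+ * cookie, e.g. to count the entries of a table:
+ *
+ *	static void example_count(struct neighbour *n, void *cookie)
+ *	{
+ *		(*(int *)cookie)++;
+ *	}
+ *
+ *	int count = 0;
+ *	neigh_for_each(&example_tbl, example_count, &count);
+ */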
+
+/* The tbl->lock must be held as a writer and BH disabled. */
+void __neigh_for_each_release(struct neigh_table *tbl,
+ int (*cb)(struct neighbour *))
+{
+ int chain;
+
+ for (chain = 0; chain <= tbl->hash_mask; chain++) {
+ struct neighbour *n, **np;
+
+ np = &tbl->hash_buckets[chain];
+ while ((n = *np) != NULL) {
+ int release;
+
+ write_lock(&n->lock);
+ release = cb(n);
+ if (release) {
+ *np = n->next;
+ n->dead = 1;
+ } else
+ np = &n->next;
+ write_unlock(&n->lock);
+ if (release)
+ neigh_cleanup_and_release(n);
+ }
+ }
+}
+EXPORT_SYMBOL(__neigh_for_each_release);
+
+#ifdef CONFIG_PROC_FS
+
+static struct neighbour *neigh_get_first(struct seq_file *seq)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+ struct neighbour *n = NULL;
+ int bucket = state->bucket;
+
+ state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
+ for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
+ n = tbl->hash_buckets[bucket];
+
+ while (n) {
+ if (!net_eq(dev_net(n->dev), net))
+ goto next;
+ if (state->neigh_sub_iter) {
+ loff_t fakep = 0;
+ void *v;
+
+ v = state->neigh_sub_iter(state, n, &fakep);
+ if (!v)
+ goto next;
+ }
+ if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ break;
+ if (n->nud_state & ~NUD_NOARP)
+ break;
+ next:
+ n = n->next;
+ }
+
+ if (n)
+ break;
+ }
+ state->bucket = bucket;
+
+ return n;
+}
+
+static struct neighbour *neigh_get_next(struct seq_file *seq,
+ struct neighbour *n,
+ loff_t *pos)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+
+ if (state->neigh_sub_iter) {
+ void *v = state->neigh_sub_iter(state, n, pos);
+ if (v)
+ return n;
+ }
+ n = n->next;
+
+ while (1) {
+ while (n) {
+ if (!net_eq(dev_net(n->dev), net))
+ goto next;
+ if (state->neigh_sub_iter) {
+ void *v = state->neigh_sub_iter(state, n, pos);
+ if (v)
+ return n;
+ goto next;
+ }
+ if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ break;
+
+ if (n->nud_state & ~NUD_NOARP)
+ break;
+ next:
+ n = n->next;
+ }
+
+ if (n)
+ break;
+
+ if (++state->bucket > tbl->hash_mask)
+ break;
+
+ n = tbl->hash_buckets[state->bucket];
+ }
+
+ if (n && pos)
+ --(*pos);
+ return n;
+}
+
+static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
+{
+ struct neighbour *n = neigh_get_first(seq);
+
+ if (n) {
+ --(*pos);
+ while (*pos) {
+ n = neigh_get_next(seq, n, pos);
+ if (!n)
+ break;
+ }
+ }
+ return *pos ? NULL : n;
+}
+
+static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+ struct pneigh_entry *pn = NULL;
+ int bucket = state->bucket;
+
+ state->flags |= NEIGH_SEQ_IS_PNEIGH;
+ for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
+ pn = tbl->phash_buckets[bucket];
+ while (pn && !net_eq(pneigh_net(pn), net))
+ pn = pn->next;
+ if (pn)
+ break;
+ }
+ state->bucket = bucket;
+
+ return pn;
+}
+
+static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
+ struct pneigh_entry *pn,
+ loff_t *pos)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+
+ pn = pn->next;
+ while (!pn) {
+ if (++state->bucket > PNEIGH_HASHMASK)
+ break;
+ pn = tbl->phash_buckets[state->bucket];
+ while (pn && !net_eq(pneigh_net(pn), net))
+ pn = pn->next;
+ if (pn)
+ break;
+ }
+
+ if (pn && pos)
+ --(*pos);
+
+ return pn;
+}
+
+static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
+{
+ struct pneigh_entry *pn = pneigh_get_first(seq);
+
+ if (pn) {
+ --(*pos);
+ while (*pos) {
+ pn = pneigh_get_next(seq, pn, pos);
+ if (!pn)
+ break;
+ }
+ }
+ return *pos ? NULL : pn;
+}
+
+static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
+{
+ struct neigh_seq_state *state = seq->private;
+ void *rc;
+ loff_t idxpos = *pos;
+
+ rc = neigh_get_idx(seq, &idxpos);
+ if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
+ rc = pneigh_get_idx(seq, &idxpos);
+
+ return rc;
+}
+
+void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+ __acquires(tbl->lock)
+{
+ struct neigh_seq_state *state = seq->private;
+
+ state->tbl = tbl;
+ state->bucket = 0;
+ state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
+
+ read_lock_bh(&tbl->lock);
+
+ return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
+}
+EXPORT_SYMBOL(neigh_seq_start);
+
+void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct neigh_seq_state *state;
+ void *rc;
+
+ if (v == SEQ_START_TOKEN) {
+ rc = neigh_get_first(seq);
+ goto out;
+ }
+
+ state = seq->private;
+ if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
+ rc = neigh_get_next(seq, v, NULL);
+ if (rc)
+ goto out;
+ if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
+ rc = pneigh_get_first(seq);
+ } else {
+ BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
+ rc = pneigh_get_next(seq, v, NULL);
+ }
+out:
+ ++(*pos);
+ return rc;
+}
+EXPORT_SYMBOL(neigh_seq_next);
+
+void neigh_seq_stop(struct seq_file *seq, void *v)
+ __releases(tbl->lock)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct neigh_table *tbl = state->tbl;
+
+ read_unlock_bh(&tbl->lock);
+}
+EXPORT_SYMBOL(neigh_seq_stop);
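+
+/*
+ * Illustrative usage sketch (hypothetical names): a protocol's /proc table
+ * can wire these helpers straight into its seq_operations, assuming the
+ * file is opened so that seq->private is a struct neigh_seq_state (e.g.
+ * via seq_open_net()).  The flags argument selects the NEIGH_SEQ_NEIGH_ONLY
+ * / NEIGH_SEQ_SKIP_NOARP behaviour handled above; example_seq_show stands
+ * in for the protocol-specific formatter.
+ *
+ *	static void *example_seq_start(struct seq_file *seq, loff_t *pos)
+ *	{
+ *		return neigh_seq_start(seq, pos, &example_tbl,
+ *				       NEIGH_SEQ_SKIP_NOARP);
+ *	}
+ *
+ *	static const struct seq_operations example_seq_ops = {
+ *		.start	= example_seq_start,
+ *		.next	= neigh_seq_next,
+ *		.stop	= neigh_seq_stop,
+ *		.show	= example_seq_show,
+ *	};
+ */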
+
+/* statistics via seq_file */
+
+static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ int cpu;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
+ return NULL;
+}
+
+static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ int cpu;
+
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
+ return NULL;
+}
+
+static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
+{
+
+}
+
+static int neigh_stat_seq_show(struct seq_file *seq, void *v)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ struct neigh_statistics *st = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
+ "%08lx %08lx %08lx %08lx %08lx\n",
+ atomic_read(&tbl->entries),
+
+ st->allocs,
+ st->destroys,
+ st->hash_grows,
+
+ st->lookups,
+ st->hits,
+
+ st->res_failed,
+
+ st->rcv_probes_mcast,
+ st->rcv_probes_ucast,
+
+ st->periodic_gc_runs,
+ st->forced_gc_runs,
+ st->unres_discards
+ );
+
+ return 0;
+}
+
+static const struct seq_operations neigh_stat_seq_ops = {
+ .start = neigh_stat_seq_start,
+ .next = neigh_stat_seq_next,
+ .stop = neigh_stat_seq_stop,
+ .show = neigh_stat_seq_show,
+};
+
+static int neigh_stat_seq_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &neigh_stat_seq_ops);
+
+ if (!ret) {
+ struct seq_file *sf = file->private_data;
+ sf->private = PDE(inode);
+ }
+ return ret;
+};
+
+static const struct file_operations neigh_stat_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = neigh_stat_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+static inline size_t neigh_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(struct nda_cacheinfo))
+ + nla_total_size(4); /* NDA_PROBES */
+}
+
+static void __neigh_notify(struct neighbour *n, int type, int flags)
+{
+ struct net *net = dev_net(n->dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
+ if (skb == NULL)
+ goto errout;
+
+ err = neigh_fill_info(skb, n, 0, 0, type, flags);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+#ifdef CONFIG_ARPD
+void neigh_app_ns(struct neighbour *n)
+{
+ __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
+}
+EXPORT_SYMBOL(neigh_app_ns);
+#endif /* CONFIG_ARPD */
+
+#ifdef CONFIG_SYSCTL
+
+static struct neigh_sysctl_table {
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table neigh_vars[__NET_NEIGH_MAX];
+ char *dev_name;
+} neigh_sysctl_template __read_mostly = {
+ .neigh_vars = {
+ {
+ .ctl_name = NET_NEIGH_MCAST_SOLICIT,
+ .procname = "mcast_solicit",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_UCAST_SOLICIT,
+ .procname = "ucast_solicit",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_APP_SOLICIT,
+ .procname = "app_solicit",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "retrans_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_REACHABLE_TIME,
+ .procname = "base_reachable_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
+ .procname = "delay_first_probe_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_STALE_TIME,
+ .procname = "gc_stale_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_UNRES_QLEN,
+ .procname = "unres_qlen",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_PROXY_QLEN,
+ .procname = "proxy_qlen",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "anycast_delay",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .procname = "proxy_delay",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .procname = "locktime",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
+ .procname = "retrans_time_ms",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
+ .procname = "base_reachable_time_ms",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_INTERVAL,
+ .procname = "gc_interval",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_THRESH1,
+ .procname = "gc_thresh1",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_THRESH2,
+ .procname = "gc_thresh2",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_THRESH3,
+ .procname = "gc_thresh3",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {},
+ },
+};
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+ int p_id, int pdev_id, char *p_name,
+ proc_handler *handler, ctl_handler *strategy)
+{
+ struct neigh_sysctl_table *t;
+ const char *dev_name_source = NULL;
+
+#define NEIGH_CTL_PATH_ROOT 0
+#define NEIGH_CTL_PATH_PROTO 1
+#define NEIGH_CTL_PATH_NEIGH 2
+#define NEIGH_CTL_PATH_DEV 3
+
+ struct ctl_path neigh_path[] = {
+ { .procname = "net", .ctl_name = CTL_NET, },
+ { .procname = "proto", .ctl_name = 0, },
+ { .procname = "neigh", .ctl_name = 0, },
+ { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
+ { },
+ };
+
+ t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto err;
+
+ t->neigh_vars[0].data = &p->mcast_probes;
+ t->neigh_vars[1].data = &p->ucast_probes;
+ t->neigh_vars[2].data = &p->app_probes;
+ t->neigh_vars[3].data = &p->retrans_time;
+ t->neigh_vars[4].data = &p->base_reachable_time;
+ t->neigh_vars[5].data = &p->delay_probe_time;
+ t->neigh_vars[6].data = &p->gc_staletime;
+ t->neigh_vars[7].data = &p->queue_len;
+ t->neigh_vars[8].data = &p->proxy_qlen;
+ t->neigh_vars[9].data = &p->anycast_delay;
+ t->neigh_vars[10].data = &p->proxy_delay;
+ t->neigh_vars[11].data = &p->locktime;
+ t->neigh_vars[12].data = &p->retrans_time;
+ t->neigh_vars[13].data = &p->base_reachable_time;
+
+ if (dev) {
+ dev_name_source = dev->name;
+ neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
+ /* Terminate the table early */
+ memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+ } else {
+ dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
+ t->neigh_vars[14].data = (int *)(p + 1);
+ t->neigh_vars[15].data = (int *)(p + 1) + 1;
+ t->neigh_vars[16].data = (int *)(p + 1) + 2;
+ t->neigh_vars[17].data = (int *)(p + 1) + 3;
+ }
+
+
+ if (handler || strategy) {
+ /* RetransTime */
+ t->neigh_vars[3].proc_handler = handler;
+ t->neigh_vars[3].strategy = strategy;
+ t->neigh_vars[3].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
+ /* ReachableTime */
+ t->neigh_vars[4].proc_handler = handler;
+ t->neigh_vars[4].strategy = strategy;
+ t->neigh_vars[4].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
+		/* RetransTime (in milliseconds) */
+ t->neigh_vars[12].proc_handler = handler;
+ t->neigh_vars[12].strategy = strategy;
+ t->neigh_vars[12].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
+ /* ReachableTime (in milliseconds) */
+ t->neigh_vars[13].proc_handler = handler;
+ t->neigh_vars[13].strategy = strategy;
+ t->neigh_vars[13].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
+ }
+
+ t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
+ if (!t->dev_name)
+ goto free;
+
+ neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
+ neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
+ neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
+ neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
+
+ t->sysctl_header =
+ register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
+ if (!t->sysctl_header)
+ goto free_procname;
+
+ p->sysctl_table = t;
+ return 0;
+
+free_procname:
+ kfree(t->dev_name);
+free:
+ kfree(t);
+err:
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(neigh_sysctl_register);
+
+void neigh_sysctl_unregister(struct neigh_parms *p)
+{
+ if (p->sysctl_table) {
+ struct neigh_sysctl_table *t = p->sysctl_table;
+ p->sysctl_table = NULL;
+ unregister_sysctl_table(t->sysctl_header);
+ kfree(t->dev_name);
+ kfree(t);
+ }
+}
+EXPORT_SYMBOL(neigh_sysctl_unregister);
+
+#endif /* CONFIG_SYSCTL */
+
+static int __init neigh_init(void)
+{
+ rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
+ rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
+
+ rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
+ rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
+
+ return 0;
+}
+
+subsys_initcall(neigh_init);
+
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.c.svn-base
new file mode 100644
index 00000000..484f5875
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.c.svn-base
@@ -0,0 +1,540 @@
+/*
+ * net-sysfs.c - network device class and attributes
+ *
+ * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+#include <linux/rtnetlink.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+#include "net-sysfs.h"
+
+#ifdef CONFIG_SYSFS
+static const char fmt_hex[] = "%#x\n";
+static const char fmt_long_hex[] = "%#lx\n";
+static const char fmt_dec[] = "%d\n";
+static const char fmt_ulong[] = "%lu\n";
+
+static inline int dev_isalive(const struct net_device *dev)
+{
+ return dev->reg_state <= NETREG_REGISTERED;
+}
+
+/* use same locking rules as GIF* ioctl's */
+static ssize_t netdev_show(const struct device *dev,
+ struct device_attribute *attr, char *buf,
+ ssize_t (*format)(const struct net_device *, char *))
+{
+ struct net_device *net = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(net))
+ ret = (*format)(net, buf);
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+/* generate a show function for simple field */
+#define NETDEVICE_SHOW(field, format_string) \
+static ssize_t format_##field(const struct net_device *net, char *buf) \
+{ \
+ return sprintf(buf, format_string, net->field); \
+} \
+static ssize_t show_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return netdev_show(dev, attr, buf, format_##field); \
+}
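+
+/*
+ * For reference, a sketch of what one instantiation expands to (e.g.
+ * NETDEVICE_SHOW(mtu, fmt_dec) further down in this file):
+ *
+ *	static ssize_t format_mtu(const struct net_device *net, char *buf)
+ *	{
+ *		return sprintf(buf, fmt_dec, net->mtu);
+ *	}
+ *	static ssize_t show_mtu(struct device *dev,
+ *				struct device_attribute *attr, char *buf)
+ *	{
+ *		return netdev_show(dev, attr, buf, format_mtu);
+ *	}
+ */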
+
+
+/* use same locking and permission rules as SIF* ioctl's */
+static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len,
+ int (*set)(struct net_device *, unsigned long))
+{
+ struct net_device *net = to_net_dev(dev);
+ char *endp;
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ new = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ goto err;
+
+ if (!rtnl_trylock())
+ return -ERESTARTSYS;
+
+ if (dev_isalive(net)) {
+ if ((ret = (*set)(net, new)) == 0)
+ ret = len;
+ }
+ rtnl_unlock();
+ err:
+ return ret;
+}
+
+NETDEVICE_SHOW(dev_id, fmt_hex);
+NETDEVICE_SHOW(addr_len, fmt_dec);
+NETDEVICE_SHOW(iflink, fmt_dec);
+NETDEVICE_SHOW(ifindex, fmt_dec);
+NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(type, fmt_dec);
+NETDEVICE_SHOW(link_mode, fmt_dec);
+
+/* use same locking rules as GIFHWADDR ioctl's */
+static ssize_t show_address(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(net))
+ ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
+ read_unlock(&dev_base_lock);
+ return ret;
+}
+
+static ssize_t show_broadcast(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ if (dev_isalive(net))
+ return sysfs_format_mac(buf, net->broadcast, net->addr_len);
+ return -EINVAL;
+}
+
+static ssize_t show_carrier(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ if (netif_running(netdev)) {
+ return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
+ }
+ return -EINVAL;
+}
+
+static ssize_t show_dormant(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+
+ if (netif_running(netdev))
+ return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
+
+ return -EINVAL;
+}
+
+static const char *operstates[] = {
+ "unknown",
+ "notpresent", /* currently unused */
+ "down",
+ "lowerlayerdown",
+ "testing", /* currently unused */
+ "dormant",
+ "up"
+};
+
+static ssize_t show_operstate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct net_device *netdev = to_net_dev(dev);
+ unsigned char operstate;
+
+ read_lock(&dev_base_lock);
+ operstate = netdev->operstate;
+ if (!netif_running(netdev))
+ operstate = IF_OPER_DOWN;
+ read_unlock(&dev_base_lock);
+
+ if (operstate >= ARRAY_SIZE(operstates))
+ return -EINVAL; /* should not happen */
+
+ return sprintf(buf, "%s\n", operstates[operstate]);
+}
+
+/* read-write attributes */
+NETDEVICE_SHOW(mtu, fmt_dec);
+
+static int change_mtu(struct net_device *net, unsigned long new_mtu)
+{
+ return dev_set_mtu(net, (int) new_mtu);
+}
+
+static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_mtu);
+}
+
+NETDEVICE_SHOW(flags, fmt_hex);
+
+static int change_flags(struct net_device *net, unsigned long new_flags)
+{
+ return dev_change_flags(net, (unsigned) new_flags);
+}
+
+static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_flags);
+}
+
+NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
+
+static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
+{
+ net->tx_queue_len = new_len;
+ return 0;
+}
+
+static ssize_t store_tx_queue_len(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_tx_queue_len);
+}
+
+static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ size_t count = len;
+ ssize_t ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* ignore trailing newline */
+ if (len > 0 && buf[len - 1] == '\n')
+ --count;
+
+ rtnl_lock();
+ ret = dev_set_alias(netdev, buf, count);
+ rtnl_unlock();
+
+ return ret < 0 ? ret : len;
+}
+
+static ssize_t show_ifalias(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = 0;
+
+ rtnl_lock();
+ if (netdev->ifalias)
+ ret = sprintf(buf, "%s\n", netdev->ifalias);
+ rtnl_unlock();
+ return ret;
+}
+
+static struct device_attribute net_class_attributes[] = {
+ __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
+ __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
+ __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
+ __ATTR(iflink, S_IRUGO, show_iflink, NULL),
+ __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
+ __ATTR(features, S_IRUGO, show_features, NULL),
+ __ATTR(type, S_IRUGO, show_type, NULL),
+ __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
+ __ATTR(address, S_IRUGO, show_address, NULL),
+ __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
+ __ATTR(carrier, S_IRUGO, show_carrier, NULL),
+ __ATTR(dormant, S_IRUGO, show_dormant, NULL),
+ __ATTR(operstate, S_IRUGO, show_operstate, NULL),
+ __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
+ __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
+ __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
+ store_tx_queue_len),
+ {}
+};
+
+/* Show a given attribute in the statistics group */
+static ssize_t netstat_show(const struct device *d,
+ struct device_attribute *attr, char *buf,
+ unsigned long offset)
+{
+ struct net_device *dev = to_net_dev(d);
+ ssize_t ret = -EINVAL;
+
+ WARN_ON(offset > sizeof(struct net_device_stats) ||
+ offset % sizeof(unsigned long) != 0);
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(dev)) {
+ const struct net_device_stats *stats = dev_get_stats(dev);
+ ret = sprintf(buf, fmt_ulong,
+ *(unsigned long *)(((u8 *) stats) + offset));
+ }
+ read_unlock(&dev_base_lock);
+ return ret;
+}
+
+/* generate a read-only statistics attribute */
+#define NETSTAT_ENTRY(name) \
+static ssize_t show_##name(struct device *d, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return netstat_show(d, attr, buf, \
+ offsetof(struct net_device_stats, name)); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+NETSTAT_ENTRY(rx_packets);
+NETSTAT_ENTRY(tx_packets);
+NETSTAT_ENTRY(rx_bytes);
+NETSTAT_ENTRY(tx_bytes);
+NETSTAT_ENTRY(rx_errors);
+NETSTAT_ENTRY(tx_errors);
+NETSTAT_ENTRY(rx_dropped);
+NETSTAT_ENTRY(tx_dropped);
+NETSTAT_ENTRY(multicast);
+NETSTAT_ENTRY(collisions);
+NETSTAT_ENTRY(rx_length_errors);
+NETSTAT_ENTRY(rx_over_errors);
+NETSTAT_ENTRY(rx_crc_errors);
+NETSTAT_ENTRY(rx_frame_errors);
+NETSTAT_ENTRY(rx_fifo_errors);
+NETSTAT_ENTRY(rx_missed_errors);
+NETSTAT_ENTRY(tx_aborted_errors);
+NETSTAT_ENTRY(tx_carrier_errors);
+NETSTAT_ENTRY(tx_fifo_errors);
+NETSTAT_ENTRY(tx_heartbeat_errors);
+NETSTAT_ENTRY(tx_window_errors);
+NETSTAT_ENTRY(rx_compressed);
+NETSTAT_ENTRY(tx_compressed);
+
+static struct attribute *netstat_attrs[] = {
+ &dev_attr_rx_packets.attr,
+ &dev_attr_tx_packets.attr,
+ &dev_attr_rx_bytes.attr,
+ &dev_attr_tx_bytes.attr,
+ &dev_attr_rx_errors.attr,
+ &dev_attr_tx_errors.attr,
+ &dev_attr_rx_dropped.attr,
+ &dev_attr_tx_dropped.attr,
+ &dev_attr_multicast.attr,
+ &dev_attr_collisions.attr,
+ &dev_attr_rx_length_errors.attr,
+ &dev_attr_rx_over_errors.attr,
+ &dev_attr_rx_crc_errors.attr,
+ &dev_attr_rx_frame_errors.attr,
+ &dev_attr_rx_fifo_errors.attr,
+ &dev_attr_rx_missed_errors.attr,
+ &dev_attr_tx_aborted_errors.attr,
+ &dev_attr_tx_carrier_errors.attr,
+ &dev_attr_tx_fifo_errors.attr,
+ &dev_attr_tx_heartbeat_errors.attr,
+ &dev_attr_tx_window_errors.attr,
+ &dev_attr_rx_compressed.attr,
+ &dev_attr_tx_compressed.attr,
+ NULL
+};
+
+
+static struct attribute_group netstat_group = {
+ .name = "statistics",
+ .attrs = netstat_attrs,
+};
+
+#ifdef CONFIG_WIRELESS_EXT_SYSFS
+/* helper function that does all the locking etc for wireless stats */
+static ssize_t wireless_show(struct device *d, char *buf,
+ ssize_t (*format)(const struct iw_statistics *,
+ char *))
+{
+ struct net_device *dev = to_net_dev(d);
+ const struct iw_statistics *iw = NULL;
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(dev)) {
+ if (dev->wireless_handlers &&
+ dev->wireless_handlers->get_wireless_stats)
+ iw = dev->wireless_handlers->get_wireless_stats(dev);
+ if (iw != NULL)
+ ret = (*format)(iw, buf);
+ }
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+/* show function template for wireless fields */
+#define WIRELESS_SHOW(name, field, format_string) \
+static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
+{ \
+ return sprintf(buf, format_string, iw->field); \
+} \
+static ssize_t show_iw_##name(struct device *d, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return wireless_show(d, buf, format_iw_##name); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
+
+WIRELESS_SHOW(status, status, fmt_hex);
+WIRELESS_SHOW(link, qual.qual, fmt_dec);
+WIRELESS_SHOW(level, qual.level, fmt_dec);
+WIRELESS_SHOW(noise, qual.noise, fmt_dec);
+WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
+WIRELESS_SHOW(crypt, discard.code, fmt_dec);
+WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
+WIRELESS_SHOW(misc, discard.misc, fmt_dec);
+WIRELESS_SHOW(retries, discard.retries, fmt_dec);
+WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
+
+static struct attribute *wireless_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_link.attr,
+ &dev_attr_level.attr,
+ &dev_attr_noise.attr,
+ &dev_attr_nwid.attr,
+ &dev_attr_crypt.attr,
+ &dev_attr_fragment.attr,
+ &dev_attr_retries.attr,
+ &dev_attr_misc.attr,
+ &dev_attr_beacon.attr,
+ NULL
+};
+
+static struct attribute_group wireless_group = {
+ .name = "wireless",
+ .attrs = wireless_attrs,
+};
+#endif
+
+#endif /* CONFIG_SYSFS */
+
+#ifdef CONFIG_HOTPLUG
+static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
+{
+ struct net_device *dev = to_net_dev(d);
+ int retval;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return 0;
+
+ /* pass interface to uevent. */
+ retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
+ if (retval)
+ goto exit;
+
+ /* pass ifindex to uevent.
+ * ifindex is useful as it won't change (interface name may change)
+ * and is what RtNetlink uses natively. */
+ retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
+
+exit:
+ return retval;
+}
+#endif
+
+/*
+ * netdev_release -- destroy and free a dead device.
+ * Called when last reference to device kobject is gone.
+ */
+static void netdev_release(struct device *d)
+{
+ struct net_device *dev = to_net_dev(d);
+
+ BUG_ON(dev->reg_state != NETREG_RELEASED);
+
+ kfree(dev->ifalias);
+ kfree((char *)dev - dev->padded);
+}
+
+static struct class net_class = {
+ .name = "net",
+ .dev_release = netdev_release,
+#ifdef CONFIG_SYSFS
+ .dev_attrs = net_class_attributes,
+#endif /* CONFIG_SYSFS */
+#ifdef CONFIG_HOTPLUG
+ .dev_uevent = netdev_uevent,
+#endif
+};
+
+/* Delete sysfs entries but hold kobject reference until after all
+ * netdev references are gone.
+ */
+void netdev_unregister_kobject(struct net_device * net)
+{
+ struct device *dev = &(net->dev);
+
+ kobject_get(&dev->kobj);
+
+ if (dev_net(net) != &init_net)
+ return;
+
+ device_del(dev);
+}
+
+/* Create sysfs entries for network device. */
+int netdev_register_kobject(struct net_device *net)
+{
+ struct device *dev = &(net->dev);
+ struct attribute_group **groups = net->sysfs_groups;
+
+ dev->class = &net_class;
+ dev->platform_data = net;
+ dev->groups = groups;
+
+ BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
+ dev_set_name(dev, net->name);
+
+#ifdef CONFIG_SYSFS
+ *groups++ = &netstat_group;
+
+#ifdef CONFIG_WIRELESS_EXT_SYSFS
+ if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
+ *groups++ = &wireless_group;
+#endif
+#endif /* CONFIG_SYSFS */
+
+ if (dev_net(net) != &init_net)
+ return 0;
+
+ return device_add(dev);
+}
+
+int netdev_class_create_file(struct class_attribute *class_attr)
+{
+ return class_create_file(&net_class, class_attr);
+}
+
+void netdev_class_remove_file(struct class_attribute *class_attr)
+{
+ class_remove_file(&net_class, class_attr);
+}
+
+EXPORT_SYMBOL(netdev_class_create_file);
+EXPORT_SYMBOL(netdev_class_remove_file);
+
+void netdev_initialize_kobject(struct net_device *net)
+{
+ struct device *device = &(net->dev);
+ device_initialize(device);
+}
+
+int netdev_kobject_init(void)
+{
+ return class_register(&net_class);
+}
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.h.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.h.svn-base
new file mode 100644
index 00000000..14e75242
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/net-sysfs.h.svn-base
@@ -0,0 +1,8 @@
+#ifndef __NET_SYSFS_H__
+#define __NET_SYSFS_H__
+
+int netdev_kobject_init(void);
+int netdev_register_kobject(struct net_device *);
+void netdev_unregister_kobject(struct net_device *);
+void netdev_initialize_kobject(struct net_device *);
+#endif
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/netevent.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/netevent.c.svn-base
new file mode 100644
index 00000000..95f81de8
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/netevent.c.svn-base
@@ -0,0 +1,70 @@
+/*
+ * Network event notifiers
+ *
+ * Authors:
+ * Tom Tucker <tom@opengridcomputing.com>
+ * Steve Wise <swise@opengridcomputing.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Fixes:
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+
+static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
+
+/**
+ * register_netevent_notifier - register a netevent notifier block
+ * @nb: notifier
+ *
+ * Register a notifier to be called when a netevent occurs.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
+ */
+int register_netevent_notifier(struct notifier_block *nb)
+{
+ int err;
+
+ err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
+ return err;
+}
+
+/**
+ * unregister_netevent_notifier - unregister a netevent notifier block
+ * @nb: notifier
+ *
+ * Unregister a notifier previously registered by
+ * register_netevent_notifier().  The notifier is unlinked from the
+ * kernel structures and may then be reused.  A negative errno code
+ * is returned on a failure.
+ */
+
+int unregister_netevent_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
+}
+
+/**
+ * call_netevent_notifiers - call all netevent notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @v: pointer passed unmodified to notifier function
+ *
+ * Call all netevent notifier blocks.  Parameters and return value
+ * are as for notifier_call_chain().
+ */
+
+int call_netevent_notifiers(unsigned long val, void *v)
+{
+ return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_netevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
+EXPORT_SYMBOL_GPL(call_netevent_notifiers);
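+
+/*
+ * Illustrative usage sketch (hypothetical names): a consumer registers a
+ * notifier_block and dispatches on the event type, e.g.
+ * NETEVENT_NEIGH_UPDATE as raised from net/core/neighbour.c:
+ *
+ *	static int example_netevent(struct notifier_block *nb,
+ *				    unsigned long event, void *ctx)
+ *	{
+ *		if (event == NETEVENT_NEIGH_UPDATE) {
+ *			struct neighbour *neigh = ctx;
+ *			... react to the updated neighbour entry ...
+ *		}
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block example_nb = {
+ *		.notifier_call	= example_netevent,
+ *	};
+ *
+ *	register_netevent_notifier(&example_nb);
+ */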
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/rtnetlink.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/rtnetlink.c.svn-base
new file mode 100644
index 00000000..790dd205
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/rtnetlink.c.svn-base
@@ -0,0 +1,1430 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Routing netlink socket interface: protocol independent part.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Fixes:
+ * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/capability.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+#include <linux/if_addr.h>
+#include <linux/nsproxy.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/string.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/arp.h>
+#include <net/route.h>
+#include <net/udp.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <net/fib_rules.h>
+#include <net/rtnetlink.h>
+
+struct rtnl_link
+{
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+};
+
+static DEFINE_MUTEX(rtnl_mutex);
+
+void rtnl_lock(void)
+{
+ mutex_lock(&rtnl_mutex);
+}
+
+void __rtnl_unlock(void)
+{
+ mutex_unlock(&rtnl_mutex);
+}
+
+void rtnl_unlock(void)
+{
+ /* This fellow will unlock it for us. */
+ netdev_run_todo();
+}
+
+int rtnl_trylock(void)
+{
+ return mutex_trylock(&rtnl_mutex);
+}
+
+int rtnl_is_locked(void)
+{
+ return mutex_is_locked(&rtnl_mutex);
+}
+
+static struct rtnl_link *rtnl_msg_handlers[NPROTO];
+
+static inline int rtm_msgindex(int msgtype)
+{
+ int msgindex = msgtype - RTM_BASE;
+
+ /*
+ * msgindex < 0 implies someone tried to register a netlink
+ * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
+ * the message type has not been added to linux/rtnetlink.h
+ */
+ BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
+
+ return msgindex;
+}
+
+static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
+{
+ struct rtnl_link *tab;
+
+ tab = rtnl_msg_handlers[protocol];
+ if (tab == NULL || tab[msgindex].doit == NULL)
+ tab = rtnl_msg_handlers[PF_UNSPEC];
+
+ return tab ? tab[msgindex].doit : NULL;
+}
+
+static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
+{
+ struct rtnl_link *tab;
+
+ tab = rtnl_msg_handlers[protocol];
+ if (tab == NULL || tab[msgindex].dumpit == NULL)
+ tab = rtnl_msg_handlers[PF_UNSPEC];
+
+ return tab ? tab[msgindex].dumpit : NULL;
+}
+
+/**
+ * __rtnl_register - Register a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ *
+ * Registers the specified function pointers (at least one of them has
+ * to be non-NULL) to be called whenever a request message for the
+ * specified protocol family and message type is received.
+ *
+ * The special protocol family PF_UNSPEC may be used to define fallback
+ * function pointers for the case when no entry for the specific protocol
+ * family exists.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+{
+ struct rtnl_link *tab;
+ int msgindex;
+
+ BUG_ON(protocol < 0 || protocol >= NPROTO);
+ msgindex = rtm_msgindex(msgtype);
+
+ tab = rtnl_msg_handlers[protocol];
+ if (tab == NULL) {
+ tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOBUFS;
+
+ rtnl_msg_handlers[protocol] = tab;
+ }
+
+ if (doit)
+ tab[msgindex].doit = doit;
+
+ if (dumpit)
+ tab[msgindex].dumpit = dumpit;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_register);
+
+/**
+ * rtnl_register - Register a rtnetlink message type
+ *
+ * Identical to __rtnl_register() but panics on failure. This is useful
+ * as failure of this function is very unlikely; it can only happen due
+ * to lack of memory when allocating the chain to store all message
+ * handlers for a protocol. Meant for use in init functions where a lack
+ * of memory implies there is no sense in continuing.
+ */
+void rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+{
+ if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0)
+ panic("Unable to register rtnetlink message handler, "
+ "protocol = %d, message type = %d\n",
+ protocol, msgtype);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_register);
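+
+/*
+ * Typical usage: a protocol registers a doit handler for requests and a
+ * dumpit handler for NLM_F_DUMP requests from its init function, exactly
+ * as neigh_init() does in net/core/neighbour.c above, e.g.:
+ *
+ *	rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
+ *	rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
+ */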
+
+/**
+ * rtnl_unregister - Unregister a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_unregister(int protocol, int msgtype)
+{
+ int msgindex;
+
+ BUG_ON(protocol < 0 || protocol >= NPROTO);
+ msgindex = rtm_msgindex(msgtype);
+
+ if (rtnl_msg_handlers[protocol] == NULL)
+ return -ENOENT;
+
+ rtnl_msg_handlers[protocol][msgindex].doit = NULL;
+ rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_unregister);
+
+/**
+ * rtnl_unregister_all - Unregister all rtnetlink message types of a protocol
+ * @protocol: Protocol family or PF_UNSPEC
+ *
+ * Identical to calling rtnl_unregister() for all registered message types
+ * of a certain protocol family.
+ */
+void rtnl_unregister_all(int protocol)
+{
+ BUG_ON(protocol < 0 || protocol >= NPROTO);
+
+ kfree(rtnl_msg_handlers[protocol]);
+ rtnl_msg_handlers[protocol] = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_unregister_all);
+
+static LIST_HEAD(link_ops);
+
+/**
+ * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
+ * @ops: struct rtnl_link_ops * to register
+ *
+ * The caller must hold the rtnl_mutex. This function should be used
+ * by drivers that create devices during module initialization. It
+ * must be called before registering the devices.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_link_register(struct rtnl_link_ops *ops)
+{
+ if (!ops->dellink)
+ ops->dellink = unregister_netdevice;
+
+ list_add_tail(&ops->list, &link_ops);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_link_register);
+
+/**
+ * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
+ * @ops: struct rtnl_link_ops * to register
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_link_register(struct rtnl_link_ops *ops)
+{
+ int err;
+
+ rtnl_lock();
+ err = __rtnl_link_register(ops);
+ rtnl_unlock();
+ return err;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_link_register);
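+
+/*
+ * Illustrative usage sketch (hypothetical names): a virtual-device driver
+ * declares its rtnl_link_ops and registers it from module init.  Only
+ * .kind is referenced in this file; the .setup callback is assumed here
+ * as the usual way such a driver constructs its net_device:
+ *
+ *	static struct rtnl_link_ops example_link_ops __read_mostly = {
+ *		.kind	= "example",
+ *		.setup	= example_setup,
+ *	};
+ *
+ *	static int __init example_init_module(void)
+ *	{
+ *		return rtnl_link_register(&example_link_ops);
+ *	}
+ */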
+
+static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
+{
+ struct net_device *dev;
+restart:
+ for_each_netdev(net, dev) {
+ if (dev->rtnl_link_ops == ops) {
+ ops->dellink(dev);
+ goto restart;
+ }
+ }
+}
+
+void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
+{
+ rtnl_lock();
+ __rtnl_kill_links(net, ops);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(rtnl_kill_links);
+
+/**
+ * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
+ * @ops: struct rtnl_link_ops * to unregister
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void __rtnl_link_unregister(struct rtnl_link_ops *ops)
+{
+ struct net *net;
+
+ for_each_net(net) {
+ __rtnl_kill_links(net, ops);
+ }
+ list_del(&ops->list);
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+
+/**
+ * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
+ * @ops: struct rtnl_link_ops * to unregister
+ */
+void rtnl_link_unregister(struct rtnl_link_ops *ops)
+{
+ rtnl_lock();
+ __rtnl_link_unregister(ops);
+ rtnl_unlock();
+}
+
+EXPORT_SYMBOL_GPL(rtnl_link_unregister);
+
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+ const struct rtnl_link_ops *ops;
+
+ list_for_each_entry(ops, &link_ops, list) {
+ if (!strcmp(ops->kind, kind))
+ return ops;
+ }
+ return NULL;
+}
+
+static size_t rtnl_link_get_size(const struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+ size_t size;
+
+ if (!ops)
+ return 0;
+
+ size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
+ nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
+
+ if (ops->get_size)
+ /* IFLA_INFO_DATA + nested data */
+ size += nlmsg_total_size(sizeof(struct nlattr)) +
+ ops->get_size(dev);
+
+ if (ops->get_xstats_size)
+ size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+
+ return size;
+}
+
+static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+ struct nlattr *linkinfo, *data;
+ int err = -EMSGSIZE;
+
+ linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
+ if (linkinfo == NULL)
+ goto out;
+
+ if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
+ goto err_cancel_link;
+ if (ops->fill_xstats) {
+ err = ops->fill_xstats(skb, dev);
+ if (err < 0)
+ goto err_cancel_link;
+ }
+ if (ops->fill_info) {
+ data = nla_nest_start(skb, IFLA_INFO_DATA);
+ if (data == NULL)
+ goto err_cancel_link;
+ err = ops->fill_info(skb, dev);
+ if (err < 0)
+ goto err_cancel_data;
+ nla_nest_end(skb, data);
+ }
+
+ nla_nest_end(skb, linkinfo);
+ return 0;
+
+err_cancel_data:
+ nla_nest_cancel(skb, data);
+err_cancel_link:
+ nla_nest_cancel(skb, linkinfo);
+out:
+ return err;
+}
+
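+/*
+ * Editor's illustrative sketch -- not part of the upstream file. The two
+ * callbacks below show the contract rtnl_link_get_size()/rtnl_link_fill()
+ * expect from a link type: get_size() reserves room, fill_info() emits the
+ * attributes nested under IFLA_INFO_DATA. The attribute id 1 stands in for
+ * a type-specific enum value.
+ */
+#if 0
+static size_t demo_get_size(const struct net_device *dev)
+{
+	return nla_total_size(4);	/* room for one u32 attribute */
+}
+
+static int demo_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+	NLA_PUT_U32(skb, 1, 0);		/* e.g. a flags word */
+	return 0;
+
+nla_put_failure:
+	return -EMSGSIZE;		/* rtnl_link_fill() cancels the nest */
+}
+#endif
+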
+static const int rtm_min[RTM_NR_FAMILIES] =
+{
+ [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ [RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
+ [RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)),
+ [RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
+ [RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)),
+ [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+ [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+};
+
+static const int rta_max[RTM_NR_FAMILIES] =
+{
+ [RTM_FAM(RTM_NEWLINK)] = IFLA_MAX,
+ [RTM_FAM(RTM_NEWADDR)] = IFA_MAX,
+ [RTM_FAM(RTM_NEWROUTE)] = RTA_MAX,
+ [RTM_FAM(RTM_NEWRULE)] = FRA_MAX,
+ [RTM_FAM(RTM_NEWQDISC)] = TCA_MAX,
+ [RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX,
+ [RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX,
+ [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
+};
+
+void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+ struct rtattr *rta;
+ int size = RTA_LENGTH(attrlen);
+
+ rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
+ rta->rta_type = attrtype;
+ rta->rta_len = size;
+ memcpy(RTA_DATA(rta), data, attrlen);
+ memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
+}
+
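+/*
+ * Editor's illustrative sketch -- not part of the upstream file. __rta_fill()
+ * appends a legacy struct rtattr without any bounds checking, so a caller not
+ * going through the RTA_PUT() macro should verify tailroom itself, roughly
+ * like this (demo_put_priority is a placeholder name):
+ */
+#if 0
+static int demo_put_priority(struct sk_buff *skb, u32 prio)
+{
+	if (skb_tailroom(skb) < (int)RTA_SPACE(sizeof(prio)))
+		return -EMSGSIZE;
+	__rta_fill(skb, RTA_PRIORITY, sizeof(prio), &prio);
+	return 0;
+}
+#endif
+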
+int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
+{
+ struct sock *rtnl = net->rtnl;
+ int err = 0;
+
+ NETLINK_CB(skb).dst_group = group;
+ if (echo)
+ atomic_inc(&skb->users);
+ netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
+ if (echo)
+ err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
+ return err;
+}
+
+int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
+{
+ struct sock *rtnl = net->rtnl;
+
+ return nlmsg_unicast(rtnl, skb, pid);
+}
+
+int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+ struct nlmsghdr *nlh, gfp_t flags)
+{
+ struct sock *rtnl = net->rtnl;
+ int report = 0;
+
+ if (nlh)
+ report = nlmsg_report(nlh);
+
+ return nlmsg_notify(rtnl, skb, pid, group, report, flags);
+}
+
+void rtnl_set_sk_err(struct net *net, u32 group, int error)
+{
+ struct sock *rtnl = net->rtnl;
+
+ netlink_set_err(rtnl, 0, group, error);
+}
+
+int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
+{
+ struct nlattr *mx;
+ int i, valid = 0;
+
+ mx = nla_nest_start(skb, RTA_METRICS);
+ if (mx == NULL)
+ return -ENOBUFS;
+
+ for (i = 0; i < RTAX_MAX; i++) {
+ if (metrics[i]) {
+ valid++;
+ NLA_PUT_U32(skb, i+1, metrics[i]);
+ }
+ }
+
+ if (!valid) {
+ nla_nest_cancel(skb, mx);
+ return 0;
+ }
+
+ return nla_nest_end(skb, mx);
+
+nla_put_failure:
+ nla_nest_cancel(skb, mx);
+ return -EMSGSIZE;
+}
+
+int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
+ u32 ts, u32 tsage, long expires, u32 error)
+{
+ struct rta_cacheinfo ci = {
+ .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+ .rta_used = dst->__use,
+ .rta_clntref = atomic_read(&(dst->__refcnt)),
+ .rta_error = error,
+ .rta_id = id,
+ .rta_ts = ts,
+ .rta_tsage = tsage,
+ };
+
+ if (expires)
+ ci.rta_expires = jiffies_to_clock_t(expires);
+
+ return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
+
+static void set_operstate(struct net_device *dev, unsigned char transition)
+{
+ unsigned char operstate = dev->operstate;
+
+ switch(transition) {
+ case IF_OPER_UP:
+ if ((operstate == IF_OPER_DORMANT ||
+ operstate == IF_OPER_UNKNOWN) &&
+ !netif_dormant(dev))
+ operstate = IF_OPER_UP;
+ break;
+
+ case IF_OPER_DORMANT:
+ if (operstate == IF_OPER_UP ||
+ operstate == IF_OPER_UNKNOWN)
+ operstate = IF_OPER_DORMANT;
+ break;
+ }
+
+ if (dev->operstate != operstate) {
+ write_lock_bh(&dev_base_lock);
+ dev->operstate = operstate;
+ write_unlock_bh(&dev_base_lock);
+ netdev_state_change(dev);
+ }
+}
+
+static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
+ const struct net_device_stats *b)
+{
+ a->rx_packets = b->rx_packets;
+ a->tx_packets = b->tx_packets;
+ a->rx_bytes = b->rx_bytes;
+ a->tx_bytes = b->tx_bytes;
+ a->rx_errors = b->rx_errors;
+ a->tx_errors = b->tx_errors;
+ a->rx_dropped = b->rx_dropped;
+ a->tx_dropped = b->tx_dropped;
+
+ a->multicast = b->multicast;
+ a->collisions = b->collisions;
+
+ a->rx_length_errors = b->rx_length_errors;
+ a->rx_over_errors = b->rx_over_errors;
+ a->rx_crc_errors = b->rx_crc_errors;
+ a->rx_frame_errors = b->rx_frame_errors;
+ a->rx_fifo_errors = b->rx_fifo_errors;
+ a->rx_missed_errors = b->rx_missed_errors;
+
+ a->tx_aborted_errors = b->tx_aborted_errors;
+ a->tx_carrier_errors = b->tx_carrier_errors;
+ a->tx_fifo_errors = b->tx_fifo_errors;
+ a->tx_heartbeat_errors = b->tx_heartbeat_errors;
+ a->tx_window_errors = b->tx_window_errors;
+
+ a->rx_compressed = b->rx_compressed;
+ a->tx_compressed = b->tx_compressed;
+};
+
+static inline size_t if_nlmsg_size(const struct net_device *dev)
+{
+ return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
+ + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ + nla_total_size(sizeof(struct rtnl_link_ifmap))
+ + nla_total_size(sizeof(struct rtnl_link_stats))
+ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ + nla_total_size(4) /* IFLA_TXQLEN */
+ + nla_total_size(4) /* IFLA_WEIGHT */
+ + nla_total_size(4) /* IFLA_MTU */
+ + nla_total_size(4) /* IFLA_LINK */
+ + nla_total_size(4) /* IFLA_MASTER */
+ + nla_total_size(1) /* IFLA_OPERSTATE */
+ + nla_total_size(1) /* IFLA_LINKMODE */
+ + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
+}
+
+static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ int type, u32 pid, u32 seq, u32 change,
+ unsigned int flags)
+{
+ struct netdev_queue *txq;
+ struct ifinfomsg *ifm;
+ struct nlmsghdr *nlh;
+ const struct net_device_stats *stats;
+ struct nlattr *attr;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ifm = nlmsg_data(nlh);
+ ifm->ifi_family = AF_UNSPEC;
+ ifm->__ifi_pad = 0;
+ ifm->ifi_type = dev->type;
+ ifm->ifi_index = dev->ifindex;
+ ifm->ifi_flags = dev_get_flags(dev);
+ ifm->ifi_change = change;
+
+ NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+ NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
+ NLA_PUT_U8(skb, IFLA_OPERSTATE,
+ netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
+ NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
+ NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+
+ if (dev->ifindex != dev->iflink)
+ NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
+
+ if (dev->master)
+ NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
+
+ txq = netdev_get_tx_queue(dev, 0);
+ if (txq->qdisc_sleeping)
+ NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
+
+ if (dev->ifalias)
+ NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+
+ if (1) {
+ struct rtnl_link_ifmap map = {
+ .mem_start = dev->mem_start,
+ .mem_end = dev->mem_end,
+ .base_addr = dev->base_addr,
+ .irq = dev->irq,
+ .dma = dev->dma,
+ .port = dev->if_port,
+ };
+ NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
+ }
+
+ if (dev->addr_len) {
+ NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
+ NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
+ }
+
+ attr = nla_reserve(skb, IFLA_STATS,
+ sizeof(struct rtnl_link_stats));
+ if (attr == NULL)
+ goto nla_put_failure;
+
+ stats = dev_get_stats(dev);
+ copy_rtnl_link_stats(nla_data(attr), stats);
+
+ if (dev->rtnl_link_ops) {
+ if (rtnl_link_fill(skb, dev) < 0)
+ goto nla_put_failure;
+ }
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int idx;
+ int s_idx = cb->args[0];
+ struct net_device *dev;
+
+ idx = 0;
+ for_each_netdev(net, dev) {
+ if (idx < s_idx)
+ goto cont;
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
+ break;
+cont:
+ idx++;
+ }
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
+const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
+ [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
+ [IFLA_MTU] = { .type = NLA_U32 },
+ [IFLA_LINK] = { .type = NLA_U32 },
+ [IFLA_TXQLEN] = { .type = NLA_U32 },
+ [IFLA_WEIGHT] = { .type = NLA_U32 },
+ [IFLA_OPERSTATE] = { .type = NLA_U8 },
+ [IFLA_LINKMODE] = { .type = NLA_U8 },
+ [IFLA_LINKINFO] = { .type = NLA_NESTED },
+ [IFLA_NET_NS_PID] = { .type = NLA_U32 },
+ [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
+};
+
+static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ [IFLA_INFO_KIND] = { .type = NLA_STRING },
+ [IFLA_INFO_DATA] = { .type = NLA_NESTED },
+};
+
+static struct net *get_net_ns_by_pid(pid_t pid)
+{
+ struct task_struct *tsk;
+ struct net *net;
+
+	/* Look up the network namespace */
+ net = ERR_PTR(-ESRCH);
+ rcu_read_lock();
+ tsk = find_task_by_vpid(pid);
+ if (tsk) {
+ struct nsproxy *nsproxy;
+ nsproxy = task_nsproxy(tsk);
+ if (nsproxy)
+ net = get_net(nsproxy->net_ns);
+ }
+ rcu_read_unlock();
+ return net;
+}
+
+static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+{
+ if (dev) {
+ if (tb[IFLA_ADDRESS] &&
+ nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
+ return -EINVAL;
+
+ if (tb[IFLA_BROADCAST] &&
+ nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ struct nlattr **tb, char *ifname, int modified)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int send_addr_notify = 0;
+ int err;
+
+ if (tb[IFLA_NET_NS_PID]) {
+ struct net *net;
+ net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
+ if (IS_ERR(net)) {
+ err = PTR_ERR(net);
+ goto errout;
+ }
+ err = dev_change_net_namespace(dev, net, ifname);
+ put_net(net);
+ if (err)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_MAP]) {
+ struct rtnl_link_ifmap *u_map;
+ struct ifmap k_map;
+
+ if (!ops->ndo_set_config) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (!netif_device_present(dev)) {
+ err = -ENODEV;
+ goto errout;
+ }
+
+ u_map = nla_data(tb[IFLA_MAP]);
+ k_map.mem_start = (unsigned long) u_map->mem_start;
+ k_map.mem_end = (unsigned long) u_map->mem_end;
+ k_map.base_addr = (unsigned short) u_map->base_addr;
+ k_map.irq = (unsigned char) u_map->irq;
+ k_map.dma = (unsigned char) u_map->dma;
+ k_map.port = (unsigned char) u_map->port;
+
+ err = ops->ndo_set_config(dev, &k_map);
+ if (err < 0)
+ goto errout;
+
+ modified = 1;
+ }
+
+ if (tb[IFLA_ADDRESS]) {
+ struct sockaddr *sa;
+ int len;
+
+ if (!ops->ndo_set_mac_address) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (!netif_device_present(dev)) {
+ err = -ENODEV;
+ goto errout;
+ }
+
+ len = sizeof(sa_family_t) + dev->addr_len;
+ sa = kmalloc(len, GFP_KERNEL);
+ if (!sa) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ sa->sa_family = dev->type;
+ memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
+ dev->addr_len);
+ err = ops->ndo_set_mac_address(dev, sa);
+ kfree(sa);
+ if (err)
+ goto errout;
+ send_addr_notify = 1;
+ modified = 1;
+ }
+
+ if (tb[IFLA_MTU]) {
+ err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ /*
+	 * An interface selected by index, with an interface name
+	 * also provided, implies that a name change has been
+	 * requested.
+ */
+ if (ifm->ifi_index > 0 && ifname[0]) {
+ err = dev_change_name(dev, ifname);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_IFALIAS]) {
+ err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
+ nla_len(tb[IFLA_IFALIAS]));
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_BROADCAST]) {
+ nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
+ send_addr_notify = 1;
+ }
+
+ if (ifm->ifi_flags || ifm->ifi_change) {
+ unsigned int flags = ifm->ifi_flags;
+
+ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
+ if (ifm->ifi_change)
+ flags = (flags & ifm->ifi_change) |
+ (dev->flags & ~ifm->ifi_change);
+ err = dev_change_flags(dev, flags);
+ if (err < 0)
+ goto errout;
+ }
+
+ if (tb[IFLA_TXQLEN])
+ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+
+ if (tb[IFLA_OPERSTATE])
+ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+
+ if (tb[IFLA_LINKMODE]) {
+ write_lock_bh(&dev_base_lock);
+ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ write_unlock_bh(&dev_base_lock);
+ }
+
+ err = 0;
+
+errout:
+ if (err < 0 && modified && net_ratelimit())
+ printk(KERN_WARNING "A link change request failed with "
+		       "some changes committed already. Interface %s may "
+ "have been left with an inconsistent configuration, "
+ "please check.\n", dev->name);
+
+ if (send_addr_notify)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+
+static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ifinfomsg *ifm;
+ struct net_device *dev;
+ int err;
+ struct nlattr *tb[IFLA_MAX+1];
+ char ifname[IFNAMSIZ];
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ goto errout;
+
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ ifname[0] = '\0';
+
+ err = -EINVAL;
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = dev_get_by_index(net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME])
+ dev = dev_get_by_name(net, ifname);
+ else
+ goto errout;
+
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto errout;
+ }
+
+ if ((err = validate_linkmsg(dev, tb)) < 0)
+ goto errout_dev;
+
+ err = do_setlink(dev, ifm, tb, ifname, 0);
+errout_dev:
+ dev_put(dev);
+errout:
+ return err;
+}
+
+static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ const struct rtnl_link_ops *ops;
+ struct net_device *dev;
+ struct ifinfomsg *ifm;
+ char ifname[IFNAMSIZ];
+ struct nlattr *tb[IFLA_MAX+1];
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ return -EINVAL;
+
+ if (!dev)
+ return -ENODEV;
+
+ ops = dev->rtnl_link_ops;
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ ops->dellink(dev);
+ return 0;
+}
+
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+ const struct rtnl_link_ops *ops, struct nlattr *tb[])
+{
+ int err;
+ struct net_device *dev;
+
+ err = -ENOMEM;
+ dev = alloc_netdev(ops->priv_size, ifname, ops->setup);
+ if (!dev)
+ goto err;
+
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto err_free;
+ }
+
+ dev_net_set(dev, net);
+ dev->rtnl_link_ops = ops;
+
+ if (tb[IFLA_MTU])
+ dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+ if (tb[IFLA_ADDRESS])
+ memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
+ nla_len(tb[IFLA_ADDRESS]));
+ if (tb[IFLA_BROADCAST])
+ memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
+ nla_len(tb[IFLA_BROADCAST]));
+ if (tb[IFLA_TXQLEN])
+ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+ if (tb[IFLA_OPERSTATE])
+ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+ if (tb[IFLA_LINKMODE])
+ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+
+ return dev;
+
+err_free:
+ free_netdev(dev);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ const struct rtnl_link_ops *ops;
+ struct net_device *dev;
+ struct ifinfomsg *ifm;
+ char kind[MODULE_NAME_LEN];
+ char ifname[IFNAMSIZ];
+ struct nlattr *tb[IFLA_MAX+1];
+ struct nlattr *linkinfo[IFLA_INFO_MAX+1];
+ int err;
+
+#ifdef CONFIG_MODULES
+replay:
+#endif
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ ifname[0] = '\0';
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+ else if (ifname[0])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ dev = NULL;
+
+ if ((err = validate_linkmsg(dev, tb)) < 0)
+ return err;
+
+ if (tb[IFLA_LINKINFO]) {
+ err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
+ tb[IFLA_LINKINFO], ifla_info_policy);
+ if (err < 0)
+ return err;
+ } else
+ memset(linkinfo, 0, sizeof(linkinfo));
+
+ if (linkinfo[IFLA_INFO_KIND]) {
+ nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
+ ops = rtnl_link_ops_get(kind);
+ } else {
+ kind[0] = '\0';
+ ops = NULL;
+ }
+
+ if (1) {
+ struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
+
+ if (ops) {
+ if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
+ err = nla_parse_nested(attr, ops->maxtype,
+ linkinfo[IFLA_INFO_DATA],
+ ops->policy);
+ if (err < 0)
+ return err;
+ data = attr;
+ }
+ if (ops->validate) {
+ err = ops->validate(tb, data);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ if (dev) {
+ int modified = 0;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+ if (linkinfo[IFLA_INFO_DATA]) {
+ if (!ops || ops != dev->rtnl_link_ops ||
+ !ops->changelink)
+ return -EOPNOTSUPP;
+
+ err = ops->changelink(dev, tb, data);
+ if (err < 0)
+ return err;
+ modified = 1;
+ }
+
+ return do_setlink(dev, ifm, tb, ifname, modified);
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ return -ENODEV;
+
+ if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change)
+ return -EOPNOTSUPP;
+ if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
+ return -EOPNOTSUPP;
+
+ if (!ops) {
+#ifdef CONFIG_MODULES
+ if (kind[0]) {
+ __rtnl_unlock();
+ request_module("rtnl-link-%s", kind);
+ rtnl_lock();
+ ops = rtnl_link_ops_get(kind);
+ if (ops)
+ goto replay;
+ }
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ if (!ifname[0])
+ snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+
+ dev = rtnl_create_link(net, ifname, ops, tb);
+
+ if (IS_ERR(dev))
+ err = PTR_ERR(dev);
+ else if (ops->newlink)
+ err = ops->newlink(dev, tb, data);
+ else
+ err = register_netdevice(dev);
+
+ if (err < 0 && !IS_ERR(dev))
+ free_netdev(dev);
+ return err;
+ }
+}
+
+static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ifinfomsg *ifm;
+ struct nlattr *tb[IFLA_MAX+1];
+ struct net_device *dev = NULL;
+ struct sk_buff *nskb;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ return err;
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0) {
+ dev = dev_get_by_index(net, ifm->ifi_index);
+ if (dev == NULL)
+ return -ENODEV;
+ } else
+ return -EINVAL;
+
+ nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+ if (nskb == NULL) {
+ err = -ENOBUFS;
+ goto errout;
+ }
+
+ err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq, 0, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(nskb);
+ goto errout;
+ }
+ err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
+errout:
+ dev_put(dev);
+
+ return err;
+}
+
+static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx;
+ int s_idx = cb->family;
+
+ if (s_idx == 0)
+ s_idx = 1;
+ for (idx=1; idx<NPROTO; idx++) {
+ int type = cb->nlh->nlmsg_type-RTM_BASE;
+ if (idx < s_idx || idx == PF_PACKET)
+ continue;
+ if (rtnl_msg_handlers[idx] == NULL ||
+ rtnl_msg_handlers[idx][type].dumpit == NULL)
+ continue;
+ if (idx > s_idx)
+ memset(&cb->args[0], 0, sizeof(cb->args));
+ if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
+ break;
+ }
+ cb->family = idx;
+
+ return skb->len;
+}
+
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+
+ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+}
+
+/* Protected by RTNL semaphore. */
+static struct rtattr **rta_buf;
+static int rtattr_max;
+
+/* Process one rtnetlink message. */
+
+static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct net *net = sock_net(skb->sk);
+ rtnl_doit_func doit;
+ int sz_idx, kind;
+ int min_len;
+ int family;
+ int type;
+ int err;
+
+ type = nlh->nlmsg_type;
+ if (type > RTM_MAX)
+ return -EOPNOTSUPP;
+
+ type -= RTM_BASE;
+
+ /* All the messages must have at least 1 byte length */
+ if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
+ return 0;
+
+ family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
+ if (family >= NPROTO)
+ return -EAFNOSUPPORT;
+
+ sz_idx = type>>2;
+ kind = type&3;
+
+ if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+ struct sock *rtnl;
+ rtnl_dumpit_func dumpit;
+
+ dumpit = rtnl_get_dumpit(family, type);
+ if (dumpit == NULL)
+ return -EOPNOTSUPP;
+
+ __rtnl_unlock();
+ rtnl = net->rtnl;
+ err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+ rtnl_lock();
+ return err;
+ }
+
+ memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
+
+ min_len = rtm_min[sz_idx];
+ if (nlh->nlmsg_len < min_len)
+ return -EINVAL;
+
+ if (nlh->nlmsg_len > min_len) {
+ int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
+ struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len);
+
+ while (RTA_OK(attr, attrlen)) {
+ unsigned flavor = attr->rta_type;
+ if (flavor) {
+ if (flavor > rta_max[sz_idx])
+ return -EINVAL;
+ rta_buf[flavor-1] = attr;
+ }
+ attr = RTA_NEXT(attr, attrlen);
+ }
+ }
+
+ doit = rtnl_get_doit(family, type);
+ if (doit == NULL)
+ return -EOPNOTSUPP;
+
+ return doit(skb, nlh, (void *)&rta_buf[0]);
+}
+
+static void rtnetlink_rcv(struct sk_buff *skb)
+{
+ rtnl_lock();
+ netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
+ rtnl_unlock();
+}
+
+static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+ break;
+ case NETDEV_REGISTER:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+ break;
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ break;
+ case NETDEV_CHANGE:
+ case NETDEV_GOING_DOWN:
+ break;
+ default:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rtnetlink_dev_notifier = {
+ .notifier_call = rtnetlink_event,
+};
+
+
+static int rtnetlink_net_init(struct net *net)
+{
+ struct sock *sk;
+ sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
+ rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
+ if (!sk)
+ return -ENOMEM;
+ net->rtnl = sk;
+ return 0;
+}
+
+static void rtnetlink_net_exit(struct net *net)
+{
+ netlink_kernel_release(net->rtnl);
+ net->rtnl = NULL;
+}
+
+static struct pernet_operations rtnetlink_net_ops = {
+ .init = rtnetlink_net_init,
+ .exit = rtnetlink_net_exit,
+};
+
+void __init rtnetlink_init(void)
+{
+ int i;
+
+ rtattr_max = 0;
+ for (i = 0; i < ARRAY_SIZE(rta_max); i++)
+ if (rta_max[i] > rtattr_max)
+ rtattr_max = rta_max[i];
+ rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
+ if (!rta_buf)
+ panic("rtnetlink_init: cannot allocate rta_buf\n");
+
+ if (register_pernet_subsys(&rtnetlink_net_ops))
+ panic("rtnetlink_init: cannot initialize rtnetlink\n");
+
+ netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
+ register_netdevice_notifier(&rtnetlink_dev_notifier);
+
+ rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
+ rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
+ rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
+ rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);
+
+ rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
+ rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
+}
+
+EXPORT_SYMBOL(__rta_fill);
+EXPORT_SYMBOL(rtnetlink_put_metrics);
+EXPORT_SYMBOL(rtnl_lock);
+EXPORT_SYMBOL(rtnl_trylock);
+EXPORT_SYMBOL(rtnl_unlock);
+EXPORT_SYMBOL(rtnl_is_locked);
+EXPORT_SYMBOL(rtnl_unicast);
+EXPORT_SYMBOL(rtnl_notify);
+EXPORT_SYMBOL(rtnl_set_sk_err);
+EXPORT_SYMBOL(rtnl_create_link);
+EXPORT_SYMBOL(ifla_policy);
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/skb_dma_map.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/skb_dma_map.c.svn-base
new file mode 100644
index 00000000..86234923
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/skb_dma_map.c.svn-base
@@ -0,0 +1,66 @@
+/* skb_dma_map.c: DMA mapping helpers for socket buffers.
+ *
+ * Copyright (C) David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+
+int skb_dma_map(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir)
+{
+ struct skb_shared_info *sp = skb_shinfo(skb);
+ dma_addr_t map;
+ int i;
+
+ map = dma_map_single(dev, skb->data,
+ skb_headlen(skb), dir);
+ if (dma_mapping_error(dev, map))
+ goto out_err;
+
+ sp->dma_maps[0] = map;
+ for (i = 0; i < sp->nr_frags; i++) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ map = dma_map_page(dev, fp->page, fp->page_offset,
+ fp->size, dir);
+ if (dma_mapping_error(dev, map))
+ goto unwind;
+ sp->dma_maps[i + 1] = map;
+ }
+ sp->num_dma_maps = i + 1;
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ dma_unmap_page(dev, sp->dma_maps[i + 1],
+ fp->size, dir);
+ }
+ dma_unmap_single(dev, sp->dma_maps[0],
+ skb_headlen(skb), dir);
+out_err:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(skb_dma_map);
+
+void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir)
+{
+ struct skb_shared_info *sp = skb_shinfo(skb);
+ int i;
+
+ dma_unmap_single(dev, sp->dma_maps[0],
+ skb_headlen(skb), dir);
+ for (i = 0; i < sp->nr_frags; i++) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ dma_unmap_page(dev, sp->dma_maps[i + 1],
+ fp->size, dir);
+ }
+}
+EXPORT_SYMBOL(skb_dma_unmap);
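+
+/*
+ * Editor's illustrative sketch -- not part of the upstream file. A driver's
+ * transmit path might use the helpers above roughly as follows; hwdev and
+ * the descriptor-ring step are placeholders for driver-specific code.
+ */
+#if 0
+static int demo_xmit_map(struct device *hwdev, struct sk_buff *skb)
+{
+	struct skb_shared_info *sp;
+	int i;
+
+	if (skb_dma_map(hwdev, skb, DMA_TO_DEVICE))
+		return -ENOMEM;		/* head or a fragment failed to map */
+
+	/* Slot 0 is the linear head, slots 1..nr_frags are the pages. */
+	sp = skb_shinfo(skb);
+	for (i = 0; i < sp->num_dma_maps; i++) {
+		dma_addr_t addr = sp->dma_maps[i];
+		/* post @addr to the hardware descriptor ring here */
+		(void)addr;
+	}
+
+	/* ... and once the hardware has finished with the buffer: */
+	skb_dma_unmap(hwdev, skb, DMA_TO_DEVICE);
+	return 0;
+}
+#endif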
diff --git a/libdde_linux26/contrib/net/core/.svn/text-base/sock.c.svn-base b/libdde_linux26/contrib/net/core/.svn/text-base/sock.c.svn-base
new file mode 100644
index 00000000..5f97caa1
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/.svn/text-base/sock.c.svn-base
@@ -0,0 +1,2297 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Generic socket support routines. Memory allocators, socket lock/release
+ * handler for protocols to use and generic option handler.
+ *
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche, <flla@stud.uni-sb.de>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ *
+ * Fixes:
+ * Alan Cox : Numerous verify_area() problems
+ * Alan Cox : Connecting on a connecting socket
+ * now returns an error for tcp.
+ * Alan Cox : sock->protocol is set correctly.
+ * and is not sometimes left as 0.
+ * Alan Cox : connect handles icmp errors on a
+ * connect properly. Unfortunately there
+ * is a restart syscall nasty there. I
+ * can't match BSD without hacking the C
+ * library. Ideas urgently sought!
+ * Alan Cox : Disallow bind() to addresses that are
+ * not ours - especially broadcast ones!!
+ * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
+ * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
+ * instead they leave that for the DESTROY timer.
+ * Alan Cox : Clean up error flag in accept
+ * Alan Cox : TCP ack handling is buggy, the DESTROY timer
+ * was buggy. Put a remove_sock() in the handler
+ * for memory when we hit 0. Also altered the timer
+ * code. The ACK stuff can wait and needs major
+ * TCP layer surgery.
+ * Alan Cox : Fixed TCP ack bug, removed remove sock
+ * and fixed timer/inet_bh race.
+ * Alan Cox : Added zapped flag for TCP
+ * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
+ * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
+ * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
+ * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
+ * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
+ * Rick Sladkey : Relaxed UDP rules for matching packets.
+ * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
+ * Pauline Middelink : identd support
+ * Alan Cox : Fixed connect() taking signals I think.
+ * Alan Cox : SO_LINGER supported
+ * Alan Cox : Error reporting fixes
+ * Anonymous : inet_create tidied up (sk->reuse setting)
+ * Alan Cox : inet sockets don't set sk->type!
+ * Alan Cox : Split socket option code
+ * Alan Cox : Callbacks
+ * Alan Cox : Nagle flag for Charles & Johannes stuff
+ * Alex : Removed restriction on inet fioctl
+ * Alan Cox : Splitting INET from NET core
+ * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
+ * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
+ * Alan Cox : Split IP from generic code
+ * Alan Cox : New kfree_skbmem()
+ * Alan Cox : Make SO_DEBUG superuser only.
+ * Alan Cox : Allow anyone to clear SO_DEBUG
+ * (compatibility fix)
+ * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
+ * Alan Cox : Allocator for a socket is settable.
+ * Alan Cox : SO_ERROR includes soft errors.
+ * Alan Cox : Allow NULL arguments on some SO_ opts
+ * Alan Cox : Generic socket allocation to make hooks
+ * easier (suggested by Craig Metz).
+ * Michael Pall : SO_ERROR returns positive errno again
+ * Steve Whitehouse: Added default destructor to free
+ * protocol private data.
+ * Steve Whitehouse: Added various other default routines
+ * common to several socket families.
+ * Chris Evans : Call suser() check last on F_SETOWN
+ * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
+ * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
+ * Andi Kleen : Fix write_space callback
+ * Chris Evans : Security fixes - signedness again
+ * Arnaldo C. Melo : cleanups, use skb_queue_purge
+ *
+ * To Fix:
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <net/protocol.h>
+#include <linux/skbuff.h>
+#include <net/net_namespace.h>
+#include <net/request_sock.h>
+#include <net/sock.h>
+#include <net/xfrm.h>
+#include <linux/ipsec.h>
+
+#include <linux/filter.h>
+
+#ifdef CONFIG_INET
+#include <net/tcp.h>
+#endif
+
+/*
+ * Each address family might have different locking rules, so we have
+ * one slock key per address family:
+ */
+static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_slock_keys[AF_MAX];
+
+/*
+ * Make lock validator output more readable. (we pre-construct these
+ * strings build-time, so that runtime initialization of socket
+ * locks is fast):
+ */
+static const char *af_family_key_strings[AF_MAX+1] = {
+ "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
+ "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
+ "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
+ "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
+ "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
+ "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
+ "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
+ "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
+ "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
+ "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
+ "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
+ "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
+ "sk_lock-AF_MAX"
+};
+static const char *af_family_slock_key_strings[AF_MAX+1] = {
+ "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
+ "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
+ "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
+ "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
+ "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
+ "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
+ "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
+ "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
+ "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
+ "slock-27" , "slock-28" , "slock-AF_CAN" ,
+ "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
+ "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
+ "slock-AF_MAX"
+};
+static const char *af_family_clock_key_strings[AF_MAX+1] = {
+ "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
+ "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
+ "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
+ "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
+ "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
+ "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
+ "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
+ "clock-21" , "clock-AF_SNA" , "clock-AF_IRDA" ,
+ "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
+ "clock-27" , "clock-28" , "clock-AF_CAN" ,
+ "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
+ "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
+ "clock-AF_MAX"
+};
+
+/*
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+static struct lock_class_key af_callback_keys[AF_MAX];
+
+/* Take into consideration the size of the struct sk_buff overhead in the
+ * determination of these values, since that is non-constant across
+ * platforms. This makes socket queueing behavior and performance
+ * not depend upon such differences.
+ */
+#define _SK_MEM_PACKETS 256
+#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
+#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+
+/* Run time adjustable parameters. */
+__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+
+/* Maximal space eaten by iovec or ancillary data plus some space */
+int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
+
+static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
+{
+ struct timeval tv;
+
+ if (optlen < sizeof(tv))
+ return -EINVAL;
+ if (copy_from_user(&tv, optval, sizeof(tv)))
+ return -EFAULT;
+ if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
+ return -EDOM;
+
+ if (tv.tv_sec < 0) {
+ static int warned __read_mostly;
+
+ *timeo_p = 0;
+ if (warned < 10 && net_ratelimit()) {
+ warned++;
+ printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
+ "tries to set negative timeout\n",
+ current->comm, task_pid_nr(current));
+ }
+ return 0;
+ }
+ *timeo_p = MAX_SCHEDULE_TIMEOUT;
+ if (tv.tv_sec == 0 && tv.tv_usec == 0)
+ return 0;
+ if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
+ *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
+ return 0;
+}
+
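+/*
+ * Editor's note, a worked example for sock_set_timeout() above (assuming
+ * HZ == 100, i.e. one jiffy per 10 ms): a user-supplied timeval of
+ * { .tv_sec = 2, .tv_usec = 500000 } becomes
+ *
+ *	*timeo_p = 2 * 100 + (500000 + 9999) / 10000 = 250 jiffies,
+ *
+ * i.e. 2.5 s rounded up to whole jiffies, while { 0, 0 } leaves the
+ * timeout at MAX_SCHEDULE_TIMEOUT (block indefinitely).
+ */
+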
+static void sock_warn_obsolete_bsdism(const char *name)
+{
+ static int warned;
+ static char warncomm[TASK_COMM_LEN];
+ if (strcmp(warncomm, current->comm) && warned < 5) {
+ strcpy(warncomm, current->comm);
+ printk(KERN_WARNING "process `%s' is using obsolete "
+ "%s SO_BSDCOMPAT\n", warncomm, name);
+ warned++;
+ }
+}
+
+static void sock_disable_timestamp(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_TIMESTAMP)) {
+ sock_reset_flag(sk, SOCK_TIMESTAMP);
+ net_disable_timestamp();
+ }
+}
+
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err = 0;
+ int skb_len;
+
+ /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
+ number of warnings when compiling with -W --ANK
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = sk_filter(sk, skb);
+ if (err)
+ goto out;
+
+ if (!sk_rmem_schedule(sk, skb->truesize)) {
+ err = -ENOBUFS;
+ goto out;
+ }
+
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+out:
+ return err;
+}
+EXPORT_SYMBOL(sock_queue_rcv_skb);
+
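+/*
+ * Editor's illustrative sketch -- not part of the upstream file. A protocol
+ * receive handler typically hands packets to its owning socket like this;
+ * demo_proto_rcv is a placeholder, and on failure the caller still owns
+ * (and must free) the skb.
+ */
+#if 0
+static int demo_proto_rcv(struct sock *sk, struct sk_buff *skb)
+{
+	int err = sock_queue_rcv_skb(sk, skb);
+
+	if (err < 0) {
+		/* receive buffer full, accounting failure, or the
+		 * socket filter rejected the packet */
+		kfree_skb(skb);
+	}
+	return err;
+}
+#endif
+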
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+{
+ int rc = NET_RX_SUCCESS;
+
+ if (sk_filter(sk, skb))
+ goto discard_and_relse;
+
+ skb->dev = NULL;
+
+ if (nested)
+ bh_lock_sock_nested(sk);
+ else
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ /*
+ * trylock + unlock semantics:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
+
+ rc = sk_backlog_rcv(sk, skb);
+
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ } else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
+out:
+ sock_put(sk);
+ return rc;
+discard_and_relse:
+ kfree_skb(skb);
+ goto out;
+}
+EXPORT_SYMBOL(sk_receive_skb);
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk->sk_dst_cache;
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk->sk_dst_cache = NULL;
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+EXPORT_SYMBOL(__sk_dst_check);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk_dst_reset(sk);
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+EXPORT_SYMBOL(sk_dst_check);
+
+static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+{
+ int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+ struct net *net = sock_net(sk);
+ char devname[IFNAMSIZ];
+ int index;
+
+ /* Sorry... */
+ ret = -EPERM;
+ if (!capable(CAP_NET_RAW))
+ goto out;
+
+ ret = -EINVAL;
+ if (optlen < 0)
+ goto out;
+
+ /* Bind this socket to a particular device like "eth0",
+ * as specified in the passed interface name. If the
+ * name is "" or the option length is zero the socket
+ * is not bound.
+ */
+ if (optlen > IFNAMSIZ - 1)
+ optlen = IFNAMSIZ - 1;
+ memset(devname, 0, sizeof(devname));
+
+ ret = -EFAULT;
+ if (copy_from_user(devname, optval, optlen))
+ goto out;
+
+ if (devname[0] == '\0') {
+ index = 0;
+ } else {
+ struct net_device *dev = dev_get_by_name(net, devname);
+
+ ret = -ENODEV;
+ if (!dev)
+ goto out;
+
+ index = dev->ifindex;
+ dev_put(dev);
+ }
+
+ lock_sock(sk);
+ sk->sk_bound_dev_if = index;
+ sk_dst_reset(sk);
+ release_sock(sk);
+
+ ret = 0;
+
+out:
+#endif
+
+ return ret;
+}
+
+static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+{
+ if (valbool)
+ sock_set_flag(sk, bit);
+ else
+ sock_reset_flag(sk, bit);
+}
+
+/*
+ * This is meant for all protocols to use and covers goings on
+ * at the socket level. Everything here is generic.
+ */
+
+int sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk=sock->sk;
+ int val;
+ int valbool;
+ struct linger ling;
+ int ret = 0;
+
+ /*
+ * Options without arguments
+ */
+
+ if (optname == SO_BINDTODEVICE)
+ return sock_bindtodevice(sk, optval, optlen);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ valbool = val?1:0;
+
+ lock_sock(sk);
+
+ switch(optname) {
+ case SO_DEBUG:
+ if (val && !capable(CAP_NET_ADMIN)) {
+ ret = -EACCES;
+ } else
+ sock_valbool_flag(sk, SOCK_DBG, valbool);
+ break;
+ case SO_REUSEADDR:
+ sk->sk_reuse = valbool;
+ break;
+ case SO_TYPE:
+ case SO_ERROR:
+ ret = -ENOPROTOOPT;
+ break;
+ case SO_DONTROUTE:
+ sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+ break;
+ case SO_BROADCAST:
+ sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
+ break;
+ case SO_SNDBUF:
+		/* Don't error on this; BSD doesn't, and if you think
+		   about it this is right. Otherwise apps have to
+		   play 'guess the biggest size' games. RCVBUF/SNDBUF
+		   are treated in BSD as hints */
+
+ if (val > sysctl_wmem_max)
+ val = sysctl_wmem_max;
+set_sndbuf:
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ if ((val * 2) < SOCK_MIN_SNDBUF)
+ sk->sk_sndbuf = SOCK_MIN_SNDBUF;
+ else
+ sk->sk_sndbuf = val * 2;
+
+ /*
+ * Wake up sending tasks if we
+ * upped the value.
+ */
+ sk->sk_write_space(sk);
+ break;
+
+ case SO_SNDBUFFORCE:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ goto set_sndbuf;
+
+ case SO_RCVBUF:
+		/* Don't error on this; BSD doesn't, and if you think
+		   about it this is right. Otherwise apps have to
+		   play 'guess the biggest size' games. RCVBUF/SNDBUF
+		   are treated in BSD as hints */
+
+ if (val > sysctl_rmem_max)
+ val = sysctl_rmem_max;
+set_rcvbuf:
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ /*
+ * We double it on the way in to account for
+ * "struct sk_buff" etc. overhead. Applications
+ * assume that the SO_RCVBUF setting they make will
+ * allow that much actual data to be received on that
+ * socket.
+ *
+ * Applications are unaware that "struct sk_buff" and
+ * other overheads allocate from the receive buffer
+ * during socket buffer allocation.
+ *
+ * And after considering the possible alternatives,
+ * returning the value we actually used in getsockopt
+ * is the most desirable behavior.
+ */
+ if ((val * 2) < SOCK_MIN_RCVBUF)
+ sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
+ else
+ sk->sk_rcvbuf = val * 2;
+ break;
+
+ case SO_RCVBUFFORCE:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ goto set_rcvbuf;
+
+ case SO_KEEPALIVE:
+#ifdef CONFIG_INET
+ if (sk->sk_protocol == IPPROTO_TCP)
+ tcp_set_keepalive(sk, valbool);
+#endif
+ sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
+ break;
+
+ case SO_OOBINLINE:
+ sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
+ break;
+
+ case SO_NO_CHECK:
+ sk->sk_no_check = valbool;
+ break;
+
+ case SO_PRIORITY:
+ if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
+ sk->sk_priority = val;
+ else
+ ret = -EPERM;
+ break;
+
+ case SO_LINGER:
+ if (optlen < sizeof(ling)) {
+ ret = -EINVAL; /* 1003.1g */
+ break;
+ }
+ if (copy_from_user(&ling,optval,sizeof(ling))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (!ling.l_onoff)
+ sock_reset_flag(sk, SOCK_LINGER);
+ else {
+#if (BITS_PER_LONG == 32)
+ if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+ sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
+ else
+#endif
+ sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
+ sock_set_flag(sk, SOCK_LINGER);
+ }
+ break;
+
+ case SO_BSDCOMPAT:
+ sock_warn_obsolete_bsdism("setsockopt");
+ break;
+
+ case SO_PASSCRED:
+ if (valbool)
+ set_bit(SOCK_PASSCRED, &sock->flags);
+ else
+ clear_bit(SOCK_PASSCRED, &sock->flags);
+ break;
+
+ case SO_TIMESTAMP:
+ case SO_TIMESTAMPNS:
+ if (valbool) {
+ if (optname == SO_TIMESTAMP)
+ sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+ else
+ sock_set_flag(sk, SOCK_RCVTSTAMPNS);
+ sock_set_flag(sk, SOCK_RCVTSTAMP);
+ sock_enable_timestamp(sk);
+ } else {
+ sock_reset_flag(sk, SOCK_RCVTSTAMP);
+ sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+ }
+ break;
+
+ case SO_RCVLOWAT:
+ if (val < 0)
+ val = INT_MAX;
+ sk->sk_rcvlowat = val ? : 1;
+ break;
+
+ case SO_RCVTIMEO:
+ ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
+ break;
+
+ case SO_SNDTIMEO:
+ ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
+ break;
+
+ case SO_ATTACH_FILTER:
+ ret = -EINVAL;
+ if (optlen == sizeof(struct sock_fprog)) {
+ struct sock_fprog fprog;
+
+ ret = -EFAULT;
+ if (copy_from_user(&fprog, optval, sizeof(fprog)))
+ break;
+
+ ret = sk_attach_filter(&fprog, sk);
+ }
+ break;
+
+ case SO_DETACH_FILTER:
+ ret = sk_detach_filter(sk);
+ break;
+
+ case SO_PASSSEC:
+ if (valbool)
+ set_bit(SOCK_PASSSEC, &sock->flags);
+ else
+ clear_bit(SOCK_PASSSEC, &sock->flags);
+ break;
+ case SO_MARK:
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else {
+ sk->sk_mark = val;
+ }
+ break;
+
+ /* We implement the SO_SNDLOWAT etc to
+ not be settable (1003.1g 5.3) */
+ default:
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ release_sock(sk);
+ return ret;
+}
+
+
+int sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ union {
+ int val;
+ struct linger ling;
+ struct timeval tm;
+ } v;
+
+ unsigned int lv = sizeof(int);
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
+ memset(&v, 0, sizeof(v));
+
+ switch(optname) {
+ case SO_DEBUG:
+ v.val = sock_flag(sk, SOCK_DBG);
+ break;
+
+ case SO_DONTROUTE:
+ v.val = sock_flag(sk, SOCK_LOCALROUTE);
+ break;
+
+ case SO_BROADCAST:
+ v.val = !!sock_flag(sk, SOCK_BROADCAST);
+ break;
+
+ case SO_SNDBUF:
+ v.val = sk->sk_sndbuf;
+ break;
+
+ case SO_RCVBUF:
+ v.val = sk->sk_rcvbuf;
+ break;
+
+ case SO_REUSEADDR:
+ v.val = sk->sk_reuse;
+ break;
+
+ case SO_KEEPALIVE:
+ v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
+ break;
+
+ case SO_TYPE:
+ v.val = sk->sk_type;
+ break;
+
+ case SO_ERROR:
+ v.val = -sock_error(sk);
+ if (v.val==0)
+ v.val = xchg(&sk->sk_err_soft, 0);
+ break;
+
+ case SO_OOBINLINE:
+ v.val = !!sock_flag(sk, SOCK_URGINLINE);
+ break;
+
+ case SO_NO_CHECK:
+ v.val = sk->sk_no_check;
+ break;
+
+ case SO_PRIORITY:
+ v.val = sk->sk_priority;
+ break;
+
+ case SO_LINGER:
+ lv = sizeof(v.ling);
+ v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
+ v.ling.l_linger = sk->sk_lingertime / HZ;
+ break;
+
+ case SO_BSDCOMPAT:
+ sock_warn_obsolete_bsdism("getsockopt");
+ break;
+
+ case SO_TIMESTAMP:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
+ !sock_flag(sk, SOCK_RCVTSTAMPNS);
+ break;
+
+ case SO_TIMESTAMPNS:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
+ break;
+
+ case SO_RCVTIMEO:
+ lv=sizeof(struct timeval);
+ if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
+ v.tm.tv_sec = 0;
+ v.tm.tv_usec = 0;
+ } else {
+ v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
+ }
+ break;
+
+ case SO_SNDTIMEO:
+ lv=sizeof(struct timeval);
+ if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
+ v.tm.tv_sec = 0;
+ v.tm.tv_usec = 0;
+ } else {
+ v.tm.tv_sec = sk->sk_sndtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
+ }
+ break;
+
+ case SO_RCVLOWAT:
+ v.val = sk->sk_rcvlowat;
+ break;
+
+ case SO_SNDLOWAT:
+ v.val=1;
+ break;
+
+ case SO_PASSCRED:
+ v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+ break;
+
+ case SO_PEERCRED:
+ if (len > sizeof(sk->sk_peercred))
+ len = sizeof(sk->sk_peercred);
+ if (copy_to_user(optval, &sk->sk_peercred, len))
+ return -EFAULT;
+ goto lenout;
+
+ case SO_PEERNAME:
+ {
+ char address[128];
+
+ if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
+ return -ENOTCONN;
+ if (lv < len)
+ return -EINVAL;
+ if (copy_to_user(optval, address, len))
+ return -EFAULT;
+ goto lenout;
+ }
+
+ /* Dubious BSD thing... Probably nobody even uses it, but
+ * the UNIX standard wants it for whatever reason... -DaveM
+ */
+ case SO_ACCEPTCONN:
+ v.val = sk->sk_state == TCP_LISTEN;
+ break;
+
+ case SO_PASSSEC:
+ v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+ break;
+
+ case SO_PEERSEC:
+ return security_socket_getpeersec_stream(sock, optval, optlen, len);
+
+ case SO_MARK:
+ v.val = sk->sk_mark;
+ break;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ if (len > lv)
+ len = lv;
+ if (copy_to_user(optval, &v, len))
+ return -EFAULT;
+lenout:
+ if (put_user(len, optlen))
+ return -EFAULT;
+ return 0;
+}
+
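+/*
+ * Editor's illustrative sketch -- not part of the upstream file. From user
+ * space, the doubling done in sock_setsockopt() for SO_RCVBUF (and the fact
+ * that sock_getsockopt() reports the value actually used) is visible like
+ * this; the exact numbers depend on sysctl_rmem_max.
+ */
+#if 0
+#include <stdio.h>
+#include <sys/socket.h>
+
+int main(void)
+{
+	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+	int req = 65536, got;
+	socklen_t len = sizeof(got);
+
+	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &req, sizeof(req));
+	getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &got, &len);
+	printf("requested %d, kernel uses %d\n", req, got);	/* typically 131072 */
+	return 0;
+}
+#endif
+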
+/*
+ * Initialize an sk_lock.
+ *
+ * (We also register the sk_lock with the lock validator.)
+ */
+static inline void sock_lock_init(struct sock *sk)
+{
+ sock_lock_init_class_and_name(sk,
+ af_family_slock_key_strings[sk->sk_family],
+ af_family_slock_keys + sk->sk_family,
+ af_family_key_strings[sk->sk_family],
+ af_family_keys + sk->sk_family);
+}
+
+static void sock_copy(struct sock *nsk, const struct sock *osk)
+{
+#ifdef CONFIG_SECURITY_NETWORK
+ void *sptr = nsk->sk_security;
+#endif
+
+ memcpy(nsk, osk, osk->sk_prot->obj_size);
+#ifdef CONFIG_SECURITY_NETWORK
+ nsk->sk_security = sptr;
+ security_sk_clone(osk, nsk);
+#endif
+}
+
+static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
+ int family)
+{
+ struct sock *sk;
+ struct kmem_cache *slab;
+
+ slab = prot->slab;
+ if (slab != NULL)
+ sk = kmem_cache_alloc(slab, priority);
+ else
+ sk = kmalloc(prot->obj_size, priority);
+
+ if (sk != NULL) {
+ if (security_sk_alloc(sk, family, priority))
+ goto out_free;
+
+ if (!try_module_get(prot->owner))
+ goto out_free_sec;
+ }
+
+ return sk;
+
+out_free_sec:
+ security_sk_free(sk);
+out_free:
+ if (slab != NULL)
+ kmem_cache_free(slab, sk);
+ else
+ kfree(sk);
+ return NULL;
+}
+
+static void sk_prot_free(struct proto *prot, struct sock *sk)
+{
+ struct kmem_cache *slab;
+ struct module *owner;
+
+ owner = prot->owner;
+ slab = prot->slab;
+
+ security_sk_free(sk);
+ if (slab != NULL)
+ kmem_cache_free(slab, sk);
+ else
+ kfree(sk);
+ module_put(owner);
+}
+
+/**
+ * sk_alloc - All socket objects are allocated here
+ * @net: the applicable net namespace
+ * @family: protocol family
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * @prot: struct proto associated with this new sock instance
+ */
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot)
+{
+ struct sock *sk;
+
+ sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
+ if (sk) {
+ sk->sk_family = family;
+ /*
+ * See comment in struct sock definition to understand
+ * why we need sk_prot_creator -acme
+ */
+ sk->sk_prot = sk->sk_prot_creator = prot;
+ sock_lock_init(sk);
+ sock_net_set(sk, get_net(net));
+ }
+
+ return sk;
+}
+
+void sk_free(struct sock *sk)
+{
+ struct sk_filter *filter;
+
+ if (sk->sk_destruct)
+ sk->sk_destruct(sk);
+
+ filter = rcu_dereference(sk->sk_filter);
+ if (filter) {
+ sk_filter_uncharge(sk, filter);
+ rcu_assign_pointer(sk->sk_filter, NULL);
+ }
+
+ sock_disable_timestamp(sk);
+
+ if (atomic_read(&sk->sk_omem_alloc))
+ printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
+ __func__, atomic_read(&sk->sk_omem_alloc));
+
+ put_net(sock_net(sk));
+ sk_prot_free(sk->sk_prot_creator, sk);
+}
+
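+/*
+ * Editor's illustrative sketch -- not part of the upstream file. A protocol's
+ * ->create() handler typically allocates its sock like this; demo_proto and
+ * demo_sock_create are placeholders, and real protocols register demo_proto
+ * with proto_register() beforehand. Teardown normally happens via sock_put(),
+ * which ends in sk_free() once the last reference is gone.
+ */
+#if 0
+static struct proto demo_proto = {
+	.name	  = "DEMO",
+	.owner	  = THIS_MODULE,
+	.obj_size = sizeof(struct sock),	/* no protocol-private area */
+};
+
+static struct sock *demo_sock_create(struct net *net)
+{
+	struct sock *sk;
+
+	sk = sk_alloc(net, PF_UNSPEC, GFP_KERNEL, &demo_proto);
+	if (!sk)
+		return NULL;
+	sock_init_data(NULL, sk);	/* generic queue/callback defaults */
+	return sk;
+}
+#endif
+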
+/*
+ * The last sock_put should drop the reference to sk->sk_net. It has
+ * already been dropped in sk_change_net. Taking a reference to the
+ * stopping namespace is not an option.
+ * Take a reference to the socket so it can be removed from the hash
+ * while still _alive_, and then destroy it in the context of init_net.
+ */
+void sk_release_kernel(struct sock *sk)
+{
+ if (sk == NULL || sk->sk_socket == NULL)
+ return;
+
+ sock_hold(sk);
+ sock_release(sk->sk_socket);
+ release_net(sock_net(sk));
+ sock_net_set(sk, get_net(&init_net));
+ sock_put(sk);
+}
+EXPORT_SYMBOL(sk_release_kernel);
+
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+{
+ struct sock *newsk;
+
+ newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
+ if (newsk != NULL) {
+ struct sk_filter *filter;
+
+ sock_copy(newsk, sk);
+
+ /* SANITY */
+ get_net(sock_net(newsk));
+ sk_node_init(&newsk->sk_node);
+ sock_lock_init(newsk);
+ bh_lock_sock(newsk);
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+
+ atomic_set(&newsk->sk_rmem_alloc, 0);
+ atomic_set(&newsk->sk_wmem_alloc, 0);
+ atomic_set(&newsk->sk_omem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_receive_queue);
+ skb_queue_head_init(&newsk->sk_write_queue);
+#ifdef CONFIG_NET_DMA
+ skb_queue_head_init(&newsk->sk_async_wait_queue);
+#endif
+
+ rwlock_init(&newsk->sk_dst_lock);
+ rwlock_init(&newsk->sk_callback_lock);
+ lockdep_set_class_and_name(&newsk->sk_callback_lock,
+ af_callback_keys + newsk->sk_family,
+ af_family_clock_key_strings[newsk->sk_family]);
+
+ newsk->sk_dst_cache = NULL;
+ newsk->sk_wmem_queued = 0;
+ newsk->sk_forward_alloc = 0;
+ newsk->sk_send_head = NULL;
+ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+
+ sock_reset_flag(newsk, SOCK_DONE);
+ skb_queue_head_init(&newsk->sk_error_queue);
+
+ filter = newsk->sk_filter;
+ if (filter != NULL)
+ sk_filter_charge(newsk, filter);
+
+ if (unlikely(xfrm_sk_clone_policy(newsk))) {
+ /* It is still raw copy of parent, so invalidate
+ * destructor and make plain sk_free() */
+ newsk->sk_destruct = NULL;
+ sk_free(newsk);
+ newsk = NULL;
+ goto out;
+ }
+
+ newsk->sk_err = 0;
+ newsk->sk_priority = 0;
+ atomic_set(&newsk->sk_refcnt, 2);
+
+ /*
+ * Increment the counter in the same struct proto as the master
+ * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+ * is the same as sk->sk_prot->socks, as this field was copied
+ * with memcpy).
+ *
+ * This _changes_ the previous behaviour, where
+ * tcp_create_openreq_child always was incrementing the
+		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
+ * to be taken into account in all callers. -acme
+ */
+ sk_refcnt_debug_inc(newsk);
+ sk_set_socket(newsk, NULL);
+ newsk->sk_sleep = NULL;
+
+ if (newsk->sk_prot->sockets_allocated)
+ percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+ }
+out:
+ return newsk;
+}
+
+EXPORT_SYMBOL_GPL(sk_clone);
+
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+{
+ __sk_dst_set(sk, dst);
+ sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_GSO)
+ sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
+ if (sk_can_gso(sk)) {
+ if (dst->header_len) {
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ } else {
+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ sk->sk_gso_max_size = dst->dev->gso_max_size;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(sk_setup_caps);
+
+void __init sk_init(void)
+{
+ if (num_physpages <= 4096) {
+ sysctl_wmem_max = 32767;
+ sysctl_rmem_max = 32767;
+ sysctl_wmem_default = 32767;
+ sysctl_rmem_default = 32767;
+ } else if (num_physpages >= 131072) {
+ sysctl_wmem_max = 131071;
+ sysctl_rmem_max = 131071;
+ }
+}
+
+/*
+ * Simple resource managers for sockets.
+ */
+
+
+/*
+ * Write buffer destructor automatically called from kfree_skb.
+ */
+void sock_wfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+
+ /* In case it might be waiting for more memory. */
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
+ sk->sk_write_space(sk);
+ sock_put(sk);
+}
+
+/*
+ * Read buffer destructor automatically called from kfree_skb.
+ */
+void sock_rfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_uncharge(skb->sk, skb->truesize);
+}
+
+
+int sock_i_uid(struct sock *sk)
+{
+ int uid;
+
+ read_lock(&sk->sk_callback_lock);
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return uid;
+}
+
+unsigned long sock_i_ino(struct sock *sk)
+{
+ unsigned long ino;
+
+ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return ino;
+}
+
+/*
+ * Allocate a skb from the socket's send buffer.
+ */
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
+{
+ if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ struct sk_buff * skb = alloc_skb(size, priority);
+ if (skb) {
+ skb_set_owner_w(skb, sk);
+ return skb;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a skb from the socket's receive buffer.
+ */
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
+{
+ if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
+ struct sk_buff *skb = alloc_skb(size, priority);
+ if (skb) {
+ skb_set_owner_r(skb, sk);
+ return skb;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a memory block from the socket's option memory buffer.
+ */
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
+{
+ if ((unsigned)size <= sysctl_optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+ void *mem;
+ /* First do the add, to avoid the race if kmalloc
+ * might sleep.
+ */
+ atomic_add(size, &sk->sk_omem_alloc);
+ mem = kmalloc(size, priority);
+ if (mem)
+ return mem;
+ atomic_sub(size, &sk->sk_omem_alloc);
+ }
+ return NULL;
+}
+
+/*
+ * Free an option memory block.
+ */
+void sock_kfree_s(struct sock *sk, void *mem, int size)
+{
+ kfree(mem);
+ atomic_sub(size, &sk->sk_omem_alloc);
+}
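+
+/*
+ * Illustrative sketch (not from the upstream file): option memory is charged
+ * against sk_omem_alloc, so every sock_kmalloc() must be paired with a
+ * sock_kfree_s() of the same size. "len" is a hypothetical request size.
+ *
+ *	void *buf = sock_kmalloc(sk, len, GFP_KERNEL);
+ *	if (buf == NULL)
+ *		return -ENOBUFS;
+ *	...					// use buf
+ *	sock_kfree_s(sk, buf, len);		// undo the omem charge
+ */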
+
+/* This is almost wait_for_tcp_memory minus release_sock/lock_sock.
+ * I think these locks should be removed for datagram sockets.
+ */
+static long sock_wait_for_wmem(struct sock * sk, long timeo)
+{
+ DEFINE_WAIT(wait);
+
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ for (;;) {
+ if (!timeo)
+ break;
+ if (signal_pending(current))
+ break;
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+ break;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ break;
+ if (sk->sk_err)
+ break;
+ timeo = schedule_timeout(timeo);
+ }
+ finish_wait(sk->sk_sleep, &wait);
+ return timeo;
+}
+
+
+/*
+ * Generic send/receive buffer handlers
+ */
+
+static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
+ unsigned long header_len,
+ unsigned long data_len,
+ int noblock, int *errcode)
+{
+ struct sk_buff *skb;
+ gfp_t gfp_mask;
+ long timeo;
+ int err;
+
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+ gfp_mask |= __GFP_REPEAT;
+
+ timeo = sock_sndtimeo(sk, noblock);
+ while (1) {
+ err = sock_error(sk);
+ if (err != 0)
+ goto failure;
+
+ err = -EPIPE;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto failure;
+
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, gfp_mask);
+ if (skb) {
+ int npages;
+ int i;
+
+ /* No pages, we're done... */
+ if (!data_len)
+ break;
+
+ npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ skb->truesize += data_len;
+ skb_shinfo(skb)->nr_frags = npages;
+ for (i = 0; i < npages; i++) {
+ struct page *page;
+ skb_frag_t *frag;
+
+ page = alloc_pages(sk->sk_allocation, 0);
+ if (!page) {
+ err = -ENOBUFS;
+ skb_shinfo(skb)->nr_frags = i;
+ kfree_skb(skb);
+ goto failure;
+ }
+
+ frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = 0;
+ frag->size = (data_len >= PAGE_SIZE ?
+ PAGE_SIZE :
+ data_len);
+ data_len -= PAGE_SIZE;
+ }
+
+ /* Full success... */
+ break;
+ }
+ err = -ENOBUFS;
+ goto failure;
+ }
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ if (!timeo)
+ goto failure;
+ if (signal_pending(current))
+ goto interrupted;
+ timeo = sock_wait_for_wmem(sk, timeo);
+ }
+
+ skb_set_owner_w(skb, sk);
+ return skb;
+
+interrupted:
+ err = sock_intr_errno(timeo);
+failure:
+ *errcode = err;
+ return NULL;
+}
+
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+ int noblock, int *errcode)
+{
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
+}
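+
+/*
+ * Illustrative sketch (not from the upstream file): a datagram sendmsg path
+ * would typically reserve header room and honour MSG_DONTWAIT through the
+ * noblock argument. The sizes "hlen" and "len" here are hypothetical.
+ *
+ *	skb = sock_alloc_send_skb(sk, hlen + len,
+ *				  msg->msg_flags & MSG_DONTWAIT, &err);
+ *	if (skb == NULL)
+ *		return err;
+ *	skb_reserve(skb, hlen);			// leave room for headers
+ *	...					// copy payload, queue for xmit
+ */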
+
+static void __lock_sock(struct sock *sk)
+{
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&sk->sk_lock.slock);
+ schedule();
+ spin_lock_bh(&sk->sk_lock.slock);
+ if (!sock_owned_by_user(sk))
+ break;
+ }
+ finish_wait(&sk->sk_lock.wq, &wait);
+}
+
+static void __release_sock(struct sock *sk)
+{
+ struct sk_buff *skb = sk->sk_backlog.head;
+
+ do {
+ sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
+ bh_unlock_sock(sk);
+
+ do {
+ struct sk_buff *next = skb->next;
+
+ skb->next = NULL;
+ sk_backlog_rcv(sk, skb);
+
+ /*
+ * We are in process context here with softirqs
+ * disabled, use cond_resched_softirq() to preempt.
+ * This is safe to do because we've taken the backlog
+ * queue private:
+ */
+ cond_resched_softirq();
+
+ skb = next;
+ } while (skb != NULL);
+
+ bh_lock_sock(sk);
+ } while ((skb = sk->sk_backlog.head) != NULL);
+}
+
+/**
+ * sk_wait_data - wait for data to arrive at sk_receive_queue
+ * @sk: sock to wait on
+ * @timeo: for how long
+ *
+ * Now the socket state, including sk->sk_err, is changed only under the lock,
+ * hence we may omit checks after joining the wait queue.
+ * We check the receive queue before schedule() only as an optimization;
+ * it is very likely that release_sock() added new data.
+ */
+int sk_wait_data(struct sock *sk, long *timeo)
+{
+ int rc;
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ finish_wait(sk->sk_sleep, &wait);
+ return rc;
+}
+
+EXPORT_SYMBOL(sk_wait_data);
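+
+/*
+ * Illustrative sketch (not from the upstream file): the usual recvmsg pattern
+ * loops on the receive queue under lock_sock() and lets sk_wait_data() sleep
+ * until data arrives, a signal is pending, or the timeout expires.
+ *
+ *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
+ *
+ *	while (skb_queue_empty(&sk->sk_receive_queue)) {
+ *		if (!timeo)
+ *			return -EAGAIN;
+ *		if (signal_pending(current))
+ *			return sock_intr_errno(timeo);
+ *		sk_wait_data(sk, &timeo);	// drops and retakes the socket lock
+ *	}
+ */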
+
+/**
+ * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
+ * @sk: socket
+ * @size: memory size to allocate
+ * @kind: allocation type
+ *
+ * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
+ * rmem allocation. This function assumes that protocols which have
+ * memory_pressure use sk_wmem_queued as write buffer accounting.
+ */
+int __sk_mem_schedule(struct sock *sk, int size, int kind)
+{
+ struct proto *prot = sk->sk_prot;
+ int amt = sk_mem_pages(size);
+ int allocated;
+
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ allocated = atomic_add_return(amt, prot->memory_allocated);
+
+ /* Under limit. */
+ if (allocated <= prot->sysctl_mem[0]) {
+ if (prot->memory_pressure && *prot->memory_pressure)
+ *prot->memory_pressure = 0;
+ return 1;
+ }
+
+ /* Under pressure. */
+ if (allocated > prot->sysctl_mem[1])
+ if (prot->enter_memory_pressure)
+ prot->enter_memory_pressure(sk);
+
+ /* Over hard limit. */
+ if (allocated > prot->sysctl_mem[2])
+ goto suppress_allocation;
+
+ /* guarantee minimum buffer size under pressure */
+ if (kind == SK_MEM_RECV) {
+ if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
+ return 1;
+ } else { /* SK_MEM_SEND */
+ if (sk->sk_type == SOCK_STREAM) {
+ if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
+ return 1;
+ } else if (atomic_read(&sk->sk_wmem_alloc) <
+ prot->sysctl_wmem[0])
+ return 1;
+ }
+
+ if (prot->memory_pressure) {
+ int alloc;
+
+ if (!*prot->memory_pressure)
+ return 1;
+ alloc = percpu_counter_read_positive(prot->sockets_allocated);
+ if (prot->sysctl_mem[2] > alloc *
+ sk_mem_pages(sk->sk_wmem_queued +
+ atomic_read(&sk->sk_rmem_alloc) +
+ sk->sk_forward_alloc))
+ return 1;
+ }
+
+suppress_allocation:
+
+ if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
+ sk_stream_moderate_sndbuf(sk);
+
+ /* Fail only if socket is _under_ its sndbuf.
+ * In this case we cannot block, so we have to fail.
+ */
+ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+ return 1;
+ }
+
+ /* Alas. Undo changes. */
+ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
+ atomic_sub(amt, prot->memory_allocated);
+ return 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_schedule);
+
+/**
+ * __sk_mem_reclaim - reclaim memory_allocated
+ * @sk: socket
+ */
+void __sk_mem_reclaim(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+ prot->memory_allocated);
+ sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+
+ if (prot->memory_pressure && *prot->memory_pressure &&
+ (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+ *prot->memory_pressure = 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_reclaim);
+
+
+/*
+ * Set of default routines for initialising struct proto_ops when
+ * the protocol does not support a particular function. In certain
+ * cases where it makes no sense for a protocol to have a "do nothing"
+ * function, some default processing is provided.
+ */
+
+int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
+ int len, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
+ int *len, int peer)
+{
+ return -EOPNOTSUPP;
+}
+
+unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
+{
+ return 0;
+}
+
+int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_listen(struct socket *sock, int backlog)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_shutdown(struct socket *sock, int how)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t len, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
+{
+ /* Mirror missing mmap method error code */
+ return -ENODEV;
+}
+
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
+{
+ ssize_t res;
+ struct msghdr msg = {.msg_flags = flags};
+ struct kvec iov;
+ char *kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
+ iov.iov_len = size;
+ res = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ kunmap(page);
+ return res;
+}
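+
+/*
+ * Illustrative sketch (not from the upstream file): a minimal protocol can
+ * wire its unsupported operations to these defaults. All "example_*"
+ * identifiers and PF_EXAMPLE are hypothetical.
+ *
+ *	static const struct proto_ops example_ops = {
+ *		.family		= PF_EXAMPLE,
+ *		.owner		= THIS_MODULE,
+ *		.release	= example_release,
+ *		.bind		= sock_no_bind,
+ *		.connect	= sock_no_connect,
+ *		.socketpair	= sock_no_socketpair,
+ *		.accept		= sock_no_accept,
+ *		.getname	= sock_no_getname,
+ *		.poll		= sock_no_poll,
+ *		.ioctl		= sock_no_ioctl,
+ *		.listen		= sock_no_listen,
+ *		.shutdown	= sock_no_shutdown,
+ *		.setsockopt	= sock_no_setsockopt,
+ *		.getsockopt	= sock_no_getsockopt,
+ *		.sendmsg	= example_sendmsg,
+ *		.recvmsg	= example_recvmsg,
+ *		.mmap		= sock_no_mmap,
+ *		.sendpage	= sock_no_sendpage,
+ *	};
+ */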
+
+/*
+ * Default Socket Callbacks
+ */
+
+static void sock_def_wakeup(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_error_report(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_readable(struct sock *sk, int len)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_write_space(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+
+ /* Do not wake up a writer until he can make "significant"
+ * progress. --DaveM
+ */
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+
+ /* Should agree with poll, otherwise some programs break */
+ if (sock_writeable(sk))
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ }
+
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_destruct(struct sock *sk)
+{
+ kfree(sk->sk_protinfo);
+}
+
+void sk_send_sigurg(struct sock *sk)
+{
+ if (sk->sk_socket && sk->sk_socket->file)
+ if (send_sigurg(&sk->sk_socket->file->f_owner))
+ sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
+}
+
+void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+ unsigned long expires)
+{
+ if (!mod_timer(timer, expires))
+ sock_hold(sk);
+}
+
+EXPORT_SYMBOL(sk_reset_timer);
+
+void sk_stop_timer(struct sock *sk, struct timer_list* timer)
+{
+ if (timer_pending(timer) && del_timer(timer))
+ __sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_stop_timer);
+
+void sock_init_data(struct socket *sock, struct sock *sk)
+{
+ skb_queue_head_init(&sk->sk_receive_queue);
+ skb_queue_head_init(&sk->sk_write_queue);
+ skb_queue_head_init(&sk->sk_error_queue);
+#ifdef CONFIG_NET_DMA
+ skb_queue_head_init(&sk->sk_async_wait_queue);
+#endif
+
+ sk->sk_send_head = NULL;
+
+ init_timer(&sk->sk_timer);
+
+ sk->sk_allocation = GFP_KERNEL;
+ sk->sk_rcvbuf = sysctl_rmem_default;
+ sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_state = TCP_CLOSE;
+ sk_set_socket(sk, sock);
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ if (sock) {
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
+ sock->sk = sk;
+ } else
+ sk->sk_sleep = NULL;
+
+ rwlock_init(&sk->sk_dst_lock);
+ rwlock_init(&sk->sk_callback_lock);
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
+ af_callback_keys + sk->sk_family,
+ af_family_clock_key_strings[sk->sk_family]);
+
+ sk->sk_state_change = sock_def_wakeup;
+ sk->sk_data_ready = sock_def_readable;
+ sk->sk_write_space = sock_def_write_space;
+ sk->sk_error_report = sock_def_error_report;
+ sk->sk_destruct = sock_def_destruct;
+
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+
+ sk->sk_peercred.pid = 0;
+ sk->sk_peercred.uid = -1;
+ sk->sk_peercred.gid = -1;
+ sk->sk_write_pending = 0;
+ sk->sk_rcvlowat = 1;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+
+ sk->sk_stamp = ktime_set(-1L, 0);
+
+ atomic_set(&sk->sk_refcnt, 1);
+ atomic_set(&sk->sk_drops, 0);
+}
+
+void lock_sock_nested(struct sock *sk, int subclass)
+{
+ might_sleep();
+ spin_lock_bh(&sk->sk_lock.slock);
+ if (sk->sk_lock.owned)
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+ spin_unlock(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+ local_bh_enable();
+}
+
+EXPORT_SYMBOL(lock_sock_nested);
+
+void release_sock(struct sock *sk)
+{
+ /*
+ * The sk_lock has mutex_unlock() semantics:
+ */
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+
+ spin_lock_bh(&sk->sk_lock.slock);
+ if (sk->sk_backlog.tail)
+ __release_sock(sk);
+ sk->sk_lock.owned = 0;
+ if (waitqueue_active(&sk->sk_lock.wq))
+ wake_up(&sk->sk_lock.wq);
+ spin_unlock_bh(&sk->sk_lock.slock);
+}
+EXPORT_SYMBOL(release_sock);
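+
+/*
+ * Illustrative sketch (not from the upstream file): process context brackets
+ * socket-state changes with lock_sock()/release_sock(); the release also
+ * runs any backlog queued by softirq receivers while the lock was owned.
+ *
+ *	lock_sock(sk);
+ *	...				// mutate socket state safely
+ *	release_sock(sk);		// processes sk->sk_backlog, wakes waiters
+ */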
+
+int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
+{
+ struct timeval tv;
+ if (!sock_flag(sk, SOCK_TIMESTAMP))
+ sock_enable_timestamp(sk);
+ tv = ktime_to_timeval(sk->sk_stamp);
+ if (tv.tv_sec == -1)
+ return -ENOENT;
+ if (tv.tv_sec == 0) {
+ sk->sk_stamp = ktime_get_real();
+ tv = ktime_to_timeval(sk->sk_stamp);
+ }
+ return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(sock_get_timestamp);
+
+int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
+{
+ struct timespec ts;
+ if (!sock_flag(sk, SOCK_TIMESTAMP))
+ sock_enable_timestamp(sk);
+ ts = ktime_to_timespec(sk->sk_stamp);
+ if (ts.tv_sec == -1)
+ return -ENOENT;
+ if (ts.tv_sec == 0) {
+ sk->sk_stamp = ktime_get_real();
+ ts = ktime_to_timespec(sk->sk_stamp);
+ }
+ return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(sock_get_timestampns);
+
+void sock_enable_timestamp(struct sock *sk)
+{
+ if (!sock_flag(sk, SOCK_TIMESTAMP)) {
+ sock_set_flag(sk, SOCK_TIMESTAMP);
+ net_enable_timestamp();
+ }
+}
+
+/*
+ * Get a socket option on a socket.
+ *
+ * FIX: POSIX 1003.1g is very ambiguous here. It states that
+ * asynchronous errors should be reported by getsockopt. We assume
+ * this means if you specify SO_ERROR (otherwise what's the point of it).
+ */
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_getsockopt);
+
+#ifdef CONFIG_COMPAT
+int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->compat_getsockopt != NULL)
+ return sk->sk_prot->compat_getsockopt(sk, level, optname,
+ optval, optlen);
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_getsockopt);
+#endif
+
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ int addr_len = 0;
+ int err;
+
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
+ if (err >= 0)
+ msg->msg_namelen = addr_len;
+ return err;
+}
+
+EXPORT_SYMBOL(sock_common_recvmsg);
+
+/*
+ * Set socket options on an inet socket.
+ */
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_setsockopt);
+
+#ifdef CONFIG_COMPAT
+int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->compat_setsockopt != NULL)
+ return sk->sk_prot->compat_setsockopt(sk, level, optname,
+ optval, optlen);
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_setsockopt);
+#endif
+
+void sk_common_release(struct sock *sk)
+{
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
+
+ /*
+ * Observation: when sk_common_release is called, processes have
+ * no access to the socket, but the network stack still does.
+ * Step one, detach it from networking:
+ *
+ * A. Remove it from the hash tables.
+ */
+
+ sk->sk_prot->unhash(sk);
+
+ /*
+ * At this point the socket cannot receive new packets, but it is
+ * possible that some packets are in flight because some CPU is running
+ * the receiver and did the hash table lookup before we unhashed the
+ * socket. They will reach the receive queue and be purged by the
+ * socket destructor.
+ *
+ * Also, we still have packets pending on the receive queue and probably
+ * our own packets waiting in device queues. sock_destroy will drain the
+ * receive queue, but transmitted packets will delay socket destruction
+ * until the last reference is released.
+ */
+
+ sock_orphan(sk);
+
+ xfrm_sk_free_policy(sk);
+
+ sk_refcnt_debug_release(sk);
+ sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_common_release);
+
+static DEFINE_RWLOCK(proto_list_lock);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_PROC_FS
+#define PROTO_INUSE_NR 64 /* should be enough for the first time */
+struct prot_inuse {
+ int val[PROTO_INUSE_NR];
+};
+
+static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
+
+#ifdef CONFIG_NET_NS
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+ int cpu = smp_processor_id();
+ per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+ int cpu, idx = prot->inuse_idx;
+ int res = 0;
+
+ for_each_possible_cpu(cpu)
+ res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+
+ return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+
+static int sock_inuse_init_net(struct net *net)
+{
+ net->core.inuse = alloc_percpu(struct prot_inuse);
+ return net->core.inuse ? 0 : -ENOMEM;
+}
+
+static void sock_inuse_exit_net(struct net *net)
+{
+ free_percpu(net->core.inuse);
+}
+
+static struct pernet_operations net_inuse_ops = {
+ .init = sock_inuse_init_net,
+ .exit = sock_inuse_exit_net,
+};
+
+static __init int net_inuse_init(void)
+{
+ if (register_pernet_subsys(&net_inuse_ops))
+ panic("Cannot initialize net inuse counters");
+
+ return 0;
+}
+
+core_initcall(net_inuse_init);
+#else
+static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
+
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+ __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+ int cpu, idx = prot->inuse_idx;
+ int res = 0;
+
+ for_each_possible_cpu(cpu)
+ res += per_cpu(prot_inuse, cpu).val[idx];
+
+ return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+#endif
+
+static void assign_proto_idx(struct proto *prot)
+{
+ prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
+
+ if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
+ printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
+ return;
+ }
+
+ set_bit(prot->inuse_idx, proto_inuse_idx);
+}
+
+static void release_proto_idx(struct proto *prot)
+{
+ if (prot->inuse_idx != PROTO_INUSE_NR - 1)
+ clear_bit(prot->inuse_idx, proto_inuse_idx);
+}
+#else
+static inline void assign_proto_idx(struct proto *prot)
+{
+}
+
+static inline void release_proto_idx(struct proto *prot)
+{
+}
+#endif
+
+int proto_register(struct proto *prot, int alloc_slab)
+{
+ if (alloc_slab) {
+ prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN | prot->slab_flags,
+ NULL);
+
+ if (prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
+ prot->name);
+ goto out;
+ }
+
+ if (prot->rsk_prot != NULL) {
+ static const char mask[] = "request_sock_%s";
+
+ prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ if (prot->rsk_prot->slab_name == NULL)
+ goto out_free_sock_slab;
+
+ sprintf(prot->rsk_prot->slab_name, mask, prot->name);
+ prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
+ prot->rsk_prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+
+ if (prot->rsk_prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
+ prot->name);
+ goto out_free_request_sock_slab_name;
+ }
+ }
+
+ if (prot->twsk_prot != NULL) {
+ static const char mask[] = "tw_sock_%s";
+
+ prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+
+ if (prot->twsk_prot->twsk_slab_name == NULL)
+ goto out_free_request_sock_slab;
+
+ sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
+ prot->twsk_prot->twsk_slab =
+ kmem_cache_create(prot->twsk_prot->twsk_slab_name,
+ prot->twsk_prot->twsk_obj_size,
+ 0,
+ SLAB_HWCACHE_ALIGN |
+ prot->slab_flags,
+ NULL);
+ if (prot->twsk_prot->twsk_slab == NULL)
+ goto out_free_timewait_sock_slab_name;
+ }
+ }
+
+ write_lock(&proto_list_lock);
+ list_add(&prot->node, &proto_list);
+ assign_proto_idx(prot);
+ write_unlock(&proto_list_lock);
+ return 0;
+
+out_free_timewait_sock_slab_name:
+ kfree(prot->twsk_prot->twsk_slab_name);
+out_free_request_sock_slab:
+ if (prot->rsk_prot && prot->rsk_prot->slab) {
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ prot->rsk_prot->slab = NULL;
+ }
+out_free_request_sock_slab_name:
+ kfree(prot->rsk_prot->slab_name);
+out_free_sock_slab:
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+out:
+ return -ENOBUFS;
+}
+
+EXPORT_SYMBOL(proto_register);
+
+void proto_unregister(struct proto *prot)
+{
+ write_lock(&proto_list_lock);
+ release_proto_idx(prot);
+ list_del(&prot->node);
+ write_unlock(&proto_list_lock);
+
+ if (prot->slab != NULL) {
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+ }
+
+ if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ kfree(prot->rsk_prot->slab_name);
+ prot->rsk_prot->slab = NULL;
+ }
+
+ if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
+ kmem_cache_destroy(prot->twsk_prot->twsk_slab);
+ kfree(prot->twsk_prot->twsk_slab_name);
+ prot->twsk_prot->twsk_slab = NULL;
+ }
+}
+
+EXPORT_SYMBOL(proto_unregister);
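+
+/*
+ * Illustrative sketch (not from the upstream file): a protocol module
+ * registers its struct proto at init time and unregisters it on exit.
+ * The "example_*" names are hypothetical.
+ *
+ *	static struct proto example_proto = {
+ *		.name		= "EXAMPLE",
+ *		.owner		= THIS_MODULE,
+ *		.obj_size	= sizeof(struct example_sock),
+ *	};
+ *
+ *	err = proto_register(&example_proto, 1);	// 1 => create a slab cache
+ *	if (err)
+ *		return err;
+ *	...
+ *	proto_unregister(&example_proto);
+ */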
+
+#ifdef CONFIG_PROC_FS
+static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(proto_list_lock)
+{
+ read_lock(&proto_list_lock);
+ return seq_list_start_head(&proto_list, *pos);
+}
+
+static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &proto_list, pos);
+}
+
+static void proto_seq_stop(struct seq_file *seq, void *v)
+ __releases(proto_list_lock)
+{
+ read_unlock(&proto_list_lock);
+}
+
+static char proto_method_implemented(const void *method)
+{
+ return method == NULL ? 'n' : 'y';
+}
+
+static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
+{
+ seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+ "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
+ proto->name,
+ proto->obj_size,
+ sock_prot_inuse_get(seq_file_net(seq), proto),
+ proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+ proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+ proto->max_header,
+ proto->slab == NULL ? "no" : "yes",
+ module_name(proto->owner),
+ proto_method_implemented(proto->close),
+ proto_method_implemented(proto->connect),
+ proto_method_implemented(proto->disconnect),
+ proto_method_implemented(proto->accept),
+ proto_method_implemented(proto->ioctl),
+ proto_method_implemented(proto->init),
+ proto_method_implemented(proto->destroy),
+ proto_method_implemented(proto->shutdown),
+ proto_method_implemented(proto->setsockopt),
+ proto_method_implemented(proto->getsockopt),
+ proto_method_implemented(proto->sendmsg),
+ proto_method_implemented(proto->recvmsg),
+ proto_method_implemented(proto->sendpage),
+ proto_method_implemented(proto->bind),
+ proto_method_implemented(proto->backlog_rcv),
+ proto_method_implemented(proto->hash),
+ proto_method_implemented(proto->unhash),
+ proto_method_implemented(proto->get_port),
+ proto_method_implemented(proto->enter_memory_pressure));
+}
+
+static int proto_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == &proto_list)
+ seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
+ "protocol",
+ "size",
+ "sockets",
+ "memory",
+ "press",
+ "maxhdr",
+ "slab",
+ "module",
+ "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
+ else
+ proto_seq_printf(seq, list_entry(v, struct proto, node));
+ return 0;
+}
+
+static const struct seq_operations proto_seq_ops = {
+ .start = proto_seq_start,
+ .next = proto_seq_next,
+ .stop = proto_seq_stop,
+ .show = proto_seq_show,
+};
+
+static int proto_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &proto_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations proto_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = proto_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+static __net_init int proto_init_net(struct net *net)
+{
+ if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __net_exit void proto_exit_net(struct net *net)
+{
+ proc_net_remove(net, "protocols");
+}
+
+
+static __net_initdata struct pernet_operations proto_net_ops = {
+ .init = proto_init_net,
+ .exit = proto_exit_net,
+};
+
+static int __init proto_init(void)
+{
+ return register_pernet_subsys(&proto_net_ops);
+}
+
+subsys_initcall(proto_init);
+
+#endif /* PROC_FS */
+
+EXPORT_SYMBOL(sk_alloc);
+EXPORT_SYMBOL(sk_free);
+EXPORT_SYMBOL(sk_send_sigurg);
+EXPORT_SYMBOL(sock_alloc_send_skb);
+EXPORT_SYMBOL(sock_init_data);
+EXPORT_SYMBOL(sock_kfree_s);
+EXPORT_SYMBOL(sock_kmalloc);
+EXPORT_SYMBOL(sock_no_accept);
+EXPORT_SYMBOL(sock_no_bind);
+EXPORT_SYMBOL(sock_no_connect);
+EXPORT_SYMBOL(sock_no_getname);
+EXPORT_SYMBOL(sock_no_getsockopt);
+EXPORT_SYMBOL(sock_no_ioctl);
+EXPORT_SYMBOL(sock_no_listen);
+EXPORT_SYMBOL(sock_no_mmap);
+EXPORT_SYMBOL(sock_no_poll);
+EXPORT_SYMBOL(sock_no_recvmsg);
+EXPORT_SYMBOL(sock_no_sendmsg);
+EXPORT_SYMBOL(sock_no_sendpage);
+EXPORT_SYMBOL(sock_no_setsockopt);
+EXPORT_SYMBOL(sock_no_shutdown);
+EXPORT_SYMBOL(sock_no_socketpair);
+EXPORT_SYMBOL(sock_rfree);
+EXPORT_SYMBOL(sock_setsockopt);
+EXPORT_SYMBOL(sock_wfree);
+EXPORT_SYMBOL(sock_wmalloc);
+EXPORT_SYMBOL(sock_i_uid);
+EXPORT_SYMBOL(sock_i_ino);
+EXPORT_SYMBOL(sysctl_optmem_max);
diff --git a/libdde_linux26/contrib/net/core/dev.c b/libdde_linux26/contrib/net/core/dev.c
new file mode 100644
index 00000000..e3fe5c70
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/dev.c
@@ -0,0 +1,5277 @@
+/*
+ * NET3 Protocol independent device support routines.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Derived from the non IP parts of dev.c 1.0.19
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *
+ * Additional Authors:
+ * Florian la Roche <rzsfl@rz.uni-sb.de>
+ * Alan Cox <gw4pts@gw4pts.ampr.org>
+ * David Hinds <dahinds@users.sourceforge.net>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ * Adam Sulmicki <adam@cfar.umd.edu>
+ * Pekka Riikonen <priikone@poesidon.pspt.fi>
+ *
+ * Changes:
+ * D.J. Barrow : Fixed bug where dev->refcnt gets set
+ * to 2 if register_netdev gets called
+ * before net_dev_init & also removed a
+ * few lines of code in the process.
+ * Alan Cox : device private ioctl copies fields back.
+ * Alan Cox : Transmit queue code does relevant
+ * stunts to keep the queue safe.
+ * Alan Cox : Fixed double lock.
+ * Alan Cox : Fixed promisc NULL pointer trap
+ * ???????? : Support the full private ioctl range
+ * Alan Cox : Moved ioctl permission check into
+ * drivers
+ * Tim Kordas : SIOCADDMULTI/SIOCDELMULTI
+ * Alan Cox : 100 backlog just doesn't cut it when
+ * you start doing multicast video 8)
+ * Alan Cox : Rewrote net_bh and list manager.
+ * Alan Cox : Fix ETH_P_ALL echoback lengths.
+ * Alan Cox : Took out transmit every packet pass
+ * Saved a few bytes in the ioctl handler
+ * Alan Cox : Network driver sets packet type before
+ * calling netif_rx. Saves a function
+ * call a packet.
+ * Alan Cox : Hashed net_bh()
+ * Richard Kooijman: Timestamp fixes.
+ * Alan Cox : Wrong field in SIOCGIFDSTADDR
+ * Alan Cox : Device lock protection.
+ * Alan Cox : Fixed nasty side effect of device close
+ * changes.
+ * Rudi Cilibrasi : Pass the right thing to
+ * set_mac_address()
+ * Dave Miller : 32bit quantity for the device lock to
+ * make it work out on a Sparc.
+ * Bjorn Ekwall : Added KERNELD hack.
+ * Alan Cox : Cleaned up the backlog initialise.
+ * Craig Metz : SIOCGIFCONF fix if space for under
+ * 1 device.
+ * Thomas Bogendoerfer : Return ENODEV for dev_open, if there
+ * is no device open function.
+ * Andi Kleen : Fix error reporting for SIOCGIFCONF
+ * Michael Chastain : Fix signed/unsigned for SIOCGIFCONF
+ * Cyrus Durgin : Cleaned for KMOD
+ * Adam Sulmicki : Bug Fix : Network Device Unload
+ * A network device unload needs to purge
+ * the backlog queue.
+ * Paul Rusty Russell : SIOCSIFNAME
+ * Pekka Riikonen : Netdev boot-time settings code
+ * Andrew Morton : Make unregister_netdevice wait
+ * indefinitely on dev->refcnt
+ * J Hadi Salim : - Backlog queue sampling
+ * - netif_rx() feedback
+ */
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/capability.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mutex.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/notifier.h>
+#include <linux/skbuff.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <linux/rtnetlink.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/stat.h>
+#include <linux/if_bridge.h>
+#include <linux/if_macvlan.h>
+#include <net/dst.h>
+#include <net/pkt_sched.h>
+#include <net/checksum.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/kmod.h>
+#include <linux/module.h>
+#include <linux/netpoll.h>
+#include <linux/rcupdate.h>
+#include <linux/delay.h>
+#include <net/wext.h>
+#include <net/iw_handler.h>
+#include <asm/current.h>
+#include <linux/audit.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/if_arp.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <linux/in.h>
+#include <linux/jhash.h>
+#include <linux/random.h>
+
+#include "net-sysfs.h"
+
+/* Instead of increasing this, you should create a hash table. */
+#define MAX_GRO_SKBS 8
+
+/* This should be increased if a protocol with a bigger head is added. */
+#define GRO_MAX_HEAD (MAX_HEADER + 128)
+
+/*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+ *
+ * Why 16? Because with 16 the only overlap we get on a hash of the
+ * low nibble of the protocol value is RARP/SNAP/X.25.
+ *
+ * NOTE: That is no longer true with the addition of VLAN tags. Not
+ * sure which should go first, but I bet it won't make much
+ * difference if we are running VLANs. The good news is that
+ * this protocol won't be in the list unless compiled in, so
+ * the average user (w/out VLANs) will not be adversely affected.
+ * --BLG
+ *
+ * 0800 IP
+ * 8100 802.1Q VLAN
+ * 0001 802.3
+ * 0002 AX.25
+ * 0004 802.2
+ * 8035 RARP
+ * 0005 SNAP
+ * 0805 X.25
+ * 0806 ARP
+ * 8137 IPX
+ * 0009 Localtalk
+ * 86DD IPv6
+ */
+
+#define PTYPE_HASH_SIZE (16)
+#define PTYPE_HASH_MASK (PTYPE_HASH_SIZE - 1)
+
+static DEFINE_SPINLOCK(ptype_lock);
+static struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
+static struct list_head ptype_all __read_mostly; /* Taps */
+
+/*
+ * The @dev_base_head list is protected by @dev_base_lock and the rtnl
+ * semaphore.
+ *
+ * Pure readers hold dev_base_lock for reading.
+ *
+ * Writers must hold the rtnl semaphore while they loop through the
+ * dev_base_head list, and hold dev_base_lock for writing when they do the
+ * actual updates. This allows pure readers to access the list even
+ * while a writer is preparing to update it.
+ *
+ * To put it another way, dev_base_lock is held for writing only to
+ * protect against pure readers; the rtnl semaphore provides the
+ * protection against other writers.
+ *
+ * See, for example usages, register_netdevice() and
+ * unregister_netdevice(), which must be called with the rtnl
+ * semaphore held.
+ */
+DEFINE_RWLOCK(dev_base_lock);
+
+EXPORT_SYMBOL(dev_base_lock);
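+
+/*
+ * Illustrative sketch (not from the upstream file): a pure reader walks the
+ * device list under dev_base_lock only; writers additionally hold the rtnl
+ * semaphore as described above.
+ *
+ *	read_lock(&dev_base_lock);
+ *	for_each_netdev(net, dev)
+ *		...			// inspect dev, do not sleep here
+ *	read_unlock(&dev_base_lock);
+ */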
+
+#define NETDEV_HASHBITS 8
+#define NETDEV_HASHENTRIES (1 << NETDEV_HASHBITS)
+
+static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
+{
+ unsigned hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
+ return &net->dev_name_head[hash & ((1 << NETDEV_HASHBITS) - 1)];
+}
+
+static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
+{
+ return &net->dev_index_head[ifindex & ((1 << NETDEV_HASHBITS) - 1)];
+}
+
+/* Device list insertion */
+static int list_netdevice(struct net_device *dev)
+{
+ struct net *net = dev_net(dev);
+
+ ASSERT_RTNL();
+
+ write_lock_bh(&dev_base_lock);
+ list_add_tail(&dev->dev_list, &net->dev_base_head);
+ hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+ hlist_add_head(&dev->index_hlist, dev_index_hash(net, dev->ifindex));
+ write_unlock_bh(&dev_base_lock);
+ return 0;
+}
+
+/* Device list removal */
+static void unlist_netdevice(struct net_device *dev)
+{
+ ASSERT_RTNL();
+
+ /* Unlink dev from the device chain */
+ write_lock_bh(&dev_base_lock);
+ list_del(&dev->dev_list);
+ hlist_del(&dev->name_hlist);
+ hlist_del(&dev->index_hlist);
+ write_unlock_bh(&dev_base_lock);
+}
+
+/*
+ * Our notifier list
+ */
+
+static RAW_NOTIFIER_HEAD(netdev_chain);
+
+/*
+ * Device drivers call our routines to queue packets here. We empty the
+ * queue in the local softnet handler.
+ */
+
+DEFINE_PER_CPU(struct softnet_data, softnet_data);
+
+#ifdef CONFIG_LOCKDEP
+/*
+ * register_netdevice() inits txq->_xmit_lock and sets lockdep class
+ * according to dev->type
+ */
+static const unsigned short netdev_lock_type[] =
+ {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
+ ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
+ ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
+ ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
+ ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
+ ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
+ ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
+ ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
+ ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
+ ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
+ ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
+ ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
+ ARPHRD_FCFABRIC, ARPHRD_IEEE802_TR, ARPHRD_IEEE80211,
+ ARPHRD_IEEE80211_PRISM, ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET,
+ ARPHRD_PHONET_PIPE, ARPHRD_VOID, ARPHRD_NONE};
+
+static const char *netdev_lock_name[] =
+ {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
+ "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
+ "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
+ "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
+ "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
+ "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
+ "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
+ "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
+ "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
+ "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
+ "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
+ "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
+ "_xmit_FCFABRIC", "_xmit_IEEE802_TR", "_xmit_IEEE80211",
+ "_xmit_IEEE80211_PRISM", "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET",
+ "_xmit_PHONET_PIPE", "_xmit_VOID", "_xmit_NONE"};
+
+static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
+static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
+
+static inline unsigned short netdev_lock_pos(unsigned short dev_type)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
+ if (netdev_lock_type[i] == dev_type)
+ return i;
+ /* the last key is used by default */
+ return ARRAY_SIZE(netdev_lock_type) - 1;
+}
+
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ unsigned short dev_type)
+{
+ int i;
+
+ i = netdev_lock_pos(dev_type);
+ lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
+ netdev_lock_name[i]);
+}
+
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+ int i;
+
+ i = netdev_lock_pos(dev->type);
+ lockdep_set_class_and_name(&dev->addr_list_lock,
+ &netdev_addr_lock_key[i],
+ netdev_lock_name[i]);
+}
+#else
+static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
+ unsigned short dev_type)
+{
+}
+static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
+{
+}
+#endif
+
+/*******************************************************************************
+
+ Protocol management and registration routines
+
+*******************************************************************************/
+
+/*
+ * Add a protocol ID to the list. Now that the input handler is
+ * smarter we can dispense with all the messy stuff that used to be
+ * here.
+ *
+ * BEWARE!!! Protocol handlers that mangle input packets
+ * MUST BE last in the hash buckets, and checking of protocol handlers
+ * MUST start from the promiscuous ptype_all chain in net_bh.
+ * This is true now; do not change it.
+ * Explanation: if a protocol handler that mangles the packet were
+ * first in the list, it could not sense that the packet is cloned
+ * and should be copied-on-write, so it would change it and subsequent
+ * readers would get a broken packet.
+ * --ANK (980803)
+ */
+
+/**
+ * dev_add_pack - add packet handler
+ * @pt: packet type declaration
+ *
+ * Add a protocol handler to the networking stack. The passed &packet_type
+ * is linked into kernel lists and may not be freed until it has been
+ * removed from the kernel lists.
+ *
+ * This call does not sleep, therefore it cannot
+ * guarantee that all CPUs in the middle of receiving packets
+ * will see the new packet type (until the next received packet).
+ */
+
+void dev_add_pack(struct packet_type *pt)
+{
+ int hash;
+
+ spin_lock_bh(&ptype_lock);
+ if (pt->type == htons(ETH_P_ALL))
+ list_add_rcu(&pt->list, &ptype_all);
+ else {
+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
+ list_add_rcu(&pt->list, &ptype_base[hash]);
+ }
+ spin_unlock_bh(&ptype_lock);
+}
+
+/**
+ * __dev_remove_pack - remove packet handler
+ * @pt: packet type declaration
+ *
+ * Remove a protocol handler that was previously added to the kernel
+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
+ * from the kernel lists and can be freed or reused once this function
+ * returns.
+ *
+ * The packet type might still be in use by receivers
+ * and must not be freed until after all the CPUs have gone
+ * through a quiescent state.
+ */
+void __dev_remove_pack(struct packet_type *pt)
+{
+ struct list_head *head;
+ struct packet_type *pt1;
+
+ spin_lock_bh(&ptype_lock);
+
+ if (pt->type == htons(ETH_P_ALL))
+ head = &ptype_all;
+ else
+ head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+
+ list_for_each_entry(pt1, head, list) {
+ if (pt == pt1) {
+ list_del_rcu(&pt->list);
+ goto out;
+ }
+ }
+
+ printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
+out:
+ spin_unlock_bh(&ptype_lock);
+}
+/**
+ * dev_remove_pack - remove packet handler
+ * @pt: packet type declaration
+ *
+ * Remove a protocol handler that was previously added to the kernel
+ * protocol handlers by dev_add_pack(). The passed &packet_type is removed
+ * from the kernel lists and can be freed or reused once this function
+ * returns.
+ *
+ * This call sleeps to guarantee that no CPU is looking at the packet
+ * type after return.
+ */
+void dev_remove_pack(struct packet_type *pt)
+{
+ __dev_remove_pack(pt);
+
+ synchronize_net();
+}
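+
+/*
+ * Illustrative sketch (not from the upstream file): a protocol registers a
+ * packet_type for its ethertype and removes it on module unload. The
+ * "example_*" names and ETH_P_EXAMPLE are hypothetical.
+ *
+ *	static struct packet_type example_packet_type = {
+ *		.type	= __constant_htons(ETH_P_EXAMPLE),
+ *		.func	= example_rcv,
+ *	};
+ *
+ *	dev_add_pack(&example_packet_type);
+ *	...
+ *	dev_remove_pack(&example_packet_type);	// sleeps until no CPU uses it
+ */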
+
+/******************************************************************************
+
+ Device Boot-time Settings Routines
+
+*******************************************************************************/
+
+/* Boot time configuration table */
+static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
+
+/**
+ * netdev_boot_setup_add - add new setup entry
+ * @name: name of the device
+ * @map: configured settings for the device
+ *
+ * Adds a new setup entry to the dev_boot_setup list. The function
+ * returns 0 on error and 1 on success. This is a generic routine
+ * for all netdevices.
+ */
+static int netdev_boot_setup_add(char *name, struct ifmap *map)
+{
+ struct netdev_boot_setup *s;
+ int i;
+
+ s = dev_boot_setup;
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+ if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
+ memset(s[i].name, 0, sizeof(s[i].name));
+ strlcpy(s[i].name, name, IFNAMSIZ);
+ memcpy(&s[i].map, map, sizeof(s[i].map));
+ break;
+ }
+ }
+
+ return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
+}
+
+/**
+ * netdev_boot_setup_check - check boot time settings
+ * @dev: the netdevice
+ *
+ * Check the boot time settings for the device.
+ * Any settings found are applied to the device for use
+ * later during device probing.
+ * Returns 0 if no settings are found, 1 if they are.
+ */
+int netdev_boot_setup_check(struct net_device *dev)
+{
+ struct netdev_boot_setup *s = dev_boot_setup;
+ int i;
+
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
+ if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
+ !strcmp(dev->name, s[i].name)) {
+ dev->irq = s[i].map.irq;
+ dev->base_addr = s[i].map.base_addr;
+ dev->mem_start = s[i].map.mem_start;
+ dev->mem_end = s[i].map.mem_end;
+ return 1;
+ }
+ }
+ return 0;
+}
+
+
+/**
+ * netdev_boot_base - get address from boot time settings
+ * @prefix: prefix for network device
+ * @unit: id for network device
+ *
+ * Check the boot time settings for the base address of the device.
+ * Any settings found are applied to the device for use
+ * later during device probing.
+ * Returns 0 if no settings are found.
+ */
+unsigned long netdev_boot_base(const char *prefix, int unit)
+{
+ const struct netdev_boot_setup *s = dev_boot_setup;
+ char name[IFNAMSIZ];
+ int i;
+
+ sprintf(name, "%s%d", prefix, unit);
+
+ /*
+ * If the device is already registered, return a base of 1
+ * to indicate not to probe for this interface.
+ */
+ if (__dev_get_by_name(&init_net, name))
+ return 1;
+
+ for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
+ if (!strcmp(name, s[i].name))
+ return s[i].map.base_addr;
+ return 0;
+}
+
+/*
+ * Saves at boot time configured settings for any netdevice.
+ */
+int __init netdev_boot_setup(char *str)
+{
+ int ints[5];
+ struct ifmap map;
+
+ str = get_options(str, ARRAY_SIZE(ints), ints);
+ if (!str || !*str)
+ return 0;
+
+ /* Save settings */
+ memset(&map, 0, sizeof(map));
+ if (ints[0] > 0)
+ map.irq = ints[1];
+ if (ints[0] > 1)
+ map.base_addr = ints[2];
+ if (ints[0] > 2)
+ map.mem_start = ints[3];
+ if (ints[0] > 3)
+ map.mem_end = ints[4];
+
+ /* Add new entry to the list */
+ return netdev_boot_setup_add(str, &map);
+}
+
+__setup("netdev=", netdev_boot_setup);
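+
+/*
+ * Illustrative sketch (not from the upstream file): the boot parameter takes
+ * up to four integers (irq, base_addr, mem_start, mem_end) followed by the
+ * interface name. The values below are hypothetical:
+ *
+ *	netdev=5,0x300,0,0,eth0
+ */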
+
+/*******************************************************************************
+
+ Device Interface Subroutines
+
+*******************************************************************************/
+
+/**
+ * __dev_get_by_name - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
+ *
+ * Find an interface by name. Must be called under RTNL semaphore
+ * or @dev_base_lock. If the name is found a pointer to the device
+ * is returned. If the name is not found then %NULL is returned. The
+ * reference counters are not incremented so the caller must be
+ * careful with locks.
+ */
+
+struct net_device *__dev_get_by_name(struct net *net, const char *name)
+{
+ struct hlist_node *p;
+
+ hlist_for_each(p, dev_name_hash(net, name)) {
+ struct net_device *dev
+ = hlist_entry(p, struct net_device, name_hlist);
+ if (!strncmp(dev->name, name, IFNAMSIZ))
+ return dev;
+ }
+ return NULL;
+}
+
+/**
+ * dev_get_by_name - find a device by its name
+ * @net: the applicable net namespace
+ * @name: name to find
+ *
+ * Find an interface by name. This can be called from any
+ * context and does its own locking. The returned handle has
+ * the usage count incremented and the caller must use dev_put() to
+ * release it when it is no longer needed. %NULL is returned if no
+ * matching device is found.
+ */
+
+struct net_device *dev_get_by_name(struct net *net, const char *name)
+{
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_name(net, name);
+ if (dev)
+ dev_hold(dev);
+ read_unlock(&dev_base_lock);
+ return dev;
+}
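+
+/*
+ * Illustrative sketch (not from the upstream file): callers of the locking
+ * variant must balance the implicit dev_hold() with dev_put(). The
+ * interface name "eth0" is hypothetical.
+ *
+ *	dev = dev_get_by_name(net, "eth0");
+ *	if (dev == NULL)
+ *		return -ENODEV;
+ *	...				// use dev
+ *	dev_put(dev);			// drop the reference taken above
+ */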
+
+/**
+ * __dev_get_by_index - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. Returns a pointer to the device,
+ * or %NULL if the device is not found. The device has not
+ * had its reference counter increased so the caller must be careful
+ * about locking. The caller must hold either the RTNL semaphore
+ * or @dev_base_lock.
+ */
+
+struct net_device *__dev_get_by_index(struct net *net, int ifindex)
+{
+ struct hlist_node *p;
+
+ hlist_for_each(p, dev_index_hash(net, ifindex)) {
+ struct net_device *dev
+ = hlist_entry(p, struct net_device, index_hlist);
+ if (dev->ifindex == ifindex)
+ return dev;
+ }
+ return NULL;
+}
+
+
+/**
+ * dev_get_by_index - find a device by its ifindex
+ * @net: the applicable net namespace
+ * @ifindex: index of device
+ *
+ * Search for an interface by index. Returns a pointer to the device,
+ * or NULL if the device is not found. The device returned has
+ * had a reference added and the pointer is safe until the user calls
+ * dev_put to indicate they have finished with it.
+ */
+
+struct net_device *dev_get_by_index(struct net *net, int ifindex)
+{
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_index(net, ifindex);
+ if (dev)
+ dev_hold(dev);
+ read_unlock(&dev_base_lock);
+ return dev;
+}
+
+/**
+ * dev_getbyhwaddr - find a device by its hardware address
+ * @net: the applicable net namespace
+ * @type: media type of device
+ * @ha: hardware address
+ *
+ * Search for an interface by MAC address. Returns a pointer to the device,
+ * or NULL if the device is not found. The caller must hold the
+ * rtnl semaphore. The returned device has not had its ref count increased
+ * and the caller must therefore be careful about locking
+ *
+ * BUGS:
+ * If the API was consistent this would be __dev_get_by_hwaddr
+ */
+
+struct net_device *dev_getbyhwaddr(struct net *net, unsigned short type, char *ha)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+
+ for_each_netdev(net, dev)
+ if (dev->type == type &&
+ !memcmp(dev->dev_addr, ha, dev->addr_len))
+ return dev;
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(dev_getbyhwaddr);
+
+struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
+{
+ struct net_device *dev;
+
+ ASSERT_RTNL();
+ for_each_netdev(net, dev)
+ if (dev->type == type)
+ return dev;
+
+ return NULL;
+}
+
+EXPORT_SYMBOL(__dev_getfirstbyhwtype);
+
+struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
+{
+ struct net_device *dev;
+
+ rtnl_lock();
+ dev = __dev_getfirstbyhwtype(net, type);
+ if (dev)
+ dev_hold(dev);
+ rtnl_unlock();
+ return dev;
+}
+
+EXPORT_SYMBOL(dev_getfirstbyhwtype);
+
+/**
+ * dev_get_by_flags - find any device with given flags
+ * @net: the applicable net namespace
+ * @if_flags: IFF_* values
+ * @mask: bitmask of bits in if_flags to check
+ *
+ * Search for any interface with the given flags. Returns a pointer to the
+ * device, or NULL if no matching device is found. The device returned has
+ * had a reference added and the pointer is safe until the user calls
+ * dev_put to indicate they have finished with it.
+ */
+
+struct net_device * dev_get_by_flags(struct net *net, unsigned short if_flags, unsigned short mask)
+{
+ struct net_device *dev, *ret;
+
+ ret = NULL;
+ read_lock(&dev_base_lock);
+ for_each_netdev(net, dev) {
+ if (((dev->flags ^ if_flags) & mask) == 0) {
+ dev_hold(dev);
+ ret = dev;
+ break;
+ }
+ }
+ read_unlock(&dev_base_lock);
+ return ret;
+}
+
+/**
+ * dev_valid_name - check if name is okay for network device
+ * @name: name string
+ *
+ * Network device names need to be valid file names to
+ * allow sysfs to work. We also disallow any kind of
+ * whitespace.
+ */
+int dev_valid_name(const char *name)
+{
+ if (*name == '\0')
+ return 0;
+ if (strlen(name) >= IFNAMSIZ)
+ return 0;
+ if (!strcmp(name, ".") || !strcmp(name, ".."))
+ return 0;
+
+ while (*name) {
+ if (*name == '/' || isspace(*name))
+ return 0;
+ name++;
+ }
+ return 1;
+}
+
+/**
+ * __dev_alloc_name - allocate a name for a device
+ * @net: network namespace to allocate the device name in
+ * @name: name format string
+ * @buf: scratch buffer and result name string
+ *
+ * Passed a format string - eg "lt%d" - it will try to find a suitable
+ * id. It scans the list of devices to build up a free map, then chooses
+ * the first empty slot. The caller must hold the dev_base or rtnl lock
+ * while allocating the name and adding the device in order to avoid
+ * duplicates.
+ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
+ * Returns the number of the unit assigned or a negative errno code.
+ */
+
+static int __dev_alloc_name(struct net *net, const char *name, char *buf)
+{
+ int i = 0;
+ const char *p;
+ const int max_netdevices = 8*PAGE_SIZE;
+ unsigned long *inuse;
+ struct net_device *d;
+
+ p = strnchr(name, IFNAMSIZ-1, '%');
+ if (p) {
+ /*
+ * Verify the string as this thing may have come from
+ * the user. There must be either one "%d" and no other "%"
+ * characters.
+ */
+ if (p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
+ /* Use one page as a bit array of possible slots */
+ inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
+ if (!inuse)
+ return -ENOMEM;
+
+ for_each_netdev(net, d) {
+ if (!sscanf(d->name, name, &i))
+ continue;
+ if (i < 0 || i >= max_netdevices)
+ continue;
+
+ /* avoid cases where sscanf is not exact inverse of printf */
+ snprintf(buf, IFNAMSIZ, name, i);
+ if (!strncmp(buf, d->name, IFNAMSIZ))
+ set_bit(i, inuse);
+ }
+
+ i = find_first_zero_bit(inuse, max_netdevices);
+ free_page((unsigned long) inuse);
+ }
+
+ snprintf(buf, IFNAMSIZ, name, i);
+ if (!__dev_get_by_name(net, buf))
+ return i;
+
+ /* It is possible to run out of possible slots
+ * when the name is long and there isn't enough space left
+ * for the digits, or if all bits are used.
+ */
+ return -ENFILE;
+}
+
+/**
+ * dev_alloc_name - allocate a name for a device
+ * @dev: device
+ * @name: name format string
+ *
+ * Passed a format string - eg "lt%d" - it will try to find a suitable
+ * id. It scans the list of devices to build up a free map, then chooses
+ * the first empty slot. The caller must hold the dev_base or rtnl lock
+ * while allocating the name and adding the device in order to avoid
+ * duplicates.
+ * Limited to bits_per_byte * page size devices (ie 32K on most platforms).
+ * Returns the number of the unit assigned or a negative errno code.
+ */
+
+int dev_alloc_name(struct net_device *dev, const char *name)
+{
+ char buf[IFNAMSIZ];
+ struct net *net;
+ int ret;
+
+ BUG_ON(!dev_net(dev));
+ net = dev_net(dev);
+ ret = __dev_alloc_name(net, name, buf);
+ if (ret >= 0)
+ strlcpy(dev->name, buf, IFNAMSIZ);
+ return ret;
+}
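+
+/*
+ * Illustrative sketch (not from the upstream file): a driver typically seeds
+ * dev->name with a format and lets dev_alloc_name() pick the unit number.
+ * The "dummy%d" prefix is hypothetical.
+ *
+ *	strlcpy(dev->name, "dummy%d", IFNAMSIZ);
+ *	err = dev_alloc_name(dev, dev->name);
+ *	if (err < 0)
+ *		goto fail;		// no free unit or invalid format
+ */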
+
+
+/**
+ * dev_change_name - change name of a device
+ * @dev: device
+ * @newname: name (or format string) must be at least IFNAMSIZ
+ *
+ * Change the name of a device. A format string such as "eth%d"
+ * can be passed for wildcarding.
+ */
+int dev_change_name(struct net_device *dev, const char *newname)
+{
+ char oldname[IFNAMSIZ];
+ int err = 0;
+ int ret;
+ struct net *net;
+
+ ASSERT_RTNL();
+ BUG_ON(!dev_net(dev));
+
+ net = dev_net(dev);
+ if (dev->flags & IFF_UP)
+ return -EBUSY;
+
+ if (!dev_valid_name(newname))
+ return -EINVAL;
+
+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0)
+ return 0;
+
+ memcpy(oldname, dev->name, IFNAMSIZ);
+
+ if (strchr(newname, '%')) {
+ err = dev_alloc_name(dev, newname);
+ if (err < 0)
+ return err;
+ }
+ else if (__dev_get_by_name(net, newname))
+ return -EEXIST;
+ else
+ strlcpy(dev->name, newname, IFNAMSIZ);
+
+rollback:
+ /* For now only devices in the initial network namespace
+ * are in sysfs.
+ */
+ if (net == &init_net) {
+ ret = device_rename(&dev->dev, dev->name);
+ if (ret) {
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ return ret;
+ }
+ }
+
+ write_lock_bh(&dev_base_lock);
+ hlist_del(&dev->name_hlist);
+ hlist_add_head(&dev->name_hlist, dev_name_hash(net, dev->name));
+ write_unlock_bh(&dev_base_lock);
+
+ ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
+ ret = notifier_to_errno(ret);
+
+ if (ret) {
+ if (err) {
+ printk(KERN_ERR
+ "%s: name change rollback failed: %d.\n",
+ dev->name, ret);
+ } else {
+ err = ret;
+ memcpy(dev->name, oldname, IFNAMSIZ);
+ goto rollback;
+ }
+ }
+
+ return err;
+}
+
+/**
+ * dev_set_alias - change ifalias of a device
+ * @dev: device
+ * @alias: name up to IFALIASZ
+ *	@len: limit of bytes to copy from @alias
+ *
+ *	Set ifalias for a device.
+ */
+int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+{
+ ASSERT_RTNL();
+
+ if (len >= IFALIASZ)
+ return -EINVAL;
+
+ if (!len) {
+ if (dev->ifalias) {
+ kfree(dev->ifalias);
+ dev->ifalias = NULL;
+ }
+ return 0;
+ }
+
+ dev->ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
+ if (!dev->ifalias)
+ return -ENOMEM;
+
+ strlcpy(dev->ifalias, alias, len+1);
+ return len;
+}
+
+
+/**
+ * netdev_features_change - device changes features
+ * @dev: device to cause notification
+ *
+ * Called to indicate a device has changed features.
+ */
+void netdev_features_change(struct net_device *dev)
+{
+ call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
+}
+EXPORT_SYMBOL(netdev_features_change);
+
+/**
+ * netdev_state_change - device changes state
+ * @dev: device to cause notification
+ *
+ * Called to indicate a device has changed state. This function calls
+ * the notifier chains for netdev_chain and sends a NEWLINK message
+ * to the routing socket.
+ */
+void netdev_state_change(struct net_device *dev)
+{
+ if (dev->flags & IFF_UP) {
+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ }
+}
+
+void netdev_bonding_change(struct net_device *dev)
+{
+ call_netdevice_notifiers(NETDEV_BONDING_FAILOVER, dev);
+}
+EXPORT_SYMBOL(netdev_bonding_change);
+
+/**
+ * dev_load - load a network module
+ * @net: the applicable net namespace
+ * @name: name of interface
+ *
+ * If a network interface is not present and the process has suitable
+ * privileges this function loads the module. If module loading is not
+ * available in this kernel then it becomes a nop.
+ */
+
+void dev_load(struct net *net, const char *name)
+{
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_name(net, name);
+ read_unlock(&dev_base_lock);
+
+ if (!dev && capable(CAP_SYS_MODULE))
+ request_module("%s", name);
+}
+
+/**
+ * dev_open - prepare an interface for use.
+ * @dev: device to open
+ *
+ * Takes a device from down to up state. The device's private open
+ * function is invoked and then the multicast lists are loaded. Finally
+ * the device is moved into the up state and a %NETDEV_UP message is
+ * sent to the netdev notifier chain.
+ *
+ * Calling this function on an active interface is a nop. On a failure
+ * a negative errno code is returned.
+ */
+int dev_open(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int ret = 0;
+
+ ASSERT_RTNL();
+
+ /*
+ * Is it already up?
+ */
+
+ if (dev->flags & IFF_UP)
+ return 0;
+
+ /*
+ * Is it even present?
+ */
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+ /*
+ * Call device private open method
+ */
+ set_bit(__LINK_STATE_START, &dev->state);
+
+ if (ops->ndo_validate_addr)
+ ret = ops->ndo_validate_addr(dev);
+
+ if (!ret && ops->ndo_open)
+ ret = ops->ndo_open(dev);
+
+ /*
+ * If it went open OK then:
+ */
+
+ if (ret)
+ clear_bit(__LINK_STATE_START, &dev->state);
+ else {
+ /*
+ * Set the flags.
+ */
+ dev->flags |= IFF_UP;
+
+ /*
+ * Enable NET_DMA
+ */
+ net_dmaengine_get();
+
+ /*
+ * Initialize multicasting status
+ */
+ dev_set_rx_mode(dev);
+
+ /*
+ * Wakeup transmit queue engine
+ */
+ dev_activate(dev);
+
+ /*
+ * ... and announce new interface.
+ */
+ call_netdevice_notifiers(NETDEV_UP, dev);
+ }
+
+ return ret;
+}
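+
+/*
+ * Usage sketch (illustrative): dev_open() asserts the RTNL lock, so a
+ * caller outside the core wraps it roughly like this:
+ *
+ *	rtnl_lock();
+ *	err = dev_open(dev);
+ *	rtnl_unlock();
+ */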
+
+/**
+ * dev_close - shutdown an interface.
+ * @dev: device to shutdown
+ *
+ * This function moves an active device into down state. A
+ * %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
+ * is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
+ * chain.
+ */
+int dev_close(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ ASSERT_RTNL();
+
+ might_sleep();
+
+ if (!(dev->flags & IFF_UP))
+ return 0;
+
+ /*
+	 * Tell people we are going down, so that they can
+	 * prepare for it while the device is still operating.
+ */
+ call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
+
+ clear_bit(__LINK_STATE_START, &dev->state);
+
+ /* Synchronize to scheduled poll. We cannot touch poll list,
+ * it can be even on different cpu. So just clear netif_running().
+ *
+	 * dev->stop() will invoke napi_disable() on all of its
+ * napi_struct instances on this device.
+ */
+ smp_mb__after_clear_bit(); /* Commit netif_running(). */
+
+ dev_deactivate(dev);
+
+ /*
+ * Call the device specific close. This cannot fail.
+ * Only if device is UP
+ *
+ * We allow it to be called even after a DETACH hot-plug
+ * event.
+ */
+ if (ops->ndo_stop)
+ ops->ndo_stop(dev);
+
+ /*
+ * Device is now down.
+ */
+
+ dev->flags &= ~IFF_UP;
+
+ /*
+ * Tell people we are down
+ */
+ call_netdevice_notifiers(NETDEV_DOWN, dev);
+
+ /*
+ * Shutdown NET_DMA
+ */
+ net_dmaengine_put();
+
+ return 0;
+}
+
+
+/**
+ * dev_disable_lro - disable Large Receive Offload on a device
+ * @dev: device
+ *
+ * Disable Large Receive Offload (LRO) on a net device. Must be
+ * called under RTNL. This is needed if received packets may be
+ * forwarded to another interface.
+ */
+void dev_disable_lro(struct net_device *dev)
+{
+ if (dev->ethtool_ops && dev->ethtool_ops->get_flags &&
+ dev->ethtool_ops->set_flags) {
+ u32 flags = dev->ethtool_ops->get_flags(dev);
+ if (flags & ETH_FLAG_LRO) {
+ flags &= ~ETH_FLAG_LRO;
+ dev->ethtool_ops->set_flags(dev, flags);
+ }
+ }
+ WARN_ON(dev->features & NETIF_F_LRO);
+}
+EXPORT_SYMBOL(dev_disable_lro);
+
+
+static int dev_boot_phase = 1;
+
+/*
+ * Device change register/unregister. These are not inline or static
+ * as we export them to the world.
+ */
+
+/**
+ * register_netdevice_notifier - register a network notifier block
+ * @nb: notifier
+ *
+ * Register a notifier to be called when network device events occur.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
+ *
+ *	When registered, all registration and up events are replayed
+ *	to the new notifier to allow the device to have a race-free
+ *	view of the network device list.
+ */
+
+int register_netdevice_notifier(struct notifier_block *nb)
+{
+ struct net_device *dev;
+ struct net_device *last;
+ struct net *net;
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_register(&netdev_chain, nb);
+ if (err)
+ goto unlock;
+ if (dev_boot_phase)
+ goto unlock;
+ for_each_net(net) {
+ for_each_netdev(net, dev) {
+ err = nb->notifier_call(nb, NETDEV_REGISTER, dev);
+ err = notifier_to_errno(err);
+ if (err)
+ goto rollback;
+
+ if (!(dev->flags & IFF_UP))
+ continue;
+
+ nb->notifier_call(nb, NETDEV_UP, dev);
+ }
+ }
+
+unlock:
+ rtnl_unlock();
+ return err;
+
+rollback:
+ last = dev;
+ for_each_net(net) {
+ for_each_netdev(net, dev) {
+ if (dev == last)
+ break;
+
+ if (dev->flags & IFF_UP) {
+ nb->notifier_call(nb, NETDEV_GOING_DOWN, dev);
+ nb->notifier_call(nb, NETDEV_DOWN, dev);
+ }
+ nb->notifier_call(nb, NETDEV_UNREGISTER, dev);
+ }
+ }
+
+ raw_notifier_chain_unregister(&netdev_chain, nb);
+ goto unlock;
+}
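+
+/*
+ * Illustrative sketch of a notifier user; the callback and block names
+ * below are hypothetical and not part of this file:
+ *
+ *	static int my_netdev_event(struct notifier_block *nb,
+ *				   unsigned long event, void *ptr)
+ *	{
+ *		struct net_device *dev = ptr;
+ *
+ *		if (event == NETDEV_UP)
+ *			printk(KERN_INFO "%s is up\n", dev->name);
+ *		return NOTIFY_DONE;
+ *	}
+ *
+ *	static struct notifier_block my_netdev_nb = {
+ *		.notifier_call = my_netdev_event,
+ *	};
+ *
+ *	register_netdevice_notifier(&my_netdev_nb);
+ */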
+
+/**
+ * unregister_netdevice_notifier - unregister a network notifier block
+ * @nb: notifier
+ *
+ * Unregister a notifier previously registered by
+ *	register_netdevice_notifier(). The notifier is unlinked from the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
+ */
+
+int unregister_netdevice_notifier(struct notifier_block *nb)
+{
+ int err;
+
+ rtnl_lock();
+ err = raw_notifier_chain_unregister(&netdev_chain, nb);
+ rtnl_unlock();
+ return err;
+}
+
+/**
+ * call_netdevice_notifiers - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+
+int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
+{
+ return raw_notifier_call_chain(&netdev_chain, val, dev);
+}
+
+/* When > 0 there are consumers of rx skb time stamps */
+static atomic_t netstamp_needed = ATOMIC_INIT(0);
+
+void net_enable_timestamp(void)
+{
+ atomic_inc(&netstamp_needed);
+}
+
+void net_disable_timestamp(void)
+{
+ atomic_dec(&netstamp_needed);
+}
+
+static inline void net_timestamp(struct sk_buff *skb)
+{
+ if (atomic_read(&netstamp_needed))
+ __net_timestamp(skb);
+ else
+ skb->tstamp.tv64 = 0;
+}
+
+/*
+ * Support routine. Sends outgoing frames to any network
+ * taps currently in use.
+ */
+
+static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct packet_type *ptype;
+
+ net_timestamp(skb);
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ /* Never send packets back to the socket
+ * they originated from - MvS (miquels@drinkel.ow.org)
+ */
+ if ((ptype->dev == dev || !ptype->dev) &&
+ (ptype->af_packet_priv == NULL ||
+ (struct sock *)ptype->af_packet_priv != skb->sk)) {
+ struct sk_buff *skb2= skb_clone(skb, GFP_ATOMIC);
+ if (!skb2)
+ break;
+
+ /* skb->nh should be correctly
+			   set by the sender, so that the second statement is
+ just protection against buggy protocols.
+ */
+ skb_reset_mac_header(skb2);
+
+ if (skb_network_header(skb2) < skb2->data ||
+ skb2->network_header > skb2->tail) {
+ if (net_ratelimit())
+ printk(KERN_CRIT "protocol %04x is "
+ "buggy, dev %s\n",
+ skb2->protocol, dev->name);
+ skb_reset_network_header(skb2);
+ }
+
+ skb2->transport_header = skb2->network_header;
+ skb2->pkt_type = PACKET_OUTGOING;
+ ptype->func(skb2, skb->dev, ptype, skb->dev);
+ }
+ }
+ rcu_read_unlock();
+}
+
+
+static inline void __netif_reschedule(struct Qdisc *q)
+{
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ q->next_sched = sd->output_queue;
+ sd->output_queue = q;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+ if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+ __netif_reschedule(q);
+}
+EXPORT_SYMBOL(__netif_schedule);
+
+void dev_kfree_skb_irq(struct sk_buff *skb)
+{
+ if (atomic_dec_and_test(&skb->users)) {
+ struct softnet_data *sd;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ sd = &__get_cpu_var(softnet_data);
+ skb->next = sd->completion_queue;
+ sd->completion_queue = skb;
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_restore(flags);
+ }
+}
+EXPORT_SYMBOL(dev_kfree_skb_irq);
+
+void dev_kfree_skb_any(struct sk_buff *skb)
+{
+ if (in_irq() || irqs_disabled())
+ dev_kfree_skb_irq(skb);
+ else
+ dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL(dev_kfree_skb_any);
+
+
+/**
+ * netif_device_detach - mark device as removed
+ * @dev: network device
+ *
+ * Mark device as removed from system and therefore no longer available.
+ */
+void netif_device_detach(struct net_device *dev)
+{
+ if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_stop_queue(dev);
+ }
+}
+EXPORT_SYMBOL(netif_device_detach);
+
+/**
+ * netif_device_attach - mark device as attached
+ * @dev: network device
+ *
+ * Mark device as attached from system and restart if needed.
+ */
+void netif_device_attach(struct net_device *dev)
+{
+ if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
+ netif_running(dev)) {
+ netif_wake_queue(dev);
+ __netdev_watchdog_up(dev);
+ }
+}
+EXPORT_SYMBOL(netif_device_attach);
+
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+ return ((features & NETIF_F_GEN_CSUM) ||
+ ((features & NETIF_F_IP_CSUM) &&
+ protocol == htons(ETH_P_IP)) ||
+ ((features & NETIF_F_IPV6_CSUM) &&
+ protocol == htons(ETH_P_IPV6)));
+}
+
+static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
+{
+ if (can_checksum_protocol(dev->features, skb->protocol))
+ return true;
+
+ if (skb->protocol == htons(ETH_P_8021Q)) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ if (can_checksum_protocol(dev->features & dev->vlan_features,
+ veh->h_vlan_encapsulated_proto))
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * Invalidate hardware checksum when packet is to be mangled, and
+ * complete checksum manually on outgoing path.
+ */
+int skb_checksum_help(struct sk_buff *skb)
+{
+ __wsum csum;
+ int ret = 0, offset;
+
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ goto out_set_summed;
+
+ if (unlikely(skb_shinfo(skb)->gso_size)) {
+ /* Let GSO fix up the checksum. */
+ goto out_set_summed;
+ }
+
+ offset = skb->csum_start - skb_headroom(skb);
+ BUG_ON(offset >= skb_headlen(skb));
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+
+ offset += skb->csum_offset;
+ BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
+
+ if (skb_cloned(skb) &&
+ !skb_clone_writable(skb, offset + sizeof(__sum16))) {
+ ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+ if (ret)
+ goto out;
+ }
+
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+out_set_summed:
+ skb->ip_summed = CHECKSUM_NONE;
+out:
+ return ret;
+}
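+
+/*
+ * For reference: with CHECKSUM_PARTIAL, skb->csum_start is the offset of
+ * the start of the checksummed region from skb->head, and skb->csum_offset
+ * locates the 16-bit checksum field relative to csum_start; the folded sum
+ * computed above is written at that location.
+ */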
+
+/**
+ * skb_gso_segment - Perform segmentation on skb.
+ * @skb: buffer to segment
+ * @features: features for the output path (see dev->features)
+ *
+ * This function segments the given skb and returns a list of segments.
+ *
+ * It may return NULL if the skb requires no segmentation. This is
+ * only possible when GSO is used for verifying header integrity.
+ */
+struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
+{
+ struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
+ struct packet_type *ptype;
+ __be16 type = skb->protocol;
+ int err;
+
+ skb_reset_mac_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+ __skb_pull(skb, skb->mac_len);
+
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ struct net_device *dev = skb->dev;
+ struct ethtool_drvinfo info = {};
+
+ if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
+ dev->ethtool_ops->get_drvinfo(dev, &info);
+
+ WARN(1, "%s: caps=(0x%lx, 0x%lx) len=%d data_len=%d "
+ "ip_summed=%d",
+ info.driver, dev ? dev->features : 0L,
+ skb->sk ? skb->sk->sk_route_caps : 0L,
+ skb->len, skb->data_len, skb->ip_summed);
+
+ if (skb_header_cloned(skb) &&
+ (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+ return ERR_PTR(err);
+ }
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype,
+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
+ if (ptype->type == type && !ptype->dev && ptype->gso_segment) {
+ if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
+ err = ptype->gso_send_check(skb);
+ segs = ERR_PTR(err);
+ if (err || skb_gso_ok(skb, features))
+ break;
+ __skb_push(skb, (skb->data -
+ skb_network_header(skb)));
+ }
+ segs = ptype->gso_segment(skb, features);
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ __skb_push(skb, skb->data - skb_mac_header(skb));
+
+ return segs;
+}
+
+EXPORT_SYMBOL(skb_gso_segment);
+
+/* Take action when hardware reception checksum errors are detected. */
+#ifdef CONFIG_BUG
+void netdev_rx_csum_fault(struct net_device *dev)
+{
+ if (net_ratelimit()) {
+ printk(KERN_ERR "%s: hw csum failure.\n",
+ dev ? dev->name : "<unknown>");
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(netdev_rx_csum_fault);
+#endif
+
+/* Actually, we should eliminate this check as soon as we know that:
+ * 1. An IOMMU is present and can map all the memory.
+ * 2. No high memory really exists on this machine.
+ */
+
+static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+{
+#ifdef CONFIG_HIGHMEM
+ int i;
+
+ if (dev->features & NETIF_F_HIGHDMA)
+ return 0;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
+ if (PageHighMem(skb_shinfo(skb)->frags[i].page))
+ return 1;
+
+#endif
+ return 0;
+}
+
+struct dev_gso_cb {
+ void (*destructor)(struct sk_buff *skb);
+};
+
+#define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
+
+static void dev_gso_skb_destructor(struct sk_buff *skb)
+{
+ struct dev_gso_cb *cb;
+
+ do {
+ struct sk_buff *nskb = skb->next;
+
+ skb->next = nskb->next;
+ nskb->next = NULL;
+ kfree_skb(nskb);
+ } while (skb->next);
+
+ cb = DEV_GSO_CB(skb);
+ if (cb->destructor)
+ cb->destructor(skb);
+}
+
+/**
+ * dev_gso_segment - Perform emulated hardware segmentation on skb.
+ * @skb: buffer to segment
+ *
+ * This function segments the given skb and stores the list of segments
+ * in skb->next.
+ */
+static int dev_gso_segment(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct sk_buff *segs;
+ int features = dev->features & ~(illegal_highdma(dev, skb) ?
+ NETIF_F_SG : 0);
+
+ segs = skb_gso_segment(skb, features);
+
+ /* Verifying header integrity only. */
+ if (!segs)
+ return 0;
+
+ if (IS_ERR(segs))
+ return PTR_ERR(segs);
+
+ skb->next = segs;
+ DEV_GSO_CB(skb)->destructor = skb->destructor;
+ skb->destructor = dev_gso_skb_destructor;
+
+ return 0;
+}
+
+int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct netdev_queue *txq)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ prefetch(&dev->netdev_ops->ndo_start_xmit);
+ if (likely(!skb->next)) {
+ if (!list_empty(&ptype_all))
+ dev_queue_xmit_nit(skb, dev);
+
+ if (netif_needs_gso(dev, skb)) {
+ if (unlikely(dev_gso_segment(skb)))
+ goto out_kfree_skb;
+ if (skb->next)
+ goto gso;
+ }
+
+ return ops->ndo_start_xmit(skb, dev);
+ }
+
+gso:
+ do {
+ struct sk_buff *nskb = skb->next;
+ int rc;
+
+ skb->next = nskb->next;
+ nskb->next = NULL;
+ rc = ops->ndo_start_xmit(nskb, dev);
+ if (unlikely(rc)) {
+ nskb->next = skb->next;
+ skb->next = nskb;
+ return rc;
+ }
+ if (unlikely(netif_tx_queue_stopped(txq) && skb->next))
+ return NETDEV_TX_BUSY;
+ } while (skb->next);
+
+ skb->destructor = DEV_GSO_CB(skb)->destructor;
+
+out_kfree_skb:
+ kfree_skb(skb);
+ return 0;
+}
+
+static u32 simple_tx_hashrnd;
+static int simple_tx_hashrnd_initialized = 0;
+
+static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
+{
+ u32 addr1, addr2, ports;
+ u32 hash, ihl;
+ u8 ip_proto = 0;
+
+ if (unlikely(!simple_tx_hashrnd_initialized)) {
+ get_random_bytes(&simple_tx_hashrnd, 4);
+ simple_tx_hashrnd_initialized = 1;
+ }
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
+ ip_proto = ip_hdr(skb)->protocol;
+ addr1 = ip_hdr(skb)->saddr;
+ addr2 = ip_hdr(skb)->daddr;
+ ihl = ip_hdr(skb)->ihl;
+ break;
+ case htons(ETH_P_IPV6):
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
+ addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
+ ihl = (40 >> 2);
+ break;
+ default:
+ return 0;
+ }
+
+
+ switch (ip_proto) {
+ case IPPROTO_TCP:
+ case IPPROTO_UDP:
+ case IPPROTO_DCCP:
+ case IPPROTO_ESP:
+ case IPPROTO_AH:
+ case IPPROTO_SCTP:
+ case IPPROTO_UDPLITE:
+ ports = *((u32 *) (skb_network_header(skb) + (ihl * 4)));
+ break;
+
+ default:
+ ports = 0;
+ break;
+ }
+
+ hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+
+ return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
+}
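+
+/*
+ * Note on the return expression above: jhash_3words() produces a full
+ * 32-bit hash, and ((u64) hash * real_num_tx_queues) >> 32 maps it
+ * uniformly onto [0, real_num_tx_queues) without a modulo operation.
+ */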
+
+static struct netdev_queue *dev_pick_tx(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ u16 queue_index = 0;
+
+ if (ops->ndo_select_queue)
+ queue_index = ops->ndo_select_queue(dev, skb);
+ else if (dev->real_num_tx_queues > 1)
+ queue_index = simple_tx_hash(dev, skb);
+
+ skb_set_queue_mapping(skb, queue_index);
+ return netdev_get_tx_queue(dev, queue_index);
+}
+
+/**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+ *
+ * Queue a buffer for transmission to a network device. The caller must
+ * have set the device and priority and built the buffer before calling
+ * this function. The function can be called from an interrupt.
+ *
+ * A negative errno code is returned on a failure. A success does not
+ * guarantee the frame will be transmitted as it may be dropped due
+ * to congestion or traffic shaping.
+ *
+ * -----------------------------------------------------------------------------------
+ * I notice this method can also return errors from the queue disciplines,
+ * including NET_XMIT_DROP, which is a positive value. So, errors can also
+ * be positive.
+ *
+ * Regardless of the return value, the skb is consumed, so it is currently
+ * difficult to retry a send to this method. (You can bump the ref count
+ * before sending to hold a reference for retry if you are careful.)
+ *
+ * When calling this method, interrupts MUST be enabled. This is because
+ * the BH enable code must have IRQs enabled so that it will not deadlock.
+ * --BLG
+ */
+int dev_queue_xmit(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ struct netdev_queue *txq;
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+
+ if (skb_shinfo(skb)->frag_list &&
+ !(dev->features & NETIF_F_FRAGLIST) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+	/* A fragmented skb is linearized if the device does not support SG,
+	 * or if at least one of the fragments is in highmem and the device
+	 * does not support DMA from it.
+ */
+ if (skb_shinfo(skb)->nr_frags &&
+ (!(dev->features & NETIF_F_SG) || illegal_highdma(dev, skb)) &&
+ __skb_linearize(skb))
+ goto out_kfree_skb;
+
+ /* If packet is not checksummed and device does not support
+ * checksumming for this protocol, complete checksumming here.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb, skb->csum_start -
+ skb_headroom(skb));
+ if (!dev_can_checksum(dev, skb) && skb_checksum_help(skb))
+ goto out_kfree_skb;
+ }
+
+gso:
+ /* Disable soft irqs for various locks below. Also
+ * stops preemption for RCU.
+ */
+ rcu_read_lock_bh();
+
+ txq = dev_pick_tx(dev, skb);
+ q = rcu_dereference(txq->qdisc);
+
+#ifdef CONFIG_NET_CLS_ACT
+ skb->tc_verd = SET_TC_AT(skb->tc_verd,AT_EGRESS);
+#endif
+ if (q->enqueue) {
+ spinlock_t *root_lock = qdisc_lock(q);
+
+ spin_lock(root_lock);
+
+ if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+ kfree_skb(skb);
+ rc = NET_XMIT_DROP;
+ } else {
+ rc = qdisc_enqueue_root(skb, q);
+ qdisc_run(q);
+ }
+ spin_unlock(root_lock);
+
+ goto out;
+ }
+
+	/* The device has no queue. Common case for software devices:
+	   loopback, all sorts of tunnels...
+
+	   Really, it is unlikely that netif_tx_lock protection is necessary
+	   here. (e.g. loopback and IP tunnels are clean, ignoring statistics
+	   counters.)
+	   However, it is possible that they rely on the protection
+	   we provide here.
+
+	   Check this and shoot the lock. It is not prone to deadlocks.
+	   Or shoot the noqueue qdisc entirely; that is even simpler 8)
+	 */
+ if (dev->flags & IFF_UP) {
+ int cpu = smp_processor_id(); /* ok because BHs are off */
+
+ if (txq->xmit_lock_owner != cpu) {
+
+ HARD_TX_LOCK(dev, txq, cpu);
+
+ if (!netif_tx_queue_stopped(txq)) {
+ rc = 0;
+ if (!dev_hard_start_xmit(skb, dev, txq)) {
+ HARD_TX_UNLOCK(dev, txq);
+ goto out;
+ }
+ }
+ HARD_TX_UNLOCK(dev, txq);
+ if (net_ratelimit())
+ printk(KERN_CRIT "Virtual device %s asks to "
+ "queue packet!\n", dev->name);
+ } else {
+ /* Recursion is detected! It is possible,
+ * unfortunately */
+ if (net_ratelimit())
+ printk(KERN_CRIT "Dead loop on virtual device "
+ "%s, fix it urgently!\n", dev->name);
+ }
+ }
+
+ rc = -ENETDOWN;
+ rcu_read_unlock_bh();
+
+out_kfree_skb:
+ kfree_skb(skb);
+ return rc;
+out:
+ rcu_read_unlock_bh();
+ return rc;
+}
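+
+/*
+ * Caller-side sketch (illustrative): a protocol layer sets skb->dev (and
+ * usually skb->priority) before queueing; the skb is consumed whatever
+ * the return value, as noted in the comment above.
+ *
+ *	skb->dev = dev;
+ *	skb->priority = sk->sk_priority;
+ *	rc = dev_queue_xmit(skb);
+ */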
+
+
+/*=======================================================================
+ Receiver routines
+ =======================================================================*/
+
+int netdev_max_backlog __read_mostly = 1000;
+int netdev_budget __read_mostly = 300;
+int weight_p __read_mostly = 64; /* old backlog weight */
+
+DEFINE_PER_CPU(struct netif_rx_stats, netdev_rx_stat) = { 0, };
+
+
+/**
+ * netif_rx - post buffer to the network code
+ * @skb: buffer to post
+ *
+ * This function receives a packet from a device driver and queues it for
+ * the upper (protocol) levels to process. It always succeeds. The buffer
+ * may be dropped during processing for congestion control or by the
+ * protocol layers.
+ *
+ * return values:
+ * NET_RX_SUCCESS (no congestion)
+ * NET_RX_DROP (packet was dropped)
+ *
+ */
+
+int netif_rx(struct sk_buff *skb)
+{
+ struct softnet_data *queue;
+ unsigned long flags;
+
+ /* if netpoll wants it, pretend we never saw it */
+ if (netpoll_rx(skb))
+ return NET_RX_DROP;
+
+ if (!skb->tstamp.tv64)
+ net_timestamp(skb);
+
+ /*
+	 * The code is rearranged so that the path is shortest
+	 * when the CPU is congested but still operating.
+ */
+ local_irq_save(flags);
+ queue = &__get_cpu_var(softnet_data);
+
+ __get_cpu_var(netdev_rx_stat).total++;
+ if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {
+ if (queue->input_pkt_queue.qlen) {
+enqueue:
+ __skb_queue_tail(&queue->input_pkt_queue, skb);
+ local_irq_restore(flags);
+ return NET_RX_SUCCESS;
+ }
+
+ napi_schedule(&queue->backlog);
+ goto enqueue;
+ }
+
+ __get_cpu_var(netdev_rx_stat).dropped++;
+ local_irq_restore(flags);
+
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
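+
+/*
+ * Driver-side sketch (illustrative; rx_buf, pkt_len and dev are
+ * hypothetical driver state): a non-NAPI driver hands a received frame
+ * to the stack from its interrupt handler roughly like this:
+ *
+ *	skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
+ *	if (skb) {
+ *		skb_reserve(skb, NET_IP_ALIGN);
+ *		memcpy(skb_put(skb, pkt_len), rx_buf, pkt_len);
+ *		skb->protocol = eth_type_trans(skb, dev);
+ *		netif_rx(skb);
+ *	}
+ */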
+
+int netif_rx_ni(struct sk_buff *skb)
+{
+ int err;
+
+ preempt_disable();
+ err = netif_rx(skb);
+ if (local_softirq_pending())
+ do_softirq();
+ preempt_enable();
+
+ return err;
+}
+
+EXPORT_SYMBOL(netif_rx_ni);
+
+static void net_tx_action(struct softirq_action *h)
+{
+ struct softnet_data *sd = &__get_cpu_var(softnet_data);
+
+ if (sd->completion_queue) {
+ struct sk_buff *clist;
+
+ local_irq_disable();
+ clist = sd->completion_queue;
+ sd->completion_queue = NULL;
+ local_irq_enable();
+
+ while (clist) {
+ struct sk_buff *skb = clist;
+ clist = clist->next;
+
+ WARN_ON(atomic_read(&skb->users));
+ __kfree_skb(skb);
+ }
+ }
+
+ if (sd->output_queue) {
+ struct Qdisc *head;
+
+ local_irq_disable();
+ head = sd->output_queue;
+ sd->output_queue = NULL;
+ local_irq_enable();
+
+ while (head) {
+ struct Qdisc *q = head;
+ spinlock_t *root_lock;
+
+ head = head->next_sched;
+
+ root_lock = qdisc_lock(q);
+ if (spin_trylock(root_lock)) {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+ qdisc_run(q);
+ spin_unlock(root_lock);
+ } else {
+ if (!test_bit(__QDISC_STATE_DEACTIVATED,
+ &q->state)) {
+ __netif_reschedule(q);
+ } else {
+ smp_mb__before_clear_bit();
+ clear_bit(__QDISC_STATE_SCHED,
+ &q->state);
+ }
+ }
+ }
+ }
+}
+
+static inline int deliver_skb(struct sk_buff *skb,
+ struct packet_type *pt_prev,
+ struct net_device *orig_dev)
+{
+ atomic_inc(&skb->users);
+ return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+}
+
+#if defined(CONFIG_BRIDGE) || defined (CONFIG_BRIDGE_MODULE)
+/* These hooks defined here for ATM */
+struct net_bridge;
+struct net_bridge_fdb_entry *(*br_fdb_get_hook)(struct net_bridge *br,
+ unsigned char *addr);
+void (*br_fdb_put_hook)(struct net_bridge_fdb_entry *ent) __read_mostly;
+
+/*
+ * If the bridge module is loaded, call the bridging hook.
+ * Returns NULL if the packet was consumed.
+ */
+struct sk_buff *(*br_handle_frame_hook)(struct net_bridge_port *p,
+ struct sk_buff *skb) __read_mostly;
+static inline struct sk_buff *handle_bridge(struct sk_buff *skb,
+ struct packet_type **pt_prev, int *ret,
+ struct net_device *orig_dev)
+{
+ struct net_bridge_port *port;
+
+ if (skb->pkt_type == PACKET_LOOPBACK ||
+ (port = rcu_dereference(skb->dev->br_port)) == NULL)
+ return skb;
+
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ }
+
+ return br_handle_frame_hook(port, skb);
+}
+#else
+#define handle_bridge(skb, pt_prev, ret, orig_dev) (skb)
+#endif
+
+#if defined(CONFIG_MACVLAN) || defined(CONFIG_MACVLAN_MODULE)
+struct sk_buff *(*macvlan_handle_frame_hook)(struct sk_buff *skb) __read_mostly;
+EXPORT_SYMBOL_GPL(macvlan_handle_frame_hook);
+
+static inline struct sk_buff *handle_macvlan(struct sk_buff *skb,
+ struct packet_type **pt_prev,
+ int *ret,
+ struct net_device *orig_dev)
+{
+ if (skb->dev->macvlan_port == NULL)
+ return skb;
+
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ }
+ return macvlan_handle_frame_hook(skb);
+}
+#else
+#define handle_macvlan(skb, pt_prev, ret, orig_dev) (skb)
+#endif
+
+#ifdef CONFIG_NET_CLS_ACT
+/* TODO: Maybe we should just force sch_ingress to be compiled in
+ * when CONFIG_NET_CLS_ACT is? Otherwise we pay a compare and two
+ * extra stores of useless instructions when we don't have it enabled
+ * but do have CONFIG_NET_CLS_ACT.
+ * NOTE: This doesn't remove any functionality; if you don't have
+ * the ingress scheduler, you just can't add policies on ingress.
+ *
+ */
+static int ing_filter(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+ u32 ttl = G_TC_RTTL(skb->tc_verd);
+ struct netdev_queue *rxq;
+ int result = TC_ACT_OK;
+ struct Qdisc *q;
+
+ if (MAX_RED_LOOP < ttl++) {
+ printk(KERN_WARNING
+ "Redir loop detected Dropping packet (%d->%d)\n",
+ skb->iif, dev->ifindex);
+ return TC_ACT_SHOT;
+ }
+
+ skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
+ skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
+
+ rxq = &dev->rx_queue;
+
+ q = rxq->qdisc;
+ if (q != &noop_qdisc) {
+ spin_lock(qdisc_lock(q));
+ if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+ result = qdisc_enqueue_root(skb, q);
+ spin_unlock(qdisc_lock(q));
+ }
+
+ return result;
+}
+
+static inline struct sk_buff *handle_ing(struct sk_buff *skb,
+ struct packet_type **pt_prev,
+ int *ret, struct net_device *orig_dev)
+{
+ if (skb->dev->rx_queue.qdisc == &noop_qdisc)
+ goto out;
+
+ if (*pt_prev) {
+ *ret = deliver_skb(skb, *pt_prev, orig_dev);
+ *pt_prev = NULL;
+ } else {
+ /* Huh? Why does turning on AF_PACKET affect this? */
+ skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
+ }
+
+ switch (ing_filter(skb)) {
+ case TC_ACT_SHOT:
+ case TC_ACT_STOLEN:
+ kfree_skb(skb);
+ return NULL;
+ }
+
+out:
+ skb->tc_verd = 0;
+ return skb;
+}
+#endif
+
+/*
+ * netif_nit_deliver - deliver received packets to network taps
+ * @skb: buffer
+ *
+ * This function is used to deliver incoming packets to network
+ * taps. It should be used when the normal netif_receive_skb path
+ * is bypassed, for example because of VLAN acceleration.
+ */
+void netif_nit_deliver(struct sk_buff *skb)
+{
+ struct packet_type *ptype;
+
+ if (list_empty(&ptype_all))
+ return;
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (!ptype->dev || ptype->dev == skb->dev)
+ deliver_skb(skb, ptype, skb->dev);
+ }
+ rcu_read_unlock();
+}
+
+/**
+ * netif_receive_skb - process receive buffer from network
+ * @skb: buffer to process
+ *
+ * netif_receive_skb() is the main receive data processing function.
+ * It always succeeds. The buffer may be dropped during processing
+ * for congestion control or by the protocol layers.
+ *
+ * This function may only be called from softirq context and interrupts
+ * should be enabled.
+ *
+ * Return values (usually ignored):
+ * NET_RX_SUCCESS: no congestion
+ * NET_RX_DROP: packet was dropped
+ */
+int netif_receive_skb(struct sk_buff *skb)
+{
+ struct packet_type *ptype, *pt_prev;
+ struct net_device *orig_dev;
+ struct net_device *null_or_orig;
+ int ret = NET_RX_DROP;
+ __be16 type;
+
+ if (skb->vlan_tci && vlan_hwaccel_do_receive(skb))
+ return NET_RX_SUCCESS;
+
+ /* if we've gotten here through NAPI, check netpoll */
+ if (netpoll_receive_skb(skb))
+ return NET_RX_DROP;
+
+ if (!skb->tstamp.tv64)
+ net_timestamp(skb);
+
+ if (!skb->iif)
+ skb->iif = skb->dev->ifindex;
+
+ null_or_orig = NULL;
+ orig_dev = skb->dev;
+ if (orig_dev->master) {
+ if (skb_bond_should_drop(skb))
+ null_or_orig = orig_dev; /* deliver only exact match */
+ else
+ skb->dev = orig_dev->master;
+ }
+
+ __get_cpu_var(netdev_rx_stat).total++;
+
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ skb->mac_len = skb->network_header - skb->mac_header;
+
+ pt_prev = NULL;
+
+ rcu_read_lock();
+
+#ifdef CONFIG_NET_CLS_ACT
+ if (skb->tc_verd & TC_NCLS) {
+ skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
+ goto ncls;
+ }
+#endif
+
+ list_for_each_entry_rcu(ptype, &ptype_all, list) {
+ if (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev) {
+ if (pt_prev)
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = ptype;
+ }
+ }
+
+#ifdef CONFIG_NET_CLS_ACT
+ skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto out;
+ncls:
+#endif
+
+ skb = handle_bridge(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto out;
+ skb = handle_macvlan(skb, &pt_prev, &ret, orig_dev);
+ if (!skb)
+ goto out;
+
+ type = skb->protocol;
+ list_for_each_entry_rcu(ptype,
+ &ptype_base[ntohs(type) & PTYPE_HASH_MASK], list) {
+ if (ptype->type == type &&
+ (ptype->dev == null_or_orig || ptype->dev == skb->dev ||
+ ptype->dev == orig_dev)) {
+ if (pt_prev)
+ ret = deliver_skb(skb, pt_prev, orig_dev);
+ pt_prev = ptype;
+ }
+ }
+
+ if (pt_prev) {
+ ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
+ } else {
+ kfree_skb(skb);
+		/* Jamal, now you will not be able to escape explaining
+		 * to me how you were going to use this. :-)
+ */
+ ret = NET_RX_DROP;
+ }
+
+out:
+ rcu_read_unlock();
+ return ret;
+}
+
+/* Network device is going away, flush any packets still pending */
+static void flush_backlog(void *arg)
+{
+ struct net_device *dev = arg;
+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ struct sk_buff *skb, *tmp;
+
+ skb_queue_walk_safe(&queue->input_pkt_queue, skb, tmp)
+ if (skb->dev == dev) {
+ __skb_unlink(skb, &queue->input_pkt_queue);
+ kfree_skb(skb);
+ }
+}
+
+static int napi_gro_complete(struct sk_buff *skb)
+{
+ struct packet_type *ptype;
+ __be16 type = skb->protocol;
+ struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+ int err = -ENOENT;
+
+ if (NAPI_GRO_CB(skb)->count == 1)
+ goto out;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ if (ptype->type != type || ptype->dev || !ptype->gro_complete)
+ continue;
+
+ err = ptype->gro_complete(skb);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (err) {
+ WARN_ON(&ptype->list == head);
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+ }
+
+out:
+ skb_shinfo(skb)->gso_size = 0;
+ __skb_push(skb, -skb_network_offset(skb));
+ return netif_receive_skb(skb);
+}
+
+void napi_gro_flush(struct napi_struct *napi)
+{
+ struct sk_buff *skb, *next;
+
+ for (skb = napi->gro_list; skb; skb = next) {
+ next = skb->next;
+ skb->next = NULL;
+ napi_gro_complete(skb);
+ }
+
+ napi->gro_list = NULL;
+}
+EXPORT_SYMBOL(napi_gro_flush);
+
+int dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct sk_buff **pp = NULL;
+ struct packet_type *ptype;
+ __be16 type = skb->protocol;
+ struct list_head *head = &ptype_base[ntohs(type) & PTYPE_HASH_MASK];
+ int count = 0;
+ int same_flow;
+ int mac_len;
+ int free;
+
+ if (!(skb->dev->features & NETIF_F_GRO))
+ goto normal;
+
+ if (skb_is_gso(skb) || skb_shinfo(skb)->frag_list)
+ goto normal;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ptype, head, list) {
+ struct sk_buff *p;
+
+ if (ptype->type != type || ptype->dev || !ptype->gro_receive)
+ continue;
+
+ skb_reset_network_header(skb);
+ mac_len = skb->network_header - skb->mac_header;
+ skb->mac_len = mac_len;
+ NAPI_GRO_CB(skb)->same_flow = 0;
+ NAPI_GRO_CB(skb)->flush = 0;
+ NAPI_GRO_CB(skb)->free = 0;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ count++;
+
+ if (!NAPI_GRO_CB(p)->same_flow)
+ continue;
+
+ if (p->mac_len != mac_len ||
+ memcmp(skb_mac_header(p), skb_mac_header(skb),
+ mac_len))
+ NAPI_GRO_CB(p)->same_flow = 0;
+ }
+
+ pp = ptype->gro_receive(&napi->gro_list, skb);
+ break;
+ }
+ rcu_read_unlock();
+
+ if (&ptype->list == head)
+ goto normal;
+
+ same_flow = NAPI_GRO_CB(skb)->same_flow;
+ free = NAPI_GRO_CB(skb)->free;
+
+ if (pp) {
+ struct sk_buff *nskb = *pp;
+
+ *pp = nskb->next;
+ nskb->next = NULL;
+ napi_gro_complete(nskb);
+ count--;
+ }
+
+ if (same_flow)
+ goto ok;
+
+ if (NAPI_GRO_CB(skb)->flush || count >= MAX_GRO_SKBS) {
+ __skb_push(skb, -skb_network_offset(skb));
+ goto normal;
+ }
+
+ NAPI_GRO_CB(skb)->count = 1;
+ skb_shinfo(skb)->gso_size = skb->len;
+ skb->next = napi->gro_list;
+ napi->gro_list = skb;
+
+ok:
+ return free;
+
+normal:
+ return -1;
+}
+EXPORT_SYMBOL(dev_gro_receive);
+
+static int __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ struct sk_buff *p;
+
+ for (p = napi->gro_list; p; p = p->next) {
+ NAPI_GRO_CB(p)->same_flow = 1;
+ NAPI_GRO_CB(p)->flush = 0;
+ }
+
+ return dev_gro_receive(napi, skb);
+}
+
+int napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
+{
+ if (netpoll_receive_skb(skb))
+ return NET_RX_DROP;
+
+ switch (__napi_gro_receive(napi, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 1:
+ kfree_skb(skb);
+ break;
+ }
+
+ return NET_RX_SUCCESS;
+}
+EXPORT_SYMBOL(napi_gro_receive);
+
+void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
+{
+ __skb_pull(skb, skb_headlen(skb));
+ skb_reserve(skb, NET_IP_ALIGN - skb_headroom(skb));
+
+ napi->skb = skb;
+}
+EXPORT_SYMBOL(napi_reuse_skb);
+
+struct sk_buff *napi_fraginfo_skb(struct napi_struct *napi,
+ struct napi_gro_fraginfo *info)
+{
+ struct net_device *dev = napi->dev;
+ struct sk_buff *skb = napi->skb;
+
+ napi->skb = NULL;
+
+ if (!skb) {
+ skb = netdev_alloc_skb(dev, GRO_MAX_HEAD + NET_IP_ALIGN);
+ if (!skb)
+ goto out;
+
+ skb_reserve(skb, NET_IP_ALIGN);
+ }
+
+ BUG_ON(info->nr_frags > MAX_SKB_FRAGS);
+ skb_shinfo(skb)->nr_frags = info->nr_frags;
+ memcpy(skb_shinfo(skb)->frags, info->frags, sizeof(info->frags));
+
+ skb->data_len = info->len;
+ skb->len += info->len;
+ skb->truesize += info->len;
+
+ if (!pskb_may_pull(skb, ETH_HLEN)) {
+ napi_reuse_skb(napi, skb);
+ skb = NULL;
+ goto out;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+
+ skb->ip_summed = info->ip_summed;
+ skb->csum = info->csum;
+
+out:
+ return skb;
+}
+EXPORT_SYMBOL(napi_fraginfo_skb);
+
+int napi_gro_frags(struct napi_struct *napi, struct napi_gro_fraginfo *info)
+{
+ struct sk_buff *skb = napi_fraginfo_skb(napi, info);
+ int err = NET_RX_DROP;
+
+ if (!skb)
+ goto out;
+
+ if (netpoll_receive_skb(skb))
+ goto out;
+
+ err = NET_RX_SUCCESS;
+
+ switch (__napi_gro_receive(napi, skb)) {
+ case -1:
+ return netif_receive_skb(skb);
+
+ case 0:
+ goto out;
+ }
+
+ napi_reuse_skb(napi, skb);
+
+out:
+ return err;
+}
+EXPORT_SYMBOL(napi_gro_frags);
+
+static int process_backlog(struct napi_struct *napi, int quota)
+{
+ int work = 0;
+ struct softnet_data *queue = &__get_cpu_var(softnet_data);
+ unsigned long start_time = jiffies;
+
+ napi->weight = weight_p;
+ do {
+ struct sk_buff *skb;
+
+ local_irq_disable();
+ skb = __skb_dequeue(&queue->input_pkt_queue);
+ if (!skb) {
+ local_irq_enable();
+ napi_complete(napi);
+ goto out;
+ }
+ local_irq_enable();
+
+ napi_gro_receive(napi, skb);
+ } while (++work < quota && jiffies == start_time);
+
+ napi_gro_flush(napi);
+
+out:
+ return work;
+}
+
+/**
+ * __napi_schedule - schedule for receive
+ * @n: entry to schedule
+ *
+ * The entry's receive function will be scheduled to run
+ */
+void __napi_schedule(struct napi_struct *n)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ list_add_tail(&n->poll_list, &__get_cpu_var(softnet_data).poll_list);
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(__napi_schedule);
+
+void __napi_complete(struct napi_struct *n)
+{
+ BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
+ BUG_ON(n->gro_list);
+
+ list_del(&n->poll_list);
+ smp_mb__before_clear_bit();
+ clear_bit(NAPI_STATE_SCHED, &n->state);
+}
+EXPORT_SYMBOL(__napi_complete);
+
+void napi_complete(struct napi_struct *n)
+{
+ unsigned long flags;
+
+ /*
+ * don't let napi dequeue from the cpu poll list
+	 * just in case it's running on a different cpu
+ */
+ if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
+ return;
+
+ napi_gro_flush(n);
+ local_irq_save(flags);
+ __napi_complete(n);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(napi_complete);
+
+void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
+ int (*poll)(struct napi_struct *, int), int weight)
+{
+ INIT_LIST_HEAD(&napi->poll_list);
+ napi->gro_list = NULL;
+ napi->skb = NULL;
+ napi->poll = poll;
+ napi->weight = weight;
+ list_add(&napi->dev_list, &dev->napi_list);
+ napi->dev = dev;
+#ifdef CONFIG_NETPOLL
+ spin_lock_init(&napi->poll_lock);
+ napi->poll_owner = -1;
+#endif
+ set_bit(NAPI_STATE_SCHED, &napi->state);
+}
+EXPORT_SYMBOL(netif_napi_add);
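+
+/*
+ * Illustrative NAPI pattern (my_poll, priv and the *_rx_irq helpers are
+ * hypothetical driver names): add the context at probe time, enable it
+ * when the device is opened, schedule it from the interrupt handler and
+ * complete it from the poll callback once it used less than its budget.
+ *
+ *	netif_napi_add(dev, &priv->napi, my_poll, 64);
+ *	napi_enable(&priv->napi);
+ *
+ *	In the interrupt handler:
+ *		if (napi_schedule_prep(&priv->napi)) {
+ *			my_disable_rx_irq(priv);
+ *			__napi_schedule(&priv->napi);
+ *		}
+ *
+ *	At the end of my_poll(), when fewer than 'budget' packets were handled:
+ *		napi_complete(&priv->napi);
+ *		my_enable_rx_irq(priv);
+ */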
+
+void netif_napi_del(struct napi_struct *napi)
+{
+ struct sk_buff *skb, *next;
+
+ list_del_init(&napi->dev_list);
+ kfree_skb(napi->skb);
+
+ for (skb = napi->gro_list; skb; skb = next) {
+ next = skb->next;
+ skb->next = NULL;
+ kfree_skb(skb);
+ }
+
+ napi->gro_list = NULL;
+}
+EXPORT_SYMBOL(netif_napi_del);
+
+
+static void net_rx_action(struct softirq_action *h)
+{
+ struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
+ unsigned long time_limit = jiffies + 2;
+ int budget = netdev_budget;
+ void *have;
+
+ local_irq_disable();
+
+ while (!list_empty(list)) {
+ struct napi_struct *n;
+ int work, weight;
+
+		/* If the softirq window is exhausted then punt.
+		 * Allow this to run for 2 jiffies, which allows
+		 * an average latency of 1.5/HZ.
+ */
+ if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))
+ goto softnet_break;
+
+ local_irq_enable();
+
+ /* Even though interrupts have been re-enabled, this
+ * access is safe because interrupts can only add new
+ * entries to the tail of this list, and only ->poll()
+ * calls can remove this head entry from the list.
+ */
+ n = list_entry(list->next, struct napi_struct, poll_list);
+
+ have = netpoll_poll_lock(n);
+
+ weight = n->weight;
+
+ /* This NAPI_STATE_SCHED test is for avoiding a race
+ * with netpoll's poll_napi(). Only the entity which
+ * obtains the lock and sees NAPI_STATE_SCHED set will
+ * actually make the ->poll() call. Therefore we avoid
+		 * accidentally calling ->poll() when NAPI is not scheduled.
+ */
+ work = 0;
+ if (test_bit(NAPI_STATE_SCHED, &n->state))
+ work = n->poll(n, weight);
+
+ WARN_ON_ONCE(work > weight);
+
+ budget -= work;
+
+ local_irq_disable();
+
+ /* Drivers must not modify the NAPI state if they
+ * consume the entire weight. In such cases this code
+ * still "owns" the NAPI instance and therefore can
+ * move the instance around on the list at-will.
+ */
+ if (unlikely(work == weight)) {
+ if (unlikely(napi_disable_pending(n)))
+ __napi_complete(n);
+ else
+ list_move_tail(&n->poll_list, list);
+ }
+
+ netpoll_poll_unlock(have);
+ }
+out:
+ local_irq_enable();
+
+#ifdef CONFIG_NET_DMA
+ /*
+ * There may not be any more sk_buffs coming right now, so push
+ * any pending DMA copies to hardware
+ */
+ dma_issue_pending_all();
+#endif
+
+ return;
+
+softnet_break:
+ __get_cpu_var(netdev_rx_stat).time_squeeze++;
+ __raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ goto out;
+}
+
+static gifconf_func_t * gifconf_list [NPROTO];
+
+/**
+ * register_gifconf - register a SIOCGIF handler
+ * @family: Address family
+ * @gifconf: Function handler
+ *
+ * Register protocol dependent address dumping routines. The handler
+ * that is passed must not be freed or reused until it has been replaced
+ * by another handler.
+ */
+int register_gifconf(unsigned int family, gifconf_func_t * gifconf)
+{
+ if (family >= NPROTO)
+ return -EINVAL;
+ gifconf_list[family] = gifconf;
+ return 0;
+}
+
+
+/*
+ * Map an interface index to its name (SIOCGIFNAME)
+ */
+
+/*
+ * We need this ioctl for efficient implementation of the
+ * if_indextoname() function required by the IPv6 API. Without
+ * it, we would have to search all the interfaces to find a
+ * match. --pb
+ */
+
+static int dev_ifname(struct net *net, struct ifreq __user *arg)
+{
+ struct net_device *dev;
+ struct ifreq ifr;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ return -EFAULT;
+
+ read_lock(&dev_base_lock);
+ dev = __dev_get_by_index(net, ifr.ifr_ifindex);
+ if (!dev) {
+ read_unlock(&dev_base_lock);
+ return -ENODEV;
+ }
+
+ strcpy(ifr.ifr_name, dev->name);
+ read_unlock(&dev_base_lock);
+
+ if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * Perform a SIOCGIFCONF call. This structure will change
+ * size eventually, and there is nothing I can do about it.
+ * Thus we will need a 'compatibility mode'.
+ */
+
+static int dev_ifconf(struct net *net, char __user *arg)
+{
+ struct ifconf ifc;
+ struct net_device *dev;
+ char __user *pos;
+ int len;
+ int total;
+ int i;
+
+ /*
+ * Fetch the caller's info block.
+ */
+
+ if (copy_from_user(&ifc, arg, sizeof(struct ifconf)))
+ return -EFAULT;
+
+ pos = ifc.ifc_buf;
+ len = ifc.ifc_len;
+
+ /*
+ * Loop over the interfaces, and write an info block for each.
+ */
+
+ total = 0;
+ for_each_netdev(net, dev) {
+ for (i = 0; i < NPROTO; i++) {
+ if (gifconf_list[i]) {
+ int done;
+ if (!pos)
+ done = gifconf_list[i](dev, NULL, 0);
+ else
+ done = gifconf_list[i](dev, pos + total,
+ len - total);
+ if (done < 0)
+ return -EFAULT;
+ total += done;
+ }
+ }
+ }
+
+ /*
+ * All done. Write the updated control block back to the caller.
+ */
+ ifc.ifc_len = total;
+
+ /*
+ * Both BSD and Solaris return 0 here, so we do too.
+ */
+ return copy_to_user(arg, &ifc, sizeof(struct ifconf)) ? -EFAULT : 0;
+}
+
+#ifdef CONFIG_PROC_FS
+/*
+ * This is invoked by the /proc filesystem handler to display a device
+ * in detail.
+ */
+void *dev_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(dev_base_lock)
+{
+ struct net *net = seq_file_net(seq);
+ loff_t off;
+ struct net_device *dev;
+
+ read_lock(&dev_base_lock);
+ if (!*pos)
+ return SEQ_START_TOKEN;
+
+ off = 1;
+ for_each_netdev(net, dev)
+ if (off++ == *pos)
+ return dev;
+
+ return NULL;
+}
+
+void *dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct net *net = seq_file_net(seq);
+ ++*pos;
+ return v == SEQ_START_TOKEN ?
+ first_net_device(net) : next_net_device((struct net_device *)v);
+}
+
+void dev_seq_stop(struct seq_file *seq, void *v)
+ __releases(dev_base_lock)
+{
+ read_unlock(&dev_base_lock);
+}
+
+static void dev_seq_printf_stats(struct seq_file *seq, struct net_device *dev)
+{
+ const struct net_device_stats *stats = dev_get_stats(dev);
+
+ seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+ "%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+ dev->name, stats->rx_bytes, stats->rx_packets,
+ stats->rx_errors,
+ stats->rx_dropped + stats->rx_missed_errors,
+ stats->rx_fifo_errors,
+ stats->rx_length_errors + stats->rx_over_errors +
+ stats->rx_crc_errors + stats->rx_frame_errors,
+ stats->rx_compressed, stats->multicast,
+ stats->tx_bytes, stats->tx_packets,
+ stats->tx_errors, stats->tx_dropped,
+ stats->tx_fifo_errors, stats->collisions,
+ stats->tx_carrier_errors +
+ stats->tx_aborted_errors +
+ stats->tx_window_errors +
+ stats->tx_heartbeat_errors,
+ stats->tx_compressed);
+}
+
+/*
+ * Called from the PROCfs module. This now uses the new arbitrary-sized
+ * /proc/net interface to create /proc/net/dev.
+ */
+static int dev_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Inter-| Receive "
+ " | Transmit\n"
+ " face |bytes packets errs drop fifo frame "
+ "compressed multicast|bytes packets errs "
+ "drop fifo colls carrier compressed\n");
+ else
+ dev_seq_printf_stats(seq, v);
+ return 0;
+}
+
+static struct netif_rx_stats *softnet_get_online(loff_t *pos)
+{
+ struct netif_rx_stats *rc = NULL;
+
+ while (*pos < nr_cpu_ids)
+ if (cpu_online(*pos)) {
+ rc = &per_cpu(netdev_rx_stat, *pos);
+ break;
+ } else
+ ++*pos;
+ return rc;
+}
+
+static void *softnet_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ return softnet_get_online(pos);
+}
+
+static void *softnet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ ++*pos;
+ return softnet_get_online(pos);
+}
+
+static void softnet_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int softnet_seq_show(struct seq_file *seq, void *v)
+{
+ struct netif_rx_stats *s = v;
+
+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
+ s->total, s->dropped, s->time_squeeze, 0,
+ 0, 0, 0, 0, /* was fastroute */
+ s->cpu_collision );
+ return 0;
+}
+
+static const struct seq_operations dev_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = dev_seq_show,
+};
+
+static int dev_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &dev_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations dev_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = dev_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+static const struct seq_operations softnet_seq_ops = {
+ .start = softnet_seq_start,
+ .next = softnet_seq_next,
+ .stop = softnet_seq_stop,
+ .show = softnet_seq_show,
+};
+
+static int softnet_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &softnet_seq_ops);
+}
+
+static const struct file_operations softnet_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = softnet_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static void *ptype_get_idx(loff_t pos)
+{
+ struct packet_type *pt = NULL;
+ loff_t i = 0;
+ int t;
+
+ list_for_each_entry_rcu(pt, &ptype_all, list) {
+ if (i == pos)
+ return pt;
+ ++i;
+ }
+
+ for (t = 0; t < PTYPE_HASH_SIZE; t++) {
+ list_for_each_entry_rcu(pt, &ptype_base[t], list) {
+ if (i == pos)
+ return pt;
+ ++i;
+ }
+ }
+ return NULL;
+}
+
+static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(RCU)
+{
+ rcu_read_lock();
+ return *pos ? ptype_get_idx(*pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct packet_type *pt;
+ struct list_head *nxt;
+ int hash;
+
+ ++*pos;
+ if (v == SEQ_START_TOKEN)
+ return ptype_get_idx(0);
+
+ pt = v;
+ nxt = pt->list.next;
+ if (pt->type == htons(ETH_P_ALL)) {
+ if (nxt != &ptype_all)
+ goto found;
+ hash = 0;
+ nxt = ptype_base[0].next;
+ } else
+ hash = ntohs(pt->type) & PTYPE_HASH_MASK;
+
+ while (nxt == &ptype_base[hash]) {
+ if (++hash >= PTYPE_HASH_SIZE)
+ return NULL;
+ nxt = ptype_base[hash].next;
+ }
+found:
+ return list_entry(nxt, struct packet_type, list);
+}
+
+static void ptype_seq_stop(struct seq_file *seq, void *v)
+ __releases(RCU)
+{
+ rcu_read_unlock();
+}
+
+static int ptype_seq_show(struct seq_file *seq, void *v)
+{
+ struct packet_type *pt = v;
+
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "Type Device Function\n");
+ else if (pt->dev == NULL || dev_net(pt->dev) == seq_file_net(seq)) {
+ if (pt->type == htons(ETH_P_ALL))
+ seq_puts(seq, "ALL ");
+ else
+ seq_printf(seq, "%04x", ntohs(pt->type));
+
+ seq_printf(seq, " %-8s %pF\n",
+ pt->dev ? pt->dev->name : "", pt->func);
+ }
+
+ return 0;
+}
+
+static const struct seq_operations ptype_seq_ops = {
+ .start = ptype_seq_start,
+ .next = ptype_seq_next,
+ .stop = ptype_seq_stop,
+ .show = ptype_seq_show,
+};
+
+static int ptype_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &ptype_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations ptype_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = ptype_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+
+static int __net_init dev_proc_net_init(struct net *net)
+{
+ int rc = -ENOMEM;
+
+ if (!proc_net_fops_create(net, "dev", S_IRUGO, &dev_seq_fops))
+ goto out;
+ if (!proc_net_fops_create(net, "softnet_stat", S_IRUGO, &softnet_seq_fops))
+ goto out_dev;
+ if (!proc_net_fops_create(net, "ptype", S_IRUGO, &ptype_seq_fops))
+ goto out_softnet;
+
+ if (wext_proc_init(net))
+ goto out_ptype;
+ rc = 0;
+out:
+ return rc;
+out_ptype:
+ proc_net_remove(net, "ptype");
+out_softnet:
+ proc_net_remove(net, "softnet_stat");
+out_dev:
+ proc_net_remove(net, "dev");
+ goto out;
+}
+
+static void __net_exit dev_proc_net_exit(struct net *net)
+{
+ wext_proc_exit(net);
+
+ proc_net_remove(net, "ptype");
+ proc_net_remove(net, "softnet_stat");
+ proc_net_remove(net, "dev");
+}
+
+static struct pernet_operations __net_initdata dev_proc_ops = {
+ .init = dev_proc_net_init,
+ .exit = dev_proc_net_exit,
+};
+
+static int __init dev_proc_init(void)
+{
+ return register_pernet_subsys(&dev_proc_ops);
+}
+#else
+#define dev_proc_init() 0
+#endif /* CONFIG_PROC_FS */
+
+
+/**
+ * netdev_set_master - set up master/slave pair
+ * @slave: slave device
+ * @master: new master device
+ *
+ * Changes the master device of the slave. Pass %NULL to break the
+ * bonding. The caller must hold the RTNL semaphore. On a failure
+ * a negative errno code is returned. On success the reference counts
+ * are adjusted, %RTM_NEWLINK is sent to the routing socket and the
+ * function returns zero.
+ */
+int netdev_set_master(struct net_device *slave, struct net_device *master)
+{
+ struct net_device *old = slave->master;
+
+ ASSERT_RTNL();
+
+ if (master) {
+ if (old)
+ return -EBUSY;
+ dev_hold(master);
+ }
+
+ slave->master = master;
+
+ synchronize_net();
+
+ if (old)
+ dev_put(old);
+
+ if (master)
+ slave->flags |= IFF_SLAVE;
+ else
+ slave->flags &= ~IFF_SLAVE;
+
+ rtmsg_ifinfo(RTM_NEWLINK, slave, IFF_SLAVE);
+ return 0;
+}
+
+static void dev_change_rx_flags(struct net_device *dev, int flags)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if ((dev->flags & IFF_UP) && ops->ndo_change_rx_flags)
+ ops->ndo_change_rx_flags(dev, flags);
+}
+
+static int __dev_set_promiscuity(struct net_device *dev, int inc)
+{
+ unsigned short old_flags = dev->flags;
+ uid_t uid;
+ gid_t gid;
+
+ ASSERT_RTNL();
+
+ dev->flags |= IFF_PROMISC;
+ dev->promiscuity += inc;
+ if (dev->promiscuity == 0) {
+ /*
+ * Avoid overflow.
+		 * If inc causes overflow, leave promisc untouched and return an error.
+ */
+ if (inc < 0)
+ dev->flags &= ~IFF_PROMISC;
+ else {
+ dev->promiscuity -= inc;
+ printk(KERN_WARNING "%s: promiscuity touches roof, "
+ "set promiscuity failed, promiscuity feature "
+ "of device might be broken.\n", dev->name);
+ return -EOVERFLOW;
+ }
+ }
+ if (dev->flags != old_flags) {
+ printk(KERN_INFO "device %s %s promiscuous mode\n",
+ dev->name, (dev->flags & IFF_PROMISC) ? "entered" :
+ "left");
+ if (audit_enabled) {
+ current_uid_gid(&uid, &gid);
+ audit_log(current->audit_context, GFP_ATOMIC,
+ AUDIT_ANOM_PROMISCUOUS,
+ "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
+ dev->name, (dev->flags & IFF_PROMISC),
+ (old_flags & IFF_PROMISC),
+ audit_get_loginuid(current),
+ uid, gid,
+ audit_get_sessionid(current));
+ }
+
+ dev_change_rx_flags(dev, IFF_PROMISC);
+ }
+ return 0;
+}
+
+/**
+ * dev_set_promiscuity - update promiscuity count on a device
+ * @dev: device
+ * @inc: modifier
+ *
+ * Add or remove promiscuity from a device. While the count in the device
+ * remains above zero the interface remains promiscuous. Once it hits zero
+ * the device reverts back to normal filtering operation. A negative inc
+ * value is used to drop promiscuity on the device.
+ * Return 0 if successful or a negative errno code on error.
+ */
+int dev_set_promiscuity(struct net_device *dev, int inc)
+{
+ unsigned short old_flags = dev->flags;
+ int err;
+
+ err = __dev_set_promiscuity(dev, inc);
+ if (err < 0)
+ return err;
+ if (dev->flags != old_flags)
+ dev_set_rx_mode(dev);
+ return err;
+}
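+
+/*
+ * Usage sketch (illustrative): a packet-capture style user takes the RTNL
+ * lock, bumps the count while capturing and drops it again when done:
+ *
+ *	rtnl_lock();
+ *	dev_set_promiscuity(dev, 1);
+ *	rtnl_unlock();
+ *	...
+ *	rtnl_lock();
+ *	dev_set_promiscuity(dev, -1);
+ *	rtnl_unlock();
+ */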
+
+/**
+ * dev_set_allmulti - update allmulti count on a device
+ * @dev: device
+ * @inc: modifier
+ *
+ * Add or remove reception of all multicast frames to a device. While the
+ *	count in the device remains above zero the interface remains listening
+ *	to all multicast frames. Once it hits zero the device reverts back to normal
+ * filtering operation. A negative @inc value is used to drop the counter
+ * when releasing a resource needing all multicasts.
+ * Return 0 if successful or a negative errno code on error.
+ */
+
+int dev_set_allmulti(struct net_device *dev, int inc)
+{
+ unsigned short old_flags = dev->flags;
+
+ ASSERT_RTNL();
+
+ dev->flags |= IFF_ALLMULTI;
+ dev->allmulti += inc;
+ if (dev->allmulti == 0) {
+ /*
+ * Avoid overflow.
+		 * If inc causes overflow, leave allmulti untouched and return an error.
+ */
+ if (inc < 0)
+ dev->flags &= ~IFF_ALLMULTI;
+ else {
+ dev->allmulti -= inc;
+ printk(KERN_WARNING "%s: allmulti touches roof, "
+ "set allmulti failed, allmulti feature of "
+ "device might be broken.\n", dev->name);
+ return -EOVERFLOW;
+ }
+ }
+ if (dev->flags ^ old_flags) {
+ dev_change_rx_flags(dev, IFF_ALLMULTI);
+ dev_set_rx_mode(dev);
+ }
+ return 0;
+}
+
+/*
+ * Upload unicast and multicast address lists to device and
+ * configure RX filtering. When the device doesn't support unicast
+ * filtering it is put in promiscuous mode while unicast addresses
+ * are present.
+ */
+void __dev_set_rx_mode(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ /* dev_open will call this function so the list will stay sane. */
+ if (!(dev->flags&IFF_UP))
+ return;
+
+ if (!netif_device_present(dev))
+ return;
+
+ if (ops->ndo_set_rx_mode)
+ ops->ndo_set_rx_mode(dev);
+ else {
+ /* Unicast addresses changes may only happen under the rtnl,
+ * therefore calling __dev_set_promiscuity here is safe.
+ */
+ if (dev->uc_count > 0 && !dev->uc_promisc) {
+ __dev_set_promiscuity(dev, 1);
+ dev->uc_promisc = 1;
+ } else if (dev->uc_count == 0 && dev->uc_promisc) {
+ __dev_set_promiscuity(dev, -1);
+ dev->uc_promisc = 0;
+ }
+
+ if (ops->ndo_set_multicast_list)
+ ops->ndo_set_multicast_list(dev);
+ }
+}
+
+void dev_set_rx_mode(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+}
+
+int __dev_addr_delete(struct dev_addr_list **list, int *count,
+ void *addr, int alen, int glbl)
+{
+ struct dev_addr_list *da;
+
+ for (; (da = *list) != NULL; list = &da->next) {
+ if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
+ alen == da->da_addrlen) {
+ if (glbl) {
+ int old_glbl = da->da_gusers;
+ da->da_gusers = 0;
+ if (old_glbl == 0)
+ break;
+ }
+ if (--da->da_users)
+ return 0;
+
+ *list = da->next;
+ kfree(da);
+ (*count)--;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+int __dev_addr_add(struct dev_addr_list **list, int *count,
+ void *addr, int alen, int glbl)
+{
+ struct dev_addr_list *da;
+
+ for (da = *list; da != NULL; da = da->next) {
+ if (memcmp(da->da_addr, addr, da->da_addrlen) == 0 &&
+ da->da_addrlen == alen) {
+ if (glbl) {
+ int old_glbl = da->da_gusers;
+ da->da_gusers = 1;
+ if (old_glbl)
+ return 0;
+ }
+ da->da_users++;
+ return 0;
+ }
+ }
+
+ da = kzalloc(sizeof(*da), GFP_ATOMIC);
+ if (da == NULL)
+ return -ENOMEM;
+ memcpy(da->da_addr, addr, alen);
+ da->da_addrlen = alen;
+ da->da_users = 1;
+ da->da_gusers = glbl ? 1 : 0;
+ da->next = *list;
+ *list = da;
+ (*count)++;
+ return 0;
+}
+
+/**
+ * dev_unicast_delete - Release secondary unicast address.
+ * @dev: device
+ * @addr: address to delete
+ * @alen: length of @addr
+ *
+ * Release reference to a secondary unicast address and remove it
+ * from the device if the reference count drops to zero.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_unicast_delete);
+
+/**
+ * dev_unicast_add - add a secondary unicast address
+ * @dev: device
+ * @addr: address to add
+ * @alen: length of @addr
+ *
+ * Add a secondary unicast address to the device or increase
+ * the reference count if it already exists.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_unicast_add);
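+
+/*
+ * Example (a sketch, not from the original source): a hypothetical driver
+ * that wants to receive frames for a second MAC address without going
+ * promiscuous can register it as a secondary unicast address; "extra_mac"
+ * is an assumed 6-byte, locally administered address owned by the driver.
+ *
+ *	static u8 extra_mac[ETH_ALEN] = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 };
+ *
+ *	rtnl_lock();
+ *	err = dev_unicast_add(dev, extra_mac, ETH_ALEN);
+ *	rtnl_unlock();
+ *	...
+ *	rtnl_lock();
+ *	dev_unicast_delete(dev, extra_mac, ETH_ALEN);
+ *	rtnl_unlock();
+ */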
+
+int __dev_addr_sync(struct dev_addr_list **to, int *to_count,
+ struct dev_addr_list **from, int *from_count)
+{
+ struct dev_addr_list *da, *next;
+ int err = 0;
+
+ da = *from;
+ while (da != NULL) {
+ next = da->next;
+ if (!da->da_synced) {
+ err = __dev_addr_add(to, to_count,
+ da->da_addr, da->da_addrlen, 0);
+ if (err < 0)
+ break;
+ da->da_synced = 1;
+ da->da_users++;
+ } else if (da->da_users == 1) {
+ __dev_addr_delete(to, to_count,
+ da->da_addr, da->da_addrlen, 0);
+ __dev_addr_delete(from, from_count,
+ da->da_addr, da->da_addrlen, 0);
+ }
+ da = next;
+ }
+ return err;
+}
+
+void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
+ struct dev_addr_list **from, int *from_count)
+{
+ struct dev_addr_list *da, *next;
+
+ da = *from;
+ while (da != NULL) {
+ next = da->next;
+ if (da->da_synced) {
+ __dev_addr_delete(to, to_count,
+ da->da_addr, da->da_addrlen, 0);
+ da->da_synced = 0;
+ __dev_addr_delete(from, from_count,
+ da->da_addr, da->da_addrlen, 0);
+ }
+ da = next;
+ }
+}
+
+/**
+ * dev_unicast_sync - Synchronize device's unicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_rx_mode
+ * function of layered software devices.
+ */
+int dev_unicast_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ netif_addr_lock_bh(to);
+ err = __dev_addr_sync(&to->uc_list, &to->uc_count,
+ &from->uc_list, &from->uc_count);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+ return err;
+}
+EXPORT_SYMBOL(dev_unicast_sync);
+
+/**
+ * dev_unicast_unsync - Remove synchronized addresses from the destination device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_unicast_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_unicast_unsync(struct net_device *to, struct net_device *from)
+{
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+
+ __dev_addr_unsync(&to->uc_list, &to->uc_count,
+ &from->uc_list, &from->uc_count);
+ __dev_set_rx_mode(to);
+
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_unicast_unsync);
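+
+/*
+ * Example (a sketch): a layered software device, VLAN- or macvlan-style,
+ * would typically propagate its address lists to the lower device from its
+ * rx-mode handler and tear them down in its stop handler.  "struct
+ * upper_priv" and its "lowerdev" member are hypothetical driver data.
+ *
+ *	static void upper_set_rx_mode(struct net_device *upper)
+ *	{
+ *		struct upper_priv *p = netdev_priv(upper);
+ *
+ *		dev_unicast_sync(p->lowerdev, upper);
+ *		dev_mc_sync(p->lowerdev, upper);
+ *	}
+ *
+ *	static int upper_stop(struct net_device *upper)
+ *	{
+ *		struct upper_priv *p = netdev_priv(upper);
+ *
+ *		dev_unicast_unsync(p->lowerdev, upper);
+ *		dev_mc_unsync(p->lowerdev, upper);
+ *		return 0;
+ *	}
+ */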
+
+static void __dev_addr_discard(struct dev_addr_list **list)
+{
+ struct dev_addr_list *tmp;
+
+ while (*list != NULL) {
+ tmp = *list;
+ *list = tmp->next;
+ if (tmp->da_users > tmp->da_gusers)
+ printk("__dev_addr_discard: address leakage! "
+ "da_users=%d\n", tmp->da_users);
+ kfree(tmp);
+ }
+}
+
+static void dev_addr_discard(struct net_device *dev)
+{
+ netif_addr_lock_bh(dev);
+
+ __dev_addr_discard(&dev->uc_list);
+ dev->uc_count = 0;
+
+ __dev_addr_discard(&dev->mc_list);
+ dev->mc_count = 0;
+
+ netif_addr_unlock_bh(dev);
+}
+
+/**
+ * dev_get_flags - get flags reported to userspace
+ * @dev: device
+ *
+ * Get the combination of flag bits exported through APIs to userspace.
+ */
+unsigned dev_get_flags(const struct net_device *dev)
+{
+ unsigned flags;
+
+ flags = (dev->flags & ~(IFF_PROMISC |
+ IFF_ALLMULTI |
+ IFF_RUNNING |
+ IFF_LOWER_UP |
+ IFF_DORMANT)) |
+ (dev->gflags & (IFF_PROMISC |
+ IFF_ALLMULTI));
+
+ if (netif_running(dev)) {
+ if (netif_oper_up(dev))
+ flags |= IFF_RUNNING;
+ if (netif_carrier_ok(dev))
+ flags |= IFF_LOWER_UP;
+ if (netif_dormant(dev))
+ flags |= IFF_DORMANT;
+ }
+
+ return flags;
+}
+
+/**
+ * dev_change_flags - change device settings
+ * @dev: device
+ * @flags: device state flags
+ *
+ * Change settings on device based state flags. The flags are
+ * in the userspace exported format.
+ */
+int dev_change_flags(struct net_device *dev, unsigned flags)
+{
+ int ret, changes;
+ int old_flags = dev->flags;
+
+ ASSERT_RTNL();
+
+ /*
+ * Set the flags on our device.
+ */
+
+ dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
+ IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
+ IFF_AUTOMEDIA)) |
+ (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
+ IFF_ALLMULTI));
+
+ /*
+ * Load in the correct multicast list now the flags have changed.
+ */
+
+ if ((old_flags ^ flags) & IFF_MULTICAST)
+ dev_change_rx_flags(dev, IFF_MULTICAST);
+
+ dev_set_rx_mode(dev);
+
+ /*
+	 *	Have we downed the interface? We handle IFF_UP ourselves
+ * according to user attempts to set it, rather than blindly
+ * setting it.
+ */
+
+ ret = 0;
+ if ((old_flags ^ flags) & IFF_UP) { /* Bit is different ? */
+ ret = ((old_flags & IFF_UP) ? dev_close : dev_open)(dev);
+
+ if (!ret)
+ dev_set_rx_mode(dev);
+ }
+
+ if (dev->flags & IFF_UP &&
+ ((old_flags ^ dev->flags) &~ (IFF_UP | IFF_PROMISC | IFF_ALLMULTI |
+ IFF_VOLATILE)))
+ call_netdevice_notifiers(NETDEV_CHANGE, dev);
+
+ if ((flags ^ dev->gflags) & IFF_PROMISC) {
+ int inc = (flags & IFF_PROMISC) ? +1 : -1;
+ dev->gflags ^= IFF_PROMISC;
+ dev_set_promiscuity(dev, inc);
+ }
+
+ /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
+	   is important. Some (broken) drivers set IFF_PROMISC, when
+	   IFF_ALLMULTI is requested, without asking us and without reporting it.
+ */
+ if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
+ int inc = (flags & IFF_ALLMULTI) ? +1 : -1;
+ dev->gflags ^= IFF_ALLMULTI;
+ dev_set_allmulti(dev, inc);
+ }
+
+ /* Exclude state transition flags, already notified */
+ changes = (old_flags ^ dev->flags) & ~(IFF_UP | IFF_RUNNING);
+ if (changes)
+ rtmsg_ifinfo(RTM_NEWLINK, dev, changes);
+
+ return ret;
+}
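+
+/*
+ * Example (a sketch): bringing an interface administratively up from
+ * kernel code mirrors what the SIOCSIFFLAGS path above does.  Assumes
+ * "dev" is a registered device and the caller may take the RTNL lock.
+ *
+ *	rtnl_lock();
+ *	err = dev_change_flags(dev, dev_get_flags(dev) | IFF_UP);
+ *	rtnl_unlock();
+ */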
+
+/**
+ * dev_set_mtu - Change maximum transfer unit
+ * @dev: device
+ * @new_mtu: new transfer unit
+ *
+ * Change the maximum transfer size of the network device.
+ */
+int dev_set_mtu(struct net_device *dev, int new_mtu)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
+
+ if (new_mtu == dev->mtu)
+ return 0;
+
+ /* MTU must be positive. */
+ if (new_mtu < 0)
+ return -EINVAL;
+
+ if (!netif_device_present(dev))
+ return -ENODEV;
+
+ err = 0;
+ if (ops->ndo_change_mtu)
+ err = ops->ndo_change_mtu(dev, new_mtu);
+ else
+ dev->mtu = new_mtu;
+
+ if (!err && dev->flags & IFF_UP)
+ call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ return err;
+}
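+
+/*
+ * Example (a sketch): a hypothetical tunnel driver that must fit an extra
+ * header into every frame might shrink the MTU of its device; the driver's
+ * ndo_change_mtu may still reject an out-of-range value.  TUNNEL_OVERHEAD
+ * is an assumed constant, not defined here.
+ *
+ *	rtnl_lock();
+ *	err = dev_set_mtu(dev, ETH_DATA_LEN - TUNNEL_OVERHEAD);
+ *	rtnl_unlock();
+ */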
+
+/**
+ * dev_set_mac_address - Change Media Access Control Address
+ * @dev: device
+ * @sa: new address
+ *
+ * Change the hardware (MAC) address of the device
+ */
+int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int err;
+
+ if (!ops->ndo_set_mac_address)
+ return -EOPNOTSUPP;
+ if (sa->sa_family != dev->type)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ err = ops->ndo_set_mac_address(dev, sa);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
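+
+/*
+ * Example (a sketch): setting a new MAC address from kernel code.  The
+ * sockaddr carries the hardware type in sa_family and the address bytes
+ * in sa_data; "new_mac" is an assumed 6-byte array.
+ *
+ *	struct sockaddr sa;
+ *
+ *	sa.sa_family = dev->type;
+ *	memcpy(sa.sa_data, new_mac, ETH_ALEN);
+ *	rtnl_lock();
+ *	err = dev_set_mac_address(dev, &sa);
+ *	rtnl_unlock();
+ */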
+
+/*
+ * Perform the SIOCxIFxxx calls, inside read_lock(dev_base_lock)
+ */
+static int dev_ifsioc_locked(struct net *net, struct ifreq *ifr, unsigned int cmd)
+{
+ int err;
+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+
+ if (!dev)
+ return -ENODEV;
+
+ switch (cmd) {
+ case SIOCGIFFLAGS: /* Get interface flags */
+ ifr->ifr_flags = dev_get_flags(dev);
+ return 0;
+
+ case SIOCGIFMETRIC: /* Get the metric on the interface
+ (currently unused) */
+ ifr->ifr_metric = 0;
+ return 0;
+
+ case SIOCGIFMTU: /* Get the MTU of a device */
+ ifr->ifr_mtu = dev->mtu;
+ return 0;
+
+ case SIOCGIFHWADDR:
+ if (!dev->addr_len)
+ memset(ifr->ifr_hwaddr.sa_data, 0, sizeof ifr->ifr_hwaddr.sa_data);
+ else
+ memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ ifr->ifr_hwaddr.sa_family = dev->type;
+ return 0;
+
+ case SIOCGIFSLAVE:
+ err = -EINVAL;
+ break;
+
+ case SIOCGIFMAP:
+ ifr->ifr_map.mem_start = dev->mem_start;
+ ifr->ifr_map.mem_end = dev->mem_end;
+ ifr->ifr_map.base_addr = dev->base_addr;
+ ifr->ifr_map.irq = dev->irq;
+ ifr->ifr_map.dma = dev->dma;
+ ifr->ifr_map.port = dev->if_port;
+ return 0;
+
+ case SIOCGIFINDEX:
+ ifr->ifr_ifindex = dev->ifindex;
+ return 0;
+
+ case SIOCGIFTXQLEN:
+ ifr->ifr_qlen = dev->tx_queue_len;
+ return 0;
+
+ default:
+ /* dev_ioctl() should ensure this case
+ * is never reached
+ */
+ WARN_ON(1);
+ err = -EINVAL;
+ break;
+
+ }
+ return err;
+}
+
+/*
+ * Perform the SIOCxIFxxx calls, inside rtnl_lock()
+ */
+static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
+{
+ int err;
+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+ const struct net_device_ops *ops;
+
+ if (!dev)
+ return -ENODEV;
+
+ ops = dev->netdev_ops;
+
+ switch (cmd) {
+ case SIOCSIFFLAGS: /* Set interface flags */
+ return dev_change_flags(dev, ifr->ifr_flags);
+
+ case SIOCSIFMETRIC: /* Set the metric on the interface
+ (currently unused) */
+ return -EOPNOTSUPP;
+
+ case SIOCSIFMTU: /* Set the MTU of a device */
+ return dev_set_mtu(dev, ifr->ifr_mtu);
+
+ case SIOCSIFHWADDR:
+ return dev_set_mac_address(dev, &ifr->ifr_hwaddr);
+
+ case SIOCSIFHWBROADCAST:
+ if (ifr->ifr_hwaddr.sa_family != dev->type)
+ return -EINVAL;
+ memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+ min(sizeof ifr->ifr_hwaddr.sa_data, (size_t) dev->addr_len));
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return 0;
+
+ case SIOCSIFMAP:
+ if (ops->ndo_set_config) {
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return ops->ndo_set_config(dev, &ifr->ifr_map);
+ }
+ return -EOPNOTSUPP;
+
+ case SIOCADDMULTI:
+ if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return dev_mc_add(dev, ifr->ifr_hwaddr.sa_data,
+ dev->addr_len, 1);
+
+ case SIOCDELMULTI:
+ if ((!ops->ndo_set_multicast_list && !ops->ndo_set_rx_mode) ||
+ ifr->ifr_hwaddr.sa_family != AF_UNSPEC)
+ return -EINVAL;
+ if (!netif_device_present(dev))
+ return -ENODEV;
+ return dev_mc_delete(dev, ifr->ifr_hwaddr.sa_data,
+ dev->addr_len, 1);
+
+ case SIOCSIFTXQLEN:
+ if (ifr->ifr_qlen < 0)
+ return -EINVAL;
+ dev->tx_queue_len = ifr->ifr_qlen;
+ return 0;
+
+ case SIOCSIFNAME:
+ ifr->ifr_newname[IFNAMSIZ-1] = '\0';
+ return dev_change_name(dev, ifr->ifr_newname);
+
+ /*
+ * Unknown or private ioctl
+ */
+
+ default:
+ if ((cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15) ||
+ cmd == SIOCBONDENSLAVE ||
+ cmd == SIOCBONDRELEASE ||
+ cmd == SIOCBONDSETHWADDR ||
+ cmd == SIOCBONDSLAVEINFOQUERY ||
+ cmd == SIOCBONDINFOQUERY ||
+ cmd == SIOCBONDCHANGEACTIVE ||
+ cmd == SIOCGMIIPHY ||
+ cmd == SIOCGMIIREG ||
+ cmd == SIOCSMIIREG ||
+ cmd == SIOCBRADDIF ||
+ cmd == SIOCBRDELIF ||
+ cmd == SIOCWANDEV) {
+ err = -EOPNOTSUPP;
+ if (ops->ndo_do_ioctl) {
+ if (netif_device_present(dev))
+ err = ops->ndo_do_ioctl(dev, ifr, cmd);
+ else
+ err = -ENODEV;
+ }
+ } else
+ err = -EINVAL;
+
+ }
+ return err;
+}
+
+/*
+ * This function handles all "interface"-type I/O control requests. The actual
+ * 'doing' part of this is dev_ifsioc above.
+ */
+
+/**
+ * dev_ioctl - network device ioctl
+ * @net: the applicable net namespace
+ * @cmd: command to issue
+ * @arg: pointer to a struct ifreq in user space
+ *
+ * Issue ioctl functions to devices. This is normally called by the
+ * user space syscall interfaces but can sometimes be useful for
+ * other purposes. The return value is the return from the syscall if
+ * positive or a negative errno code on error.
+ */
+
+int dev_ioctl(struct net *net, unsigned int cmd, void __user *arg)
+{
+ struct ifreq ifr;
+ int ret;
+ char *colon;
+
+ /* One special case: SIOCGIFCONF takes ifconf argument
+ and requires shared lock, because it sleeps writing
+ to user space.
+ */
+
+ if (cmd == SIOCGIFCONF) {
+ rtnl_lock();
+ ret = dev_ifconf(net, (char __user *) arg);
+ rtnl_unlock();
+ return ret;
+ }
+ if (cmd == SIOCGIFNAME)
+ return dev_ifname(net, (struct ifreq __user *)arg);
+
+ if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
+ return -EFAULT;
+
+ ifr.ifr_name[IFNAMSIZ-1] = 0;
+
+ colon = strchr(ifr.ifr_name, ':');
+ if (colon)
+ *colon = 0;
+
+ /*
+ * See which interface the caller is talking about.
+ */
+
+ switch (cmd) {
+ /*
+ * These ioctl calls:
+ * - can be done by all.
+ * - atomic and do not require locking.
+ * - return a value
+ */
+ case SIOCGIFFLAGS:
+ case SIOCGIFMETRIC:
+ case SIOCGIFMTU:
+ case SIOCGIFHWADDR:
+ case SIOCGIFSLAVE:
+ case SIOCGIFMAP:
+ case SIOCGIFINDEX:
+ case SIOCGIFTXQLEN:
+ dev_load(net, ifr.ifr_name);
+ read_lock(&dev_base_lock);
+ ret = dev_ifsioc_locked(net, &ifr, cmd);
+ read_unlock(&dev_base_lock);
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
+
+ case SIOCETHTOOL:
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ethtool(net, &ifr);
+ rtnl_unlock();
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
+
+ /*
+ * These ioctl calls:
+ * - require superuser power.
+ * - require strict serialization.
+ * - return a value
+ */
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSIFNAME:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ if (!ret) {
+ if (colon)
+ *colon = ':';
+ if (copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ }
+ return ret;
+
+ /*
+ * These ioctl calls:
+ * - require superuser power.
+ * - require strict serialization.
+ * - do not return a value
+ */
+ case SIOCSIFFLAGS:
+ case SIOCSIFMETRIC:
+ case SIOCSIFMTU:
+ case SIOCSIFMAP:
+ case SIOCSIFHWADDR:
+ case SIOCSIFSLAVE:
+ case SIOCADDMULTI:
+ case SIOCDELMULTI:
+ case SIOCSIFHWBROADCAST:
+ case SIOCSIFTXQLEN:
+ case SIOCSMIIREG:
+ case SIOCBONDENSLAVE:
+ case SIOCBONDRELEASE:
+ case SIOCBONDSETHWADDR:
+ case SIOCBONDCHANGEACTIVE:
+ case SIOCBRADDIF:
+ case SIOCBRDELIF:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /* fall through */
+ case SIOCBONDSLAVEINFOQUERY:
+ case SIOCBONDINFOQUERY:
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ return ret;
+
+ case SIOCGIFMEM:
+ /* Get the per device memory space. We can add this but
+ * currently do not support it */
+ case SIOCSIFMEM:
+ /* Set the per device memory buffer space.
+ * Not applicable in our case */
+ case SIOCSIFLINK:
+ return -EINVAL;
+
+ /*
+ * Unknown or private ioctl.
+ */
+ default:
+ if (cmd == SIOCWANDEV ||
+ (cmd >= SIOCDEVPRIVATE &&
+ cmd <= SIOCDEVPRIVATE + 15)) {
+ dev_load(net, ifr.ifr_name);
+ rtnl_lock();
+ ret = dev_ifsioc(net, &ifr, cmd);
+ rtnl_unlock();
+ if (!ret && copy_to_user(arg, &ifr,
+ sizeof(struct ifreq)))
+ ret = -EFAULT;
+ return ret;
+ }
+ /* Take care of Wireless Extensions */
+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST)
+ return wext_handle_ioctl(net, &ifr, cmd, arg);
+ return -EINVAL;
+ }
+}
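+
+/*
+ * Example (a sketch): the user-space side of the SIOCGIFMTU path handled
+ * above, shown only to relate the ifreq layout to dev_ifsioc_locked().
+ * Ordinary application code; includes and error handling omitted, and
+ * "eth0" is just a placeholder interface name.
+ *
+ *	struct ifreq ifr;
+ *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
+ *
+ *	memset(&ifr, 0, sizeof(ifr));
+ *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
+ *	if (ioctl(fd, SIOCGIFMTU, &ifr) == 0)
+ *		printf("mtu %d\n", ifr.ifr_mtu);
+ */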
+
+
+/**
+ * dev_new_index - allocate an ifindex
+ * @net: the applicable net namespace
+ *
+ * Returns a suitable unique value for a new device interface
+ * number. The caller must hold the rtnl semaphore or the
+ * dev_base_lock to be sure it remains unique.
+ */
+static int dev_new_index(struct net *net)
+{
+ static int ifindex;
+ for (;;) {
+ if (++ifindex <= 0)
+ ifindex = 1;
+ if (!__dev_get_by_index(net, ifindex))
+ return ifindex;
+ }
+}
+
+/* Delayed registration/unregisteration */
+static LIST_HEAD(net_todo_list);
+
+static void net_set_todo(struct net_device *dev)
+{
+ list_add_tail(&dev->todo_list, &net_todo_list);
+}
+
+static void rollback_registered(struct net_device *dev)
+{
+ BUG_ON(dev_boot_phase);
+ ASSERT_RTNL();
+
+	/* Some devices call this without ever having registered, to unwind a failed initialization. */
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ printk(KERN_DEBUG "unregister_netdevice: device %s/%p never "
+ "was registered\n", dev->name, dev);
+
+ WARN_ON(1);
+ return;
+ }
+
+ BUG_ON(dev->reg_state != NETREG_REGISTERED);
+
+ /* If device is running, close it first. */
+ dev_close(dev);
+
+ /* And unlink it from device chain. */
+ unlist_netdevice(dev);
+
+ dev->reg_state = NETREG_UNREGISTERING;
+
+ synchronize_net();
+
+ /* Shutdown queueing discipline. */
+ dev_shutdown(dev);
+
+
+ /* Notify protocols, that we are about to destroy
+ this device. They should clean all the things.
+ */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ /*
+ * Flush the unicast and multicast chains
+ */
+ dev_addr_discard(dev);
+
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+
+ /* Notifier chain MUST detach us from master device. */
+ WARN_ON(dev->master);
+
+ /* Remove entries from kobject tree */
+ netdev_unregister_kobject(dev);
+
+ synchronize_net();
+
+ dev_put(dev);
+}
+
+static void __netdev_init_queue_locks_one(struct net_device *dev,
+ struct netdev_queue *dev_queue,
+ void *_unused)
+{
+ spin_lock_init(&dev_queue->_xmit_lock);
+ netdev_set_xmit_lockdep_class(&dev_queue->_xmit_lock, dev->type);
+ dev_queue->xmit_lock_owner = -1;
+}
+
+static void netdev_init_queue_locks(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, __netdev_init_queue_locks_one, NULL);
+ __netdev_init_queue_locks_one(dev, &dev->rx_queue, NULL);
+}
+
+unsigned long netdev_fix_features(unsigned long features, const char *name)
+{
+ /* Fix illegal SG+CSUM combinations. */
+ if ((features & NETIF_F_SG) &&
+ !(features & NETIF_F_ALL_CSUM)) {
+ if (name)
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no "
+ "checksum feature.\n", name);
+ features &= ~NETIF_F_SG;
+ }
+
+ /* TSO requires that SG is present as well. */
+ if ((features & NETIF_F_TSO) && !(features & NETIF_F_SG)) {
+ if (name)
+ printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no "
+ "SG feature.\n", name);
+ features &= ~NETIF_F_TSO;
+ }
+
+ if (features & NETIF_F_UFO) {
+ if (!(features & NETIF_F_GEN_CSUM)) {
+ if (name)
+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
+ "since no NETIF_F_HW_CSUM feature.\n",
+ name);
+ features &= ~NETIF_F_UFO;
+ }
+
+ if (!(features & NETIF_F_SG)) {
+ if (name)
+ printk(KERN_ERR "%s: Dropping NETIF_F_UFO "
+ "since no NETIF_F_SG feature.\n", name);
+ features &= ~NETIF_F_UFO;
+ }
+ }
+
+ return features;
+}
+EXPORT_SYMBOL(netdev_fix_features);
+
+/* Some devices need to (re-)set their netdev_ops inside
+ * ->init() or similar. If that happens, we have to setup
+ * the compat pointers again.
+ */
+void netdev_resync_ops(struct net_device *dev)
+{
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ dev->init = ops->ndo_init;
+ dev->uninit = ops->ndo_uninit;
+ dev->open = ops->ndo_open;
+ dev->change_rx_flags = ops->ndo_change_rx_flags;
+ dev->set_rx_mode = ops->ndo_set_rx_mode;
+ dev->set_multicast_list = ops->ndo_set_multicast_list;
+ dev->set_mac_address = ops->ndo_set_mac_address;
+ dev->validate_addr = ops->ndo_validate_addr;
+ dev->do_ioctl = ops->ndo_do_ioctl;
+ dev->set_config = ops->ndo_set_config;
+ dev->change_mtu = ops->ndo_change_mtu;
+ dev->neigh_setup = ops->ndo_neigh_setup;
+ dev->tx_timeout = ops->ndo_tx_timeout;
+ dev->get_stats = ops->ndo_get_stats;
+ dev->vlan_rx_register = ops->ndo_vlan_rx_register;
+ dev->vlan_rx_add_vid = ops->ndo_vlan_rx_add_vid;
+ dev->vlan_rx_kill_vid = ops->ndo_vlan_rx_kill_vid;
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ dev->poll_controller = ops->ndo_poll_controller;
+#endif
+#endif
+}
+EXPORT_SYMBOL(netdev_resync_ops);
+
+/**
+ * register_netdevice - register a network device
+ * @dev: device to register
+ *
+ * Take a completed network device structure and add it to the kernel
+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
+ * chain. 0 is returned on success. A negative errno code is returned
+ * on a failure to set up the device, or if the name is a duplicate.
+ *
+ * Callers must hold the rtnl semaphore. You may want
+ * register_netdev() instead of this.
+ *
+ * BUGS:
+ * The locking appears insufficient to guarantee two parallel registers
+ * will not get the same name.
+ */
+
+int register_netdevice(struct net_device *dev)
+{
+ struct hlist_head *head;
+ struct hlist_node *p;
+ int ret;
+ struct net *net = dev_net(dev);
+
+ BUG_ON(dev_boot_phase);
+ ASSERT_RTNL();
+
+ might_sleep();
+
+ /* When net_device's are persistent, this will be fatal. */
+ BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
+ BUG_ON(!net);
+
+ spin_lock_init(&dev->addr_list_lock);
+ netdev_set_addr_lockdep_class(dev);
+ netdev_init_queue_locks(dev);
+
+ dev->iflink = -1;
+
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+	/* Netdevice_ops API compatibility support.
+ * This is temporary until all network devices are converted.
+ */
+ if (dev->netdev_ops) {
+ netdev_resync_ops(dev);
+ } else {
+ char drivername[64];
+ pr_info("%s (%s): not using net_device_ops yet\n",
+ dev->name, netdev_drivername(dev, drivername, 64));
+
+ /* This works only because net_device_ops and the
+		   compatibility structure are the same. */
+ dev->netdev_ops = (void *) &(dev->init);
+ }
+#endif
+
+ /* Init, if this function is available */
+ if (dev->netdev_ops->ndo_init) {
+ ret = dev->netdev_ops->ndo_init(dev);
+ if (ret) {
+ if (ret > 0)
+ ret = -EIO;
+ goto out;
+ }
+ }
+
+ if (!dev_valid_name(dev->name)) {
+ ret = -EINVAL;
+ goto err_uninit;
+ }
+
+ dev->ifindex = dev_new_index(net);
+ if (dev->iflink == -1)
+ dev->iflink = dev->ifindex;
+
+ /* Check for existence of name */
+ head = dev_name_hash(net, dev->name);
+ hlist_for_each(p, head) {
+ struct net_device *d
+ = hlist_entry(p, struct net_device, name_hlist);
+ if (!strncmp(d->name, dev->name, IFNAMSIZ)) {
+ ret = -EEXIST;
+ goto err_uninit;
+ }
+ }
+
+ /* Fix illegal checksum combinations */
+ if ((dev->features & NETIF_F_HW_CSUM) &&
+ (dev->features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ printk(KERN_NOTICE "%s: mixed HW and IP checksum settings.\n",
+ dev->name);
+ dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
+ }
+
+ if ((dev->features & NETIF_F_NO_CSUM) &&
+ (dev->features & (NETIF_F_HW_CSUM|NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
+ printk(KERN_NOTICE "%s: mixed no checksumming and other settings.\n",
+ dev->name);
+ dev->features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM|NETIF_F_HW_CSUM);
+ }
+
+ dev->features = netdev_fix_features(dev->features, dev->name);
+
+ /* Enable software GSO if SG is supported. */
+ if (dev->features & NETIF_F_SG)
+ dev->features |= NETIF_F_GSO;
+
+ netdev_initialize_kobject(dev);
+ ret = netdev_register_kobject(dev);
+ if (ret)
+ goto err_uninit;
+ dev->reg_state = NETREG_REGISTERED;
+
+ /*
+ * Default initial state at registry is that the
+ * device is present.
+ */
+
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+
+ dev_init_scheduler(dev);
+ dev_hold(dev);
+ list_netdevice(dev);
+
+ /* Notify protocols, that a new device appeared. */
+ ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
+ ret = notifier_to_errno(ret);
+ if (ret) {
+ rollback_registered(dev);
+ dev->reg_state = NETREG_UNREGISTERED;
+ }
+
+out:
+ return ret;
+
+err_uninit:
+ if (dev->netdev_ops->ndo_uninit)
+ dev->netdev_ops->ndo_uninit(dev);
+ goto out;
+}
+
+/**
+ * init_dummy_netdev - init a dummy network device for NAPI
+ * @dev: device to init
+ *
+ * This takes a network device structure and initializes the minimum
+ * set of fields so it can be used to schedule NAPI polls without
+ * registering a full blown interface. This is to be used by drivers
+ * that need to tie several hardware interfaces to a single NAPI
+ * poll scheduler due to HW limitations.
+ */
+int init_dummy_netdev(struct net_device *dev)
+{
+ /* Clear everything. Note we don't initialize spinlocks
+	 * as they aren't supposed to be taken by any of the
+ * NAPI code and this dummy netdev is supposed to be
+ * only ever used for NAPI polls
+ */
+ memset(dev, 0, sizeof(struct net_device));
+
+ /* make sure we BUG if trying to hit standard
+ * register/unregister code path
+ */
+ dev->reg_state = NETREG_DUMMY;
+
+ /* initialize the ref count */
+ atomic_set(&dev->refcnt, 1);
+
+ /* NAPI wants this */
+ INIT_LIST_HEAD(&dev->napi_list);
+
+ /* a dummy interface is started by default */
+ set_bit(__LINK_STATE_PRESENT, &dev->state);
+ set_bit(__LINK_STATE_START, &dev->state);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(init_dummy_netdev);
+
+
+/**
+ * register_netdev - register a network device
+ * @dev: device to register
+ *
+ * Take a completed network device structure and add it to the kernel
+ * interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
+ * chain. 0 is returned on success. A negative errno code is returned
+ * on a failure to set up the device, or if the name is a duplicate.
+ *
+ * This is a wrapper around register_netdevice that takes the rtnl semaphore
+ * and expands the device name if you passed a format string to
+ * alloc_netdev.
+ */
+int register_netdev(struct net_device *dev)
+{
+ int err;
+
+ rtnl_lock();
+
+ /*
+ * If the name is a format string the caller wants us to do a
+ * name allocation.
+ */
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto out;
+ }
+
+ err = register_netdevice(dev);
+out:
+ rtnl_unlock();
+ return err;
+}
+EXPORT_SYMBOL(register_netdev);
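+
+/*
+ * Example (a sketch): the usual driver-side sequence around
+ * register_netdev().  "struct my_priv" and "my_setup" are hypothetical
+ * driver definitions; alloc_etherdev() is the common shorthand for
+ * Ethernet devices.
+ *
+ *	struct net_device *dev;
+ *	int err;
+ *
+ *	dev = alloc_netdev(sizeof(struct my_priv), "my%d", my_setup);
+ *	if (!dev)
+ *		return -ENOMEM;
+ *	err = register_netdev(dev);
+ *	if (err) {
+ *		free_netdev(dev);
+ *		return err;
+ *	}
+ */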
+
+/*
+ * netdev_wait_allrefs - wait until all references are gone.
+ *
+ * This is called when unregistering network devices.
+ *
+ * Any protocol or device that holds a reference should register
+ * for netdevice notification, and cleanup and put back the
+ * reference if they receive an UNREGISTER event.
+ * We can get stuck here if buggy protocols don't correctly
+ * call dev_put.
+ */
+static void netdev_wait_allrefs(struct net_device *dev)
+{
+ unsigned long rebroadcast_time, warning_time;
+
+ rebroadcast_time = warning_time = jiffies;
+ while (atomic_read(&dev->refcnt) != 0) {
+ if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
+ rtnl_lock();
+
+ /* Rebroadcast unregister notification */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
+ &dev->state)) {
+ /* We must not have linkwatch events
+ * pending on unregister. If this
+ * happens, we simply run the queue
+ * unscheduled, resulting in a noop
+ * for this device.
+ */
+ linkwatch_run_queue();
+ }
+
+ __rtnl_unlock();
+
+ rebroadcast_time = jiffies;
+ }
+
+ msleep(250);
+
+ if (time_after(jiffies, warning_time + 10 * HZ)) {
+ printk(KERN_EMERG "unregister_netdevice: "
+ "waiting for %s to become free. Usage "
+ "count = %d\n",
+ dev->name, atomic_read(&dev->refcnt));
+ warning_time = jiffies;
+ }
+ }
+}
+
+/* The sequence is:
+ *
+ * rtnl_lock();
+ * ...
+ * register_netdevice(x1);
+ * register_netdevice(x2);
+ * ...
+ * unregister_netdevice(y1);
+ * unregister_netdevice(y2);
+ * ...
+ * rtnl_unlock();
+ * free_netdev(y1);
+ * free_netdev(y2);
+ *
+ * We are invoked by rtnl_unlock().
+ * This allows us to deal with problems:
+ * 1) We can delete sysfs objects which invoke hotplug
+ * without deadlocking with linkwatch via keventd.
+ * 2) Since we run with the RTNL semaphore not held, we can sleep
+ * safely in order to wait for the netdev refcnt to drop to zero.
+ *
+ * We must not return until all unregister events added during
+ * the interval the lock was held have been completed.
+ */
+void netdev_run_todo(void)
+{
+ struct list_head list;
+
+ /* Snapshot list, allow later requests */
+ list_replace_init(&net_todo_list, &list);
+
+ __rtnl_unlock();
+
+ while (!list_empty(&list)) {
+ struct net_device *dev
+ = list_entry(list.next, struct net_device, todo_list);
+ list_del(&dev->todo_list);
+
+ if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
+ printk(KERN_ERR "network todo '%s' but state %d\n",
+ dev->name, dev->reg_state);
+ dump_stack();
+ continue;
+ }
+
+ dev->reg_state = NETREG_UNREGISTERED;
+
+ on_each_cpu(flush_backlog, dev, 1);
+
+ netdev_wait_allrefs(dev);
+
+ /* paranoia */
+ BUG_ON(atomic_read(&dev->refcnt));
+ WARN_ON(dev->ip_ptr);
+ WARN_ON(dev->ip6_ptr);
+ WARN_ON(dev->dn_ptr);
+
+ if (dev->destructor)
+ dev->destructor(dev);
+
+ /* Free network device */
+ kobject_put(&dev->dev.kobj);
+ }
+}
+
+/**
+ * dev_get_stats - get network device statistics
+ * @dev: device to get statistics from
+ *
+ * Get network statistics from device. The device driver may provide
+ * its own method by setting dev->netdev_ops->ndo_get_stats; otherwise
+ * the internal statistics structure is used.
+ */
+const struct net_device_stats *dev_get_stats(struct net_device *dev)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ if (ops->ndo_get_stats)
+ return ops->ndo_get_stats(dev);
+ else
+ return &dev->stats;
+}
+EXPORT_SYMBOL(dev_get_stats);
+
+static void netdev_init_one_queue(struct net_device *dev,
+ struct netdev_queue *queue,
+ void *_unused)
+{
+ queue->dev = dev;
+}
+
+static void netdev_init_queues(struct net_device *dev)
+{
+ netdev_init_one_queue(dev, &dev->rx_queue, NULL);
+ netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
+ spin_lock_init(&dev->tx_global_lock);
+}
+
+/**
+ * alloc_netdev_mq - allocate network device
+ * @sizeof_priv: size of private data to allocate space for
+ * @name: device name format string
+ * @setup: callback to initialize device
+ * @queue_count: the number of subqueues to allocate
+ *
+ * Allocates a struct net_device with private data area for driver use
+ * and performs basic initialization. Also allocates subqueue structs
+ * for each queue on the device at the end of the netdevice.
+ */
+struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
+ void (*setup)(struct net_device *), unsigned int queue_count)
+{
+ struct netdev_queue *tx;
+ struct net_device *dev;
+ size_t alloc_size;
+ void *p;
+
+ BUG_ON(strlen(name) >= sizeof(dev->name));
+
+ alloc_size = sizeof(struct net_device);
+ if (sizeof_priv) {
+ /* ensure 32-byte alignment of private area */
+ alloc_size = (alloc_size + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST;
+ alloc_size += sizeof_priv;
+ }
+ /* ensure 32-byte alignment of whole construct */
+ alloc_size += NETDEV_ALIGN_CONST;
+
+ p = kzalloc(alloc_size, GFP_KERNEL);
+ if (!p) {
+ printk(KERN_ERR "alloc_netdev: Unable to allocate device.\n");
+ return NULL;
+ }
+
+ tx = kcalloc(queue_count, sizeof(struct netdev_queue), GFP_KERNEL);
+ if (!tx) {
+ printk(KERN_ERR "alloc_netdev: Unable to allocate "
+ "tx qdiscs.\n");
+ kfree(p);
+ return NULL;
+ }
+
+ dev = (struct net_device *)
+ (((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
+ dev->padded = (char *)dev - (char *)p;
+ dev_net_set(dev, &init_net);
+
+ dev->_tx = tx;
+ dev->num_tx_queues = queue_count;
+ dev->real_num_tx_queues = queue_count;
+
+ dev->gso_max_size = GSO_MAX_SIZE;
+
+ netdev_init_queues(dev);
+
+ INIT_LIST_HEAD(&dev->napi_list);
+ setup(dev);
+ strcpy(dev->name, name);
+ return dev;
+}
+EXPORT_SYMBOL(alloc_netdev_mq);
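+
+/*
+ * Example (a sketch): the private area requested via @sizeof_priv lives at
+ * the aligned end of the net_device and is reached with netdev_priv().
+ * "struct my_priv", its fields and "my_setup" are hypothetical.
+ *
+ *	dev = alloc_netdev_mq(sizeof(struct my_priv), "my%d", my_setup, 4);
+ *	if (dev) {
+ *		struct my_priv *priv = netdev_priv(dev);
+ *
+ *		priv->dev = dev;
+ *	}
+ */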
+
+/**
+ * free_netdev - free network device
+ * @dev: device
+ *
+ * This function does the last stage of destroying an allocated device
+ * interface. The reference to the device object is released.
+ * If this is the last reference then it will be freed.
+ */
+void free_netdev(struct net_device *dev)
+{
+ struct napi_struct *p, *n;
+
+ release_net(dev_net(dev));
+
+ kfree(dev->_tx);
+
+ list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
+ netif_napi_del(p);
+
+ /* Compatibility with error handling in drivers */
+ if (dev->reg_state == NETREG_UNINITIALIZED) {
+ kfree((char *)dev - dev->padded);
+ return;
+ }
+
+ BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
+ dev->reg_state = NETREG_RELEASED;
+
+ /* will free via device release */
+ put_device(&dev->dev);
+}
+
+/**
+ * synchronize_net - Synchronize with packet receive processing
+ *
+ * Wait for packets currently being received to be done.
+ * Does not block later packets from starting.
+ */
+void synchronize_net(void)
+{
+ might_sleep();
+ synchronize_rcu();
+}
+
+/**
+ * unregister_netdevice - remove device from the kernel
+ * @dev: device
+ *
+ * This function shuts down a device interface and removes it
+ * from the kernel tables.
+ *
+ * Callers must hold the rtnl semaphore. You may want
+ * unregister_netdev() instead of this.
+ */
+
+void unregister_netdevice(struct net_device *dev)
+{
+ ASSERT_RTNL();
+
+ rollback_registered(dev);
+ /* Finish processing unregister after unlock */
+ net_set_todo(dev);
+}
+
+/**
+ * unregister_netdev - remove device from the kernel
+ * @dev: device
+ *
+ * This function shuts down a device interface and removes it
+ * from the kernel tables.
+ *
+ * This is just a wrapper for unregister_netdevice that takes
+ * the rtnl semaphore. In general you want to use this and not
+ * unregister_netdevice.
+ */
+void unregister_netdev(struct net_device *dev)
+{
+ rtnl_lock();
+ unregister_netdevice(dev);
+ rtnl_unlock();
+}
+
+EXPORT_SYMBOL(unregister_netdev);
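+
+/*
+ * Example (a sketch): the matching teardown for a register_netdev() based
+ * driver, typically run from its remove path.  free_netdev() must come
+ * after the unregister so netdev_run_todo() has finished with the device.
+ *
+ *	unregister_netdev(dev);
+ *	free_netdev(dev);
+ */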
+
+/**
+ * dev_change_net_namespace - move device to a different network namespace
+ * @dev: device
+ * @net: network namespace
+ * @pat: If not NULL name pattern to try if the current device name
+ * is already taken in the destination network namespace.
+ *
+ * This function shuts down a device interface and moves it
+ * to a new network namespace. On success 0 is returned, on
+ * a failure a negative errno code is returned.
+ *
+ * Callers must hold the rtnl semaphore.
+ */
+
+int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
+{
+ char buf[IFNAMSIZ];
+ const char *destname;
+ int err;
+
+ ASSERT_RTNL();
+
+ /* Don't allow namespace local devices to be moved. */
+ err = -EINVAL;
+ if (dev->features & NETIF_F_NETNS_LOCAL)
+ goto out;
+
+#ifdef CONFIG_SYSFS
+ /* Don't allow real devices to be moved when sysfs
+ * is enabled.
+ */
+ err = -EINVAL;
+ if (dev->dev.parent)
+ goto out;
+#endif
+
+	/* Ensure the device has been registered */
+ err = -EINVAL;
+ if (dev->reg_state != NETREG_REGISTERED)
+ goto out;
+
+	/* Get out if there is nothing to do */
+ err = 0;
+ if (net_eq(dev_net(dev), net))
+ goto out;
+
+ /* Pick the destination device name, and ensure
+ * we can use it in the destination network namespace.
+ */
+ err = -EEXIST;
+ destname = dev->name;
+ if (__dev_get_by_name(net, destname)) {
+ /* We get here if we can't use the current device name */
+ if (!pat)
+ goto out;
+ if (!dev_valid_name(pat))
+ goto out;
+ if (strchr(pat, '%')) {
+ if (__dev_alloc_name(net, pat, buf) < 0)
+ goto out;
+ destname = buf;
+ } else
+ destname = pat;
+ if (__dev_get_by_name(net, destname))
+ goto out;
+ }
+
+ /*
+	 * And now a mini version of register_netdevice and unregister_netdevice.
+ */
+
+ /* If device is running close it first. */
+ dev_close(dev);
+
+ /* And unlink it from device chain */
+ err = -ENODEV;
+ unlist_netdevice(dev);
+
+ synchronize_net();
+
+ /* Shutdown queueing discipline. */
+ dev_shutdown(dev);
+
+ /* Notify protocols, that we are about to destroy
+ this device. They should clean all the things.
+ */
+ call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
+
+ /*
+ * Flush the unicast and multicast chains
+ */
+ dev_addr_discard(dev);
+
+ netdev_unregister_kobject(dev);
+
+ /* Actually switch the network namespace */
+ dev_net_set(dev, net);
+
+ /* Assign the new device name */
+ if (destname != dev->name)
+ strcpy(dev->name, destname);
+
+ /* If there is an ifindex conflict assign a new one */
+ if (__dev_get_by_index(net, dev->ifindex)) {
+ int iflink = (dev->iflink == dev->ifindex);
+ dev->ifindex = dev_new_index(net);
+ if (iflink)
+ dev->iflink = dev->ifindex;
+ }
+
+ /* Fixup kobjects */
+ err = netdev_register_kobject(dev);
+ WARN_ON(err);
+
+ /* Add the device back in the hashes */
+ list_netdevice(dev);
+
+ /* Notify protocols, that a new device appeared. */
+ call_netdevice_notifiers(NETDEV_REGISTER, dev);
+
+ synchronize_net();
+ err = 0;
+out:
+ return err;
+}
+
+static int dev_cpu_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *ocpu)
+{
+ struct sk_buff **list_skb;
+ struct Qdisc **list_net;
+ struct sk_buff *skb;
+ unsigned int cpu, oldcpu = (unsigned long)ocpu;
+ struct softnet_data *sd, *oldsd;
+
+ if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
+ return NOTIFY_OK;
+
+ local_irq_disable();
+ cpu = smp_processor_id();
+ sd = &per_cpu(softnet_data, cpu);
+ oldsd = &per_cpu(softnet_data, oldcpu);
+
+ /* Find end of our completion_queue. */
+ list_skb = &sd->completion_queue;
+ while (*list_skb)
+ list_skb = &(*list_skb)->next;
+ /* Append completion queue from offline CPU. */
+ *list_skb = oldsd->completion_queue;
+ oldsd->completion_queue = NULL;
+
+ /* Find end of our output_queue. */
+ list_net = &sd->output_queue;
+ while (*list_net)
+ list_net = &(*list_net)->next_sched;
+ /* Append output queue from offline CPU. */
+ *list_net = oldsd->output_queue;
+ oldsd->output_queue = NULL;
+
+ raise_softirq_irqoff(NET_TX_SOFTIRQ);
+ local_irq_enable();
+
+ /* Process offline CPU's input_pkt_queue */
+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue)))
+ netif_rx(skb);
+
+ return NOTIFY_OK;
+}
+
+
+/**
+ * netdev_increment_features - increment feature set by one
+ * @all: current feature set
+ * @one: new feature set
+ * @mask: mask feature set
+ *
+ * Computes a new feature set after adding a device with feature set
+ * @one to the master device with current feature set @all. Will not
+ * enable anything that is off in @mask. Returns the new feature set.
+ */
+unsigned long netdev_increment_features(unsigned long all, unsigned long one,
+ unsigned long mask)
+{
+ /* If device needs checksumming, downgrade to it. */
+ if (all & NETIF_F_NO_CSUM && !(one & NETIF_F_NO_CSUM))
+ all ^= NETIF_F_NO_CSUM | (one & NETIF_F_ALL_CSUM);
+ else if (mask & NETIF_F_ALL_CSUM) {
+ /* If one device supports v4/v6 checksumming, set for all. */
+ if (one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM) &&
+ !(all & NETIF_F_GEN_CSUM)) {
+ all &= ~NETIF_F_ALL_CSUM;
+ all |= one & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+ }
+
+ /* If one device supports hw checksumming, set for all. */
+ if (one & NETIF_F_GEN_CSUM && !(all & NETIF_F_GEN_CSUM)) {
+ all &= ~NETIF_F_ALL_CSUM;
+ all |= NETIF_F_HW_CSUM;
+ }
+ }
+
+ one |= NETIF_F_ALL_CSUM;
+
+ one |= all & NETIF_F_ONE_FOR_ALL;
+ all &= one | NETIF_F_LLTX | NETIF_F_GSO;
+ all |= one & mask & NETIF_F_ONE_FOR_ALL;
+
+ return all;
+}
+EXPORT_SYMBOL(netdev_increment_features);
+
+static struct hlist_head *netdev_create_hash(void)
+{
+ int i;
+ struct hlist_head *hash;
+
+ hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
+ if (hash != NULL)
+ for (i = 0; i < NETDEV_HASHENTRIES; i++)
+ INIT_HLIST_HEAD(&hash[i]);
+
+ return hash;
+}
+
+/* Initialize per network namespace state */
+static int __net_init netdev_init(struct net *net)
+{
+ INIT_LIST_HEAD(&net->dev_base_head);
+
+ net->dev_name_head = netdev_create_hash();
+ if (net->dev_name_head == NULL)
+ goto err_name;
+
+ net->dev_index_head = netdev_create_hash();
+ if (net->dev_index_head == NULL)
+ goto err_idx;
+
+ return 0;
+
+err_idx:
+ kfree(net->dev_name_head);
+err_name:
+ return -ENOMEM;
+}
+
+/**
+ * netdev_drivername - network driver for the device
+ * @dev: network device
+ * @buffer: buffer for resulting name
+ * @len: size of buffer
+ *
+ * Determine network driver for device.
+ */
+char *netdev_drivername(const struct net_device *dev, char *buffer, int len)
+{
+ const struct device_driver *driver;
+ const struct device *parent;
+
+ if (len <= 0 || !buffer)
+ return buffer;
+ buffer[0] = 0;
+
+ parent = dev->dev.parent;
+
+ if (!parent)
+ return buffer;
+
+ driver = parent->driver;
+ if (driver && driver->name)
+ strlcpy(buffer, driver->name, len);
+ return buffer;
+}
+
+static void __net_exit netdev_exit(struct net *net)
+{
+ kfree(net->dev_name_head);
+ kfree(net->dev_index_head);
+}
+
+static struct pernet_operations __net_initdata netdev_net_ops = {
+ .init = netdev_init,
+ .exit = netdev_exit,
+};
+
+static void __net_exit default_device_exit(struct net *net)
+{
+ struct net_device *dev;
+ /*
+	 * Push all migratable network devices back to the
+ * initial network namespace
+ */
+ rtnl_lock();
+restart:
+ for_each_netdev(net, dev) {
+ int err;
+ char fb_name[IFNAMSIZ];
+
+ /* Ignore unmoveable devices (i.e. loopback) */
+ if (dev->features & NETIF_F_NETNS_LOCAL)
+ continue;
+
+ /* Delete virtual devices */
+ if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink) {
+ dev->rtnl_link_ops->dellink(dev);
+ goto restart;
+ }
+
+		/* Push remaining network devices to init_net */
+ snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
+ err = dev_change_net_namespace(dev, &init_net, fb_name);
+ if (err) {
+ printk(KERN_EMERG "%s: failed to move %s to init_net: %d\n",
+ __func__, dev->name, err);
+ BUG();
+ }
+ goto restart;
+ }
+ rtnl_unlock();
+}
+
+static struct pernet_operations __net_initdata default_device_ops = {
+ .exit = default_device_exit,
+};
+
+/*
+ * Initialize the DEV module. At boot time this walks the device list and
+ * unhooks any devices that fail to initialise (normally hardware not
+ * present) and leaves us with a valid list of present and active devices.
+ *
+ */
+
+/*
+ * This is called single threaded during boot, so no need
+ * to take the rtnl semaphore.
+ */
+static int __init net_dev_init(void)
+{
+ int i, rc = -ENOMEM;
+
+ BUG_ON(!dev_boot_phase);
+
+ if (dev_proc_init())
+ goto out;
+
+ if (netdev_kobject_init())
+ goto out;
+
+ INIT_LIST_HEAD(&ptype_all);
+ for (i = 0; i < PTYPE_HASH_SIZE; i++)
+ INIT_LIST_HEAD(&ptype_base[i]);
+
+ if (register_pernet_subsys(&netdev_net_ops))
+ goto out;
+
+ /*
+ * Initialise the packet receive queues.
+ */
+
+ for_each_possible_cpu(i) {
+ struct softnet_data *queue;
+
+ queue = &per_cpu(softnet_data, i);
+ skb_queue_head_init(&queue->input_pkt_queue);
+ queue->completion_queue = NULL;
+ INIT_LIST_HEAD(&queue->poll_list);
+
+ queue->backlog.poll = process_backlog;
+ queue->backlog.weight = weight_p;
+ queue->backlog.gro_list = NULL;
+ }
+
+ dev_boot_phase = 0;
+
+	/* The loopback device is special: if any other network device
+	 * is present in a network namespace, the loopback device must
+	 * be present too. Since we now dynamically allocate and free the
+	 * loopback device, ensure this invariant is maintained by
+	 * keeping the loopback device the first device on the
+	 * list of network devices, so that it is the first device that
+	 * appears and the last network device that disappears.
+ */
+ if (register_pernet_device(&loopback_net_ops))
+ goto out;
+
+ if (register_pernet_device(&default_device_ops))
+ goto out;
+
+ open_softirq(NET_TX_SOFTIRQ, net_tx_action);
+ open_softirq(NET_RX_SOFTIRQ, net_rx_action);
+
+ hotcpu_notifier(dev_cpu_callback, 0);
+ dst_init();
+ dev_mcast_init();
+ rc = 0;
+out:
+ return rc;
+}
+
+subsys_initcall(net_dev_init);
+
+EXPORT_SYMBOL(__dev_get_by_index);
+EXPORT_SYMBOL(__dev_get_by_name);
+EXPORT_SYMBOL(__dev_remove_pack);
+EXPORT_SYMBOL(dev_valid_name);
+EXPORT_SYMBOL(dev_add_pack);
+EXPORT_SYMBOL(dev_alloc_name);
+EXPORT_SYMBOL(dev_close);
+EXPORT_SYMBOL(dev_get_by_flags);
+EXPORT_SYMBOL(dev_get_by_index);
+EXPORT_SYMBOL(dev_get_by_name);
+EXPORT_SYMBOL(dev_open);
+EXPORT_SYMBOL(dev_queue_xmit);
+EXPORT_SYMBOL(dev_remove_pack);
+EXPORT_SYMBOL(dev_set_allmulti);
+EXPORT_SYMBOL(dev_set_promiscuity);
+EXPORT_SYMBOL(dev_change_flags);
+EXPORT_SYMBOL(dev_set_mtu);
+EXPORT_SYMBOL(dev_set_mac_address);
+EXPORT_SYMBOL(free_netdev);
+EXPORT_SYMBOL(netdev_boot_setup_check);
+EXPORT_SYMBOL(netdev_set_master);
+EXPORT_SYMBOL(netdev_state_change);
+EXPORT_SYMBOL(netif_receive_skb);
+EXPORT_SYMBOL(netif_rx);
+EXPORT_SYMBOL(register_gifconf);
+EXPORT_SYMBOL(register_netdevice);
+EXPORT_SYMBOL(register_netdevice_notifier);
+EXPORT_SYMBOL(skb_checksum_help);
+EXPORT_SYMBOL(synchronize_net);
+EXPORT_SYMBOL(unregister_netdevice);
+EXPORT_SYMBOL(unregister_netdevice_notifier);
+EXPORT_SYMBOL(net_enable_timestamp);
+EXPORT_SYMBOL(net_disable_timestamp);
+EXPORT_SYMBOL(dev_get_flags);
+
+#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+EXPORT_SYMBOL(br_handle_frame_hook);
+EXPORT_SYMBOL(br_fdb_get_hook);
+EXPORT_SYMBOL(br_fdb_put_hook);
+#endif
+
+EXPORT_SYMBOL(dev_load);
+
+EXPORT_PER_CPU_SYMBOL(softnet_data);
diff --git a/libdde_linux26/contrib/net/core/dev_mcast.c b/libdde_linux26/contrib/net/core/dev_mcast.c
new file mode 100644
index 00000000..9e2fa39f
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/dev_mcast.c
@@ -0,0 +1,229 @@
+/*
+ * Linux NET3: Multicast List maintenance.
+ *
+ * Authors:
+ * Tim Kordas <tjk@nostromo.eeap.cwru.edu>
+ * Richard Underwood <richard@wuzz.demon.co.uk>
+ *
+ * Stir fried together from the IP multicast and CAP patches above
+ * Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * Fixes:
+ * Alan Cox : Update the device on a real delete
+ * rather than any time but...
+ * Alan Cox : IFF_ALLMULTI support.
+ * Alan Cox : New format set_multicast_list() calls.
+ * Gleb Natapov : Remove dev_mc_lock.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <net/net_namespace.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/arp.h>
+
+
+/*
+ * Device multicast list maintenance.
+ *
+ * This is used both by IP and by the user level maintenance functions.
+ * Unlike BSD we maintain a usage count on a given multicast address so
+ * that a casual user application can add/delete multicasts used by
+ * protocols without doing damage to the protocols when it deletes the
+ * entries. It also helps IP as it tracks overlapping maps.
+ *
+ * Device mc lists are changed by bh at least if IPv6 is enabled,
+ * so they must be bh protected.
+ *
+ * We block accesses to device mc filters with netif_tx_lock.
+ */
+
+/*
+ * Delete a device level multicast
+ */
+
+int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_delete(&dev->mc_list, &dev->mc_count,
+ addr, alen, glbl);
+ if (!err) {
+ /*
+		 *	We have altered the list, so the filter
+		 *	loaded into the card is now wrong. Fix it.
+ */
+
+ __dev_set_rx_mode(dev);
+ }
+ netif_addr_unlock_bh(dev);
+ return err;
+}
+
+/*
+ * Add a device level multicast
+ */
+
+int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
+{
+ int err;
+
+ netif_addr_lock_bh(dev);
+ err = __dev_addr_add(&dev->mc_list, &dev->mc_count, addr, alen, glbl);
+ if (!err)
+ __dev_set_rx_mode(dev);
+ netif_addr_unlock_bh(dev);
+ return err;
+}
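+
+/*
+ * Example (a sketch): a protocol joining a link-layer multicast group,
+ * roughly what IPv4 does when it maps a group address onto an Ethernet
+ * multicast MAC.  "mc_addr" is an assumed dev->addr_len byte address.
+ *
+ *	err = dev_mc_add(dev, mc_addr, dev->addr_len, 0);
+ *	...
+ *	dev_mc_delete(dev, mc_addr, dev->addr_len, 0);
+ */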
+
+/**
+ * dev_mc_sync - Synchronize device's multicast list to another device
+ * @to: destination device
+ * @from: source device
+ *
+ * Add newly added addresses to the destination device and release
+ * addresses that have no users left. The source device must be
+ * locked by netif_addr_lock_bh.
+ *
+ * This function is intended to be called from the dev->set_multicast_list
+ * or dev->set_rx_mode function of layered software devices.
+ */
+int dev_mc_sync(struct net_device *to, struct net_device *from)
+{
+ int err = 0;
+
+ netif_addr_lock_bh(to);
+ err = __dev_addr_sync(&to->mc_list, &to->mc_count,
+ &from->mc_list, &from->mc_count);
+ if (!err)
+ __dev_set_rx_mode(to);
+ netif_addr_unlock_bh(to);
+
+ return err;
+}
+EXPORT_SYMBOL(dev_mc_sync);
+
+
+/**
+ * dev_mc_unsync - Remove synchronized addresses from the destination
+ * device
+ * @to: destination device
+ * @from: source device
+ *
+ * Remove all addresses that were added to the destination device by
+ * dev_mc_sync(). This function is intended to be called from the
+ * dev->stop function of layered software devices.
+ */
+void dev_mc_unsync(struct net_device *to, struct net_device *from)
+{
+ netif_addr_lock_bh(from);
+ netif_addr_lock(to);
+
+ __dev_addr_unsync(&to->mc_list, &to->mc_count,
+ &from->mc_list, &from->mc_count);
+ __dev_set_rx_mode(to);
+
+ netif_addr_unlock(to);
+ netif_addr_unlock_bh(from);
+}
+EXPORT_SYMBOL(dev_mc_unsync);
+
+#ifdef CONFIG_PROC_FS
+static int dev_mc_seq_show(struct seq_file *seq, void *v)
+{
+ struct dev_addr_list *m;
+ struct net_device *dev = v;
+
+ if (v == SEQ_START_TOKEN)
+ return 0;
+
+ netif_addr_lock_bh(dev);
+ for (m = dev->mc_list; m; m = m->next) {
+ int i;
+
+ seq_printf(seq, "%-4d %-15s %-5d %-5d ", dev->ifindex,
+ dev->name, m->dmi_users, m->dmi_gusers);
+
+ for (i = 0; i < m->dmi_addrlen; i++)
+ seq_printf(seq, "%02x", m->dmi_addr[i]);
+
+ seq_putc(seq, '\n');
+ }
+ netif_addr_unlock_bh(dev);
+ return 0;
+}
+
+static const struct seq_operations dev_mc_seq_ops = {
+ .start = dev_seq_start,
+ .next = dev_seq_next,
+ .stop = dev_seq_stop,
+ .show = dev_mc_seq_show,
+};
+
+static int dev_mc_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &dev_mc_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations dev_mc_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = dev_mc_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+#endif
+
+static int __net_init dev_mc_net_init(struct net *net)
+{
+ if (!proc_net_fops_create(net, "dev_mcast", 0, &dev_mc_seq_fops))
+ return -ENOMEM;
+ return 0;
+}
+
+static void __net_exit dev_mc_net_exit(struct net *net)
+{
+ proc_net_remove(net, "dev_mcast");
+}
+
+static struct pernet_operations __net_initdata dev_mc_net_ops = {
+ .init = dev_mc_net_init,
+ .exit = dev_mc_net_exit,
+};
+
+void __init dev_mcast_init(void)
+{
+ register_pernet_subsys(&dev_mc_net_ops);
+}
+
+EXPORT_SYMBOL(dev_mc_add);
+EXPORT_SYMBOL(dev_mc_delete);
diff --git a/libdde_linux26/contrib/net/core/ethtool.c b/libdde_linux26/contrib/net/core/ethtool.c
new file mode 100644
index 00000000..947710a3
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/ethtool.c
@@ -0,0 +1,1091 @@
+/*
+ * net/core/ethtool.c - Ethtool ioctl handler
+ * Copyright (c) 2003 Matthew Wilcox <matthew@wil.cx>
+ *
+ * This file is where we call all the ethtool_ops commands to get
+ * the information ethtool needs.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <asm/uaccess.h>
+
+/*
+ * Some useful ethtool_ops methods that are device independent.
+ * If we find that all drivers want to do the same thing here,
+ * we can turn these into dev_() function calls.
+ */
+
+u32 ethtool_op_get_link(struct net_device *dev)
+{
+ return netif_carrier_ok(dev) ? 1 : 0;
+}
+
+u32 ethtool_op_get_tx_csum(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_ALL_CSUM) != 0;
+}
+
+int ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM;
+ else
+ dev->features &= ~NETIF_F_IP_CSUM;
+
+ return 0;
+}
+
+int ethtool_op_set_tx_hw_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_HW_CSUM;
+ else
+ dev->features &= ~NETIF_F_HW_CSUM;
+
+ return 0;
+}
+
+int ethtool_op_set_tx_ipv6_csum(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ else
+ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
+
+ return 0;
+}
+
+u32 ethtool_op_get_sg(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_SG) != 0;
+}
+
+int ethtool_op_set_sg(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_SG;
+ else
+ dev->features &= ~NETIF_F_SG;
+
+ return 0;
+}
+
+u32 ethtool_op_get_tso(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_TSO) != 0;
+}
+
+int ethtool_op_set_tso(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_TSO;
+ else
+ dev->features &= ~NETIF_F_TSO;
+
+ return 0;
+}
+
+u32 ethtool_op_get_ufo(struct net_device *dev)
+{
+ return (dev->features & NETIF_F_UFO) != 0;
+}
+
+int ethtool_op_set_ufo(struct net_device *dev, u32 data)
+{
+ if (data)
+ dev->features |= NETIF_F_UFO;
+ else
+ dev->features &= ~NETIF_F_UFO;
+ return 0;
+}
+
+/* the following list of flags are the same as their associated
+ * NETIF_F_xxx values in include/linux/netdevice.h
+ */
+static const u32 flags_dup_features =
+ ETH_FLAG_LRO;
+
+u32 ethtool_op_get_flags(struct net_device *dev)
+{
+ /* in the future, this function will probably contain additional
+ * handling for flags which are not so easily handled
+ * by a simple masking operation
+ */
+
+ return dev->features & flags_dup_features;
+}
+
+int ethtool_op_set_flags(struct net_device *dev, u32 data)
+{
+ if (data & ETH_FLAG_LRO)
+ dev->features |= NETIF_F_LRO;
+ else
+ dev->features &= ~NETIF_F_LRO;
+
+ return 0;
+}
+
+/* Handlers for each ethtool command */
+
+static int ethtool_get_settings(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_cmd cmd = { ETHTOOL_GSET };
+ int err;
+
+ if (!dev->ethtool_ops->get_settings)
+ return -EOPNOTSUPP;
+
+ err = dev->ethtool_ops->get_settings(dev, &cmd);
+ if (err < 0)
+ return err;
+
+ if (copy_to_user(useraddr, &cmd, sizeof(cmd)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_settings(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_cmd cmd;
+
+ if (!dev->ethtool_ops->set_settings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_settings(dev, &cmd);
+}
+
+static int ethtool_get_drvinfo(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_drvinfo info;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+
+ if (!ops->get_drvinfo)
+ return -EOPNOTSUPP;
+
+ memset(&info, 0, sizeof(info));
+ info.cmd = ETHTOOL_GDRVINFO;
+ ops->get_drvinfo(dev, &info);
+
+ if (ops->get_sset_count) {
+ int rc;
+
+ rc = ops->get_sset_count(dev, ETH_SS_TEST);
+ if (rc >= 0)
+ info.testinfo_len = rc;
+ rc = ops->get_sset_count(dev, ETH_SS_STATS);
+ if (rc >= 0)
+ info.n_stats = rc;
+ rc = ops->get_sset_count(dev, ETH_SS_PRIV_FLAGS);
+ if (rc >= 0)
+ info.n_priv_flags = rc;
+ } else {
+ /* code path for obsolete hooks */
+
+ if (ops->self_test_count)
+ info.testinfo_len = ops->self_test_count(dev);
+ if (ops->get_stats_count)
+ info.n_stats = ops->get_stats_count(dev);
+ }
+ if (ops->get_regs_len)
+ info.regdump_len = ops->get_regs_len(dev);
+ if (ops->get_eeprom_len)
+ info.eedump_len = ops->get_eeprom_len(dev);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_rxhash(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rxnfc cmd;
+
+ if (!dev->ethtool_ops->set_rxhash)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&cmd, useraddr, sizeof(cmd)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_rxhash(dev, &cmd);
+}
+
+static int ethtool_get_rxhash(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_rxnfc info;
+
+ if (!dev->ethtool_ops->get_rxhash)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&info, useraddr, sizeof(info)))
+ return -EFAULT;
+
+ dev->ethtool_ops->get_rxhash(dev, &info);
+
+ if (copy_to_user(useraddr, &info, sizeof(info)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_regs(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_regs regs;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void *regbuf;
+ int reglen, ret;
+
+ if (!ops->get_regs || !ops->get_regs_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&regs, useraddr, sizeof(regs)))
+ return -EFAULT;
+
+ reglen = ops->get_regs_len(dev);
+ if (regs.len > reglen)
+ regs.len = reglen;
+
+ regbuf = kmalloc(reglen, GFP_USER);
+ if (!regbuf)
+ return -ENOMEM;
+
+ ops->get_regs(dev, &regs, regbuf);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &regs, sizeof(regs)))
+ goto out;
+ useraddr += offsetof(struct ethtool_regs, data);
+ if (copy_to_user(useraddr, regbuf, regs.len))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(regbuf);
+ return ret;
+}
+
+static int ethtool_get_wol(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_wolinfo wol = { ETHTOOL_GWOL };
+
+ if (!dev->ethtool_ops->get_wol)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_wol(dev, &wol);
+
+ if (copy_to_user(useraddr, &wol, sizeof(wol)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_wol(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_wolinfo wol;
+
+ if (!dev->ethtool_ops->set_wol)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&wol, useraddr, sizeof(wol)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_wol(dev, &wol);
+}
+
+static int ethtool_nway_reset(struct net_device *dev)
+{
+ if (!dev->ethtool_ops->nway_reset)
+ return -EOPNOTSUPP;
+
+ return dev->ethtool_ops->nway_reset(dev);
+}
+
+static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void __user *userbuf = useraddr + sizeof(eeprom);
+ u32 bytes_remaining;
+ u8 *data;
+ int ret = 0;
+
+ if (!ops->get_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(PAGE_SIZE, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ bytes_remaining = eeprom.len;
+ while (bytes_remaining > 0) {
+ eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
+
+ ret = ops->get_eeprom(dev, &eeprom, data);
+ if (ret)
+ break;
+ if (copy_to_user(userbuf, data, eeprom.len)) {
+ ret = -EFAULT;
+ break;
+ }
+ userbuf += eeprom.len;
+ eeprom.offset += eeprom.len;
+ bytes_remaining -= eeprom.len;
+ }
+
+ eeprom.len = userbuf - (useraddr + sizeof(eeprom));
+ eeprom.offset -= eeprom.len;
+ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
+ ret = -EFAULT;
+
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_eeprom eeprom;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ void __user *userbuf = useraddr + sizeof(eeprom);
+ u32 bytes_remaining;
+ u8 *data;
+ int ret = 0;
+
+ if (!ops->set_eeprom || !ops->get_eeprom_len)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
+ return -EFAULT;
+
+ /* Check for wrap and zero */
+ if (eeprom.offset + eeprom.len <= eeprom.offset)
+ return -EINVAL;
+
+ /* Check for exceeding total eeprom len */
+ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev))
+ return -EINVAL;
+
+ data = kmalloc(PAGE_SIZE, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ bytes_remaining = eeprom.len;
+ while (bytes_remaining > 0) {
+ eeprom.len = min(bytes_remaining, (u32)PAGE_SIZE);
+
+ if (copy_from_user(data, userbuf, eeprom.len)) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = ops->set_eeprom(dev, &eeprom, data);
+ if (ret)
+ break;
+ userbuf += eeprom.len;
+ eeprom.offset += eeprom.len;
+ bytes_remaining -= eeprom.len;
+ }
+
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_coalesce(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE };
+
+ if (!dev->ethtool_ops->get_coalesce)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_coalesce(dev, &coalesce);
+
+ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_coalesce(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_coalesce coalesce;
+
+ if (!dev->ethtool_ops->set_coalesce)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_coalesce(dev, &coalesce);
+}
+
+static int ethtool_get_ringparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM };
+
+ if (!dev->ethtool_ops->get_ringparam)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_ringparam(dev, &ringparam);
+
+ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_ringparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_ringparam ringparam;
+
+ if (!dev->ethtool_ops->set_ringparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_ringparam(dev, &ringparam);
+}
+
+static int ethtool_get_pauseparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM };
+
+ if (!dev->ethtool_ops->get_pauseparam)
+ return -EOPNOTSUPP;
+
+ dev->ethtool_ops->get_pauseparam(dev, &pauseparam);
+
+ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_pauseparam(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_pauseparam pauseparam;
+
+ if (!dev->ethtool_ops->set_pauseparam)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->set_pauseparam(dev, &pauseparam);
+}
+
+static int __ethtool_set_sg(struct net_device *dev, u32 data)
+{
+ int err;
+
+ if (!data && dev->ethtool_ops->set_tso) {
+ err = dev->ethtool_ops->set_tso(dev, 0);
+ if (err)
+ return err;
+ }
+
+ if (!data && dev->ethtool_ops->set_ufo) {
+ err = dev->ethtool_ops->set_ufo(dev, 0);
+ if (err)
+ return err;
+ }
+ return dev->ethtool_ops->set_sg(dev, data);
+}
+
+static int ethtool_set_tx_csum(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+ int err;
+
+ if (!dev->ethtool_ops->set_tx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (!edata.data && dev->ethtool_ops->set_sg) {
+ err = __ethtool_set_sg(dev, 0);
+ if (err)
+ return err;
+ }
+
+ return dev->ethtool_ops->set_tx_csum(dev, edata.data);
+}
+
+static int ethtool_set_rx_csum(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_rx_csum)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (!edata.data && dev->ethtool_ops->set_sg)
+ dev->features &= ~NETIF_F_GRO;
+
+ return dev->ethtool_ops->set_rx_csum(dev, edata.data);
+}
+
+static int ethtool_set_sg(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_sg)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data &&
+ !(dev->features & NETIF_F_ALL_CSUM))
+ return -EINVAL;
+
+ return __ethtool_set_sg(dev, edata.data);
+}
+
+static int ethtool_set_tso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_tso)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data && !(dev->features & NETIF_F_SG))
+ return -EINVAL;
+
+ return dev->ethtool_ops->set_tso(dev, edata.data);
+}
+
+static int ethtool_set_ufo(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (!dev->ethtool_ops->set_ufo)
+ return -EOPNOTSUPP;
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.data && !(dev->features & NETIF_F_SG))
+ return -EINVAL;
+ if (edata.data && !(dev->features & NETIF_F_HW_CSUM))
+ return -EINVAL;
+ return dev->ethtool_ops->set_ufo(dev, edata.data);
+}
+
+static int ethtool_get_gso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GGSO };
+
+ edata.data = dev->features & NETIF_F_GSO;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_gso(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+ if (edata.data)
+ dev->features |= NETIF_F_GSO;
+ else
+ dev->features &= ~NETIF_F_GSO;
+ return 0;
+}
+
+static int ethtool_get_gro(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata = { ETHTOOL_GGRO };
+
+ edata.data = dev->features & NETIF_F_GRO;
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_gro(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_value edata;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ if (edata.data) {
+ if (!dev->ethtool_ops->get_rx_csum ||
+ !dev->ethtool_ops->get_rx_csum(dev))
+ return -EINVAL;
+ dev->features |= NETIF_F_GRO;
+ } else
+ dev->features &= ~NETIF_F_GRO;
+
+ return 0;
+}
+
+static int ethtool_self_test(struct net_device *dev, char __user *useraddr)
+{
+ struct ethtool_test test;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u64 *data;
+ int ret, test_len;
+
+ if (!ops->self_test)
+ return -EOPNOTSUPP;
+ if (!ops->get_sset_count && !ops->self_test_count)
+ return -EOPNOTSUPP;
+
+ if (ops->get_sset_count)
+ test_len = ops->get_sset_count(dev, ETH_SS_TEST);
+ else
+ /* code path for obsolete hook */
+ test_len = ops->self_test_count(dev);
+ if (test_len < 0)
+ return test_len;
+ WARN_ON(test_len == 0);
+
+ if (copy_from_user(&test, useraddr, sizeof(test)))
+ return -EFAULT;
+
+ test.len = test_len;
+ data = kmalloc(test_len * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->self_test(dev, &test, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &test, sizeof(test)))
+ goto out;
+ useraddr += sizeof(test);
+ if (copy_to_user(useraddr, data, test.len * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_gstrings gstrings;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u8 *data;
+ int ret;
+
+ if (!ops->get_strings)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings)))
+ return -EFAULT;
+
+ if (ops->get_sset_count) {
+ ret = ops->get_sset_count(dev, gstrings.string_set);
+ if (ret < 0)
+ return ret;
+
+ gstrings.len = ret;
+ } else {
+ /* code path for obsolete hooks */
+
+ switch (gstrings.string_set) {
+ case ETH_SS_TEST:
+ if (!ops->self_test_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->self_test_count(dev);
+ break;
+ case ETH_SS_STATS:
+ if (!ops->get_stats_count)
+ return -EOPNOTSUPP;
+ gstrings.len = ops->get_stats_count(dev);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+
+ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_strings(dev, gstrings.string_set, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
+ goto out;
+ useraddr += sizeof(gstrings);
+ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_phys_id(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_value id;
+
+ if (!dev->ethtool_ops->phys_id)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&id, useraddr, sizeof(id)))
+ return -EFAULT;
+
+ return dev->ethtool_ops->phys_id(dev, id.data);
+}
+
+static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_stats stats;
+ const struct ethtool_ops *ops = dev->ethtool_ops;
+ u64 *data;
+ int ret, n_stats;
+
+ if (!ops->get_ethtool_stats)
+ return -EOPNOTSUPP;
+ if (!ops->get_sset_count && !ops->get_stats_count)
+ return -EOPNOTSUPP;
+
+ if (ops->get_sset_count)
+ n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
+ else
+ /* code path for obsolete hook */
+ n_stats = ops->get_stats_count(dev);
+ if (n_stats < 0)
+ return n_stats;
+ WARN_ON(n_stats == 0);
+
+ if (copy_from_user(&stats, useraddr, sizeof(stats)))
+ return -EFAULT;
+
+ stats.n_stats = n_stats;
+ data = kmalloc(n_stats * sizeof(u64), GFP_USER);
+ if (!data)
+ return -ENOMEM;
+
+ ops->get_ethtool_stats(dev, &stats, data);
+
+ ret = -EFAULT;
+ if (copy_to_user(useraddr, &stats, sizeof(stats)))
+ goto out;
+ useraddr += sizeof(stats);
+ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ goto out;
+ ret = 0;
+
+ out:
+ kfree(data);
+ return ret;
+}
+
+static int ethtool_get_perm_addr(struct net_device *dev, void __user *useraddr)
+{
+ struct ethtool_perm_addr epaddr;
+
+ if (copy_from_user(&epaddr, useraddr, sizeof(epaddr)))
+ return -EFAULT;
+
+ if (epaddr.size < dev->addr_len)
+ return -ETOOSMALL;
+ epaddr.size = dev->addr_len;
+
+ if (copy_to_user(useraddr, &epaddr, sizeof(epaddr)))
+ return -EFAULT;
+ useraddr += sizeof(epaddr);
+ if (copy_to_user(useraddr, dev->perm_addr, epaddr.size))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_get_value(struct net_device *dev, char __user *useraddr,
+ u32 cmd, u32 (*actor)(struct net_device *))
+{
+ struct ethtool_value edata = { cmd };
+
+ if (!actor)
+ return -EOPNOTSUPP;
+
+ edata.data = actor(dev);
+
+ if (copy_to_user(useraddr, &edata, sizeof(edata)))
+ return -EFAULT;
+ return 0;
+}
+
+static int ethtool_set_value_void(struct net_device *dev, char __user *useraddr,
+ void (*actor)(struct net_device *, u32))
+{
+ struct ethtool_value edata;
+
+ if (!actor)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ actor(dev, edata.data);
+ return 0;
+}
+
+static int ethtool_set_value(struct net_device *dev, char __user *useraddr,
+ int (*actor)(struct net_device *, u32))
+{
+ struct ethtool_value edata;
+
+ if (!actor)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&edata, useraddr, sizeof(edata)))
+ return -EFAULT;
+
+ return actor(dev, edata.data);
+}
+
+/* The main entry point in this file. Called from net/core/dev.c */
+
+int dev_ethtool(struct net *net, struct ifreq *ifr)
+{
+ struct net_device *dev = __dev_get_by_name(net, ifr->ifr_name);
+ void __user *useraddr = ifr->ifr_data;
+ u32 ethcmd;
+ int rc;
+ unsigned long old_features;
+
+ if (!dev || !netif_device_present(dev))
+ return -ENODEV;
+
+ if (!dev->ethtool_ops)
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&ethcmd, useraddr, sizeof (ethcmd)))
+ return -EFAULT;
+
+ /* Allow some commands to be done by anyone */
+ switch(ethcmd) {
+ case ETHTOOL_GDRVINFO:
+ case ETHTOOL_GMSGLVL:
+ case ETHTOOL_GCOALESCE:
+ case ETHTOOL_GRINGPARAM:
+ case ETHTOOL_GPAUSEPARAM:
+ case ETHTOOL_GRXCSUM:
+ case ETHTOOL_GTXCSUM:
+ case ETHTOOL_GSG:
+ case ETHTOOL_GSTRINGS:
+ case ETHTOOL_GTSO:
+ case ETHTOOL_GPERMADDR:
+ case ETHTOOL_GUFO:
+ case ETHTOOL_GGSO:
+ case ETHTOOL_GFLAGS:
+ case ETHTOOL_GPFLAGS:
+ case ETHTOOL_GRXFH:
+ break;
+ default:
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+ }
+
+ if (dev->ethtool_ops->begin)
+ if ((rc = dev->ethtool_ops->begin(dev)) < 0)
+ return rc;
+
+ old_features = dev->features;
+
+ switch (ethcmd) {
+ case ETHTOOL_GSET:
+ rc = ethtool_get_settings(dev, useraddr);
+ break;
+ case ETHTOOL_SSET:
+ rc = ethtool_set_settings(dev, useraddr);
+ break;
+ case ETHTOOL_GDRVINFO:
+ rc = ethtool_get_drvinfo(dev, useraddr);
+ break;
+ case ETHTOOL_GREGS:
+ rc = ethtool_get_regs(dev, useraddr);
+ break;
+ case ETHTOOL_GWOL:
+ rc = ethtool_get_wol(dev, useraddr);
+ break;
+ case ETHTOOL_SWOL:
+ rc = ethtool_set_wol(dev, useraddr);
+ break;
+ case ETHTOOL_GMSGLVL:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_msglevel);
+ break;
+ case ETHTOOL_SMSGLVL:
+ rc = ethtool_set_value_void(dev, useraddr,
+ dev->ethtool_ops->set_msglevel);
+ break;
+ case ETHTOOL_NWAY_RST:
+ rc = ethtool_nway_reset(dev);
+ break;
+ case ETHTOOL_GLINK:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_link);
+ break;
+ case ETHTOOL_GEEPROM:
+ rc = ethtool_get_eeprom(dev, useraddr);
+ break;
+ case ETHTOOL_SEEPROM:
+ rc = ethtool_set_eeprom(dev, useraddr);
+ break;
+ case ETHTOOL_GCOALESCE:
+ rc = ethtool_get_coalesce(dev, useraddr);
+ break;
+ case ETHTOOL_SCOALESCE:
+ rc = ethtool_set_coalesce(dev, useraddr);
+ break;
+ case ETHTOOL_GRINGPARAM:
+ rc = ethtool_get_ringparam(dev, useraddr);
+ break;
+ case ETHTOOL_SRINGPARAM:
+ rc = ethtool_set_ringparam(dev, useraddr);
+ break;
+ case ETHTOOL_GPAUSEPARAM:
+ rc = ethtool_get_pauseparam(dev, useraddr);
+ break;
+ case ETHTOOL_SPAUSEPARAM:
+ rc = ethtool_set_pauseparam(dev, useraddr);
+ break;
+ case ETHTOOL_GRXCSUM:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_rx_csum);
+ break;
+ case ETHTOOL_SRXCSUM:
+ rc = ethtool_set_rx_csum(dev, useraddr);
+ break;
+ case ETHTOOL_GTXCSUM:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_tx_csum ?
+ dev->ethtool_ops->get_tx_csum :
+ ethtool_op_get_tx_csum));
+ break;
+ case ETHTOOL_STXCSUM:
+ rc = ethtool_set_tx_csum(dev, useraddr);
+ break;
+ case ETHTOOL_GSG:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_sg ?
+ dev->ethtool_ops->get_sg :
+ ethtool_op_get_sg));
+ break;
+ case ETHTOOL_SSG:
+ rc = ethtool_set_sg(dev, useraddr);
+ break;
+ case ETHTOOL_GTSO:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_tso ?
+ dev->ethtool_ops->get_tso :
+ ethtool_op_get_tso));
+ break;
+ case ETHTOOL_STSO:
+ rc = ethtool_set_tso(dev, useraddr);
+ break;
+ case ETHTOOL_TEST:
+ rc = ethtool_self_test(dev, useraddr);
+ break;
+ case ETHTOOL_GSTRINGS:
+ rc = ethtool_get_strings(dev, useraddr);
+ break;
+ case ETHTOOL_PHYS_ID:
+ rc = ethtool_phys_id(dev, useraddr);
+ break;
+ case ETHTOOL_GSTATS:
+ rc = ethtool_get_stats(dev, useraddr);
+ break;
+ case ETHTOOL_GPERMADDR:
+ rc = ethtool_get_perm_addr(dev, useraddr);
+ break;
+ case ETHTOOL_GUFO:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ (dev->ethtool_ops->get_ufo ?
+ dev->ethtool_ops->get_ufo :
+ ethtool_op_get_ufo));
+ break;
+ case ETHTOOL_SUFO:
+ rc = ethtool_set_ufo(dev, useraddr);
+ break;
+ case ETHTOOL_GGSO:
+ rc = ethtool_get_gso(dev, useraddr);
+ break;
+ case ETHTOOL_SGSO:
+ rc = ethtool_set_gso(dev, useraddr);
+ break;
+ case ETHTOOL_GFLAGS:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_flags);
+ break;
+ case ETHTOOL_SFLAGS:
+ rc = ethtool_set_value(dev, useraddr,
+ dev->ethtool_ops->set_flags);
+ break;
+ case ETHTOOL_GPFLAGS:
+ rc = ethtool_get_value(dev, useraddr, ethcmd,
+ dev->ethtool_ops->get_priv_flags);
+ break;
+ case ETHTOOL_SPFLAGS:
+ rc = ethtool_set_value(dev, useraddr,
+ dev->ethtool_ops->set_priv_flags);
+ break;
+ case ETHTOOL_GRXFH:
+ rc = ethtool_get_rxhash(dev, useraddr);
+ break;
+ case ETHTOOL_SRXFH:
+ rc = ethtool_set_rxhash(dev, useraddr);
+ break;
+ case ETHTOOL_GGRO:
+ rc = ethtool_get_gro(dev, useraddr);
+ break;
+ case ETHTOOL_SGRO:
+ rc = ethtool_set_gro(dev, useraddr);
+ break;
+ default:
+ rc = -EOPNOTSUPP;
+ }
+
+ if (dev->ethtool_ops->complete)
+ dev->ethtool_ops->complete(dev);
+
+ if (old_features != dev->features)
+ netdev_features_change(dev);
+
+ return rc;
+}
+
+EXPORT_SYMBOL(ethtool_op_get_link);
+EXPORT_SYMBOL(ethtool_op_get_sg);
+EXPORT_SYMBOL(ethtool_op_get_tso);
+EXPORT_SYMBOL(ethtool_op_get_tx_csum);
+EXPORT_SYMBOL(ethtool_op_set_sg);
+EXPORT_SYMBOL(ethtool_op_set_tso);
+EXPORT_SYMBOL(ethtool_op_set_tx_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_hw_csum);
+EXPORT_SYMBOL(ethtool_op_set_tx_ipv6_csum);
+EXPORT_SYMBOL(ethtool_op_set_ufo);
+EXPORT_SYMBOL(ethtool_op_get_ufo);
+EXPORT_SYMBOL(ethtool_op_set_flags);
+EXPORT_SYMBOL(ethtool_op_get_flags);
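
For orientation, a hedged sketch of how a NIC driver typically consumes the generic helpers exported above: it fills a struct ethtool_ops with a mix of its own callbacks and the ethtool_op_* defaults, and dev_ethtool() then dispatches the ETHTOOL_* commands to them. The foo_* names and the probe-time assignment are hypothetical; only the helper functions and struct fields come from the code above.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical driver callback. */
static void foo_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
    strlcpy(info->driver, "foo", sizeof(info->driver));
    strlcpy(info->version, "0.1", sizeof(info->version));
}

static const struct ethtool_ops foo_ethtool_ops = {
    .get_drvinfo = foo_get_drvinfo,
    .get_link    = ethtool_op_get_link,    /* generic carrier check */
    .get_sg      = ethtool_op_get_sg,
    .set_sg      = ethtool_op_set_sg,
    .get_tx_csum = ethtool_op_get_tx_csum,
    .set_tx_csum = ethtool_op_set_tx_csum,
    .get_tso     = ethtool_op_get_tso,
    .set_tso     = ethtool_op_set_tso,
};

/* In the driver's probe routine one would typically do:
 *     dev->ethtool_ops = &foo_ethtool_ops;
 * after which ETHTOOL_GLINK, ETHTOOL_GSG, ETHTOOL_GTSO, ... reach the
 * helpers above through the dev_ethtool() switch. */
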
diff --git a/libdde_linux26/contrib/net/core/filter.c b/libdde_linux26/contrib/net/core/filter.c
new file mode 100644
index 00000000..d1d779ca
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/filter.c
@@ -0,0 +1,541 @@
+/*
+ * Linux Socket Filter - Kernel level socket filtering
+ *
+ * Author:
+ * Jay Schulist <jschlst@samba.org>
+ *
+ * Based on the design of:
+ * - The Berkeley Packet Filter
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Andi Kleen - Fix a few bad bugs and races.
+ * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/fcntl.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/if_packet.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/netlink.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
+#include <asm/unaligned.h>
+#include <linux/filter.h>
+
+/* No hurry in this branch */
+static void *__load_pointer(struct sk_buff *skb, int k)
+{
+ u8 *ptr = NULL;
+
+ if (k >= SKF_NET_OFF)
+ ptr = skb_network_header(skb) + k - SKF_NET_OFF;
+ else if (k >= SKF_LL_OFF)
+ ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+
+ if (ptr >= skb->head && ptr < skb_tail_pointer(skb))
+ return ptr;
+ return NULL;
+}
+
+static inline void *load_pointer(struct sk_buff *skb, int k,
+ unsigned int size, void *buffer)
+{
+ if (k >= 0)
+ return skb_header_pointer(skb, k, size, buffer);
+ else {
+ if (k >= SKF_AD_OFF)
+ return NULL;
+ return __load_pointer(skb, k);
+ }
+}
+
+/**
+ * sk_filter - run a packet through a socket filter
+ * @sk: sock associated with &sk_buff
+ * @skb: buffer to filter
+ *
+ * Run the filter code and then cut skb->data to the correct size returned
+ * by sk_run_filter. If pkt_len is 0 we toss the packet. If skb->len is
+ * smaller than pkt_len we keep the whole skb->data. This is the socket
+ * level wrapper to sk_run_filter. It returns 0 if the packet should
+ * be accepted or -EPERM if the packet should be tossed.
+ *
+ */
+int sk_filter(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+ struct sk_filter *filter;
+
+ err = security_sock_rcv_skb(sk, skb);
+ if (err)
+ return err;
+
+ rcu_read_lock_bh();
+ filter = rcu_dereference(sk->sk_filter);
+ if (filter) {
+ unsigned int pkt_len = sk_run_filter(skb, filter->insns,
+ filter->len);
+ err = pkt_len ? pskb_trim(skb, pkt_len) : -EPERM;
+ }
+ rcu_read_unlock_bh();
+
+ return err;
+}
+EXPORT_SYMBOL(sk_filter);
+
+/**
+ * sk_run_filter - run a filter on a socket
+ * @skb: buffer to run the filter on
+ * @filter: filter to apply
+ * @flen: length of filter
+ *
+ * Decode and apply filter instructions to the skb->data.
+ * Return length to keep, 0 for none. skb is the data we are
+ * filtering, filter is the array of filter instructions, and
+ * len is the number of filter blocks in the array.
+ */
+unsigned int sk_run_filter(struct sk_buff *skb, struct sock_filter *filter, int flen)
+{
+ struct sock_filter *fentry; /* We walk down these */
+ void *ptr;
+ u32 A = 0; /* Accumulator */
+ u32 X = 0; /* Index Register */
+ u32 mem[BPF_MEMWORDS]; /* Scratch Memory Store */
+ u32 tmp;
+ int k;
+ int pc;
+
+ /*
+ * Process array of filter instructions.
+ */
+ for (pc = 0; pc < flen; pc++) {
+ fentry = &filter[pc];
+
+ switch (fentry->code) {
+ case BPF_ALU|BPF_ADD|BPF_X:
+ A += X;
+ continue;
+ case BPF_ALU|BPF_ADD|BPF_K:
+ A += fentry->k;
+ continue;
+ case BPF_ALU|BPF_SUB|BPF_X:
+ A -= X;
+ continue;
+ case BPF_ALU|BPF_SUB|BPF_K:
+ A -= fentry->k;
+ continue;
+ case BPF_ALU|BPF_MUL|BPF_X:
+ A *= X;
+ continue;
+ case BPF_ALU|BPF_MUL|BPF_K:
+ A *= fentry->k;
+ continue;
+ case BPF_ALU|BPF_DIV|BPF_X:
+ if (X == 0)
+ return 0;
+ A /= X;
+ continue;
+ case BPF_ALU|BPF_DIV|BPF_K:
+ A /= fentry->k;
+ continue;
+ case BPF_ALU|BPF_AND|BPF_X:
+ A &= X;
+ continue;
+ case BPF_ALU|BPF_AND|BPF_K:
+ A &= fentry->k;
+ continue;
+ case BPF_ALU|BPF_OR|BPF_X:
+ A |= X;
+ continue;
+ case BPF_ALU|BPF_OR|BPF_K:
+ A |= fentry->k;
+ continue;
+ case BPF_ALU|BPF_LSH|BPF_X:
+ A <<= X;
+ continue;
+ case BPF_ALU|BPF_LSH|BPF_K:
+ A <<= fentry->k;
+ continue;
+ case BPF_ALU|BPF_RSH|BPF_X:
+ A >>= X;
+ continue;
+ case BPF_ALU|BPF_RSH|BPF_K:
+ A >>= fentry->k;
+ continue;
+ case BPF_ALU|BPF_NEG:
+ A = -A;
+ continue;
+ case BPF_JMP|BPF_JA:
+ pc += fentry->k;
+ continue;
+ case BPF_JMP|BPF_JGT|BPF_K:
+ pc += (A > fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JGE|BPF_K:
+ pc += (A >= fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ pc += (A == fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JSET|BPF_K:
+ pc += (A & fentry->k) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JGT|BPF_X:
+ pc += (A > X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JGE|BPF_X:
+ pc += (A >= X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ pc += (A == X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_JMP|BPF_JSET|BPF_X:
+ pc += (A & X) ? fentry->jt : fentry->jf;
+ continue;
+ case BPF_LD|BPF_W|BPF_ABS:
+ k = fentry->k;
+load_w:
+ ptr = load_pointer(skb, k, 4, &tmp);
+ if (ptr != NULL) {
+ A = get_unaligned_be32(ptr);
+ continue;
+ }
+ break;
+ case BPF_LD|BPF_H|BPF_ABS:
+ k = fentry->k;
+load_h:
+ ptr = load_pointer(skb, k, 2, &tmp);
+ if (ptr != NULL) {
+ A = get_unaligned_be16(ptr);
+ continue;
+ }
+ break;
+ case BPF_LD|BPF_B|BPF_ABS:
+ k = fentry->k;
+load_b:
+ ptr = load_pointer(skb, k, 1, &tmp);
+ if (ptr != NULL) {
+ A = *(u8 *)ptr;
+ continue;
+ }
+ break;
+ case BPF_LD|BPF_W|BPF_LEN:
+ A = skb->len;
+ continue;
+ case BPF_LDX|BPF_W|BPF_LEN:
+ X = skb->len;
+ continue;
+ case BPF_LD|BPF_W|BPF_IND:
+ k = X + fentry->k;
+ goto load_w;
+ case BPF_LD|BPF_H|BPF_IND:
+ k = X + fentry->k;
+ goto load_h;
+ case BPF_LD|BPF_B|BPF_IND:
+ k = X + fentry->k;
+ goto load_b;
+ case BPF_LDX|BPF_B|BPF_MSH:
+ ptr = load_pointer(skb, fentry->k, 1, &tmp);
+ if (ptr != NULL) {
+ X = (*(u8 *)ptr & 0xf) << 2;
+ continue;
+ }
+ return 0;
+ case BPF_LD|BPF_IMM:
+ A = fentry->k;
+ continue;
+ case BPF_LDX|BPF_IMM:
+ X = fentry->k;
+ continue;
+ case BPF_LD|BPF_MEM:
+ A = mem[fentry->k];
+ continue;
+ case BPF_LDX|BPF_MEM:
+ X = mem[fentry->k];
+ continue;
+ case BPF_MISC|BPF_TAX:
+ X = A;
+ continue;
+ case BPF_MISC|BPF_TXA:
+ A = X;
+ continue;
+ case BPF_RET|BPF_K:
+ return fentry->k;
+ case BPF_RET|BPF_A:
+ return A;
+ case BPF_ST:
+ mem[fentry->k] = A;
+ continue;
+ case BPF_STX:
+ mem[fentry->k] = X;
+ continue;
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+
+ /*
+ * Handle ancillary data, which is impossible
+ * (or very difficult) to obtain by parsing the packet contents.
+ */
+ switch (k-SKF_AD_OFF) {
+ case SKF_AD_PROTOCOL:
+ A = ntohs(skb->protocol);
+ continue;
+ case SKF_AD_PKTTYPE:
+ A = skb->pkt_type;
+ continue;
+ case SKF_AD_IFINDEX:
+ A = skb->dev->ifindex;
+ continue;
+ case SKF_AD_NLATTR: {
+ struct nlattr *nla;
+
+ if (skb_is_nonlinear(skb))
+ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = nla_find((struct nlattr *)&skb->data[A],
+ skb->len - A, X);
+ if (nla)
+ A = (void *)nla - (void *)skb->data;
+ else
+ A = 0;
+ continue;
+ }
+ case SKF_AD_NLATTR_NEST: {
+ struct nlattr *nla;
+
+ if (skb_is_nonlinear(skb))
+ return 0;
+ if (A > skb->len - sizeof(struct nlattr))
+ return 0;
+
+ nla = (struct nlattr *)&skb->data[A];
+ if (nla->nla_len > skb->len - A)
+ return 0;
+
+ nla = nla_find_nested(nla, X);
+ if (nla)
+ A = (void *)nla - (void *)skb->data;
+ else
+ A = 0;
+ continue;
+ }
+ default:
+ return 0;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(sk_run_filter);
+
+/**
+ * sk_chk_filter - verify socket filter code
+ * @filter: filter to verify
+ * @flen: length of filter
+ *
+ * Check the user's filter code. If we let some ugly
+ * filter code slip through, kaboom! The filter must contain
+ * no references or jumps that are out of range, no illegal
+ * instructions, and must end with a RET instruction.
+ *
+ * All jumps are forward as they are not signed.
+ *
+ * Returns 0 if the rule set is legal or -EINVAL if not.
+ */
+int sk_chk_filter(struct sock_filter *filter, int flen)
+{
+ struct sock_filter *ftest;
+ int pc;
+
+ if (flen == 0 || flen > BPF_MAXINSNS)
+ return -EINVAL;
+
+ /* check the filter code now */
+ for (pc = 0; pc < flen; pc++) {
+ ftest = &filter[pc];
+
+ /* Only allow valid instructions */
+ switch (ftest->code) {
+ case BPF_ALU|BPF_ADD|BPF_K:
+ case BPF_ALU|BPF_ADD|BPF_X:
+ case BPF_ALU|BPF_SUB|BPF_K:
+ case BPF_ALU|BPF_SUB|BPF_X:
+ case BPF_ALU|BPF_MUL|BPF_K:
+ case BPF_ALU|BPF_MUL|BPF_X:
+ case BPF_ALU|BPF_DIV|BPF_X:
+ case BPF_ALU|BPF_AND|BPF_K:
+ case BPF_ALU|BPF_AND|BPF_X:
+ case BPF_ALU|BPF_OR|BPF_K:
+ case BPF_ALU|BPF_OR|BPF_X:
+ case BPF_ALU|BPF_LSH|BPF_K:
+ case BPF_ALU|BPF_LSH|BPF_X:
+ case BPF_ALU|BPF_RSH|BPF_K:
+ case BPF_ALU|BPF_RSH|BPF_X:
+ case BPF_ALU|BPF_NEG:
+ case BPF_LD|BPF_W|BPF_ABS:
+ case BPF_LD|BPF_H|BPF_ABS:
+ case BPF_LD|BPF_B|BPF_ABS:
+ case BPF_LD|BPF_W|BPF_LEN:
+ case BPF_LD|BPF_W|BPF_IND:
+ case BPF_LD|BPF_H|BPF_IND:
+ case BPF_LD|BPF_B|BPF_IND:
+ case BPF_LD|BPF_IMM:
+ case BPF_LDX|BPF_W|BPF_LEN:
+ case BPF_LDX|BPF_B|BPF_MSH:
+ case BPF_LDX|BPF_IMM:
+ case BPF_MISC|BPF_TAX:
+ case BPF_MISC|BPF_TXA:
+ case BPF_RET|BPF_K:
+ case BPF_RET|BPF_A:
+ break;
+
+ /* Some instructions need special checks */
+
+ case BPF_ALU|BPF_DIV|BPF_K:
+ /* check for division by zero */
+ if (ftest->k == 0)
+ return -EINVAL;
+ break;
+
+ case BPF_LD|BPF_MEM:
+ case BPF_LDX|BPF_MEM:
+ case BPF_ST:
+ case BPF_STX:
+ /* check for invalid memory addresses */
+ if (ftest->k >= BPF_MEMWORDS)
+ return -EINVAL;
+ break;
+
+ case BPF_JMP|BPF_JA:
+ /*
+ * Note that a large ftest->k might cause loops.
+ * Compare this with conditional jumps below,
+ * where offsets are limited. --ANK (981016)
+ */
+ if (ftest->k >= (unsigned)(flen-pc-1))
+ return -EINVAL;
+ break;
+
+ case BPF_JMP|BPF_JEQ|BPF_K:
+ case BPF_JMP|BPF_JEQ|BPF_X:
+ case BPF_JMP|BPF_JGE|BPF_K:
+ case BPF_JMP|BPF_JGE|BPF_X:
+ case BPF_JMP|BPF_JGT|BPF_K:
+ case BPF_JMP|BPF_JGT|BPF_X:
+ case BPF_JMP|BPF_JSET|BPF_K:
+ case BPF_JMP|BPF_JSET|BPF_X:
+ /* for conditionals both must be safe */
+ if (pc + ftest->jt + 1 >= flen ||
+ pc + ftest->jf + 1 >= flen)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ }
+
+ return (BPF_CLASS(filter[flen - 1].code) == BPF_RET) ? 0 : -EINVAL;
+}
+EXPORT_SYMBOL(sk_chk_filter);
+
+/**
+ * sk_filter_rcu_release: Release a socket filter by rcu_head
+ * @rcu: rcu_head that contains the sk_filter to free
+ */
+static void sk_filter_rcu_release(struct rcu_head *rcu)
+{
+ struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
+
+ sk_filter_release(fp);
+}
+
+static void sk_filter_delayed_uncharge(struct sock *sk, struct sk_filter *fp)
+{
+ unsigned int size = sk_filter_len(fp);
+
+ atomic_sub(size, &sk->sk_omem_alloc);
+ call_rcu_bh(&fp->rcu, sk_filter_rcu_release);
+}
+
+/**
+ * sk_attach_filter - attach a socket filter
+ * @fprog: the filter program
+ * @sk: the socket to use
+ *
+ * Attach the user's filter code. We first run some sanity checks on
+ * it to make sure it does not explode on us later. If an error
+ * occurs or there is insufficient memory for the filter a negative
+ * errno code is returned. On success the return is zero.
+ */
+int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
+{
+ struct sk_filter *fp, *old_fp;
+ unsigned int fsize = sizeof(struct sock_filter) * fprog->len;
+ int err;
+
+ /* Make sure new filter is there and in the right amounts. */
+ if (fprog->filter == NULL)
+ return -EINVAL;
+
+ fp = sock_kmalloc(sk, fsize+sizeof(*fp), GFP_KERNEL);
+ if (!fp)
+ return -ENOMEM;
+ if (copy_from_user(fp->insns, fprog->filter, fsize)) {
+ sock_kfree_s(sk, fp, fsize+sizeof(*fp));
+ return -EFAULT;
+ }
+
+ atomic_set(&fp->refcnt, 1);
+ fp->len = fprog->len;
+
+ err = sk_chk_filter(fp->insns, fp->len);
+ if (err) {
+ sk_filter_uncharge(sk, fp);
+ return err;
+ }
+
+ rcu_read_lock_bh();
+ old_fp = rcu_dereference(sk->sk_filter);
+ rcu_assign_pointer(sk->sk_filter, fp);
+ rcu_read_unlock_bh();
+
+ if (old_fp)
+ sk_filter_delayed_uncharge(sk, old_fp);
+ return 0;
+}
+
+int sk_detach_filter(struct sock *sk)
+{
+ int ret = -ENOENT;
+ struct sk_filter *filter;
+
+ rcu_read_lock_bh();
+ filter = rcu_dereference(sk->sk_filter);
+ if (filter) {
+ rcu_assign_pointer(sk->sk_filter, NULL);
+ sk_filter_delayed_uncharge(sk, filter);
+ ret = 0;
+ }
+ rcu_read_unlock_bh();
+ return ret;
+}
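
To make the filter path above concrete, here is a hedged user-space sketch of the classic BPF programs that sk_chk_filter() validates and sk_run_filter() interprets. Attaching via setsockopt(SO_ATTACH_FILTER) is the usual road into sk_attach_filter(); the program below (accept IPv4 frames, drop everything else) and the helper name are illustrative only.

#include <sys/socket.h>
#include <linux/filter.h>
#include <linux/if_ether.h>

/* Classic BPF: load the ancillary "protocol" word, accept ETH_P_IP, drop the rest. */
static struct sock_filter ipv4_only[] = {
    { BPF_LD  | BPF_H | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_PROTOCOL },
    { BPF_JMP | BPF_JEQ | BPF_K, 0, 1, ETH_P_IP },
    { BPF_RET | BPF_K,           0, 0, 0xffffffff },  /* accept: keep whole packet */
    { BPF_RET | BPF_K,           0, 0, 0 },           /* drop */
};

static int attach_ipv4_filter(int sockfd)
{
    struct sock_fprog fprog = {
        .len    = sizeof(ipv4_only) / sizeof(ipv4_only[0]),
        .filter = ipv4_only,
    };

    /* The kernel copies the program, runs sk_chk_filter() on it and, if it
     * is legal, swaps it in under sk->sk_filter (RCU-protected, see
     * sk_attach_filter() above). */
    return setsockopt(sockfd, SOL_SOCKET, SO_ATTACH_FILTER,
                      &fprog, sizeof(fprog));
}
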
diff --git a/libdde_linux26/contrib/net/core/kmap_skb.h b/libdde_linux26/contrib/net/core/kmap_skb.h
new file mode 100644
index 00000000..283c2b99
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/kmap_skb.h
@@ -0,0 +1,19 @@
+#include <linux/highmem.h>
+
+static inline void *kmap_skb_frag(const skb_frag_t *frag)
+{
+#ifdef CONFIG_HIGHMEM
+ BUG_ON(in_irq());
+
+ local_bh_disable();
+#endif
+ return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
+}
+
+static inline void kunmap_skb_frag(void *vaddr)
+{
+ kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+#ifdef CONFIG_HIGHMEM
+ local_bh_enable();
+#endif
+}
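
For context, a small sketch (assumed, not taken from this tree) of the pattern these two private helpers exist for: temporarily mapping one paged skb fragment so its bytes can be copied, much as the skb copy routines in net/core do. The helper name is hypothetical; the skb_frag_t fields are the 2.6.29-era ones.

#include <linux/skbuff.h>
#include <linux/string.h>
#include "kmap_skb.h"

/* Hypothetical helper: copy the payload of a single paged fragment. */
static void example_copy_frag(const skb_frag_t *frag, void *to)
{
    void *vaddr = kmap_skb_frag(frag);  /* disables BHs on HIGHMEM configs */

    memcpy(to, vaddr + frag->page_offset, frag->size);
    kunmap_skb_frag(vaddr);
}
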
diff --git a/libdde_linux26/contrib/net/core/neighbour.c b/libdde_linux26/contrib/net/core/neighbour.c
new file mode 100644
index 00000000..278a142d
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/neighbour.c
@@ -0,0 +1,2824 @@
+/*
+ * Generic address resolution entity
+ *
+ * Authors:
+ * Pedro Roque <roque@di.fc.ul.pt>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Fixes:
+ * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
+ * Harald Welte Add neighbour cache statistics like rtstat
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/socket.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+#include <linux/times.h>
+#include <net/net_namespace.h>
+#include <net/neighbour.h>
+#include <net/dst.h>
+#include <net/sock.h>
+#include <net/netevent.h>
+#include <net/netlink.h>
+#include <linux/rtnetlink.h>
+#include <linux/random.h>
+#include <linux/string.h>
+#include <linux/log2.h>
+
+#define NEIGH_DEBUG 1
+
+#define NEIGH_PRINTK(x...) printk(x)
+#define NEIGH_NOPRINTK(x...) do { ; } while(0)
+#define NEIGH_PRINTK0 NEIGH_PRINTK
+#define NEIGH_PRINTK1 NEIGH_NOPRINTK
+#define NEIGH_PRINTK2 NEIGH_NOPRINTK
+
+#if NEIGH_DEBUG >= 1
+#undef NEIGH_PRINTK1
+#define NEIGH_PRINTK1 NEIGH_PRINTK
+#endif
+#if NEIGH_DEBUG >= 2
+#undef NEIGH_PRINTK2
+#define NEIGH_PRINTK2 NEIGH_PRINTK
+#endif
+
+#define PNEIGH_HASHMASK 0xF
+
+static void neigh_timer_handler(unsigned long arg);
+static void __neigh_notify(struct neighbour *n, int type, int flags);
+static void neigh_update_notify(struct neighbour *neigh);
+static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
+
+static struct neigh_table *neigh_tables;
+#ifdef CONFIG_PROC_FS
+static const struct file_operations neigh_stat_seq_fops;
+#endif
+
+/*
+ Neighbour hash table buckets are protected with rwlock tbl->lock.
+
+ - All scans/updates of the hash buckets MUST be made under this lock.
+ - NOTHING clever should be done under this lock: no callbacks
+ into protocol backends, no attempts to send anything to the network.
+ That would deadlock if a backend/driver wants to use the neighbour
+ cache.
+ - If an entry requires some non-trivial action, increase
+ its reference count and release the table lock.
+
+ Neighbour entries are protected:
+ - with a reference count.
+ - with rwlock neigh->lock
+
+ The reference count prevents destruction.
+
+ neigh->lock mainly serializes the ll address data and its validity state.
+ However, the same lock is also used to protect other entry fields:
+ - timer
+ - resolution queue
+
+ Again, nothing clever shall be done under neigh->lock;
+ the most complicated procedure we allow is dev->hard_header.
+ It is assumed that dev->hard_header is simplistic and does
+ not make callbacks into the neighbour tables.
+
+ The last lock is neigh_tbl_lock. It is a pure SMP lock, protecting
+ the list of neighbour tables. This list is used only in process context.
+ */
+
+static DEFINE_RWLOCK(neigh_tbl_lock);
+
+static int neigh_blackhole(struct sk_buff *skb)
+{
+ kfree_skb(skb);
+ return -ENETDOWN;
+}
+
+static void neigh_cleanup_and_release(struct neighbour *neigh)
+{
+ if (neigh->parms->neigh_cleanup)
+ neigh->parms->neigh_cleanup(neigh);
+
+ __neigh_notify(neigh, RTM_DELNEIGH, 0);
+ neigh_release(neigh);
+}
+
+/*
+ * It is a random distribution in the interval (1/2)*base...(3/2)*base.
+ * It corresponds to the default IPv6 settings and is not overridable,
+ * because it is a really reasonable choice.
+ */
+
+unsigned long neigh_rand_reach_time(unsigned long base)
+{
+ return (base ? (net_random() % base) + (base >> 1) : 0);
+}
+EXPORT_SYMBOL(neigh_rand_reach_time);
+
+
+static int neigh_forced_gc(struct neigh_table *tbl)
+{
+ int shrunk = 0;
+ int i;
+
+ NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
+
+ write_lock_bh(&tbl->lock);
+ for (i = 0; i <= tbl->hash_mask; i++) {
+ struct neighbour *n, **np;
+
+ np = &tbl->hash_buckets[i];
+ while ((n = *np) != NULL) {
+ /* Neighbour record may be discarded if:
+ * - nobody refers to it.
+ * - it is not permanent
+ */
+ write_lock(&n->lock);
+ if (atomic_read(&n->refcnt) == 1 &&
+ !(n->nud_state & NUD_PERMANENT)) {
+ *np = n->next;
+ n->dead = 1;
+ shrunk = 1;
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ continue;
+ }
+ write_unlock(&n->lock);
+ np = &n->next;
+ }
+ }
+
+ tbl->last_flush = jiffies;
+
+ write_unlock_bh(&tbl->lock);
+
+ return shrunk;
+}
+
+static void neigh_add_timer(struct neighbour *n, unsigned long when)
+{
+ neigh_hold(n);
+ if (unlikely(mod_timer(&n->timer, when))) {
+ printk("NEIGH: BUG, double timer add, state is %x\n",
+ n->nud_state);
+ dump_stack();
+ }
+}
+
+static int neigh_del_timer(struct neighbour *n)
+{
+ if ((n->nud_state & NUD_IN_TIMER) &&
+ del_timer(&n->timer)) {
+ neigh_release(n);
+ return 1;
+ }
+ return 0;
+}
+
+static void pneigh_queue_purge(struct sk_buff_head *list)
+{
+ struct sk_buff *skb;
+
+ while ((skb = skb_dequeue(list)) != NULL) {
+ dev_put(skb->dev);
+ kfree_skb(skb);
+ }
+}
+
+static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
+{
+ int i;
+
+ for (i = 0; i <= tbl->hash_mask; i++) {
+ struct neighbour *n, **np = &tbl->hash_buckets[i];
+
+ while ((n = *np) != NULL) {
+ if (dev && n->dev != dev) {
+ np = &n->next;
+ continue;
+ }
+ *np = n->next;
+ write_lock(&n->lock);
+ neigh_del_timer(n);
+ n->dead = 1;
+
+ if (atomic_read(&n->refcnt) != 1) {
+ /* The most unpleasant situation:
+ we must destroy the neighbour entry,
+ but someone still uses it.
+
+ The destruction will be delayed until
+ the last user releases us, but
+ we must kill timers etc. and move
+ it to a safe state.
+ */
+ skb_queue_purge(&n->arp_queue);
+ n->output = neigh_blackhole;
+ if (n->nud_state & NUD_VALID)
+ n->nud_state = NUD_NOARP;
+ else
+ n->nud_state = NUD_NONE;
+ NEIGH_PRINTK2("neigh %p is stray.\n", n);
+ }
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ }
+ }
+}
+
+void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
+{
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
+ write_unlock_bh(&tbl->lock);
+}
+EXPORT_SYMBOL(neigh_changeaddr);
+
+int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ write_lock_bh(&tbl->lock);
+ neigh_flush_dev(tbl, dev);
+ pneigh_ifdown(tbl, dev);
+ write_unlock_bh(&tbl->lock);
+
+ del_timer_sync(&tbl->proxy_timer);
+ pneigh_queue_purge(&tbl->proxy_queue);
+ return 0;
+}
+EXPORT_SYMBOL(neigh_ifdown);
+
+static struct neighbour *neigh_alloc(struct neigh_table *tbl)
+{
+ struct neighbour *n = NULL;
+ unsigned long now = jiffies;
+ int entries;
+
+ entries = atomic_inc_return(&tbl->entries) - 1;
+ if (entries >= tbl->gc_thresh3 ||
+ (entries >= tbl->gc_thresh2 &&
+ time_after(now, tbl->last_flush + 5 * HZ))) {
+ if (!neigh_forced_gc(tbl) &&
+ entries >= tbl->gc_thresh3)
+ goto out_entries;
+ }
+
+ n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
+ if (!n)
+ goto out_entries;
+
+ skb_queue_head_init(&n->arp_queue);
+ rwlock_init(&n->lock);
+ n->updated = n->used = now;
+ n->nud_state = NUD_NONE;
+ n->output = neigh_blackhole;
+ n->parms = neigh_parms_clone(&tbl->parms);
+ setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
+
+ NEIGH_CACHE_STAT_INC(tbl, allocs);
+ n->tbl = tbl;
+ atomic_set(&n->refcnt, 1);
+ n->dead = 1;
+out:
+ return n;
+
+out_entries:
+ atomic_dec(&tbl->entries);
+ goto out;
+}
+
+static struct neighbour **neigh_hash_alloc(unsigned int entries)
+{
+ unsigned long size = entries * sizeof(struct neighbour *);
+ struct neighbour **ret;
+
+ if (size <= PAGE_SIZE) {
+ ret = kzalloc(size, GFP_ATOMIC);
+ } else {
+ ret = (struct neighbour **)
+ __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
+ }
+ return ret;
+}
+
+static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
+{
+ unsigned long size = entries * sizeof(struct neighbour *);
+
+ if (size <= PAGE_SIZE)
+ kfree(hash);
+ else
+ free_pages((unsigned long)hash, get_order(size));
+}
+
+static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
+{
+ struct neighbour **new_hash, **old_hash;
+ unsigned int i, new_hash_mask, old_entries;
+
+ NEIGH_CACHE_STAT_INC(tbl, hash_grows);
+
+ BUG_ON(!is_power_of_2(new_entries));
+ new_hash = neigh_hash_alloc(new_entries);
+ if (!new_hash)
+ return;
+
+ old_entries = tbl->hash_mask + 1;
+ new_hash_mask = new_entries - 1;
+ old_hash = tbl->hash_buckets;
+
+ get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+ for (i = 0; i < old_entries; i++) {
+ struct neighbour *n, *next;
+
+ for (n = old_hash[i]; n; n = next) {
+ unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
+
+ hash_val &= new_hash_mask;
+ next = n->next;
+
+ n->next = new_hash[hash_val];
+ new_hash[hash_val] = n;
+ }
+ }
+ tbl->hash_buckets = new_hash;
+ tbl->hash_mask = new_hash_mask;
+
+ neigh_hash_free(old_hash, old_entries);
+}
+
+struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev)
+{
+ struct neighbour *n;
+ int key_len = tbl->key_len;
+ u32 hash_val;
+
+ NEIGH_CACHE_STAT_INC(tbl, lookups);
+
+ read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, dev);
+ for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+ if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
+ neigh_hold(n);
+ NEIGH_CACHE_STAT_INC(tbl, hits);
+ break;
+ }
+ }
+ read_unlock_bh(&tbl->lock);
+ return n;
+}
+EXPORT_SYMBOL(neigh_lookup);
+
+struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
+ const void *pkey)
+{
+ struct neighbour *n;
+ int key_len = tbl->key_len;
+ u32 hash_val;
+
+ NEIGH_CACHE_STAT_INC(tbl, lookups);
+
+ read_lock_bh(&tbl->lock);
+ hash_val = tbl->hash(pkey, NULL);
+ for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
+ if (!memcmp(n->primary_key, pkey, key_len) &&
+ net_eq(dev_net(n->dev), net)) {
+ neigh_hold(n);
+ NEIGH_CACHE_STAT_INC(tbl, hits);
+ break;
+ }
+ }
+ read_unlock_bh(&tbl->lock);
+ return n;
+}
+EXPORT_SYMBOL(neigh_lookup_nodev);
+
+struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
+ struct net_device *dev)
+{
+ u32 hash_val;
+ int key_len = tbl->key_len;
+ int error;
+ struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
+
+ if (!n) {
+ rc = ERR_PTR(-ENOBUFS);
+ goto out;
+ }
+
+ memcpy(n->primary_key, pkey, key_len);
+ n->dev = dev;
+ dev_hold(dev);
+
+ /* Protocol specific setup. */
+ if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
+ rc = ERR_PTR(error);
+ goto out_neigh_release;
+ }
+
+ /* Device specific setup. */
+ if (n->parms->neigh_setup &&
+ (error = n->parms->neigh_setup(n)) < 0) {
+ rc = ERR_PTR(error);
+ goto out_neigh_release;
+ }
+
+ n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
+
+ write_lock_bh(&tbl->lock);
+
+ if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
+ neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
+
+ hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
+
+ if (n->parms->dead) {
+ rc = ERR_PTR(-EINVAL);
+ goto out_tbl_unlock;
+ }
+
+ for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
+ if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
+ neigh_hold(n1);
+ rc = n1;
+ goto out_tbl_unlock;
+ }
+ }
+
+ n->next = tbl->hash_buckets[hash_val];
+ tbl->hash_buckets[hash_val] = n;
+ n->dead = 0;
+ neigh_hold(n);
+ write_unlock_bh(&tbl->lock);
+ NEIGH_PRINTK2("neigh %p is created.\n", n);
+ rc = n;
+out:
+ return rc;
+out_tbl_unlock:
+ write_unlock_bh(&tbl->lock);
+out_neigh_release:
+ neigh_release(n);
+ goto out;
+}
+EXPORT_SYMBOL(neigh_create);
+
+static u32 pneigh_hash(const void *pkey, int key_len)
+{
+ u32 hash_val = *(u32 *)(pkey + key_len - 4);
+ hash_val ^= (hash_val >> 16);
+ hash_val ^= hash_val >> 8;
+ hash_val ^= hash_val >> 4;
+ hash_val &= PNEIGH_HASHMASK;
+ return hash_val;
+}
+
+static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
+ struct net *net,
+ const void *pkey,
+ int key_len,
+ struct net_device *dev)
+{
+ while (n) {
+ if (!memcmp(n->key, pkey, key_len) &&
+ net_eq(pneigh_net(n), net) &&
+ (n->dev == dev || !n->dev))
+ return n;
+ n = n->next;
+ }
+ return NULL;
+}
+
+struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
+ struct net *net, const void *pkey, struct net_device *dev)
+{
+ int key_len = tbl->key_len;
+ u32 hash_val = pneigh_hash(pkey, key_len);
+
+ return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
+ net, pkey, key_len, dev);
+}
+EXPORT_SYMBOL_GPL(__pneigh_lookup);
+
+struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
+ struct net *net, const void *pkey,
+ struct net_device *dev, int creat)
+{
+ struct pneigh_entry *n;
+ int key_len = tbl->key_len;
+ u32 hash_val = pneigh_hash(pkey, key_len);
+
+ read_lock_bh(&tbl->lock);
+ n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
+ net, pkey, key_len, dev);
+ read_unlock_bh(&tbl->lock);
+
+ if (n || !creat)
+ goto out;
+
+ ASSERT_RTNL();
+
+ n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
+ if (!n)
+ goto out;
+
+ write_pnet(&n->net, hold_net(net));
+ memcpy(n->key, pkey, key_len);
+ n->dev = dev;
+ if (dev)
+ dev_hold(dev);
+
+ if (tbl->pconstructor && tbl->pconstructor(n)) {
+ if (dev)
+ dev_put(dev);
+ release_net(net);
+ kfree(n);
+ n = NULL;
+ goto out;
+ }
+
+ write_lock_bh(&tbl->lock);
+ n->next = tbl->phash_buckets[hash_val];
+ tbl->phash_buckets[hash_val] = n;
+ write_unlock_bh(&tbl->lock);
+out:
+ return n;
+}
+EXPORT_SYMBOL(pneigh_lookup);
+
+
+int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
+ struct net_device *dev)
+{
+ struct pneigh_entry *n, **np;
+ int key_len = tbl->key_len;
+ u32 hash_val = pneigh_hash(pkey, key_len);
+
+ write_lock_bh(&tbl->lock);
+ for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
+ np = &n->next) {
+ if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
+ net_eq(pneigh_net(n), net)) {
+ *np = n->next;
+ write_unlock_bh(&tbl->lock);
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ release_net(pneigh_net(n));
+ kfree(n);
+ return 0;
+ }
+ }
+ write_unlock_bh(&tbl->lock);
+ return -ENOENT;
+}
+
+static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
+{
+ struct pneigh_entry *n, **np;
+ u32 h;
+
+ for (h = 0; h <= PNEIGH_HASHMASK; h++) {
+ np = &tbl->phash_buckets[h];
+ while ((n = *np) != NULL) {
+ if (!dev || n->dev == dev) {
+ *np = n->next;
+ if (tbl->pdestructor)
+ tbl->pdestructor(n);
+ if (n->dev)
+ dev_put(n->dev);
+ release_net(pneigh_net(n));
+ kfree(n);
+ continue;
+ }
+ np = &n->next;
+ }
+ }
+ return -ENOENT;
+}
+
+static void neigh_parms_destroy(struct neigh_parms *parms);
+
+static inline void neigh_parms_put(struct neigh_parms *parms)
+{
+ if (atomic_dec_and_test(&parms->refcnt))
+ neigh_parms_destroy(parms);
+}
+
+/*
+ * neighbour must already be out of the table;
+ *
+ */
+void neigh_destroy(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
+
+ if (!neigh->dead) {
+ printk(KERN_WARNING
+ "Destroying alive neighbour %p\n", neigh);
+ dump_stack();
+ return;
+ }
+
+ if (neigh_del_timer(neigh))
+ printk(KERN_WARNING "Impossible event.\n");
+
+ while ((hh = neigh->hh) != NULL) {
+ neigh->hh = hh->hh_next;
+ hh->hh_next = NULL;
+
+ write_seqlock_bh(&hh->hh_lock);
+ hh->hh_output = neigh_blackhole;
+ write_sequnlock_bh(&hh->hh_lock);
+ if (atomic_dec_and_test(&hh->hh_refcnt))
+ kfree(hh);
+ }
+
+ skb_queue_purge(&neigh->arp_queue);
+
+ dev_put(neigh->dev);
+ neigh_parms_put(neigh->parms);
+
+ NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
+
+ atomic_dec(&neigh->tbl->entries);
+ kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
+}
+EXPORT_SYMBOL(neigh_destroy);
+
+/* Neighbour state is suspicious;
+ disable fast path.
+
+ Called with write_locked neigh.
+ */
+static void neigh_suspect(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+
+ neigh->output = neigh->ops->output;
+
+ for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh->hh_output = neigh->ops->output;
+}
+
+/* Neighbour state is OK;
+ enable fast path.
+
+ Called with write_locked neigh.
+ */
+static void neigh_connect(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+
+ NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
+
+ neigh->output = neigh->ops->connected_output;
+
+ for (hh = neigh->hh; hh; hh = hh->hh_next)
+ hh->hh_output = neigh->ops->hh_output;
+}
+
+static void neigh_periodic_timer(unsigned long arg)
+{
+ struct neigh_table *tbl = (struct neigh_table *)arg;
+ struct neighbour *n, **np;
+ unsigned long expire, now = jiffies;
+
+ NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
+
+ write_lock(&tbl->lock);
+
+ /*
+ * periodically recompute ReachableTime from random function
+ */
+
+ if (time_after(now, tbl->last_rand + 300 * HZ)) {
+ struct neigh_parms *p;
+ tbl->last_rand = now;
+ for (p = &tbl->parms; p; p = p->next)
+ p->reachable_time =
+ neigh_rand_reach_time(p->base_reachable_time);
+ }
+
+ np = &tbl->hash_buckets[tbl->hash_chain_gc];
+ tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
+
+ while ((n = *np) != NULL) {
+ unsigned int state;
+
+ write_lock(&n->lock);
+
+ state = n->nud_state;
+ if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
+ write_unlock(&n->lock);
+ goto next_elt;
+ }
+
+ if (time_before(n->used, n->confirmed))
+ n->used = n->confirmed;
+
+ if (atomic_read(&n->refcnt) == 1 &&
+ (state == NUD_FAILED ||
+ time_after(now, n->used + n->parms->gc_staletime))) {
+ *np = n->next;
+ n->dead = 1;
+ write_unlock(&n->lock);
+ neigh_cleanup_and_release(n);
+ continue;
+ }
+ write_unlock(&n->lock);
+
+next_elt:
+ np = &n->next;
+ }
+
+ /* Cycle through all hash buckets every base_reachable_time/2 ticks.
+ * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
+ * base_reachable_time.
+ */
+ expire = tbl->parms.base_reachable_time >> 1;
+ expire /= (tbl->hash_mask + 1);
+ if (!expire)
+ expire = 1;
+
+ if (expire>HZ)
+ mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
+ else
+ mod_timer(&tbl->gc_timer, now + expire);
+
+ write_unlock(&tbl->lock);
+}
+
+static __inline__ int neigh_max_probes(struct neighbour *n)
+{
+ struct neigh_parms *p = n->parms;
+ return (n->nud_state & NUD_PROBE ?
+ p->ucast_probes :
+ p->ucast_probes + p->app_probes + p->mcast_probes);
+}
+
+/* Called when a timer expires for a neighbour entry. */
+
+static void neigh_timer_handler(unsigned long arg)
+{
+ unsigned long now, next;
+ struct neighbour *neigh = (struct neighbour *)arg;
+ unsigned state;
+ int notify = 0;
+
+ write_lock(&neigh->lock);
+
+ state = neigh->nud_state;
+ now = jiffies;
+ next = now + HZ;
+
+ if (!(state & NUD_IN_TIMER)) {
+#ifndef CONFIG_SMP
+ printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
+#endif
+ goto out;
+ }
+
+ if (state & NUD_REACHABLE) {
+ if (time_before_eq(now,
+ neigh->confirmed + neigh->parms->reachable_time)) {
+ NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
+ next = neigh->confirmed + neigh->parms->reachable_time;
+ } else if (time_before_eq(now,
+ neigh->used + neigh->parms->delay_probe_time)) {
+ NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+ neigh->nud_state = NUD_DELAY;
+ neigh->updated = jiffies;
+ neigh_suspect(neigh);
+ next = now + neigh->parms->delay_probe_time;
+ } else {
+ NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
+ neigh->nud_state = NUD_STALE;
+ neigh->updated = jiffies;
+ neigh_suspect(neigh);
+ notify = 1;
+ }
+ } else if (state & NUD_DELAY) {
+ if (time_before_eq(now,
+ neigh->confirmed + neigh->parms->delay_probe_time)) {
+ NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
+ neigh->nud_state = NUD_REACHABLE;
+ neigh->updated = jiffies;
+ neigh_connect(neigh);
+ notify = 1;
+ next = neigh->confirmed + neigh->parms->reachable_time;
+ } else {
+ NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
+ neigh->nud_state = NUD_PROBE;
+ neigh->updated = jiffies;
+ atomic_set(&neigh->probes, 0);
+ next = now + neigh->parms->retrans_time;
+ }
+ } else {
+ /* NUD_PROBE|NUD_INCOMPLETE */
+ next = now + neigh->parms->retrans_time;
+ }
+
+ if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
+ atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
+ struct sk_buff *skb;
+
+ neigh->nud_state = NUD_FAILED;
+ neigh->updated = jiffies;
+ notify = 1;
+ NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
+ NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
+
+ /* This is a very delicate spot. report_unreachable is a very
+ complicated routine; in particular, it can hit this same neighbour entry!
+
+ So we try to be careful and avoid an endless loop. --ANK
+ */
+ while (neigh->nud_state == NUD_FAILED &&
+ (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+ write_unlock(&neigh->lock);
+ neigh->ops->error_report(neigh, skb);
+ write_lock(&neigh->lock);
+ }
+ skb_queue_purge(&neigh->arp_queue);
+ }
+
+ if (neigh->nud_state & NUD_IN_TIMER) {
+ if (time_before(next, jiffies + HZ/2))
+ next = jiffies + HZ/2;
+ if (!mod_timer(&neigh->timer, next))
+ neigh_hold(neigh);
+ }
+ if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
+ struct sk_buff *skb = skb_peek(&neigh->arp_queue);
+ /* keep skb alive even if arp_queue overflows */
+ if (skb)
+ skb = skb_copy(skb, GFP_ATOMIC);
+ write_unlock(&neigh->lock);
+ neigh->ops->solicit(neigh, skb);
+ atomic_inc(&neigh->probes);
+ if (skb)
+ kfree_skb(skb);
+ } else {
+out:
+ write_unlock(&neigh->lock);
+ }
+
+ if (notify)
+ neigh_update_notify(neigh);
+
+ neigh_release(neigh);
+}
+
+int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
+{
+ int rc;
+ unsigned long now;
+
+ write_lock_bh(&neigh->lock);
+
+ rc = 0;
+ if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
+ goto out_unlock_bh;
+
+ now = jiffies;
+
+ if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
+ if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
+ atomic_set(&neigh->probes, neigh->parms->ucast_probes);
+ neigh->nud_state = NUD_INCOMPLETE;
+ neigh->updated = jiffies;
+ neigh_add_timer(neigh, now + 1);
+ } else {
+ neigh->nud_state = NUD_FAILED;
+ neigh->updated = jiffies;
+ write_unlock_bh(&neigh->lock);
+
+ if (skb)
+ kfree_skb(skb);
+ return 1;
+ }
+ } else if (neigh->nud_state & NUD_STALE) {
+ NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
+ neigh->nud_state = NUD_DELAY;
+ neigh->updated = jiffies;
+ neigh_add_timer(neigh,
+ jiffies + neigh->parms->delay_probe_time);
+ }
+
+ if (neigh->nud_state == NUD_INCOMPLETE) {
+ if (skb) {
+ if (skb_queue_len(&neigh->arp_queue) >=
+ neigh->parms->queue_len) {
+ struct sk_buff *buff;
+ buff = __skb_dequeue(&neigh->arp_queue);
+ kfree_skb(buff);
+ NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
+ }
+ __skb_queue_tail(&neigh->arp_queue, skb);
+ }
+ rc = 1;
+ }
+out_unlock_bh:
+ write_unlock_bh(&neigh->lock);
+ return rc;
+}
+EXPORT_SYMBOL(__neigh_event_send);
+
+static void neigh_update_hhs(struct neighbour *neigh)
+{
+ struct hh_cache *hh;
+ void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
+ = neigh->dev->header_ops->cache_update;
+
+ if (update) {
+ for (hh = neigh->hh; hh; hh = hh->hh_next) {
+ write_seqlock_bh(&hh->hh_lock);
+ update(hh, neigh->dev, neigh->ha);
+ write_sequnlock_bh(&hh->hh_lock);
+ }
+ }
+}
+
+
+
+/* Generic update routine.
+   -- lladdr is the new lladdr, or NULL if it is not supplied.
+   -- new    is the new state.
+   -- flags
+	NEIGH_UPDATE_F_OVERRIDE allows overriding an existing lladdr
+				if it is different.
+	NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
+				lladdr instead of overriding it
+				if it is different.
+				It also allows the current state to be
+				retained if lladdr is unchanged.
+	NEIGH_UPDATE_F_ADMIN	means that the change is administrative.
+
+	NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
+				NTF_ROUTER flag.
+	NEIGH_UPDATE_F_ISROUTER	indicates whether the neighbour is known
+				as a router.
+
+	The caller MUST hold a reference count on the entry.
+ */
+
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
+ u32 flags)
+{
+ u8 old;
+ int err;
+ int notify = 0;
+ struct net_device *dev;
+ int update_isrouter = 0;
+
+ write_lock_bh(&neigh->lock);
+
+ dev = neigh->dev;
+ old = neigh->nud_state;
+ err = -EPERM;
+
+ if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
+ (old & (NUD_NOARP | NUD_PERMANENT)))
+ goto out;
+
+ if (!(new & NUD_VALID)) {
+ neigh_del_timer(neigh);
+ if (old & NUD_CONNECTED)
+ neigh_suspect(neigh);
+ neigh->nud_state = new;
+ err = 0;
+ notify = old & NUD_VALID;
+ goto out;
+ }
+
+ /* Compare new lladdr with cached one */
+ if (!dev->addr_len) {
+ /* First case: device needs no address. */
+ lladdr = neigh->ha;
+ } else if (lladdr) {
+ /* The second case: if something is already cached
+ and a new address is proposed:
+ - compare new & old
+ - if they are different, check override flag
+ */
+ if ((old & NUD_VALID) &&
+ !memcmp(lladdr, neigh->ha, dev->addr_len))
+ lladdr = neigh->ha;
+ } else {
+ /* No address is supplied; if we know something,
+ use it, otherwise discard the request.
+ */
+ err = -EINVAL;
+ if (!(old & NUD_VALID))
+ goto out;
+ lladdr = neigh->ha;
+ }
+
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+ neigh->updated = jiffies;
+
+	/* If the entry was valid and the address has not changed,
+	   do not change the entry state if the new one is STALE.
+	 */
+ err = 0;
+ update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
+ if (old & NUD_VALID) {
+ if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
+ update_isrouter = 0;
+ if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
+ (old & NUD_CONNECTED)) {
+ lladdr = neigh->ha;
+ new = NUD_STALE;
+ } else
+ goto out;
+ } else {
+ if (lladdr == neigh->ha && new == NUD_STALE &&
+ ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
+ (old & NUD_CONNECTED))
+ )
+ new = old;
+ }
+ }
+
+ if (new != old) {
+ neigh_del_timer(neigh);
+ if (new & NUD_IN_TIMER)
+ neigh_add_timer(neigh, (jiffies +
+ ((new & NUD_REACHABLE) ?
+ neigh->parms->reachable_time :
+ 0)));
+ neigh->nud_state = new;
+ }
+
+ if (lladdr != neigh->ha) {
+ memcpy(&neigh->ha, lladdr, dev->addr_len);
+ neigh_update_hhs(neigh);
+ if (!(new & NUD_CONNECTED))
+ neigh->confirmed = jiffies -
+ (neigh->parms->base_reachable_time << 1);
+ notify = 1;
+ }
+ if (new == old)
+ goto out;
+ if (new & NUD_CONNECTED)
+ neigh_connect(neigh);
+ else
+ neigh_suspect(neigh);
+ if (!(old & NUD_VALID)) {
+ struct sk_buff *skb;
+
+ /* Again: avoid dead loop if something went wrong */
+
+ while (neigh->nud_state & NUD_VALID &&
+ (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
+ struct neighbour *n1 = neigh;
+ write_unlock_bh(&neigh->lock);
+ /* On shaper/eql skb->dst->neighbour != neigh :( */
+ if (skb->dst && skb->dst->neighbour)
+ n1 = skb->dst->neighbour;
+ n1->output(skb);
+ write_lock_bh(&neigh->lock);
+ }
+ skb_queue_purge(&neigh->arp_queue);
+ }
+out:
+ if (update_isrouter) {
+ neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
+ (neigh->flags | NTF_ROUTER) :
+ (neigh->flags & ~NTF_ROUTER);
+ }
+ write_unlock_bh(&neigh->lock);
+
+ if (notify)
+ neigh_update_notify(neigh);
+
+ return err;
+}
+EXPORT_SYMBOL(neigh_update);
+
+struct neighbour *neigh_event_ns(struct neigh_table *tbl,
+ u8 *lladdr, void *saddr,
+ struct net_device *dev)
+{
+ struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
+ lladdr || !dev->addr_len);
+ if (neigh)
+ neigh_update(neigh, lladdr, NUD_STALE,
+ NEIGH_UPDATE_F_OVERRIDE);
+ return neigh;
+}
+EXPORT_SYMBOL(neigh_event_ns);
+
+static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
+ __be16 protocol)
+{
+ struct hh_cache *hh;
+ struct net_device *dev = dst->dev;
+
+ for (hh = n->hh; hh; hh = hh->hh_next)
+ if (hh->hh_type == protocol)
+ break;
+
+ if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
+ seqlock_init(&hh->hh_lock);
+ hh->hh_type = protocol;
+ atomic_set(&hh->hh_refcnt, 0);
+ hh->hh_next = NULL;
+
+ if (dev->header_ops->cache(n, hh)) {
+ kfree(hh);
+ hh = NULL;
+ } else {
+ atomic_inc(&hh->hh_refcnt);
+ hh->hh_next = n->hh;
+ n->hh = hh;
+ if (n->nud_state & NUD_CONNECTED)
+ hh->hh_output = n->ops->hh_output;
+ else
+ hh->hh_output = n->ops->output;
+ }
+ }
+ if (hh) {
+ atomic_inc(&hh->hh_refcnt);
+ dst->hh = hh;
+ }
+}
+
+/* This function can be used in contexts where only the old dev_queue_xmit
+   worked, e.g. if you want to override the normal output path (eql, shaper)
+   but resolution has not been done yet.
+ */
+
+int neigh_compat_output(struct sk_buff *skb)
+{
+ struct net_device *dev = skb->dev;
+
+ __skb_pull(skb, skb_network_offset(skb));
+
+ if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
+ skb->len) < 0 &&
+ dev->header_ops->rebuild(skb))
+ return 0;
+
+ return dev_queue_xmit(skb);
+}
+EXPORT_SYMBOL(neigh_compat_output);
+
+/* Slow and careful. */
+
+int neigh_resolve_output(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct neighbour *neigh;
+ int rc = 0;
+
+ if (!dst || !(neigh = dst->neighbour))
+ goto discard;
+
+ __skb_pull(skb, skb_network_offset(skb));
+
+ if (!neigh_event_send(neigh, skb)) {
+ int err;
+ struct net_device *dev = neigh->dev;
+ if (dev->header_ops->cache && !dst->hh) {
+ write_lock_bh(&neigh->lock);
+ if (!dst->hh)
+ neigh_hh_init(neigh, dst, dst->ops->protocol);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+ write_unlock_bh(&neigh->lock);
+ } else {
+ read_lock_bh(&neigh->lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+ read_unlock_bh(&neigh->lock);
+ }
+ if (err >= 0)
+ rc = neigh->ops->queue_xmit(skb);
+ else
+ goto out_kfree_skb;
+ }
+out:
+ return rc;
+discard:
+ NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
+ dst, dst ? dst->neighbour : NULL);
+out_kfree_skb:
+ rc = -EINVAL;
+ kfree_skb(skb);
+ goto out;
+}
+EXPORT_SYMBOL(neigh_resolve_output);
+
+/* As fast as possible without hh cache */
+
+int neigh_connected_output(struct sk_buff *skb)
+{
+ int err;
+ struct dst_entry *dst = skb->dst;
+ struct neighbour *neigh = dst->neighbour;
+ struct net_device *dev = neigh->dev;
+
+ __skb_pull(skb, skb_network_offset(skb));
+
+ read_lock_bh(&neigh->lock);
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ neigh->ha, NULL, skb->len);
+ read_unlock_bh(&neigh->lock);
+ if (err >= 0)
+ err = neigh->ops->queue_xmit(skb);
+ else {
+ err = -EINVAL;
+ kfree_skb(skb);
+ }
+ return err;
+}
+EXPORT_SYMBOL(neigh_connected_output);
+
+static void neigh_proxy_process(unsigned long arg)
+{
+ struct neigh_table *tbl = (struct neigh_table *)arg;
+ long sched_next = 0;
+ unsigned long now = jiffies;
+ struct sk_buff *skb, *n;
+
+ spin_lock(&tbl->proxy_queue.lock);
+
+ skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
+ long tdif = NEIGH_CB(skb)->sched_next - now;
+
+ if (tdif <= 0) {
+ struct net_device *dev = skb->dev;
+ __skb_unlink(skb, &tbl->proxy_queue);
+ if (tbl->proxy_redo && netif_running(dev))
+ tbl->proxy_redo(skb);
+ else
+ kfree_skb(skb);
+
+ dev_put(dev);
+ } else if (!sched_next || tdif < sched_next)
+ sched_next = tdif;
+ }
+ del_timer(&tbl->proxy_timer);
+ if (sched_next)
+ mod_timer(&tbl->proxy_timer, jiffies + sched_next);
+ spin_unlock(&tbl->proxy_queue.lock);
+}
+
+void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
+ struct sk_buff *skb)
+{
+ unsigned long now = jiffies;
+ unsigned long sched_next = now + (net_random() % p->proxy_delay);
+
+ if (tbl->proxy_queue.qlen > p->proxy_qlen) {
+ kfree_skb(skb);
+ return;
+ }
+
+ NEIGH_CB(skb)->sched_next = sched_next;
+ NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
+
+ spin_lock(&tbl->proxy_queue.lock);
+ if (del_timer(&tbl->proxy_timer)) {
+ if (time_before(tbl->proxy_timer.expires, sched_next))
+ sched_next = tbl->proxy_timer.expires;
+ }
+ dst_release(skb->dst);
+ skb->dst = NULL;
+ dev_hold(skb->dev);
+ __skb_queue_tail(&tbl->proxy_queue, skb);
+ mod_timer(&tbl->proxy_timer, sched_next);
+ spin_unlock(&tbl->proxy_queue.lock);
+}
+EXPORT_SYMBOL(pneigh_enqueue);
+
+static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
+ struct net *net, int ifindex)
+{
+ struct neigh_parms *p;
+
+ for (p = &tbl->parms; p; p = p->next) {
+ if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
+ (!p->dev && !ifindex))
+ return p;
+ }
+
+ return NULL;
+}
+
+struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
+ struct neigh_table *tbl)
+{
+ struct neigh_parms *p, *ref;
+ struct net *net = dev_net(dev);
+ const struct net_device_ops *ops = dev->netdev_ops;
+
+ ref = lookup_neigh_params(tbl, net, 0);
+ if (!ref)
+ return NULL;
+
+ p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
+ if (p) {
+ p->tbl = tbl;
+ atomic_set(&p->refcnt, 1);
+ p->reachable_time =
+ neigh_rand_reach_time(p->base_reachable_time);
+
+ if (ops->ndo_neigh_setup && ops->ndo_neigh_setup(dev, p)) {
+ kfree(p);
+ return NULL;
+ }
+
+ dev_hold(dev);
+ p->dev = dev;
+ write_pnet(&p->net, hold_net(net));
+ p->sysctl_table = NULL;
+ write_lock_bh(&tbl->lock);
+ p->next = tbl->parms.next;
+ tbl->parms.next = p;
+ write_unlock_bh(&tbl->lock);
+ }
+ return p;
+}
+EXPORT_SYMBOL(neigh_parms_alloc);
+
+static void neigh_rcu_free_parms(struct rcu_head *head)
+{
+ struct neigh_parms *parms =
+ container_of(head, struct neigh_parms, rcu_head);
+
+ neigh_parms_put(parms);
+}
+
+void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
+{
+ struct neigh_parms **p;
+
+ if (!parms || parms == &tbl->parms)
+ return;
+ write_lock_bh(&tbl->lock);
+ for (p = &tbl->parms.next; *p; p = &(*p)->next) {
+ if (*p == parms) {
+ *p = parms->next;
+ parms->dead = 1;
+ write_unlock_bh(&tbl->lock);
+ if (parms->dev)
+ dev_put(parms->dev);
+ call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
+ return;
+ }
+ }
+ write_unlock_bh(&tbl->lock);
+ NEIGH_PRINTK1("neigh_parms_release: not found\n");
+}
+EXPORT_SYMBOL(neigh_parms_release);
+
+static void neigh_parms_destroy(struct neigh_parms *parms)
+{
+ release_net(neigh_parms_net(parms));
+ kfree(parms);
+}
+
+static struct lock_class_key neigh_table_proxy_queue_class;
+
+void neigh_table_init_no_netlink(struct neigh_table *tbl)
+{
+ unsigned long now = jiffies;
+ unsigned long phsize;
+
+ write_pnet(&tbl->parms.net, &init_net);
+ atomic_set(&tbl->parms.refcnt, 1);
+ tbl->parms.reachable_time =
+ neigh_rand_reach_time(tbl->parms.base_reachable_time);
+
+ if (!tbl->kmem_cachep)
+ tbl->kmem_cachep =
+ kmem_cache_create(tbl->id, tbl->entry_size, 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC,
+ NULL);
+ tbl->stats = alloc_percpu(struct neigh_statistics);
+ if (!tbl->stats)
+ panic("cannot create neighbour cache statistics");
+
+#ifdef CONFIG_PROC_FS
+ if (!proc_create_data(tbl->id, 0, init_net.proc_net_stat,
+ &neigh_stat_seq_fops, tbl))
+ panic("cannot create neighbour proc dir entry");
+#endif
+
+ tbl->hash_mask = 1;
+ tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
+
+ phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
+ tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
+
+ if (!tbl->hash_buckets || !tbl->phash_buckets)
+ panic("cannot allocate neighbour cache hashes");
+
+ get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
+
+ rwlock_init(&tbl->lock);
+ setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
+ tbl->gc_timer.expires = now + 1;
+ add_timer(&tbl->gc_timer);
+
+ setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
+ skb_queue_head_init_class(&tbl->proxy_queue,
+ &neigh_table_proxy_queue_class);
+
+ tbl->last_flush = now;
+ tbl->last_rand = now + tbl->parms.reachable_time * 20;
+}
+EXPORT_SYMBOL(neigh_table_init_no_netlink);
+
+void neigh_table_init(struct neigh_table *tbl)
+{
+ struct neigh_table *tmp;
+
+ neigh_table_init_no_netlink(tbl);
+ write_lock(&neigh_tbl_lock);
+ for (tmp = neigh_tables; tmp; tmp = tmp->next) {
+ if (tmp->family == tbl->family)
+ break;
+ }
+ tbl->next = neigh_tables;
+ neigh_tables = tbl;
+ write_unlock(&neigh_tbl_lock);
+
+ if (unlikely(tmp)) {
+ printk(KERN_ERR "NEIGH: Registering multiple tables for "
+ "family %d\n", tbl->family);
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(neigh_table_init);
+
+int neigh_table_clear(struct neigh_table *tbl)
+{
+ struct neigh_table **tp;
+
+	/* This is not clean... Fix it so that the IPv6 module can be unloaded safely */
+ del_timer_sync(&tbl->gc_timer);
+ del_timer_sync(&tbl->proxy_timer);
+ pneigh_queue_purge(&tbl->proxy_queue);
+ neigh_ifdown(tbl, NULL);
+ if (atomic_read(&tbl->entries))
+ printk(KERN_CRIT "neighbour leakage\n");
+ write_lock(&neigh_tbl_lock);
+ for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
+ if (*tp == tbl) {
+ *tp = tbl->next;
+ break;
+ }
+ }
+ write_unlock(&neigh_tbl_lock);
+
+ neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
+ tbl->hash_buckets = NULL;
+
+ kfree(tbl->phash_buckets);
+ tbl->phash_buckets = NULL;
+
+ remove_proc_entry(tbl->id, init_net.proc_net_stat);
+
+ free_percpu(tbl->stats);
+ tbl->stats = NULL;
+
+ kmem_cache_destroy(tbl->kmem_cachep);
+ tbl->kmem_cachep = NULL;
+
+ return 0;
+}
+EXPORT_SYMBOL(neigh_table_clear);
+
+static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ndmsg *ndm;
+ struct nlattr *dst_attr;
+ struct neigh_table *tbl;
+ struct net_device *dev = NULL;
+ int err = -EINVAL;
+
+ if (nlmsg_len(nlh) < sizeof(*ndm))
+ goto out;
+
+ dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
+ if (dst_attr == NULL)
+ goto out;
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_ifindex) {
+ dev = dev_get_by_index(net, ndm->ndm_ifindex);
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto out;
+ }
+ }
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ struct neighbour *neigh;
+
+ if (tbl->family != ndm->ndm_family)
+ continue;
+ read_unlock(&neigh_tbl_lock);
+
+ if (nla_len(dst_attr) < tbl->key_len)
+ goto out_dev_put;
+
+ if (ndm->ndm_flags & NTF_PROXY) {
+ err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
+ goto out_dev_put;
+ }
+
+ if (dev == NULL)
+ goto out_dev_put;
+
+ neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
+ if (neigh == NULL) {
+ err = -ENOENT;
+ goto out_dev_put;
+ }
+
+ err = neigh_update(neigh, NULL, NUD_FAILED,
+ NEIGH_UPDATE_F_OVERRIDE |
+ NEIGH_UPDATE_F_ADMIN);
+ neigh_release(neigh);
+ goto out_dev_put;
+ }
+ read_unlock(&neigh_tbl_lock);
+ err = -EAFNOSUPPORT;
+
+out_dev_put:
+ if (dev)
+ dev_put(dev);
+out:
+ return err;
+}
+
+static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ndmsg *ndm;
+ struct nlattr *tb[NDA_MAX+1];
+ struct neigh_table *tbl;
+ struct net_device *dev = NULL;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
+ if (err < 0)
+ goto out;
+
+ err = -EINVAL;
+ if (tb[NDA_DST] == NULL)
+ goto out;
+
+ ndm = nlmsg_data(nlh);
+ if (ndm->ndm_ifindex) {
+ dev = dev_get_by_index(net, ndm->ndm_ifindex);
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto out;
+ }
+
+ if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
+ goto out_dev_put;
+ }
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
+ struct neighbour *neigh;
+ void *dst, *lladdr;
+
+ if (tbl->family != ndm->ndm_family)
+ continue;
+ read_unlock(&neigh_tbl_lock);
+
+ if (nla_len(tb[NDA_DST]) < tbl->key_len)
+ goto out_dev_put;
+ dst = nla_data(tb[NDA_DST]);
+ lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
+
+ if (ndm->ndm_flags & NTF_PROXY) {
+ struct pneigh_entry *pn;
+
+ err = -ENOBUFS;
+ pn = pneigh_lookup(tbl, net, dst, dev, 1);
+ if (pn) {
+ pn->flags = ndm->ndm_flags;
+ err = 0;
+ }
+ goto out_dev_put;
+ }
+
+ if (dev == NULL)
+ goto out_dev_put;
+
+ neigh = neigh_lookup(tbl, dst, dev);
+ if (neigh == NULL) {
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
+ err = -ENOENT;
+ goto out_dev_put;
+ }
+
+ neigh = __neigh_lookup_errno(tbl, dst, dev);
+ if (IS_ERR(neigh)) {
+ err = PTR_ERR(neigh);
+ goto out_dev_put;
+ }
+ } else {
+ if (nlh->nlmsg_flags & NLM_F_EXCL) {
+ err = -EEXIST;
+ neigh_release(neigh);
+ goto out_dev_put;
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
+ flags &= ~NEIGH_UPDATE_F_OVERRIDE;
+ }
+
+ err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
+ neigh_release(neigh);
+ goto out_dev_put;
+ }
+
+ read_unlock(&neigh_tbl_lock);
+ err = -EAFNOSUPPORT;
+
+out_dev_put:
+ if (dev)
+ dev_put(dev);
+out:
+ return err;
+}
+
+static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
+{
+ struct nlattr *nest;
+
+ nest = nla_nest_start(skb, NDTA_PARMS);
+ if (nest == NULL)
+ return -ENOBUFS;
+
+ if (parms->dev)
+ NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
+
+ NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
+ NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
+ NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
+ NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
+ NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
+ NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
+ NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
+ NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
+ parms->base_reachable_time);
+ NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
+ NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
+ NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
+ NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
+ NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
+ NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
+
+ return nla_nest_end(skb, nest);
+
+nla_put_failure:
+ nla_nest_cancel(skb, nest);
+ return -EMSGSIZE;
+}
+
+static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
+ u32 pid, u32 seq, int type, int flags)
+{
+ struct nlmsghdr *nlh;
+ struct ndtmsg *ndtmsg;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndtmsg = nlmsg_data(nlh);
+
+ read_lock_bh(&tbl->lock);
+ ndtmsg->ndtm_family = tbl->family;
+ ndtmsg->ndtm_pad1 = 0;
+ ndtmsg->ndtm_pad2 = 0;
+
+ NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
+ NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
+ NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
+ NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
+ NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
+
+ {
+ unsigned long now = jiffies;
+ unsigned int flush_delta = now - tbl->last_flush;
+ unsigned int rand_delta = now - tbl->last_rand;
+
+ struct ndt_config ndc = {
+ .ndtc_key_len = tbl->key_len,
+ .ndtc_entry_size = tbl->entry_size,
+ .ndtc_entries = atomic_read(&tbl->entries),
+ .ndtc_last_flush = jiffies_to_msecs(flush_delta),
+ .ndtc_last_rand = jiffies_to_msecs(rand_delta),
+ .ndtc_hash_rnd = tbl->hash_rnd,
+ .ndtc_hash_mask = tbl->hash_mask,
+ .ndtc_hash_chain_gc = tbl->hash_chain_gc,
+ .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
+ };
+
+ NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
+ }
+
+ {
+ int cpu;
+ struct ndt_stats ndst;
+
+ memset(&ndst, 0, sizeof(ndst));
+
+ for_each_possible_cpu(cpu) {
+ struct neigh_statistics *st;
+
+ st = per_cpu_ptr(tbl->stats, cpu);
+ ndst.ndts_allocs += st->allocs;
+ ndst.ndts_destroys += st->destroys;
+ ndst.ndts_hash_grows += st->hash_grows;
+ ndst.ndts_res_failed += st->res_failed;
+ ndst.ndts_lookups += st->lookups;
+ ndst.ndts_hits += st->hits;
+ ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
+ ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
+ ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
+ ndst.ndts_forced_gc_runs += st->forced_gc_runs;
+ }
+
+ NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
+ }
+
+ BUG_ON(tbl->parms.dev);
+ if (neightbl_fill_parms(skb, &tbl->parms) < 0)
+ goto nla_put_failure;
+
+ read_unlock_bh(&tbl->lock);
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ read_unlock_bh(&tbl->lock);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int neightbl_fill_param_info(struct sk_buff *skb,
+ struct neigh_table *tbl,
+ struct neigh_parms *parms,
+ u32 pid, u32 seq, int type,
+ unsigned int flags)
+{
+ struct ndtmsg *ndtmsg;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndtmsg = nlmsg_data(nlh);
+
+ read_lock_bh(&tbl->lock);
+ ndtmsg->ndtm_family = tbl->family;
+ ndtmsg->ndtm_pad1 = 0;
+ ndtmsg->ndtm_pad2 = 0;
+
+ if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
+ neightbl_fill_parms(skb, parms) < 0)
+ goto errout;
+
+ read_unlock_bh(&tbl->lock);
+ return nlmsg_end(skb, nlh);
+errout:
+ read_unlock_bh(&tbl->lock);
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
+ [NDTA_NAME] = { .type = NLA_STRING },
+ [NDTA_THRESH1] = { .type = NLA_U32 },
+ [NDTA_THRESH2] = { .type = NLA_U32 },
+ [NDTA_THRESH3] = { .type = NLA_U32 },
+ [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
+ [NDTA_PARMS] = { .type = NLA_NESTED },
+};
+
+static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
+ [NDTPA_IFINDEX] = { .type = NLA_U32 },
+ [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
+ [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
+ [NDTPA_APP_PROBES] = { .type = NLA_U32 },
+ [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
+ [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
+ [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
+ [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
+ [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
+ [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
+ [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
+ [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
+ [NDTPA_LOCKTIME] = { .type = NLA_U64 },
+};
+
+static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct neigh_table *tbl;
+ struct ndtmsg *ndtmsg;
+ struct nlattr *tb[NDTA_MAX+1];
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
+ nl_neightbl_policy);
+ if (err < 0)
+ goto errout;
+
+ if (tb[NDTA_NAME] == NULL) {
+ err = -EINVAL;
+ goto errout;
+ }
+
+ ndtmsg = nlmsg_data(nlh);
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables; tbl; tbl = tbl->next) {
+ if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
+ continue;
+
+ if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
+ break;
+ }
+
+ if (tbl == NULL) {
+ err = -ENOENT;
+ goto errout_locked;
+ }
+
+ /*
+ * We acquire tbl->lock to be nice to the periodic timers and
+ * make sure they always see a consistent set of values.
+ */
+ write_lock_bh(&tbl->lock);
+
+ if (tb[NDTA_PARMS]) {
+ struct nlattr *tbp[NDTPA_MAX+1];
+ struct neigh_parms *p;
+ int i, ifindex = 0;
+
+ err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
+ nl_ntbl_parm_policy);
+ if (err < 0)
+ goto errout_tbl_lock;
+
+ if (tbp[NDTPA_IFINDEX])
+ ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
+
+ p = lookup_neigh_params(tbl, net, ifindex);
+ if (p == NULL) {
+ err = -ENOENT;
+ goto errout_tbl_lock;
+ }
+
+ for (i = 1; i <= NDTPA_MAX; i++) {
+ if (tbp[i] == NULL)
+ continue;
+
+ switch (i) {
+ case NDTPA_QUEUE_LEN:
+ p->queue_len = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_PROXY_QLEN:
+ p->proxy_qlen = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_APP_PROBES:
+ p->app_probes = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_UCAST_PROBES:
+ p->ucast_probes = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_MCAST_PROBES:
+ p->mcast_probes = nla_get_u32(tbp[i]);
+ break;
+ case NDTPA_BASE_REACHABLE_TIME:
+ p->base_reachable_time = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_GC_STALETIME:
+ p->gc_staletime = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_DELAY_PROBE_TIME:
+ p->delay_probe_time = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_RETRANS_TIME:
+ p->retrans_time = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_ANYCAST_DELAY:
+ p->anycast_delay = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_PROXY_DELAY:
+ p->proxy_delay = nla_get_msecs(tbp[i]);
+ break;
+ case NDTPA_LOCKTIME:
+ p->locktime = nla_get_msecs(tbp[i]);
+ break;
+ }
+ }
+ }
+
+ if (tb[NDTA_THRESH1])
+ tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
+
+ if (tb[NDTA_THRESH2])
+ tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
+
+ if (tb[NDTA_THRESH3])
+ tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
+
+ if (tb[NDTA_GC_INTERVAL])
+ tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
+
+ err = 0;
+
+errout_tbl_lock:
+ write_unlock_bh(&tbl->lock);
+errout_locked:
+ read_unlock(&neigh_tbl_lock);
+errout:
+ return err;
+}
+
+static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int family, tidx, nidx = 0;
+ int tbl_skip = cb->args[0];
+ int neigh_skip = cb->args[1];
+ struct neigh_table *tbl;
+
+ family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+
+ read_lock(&neigh_tbl_lock);
+ for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
+ struct neigh_parms *p;
+
+ if (tidx < tbl_skip || (family && tbl->family != family))
+ continue;
+
+ if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
+ NLM_F_MULTI) <= 0)
+ break;
+
+ for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
+ if (!net_eq(neigh_parms_net(p), net))
+ continue;
+
+ if (nidx < neigh_skip)
+ goto next;
+
+ if (neightbl_fill_param_info(skb, tbl, p,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGHTBL,
+ NLM_F_MULTI) <= 0)
+ goto out;
+ next:
+ nidx++;
+ }
+
+ neigh_skip = 0;
+ }
+out:
+ read_unlock(&neigh_tbl_lock);
+ cb->args[0] = tidx;
+ cb->args[1] = nidx;
+
+ return skb->len;
+}
+
+static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
+ u32 pid, u32 seq, int type, unsigned int flags)
+{
+ unsigned long now = jiffies;
+ struct nda_cacheinfo ci;
+ struct nlmsghdr *nlh;
+ struct ndmsg *ndm;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ndm = nlmsg_data(nlh);
+ ndm->ndm_family = neigh->ops->family;
+ ndm->ndm_pad1 = 0;
+ ndm->ndm_pad2 = 0;
+ ndm->ndm_flags = neigh->flags;
+ ndm->ndm_type = neigh->type;
+ ndm->ndm_ifindex = neigh->dev->ifindex;
+
+ NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
+
+ read_lock_bh(&neigh->lock);
+ ndm->ndm_state = neigh->nud_state;
+ if ((neigh->nud_state & NUD_VALID) &&
+ nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
+ read_unlock_bh(&neigh->lock);
+ goto nla_put_failure;
+ }
+
+ ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
+ ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
+ ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
+ ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
+ read_unlock_bh(&neigh->lock);
+
+ NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
+ NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static void neigh_update_notify(struct neighbour *neigh)
+{
+ call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
+ __neigh_notify(neigh, RTM_NEWNEIGH, 0);
+}
+
+static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
+ struct netlink_callback *cb)
+{
+ struct net * net = sock_net(skb->sk);
+ struct neighbour *n;
+ int rc, h, s_h = cb->args[1];
+ int idx, s_idx = idx = cb->args[2];
+
+ read_lock_bh(&tbl->lock);
+ for (h = 0; h <= tbl->hash_mask; h++) {
+ if (h < s_h)
+ continue;
+ if (h > s_h)
+ s_idx = 0;
+ for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
+ if (dev_net(n->dev) != net)
+ continue;
+ if (idx < s_idx)
+ goto next;
+ if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWNEIGH,
+ NLM_F_MULTI) <= 0) {
+ read_unlock_bh(&tbl->lock);
+ rc = -1;
+ goto out;
+ }
+ next:
+ idx++;
+ }
+ }
+ read_unlock_bh(&tbl->lock);
+ rc = skb->len;
+out:
+ cb->args[1] = h;
+ cb->args[2] = idx;
+ return rc;
+}
+
+static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct neigh_table *tbl;
+ int t, family, s_t;
+
+ read_lock(&neigh_tbl_lock);
+ family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
+ s_t = cb->args[0];
+
+ for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
+ if (t < s_t || (family && tbl->family != family))
+ continue;
+ if (t > s_t)
+ memset(&cb->args[1], 0, sizeof(cb->args) -
+ sizeof(cb->args[0]));
+ if (neigh_dump_table(tbl, skb, cb) < 0)
+ break;
+ }
+ read_unlock(&neigh_tbl_lock);
+
+ cb->args[0] = t;
+ return skb->len;
+}
+
+void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
+{
+ int chain;
+
+ read_lock_bh(&tbl->lock);
+ for (chain = 0; chain <= tbl->hash_mask; chain++) {
+ struct neighbour *n;
+
+ for (n = tbl->hash_buckets[chain]; n; n = n->next)
+ cb(n, cookie);
+ }
+ read_unlock_bh(&tbl->lock);
+}
+EXPORT_SYMBOL(neigh_for_each);
+
+/* The tbl->lock must be held as a writer and BH disabled. */
+void __neigh_for_each_release(struct neigh_table *tbl,
+ int (*cb)(struct neighbour *))
+{
+ int chain;
+
+ for (chain = 0; chain <= tbl->hash_mask; chain++) {
+ struct neighbour *n, **np;
+
+ np = &tbl->hash_buckets[chain];
+ while ((n = *np) != NULL) {
+ int release;
+
+ write_lock(&n->lock);
+ release = cb(n);
+ if (release) {
+ *np = n->next;
+ n->dead = 1;
+ } else
+ np = &n->next;
+ write_unlock(&n->lock);
+ if (release)
+ neigh_cleanup_and_release(n);
+ }
+ }
+}
+EXPORT_SYMBOL(__neigh_for_each_release);
+
+#ifdef CONFIG_PROC_FS
+
+static struct neighbour *neigh_get_first(struct seq_file *seq)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+ struct neighbour *n = NULL;
+ int bucket = state->bucket;
+
+ state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
+ for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
+ n = tbl->hash_buckets[bucket];
+
+ while (n) {
+ if (!net_eq(dev_net(n->dev), net))
+ goto next;
+ if (state->neigh_sub_iter) {
+ loff_t fakep = 0;
+ void *v;
+
+ v = state->neigh_sub_iter(state, n, &fakep);
+ if (!v)
+ goto next;
+ }
+ if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ break;
+ if (n->nud_state & ~NUD_NOARP)
+ break;
+ next:
+ n = n->next;
+ }
+
+ if (n)
+ break;
+ }
+ state->bucket = bucket;
+
+ return n;
+}
+
+static struct neighbour *neigh_get_next(struct seq_file *seq,
+ struct neighbour *n,
+ loff_t *pos)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+
+ if (state->neigh_sub_iter) {
+ void *v = state->neigh_sub_iter(state, n, pos);
+ if (v)
+ return n;
+ }
+ n = n->next;
+
+ while (1) {
+ while (n) {
+ if (!net_eq(dev_net(n->dev), net))
+ goto next;
+ if (state->neigh_sub_iter) {
+ void *v = state->neigh_sub_iter(state, n, pos);
+ if (v)
+ return n;
+ goto next;
+ }
+ if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
+ break;
+
+ if (n->nud_state & ~NUD_NOARP)
+ break;
+ next:
+ n = n->next;
+ }
+
+ if (n)
+ break;
+
+ if (++state->bucket > tbl->hash_mask)
+ break;
+
+ n = tbl->hash_buckets[state->bucket];
+ }
+
+ if (n && pos)
+ --(*pos);
+ return n;
+}
+
+static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
+{
+ struct neighbour *n = neigh_get_first(seq);
+
+ if (n) {
+ --(*pos);
+ while (*pos) {
+ n = neigh_get_next(seq, n, pos);
+ if (!n)
+ break;
+ }
+ }
+ return *pos ? NULL : n;
+}
+
+static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+ struct pneigh_entry *pn = NULL;
+ int bucket = state->bucket;
+
+ state->flags |= NEIGH_SEQ_IS_PNEIGH;
+ for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
+ pn = tbl->phash_buckets[bucket];
+ while (pn && !net_eq(pneigh_net(pn), net))
+ pn = pn->next;
+ if (pn)
+ break;
+ }
+ state->bucket = bucket;
+
+ return pn;
+}
+
+static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
+ struct pneigh_entry *pn,
+ loff_t *pos)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct net *net = seq_file_net(seq);
+ struct neigh_table *tbl = state->tbl;
+
+ pn = pn->next;
+ while (!pn) {
+ if (++state->bucket > PNEIGH_HASHMASK)
+ break;
+ pn = tbl->phash_buckets[state->bucket];
+ while (pn && !net_eq(pneigh_net(pn), net))
+ pn = pn->next;
+ if (pn)
+ break;
+ }
+
+ if (pn && pos)
+ --(*pos);
+
+ return pn;
+}
+
+static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
+{
+ struct pneigh_entry *pn = pneigh_get_first(seq);
+
+ if (pn) {
+ --(*pos);
+ while (*pos) {
+ pn = pneigh_get_next(seq, pn, pos);
+ if (!pn)
+ break;
+ }
+ }
+ return *pos ? NULL : pn;
+}
+
+static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
+{
+ struct neigh_seq_state *state = seq->private;
+ void *rc;
+ loff_t idxpos = *pos;
+
+ rc = neigh_get_idx(seq, &idxpos);
+ if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
+ rc = pneigh_get_idx(seq, &idxpos);
+
+ return rc;
+}
+
+void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
+ __acquires(tbl->lock)
+{
+ struct neigh_seq_state *state = seq->private;
+
+ state->tbl = tbl;
+ state->bucket = 0;
+ state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
+
+ read_lock_bh(&tbl->lock);
+
+ return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
+}
+EXPORT_SYMBOL(neigh_seq_start);
+
+void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct neigh_seq_state *state;
+ void *rc;
+
+ if (v == SEQ_START_TOKEN) {
+ rc = neigh_get_first(seq);
+ goto out;
+ }
+
+ state = seq->private;
+ if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
+ rc = neigh_get_next(seq, v, NULL);
+ if (rc)
+ goto out;
+ if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
+ rc = pneigh_get_first(seq);
+ } else {
+ BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
+ rc = pneigh_get_next(seq, v, NULL);
+ }
+out:
+ ++(*pos);
+ return rc;
+}
+EXPORT_SYMBOL(neigh_seq_next);
+
+void neigh_seq_stop(struct seq_file *seq, void *v)
+ __releases(tbl->lock)
+{
+ struct neigh_seq_state *state = seq->private;
+ struct neigh_table *tbl = state->tbl;
+
+ read_unlock_bh(&tbl->lock);
+}
+EXPORT_SYMBOL(neigh_seq_stop);
+
+/* statistics via seq_file */
+
+static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ int cpu;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
+ return NULL;
+}
+
+static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ int cpu;
+
+ for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
+ return NULL;
+}
+
+static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
+{
+
+}
+
+static int neigh_stat_seq_show(struct seq_file *seq, void *v)
+{
+ struct proc_dir_entry *pde = seq->private;
+ struct neigh_table *tbl = pde->data;
+ struct neigh_statistics *st = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
+ "%08lx %08lx %08lx %08lx %08lx\n",
+ atomic_read(&tbl->entries),
+
+ st->allocs,
+ st->destroys,
+ st->hash_grows,
+
+ st->lookups,
+ st->hits,
+
+ st->res_failed,
+
+ st->rcv_probes_mcast,
+ st->rcv_probes_ucast,
+
+ st->periodic_gc_runs,
+ st->forced_gc_runs,
+ st->unres_discards
+ );
+
+ return 0;
+}
+
+static const struct seq_operations neigh_stat_seq_ops = {
+ .start = neigh_stat_seq_start,
+ .next = neigh_stat_seq_next,
+ .stop = neigh_stat_seq_stop,
+ .show = neigh_stat_seq_show,
+};
+
+static int neigh_stat_seq_open(struct inode *inode, struct file *file)
+{
+ int ret = seq_open(file, &neigh_stat_seq_ops);
+
+ if (!ret) {
+ struct seq_file *sf = file->private_data;
+ sf->private = PDE(inode);
+ }
+ return ret;
+};
+
+static const struct file_operations neigh_stat_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = neigh_stat_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+#endif /* CONFIG_PROC_FS */
+
+static inline size_t neigh_nlmsg_size(void)
+{
+ return NLMSG_ALIGN(sizeof(struct ndmsg))
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
+ + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
+ + nla_total_size(sizeof(struct nda_cacheinfo))
+ + nla_total_size(4); /* NDA_PROBES */
+}
+
+static void __neigh_notify(struct neighbour *n, int type, int flags)
+{
+ struct net *net = dev_net(n->dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
+ if (skb == NULL)
+ goto errout;
+
+ err = neigh_fill_info(skb, n, 0, 0, type, flags);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
+errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
+}
+
+#ifdef CONFIG_ARPD
+void neigh_app_ns(struct neighbour *n)
+{
+ __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
+}
+EXPORT_SYMBOL(neigh_app_ns);
+#endif /* CONFIG_ARPD */
+
+#ifdef CONFIG_SYSCTL
+
+static struct neigh_sysctl_table {
+ struct ctl_table_header *sysctl_header;
+ struct ctl_table neigh_vars[__NET_NEIGH_MAX];
+ char *dev_name;
+} neigh_sysctl_template __read_mostly = {
+ .neigh_vars = {
+ {
+ .ctl_name = NET_NEIGH_MCAST_SOLICIT,
+ .procname = "mcast_solicit",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_UCAST_SOLICIT,
+ .procname = "ucast_solicit",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_APP_SOLICIT,
+ .procname = "app_solicit",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "retrans_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_REACHABLE_TIME,
+ .procname = "base_reachable_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
+ .procname = "delay_first_probe_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_STALE_TIME,
+ .procname = "gc_stale_time",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_UNRES_QLEN,
+ .procname = "unres_qlen",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_PROXY_QLEN,
+ .procname = "proxy_qlen",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .procname = "anycast_delay",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .procname = "proxy_delay",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .procname = "locktime",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_userhz_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
+ .procname = "retrans_time_ms",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
+ .procname = "base_reachable_time_ms",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_ms_jiffies,
+ .strategy = sysctl_ms_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_INTERVAL,
+ .procname = "gc_interval",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ .strategy = sysctl_jiffies,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_THRESH1,
+ .procname = "gc_thresh1",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_THRESH2,
+ .procname = "gc_thresh2",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NEIGH_GC_THRESH3,
+ .procname = "gc_thresh3",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ },
+ {},
+ },
+};
+
+int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
+ int p_id, int pdev_id, char *p_name,
+ proc_handler *handler, ctl_handler *strategy)
+{
+ struct neigh_sysctl_table *t;
+ const char *dev_name_source = NULL;
+
+#define NEIGH_CTL_PATH_ROOT 0
+#define NEIGH_CTL_PATH_PROTO 1
+#define NEIGH_CTL_PATH_NEIGH 2
+#define NEIGH_CTL_PATH_DEV 3
+
+ struct ctl_path neigh_path[] = {
+ { .procname = "net", .ctl_name = CTL_NET, },
+ { .procname = "proto", .ctl_name = 0, },
+ { .procname = "neigh", .ctl_name = 0, },
+ { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
+ { },
+ };
+
+ t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
+ if (!t)
+ goto err;
+
+ t->neigh_vars[0].data = &p->mcast_probes;
+ t->neigh_vars[1].data = &p->ucast_probes;
+ t->neigh_vars[2].data = &p->app_probes;
+ t->neigh_vars[3].data = &p->retrans_time;
+ t->neigh_vars[4].data = &p->base_reachable_time;
+ t->neigh_vars[5].data = &p->delay_probe_time;
+ t->neigh_vars[6].data = &p->gc_staletime;
+ t->neigh_vars[7].data = &p->queue_len;
+ t->neigh_vars[8].data = &p->proxy_qlen;
+ t->neigh_vars[9].data = &p->anycast_delay;
+ t->neigh_vars[10].data = &p->proxy_delay;
+ t->neigh_vars[11].data = &p->locktime;
+ t->neigh_vars[12].data = &p->retrans_time;
+ t->neigh_vars[13].data = &p->base_reachable_time;
+
+ if (dev) {
+ dev_name_source = dev->name;
+ neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
+ /* Terminate the table early */
+ memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
+ } else {
+ dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
+ t->neigh_vars[14].data = (int *)(p + 1);
+ t->neigh_vars[15].data = (int *)(p + 1) + 1;
+ t->neigh_vars[16].data = (int *)(p + 1) + 2;
+ t->neigh_vars[17].data = (int *)(p + 1) + 3;
+ }
+
+
+ if (handler || strategy) {
+ /* RetransTime */
+ t->neigh_vars[3].proc_handler = handler;
+ t->neigh_vars[3].strategy = strategy;
+ t->neigh_vars[3].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
+ /* ReachableTime */
+ t->neigh_vars[4].proc_handler = handler;
+ t->neigh_vars[4].strategy = strategy;
+ t->neigh_vars[4].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
+ /* RetransTime (in milliseconds)*/
+ t->neigh_vars[12].proc_handler = handler;
+ t->neigh_vars[12].strategy = strategy;
+ t->neigh_vars[12].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
+ /* ReachableTime (in milliseconds) */
+ t->neigh_vars[13].proc_handler = handler;
+ t->neigh_vars[13].strategy = strategy;
+ t->neigh_vars[13].extra1 = dev;
+ if (!strategy)
+ t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
+ }
+
+ t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
+ if (!t->dev_name)
+ goto free;
+
+ neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
+ neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
+ neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
+ neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
+
+ t->sysctl_header =
+ register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
+ if (!t->sysctl_header)
+ goto free_procname;
+
+ p->sysctl_table = t;
+ return 0;
+
+free_procname:
+ kfree(t->dev_name);
+free:
+ kfree(t);
+err:
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(neigh_sysctl_register);
+
+void neigh_sysctl_unregister(struct neigh_parms *p)
+{
+ if (p->sysctl_table) {
+ struct neigh_sysctl_table *t = p->sysctl_table;
+ p->sysctl_table = NULL;
+ unregister_sysctl_table(t->sysctl_header);
+ kfree(t->dev_name);
+ kfree(t);
+ }
+}
+EXPORT_SYMBOL(neigh_sysctl_unregister);
+
+#endif /* CONFIG_SYSCTL */
+
+static int __init neigh_init(void)
+{
+ rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
+ rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
+ rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
+
+ rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
+ rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
+
+ return 0;
+}
+
+subsys_initcall(neigh_init);
+
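Before moving on to the sysfs code, a brief sketch of how a protocol family would typically drive the API exported by this file may help tie the pieces together. This is an illustrative outline only; my_tbl, my_proto_init() and my_receive_reply() are made-up names and are not part of this patch:

/* Hypothetical glue for an address-resolution protocol, assuming the
 * neigh_table has already been populated with family, key_len, hash and
 * constructor callbacks elsewhere. */
static struct neigh_table my_tbl;

static int my_proto_init(void)
{
	/* Registers the cache, its gc/proxy timers and the /proc statistics
	 * entry, exactly as neigh_table_init() above does. */
	neigh_table_init(&my_tbl);
	return 0;
}

/* On transmit, the dst's neighbour output hook (normally
 * neigh_resolve_output() above) queues packets and probes the peer until
 * the entry reaches NUD_REACHABLE; the protocol itself mainly has to react
 * to replies, much as the RTM_NEWNEIGH handler above does: */
static void my_receive_reply(const void *saddr, const u8 *lladdr,
			     struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(&my_tbl, saddr, dev);

	if (n) {
		neigh_update(n, lladdr, NUD_REACHABLE,
			     NEIGH_UPDATE_F_OVERRIDE);
		neigh_release(n);
	}
}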
diff --git a/libdde_linux26/contrib/net/core/net-sysfs.c b/libdde_linux26/contrib/net/core/net-sysfs.c
new file mode 100644
index 00000000..484f5875
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/net-sysfs.c
@@ -0,0 +1,540 @@
+/*
+ * net-sysfs.c - network device class and attributes
+ *
+ * Copyright (c) 2003 Stephen Hemminger <shemminger@osdl.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+#include <linux/rtnetlink.h>
+#include <linux/wireless.h>
+#include <net/iw_handler.h>
+
+#include "net-sysfs.h"
+
+#ifdef CONFIG_SYSFS
+static const char fmt_hex[] = "%#x\n";
+static const char fmt_long_hex[] = "%#lx\n";
+static const char fmt_dec[] = "%d\n";
+static const char fmt_ulong[] = "%lu\n";
+
+static inline int dev_isalive(const struct net_device *dev)
+{
+ return dev->reg_state <= NETREG_REGISTERED;
+}
+
+/* use same locking rules as GIF* ioctl's */
+static ssize_t netdev_show(const struct device *dev,
+ struct device_attribute *attr, char *buf,
+ ssize_t (*format)(const struct net_device *, char *))
+{
+ struct net_device *net = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(net))
+ ret = (*format)(net, buf);
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+/* generate a show function for a simple field */
+#define NETDEVICE_SHOW(field, format_string) \
+static ssize_t format_##field(const struct net_device *net, char *buf) \
+{ \
+ return sprintf(buf, format_string, net->field); \
+} \
+static ssize_t show_##field(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return netdev_show(dev, attr, buf, format_##field); \
+}
+
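For reference, here is roughly what one instantiation of this macro produces; the expansion below is written out by hand for the mtu/fmt_dec pair used further down, so treat it as illustrative rather than preprocessor output:

/* NETDEVICE_SHOW(mtu, fmt_dec) expands to approximately: */
static ssize_t format_mtu(const struct net_device *net, char *buf)
{
	return sprintf(buf, "%d\n", net->mtu);
}
static ssize_t show_mtu(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	return netdev_show(dev, attr, buf, format_mtu);
}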
+
+/* use same locking and permission rules as SIF* ioctl's */
+static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len,
+ int (*set)(struct net_device *, unsigned long))
+{
+ struct net_device *net = to_net_dev(dev);
+ char *endp;
+ unsigned long new;
+ int ret = -EINVAL;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ new = simple_strtoul(buf, &endp, 0);
+ if (endp == buf)
+ goto err;
+
+ if (!rtnl_trylock())
+ return -ERESTARTSYS;
+
+ if (dev_isalive(net)) {
+ if ((ret = (*set)(net, new)) == 0)
+ ret = len;
+ }
+ rtnl_unlock();
+ err:
+ return ret;
+}
+
+NETDEVICE_SHOW(dev_id, fmt_hex);
+NETDEVICE_SHOW(addr_len, fmt_dec);
+NETDEVICE_SHOW(iflink, fmt_dec);
+NETDEVICE_SHOW(ifindex, fmt_dec);
+NETDEVICE_SHOW(features, fmt_long_hex);
+NETDEVICE_SHOW(type, fmt_dec);
+NETDEVICE_SHOW(link_mode, fmt_dec);
+
+/* use same locking rules as GIFHWADDR ioctl's */
+static ssize_t show_address(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(net))
+ ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
+ read_unlock(&dev_base_lock);
+ return ret;
+}
+
+static ssize_t show_broadcast(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *net = to_net_dev(dev);
+ if (dev_isalive(net))
+ return sysfs_format_mac(buf, net->broadcast, net->addr_len);
+ return -EINVAL;
+}
+
+static ssize_t show_carrier(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ if (netif_running(netdev)) {
+ return sprintf(buf, fmt_dec, !!netif_carrier_ok(netdev));
+ }
+ return -EINVAL;
+}
+
+static ssize_t show_dormant(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct net_device *netdev = to_net_dev(dev);
+
+ if (netif_running(netdev))
+ return sprintf(buf, fmt_dec, !!netif_dormant(netdev));
+
+ return -EINVAL;
+}
+
+static const char *operstates[] = {
+ "unknown",
+ "notpresent", /* currently unused */
+ "down",
+ "lowerlayerdown",
+ "testing", /* currently unused */
+ "dormant",
+ "up"
+};
+
+static ssize_t show_operstate(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct net_device *netdev = to_net_dev(dev);
+ unsigned char operstate;
+
+ read_lock(&dev_base_lock);
+ operstate = netdev->operstate;
+ if (!netif_running(netdev))
+ operstate = IF_OPER_DOWN;
+ read_unlock(&dev_base_lock);
+
+ if (operstate >= ARRAY_SIZE(operstates))
+ return -EINVAL; /* should not happen */
+
+ return sprintf(buf, "%s\n", operstates[operstate]);
+}
+
+/* read-write attributes */
+NETDEVICE_SHOW(mtu, fmt_dec);
+
+static int change_mtu(struct net_device *net, unsigned long new_mtu)
+{
+ return dev_set_mtu(net, (int) new_mtu);
+}
+
+static ssize_t store_mtu(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_mtu);
+}
+
+NETDEVICE_SHOW(flags, fmt_hex);
+
+static int change_flags(struct net_device *net, unsigned long new_flags)
+{
+ return dev_change_flags(net, (unsigned) new_flags);
+}
+
+static ssize_t store_flags(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_flags);
+}
+
+NETDEVICE_SHOW(tx_queue_len, fmt_ulong);
+
+static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
+{
+ net->tx_queue_len = new_len;
+ return 0;
+}
+
+static ssize_t store_tx_queue_len(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ return netdev_store(dev, attr, buf, len, change_tx_queue_len);
+}
+
+static ssize_t store_ifalias(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct net_device *netdev = to_net_dev(dev);
+ size_t count = len;
+ ssize_t ret;
+
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ /* ignore trailing newline */
+ if (len > 0 && buf[len - 1] == '\n')
+ --count;
+
+ rtnl_lock();
+ ret = dev_set_alias(netdev, buf, count);
+ rtnl_unlock();
+
+ return ret < 0 ? ret : len;
+}
+
+static ssize_t show_ifalias(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const struct net_device *netdev = to_net_dev(dev);
+ ssize_t ret = 0;
+
+ rtnl_lock();
+ if (netdev->ifalias)
+ ret = sprintf(buf, "%s\n", netdev->ifalias);
+ rtnl_unlock();
+ return ret;
+}
+
+static struct device_attribute net_class_attributes[] = {
+ __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
+ __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
+ __ATTR(ifalias, S_IRUGO | S_IWUSR, show_ifalias, store_ifalias),
+ __ATTR(iflink, S_IRUGO, show_iflink, NULL),
+ __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
+ __ATTR(features, S_IRUGO, show_features, NULL),
+ __ATTR(type, S_IRUGO, show_type, NULL),
+ __ATTR(link_mode, S_IRUGO, show_link_mode, NULL),
+ __ATTR(address, S_IRUGO, show_address, NULL),
+ __ATTR(broadcast, S_IRUGO, show_broadcast, NULL),
+ __ATTR(carrier, S_IRUGO, show_carrier, NULL),
+ __ATTR(dormant, S_IRUGO, show_dormant, NULL),
+ __ATTR(operstate, S_IRUGO, show_operstate, NULL),
+ __ATTR(mtu, S_IRUGO | S_IWUSR, show_mtu, store_mtu),
+ __ATTR(flags, S_IRUGO | S_IWUSR, show_flags, store_flags),
+ __ATTR(tx_queue_len, S_IRUGO | S_IWUSR, show_tx_queue_len,
+ store_tx_queue_len),
+ {}
+};
+
+/* Show a given attribute in the statistics group */
+static ssize_t netstat_show(const struct device *d,
+ struct device_attribute *attr, char *buf,
+ unsigned long offset)
+{
+ struct net_device *dev = to_net_dev(d);
+ ssize_t ret = -EINVAL;
+
+ WARN_ON(offset > sizeof(struct net_device_stats) ||
+ offset % sizeof(unsigned long) != 0);
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(dev)) {
+ const struct net_device_stats *stats = dev_get_stats(dev);
+ ret = sprintf(buf, fmt_ulong,
+ *(unsigned long *)(((u8 *) stats) + offset));
+ }
+ read_unlock(&dev_base_lock);
+ return ret;
+}
+
+/* generate a read-only statistics attribute */
+#define NETSTAT_ENTRY(name) \
+static ssize_t show_##name(struct device *d, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return netstat_show(d, attr, buf, \
+ offsetof(struct net_device_stats, name)); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
+
+NETSTAT_ENTRY(rx_packets);
+NETSTAT_ENTRY(tx_packets);
+NETSTAT_ENTRY(rx_bytes);
+NETSTAT_ENTRY(tx_bytes);
+NETSTAT_ENTRY(rx_errors);
+NETSTAT_ENTRY(tx_errors);
+NETSTAT_ENTRY(rx_dropped);
+NETSTAT_ENTRY(tx_dropped);
+NETSTAT_ENTRY(multicast);
+NETSTAT_ENTRY(collisions);
+NETSTAT_ENTRY(rx_length_errors);
+NETSTAT_ENTRY(rx_over_errors);
+NETSTAT_ENTRY(rx_crc_errors);
+NETSTAT_ENTRY(rx_frame_errors);
+NETSTAT_ENTRY(rx_fifo_errors);
+NETSTAT_ENTRY(rx_missed_errors);
+NETSTAT_ENTRY(tx_aborted_errors);
+NETSTAT_ENTRY(tx_carrier_errors);
+NETSTAT_ENTRY(tx_fifo_errors);
+NETSTAT_ENTRY(tx_heartbeat_errors);
+NETSTAT_ENTRY(tx_window_errors);
+NETSTAT_ENTRY(rx_compressed);
+NETSTAT_ENTRY(tx_compressed);
+
+static struct attribute *netstat_attrs[] = {
+ &dev_attr_rx_packets.attr,
+ &dev_attr_tx_packets.attr,
+ &dev_attr_rx_bytes.attr,
+ &dev_attr_tx_bytes.attr,
+ &dev_attr_rx_errors.attr,
+ &dev_attr_tx_errors.attr,
+ &dev_attr_rx_dropped.attr,
+ &dev_attr_tx_dropped.attr,
+ &dev_attr_multicast.attr,
+ &dev_attr_collisions.attr,
+ &dev_attr_rx_length_errors.attr,
+ &dev_attr_rx_over_errors.attr,
+ &dev_attr_rx_crc_errors.attr,
+ &dev_attr_rx_frame_errors.attr,
+ &dev_attr_rx_fifo_errors.attr,
+ &dev_attr_rx_missed_errors.attr,
+ &dev_attr_tx_aborted_errors.attr,
+ &dev_attr_tx_carrier_errors.attr,
+ &dev_attr_tx_fifo_errors.attr,
+ &dev_attr_tx_heartbeat_errors.attr,
+ &dev_attr_tx_window_errors.attr,
+ &dev_attr_rx_compressed.attr,
+ &dev_attr_tx_compressed.attr,
+ NULL
+};
+
+
+static struct attribute_group netstat_group = {
+ .name = "statistics",
+ .attrs = netstat_attrs,
+};
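+
+/*
+ * Each entry in the "statistics" group above surfaces as a read-only sysfs
+ * file once the device is registered, e.g.
+ * /sys/class/net/<iface>/statistics/rx_packets, rendered by netstat_show()
+ * using fmt_ulong.
+ */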
+
+#ifdef CONFIG_WIRELESS_EXT_SYSFS
+/* helper function that does all the locking etc for wireless stats */
+static ssize_t wireless_show(struct device *d, char *buf,
+ ssize_t (*format)(const struct iw_statistics *,
+ char *))
+{
+ struct net_device *dev = to_net_dev(d);
+ const struct iw_statistics *iw = NULL;
+ ssize_t ret = -EINVAL;
+
+ read_lock(&dev_base_lock);
+ if (dev_isalive(dev)) {
+ if (dev->wireless_handlers &&
+ dev->wireless_handlers->get_wireless_stats)
+ iw = dev->wireless_handlers->get_wireless_stats(dev);
+ if (iw != NULL)
+ ret = (*format)(iw, buf);
+ }
+ read_unlock(&dev_base_lock);
+
+ return ret;
+}
+
+/* show function template for wireless fields */
+#define WIRELESS_SHOW(name, field, format_string) \
+static ssize_t format_iw_##name(const struct iw_statistics *iw, char *buf) \
+{ \
+ return sprintf(buf, format_string, iw->field); \
+} \
+static ssize_t show_iw_##name(struct device *d, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ return wireless_show(d, buf, format_iw_##name); \
+} \
+static DEVICE_ATTR(name, S_IRUGO, show_iw_##name, NULL)
+
+WIRELESS_SHOW(status, status, fmt_hex);
+WIRELESS_SHOW(link, qual.qual, fmt_dec);
+WIRELESS_SHOW(level, qual.level, fmt_dec);
+WIRELESS_SHOW(noise, qual.noise, fmt_dec);
+WIRELESS_SHOW(nwid, discard.nwid, fmt_dec);
+WIRELESS_SHOW(crypt, discard.code, fmt_dec);
+WIRELESS_SHOW(fragment, discard.fragment, fmt_dec);
+WIRELESS_SHOW(misc, discard.misc, fmt_dec);
+WIRELESS_SHOW(retries, discard.retries, fmt_dec);
+WIRELESS_SHOW(beacon, miss.beacon, fmt_dec);
+
+static struct attribute *wireless_attrs[] = {
+ &dev_attr_status.attr,
+ &dev_attr_link.attr,
+ &dev_attr_level.attr,
+ &dev_attr_noise.attr,
+ &dev_attr_nwid.attr,
+ &dev_attr_crypt.attr,
+ &dev_attr_fragment.attr,
+ &dev_attr_retries.attr,
+ &dev_attr_misc.attr,
+ &dev_attr_beacon.attr,
+ NULL
+};
+
+static struct attribute_group wireless_group = {
+ .name = "wireless",
+ .attrs = wireless_attrs,
+};
+#endif
+
+#endif /* CONFIG_SYSFS */
+
+#ifdef CONFIG_HOTPLUG
+static int netdev_uevent(struct device *d, struct kobj_uevent_env *env)
+{
+ struct net_device *dev = to_net_dev(d);
+ int retval;
+
+ if (!net_eq(dev_net(dev), &init_net))
+ return 0;
+
+ /* pass interface to uevent. */
+ retval = add_uevent_var(env, "INTERFACE=%s", dev->name);
+ if (retval)
+ goto exit;
+
+ /* pass ifindex to uevent.
+ * ifindex is useful as it won't change (interface name may change)
+ * and is what RtNetlink uses natively. */
+ retval = add_uevent_var(env, "IFINDEX=%d", dev->ifindex);
+
+exit:
+ return retval;
+}
+#endif
+
+/*
+ * netdev_release -- destroy and free a dead device.
+ * Called when last reference to device kobject is gone.
+ */
+static void netdev_release(struct device *d)
+{
+ struct net_device *dev = to_net_dev(d);
+
+ BUG_ON(dev->reg_state != NETREG_RELEASED);
+
+ kfree(dev->ifalias);
+ kfree((char *)dev - dev->padded);
+}
+
+static struct class net_class = {
+ .name = "net",
+ .dev_release = netdev_release,
+#ifdef CONFIG_SYSFS
+ .dev_attrs = net_class_attributes,
+#endif /* CONFIG_SYSFS */
+#ifdef CONFIG_HOTPLUG
+ .dev_uevent = netdev_uevent,
+#endif
+};
+
+/* Delete sysfs entries but hold kobject reference until after all
+ * netdev references are gone.
+ */
+void netdev_unregister_kobject(struct net_device * net)
+{
+ struct device *dev = &(net->dev);
+
+ kobject_get(&dev->kobj);
+
+ if (dev_net(net) != &init_net)
+ return;
+
+ device_del(dev);
+}
+
+/* Create sysfs entries for network device. */
+int netdev_register_kobject(struct net_device *net)
+{
+ struct device *dev = &(net->dev);
+ struct attribute_group **groups = net->sysfs_groups;
+
+ dev->class = &net_class;
+ dev->platform_data = net;
+ dev->groups = groups;
+
+ BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
+ dev_set_name(dev, net->name);
+
+#ifdef CONFIG_SYSFS
+ *groups++ = &netstat_group;
+
+#ifdef CONFIG_WIRELESS_EXT_SYSFS
+ if (net->wireless_handlers && net->wireless_handlers->get_wireless_stats)
+ *groups++ = &wireless_group;
+#endif
+#endif /* CONFIG_SYSFS */
+
+ if (dev_net(net) != &init_net)
+ return 0;
+
+ return device_add(dev);
+}
+
+int netdev_class_create_file(struct class_attribute *class_attr)
+{
+ return class_create_file(&net_class, class_attr);
+}
+
+void netdev_class_remove_file(struct class_attribute *class_attr)
+{
+ class_remove_file(&net_class, class_attr);
+}
+
+EXPORT_SYMBOL(netdev_class_create_file);
+EXPORT_SYMBOL(netdev_class_remove_file);
+
+void netdev_initialize_kobject(struct net_device *net)
+{
+ struct device *device = &(net->dev);
+ device_initialize(device);
+}
+
+int netdev_kobject_init(void)
+{
+ return class_register(&net_class);
+}
diff --git a/libdde_linux26/contrib/net/core/net-sysfs.h b/libdde_linux26/contrib/net/core/net-sysfs.h
new file mode 100644
index 00000000..14e75242
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/net-sysfs.h
@@ -0,0 +1,8 @@
+#ifndef __NET_SYSFS_H__
+#define __NET_SYSFS_H__
+
+int netdev_kobject_init(void);
+int netdev_register_kobject(struct net_device *);
+void netdev_unregister_kobject(struct net_device *);
+void netdev_initialize_kobject(struct net_device *);
+#endif
diff --git a/libdde_linux26/contrib/net/core/netevent.c b/libdde_linux26/contrib/net/core/netevent.c
new file mode 100644
index 00000000..95f81de8
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/netevent.c
@@ -0,0 +1,70 @@
+/*
+ * Network event notifiers
+ *
+ * Authors:
+ * Tom Tucker <tom@opengridcomputing.com>
+ * Steve Wise <swise@opengridcomputing.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Fixes:
+ */
+
+#include <linux/rtnetlink.h>
+#include <linux/notifier.h>
+#include <net/netevent.h>
+
+static ATOMIC_NOTIFIER_HEAD(netevent_notif_chain);
+
+/**
+ * register_netevent_notifier - register a netevent notifier block
+ * @nb: notifier
+ *
+ * Register a notifier to be called when a netevent occurs.
+ * The notifier passed is linked into the kernel structures and must
+ * not be reused until it has been unregistered. A negative errno code
+ * is returned on a failure.
+ */
+int register_netevent_notifier(struct notifier_block *nb)
+{
+ int err;
+
+ err = atomic_notifier_chain_register(&netevent_notif_chain, nb);
+ return err;
+}
+
+/**
+ * unregister_netevent_notifier - unregister a netevent notifier block
+ * @nb: notifier
+ *
+ * Unregister a notifier previously registered by
+ * register_netevent_notifier(). The notifier is unlinked from the
+ * kernel structures and may then be reused. A negative errno code
+ * is returned on a failure.
+ */
+
+int unregister_netevent_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&netevent_notif_chain, nb);
+}
+
+/**
+ * call_netevent_notifiers - call all netevent notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @v: pointer passed unmodified to notifier function
+ *
+ * Call all netevent notifier blocks. Parameters and return value
+ * are as for notifier_call_chain().
+ */
+
+int call_netevent_notifiers(unsigned long val, void *v)
+{
+ return atomic_notifier_call_chain(&netevent_notif_chain, val, v);
+}
+
+EXPORT_SYMBOL_GPL(register_netevent_notifier);
+EXPORT_SYMBOL_GPL(unregister_netevent_notifier);
+EXPORT_SYMBOL_GPL(call_netevent_notifiers);
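+
+/*
+ * Minimal usage sketch (illustrative only; the callback and variable names
+ * below are assumptions, not part of this file): a client registers a
+ * notifier_block and reacts to the event types defined in <net/netevent.h>.
+ */
+#if 0
+static int example_netevent_cb(struct notifier_block *nb,
+			       unsigned long event, void *ctx)
+{
+	/* ctx carries the event payload, e.g. a struct neighbour for
+	 * NETEVENT_NEIGH_UPDATE */
+	if (event == NETEVENT_NEIGH_UPDATE)
+		printk(KERN_DEBUG "example: neighbour entry updated\n");
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block example_netevent_nb = {
+	.notifier_call = example_netevent_cb,
+};
+
+/* module init:  register_netevent_notifier(&example_netevent_nb);   */
+/* module exit:  unregister_netevent_notifier(&example_netevent_nb); */
+#endif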
diff --git a/libdde_linux26/contrib/net/core/rtnetlink.c b/libdde_linux26/contrib/net/core/rtnetlink.c
new file mode 100644
index 00000000..790dd205
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/rtnetlink.c
@@ -0,0 +1,1430 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Routing netlink socket interface: protocol independent part.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Fixes:
+ * Vitaly E. Lavrov RTA_OK arithmetics was wrong.
+ */
+
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/fcntl.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/capability.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/security.h>
+#include <linux/mutex.h>
+#include <linux/if_addr.h>
+#include <linux/nsproxy.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/string.h>
+
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/arp.h>
+#include <net/route.h>
+#include <net/udp.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <net/fib_rules.h>
+#include <net/rtnetlink.h>
+
+struct rtnl_link
+{
+ rtnl_doit_func doit;
+ rtnl_dumpit_func dumpit;
+};
+
+static DEFINE_MUTEX(rtnl_mutex);
+
+void rtnl_lock(void)
+{
+ mutex_lock(&rtnl_mutex);
+}
+
+void __rtnl_unlock(void)
+{
+ mutex_unlock(&rtnl_mutex);
+}
+
+void rtnl_unlock(void)
+{
+ /* This fellow will unlock it for us. */
+ netdev_run_todo();
+}
+
+int rtnl_trylock(void)
+{
+ return mutex_trylock(&rtnl_mutex);
+}
+
+int rtnl_is_locked(void)
+{
+ return mutex_is_locked(&rtnl_mutex);
+}
+
+static struct rtnl_link *rtnl_msg_handlers[NPROTO];
+
+static inline int rtm_msgindex(int msgtype)
+{
+ int msgindex = msgtype - RTM_BASE;
+
+ /*
+ * msgindex < 0 implies someone tried to register a netlink
+ * control code. msgindex >= RTM_NR_MSGTYPES may indicate that
+ * the message type has not been added to linux/rtnetlink.h
+ */
+ BUG_ON(msgindex < 0 || msgindex >= RTM_NR_MSGTYPES);
+
+ return msgindex;
+}
+
+static rtnl_doit_func rtnl_get_doit(int protocol, int msgindex)
+{
+ struct rtnl_link *tab;
+
+ tab = rtnl_msg_handlers[protocol];
+ if (tab == NULL || tab[msgindex].doit == NULL)
+ tab = rtnl_msg_handlers[PF_UNSPEC];
+
+ return tab ? tab[msgindex].doit : NULL;
+}
+
+static rtnl_dumpit_func rtnl_get_dumpit(int protocol, int msgindex)
+{
+ struct rtnl_link *tab;
+
+ tab = rtnl_msg_handlers[protocol];
+ if (tab == NULL || tab[msgindex].dumpit == NULL)
+ tab = rtnl_msg_handlers[PF_UNSPEC];
+
+ return tab ? tab[msgindex].dumpit : NULL;
+}
+
+/**
+ * __rtnl_register - Register a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ * @doit: Function pointer called for each request message
+ * @dumpit: Function pointer called for each dump request (NLM_F_DUMP) message
+ *
+ * Registers the specified function pointers (at least one of them has
+ * to be non-NULL) to be called whenever a request message for the
+ * specified protocol family and message type is received.
+ *
+ * The special protocol family PF_UNSPEC may be used to define fallback
+ * function pointers for the case when no entry for the specific protocol
+ * family exists.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+{
+ struct rtnl_link *tab;
+ int msgindex;
+
+ BUG_ON(protocol < 0 || protocol >= NPROTO);
+ msgindex = rtm_msgindex(msgtype);
+
+ tab = rtnl_msg_handlers[protocol];
+ if (tab == NULL) {
+ tab = kcalloc(RTM_NR_MSGTYPES, sizeof(*tab), GFP_KERNEL);
+ if (tab == NULL)
+ return -ENOBUFS;
+
+ rtnl_msg_handlers[protocol] = tab;
+ }
+
+ if (doit)
+ tab[msgindex].doit = doit;
+
+ if (dumpit)
+ tab[msgindex].dumpit = dumpit;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_register);
+
+/**
+ * rtnl_register - Register a rtnetlink message type
+ *
+ * Identical to __rtnl_register() but panics on failure. This is useful
+ * as failure of this function is very unlikely, it can only happen due
+ * to lack of memory when allocating the chain to store all message
+ * handlers for a protocol. Meant for use in init functions where lack
+ * of memory implies no sense in continuing.
+ */
+void rtnl_register(int protocol, int msgtype,
+ rtnl_doit_func doit, rtnl_dumpit_func dumpit)
+{
+ if (__rtnl_register(protocol, msgtype, doit, dumpit) < 0)
+ panic("Unable to register rtnetlink message handler, "
+ "protocol = %d, message type = %d\n",
+ protocol, msgtype);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_register);
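+
+/*
+ * Usage sketch (illustrative only; the handler and function names are
+ * assumptions made for the example): a protocol family typically registers
+ * its doit/dumpit handlers from an __init function, in the same way
+ * rtnetlink_init() does below for PF_UNSPEC.
+ */
+#if 0
+static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+	return 0;		/* stub: a real handler would act on the message */
+}
+
+static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	return skb->len;	/* stub: dump handlers return bytes written */
+}
+
+static int __init example_rtnl_init(void)
+{
+	rtnl_register(PF_INET, RTM_NEWROUTE, example_doit, NULL);
+	rtnl_register(PF_INET, RTM_GETROUTE, NULL, example_dumpit);
+	return 0;
+}
+#endif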
+
+/**
+ * rtnl_unregister - Unregister a rtnetlink message type
+ * @protocol: Protocol family or PF_UNSPEC
+ * @msgtype: rtnetlink message type
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_unregister(int protocol, int msgtype)
+{
+ int msgindex;
+
+ BUG_ON(protocol < 0 || protocol >= NPROTO);
+ msgindex = rtm_msgindex(msgtype);
+
+ if (rtnl_msg_handlers[protocol] == NULL)
+ return -ENOENT;
+
+ rtnl_msg_handlers[protocol][msgindex].doit = NULL;
+ rtnl_msg_handlers[protocol][msgindex].dumpit = NULL;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_unregister);
+
+/**
+ * rtnl_unregister_all - Unregister all rtnetlink message type of a protocol
+ * @protocol: Protocol family or PF_UNSPEC
+ *
+ * Identical to calling rtnl_unregister() for all registered message types
+ * of a certain protocol family.
+ */
+void rtnl_unregister_all(int protocol)
+{
+ BUG_ON(protocol < 0 || protocol >= NPROTO);
+
+ kfree(rtnl_msg_handlers[protocol]);
+ rtnl_msg_handlers[protocol] = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_unregister_all);
+
+static LIST_HEAD(link_ops);
+
+/**
+ * __rtnl_link_register - Register rtnl_link_ops with rtnetlink.
+ * @ops: struct rtnl_link_ops * to register
+ *
+ * The caller must hold the rtnl_mutex. This function should be used
+ * by drivers that create devices during module initialization. It
+ * must be called before registering the devices.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int __rtnl_link_register(struct rtnl_link_ops *ops)
+{
+ if (!ops->dellink)
+ ops->dellink = unregister_netdevice;
+
+ list_add_tail(&ops->list, &link_ops);
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_link_register);
+
+/**
+ * rtnl_link_register - Register rtnl_link_ops with rtnetlink.
+ * @ops: struct rtnl_link_ops * to register
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int rtnl_link_register(struct rtnl_link_ops *ops)
+{
+ int err;
+
+ rtnl_lock();
+ err = __rtnl_link_register(ops);
+ rtnl_unlock();
+ return err;
+}
+
+EXPORT_SYMBOL_GPL(rtnl_link_register);
+
+static void __rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
+{
+ struct net_device *dev;
+restart:
+ for_each_netdev(net, dev) {
+ if (dev->rtnl_link_ops == ops) {
+ ops->dellink(dev);
+ goto restart;
+ }
+ }
+}
+
+void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops)
+{
+ rtnl_lock();
+ __rtnl_kill_links(net, ops);
+ rtnl_unlock();
+}
+EXPORT_SYMBOL_GPL(rtnl_kill_links);
+
+/**
+ * __rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
+ * @ops: struct rtnl_link_ops * to unregister
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+void __rtnl_link_unregister(struct rtnl_link_ops *ops)
+{
+ struct net *net;
+
+ for_each_net(net) {
+ __rtnl_kill_links(net, ops);
+ }
+ list_del(&ops->list);
+}
+
+EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
+
+/**
+ * rtnl_link_unregister - Unregister rtnl_link_ops from rtnetlink.
+ * @ops: struct rtnl_link_ops * to unregister
+ */
+void rtnl_link_unregister(struct rtnl_link_ops *ops)
+{
+ rtnl_lock();
+ __rtnl_link_unregister(ops);
+ rtnl_unlock();
+}
+
+EXPORT_SYMBOL_GPL(rtnl_link_unregister);
+
+static const struct rtnl_link_ops *rtnl_link_ops_get(const char *kind)
+{
+ const struct rtnl_link_ops *ops;
+
+ list_for_each_entry(ops, &link_ops, list) {
+ if (!strcmp(ops->kind, kind))
+ return ops;
+ }
+ return NULL;
+}
+
+static size_t rtnl_link_get_size(const struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+ size_t size;
+
+ if (!ops)
+ return 0;
+
+ size = nlmsg_total_size(sizeof(struct nlattr)) + /* IFLA_LINKINFO */
+ nlmsg_total_size(strlen(ops->kind) + 1); /* IFLA_INFO_KIND */
+
+ if (ops->get_size)
+ /* IFLA_INFO_DATA + nested data */
+ size += nlmsg_total_size(sizeof(struct nlattr)) +
+ ops->get_size(dev);
+
+ if (ops->get_xstats_size)
+ size += ops->get_xstats_size(dev); /* IFLA_INFO_XSTATS */
+
+ return size;
+}
+
+static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)
+{
+ const struct rtnl_link_ops *ops = dev->rtnl_link_ops;
+ struct nlattr *linkinfo, *data;
+ int err = -EMSGSIZE;
+
+ linkinfo = nla_nest_start(skb, IFLA_LINKINFO);
+ if (linkinfo == NULL)
+ goto out;
+
+ if (nla_put_string(skb, IFLA_INFO_KIND, ops->kind) < 0)
+ goto err_cancel_link;
+ if (ops->fill_xstats) {
+ err = ops->fill_xstats(skb, dev);
+ if (err < 0)
+ goto err_cancel_link;
+ }
+ if (ops->fill_info) {
+ data = nla_nest_start(skb, IFLA_INFO_DATA);
+ if (data == NULL)
+ goto err_cancel_link;
+ err = ops->fill_info(skb, dev);
+ if (err < 0)
+ goto err_cancel_data;
+ nla_nest_end(skb, data);
+ }
+
+ nla_nest_end(skb, linkinfo);
+ return 0;
+
+err_cancel_data:
+ nla_nest_cancel(skb, data);
+err_cancel_link:
+ nla_nest_cancel(skb, linkinfo);
+out:
+ return err;
+}
+
+static const int rtm_min[RTM_NR_FAMILIES] =
+{
+ [RTM_FAM(RTM_NEWLINK)] = NLMSG_LENGTH(sizeof(struct ifinfomsg)),
+ [RTM_FAM(RTM_NEWADDR)] = NLMSG_LENGTH(sizeof(struct ifaddrmsg)),
+ [RTM_FAM(RTM_NEWROUTE)] = NLMSG_LENGTH(sizeof(struct rtmsg)),
+ [RTM_FAM(RTM_NEWRULE)] = NLMSG_LENGTH(sizeof(struct fib_rule_hdr)),
+ [RTM_FAM(RTM_NEWQDISC)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ [RTM_FAM(RTM_NEWTCLASS)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ [RTM_FAM(RTM_NEWTFILTER)] = NLMSG_LENGTH(sizeof(struct tcmsg)),
+ [RTM_FAM(RTM_NEWACTION)] = NLMSG_LENGTH(sizeof(struct tcamsg)),
+ [RTM_FAM(RTM_GETMULTICAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+ [RTM_FAM(RTM_GETANYCAST)] = NLMSG_LENGTH(sizeof(struct rtgenmsg)),
+};
+
+static const int rta_max[RTM_NR_FAMILIES] =
+{
+ [RTM_FAM(RTM_NEWLINK)] = IFLA_MAX,
+ [RTM_FAM(RTM_NEWADDR)] = IFA_MAX,
+ [RTM_FAM(RTM_NEWROUTE)] = RTA_MAX,
+ [RTM_FAM(RTM_NEWRULE)] = FRA_MAX,
+ [RTM_FAM(RTM_NEWQDISC)] = TCA_MAX,
+ [RTM_FAM(RTM_NEWTCLASS)] = TCA_MAX,
+ [RTM_FAM(RTM_NEWTFILTER)] = TCA_MAX,
+ [RTM_FAM(RTM_NEWACTION)] = TCAA_MAX,
+};
+
+void __rta_fill(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+ struct rtattr *rta;
+ int size = RTA_LENGTH(attrlen);
+
+ rta = (struct rtattr*)skb_put(skb, RTA_ALIGN(size));
+ rta->rta_type = attrtype;
+ rta->rta_len = size;
+ memcpy(RTA_DATA(rta), data, attrlen);
+ memset(RTA_DATA(rta) + attrlen, 0, RTA_ALIGN(size) - size);
+}
+
+int rtnetlink_send(struct sk_buff *skb, struct net *net, u32 pid, unsigned group, int echo)
+{
+ struct sock *rtnl = net->rtnl;
+ int err = 0;
+
+ NETLINK_CB(skb).dst_group = group;
+ if (echo)
+ atomic_inc(&skb->users);
+ netlink_broadcast(rtnl, skb, pid, group, GFP_KERNEL);
+ if (echo)
+ err = netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
+ return err;
+}
+
+int rtnl_unicast(struct sk_buff *skb, struct net *net, u32 pid)
+{
+ struct sock *rtnl = net->rtnl;
+
+ return nlmsg_unicast(rtnl, skb, pid);
+}
+
+int rtnl_notify(struct sk_buff *skb, struct net *net, u32 pid, u32 group,
+ struct nlmsghdr *nlh, gfp_t flags)
+{
+ struct sock *rtnl = net->rtnl;
+ int report = 0;
+
+ if (nlh)
+ report = nlmsg_report(nlh);
+
+ return nlmsg_notify(rtnl, skb, pid, group, report, flags);
+}
+
+void rtnl_set_sk_err(struct net *net, u32 group, int error)
+{
+ struct sock *rtnl = net->rtnl;
+
+ netlink_set_err(rtnl, 0, group, error);
+}
+
+int rtnetlink_put_metrics(struct sk_buff *skb, u32 *metrics)
+{
+ struct nlattr *mx;
+ int i, valid = 0;
+
+ mx = nla_nest_start(skb, RTA_METRICS);
+ if (mx == NULL)
+ return -ENOBUFS;
+
+ for (i = 0; i < RTAX_MAX; i++) {
+ if (metrics[i]) {
+ valid++;
+ NLA_PUT_U32(skb, i+1, metrics[i]);
+ }
+ }
+
+ if (!valid) {
+ nla_nest_cancel(skb, mx);
+ return 0;
+ }
+
+ return nla_nest_end(skb, mx);
+
+nla_put_failure:
+ nla_nest_cancel(skb, mx);
+ return -EMSGSIZE;
+}
+
+int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, u32 id,
+ u32 ts, u32 tsage, long expires, u32 error)
+{
+ struct rta_cacheinfo ci = {
+ .rta_lastuse = jiffies_to_clock_t(jiffies - dst->lastuse),
+ .rta_used = dst->__use,
+ .rta_clntref = atomic_read(&(dst->__refcnt)),
+ .rta_error = error,
+ .rta_id = id,
+ .rta_ts = ts,
+ .rta_tsage = tsage,
+ };
+
+ if (expires)
+ ci.rta_expires = jiffies_to_clock_t(expires);
+
+ return nla_put(skb, RTA_CACHEINFO, sizeof(ci), &ci);
+}
+
+EXPORT_SYMBOL_GPL(rtnl_put_cacheinfo);
+
+static void set_operstate(struct net_device *dev, unsigned char transition)
+{
+ unsigned char operstate = dev->operstate;
+
+ switch(transition) {
+ case IF_OPER_UP:
+ if ((operstate == IF_OPER_DORMANT ||
+ operstate == IF_OPER_UNKNOWN) &&
+ !netif_dormant(dev))
+ operstate = IF_OPER_UP;
+ break;
+
+ case IF_OPER_DORMANT:
+ if (operstate == IF_OPER_UP ||
+ operstate == IF_OPER_UNKNOWN)
+ operstate = IF_OPER_DORMANT;
+ break;
+ }
+
+ if (dev->operstate != operstate) {
+ write_lock_bh(&dev_base_lock);
+ dev->operstate = operstate;
+ write_unlock_bh(&dev_base_lock);
+ netdev_state_change(dev);
+ }
+}
+
+static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
+ const struct net_device_stats *b)
+{
+ a->rx_packets = b->rx_packets;
+ a->tx_packets = b->tx_packets;
+ a->rx_bytes = b->rx_bytes;
+ a->tx_bytes = b->tx_bytes;
+ a->rx_errors = b->rx_errors;
+ a->tx_errors = b->tx_errors;
+ a->rx_dropped = b->rx_dropped;
+ a->tx_dropped = b->tx_dropped;
+
+ a->multicast = b->multicast;
+ a->collisions = b->collisions;
+
+ a->rx_length_errors = b->rx_length_errors;
+ a->rx_over_errors = b->rx_over_errors;
+ a->rx_crc_errors = b->rx_crc_errors;
+ a->rx_frame_errors = b->rx_frame_errors;
+ a->rx_fifo_errors = b->rx_fifo_errors;
+ a->rx_missed_errors = b->rx_missed_errors;
+
+ a->tx_aborted_errors = b->tx_aborted_errors;
+ a->tx_carrier_errors = b->tx_carrier_errors;
+ a->tx_fifo_errors = b->tx_fifo_errors;
+ a->tx_heartbeat_errors = b->tx_heartbeat_errors;
+ a->tx_window_errors = b->tx_window_errors;
+
+ a->rx_compressed = b->rx_compressed;
+ a->tx_compressed = b->tx_compressed;
+};
+
+static inline size_t if_nlmsg_size(const struct net_device *dev)
+{
+ return NLMSG_ALIGN(sizeof(struct ifinfomsg))
+ + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
+ + nla_total_size(IFALIASZ) /* IFLA_IFALIAS */
+ + nla_total_size(IFNAMSIZ) /* IFLA_QDISC */
+ + nla_total_size(sizeof(struct rtnl_link_ifmap))
+ + nla_total_size(sizeof(struct rtnl_link_stats))
+ + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
+ + nla_total_size(MAX_ADDR_LEN) /* IFLA_BROADCAST */
+ + nla_total_size(4) /* IFLA_TXQLEN */
+ + nla_total_size(4) /* IFLA_WEIGHT */
+ + nla_total_size(4) /* IFLA_MTU */
+ + nla_total_size(4) /* IFLA_LINK */
+ + nla_total_size(4) /* IFLA_MASTER */
+ + nla_total_size(1) /* IFLA_OPERSTATE */
+ + nla_total_size(1) /* IFLA_LINKMODE */
+ + rtnl_link_get_size(dev); /* IFLA_LINKINFO */
+}
+
+static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
+ int type, u32 pid, u32 seq, u32 change,
+ unsigned int flags)
+{
+ struct netdev_queue *txq;
+ struct ifinfomsg *ifm;
+ struct nlmsghdr *nlh;
+ const struct net_device_stats *stats;
+ struct nlattr *attr;
+
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ifm), flags);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ifm = nlmsg_data(nlh);
+ ifm->ifi_family = AF_UNSPEC;
+ ifm->__ifi_pad = 0;
+ ifm->ifi_type = dev->type;
+ ifm->ifi_index = dev->ifindex;
+ ifm->ifi_flags = dev_get_flags(dev);
+ ifm->ifi_change = change;
+
+ NLA_PUT_STRING(skb, IFLA_IFNAME, dev->name);
+ NLA_PUT_U32(skb, IFLA_TXQLEN, dev->tx_queue_len);
+ NLA_PUT_U8(skb, IFLA_OPERSTATE,
+ netif_running(dev) ? dev->operstate : IF_OPER_DOWN);
+ NLA_PUT_U8(skb, IFLA_LINKMODE, dev->link_mode);
+ NLA_PUT_U32(skb, IFLA_MTU, dev->mtu);
+
+ if (dev->ifindex != dev->iflink)
+ NLA_PUT_U32(skb, IFLA_LINK, dev->iflink);
+
+ if (dev->master)
+ NLA_PUT_U32(skb, IFLA_MASTER, dev->master->ifindex);
+
+ txq = netdev_get_tx_queue(dev, 0);
+ if (txq->qdisc_sleeping)
+ NLA_PUT_STRING(skb, IFLA_QDISC, txq->qdisc_sleeping->ops->id);
+
+ if (dev->ifalias)
+ NLA_PUT_STRING(skb, IFLA_IFALIAS, dev->ifalias);
+
+ if (1) {
+ struct rtnl_link_ifmap map = {
+ .mem_start = dev->mem_start,
+ .mem_end = dev->mem_end,
+ .base_addr = dev->base_addr,
+ .irq = dev->irq,
+ .dma = dev->dma,
+ .port = dev->if_port,
+ };
+ NLA_PUT(skb, IFLA_MAP, sizeof(map), &map);
+ }
+
+ if (dev->addr_len) {
+ NLA_PUT(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr);
+ NLA_PUT(skb, IFLA_BROADCAST, dev->addr_len, dev->broadcast);
+ }
+
+ attr = nla_reserve(skb, IFLA_STATS,
+ sizeof(struct rtnl_link_stats));
+ if (attr == NULL)
+ goto nla_put_failure;
+
+ stats = dev_get_stats(dev);
+ copy_rtnl_link_stats(nla_data(attr), stats);
+
+ if (dev->rtnl_link_ops) {
+ if (rtnl_link_fill(skb, dev) < 0)
+ goto nla_put_failure;
+ }
+
+ return nlmsg_end(skb, nlh);
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int idx;
+ int s_idx = cb->args[0];
+ struct net_device *dev;
+
+ idx = 0;
+ for_each_netdev(net, dev) {
+ if (idx < s_idx)
+ goto cont;
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, 0, NLM_F_MULTI) <= 0)
+ break;
+cont:
+ idx++;
+ }
+ cb->args[0] = idx;
+
+ return skb->len;
+}
+
+const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+ [IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
+ [IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
+ [IFLA_MAP] = { .len = sizeof(struct rtnl_link_ifmap) },
+ [IFLA_MTU] = { .type = NLA_U32 },
+ [IFLA_LINK] = { .type = NLA_U32 },
+ [IFLA_TXQLEN] = { .type = NLA_U32 },
+ [IFLA_WEIGHT] = { .type = NLA_U32 },
+ [IFLA_OPERSTATE] = { .type = NLA_U8 },
+ [IFLA_LINKMODE] = { .type = NLA_U8 },
+ [IFLA_LINKINFO] = { .type = NLA_NESTED },
+ [IFLA_NET_NS_PID] = { .type = NLA_U32 },
+ [IFLA_IFALIAS] = { .type = NLA_STRING, .len = IFALIASZ-1 },
+};
+
+static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
+ [IFLA_INFO_KIND] = { .type = NLA_STRING },
+ [IFLA_INFO_DATA] = { .type = NLA_NESTED },
+};
+
+static struct net *get_net_ns_by_pid(pid_t pid)
+{
+ struct task_struct *tsk;
+ struct net *net;
+
+ /* Lookup the network namespace */
+ net = ERR_PTR(-ESRCH);
+ rcu_read_lock();
+ tsk = find_task_by_vpid(pid);
+ if (tsk) {
+ struct nsproxy *nsproxy;
+ nsproxy = task_nsproxy(tsk);
+ if (nsproxy)
+ net = get_net(nsproxy->net_ns);
+ }
+ rcu_read_unlock();
+ return net;
+}
+
+static int validate_linkmsg(struct net_device *dev, struct nlattr *tb[])
+{
+ if (dev) {
+ if (tb[IFLA_ADDRESS] &&
+ nla_len(tb[IFLA_ADDRESS]) < dev->addr_len)
+ return -EINVAL;
+
+ if (tb[IFLA_BROADCAST] &&
+ nla_len(tb[IFLA_BROADCAST]) < dev->addr_len)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+ struct nlattr **tb, char *ifname, int modified)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ int send_addr_notify = 0;
+ int err;
+
+ if (tb[IFLA_NET_NS_PID]) {
+ struct net *net;
+ net = get_net_ns_by_pid(nla_get_u32(tb[IFLA_NET_NS_PID]));
+ if (IS_ERR(net)) {
+ err = PTR_ERR(net);
+ goto errout;
+ }
+ err = dev_change_net_namespace(dev, net, ifname);
+ put_net(net);
+ if (err)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_MAP]) {
+ struct rtnl_link_ifmap *u_map;
+ struct ifmap k_map;
+
+ if (!ops->ndo_set_config) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (!netif_device_present(dev)) {
+ err = -ENODEV;
+ goto errout;
+ }
+
+ u_map = nla_data(tb[IFLA_MAP]);
+ k_map.mem_start = (unsigned long) u_map->mem_start;
+ k_map.mem_end = (unsigned long) u_map->mem_end;
+ k_map.base_addr = (unsigned short) u_map->base_addr;
+ k_map.irq = (unsigned char) u_map->irq;
+ k_map.dma = (unsigned char) u_map->dma;
+ k_map.port = (unsigned char) u_map->port;
+
+ err = ops->ndo_set_config(dev, &k_map);
+ if (err < 0)
+ goto errout;
+
+ modified = 1;
+ }
+
+ if (tb[IFLA_ADDRESS]) {
+ struct sockaddr *sa;
+ int len;
+
+ if (!ops->ndo_set_mac_address) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (!netif_device_present(dev)) {
+ err = -ENODEV;
+ goto errout;
+ }
+
+ len = sizeof(sa_family_t) + dev->addr_len;
+ sa = kmalloc(len, GFP_KERNEL);
+ if (!sa) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ sa->sa_family = dev->type;
+ memcpy(sa->sa_data, nla_data(tb[IFLA_ADDRESS]),
+ dev->addr_len);
+ err = ops->ndo_set_mac_address(dev, sa);
+ kfree(sa);
+ if (err)
+ goto errout;
+ send_addr_notify = 1;
+ modified = 1;
+ }
+
+ if (tb[IFLA_MTU]) {
+ err = dev_set_mtu(dev, nla_get_u32(tb[IFLA_MTU]));
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ /*
+ * Interface selected by interface index but interface
+ * name provided implies that a name change has been
+ * requested.
+ */
+ if (ifm->ifi_index > 0 && ifname[0]) {
+ err = dev_change_name(dev, ifname);
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_IFALIAS]) {
+ err = dev_set_alias(dev, nla_data(tb[IFLA_IFALIAS]),
+ nla_len(tb[IFLA_IFALIAS]));
+ if (err < 0)
+ goto errout;
+ modified = 1;
+ }
+
+ if (tb[IFLA_BROADCAST]) {
+ nla_memcpy(dev->broadcast, tb[IFLA_BROADCAST], dev->addr_len);
+ send_addr_notify = 1;
+ }
+
+ if (ifm->ifi_flags || ifm->ifi_change) {
+ unsigned int flags = ifm->ifi_flags;
+
+ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
+ if (ifm->ifi_change)
+ flags = (flags & ifm->ifi_change) |
+ (dev->flags & ~ifm->ifi_change);
+ err = dev_change_flags(dev, flags);
+ if (err < 0)
+ goto errout;
+ }
+
+ if (tb[IFLA_TXQLEN])
+ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+
+ if (tb[IFLA_OPERSTATE])
+ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+
+ if (tb[IFLA_LINKMODE]) {
+ write_lock_bh(&dev_base_lock);
+ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+ write_unlock_bh(&dev_base_lock);
+ }
+
+ err = 0;
+
+errout:
+ if (err < 0 && modified && net_ratelimit())
+ printk(KERN_WARNING "A link change request failed with "
+ "some changes comitted already. Interface %s may "
+ "have been left with an inconsistent configuration, "
+ "please check.\n", dev->name);
+
+ if (send_addr_notify)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+
+static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ifinfomsg *ifm;
+ struct net_device *dev;
+ int err;
+ struct nlattr *tb[IFLA_MAX+1];
+ char ifname[IFNAMSIZ];
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ goto errout;
+
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ ifname[0] = '\0';
+
+ err = -EINVAL;
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = dev_get_by_index(net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME])
+ dev = dev_get_by_name(net, ifname);
+ else
+ goto errout;
+
+ if (dev == NULL) {
+ err = -ENODEV;
+ goto errout;
+ }
+
+ if ((err = validate_linkmsg(dev, tb)) < 0)
+ goto errout_dev;
+
+ err = do_setlink(dev, ifm, tb, ifname, 0);
+errout_dev:
+ dev_put(dev);
+errout:
+ return err;
+}
+
+static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ const struct rtnl_link_ops *ops;
+ struct net_device *dev;
+ struct ifinfomsg *ifm;
+ char ifname[IFNAMSIZ];
+ struct nlattr *tb[IFLA_MAX+1];
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+ else if (tb[IFLA_IFNAME])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ return -EINVAL;
+
+ if (!dev)
+ return -ENODEV;
+
+ ops = dev->rtnl_link_ops;
+ if (!ops)
+ return -EOPNOTSUPP;
+
+ ops->dellink(dev);
+ return 0;
+}
+
+struct net_device *rtnl_create_link(struct net *net, char *ifname,
+ const struct rtnl_link_ops *ops, struct nlattr *tb[])
+{
+ int err;
+ struct net_device *dev;
+
+ err = -ENOMEM;
+ dev = alloc_netdev(ops->priv_size, ifname, ops->setup);
+ if (!dev)
+ goto err;
+
+ if (strchr(dev->name, '%')) {
+ err = dev_alloc_name(dev, dev->name);
+ if (err < 0)
+ goto err_free;
+ }
+
+ dev_net_set(dev, net);
+ dev->rtnl_link_ops = ops;
+
+ if (tb[IFLA_MTU])
+ dev->mtu = nla_get_u32(tb[IFLA_MTU]);
+ if (tb[IFLA_ADDRESS])
+ memcpy(dev->dev_addr, nla_data(tb[IFLA_ADDRESS]),
+ nla_len(tb[IFLA_ADDRESS]));
+ if (tb[IFLA_BROADCAST])
+ memcpy(dev->broadcast, nla_data(tb[IFLA_BROADCAST]),
+ nla_len(tb[IFLA_BROADCAST]));
+ if (tb[IFLA_TXQLEN])
+ dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
+ if (tb[IFLA_OPERSTATE])
+ set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
+ if (tb[IFLA_LINKMODE])
+ dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
+
+ return dev;
+
+err_free:
+ free_netdev(dev);
+err:
+ return ERR_PTR(err);
+}
+
+static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ const struct rtnl_link_ops *ops;
+ struct net_device *dev;
+ struct ifinfomsg *ifm;
+ char kind[MODULE_NAME_LEN];
+ char ifname[IFNAMSIZ];
+ struct nlattr *tb[IFLA_MAX+1];
+ struct nlattr *linkinfo[IFLA_INFO_MAX+1];
+ int err;
+
+#ifdef CONFIG_MODULES
+replay:
+#endif
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ return err;
+
+ if (tb[IFLA_IFNAME])
+ nla_strlcpy(ifname, tb[IFLA_IFNAME], IFNAMSIZ);
+ else
+ ifname[0] = '\0';
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0)
+ dev = __dev_get_by_index(net, ifm->ifi_index);
+ else if (ifname[0])
+ dev = __dev_get_by_name(net, ifname);
+ else
+ dev = NULL;
+
+ if ((err = validate_linkmsg(dev, tb)) < 0)
+ return err;
+
+ if (tb[IFLA_LINKINFO]) {
+ err = nla_parse_nested(linkinfo, IFLA_INFO_MAX,
+ tb[IFLA_LINKINFO], ifla_info_policy);
+ if (err < 0)
+ return err;
+ } else
+ memset(linkinfo, 0, sizeof(linkinfo));
+
+ if (linkinfo[IFLA_INFO_KIND]) {
+ nla_strlcpy(kind, linkinfo[IFLA_INFO_KIND], sizeof(kind));
+ ops = rtnl_link_ops_get(kind);
+ } else {
+ kind[0] = '\0';
+ ops = NULL;
+ }
+
+ if (1) {
+ struct nlattr *attr[ops ? ops->maxtype + 1 : 0], **data = NULL;
+
+ if (ops) {
+ if (ops->maxtype && linkinfo[IFLA_INFO_DATA]) {
+ err = nla_parse_nested(attr, ops->maxtype,
+ linkinfo[IFLA_INFO_DATA],
+ ops->policy);
+ if (err < 0)
+ return err;
+ data = attr;
+ }
+ if (ops->validate) {
+ err = ops->validate(tb, data);
+ if (err < 0)
+ return err;
+ }
+ }
+
+ if (dev) {
+ int modified = 0;
+
+ if (nlh->nlmsg_flags & NLM_F_EXCL)
+ return -EEXIST;
+ if (nlh->nlmsg_flags & NLM_F_REPLACE)
+ return -EOPNOTSUPP;
+
+ if (linkinfo[IFLA_INFO_DATA]) {
+ if (!ops || ops != dev->rtnl_link_ops ||
+ !ops->changelink)
+ return -EOPNOTSUPP;
+
+ err = ops->changelink(dev, tb, data);
+ if (err < 0)
+ return err;
+ modified = 1;
+ }
+
+ return do_setlink(dev, ifm, tb, ifname, modified);
+ }
+
+ if (!(nlh->nlmsg_flags & NLM_F_CREATE))
+ return -ENODEV;
+
+ if (ifm->ifi_index || ifm->ifi_flags || ifm->ifi_change)
+ return -EOPNOTSUPP;
+ if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
+ return -EOPNOTSUPP;
+
+ if (!ops) {
+#ifdef CONFIG_MODULES
+ if (kind[0]) {
+ __rtnl_unlock();
+ request_module("rtnl-link-%s", kind);
+ rtnl_lock();
+ ops = rtnl_link_ops_get(kind);
+ if (ops)
+ goto replay;
+ }
+#endif
+ return -EOPNOTSUPP;
+ }
+
+ if (!ifname[0])
+ snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+
+ dev = rtnl_create_link(net, ifname, ops, tb);
+
+ if (IS_ERR(dev))
+ err = PTR_ERR(dev);
+ else if (ops->newlink)
+ err = ops->newlink(dev, tb, data);
+ else
+ err = register_netdevice(dev);
+
+ if (err < 0 && !IS_ERR(dev))
+ free_netdev(dev);
+ return err;
+ }
+}
+
+static int rtnl_getlink(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg)
+{
+ struct net *net = sock_net(skb->sk);
+ struct ifinfomsg *ifm;
+ struct nlattr *tb[IFLA_MAX+1];
+ struct net_device *dev = NULL;
+ struct sk_buff *nskb;
+ int err;
+
+ err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFLA_MAX, ifla_policy);
+ if (err < 0)
+ return err;
+
+ ifm = nlmsg_data(nlh);
+ if (ifm->ifi_index > 0) {
+ dev = dev_get_by_index(net, ifm->ifi_index);
+ if (dev == NULL)
+ return -ENODEV;
+ } else
+ return -EINVAL;
+
+ nskb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+ if (nskb == NULL) {
+ err = -ENOBUFS;
+ goto errout;
+ }
+
+ err = rtnl_fill_ifinfo(nskb, dev, RTM_NEWLINK, NETLINK_CB(skb).pid,
+ nlh->nlmsg_seq, 0, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(nskb);
+ goto errout;
+ }
+ err = rtnl_unicast(nskb, net, NETLINK_CB(skb).pid);
+errout:
+ dev_put(dev);
+
+ return err;
+}
+
+static int rtnl_dump_all(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ int idx;
+ int s_idx = cb->family;
+
+ if (s_idx == 0)
+ s_idx = 1;
+ for (idx=1; idx<NPROTO; idx++) {
+ int type = cb->nlh->nlmsg_type-RTM_BASE;
+ if (idx < s_idx || idx == PF_PACKET)
+ continue;
+ if (rtnl_msg_handlers[idx] == NULL ||
+ rtnl_msg_handlers[idx][type].dumpit == NULL)
+ continue;
+ if (idx > s_idx)
+ memset(&cb->args[0], 0, sizeof(cb->args));
+ if (rtnl_msg_handlers[idx][type].dumpit(skb, cb))
+ break;
+ }
+ cb->family = idx;
+
+ return skb->len;
+}
+
+void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
+{
+ struct net *net = dev_net(dev);
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+
+ err = rtnl_fill_ifinfo(skb, dev, type, 0, 0, change, 0);
+ if (err < 0) {
+ /* -EMSGSIZE implies BUG in if_nlmsg_size() */
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ err = rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL, GFP_KERNEL);
+errout:
+ if (err < 0)
+ rtnl_set_sk_err(net, RTNLGRP_LINK, err);
+}
+
+/* Protected by RTNL semaphore. */
+static struct rtattr **rta_buf;
+static int rtattr_max;
+
+/* Process one rtnetlink message. */
+
+static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ struct net *net = sock_net(skb->sk);
+ rtnl_doit_func doit;
+ int sz_idx, kind;
+ int min_len;
+ int family;
+ int type;
+ int err;
+
+ type = nlh->nlmsg_type;
+ if (type > RTM_MAX)
+ return -EOPNOTSUPP;
+
+ type -= RTM_BASE;
+
+ /* All the messages must have at least 1 byte length */
+ if (nlh->nlmsg_len < NLMSG_LENGTH(sizeof(struct rtgenmsg)))
+ return 0;
+
+ family = ((struct rtgenmsg*)NLMSG_DATA(nlh))->rtgen_family;
+ if (family >= NPROTO)
+ return -EAFNOSUPPORT;
+
+ sz_idx = type>>2;
+ kind = type&3;
+
+ if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+ struct sock *rtnl;
+ rtnl_dumpit_func dumpit;
+
+ dumpit = rtnl_get_dumpit(family, type);
+ if (dumpit == NULL)
+ return -EOPNOTSUPP;
+
+ __rtnl_unlock();
+ rtnl = net->rtnl;
+ err = netlink_dump_start(rtnl, skb, nlh, dumpit, NULL);
+ rtnl_lock();
+ return err;
+ }
+
+ memset(rta_buf, 0, (rtattr_max * sizeof(struct rtattr *)));
+
+ min_len = rtm_min[sz_idx];
+ if (nlh->nlmsg_len < min_len)
+ return -EINVAL;
+
+ if (nlh->nlmsg_len > min_len) {
+ int attrlen = nlh->nlmsg_len - NLMSG_ALIGN(min_len);
+ struct rtattr *attr = (void*)nlh + NLMSG_ALIGN(min_len);
+
+ while (RTA_OK(attr, attrlen)) {
+ unsigned flavor = attr->rta_type;
+ if (flavor) {
+ if (flavor > rta_max[sz_idx])
+ return -EINVAL;
+ rta_buf[flavor-1] = attr;
+ }
+ attr = RTA_NEXT(attr, attrlen);
+ }
+ }
+
+ doit = rtnl_get_doit(family, type);
+ if (doit == NULL)
+ return -EOPNOTSUPP;
+
+ return doit(skb, nlh, (void *)&rta_buf[0]);
+}
+
+static void rtnetlink_rcv(struct sk_buff *skb)
+{
+ rtnl_lock();
+ netlink_rcv_skb(skb, &rtnetlink_rcv_msg);
+ rtnl_unlock();
+}
+
+static int rtnetlink_event(struct notifier_block *this, unsigned long event, void *ptr)
+{
+ struct net_device *dev = ptr;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ rtmsg_ifinfo(RTM_DELLINK, dev, ~0U);
+ break;
+ case NETDEV_REGISTER:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U);
+ break;
+ case NETDEV_UP:
+ case NETDEV_DOWN:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING);
+ break;
+ case NETDEV_CHANGE:
+ case NETDEV_GOING_DOWN:
+ break;
+ default:
+ rtmsg_ifinfo(RTM_NEWLINK, dev, 0);
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rtnetlink_dev_notifier = {
+ .notifier_call = rtnetlink_event,
+};
+
+
+static int rtnetlink_net_init(struct net *net)
+{
+ struct sock *sk;
+ sk = netlink_kernel_create(net, NETLINK_ROUTE, RTNLGRP_MAX,
+ rtnetlink_rcv, &rtnl_mutex, THIS_MODULE);
+ if (!sk)
+ return -ENOMEM;
+ net->rtnl = sk;
+ return 0;
+}
+
+static void rtnetlink_net_exit(struct net *net)
+{
+ netlink_kernel_release(net->rtnl);
+ net->rtnl = NULL;
+}
+
+static struct pernet_operations rtnetlink_net_ops = {
+ .init = rtnetlink_net_init,
+ .exit = rtnetlink_net_exit,
+};
+
+void __init rtnetlink_init(void)
+{
+ int i;
+
+ rtattr_max = 0;
+ for (i = 0; i < ARRAY_SIZE(rta_max); i++)
+ if (rta_max[i] > rtattr_max)
+ rtattr_max = rta_max[i];
+ rta_buf = kmalloc(rtattr_max * sizeof(struct rtattr *), GFP_KERNEL);
+ if (!rta_buf)
+ panic("rtnetlink_init: cannot allocate rta_buf\n");
+
+ if (register_pernet_subsys(&rtnetlink_net_ops))
+ panic("rtnetlink_init: cannot initialize rtnetlink\n");
+
+ netlink_set_nonroot(NETLINK_ROUTE, NL_NONROOT_RECV);
+ register_netdevice_notifier(&rtnetlink_dev_notifier);
+
+ rtnl_register(PF_UNSPEC, RTM_GETLINK, rtnl_getlink, rtnl_dump_ifinfo);
+ rtnl_register(PF_UNSPEC, RTM_SETLINK, rtnl_setlink, NULL);
+ rtnl_register(PF_UNSPEC, RTM_NEWLINK, rtnl_newlink, NULL);
+ rtnl_register(PF_UNSPEC, RTM_DELLINK, rtnl_dellink, NULL);
+
+ rtnl_register(PF_UNSPEC, RTM_GETADDR, NULL, rtnl_dump_all);
+ rtnl_register(PF_UNSPEC, RTM_GETROUTE, NULL, rtnl_dump_all);
+}
+
+EXPORT_SYMBOL(__rta_fill);
+EXPORT_SYMBOL(rtnetlink_put_metrics);
+EXPORT_SYMBOL(rtnl_lock);
+EXPORT_SYMBOL(rtnl_trylock);
+EXPORT_SYMBOL(rtnl_unlock);
+EXPORT_SYMBOL(rtnl_is_locked);
+EXPORT_SYMBOL(rtnl_unicast);
+EXPORT_SYMBOL(rtnl_notify);
+EXPORT_SYMBOL(rtnl_set_sk_err);
+EXPORT_SYMBOL(rtnl_create_link);
+EXPORT_SYMBOL(ifla_policy);
diff --git a/libdde_linux26/contrib/net/core/skb_dma_map.c b/libdde_linux26/contrib/net/core/skb_dma_map.c
new file mode 100644
index 00000000..86234923
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/skb_dma_map.c
@@ -0,0 +1,66 @@
+/* skb_dma_map.c: DMA mapping helpers for socket buffers.
+ *
+ * Copyright (C) David S. Miller <davem@davemloft.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/skbuff.h>
+
+int skb_dma_map(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir)
+{
+ struct skb_shared_info *sp = skb_shinfo(skb);
+ dma_addr_t map;
+ int i;
+
+ map = dma_map_single(dev, skb->data,
+ skb_headlen(skb), dir);
+ if (dma_mapping_error(dev, map))
+ goto out_err;
+
+ sp->dma_maps[0] = map;
+ for (i = 0; i < sp->nr_frags; i++) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ map = dma_map_page(dev, fp->page, fp->page_offset,
+ fp->size, dir);
+ if (dma_mapping_error(dev, map))
+ goto unwind;
+ sp->dma_maps[i + 1] = map;
+ }
+ sp->num_dma_maps = i + 1;
+
+ return 0;
+
+unwind:
+ while (--i >= 0) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ dma_unmap_page(dev, sp->dma_maps[i + 1],
+ fp->size, dir);
+ }
+ dma_unmap_single(dev, sp->dma_maps[0],
+ skb_headlen(skb), dir);
+out_err:
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(skb_dma_map);
+
+void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
+ enum dma_data_direction dir)
+{
+ struct skb_shared_info *sp = skb_shinfo(skb);
+ int i;
+
+ dma_unmap_single(dev, sp->dma_maps[0],
+ skb_headlen(skb), dir);
+ for (i = 0; i < sp->nr_frags; i++) {
+ skb_frag_t *fp = &sp->frags[i];
+
+ dma_unmap_page(dev, sp->dma_maps[i + 1],
+ fp->size, dir);
+ }
+}
+EXPORT_SYMBOL(skb_dma_unmap);
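+
+/*
+ * Usage sketch (illustrative only; the function and parameter names are
+ * assumptions): a driver maps an skb before handing it to its DMA engine
+ * and unmaps it again from its TX-completion path, reading the mapped
+ * addresses back out of skb_shinfo(skb)->dma_maps[].
+ */
+#if 0
+static int example_map_for_tx(struct device *dmadev, struct sk_buff *skb)
+{
+	if (skb_dma_map(dmadev, skb, DMA_TO_DEVICE))
+		return -ENOMEM;		/* head or fragment mapping failed */
+
+	/* ... fill TX descriptors from skb_shinfo(skb)->dma_maps[] ... */
+
+	return 0;
+}
+
+static void example_tx_complete(struct device *dmadev, struct sk_buff *skb)
+{
+	skb_dma_unmap(dmadev, skb, DMA_TO_DEVICE);
+}
+#endif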
diff --git a/libdde_linux26/contrib/net/core/sock.c b/libdde_linux26/contrib/net/core/sock.c
new file mode 100644
index 00000000..5f97caa1
--- /dev/null
+++ b/libdde_linux26/contrib/net/core/sock.c
@@ -0,0 +1,2297 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Generic socket support routines. Memory allocators, socket lock/release
+ * handler for protocols to use and generic option handler.
+ *
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Florian La Roche, <flla@stud.uni-sb.de>
+ * Alan Cox, <A.Cox@swansea.ac.uk>
+ *
+ * Fixes:
+ * Alan Cox : Numerous verify_area() problems
+ * Alan Cox : Connecting on a connecting socket
+ * now returns an error for tcp.
+ * Alan Cox : sock->protocol is set correctly.
+ * and is not sometimes left as 0.
+ * Alan Cox : connect handles icmp errors on a
+ * connect properly. Unfortunately there
+ * is a restart syscall nasty there. I
+ * can't match BSD without hacking the C
+ * library. Ideas urgently sought!
+ * Alan Cox : Disallow bind() to addresses that are
+ * not ours - especially broadcast ones!!
+ * Alan Cox : Socket 1024 _IS_ ok for users. (fencepost)
+ * Alan Cox : sock_wfree/sock_rfree don't destroy sockets,
+ * instead they leave that for the DESTROY timer.
+ * Alan Cox : Clean up error flag in accept
+ * Alan Cox : TCP ack handling is buggy, the DESTROY timer
+ * was buggy. Put a remove_sock() in the handler
+ * for memory when we hit 0. Also altered the timer
+ * code. The ACK stuff can wait and needs major
+ * TCP layer surgery.
+ * Alan Cox : Fixed TCP ack bug, removed remove sock
+ * and fixed timer/inet_bh race.
+ * Alan Cox : Added zapped flag for TCP
+ * Alan Cox : Move kfree_skb into skbuff.c and tidied up surplus code
+ * Alan Cox : for new sk_buff allocations wmalloc/rmalloc now call alloc_skb
+ * Alan Cox : kfree_s calls now are kfree_skbmem so we can track skb resources
+ * Alan Cox : Supports socket option broadcast now as does udp. Packet and raw need fixing.
+ * Alan Cox : Added RCVBUF,SNDBUF size setting. It suddenly occurred to me how easy it was so...
+ * Rick Sladkey : Relaxed UDP rules for matching packets.
+ * C.E.Hawkins : IFF_PROMISC/SIOCGHWADDR support
+ * Pauline Middelink : identd support
+ * Alan Cox : Fixed connect() taking signals I think.
+ * Alan Cox : SO_LINGER supported
+ * Alan Cox : Error reporting fixes
+ * Anonymous : inet_create tidied up (sk->reuse setting)
+ * Alan Cox : inet sockets don't set sk->type!
+ * Alan Cox : Split socket option code
+ * Alan Cox : Callbacks
+ * Alan Cox : Nagle flag for Charles & Johannes stuff
+ * Alex : Removed restriction on inet fioctl
+ * Alan Cox : Splitting INET from NET core
+ * Alan Cox : Fixed bogus SO_TYPE handling in getsockopt()
+ * Adam Caldwell : Missing return in SO_DONTROUTE/SO_DEBUG code
+ * Alan Cox : Split IP from generic code
+ * Alan Cox : New kfree_skbmem()
+ * Alan Cox : Make SO_DEBUG superuser only.
+ * Alan Cox : Allow anyone to clear SO_DEBUG
+ * (compatibility fix)
+ * Alan Cox : Added optimistic memory grabbing for AF_UNIX throughput.
+ * Alan Cox : Allocator for a socket is settable.
+ * Alan Cox : SO_ERROR includes soft errors.
+ * Alan Cox : Allow NULL arguments on some SO_ opts
+ * Alan Cox : Generic socket allocation to make hooks
+ * easier (suggested by Craig Metz).
+ * Michael Pall : SO_ERROR returns positive errno again
+ * Steve Whitehouse: Added default destructor to free
+ * protocol private data.
+ * Steve Whitehouse: Added various other default routines
+ * common to several socket families.
+ * Chris Evans : Call suser() check last on F_SETOWN
+ * Jay Schulist : Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
+ * Andi Kleen : Add sock_kmalloc()/sock_kfree_s()
+ * Andi Kleen : Fix write_space callback
+ * Chris Evans : Security fixes - signedness again
+ * Arnaldo C. Melo : cleanups, use skb_queue_purge
+ *
+ * To Fix:
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/string.h>
+#include <linux/sockios.h>
+#include <linux/net.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/tcp.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+#include <linux/netdevice.h>
+#include <net/protocol.h>
+#include <linux/skbuff.h>
+#include <net/net_namespace.h>
+#include <net/request_sock.h>
+#include <net/sock.h>
+#include <net/xfrm.h>
+#include <linux/ipsec.h>
+
+#include <linux/filter.h>
+
+#ifdef CONFIG_INET
+#include <net/tcp.h>
+#endif
+
+/*
+ * Each address family might have different locking rules, so we have
+ * one slock key per address family:
+ */
+static struct lock_class_key af_family_keys[AF_MAX];
+static struct lock_class_key af_family_slock_keys[AF_MAX];
+
+/*
+ * Make lock validator output more readable. (we pre-construct these
+ * strings build-time, so that runtime initialization of socket
+ * locks is fast):
+ */
+static const char *af_family_key_strings[AF_MAX+1] = {
+ "sk_lock-AF_UNSPEC", "sk_lock-AF_UNIX" , "sk_lock-AF_INET" ,
+ "sk_lock-AF_AX25" , "sk_lock-AF_IPX" , "sk_lock-AF_APPLETALK",
+ "sk_lock-AF_NETROM", "sk_lock-AF_BRIDGE" , "sk_lock-AF_ATMPVC" ,
+ "sk_lock-AF_X25" , "sk_lock-AF_INET6" , "sk_lock-AF_ROSE" ,
+ "sk_lock-AF_DECnet", "sk_lock-AF_NETBEUI" , "sk_lock-AF_SECURITY" ,
+ "sk_lock-AF_KEY" , "sk_lock-AF_NETLINK" , "sk_lock-AF_PACKET" ,
+ "sk_lock-AF_ASH" , "sk_lock-AF_ECONET" , "sk_lock-AF_ATMSVC" ,
+ "sk_lock-21" , "sk_lock-AF_SNA" , "sk_lock-AF_IRDA" ,
+ "sk_lock-AF_PPPOX" , "sk_lock-AF_WANPIPE" , "sk_lock-AF_LLC" ,
+ "sk_lock-27" , "sk_lock-28" , "sk_lock-AF_CAN" ,
+ "sk_lock-AF_TIPC" , "sk_lock-AF_BLUETOOTH", "sk_lock-IUCV" ,
+ "sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
+ "sk_lock-AF_MAX"
+};
+static const char *af_family_slock_key_strings[AF_MAX+1] = {
+ "slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
+ "slock-AF_AX25" , "slock-AF_IPX" , "slock-AF_APPLETALK",
+ "slock-AF_NETROM", "slock-AF_BRIDGE" , "slock-AF_ATMPVC" ,
+ "slock-AF_X25" , "slock-AF_INET6" , "slock-AF_ROSE" ,
+ "slock-AF_DECnet", "slock-AF_NETBEUI" , "slock-AF_SECURITY" ,
+ "slock-AF_KEY" , "slock-AF_NETLINK" , "slock-AF_PACKET" ,
+ "slock-AF_ASH" , "slock-AF_ECONET" , "slock-AF_ATMSVC" ,
+ "slock-21" , "slock-AF_SNA" , "slock-AF_IRDA" ,
+ "slock-AF_PPPOX" , "slock-AF_WANPIPE" , "slock-AF_LLC" ,
+ "slock-27" , "slock-28" , "slock-AF_CAN" ,
+ "slock-AF_TIPC" , "slock-AF_BLUETOOTH", "slock-AF_IUCV" ,
+ "slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
+ "slock-AF_MAX"
+};
+static const char *af_family_clock_key_strings[AF_MAX+1] = {
+ "clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
+ "clock-AF_AX25" , "clock-AF_IPX" , "clock-AF_APPLETALK",
+ "clock-AF_NETROM", "clock-AF_BRIDGE" , "clock-AF_ATMPVC" ,
+ "clock-AF_X25" , "clock-AF_INET6" , "clock-AF_ROSE" ,
+ "clock-AF_DECnet", "clock-AF_NETBEUI" , "clock-AF_SECURITY" ,
+ "clock-AF_KEY" , "clock-AF_NETLINK" , "clock-AF_PACKET" ,
+ "clock-AF_ASH" , "clock-AF_ECONET" , "clock-AF_ATMSVC" ,
+ "clock-21" , "clock-AF_SNA" , "clock-AF_IRDA" ,
+ "clock-AF_PPPOX" , "clock-AF_WANPIPE" , "clock-AF_LLC" ,
+ "clock-27" , "clock-28" , "clock-AF_CAN" ,
+ "clock-AF_TIPC" , "clock-AF_BLUETOOTH", "clock-AF_IUCV" ,
+ "clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
+ "clock-AF_MAX"
+};
+
+/*
+ * sk_callback_lock locking rules are per-address-family,
+ * so split the lock classes by using a per-AF key:
+ */
+static struct lock_class_key af_callback_keys[AF_MAX];
+
+/* Take into consideration the size of the struct sk_buff overhead in the
+ * determination of these values, since that is non-constant across
+ * platforms. This makes socket queueing behavior and performance
+ * not depend upon such differences.
+ */
+#define _SK_MEM_PACKETS 256
+#define _SK_MEM_OVERHEAD (sizeof(struct sk_buff) + 256)
+#define SK_WMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+#define SK_RMEM_MAX (_SK_MEM_OVERHEAD * _SK_MEM_PACKETS)
+
+/* Run time adjustable parameters. */
+__u32 sysctl_wmem_max __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_max __read_mostly = SK_RMEM_MAX;
+__u32 sysctl_wmem_default __read_mostly = SK_WMEM_MAX;
+__u32 sysctl_rmem_default __read_mostly = SK_RMEM_MAX;
+
+/* Maximal space eaten by iovec or ancillary data plus some space */
+int sysctl_optmem_max __read_mostly = sizeof(unsigned long)*(2*UIO_MAXIOV+512);
+
+static int sock_set_timeout(long *timeo_p, char __user *optval, int optlen)
+{
+ struct timeval tv;
+
+ if (optlen < sizeof(tv))
+ return -EINVAL;
+ if (copy_from_user(&tv, optval, sizeof(tv)))
+ return -EFAULT;
+ if (tv.tv_usec < 0 || tv.tv_usec >= USEC_PER_SEC)
+ return -EDOM;
+
+ if (tv.tv_sec < 0) {
+ static int warned __read_mostly;
+
+ *timeo_p = 0;
+ if (warned < 10 && net_ratelimit()) {
+ warned++;
+ printk(KERN_INFO "sock_set_timeout: `%s' (pid %d) "
+ "tries to set negative timeout\n",
+ current->comm, task_pid_nr(current));
+ }
+ return 0;
+ }
+ *timeo_p = MAX_SCHEDULE_TIMEOUT;
+ if (tv.tv_sec == 0 && tv.tv_usec == 0)
+ return 0;
+ if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT/HZ - 1))
+ *timeo_p = tv.tv_sec*HZ + (tv.tv_usec+(1000000/HZ-1))/(1000000/HZ);
+ return 0;
+}
+
+static void sock_warn_obsolete_bsdism(const char *name)
+{
+ static int warned;
+ static char warncomm[TASK_COMM_LEN];
+ if (strcmp(warncomm, current->comm) && warned < 5) {
+ strcpy(warncomm, current->comm);
+ printk(KERN_WARNING "process `%s' is using obsolete "
+ "%s SO_BSDCOMPAT\n", warncomm, name);
+ warned++;
+ }
+}
+
+static void sock_disable_timestamp(struct sock *sk)
+{
+ if (sock_flag(sk, SOCK_TIMESTAMP)) {
+ sock_reset_flag(sk, SOCK_TIMESTAMP);
+ net_disable_timestamp();
+ }
+}
+
+
+int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err = 0;
+ int skb_len;
+
+ /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
+ number of warnings when compiling with -W --ANK
+ */
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ err = sk_filter(sk, skb);
+ if (err)
+ goto out;
+
+ if (!sk_rmem_schedule(sk, skb->truesize)) {
+ err = -ENOBUFS;
+ goto out;
+ }
+
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+out:
+ return err;
+}
+EXPORT_SYMBOL(sock_queue_rcv_skb);
+
+int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+{
+ int rc = NET_RX_SUCCESS;
+
+ if (sk_filter(sk, skb))
+ goto discard_and_relse;
+
+ skb->dev = NULL;
+
+ if (nested)
+ bh_lock_sock_nested(sk);
+ else
+ bh_lock_sock(sk);
+ if (!sock_owned_by_user(sk)) {
+ /*
+ * trylock + unlock semantics:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, 0, 1, _RET_IP_);
+
+ rc = sk_backlog_rcv(sk, skb);
+
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ } else
+ sk_add_backlog(sk, skb);
+ bh_unlock_sock(sk);
+out:
+ sock_put(sk);
+ return rc;
+discard_and_relse:
+ kfree_skb(skb);
+ goto out;
+}
+EXPORT_SYMBOL(sk_receive_skb);
+
+struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk->sk_dst_cache;
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk->sk_dst_cache = NULL;
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+EXPORT_SYMBOL(__sk_dst_check);
+
+struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie)
+{
+ struct dst_entry *dst = sk_dst_get(sk);
+
+ if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ sk_dst_reset(sk);
+ dst_release(dst);
+ return NULL;
+ }
+
+ return dst;
+}
+EXPORT_SYMBOL(sk_dst_check);
+
+static int sock_bindtodevice(struct sock *sk, char __user *optval, int optlen)
+{
+ int ret = -ENOPROTOOPT;
+#ifdef CONFIG_NETDEVICES
+ struct net *net = sock_net(sk);
+ char devname[IFNAMSIZ];
+ int index;
+
+ /* Sorry... */
+ ret = -EPERM;
+ if (!capable(CAP_NET_RAW))
+ goto out;
+
+ ret = -EINVAL;
+ if (optlen < 0)
+ goto out;
+
+ /* Bind this socket to a particular device like "eth0",
+ * as specified in the passed interface name. If the
+ * name is "" or the option length is zero the socket
+ * is not bound.
+ */
+ if (optlen > IFNAMSIZ - 1)
+ optlen = IFNAMSIZ - 1;
+ memset(devname, 0, sizeof(devname));
+
+ ret = -EFAULT;
+ if (copy_from_user(devname, optval, optlen))
+ goto out;
+
+ if (devname[0] == '\0') {
+ index = 0;
+ } else {
+ struct net_device *dev = dev_get_by_name(net, devname);
+
+ ret = -ENODEV;
+ if (!dev)
+ goto out;
+
+ index = dev->ifindex;
+ dev_put(dev);
+ }
+
+ lock_sock(sk);
+ sk->sk_bound_dev_if = index;
+ sk_dst_reset(sk);
+ release_sock(sk);
+
+ ret = 0;
+
+out:
+#endif
+
+ return ret;
+}
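+/*
+ * Hedged usage sketch (not part of the original source): from user space the
+ * path above is reached via something like
+ *
+ *	setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, "eth0", strlen("eth0") + 1);
+ *
+ * which requires CAP_NET_RAW, while passing an empty string (or an option
+ * length of zero) clears the binding by setting sk_bound_dev_if back to 0.
+ */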
+
+static inline void sock_valbool_flag(struct sock *sk, int bit, int valbool)
+{
+ if (valbool)
+ sock_set_flag(sk, bit);
+ else
+ sock_reset_flag(sk, bit);
+}
+
+/*
+ * This is meant for all protocols to use and covers goings on
+ * at the socket level. Everything here is generic.
+ */
+
+int sock_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk=sock->sk;
+ int val;
+ int valbool;
+ struct linger ling;
+ int ret = 0;
+
+ /*
+ * Options without arguments
+ */
+
+ if (optname == SO_BINDTODEVICE)
+ return sock_bindtodevice(sk, optval, optlen);
+
+ if (optlen < sizeof(int))
+ return -EINVAL;
+
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
+
+ valbool = val?1:0;
+
+ lock_sock(sk);
+
+ switch(optname) {
+ case SO_DEBUG:
+ if (val && !capable(CAP_NET_ADMIN)) {
+ ret = -EACCES;
+ } else
+ sock_valbool_flag(sk, SOCK_DBG, valbool);
+ break;
+ case SO_REUSEADDR:
+ sk->sk_reuse = valbool;
+ break;
+ case SO_TYPE:
+ case SO_ERROR:
+ ret = -ENOPROTOOPT;
+ break;
+ case SO_DONTROUTE:
+ sock_valbool_flag(sk, SOCK_LOCALROUTE, valbool);
+ break;
+ case SO_BROADCAST:
+ sock_valbool_flag(sk, SOCK_BROADCAST, valbool);
+ break;
+ case SO_SNDBUF:
+		/* Don't return an error here; BSD doesn't, and if you think
+		   about it this is right. Otherwise apps would have to
+		   play 'guess the biggest size' games. RCVBUF/SNDBUF
+		   are treated in BSD as hints */
+
+ if (val > sysctl_wmem_max)
+ val = sysctl_wmem_max;
+set_sndbuf:
+ sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+ if ((val * 2) < SOCK_MIN_SNDBUF)
+ sk->sk_sndbuf = SOCK_MIN_SNDBUF;
+ else
+ sk->sk_sndbuf = val * 2;
+
+ /*
+ * Wake up sending tasks if we
+ * upped the value.
+ */
+ sk->sk_write_space(sk);
+ break;
+
+ case SO_SNDBUFFORCE:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ goto set_sndbuf;
+
+ case SO_RCVBUF:
+		/* Don't return an error here; BSD doesn't, and if you think
+		   about it this is right. Otherwise apps would have to
+		   play 'guess the biggest size' games. RCVBUF/SNDBUF
+		   are treated in BSD as hints */
+
+ if (val > sysctl_rmem_max)
+ val = sysctl_rmem_max;
+set_rcvbuf:
+ sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+ /*
+ * We double it on the way in to account for
+ * "struct sk_buff" etc. overhead. Applications
+ * assume that the SO_RCVBUF setting they make will
+ * allow that much actual data to be received on that
+ * socket.
+ *
+ * Applications are unaware that "struct sk_buff" and
+ * other overheads allocate from the receive buffer
+ * during socket buffer allocation.
+ *
+ * And after considering the possible alternatives,
+ * returning the value we actually used in getsockopt
+ * is the most desirable behavior.
+ */
+ if ((val * 2) < SOCK_MIN_RCVBUF)
+ sk->sk_rcvbuf = SOCK_MIN_RCVBUF;
+ else
+ sk->sk_rcvbuf = val * 2;
+ break;
+
+ case SO_RCVBUFFORCE:
+ if (!capable(CAP_NET_ADMIN)) {
+ ret = -EPERM;
+ break;
+ }
+ goto set_rcvbuf;
+
+ case SO_KEEPALIVE:
+#ifdef CONFIG_INET
+ if (sk->sk_protocol == IPPROTO_TCP)
+ tcp_set_keepalive(sk, valbool);
+#endif
+ sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
+ break;
+
+ case SO_OOBINLINE:
+ sock_valbool_flag(sk, SOCK_URGINLINE, valbool);
+ break;
+
+ case SO_NO_CHECK:
+ sk->sk_no_check = valbool;
+ break;
+
+ case SO_PRIORITY:
+ if ((val >= 0 && val <= 6) || capable(CAP_NET_ADMIN))
+ sk->sk_priority = val;
+ else
+ ret = -EPERM;
+ break;
+
+ case SO_LINGER:
+ if (optlen < sizeof(ling)) {
+ ret = -EINVAL; /* 1003.1g */
+ break;
+ }
+ if (copy_from_user(&ling,optval,sizeof(ling))) {
+ ret = -EFAULT;
+ break;
+ }
+ if (!ling.l_onoff)
+ sock_reset_flag(sk, SOCK_LINGER);
+ else {
+#if (BITS_PER_LONG == 32)
+ if ((unsigned int)ling.l_linger >= MAX_SCHEDULE_TIMEOUT/HZ)
+ sk->sk_lingertime = MAX_SCHEDULE_TIMEOUT;
+ else
+#endif
+ sk->sk_lingertime = (unsigned int)ling.l_linger * HZ;
+ sock_set_flag(sk, SOCK_LINGER);
+ }
+ break;
+
+ case SO_BSDCOMPAT:
+ sock_warn_obsolete_bsdism("setsockopt");
+ break;
+
+ case SO_PASSCRED:
+ if (valbool)
+ set_bit(SOCK_PASSCRED, &sock->flags);
+ else
+ clear_bit(SOCK_PASSCRED, &sock->flags);
+ break;
+
+ case SO_TIMESTAMP:
+ case SO_TIMESTAMPNS:
+ if (valbool) {
+ if (optname == SO_TIMESTAMP)
+ sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+ else
+ sock_set_flag(sk, SOCK_RCVTSTAMPNS);
+ sock_set_flag(sk, SOCK_RCVTSTAMP);
+ sock_enable_timestamp(sk);
+ } else {
+ sock_reset_flag(sk, SOCK_RCVTSTAMP);
+ sock_reset_flag(sk, SOCK_RCVTSTAMPNS);
+ }
+ break;
+
+ case SO_RCVLOWAT:
+ if (val < 0)
+ val = INT_MAX;
+ sk->sk_rcvlowat = val ? : 1;
+ break;
+
+ case SO_RCVTIMEO:
+ ret = sock_set_timeout(&sk->sk_rcvtimeo, optval, optlen);
+ break;
+
+ case SO_SNDTIMEO:
+ ret = sock_set_timeout(&sk->sk_sndtimeo, optval, optlen);
+ break;
+
+ case SO_ATTACH_FILTER:
+ ret = -EINVAL;
+ if (optlen == sizeof(struct sock_fprog)) {
+ struct sock_fprog fprog;
+
+ ret = -EFAULT;
+ if (copy_from_user(&fprog, optval, sizeof(fprog)))
+ break;
+
+ ret = sk_attach_filter(&fprog, sk);
+ }
+ break;
+
+ case SO_DETACH_FILTER:
+ ret = sk_detach_filter(sk);
+ break;
+
+ case SO_PASSSEC:
+ if (valbool)
+ set_bit(SOCK_PASSSEC, &sock->flags);
+ else
+ clear_bit(SOCK_PASSSEC, &sock->flags);
+ break;
+ case SO_MARK:
+ if (!capable(CAP_NET_ADMIN))
+ ret = -EPERM;
+ else {
+ sk->sk_mark = val;
+ }
+ break;
+
+ /* We implement the SO_SNDLOWAT etc to
+ not be settable (1003.1g 5.3) */
+ default:
+ ret = -ENOPROTOOPT;
+ break;
+ }
+ release_sock(sk);
+ return ret;
+}
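+/*
+ * Editorial illustration of the SO_RCVBUF handling above (hypothetical
+ * values, assuming sysctl_rmem_max is large enough that no clamping occurs):
+ *
+ *	int val = 65536;
+ *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val));
+ *
+ * stores sk_rcvbuf = 131072, i.e. the requested value doubled to account for
+ * struct sk_buff and other overhead, and that doubled value is what a later
+ * getsockopt(SO_RCVBUF) reports back.
+ */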
+
+
+int sock_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ union {
+ int val;
+ struct linger ling;
+ struct timeval tm;
+ } v;
+
+ unsigned int lv = sizeof(int);
+ int len;
+
+ if (get_user(len, optlen))
+ return -EFAULT;
+ if (len < 0)
+ return -EINVAL;
+
+ memset(&v, 0, sizeof(v));
+
+ switch(optname) {
+ case SO_DEBUG:
+ v.val = sock_flag(sk, SOCK_DBG);
+ break;
+
+ case SO_DONTROUTE:
+ v.val = sock_flag(sk, SOCK_LOCALROUTE);
+ break;
+
+ case SO_BROADCAST:
+ v.val = !!sock_flag(sk, SOCK_BROADCAST);
+ break;
+
+ case SO_SNDBUF:
+ v.val = sk->sk_sndbuf;
+ break;
+
+ case SO_RCVBUF:
+ v.val = sk->sk_rcvbuf;
+ break;
+
+ case SO_REUSEADDR:
+ v.val = sk->sk_reuse;
+ break;
+
+ case SO_KEEPALIVE:
+ v.val = !!sock_flag(sk, SOCK_KEEPOPEN);
+ break;
+
+ case SO_TYPE:
+ v.val = sk->sk_type;
+ break;
+
+ case SO_ERROR:
+ v.val = -sock_error(sk);
+ if (v.val==0)
+ v.val = xchg(&sk->sk_err_soft, 0);
+ break;
+
+ case SO_OOBINLINE:
+ v.val = !!sock_flag(sk, SOCK_URGINLINE);
+ break;
+
+ case SO_NO_CHECK:
+ v.val = sk->sk_no_check;
+ break;
+
+ case SO_PRIORITY:
+ v.val = sk->sk_priority;
+ break;
+
+ case SO_LINGER:
+ lv = sizeof(v.ling);
+ v.ling.l_onoff = !!sock_flag(sk, SOCK_LINGER);
+ v.ling.l_linger = sk->sk_lingertime / HZ;
+ break;
+
+ case SO_BSDCOMPAT:
+ sock_warn_obsolete_bsdism("getsockopt");
+ break;
+
+ case SO_TIMESTAMP:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMP) &&
+ !sock_flag(sk, SOCK_RCVTSTAMPNS);
+ break;
+
+ case SO_TIMESTAMPNS:
+ v.val = sock_flag(sk, SOCK_RCVTSTAMPNS);
+ break;
+
+ case SO_RCVTIMEO:
+ lv=sizeof(struct timeval);
+ if (sk->sk_rcvtimeo == MAX_SCHEDULE_TIMEOUT) {
+ v.tm.tv_sec = 0;
+ v.tm.tv_usec = 0;
+ } else {
+ v.tm.tv_sec = sk->sk_rcvtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_rcvtimeo % HZ) * 1000000) / HZ;
+ }
+ break;
+
+ case SO_SNDTIMEO:
+ lv=sizeof(struct timeval);
+ if (sk->sk_sndtimeo == MAX_SCHEDULE_TIMEOUT) {
+ v.tm.tv_sec = 0;
+ v.tm.tv_usec = 0;
+ } else {
+ v.tm.tv_sec = sk->sk_sndtimeo / HZ;
+ v.tm.tv_usec = ((sk->sk_sndtimeo % HZ) * 1000000) / HZ;
+ }
+ break;
+
+ case SO_RCVLOWAT:
+ v.val = sk->sk_rcvlowat;
+ break;
+
+ case SO_SNDLOWAT:
+ v.val=1;
+ break;
+
+ case SO_PASSCRED:
+ v.val = test_bit(SOCK_PASSCRED, &sock->flags) ? 1 : 0;
+ break;
+
+ case SO_PEERCRED:
+ if (len > sizeof(sk->sk_peercred))
+ len = sizeof(sk->sk_peercred);
+ if (copy_to_user(optval, &sk->sk_peercred, len))
+ return -EFAULT;
+ goto lenout;
+
+ case SO_PEERNAME:
+ {
+ char address[128];
+
+ if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
+ return -ENOTCONN;
+ if (lv < len)
+ return -EINVAL;
+ if (copy_to_user(optval, address, len))
+ return -EFAULT;
+ goto lenout;
+ }
+
+ /* Dubious BSD thing... Probably nobody even uses it, but
+ * the UNIX standard wants it for whatever reason... -DaveM
+ */
+ case SO_ACCEPTCONN:
+ v.val = sk->sk_state == TCP_LISTEN;
+ break;
+
+ case SO_PASSSEC:
+ v.val = test_bit(SOCK_PASSSEC, &sock->flags) ? 1 : 0;
+ break;
+
+ case SO_PEERSEC:
+ return security_socket_getpeersec_stream(sock, optval, optlen, len);
+
+ case SO_MARK:
+ v.val = sk->sk_mark;
+ break;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ if (len > lv)
+ len = lv;
+ if (copy_to_user(optval, &v, len))
+ return -EFAULT;
+lenout:
+ if (put_user(len, optlen))
+ return -EFAULT;
+ return 0;
+}
+
+/*
+ * Initialize an sk_lock.
+ *
+ * (We also register the sk_lock with the lock validator.)
+ */
+static inline void sock_lock_init(struct sock *sk)
+{
+ sock_lock_init_class_and_name(sk,
+ af_family_slock_key_strings[sk->sk_family],
+ af_family_slock_keys + sk->sk_family,
+ af_family_key_strings[sk->sk_family],
+ af_family_keys + sk->sk_family);
+}
+
+static void sock_copy(struct sock *nsk, const struct sock *osk)
+{
+#ifdef CONFIG_SECURITY_NETWORK
+ void *sptr = nsk->sk_security;
+#endif
+
+ memcpy(nsk, osk, osk->sk_prot->obj_size);
+#ifdef CONFIG_SECURITY_NETWORK
+ nsk->sk_security = sptr;
+ security_sk_clone(osk, nsk);
+#endif
+}
+
+static struct sock *sk_prot_alloc(struct proto *prot, gfp_t priority,
+ int family)
+{
+ struct sock *sk;
+ struct kmem_cache *slab;
+
+ slab = prot->slab;
+ if (slab != NULL)
+ sk = kmem_cache_alloc(slab, priority);
+ else
+ sk = kmalloc(prot->obj_size, priority);
+
+ if (sk != NULL) {
+ if (security_sk_alloc(sk, family, priority))
+ goto out_free;
+
+ if (!try_module_get(prot->owner))
+ goto out_free_sec;
+ }
+
+ return sk;
+
+out_free_sec:
+ security_sk_free(sk);
+out_free:
+ if (slab != NULL)
+ kmem_cache_free(slab, sk);
+ else
+ kfree(sk);
+ return NULL;
+}
+
+static void sk_prot_free(struct proto *prot, struct sock *sk)
+{
+ struct kmem_cache *slab;
+ struct module *owner;
+
+ owner = prot->owner;
+ slab = prot->slab;
+
+ security_sk_free(sk);
+ if (slab != NULL)
+ kmem_cache_free(slab, sk);
+ else
+ kfree(sk);
+ module_put(owner);
+}
+
+/**
+ * sk_alloc - All socket objects are allocated here
+ * @net: the applicable net namespace
+ * @family: protocol family
+ * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
+ * @prot: struct proto associated with this new sock instance
+ */
+struct sock *sk_alloc(struct net *net, int family, gfp_t priority,
+ struct proto *prot)
+{
+ struct sock *sk;
+
+ sk = sk_prot_alloc(prot, priority | __GFP_ZERO, family);
+ if (sk) {
+ sk->sk_family = family;
+ /*
+ * See comment in struct sock definition to understand
+ * why we need sk_prot_creator -acme
+ */
+ sk->sk_prot = sk->sk_prot_creator = prot;
+ sock_lock_init(sk);
+ sock_net_set(sk, get_net(net));
+ }
+
+ return sk;
+}
+
+void sk_free(struct sock *sk)
+{
+ struct sk_filter *filter;
+
+ if (sk->sk_destruct)
+ sk->sk_destruct(sk);
+
+ filter = rcu_dereference(sk->sk_filter);
+ if (filter) {
+ sk_filter_uncharge(sk, filter);
+ rcu_assign_pointer(sk->sk_filter, NULL);
+ }
+
+ sock_disable_timestamp(sk);
+
+ if (atomic_read(&sk->sk_omem_alloc))
+ printk(KERN_DEBUG "%s: optmem leakage (%d bytes) detected.\n",
+ __func__, atomic_read(&sk->sk_omem_alloc));
+
+ put_net(sock_net(sk));
+ sk_prot_free(sk->sk_prot_creator, sk);
+}
+
+/*
+ * The last sock_put should drop the reference to sk->sk_net. It has already
+ * been dropped in sk_change_net. Taking a reference to the stopping
+ * namespace is not an option.
+ * Take a reference to the socket to remove it from the hash while it is
+ * still _alive_, and after that destroy it in the context of init_net.
+ */
+void sk_release_kernel(struct sock *sk)
+{
+ if (sk == NULL || sk->sk_socket == NULL)
+ return;
+
+ sock_hold(sk);
+ sock_release(sk->sk_socket);
+ release_net(sock_net(sk));
+ sock_net_set(sk, get_net(&init_net));
+ sock_put(sk);
+}
+EXPORT_SYMBOL(sk_release_kernel);
+
+struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+{
+ struct sock *newsk;
+
+ newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
+ if (newsk != NULL) {
+ struct sk_filter *filter;
+
+ sock_copy(newsk, sk);
+
+ /* SANITY */
+ get_net(sock_net(newsk));
+ sk_node_init(&newsk->sk_node);
+ sock_lock_init(newsk);
+ bh_lock_sock(newsk);
+ newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
+
+ atomic_set(&newsk->sk_rmem_alloc, 0);
+ atomic_set(&newsk->sk_wmem_alloc, 0);
+ atomic_set(&newsk->sk_omem_alloc, 0);
+ skb_queue_head_init(&newsk->sk_receive_queue);
+ skb_queue_head_init(&newsk->sk_write_queue);
+#ifdef CONFIG_NET_DMA
+ skb_queue_head_init(&newsk->sk_async_wait_queue);
+#endif
+
+ rwlock_init(&newsk->sk_dst_lock);
+ rwlock_init(&newsk->sk_callback_lock);
+ lockdep_set_class_and_name(&newsk->sk_callback_lock,
+ af_callback_keys + newsk->sk_family,
+ af_family_clock_key_strings[newsk->sk_family]);
+
+ newsk->sk_dst_cache = NULL;
+ newsk->sk_wmem_queued = 0;
+ newsk->sk_forward_alloc = 0;
+ newsk->sk_send_head = NULL;
+ newsk->sk_userlocks = sk->sk_userlocks & ~SOCK_BINDPORT_LOCK;
+
+ sock_reset_flag(newsk, SOCK_DONE);
+ skb_queue_head_init(&newsk->sk_error_queue);
+
+ filter = newsk->sk_filter;
+ if (filter != NULL)
+ sk_filter_charge(newsk, filter);
+
+ if (unlikely(xfrm_sk_clone_policy(newsk))) {
+			/* It is still a raw copy of the parent, so invalidate
+			 * the destructor and do a plain sk_free() */
+ newsk->sk_destruct = NULL;
+ sk_free(newsk);
+ newsk = NULL;
+ goto out;
+ }
+
+ newsk->sk_err = 0;
+ newsk->sk_priority = 0;
+ atomic_set(&newsk->sk_refcnt, 2);
+
+ /*
+ * Increment the counter in the same struct proto as the master
+ * sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
+ * is the same as sk->sk_prot->socks, as this field was copied
+ * with memcpy).
+ *
+ * This _changes_ the previous behaviour, where
+ * tcp_create_openreq_child always was incrementing the
+		 * equivalent to tcp_prot->socks (inet_sock_nr), so this has
+		 * to be taken into account in all callers. -acme
+ */
+ sk_refcnt_debug_inc(newsk);
+ sk_set_socket(newsk, NULL);
+ newsk->sk_sleep = NULL;
+
+ if (newsk->sk_prot->sockets_allocated)
+ percpu_counter_inc(newsk->sk_prot->sockets_allocated);
+ }
+out:
+ return newsk;
+}
+
+EXPORT_SYMBOL_GPL(sk_clone);
+
+void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
+{
+ __sk_dst_set(sk, dst);
+ sk->sk_route_caps = dst->dev->features;
+ if (sk->sk_route_caps & NETIF_F_GSO)
+ sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
+ if (sk_can_gso(sk)) {
+ if (dst->header_len) {
+ sk->sk_route_caps &= ~NETIF_F_GSO_MASK;
+ } else {
+ sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
+ sk->sk_gso_max_size = dst->dev->gso_max_size;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(sk_setup_caps);
+
+void __init sk_init(void)
+{
+ if (num_physpages <= 4096) {
+ sysctl_wmem_max = 32767;
+ sysctl_rmem_max = 32767;
+ sysctl_wmem_default = 32767;
+ sysctl_rmem_default = 32767;
+ } else if (num_physpages >= 131072) {
+ sysctl_wmem_max = 131071;
+ sysctl_rmem_max = 131071;
+ }
+}
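+/*
+ * Editorial note on the thresholds above (assuming 4 KiB pages): 4096
+ * physical pages correspond to about 16 MiB of RAM, where the buffer limits
+ * are shrunk to 32767 bytes, and 131072 pages correspond to about 512 MiB,
+ * where the maxima are raised to 131071 bytes.
+ */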
+
+/*
+ * Simple resource managers for sockets.
+ */
+
+
+/*
+ * Write buffer destructor automatically called from kfree_skb.
+ */
+void sock_wfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+
+ /* In case it might be waiting for more memory. */
+ atomic_sub(skb->truesize, &sk->sk_wmem_alloc);
+ if (!sock_flag(sk, SOCK_USE_WRITE_QUEUE))
+ sk->sk_write_space(sk);
+ sock_put(sk);
+}
+
+/*
+ * Read buffer destructor automatically called from kfree_skb.
+ */
+void sock_rfree(struct sk_buff *skb)
+{
+ struct sock *sk = skb->sk;
+
+ atomic_sub(skb->truesize, &sk->sk_rmem_alloc);
+ sk_mem_uncharge(skb->sk, skb->truesize);
+}
+
+
+int sock_i_uid(struct sock *sk)
+{
+ int uid;
+
+ read_lock(&sk->sk_callback_lock);
+ uid = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_uid : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return uid;
+}
+
+unsigned long sock_i_ino(struct sock *sk)
+{
+ unsigned long ino;
+
+ read_lock(&sk->sk_callback_lock);
+ ino = sk->sk_socket ? SOCK_INODE(sk->sk_socket)->i_ino : 0;
+ read_unlock(&sk->sk_callback_lock);
+ return ino;
+}
+
+/*
+ * Allocate a skb from the socket's send buffer.
+ */
+struct sk_buff *sock_wmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
+{
+ if (force || atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ struct sk_buff * skb = alloc_skb(size, priority);
+ if (skb) {
+ skb_set_owner_w(skb, sk);
+ return skb;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a skb from the socket's receive buffer.
+ */
+struct sk_buff *sock_rmalloc(struct sock *sk, unsigned long size, int force,
+ gfp_t priority)
+{
+ if (force || atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
+ struct sk_buff *skb = alloc_skb(size, priority);
+ if (skb) {
+ skb_set_owner_r(skb, sk);
+ return skb;
+ }
+ }
+ return NULL;
+}
+
+/*
+ * Allocate a memory block from the socket's option memory buffer.
+ */
+void *sock_kmalloc(struct sock *sk, int size, gfp_t priority)
+{
+ if ((unsigned)size <= sysctl_optmem_max &&
+ atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
+ void *mem;
+ /* First do the add, to avoid the race if kmalloc
+ * might sleep.
+ */
+ atomic_add(size, &sk->sk_omem_alloc);
+ mem = kmalloc(size, priority);
+ if (mem)
+ return mem;
+ atomic_sub(size, &sk->sk_omem_alloc);
+ }
+ return NULL;
+}
+
+/*
+ * Free an option memory block.
+ */
+void sock_kfree_s(struct sock *sk, void *mem, int size)
+{
+ kfree(mem);
+ atomic_sub(size, &sk->sk_omem_alloc);
+}
+
+/* It is almost wait_for_tcp_memory minus release_sock/lock_sock.
+   I think these locks should be removed for datagram sockets.
+ */
+static long sock_wait_for_wmem(struct sock * sk, long timeo)
+{
+ DEFINE_WAIT(wait);
+
+ clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ for (;;) {
+ if (!timeo)
+ break;
+ if (signal_pending(current))
+ break;
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf)
+ break;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ break;
+ if (sk->sk_err)
+ break;
+ timeo = schedule_timeout(timeo);
+ }
+ finish_wait(sk->sk_sleep, &wait);
+ return timeo;
+}
+
+
+/*
+ * Generic send/receive buffer handlers
+ */
+
+static struct sk_buff *sock_alloc_send_pskb(struct sock *sk,
+ unsigned long header_len,
+ unsigned long data_len,
+ int noblock, int *errcode)
+{
+ struct sk_buff *skb;
+ gfp_t gfp_mask;
+ long timeo;
+ int err;
+
+ gfp_mask = sk->sk_allocation;
+ if (gfp_mask & __GFP_WAIT)
+ gfp_mask |= __GFP_REPEAT;
+
+ timeo = sock_sndtimeo(sk, noblock);
+ while (1) {
+ err = sock_error(sk);
+ if (err != 0)
+ goto failure;
+
+ err = -EPIPE;
+ if (sk->sk_shutdown & SEND_SHUTDOWN)
+ goto failure;
+
+ if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
+ skb = alloc_skb(header_len, gfp_mask);
+ if (skb) {
+ int npages;
+ int i;
+
+ /* No pages, we're done... */
+ if (!data_len)
+ break;
+
+ npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ skb->truesize += data_len;
+ skb_shinfo(skb)->nr_frags = npages;
+ for (i = 0; i < npages; i++) {
+ struct page *page;
+ skb_frag_t *frag;
+
+ page = alloc_pages(sk->sk_allocation, 0);
+ if (!page) {
+ err = -ENOBUFS;
+ skb_shinfo(skb)->nr_frags = i;
+ kfree_skb(skb);
+ goto failure;
+ }
+
+ frag = &skb_shinfo(skb)->frags[i];
+ frag->page = page;
+ frag->page_offset = 0;
+ frag->size = (data_len >= PAGE_SIZE ?
+ PAGE_SIZE :
+ data_len);
+ data_len -= PAGE_SIZE;
+ }
+
+ /* Full success... */
+ break;
+ }
+ err = -ENOBUFS;
+ goto failure;
+ }
+ set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
+ set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
+ err = -EAGAIN;
+ if (!timeo)
+ goto failure;
+ if (signal_pending(current))
+ goto interrupted;
+ timeo = sock_wait_for_wmem(sk, timeo);
+ }
+
+ skb_set_owner_w(skb, sk);
+ return skb;
+
+interrupted:
+ err = sock_intr_errno(timeo);
+failure:
+ *errcode = err;
+ return NULL;
+}
+
+struct sk_buff *sock_alloc_send_skb(struct sock *sk, unsigned long size,
+ int noblock, int *errcode)
+{
+ return sock_alloc_send_pskb(sk, size, 0, noblock, errcode);
+}
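+/*
+ * Illustrative example of the paged allocation above (hypothetical sizes,
+ * assuming 4 KiB pages): a call to sock_alloc_send_pskb() with
+ * header_len = 128 and data_len = 10000 yields an skb with a 128-byte linear
+ * part plus npages = (10000 + 4095) >> PAGE_SHIFT = 3 page fragments of
+ * 4096, 4096 and 1808 bytes respectively.
+ */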
+
+static void __lock_sock(struct sock *sk)
+{
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait_exclusive(&sk->sk_lock.wq, &wait,
+ TASK_UNINTERRUPTIBLE);
+ spin_unlock_bh(&sk->sk_lock.slock);
+ schedule();
+ spin_lock_bh(&sk->sk_lock.slock);
+ if (!sock_owned_by_user(sk))
+ break;
+ }
+ finish_wait(&sk->sk_lock.wq, &wait);
+}
+
+static void __release_sock(struct sock *sk)
+{
+ struct sk_buff *skb = sk->sk_backlog.head;
+
+ do {
+ sk->sk_backlog.head = sk->sk_backlog.tail = NULL;
+ bh_unlock_sock(sk);
+
+ do {
+ struct sk_buff *next = skb->next;
+
+ skb->next = NULL;
+ sk_backlog_rcv(sk, skb);
+
+ /*
+ * We are in process context here with softirqs
+ * disabled, use cond_resched_softirq() to preempt.
+ * This is safe to do because we've taken the backlog
+ * queue private:
+ */
+ cond_resched_softirq();
+
+ skb = next;
+ } while (skb != NULL);
+
+ bh_lock_sock(sk);
+ } while ((skb = sk->sk_backlog.head) != NULL);
+}
+
+/**
+ * sk_wait_data - wait for data to arrive at sk_receive_queue
+ * @sk: sock to wait on
+ * @timeo: for how long
+ *
+ * Now socket state including sk->sk_err is changed only under lock,
+ * hence we may omit checks after joining wait queue.
+ * We check receive queue before schedule() only as optimization;
+ * it is very likely that release_sock() added new data.
+ */
+int sk_wait_data(struct sock *sk, long *timeo)
+{
+ int rc;
+ DEFINE_WAIT(wait);
+
+ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue));
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ finish_wait(sk->sk_sleep, &wait);
+ return rc;
+}
+
+EXPORT_SYMBOL(sk_wait_data);
+
+/**
+ * __sk_mem_schedule - increase sk_forward_alloc and memory_allocated
+ * @sk: socket
+ * @size: memory size to allocate
+ * @kind: allocation type
+ *
+ * If kind is SK_MEM_SEND, it means wmem allocation. Otherwise it means
+ * rmem allocation. This function assumes that protocols which have
+ * memory_pressure use sk_wmem_queued as write buffer accounting.
+ */
+int __sk_mem_schedule(struct sock *sk, int size, int kind)
+{
+ struct proto *prot = sk->sk_prot;
+ int amt = sk_mem_pages(size);
+ int allocated;
+
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ allocated = atomic_add_return(amt, prot->memory_allocated);
+
+ /* Under limit. */
+ if (allocated <= prot->sysctl_mem[0]) {
+ if (prot->memory_pressure && *prot->memory_pressure)
+ *prot->memory_pressure = 0;
+ return 1;
+ }
+
+ /* Under pressure. */
+ if (allocated > prot->sysctl_mem[1])
+ if (prot->enter_memory_pressure)
+ prot->enter_memory_pressure(sk);
+
+ /* Over hard limit. */
+ if (allocated > prot->sysctl_mem[2])
+ goto suppress_allocation;
+
+ /* guarantee minimum buffer size under pressure */
+ if (kind == SK_MEM_RECV) {
+ if (atomic_read(&sk->sk_rmem_alloc) < prot->sysctl_rmem[0])
+ return 1;
+ } else { /* SK_MEM_SEND */
+ if (sk->sk_type == SOCK_STREAM) {
+ if (sk->sk_wmem_queued < prot->sysctl_wmem[0])
+ return 1;
+ } else if (atomic_read(&sk->sk_wmem_alloc) <
+ prot->sysctl_wmem[0])
+ return 1;
+ }
+
+ if (prot->memory_pressure) {
+ int alloc;
+
+ if (!*prot->memory_pressure)
+ return 1;
+ alloc = percpu_counter_read_positive(prot->sockets_allocated);
+ if (prot->sysctl_mem[2] > alloc *
+ sk_mem_pages(sk->sk_wmem_queued +
+ atomic_read(&sk->sk_rmem_alloc) +
+ sk->sk_forward_alloc))
+ return 1;
+ }
+
+suppress_allocation:
+
+ if (kind == SK_MEM_SEND && sk->sk_type == SOCK_STREAM) {
+ sk_stream_moderate_sndbuf(sk);
+
+ /* Fail only if socket is _under_ its sndbuf.
+		 * In this case we cannot block, so we have to fail.
+ */
+ if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+ return 1;
+ }
+
+ /* Alas. Undo changes. */
+ sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
+ atomic_sub(amt, prot->memory_allocated);
+ return 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_schedule);
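+/*
+ * Editorial sketch of the accounting above (assuming SK_MEM_QUANTUM equals a
+ * 4096-byte page): charging size = 3000 bytes gives amt = sk_mem_pages(3000)
+ * = 1, so sk_forward_alloc grows by 4096 and memory_allocated by one quantum;
+ * a 9000-byte charge would round up to amt = 3 quanta (12288 bytes).
+ */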
+
+/**
+ *	__sk_mem_reclaim - reclaim memory_allocated
+ * @sk: socket
+ */
+void __sk_mem_reclaim(struct sock *sk)
+{
+ struct proto *prot = sk->sk_prot;
+
+ atomic_sub(sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT,
+ prot->memory_allocated);
+ sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
+
+ if (prot->memory_pressure && *prot->memory_pressure &&
+ (atomic_read(prot->memory_allocated) < prot->sysctl_mem[0]))
+ *prot->memory_pressure = 0;
+}
+
+EXPORT_SYMBOL(__sk_mem_reclaim);
+
+
+/*
+ * Set of default routines for initialising struct proto_ops when
+ * the protocol does not support a particular function. In certain
+ * cases where it makes no sense for a protocol to have a "do nothing"
+ * function, some default processing is provided.
+ */
+
+int sock_no_bind(struct socket *sock, struct sockaddr *saddr, int len)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_connect(struct socket *sock, struct sockaddr *saddr,
+ int len, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_socketpair(struct socket *sock1, struct socket *sock2)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_accept(struct socket *sock, struct socket *newsock, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_getname(struct socket *sock, struct sockaddr *saddr,
+ int *len, int peer)
+{
+ return -EOPNOTSUPP;
+}
+
+unsigned int sock_no_poll(struct file * file, struct socket *sock, poll_table *pt)
+{
+ return 0;
+}
+
+int sock_no_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_listen(struct socket *sock, int backlog)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_shutdown(struct socket *sock, int how)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t len)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m,
+ size_t len, int flags)
+{
+ return -EOPNOTSUPP;
+}
+
+int sock_no_mmap(struct file *file, struct socket *sock, struct vm_area_struct *vma)
+{
+ /* Mirror missing mmap method error code */
+ return -ENODEV;
+}
+
+ssize_t sock_no_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags)
+{
+ ssize_t res;
+ struct msghdr msg = {.msg_flags = flags};
+ struct kvec iov;
+ char *kaddr = kmap(page);
+ iov.iov_base = kaddr + offset;
+ iov.iov_len = size;
+ res = kernel_sendmsg(sock, &msg, &iov, 1, size);
+ kunmap(page);
+ return res;
+}
+
+/*
+ * Default Socket Callbacks
+ */
+
+static void sock_def_wakeup(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_all(sk->sk_sleep);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_error_report(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_ERR);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_readable(struct sock *sk, int len)
+{
+ read_lock(&sk->sk_callback_lock);
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_write_space(struct sock *sk)
+{
+ read_lock(&sk->sk_callback_lock);
+
+ /* Do not wake up a writer until he can make "significant"
+ * progress. --DaveM
+ */
+ if ((atomic_read(&sk->sk_wmem_alloc) << 1) <= sk->sk_sndbuf) {
+ if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
+ wake_up_interruptible_sync(sk->sk_sleep);
+
+ /* Should agree with poll, otherwise some programs break */
+ if (sock_writeable(sk))
+ sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT);
+ }
+
+ read_unlock(&sk->sk_callback_lock);
+}
+
+static void sock_def_destruct(struct sock *sk)
+{
+ kfree(sk->sk_protinfo);
+}
+
+void sk_send_sigurg(struct sock *sk)
+{
+ if (sk->sk_socket && sk->sk_socket->file)
+ if (send_sigurg(&sk->sk_socket->file->f_owner))
+ sk_wake_async(sk, SOCK_WAKE_URG, POLL_PRI);
+}
+
+void sk_reset_timer(struct sock *sk, struct timer_list* timer,
+ unsigned long expires)
+{
+ if (!mod_timer(timer, expires))
+ sock_hold(sk);
+}
+
+EXPORT_SYMBOL(sk_reset_timer);
+
+void sk_stop_timer(struct sock *sk, struct timer_list* timer)
+{
+ if (timer_pending(timer) && del_timer(timer))
+ __sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_stop_timer);
+
+void sock_init_data(struct socket *sock, struct sock *sk)
+{
+ skb_queue_head_init(&sk->sk_receive_queue);
+ skb_queue_head_init(&sk->sk_write_queue);
+ skb_queue_head_init(&sk->sk_error_queue);
+#ifdef CONFIG_NET_DMA
+ skb_queue_head_init(&sk->sk_async_wait_queue);
+#endif
+
+ sk->sk_send_head = NULL;
+
+ init_timer(&sk->sk_timer);
+
+ sk->sk_allocation = GFP_KERNEL;
+ sk->sk_rcvbuf = sysctl_rmem_default;
+ sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_state = TCP_CLOSE;
+ sk_set_socket(sk, sock);
+
+ sock_set_flag(sk, SOCK_ZAPPED);
+
+ if (sock) {
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
+ sock->sk = sk;
+ } else
+ sk->sk_sleep = NULL;
+
+ rwlock_init(&sk->sk_dst_lock);
+ rwlock_init(&sk->sk_callback_lock);
+ lockdep_set_class_and_name(&sk->sk_callback_lock,
+ af_callback_keys + sk->sk_family,
+ af_family_clock_key_strings[sk->sk_family]);
+
+ sk->sk_state_change = sock_def_wakeup;
+ sk->sk_data_ready = sock_def_readable;
+ sk->sk_write_space = sock_def_write_space;
+ sk->sk_error_report = sock_def_error_report;
+ sk->sk_destruct = sock_def_destruct;
+
+ sk->sk_sndmsg_page = NULL;
+ sk->sk_sndmsg_off = 0;
+
+ sk->sk_peercred.pid = 0;
+ sk->sk_peercred.uid = -1;
+ sk->sk_peercred.gid = -1;
+ sk->sk_write_pending = 0;
+ sk->sk_rcvlowat = 1;
+ sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
+ sk->sk_sndtimeo = MAX_SCHEDULE_TIMEOUT;
+
+ sk->sk_stamp = ktime_set(-1L, 0);
+
+ atomic_set(&sk->sk_refcnt, 1);
+ atomic_set(&sk->sk_drops, 0);
+}
+
+void lock_sock_nested(struct sock *sk, int subclass)
+{
+ might_sleep();
+ spin_lock_bh(&sk->sk_lock.slock);
+ if (sk->sk_lock.owned)
+ __lock_sock(sk);
+ sk->sk_lock.owned = 1;
+ spin_unlock(&sk->sk_lock.slock);
+ /*
+ * The sk_lock has mutex_lock() semantics here:
+ */
+ mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_);
+ local_bh_enable();
+}
+
+EXPORT_SYMBOL(lock_sock_nested);
+
+void release_sock(struct sock *sk)
+{
+ /*
+ * The sk_lock has mutex_unlock() semantics:
+ */
+ mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+
+ spin_lock_bh(&sk->sk_lock.slock);
+ if (sk->sk_backlog.tail)
+ __release_sock(sk);
+ sk->sk_lock.owned = 0;
+ if (waitqueue_active(&sk->sk_lock.wq))
+ wake_up(&sk->sk_lock.wq);
+ spin_unlock_bh(&sk->sk_lock.slock);
+}
+EXPORT_SYMBOL(release_sock);
+
+int sock_get_timestamp(struct sock *sk, struct timeval __user *userstamp)
+{
+ struct timeval tv;
+ if (!sock_flag(sk, SOCK_TIMESTAMP))
+ sock_enable_timestamp(sk);
+ tv = ktime_to_timeval(sk->sk_stamp);
+ if (tv.tv_sec == -1)
+ return -ENOENT;
+ if (tv.tv_sec == 0) {
+ sk->sk_stamp = ktime_get_real();
+ tv = ktime_to_timeval(sk->sk_stamp);
+ }
+ return copy_to_user(userstamp, &tv, sizeof(tv)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(sock_get_timestamp);
+
+int sock_get_timestampns(struct sock *sk, struct timespec __user *userstamp)
+{
+ struct timespec ts;
+ if (!sock_flag(sk, SOCK_TIMESTAMP))
+ sock_enable_timestamp(sk);
+ ts = ktime_to_timespec(sk->sk_stamp);
+ if (ts.tv_sec == -1)
+ return -ENOENT;
+ if (ts.tv_sec == 0) {
+ sk->sk_stamp = ktime_get_real();
+ ts = ktime_to_timespec(sk->sk_stamp);
+ }
+ return copy_to_user(userstamp, &ts, sizeof(ts)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL(sock_get_timestampns);
+
+void sock_enable_timestamp(struct sock *sk)
+{
+ if (!sock_flag(sk, SOCK_TIMESTAMP)) {
+ sock_set_flag(sk, SOCK_TIMESTAMP);
+ net_enable_timestamp();
+ }
+}
+
+/*
+ *	Get a socket option on a socket.
+ *
+ * FIX: POSIX 1003.1g is very ambiguous here. It states that
+ * asynchronous errors should be reported by getsockopt. We assume
+ *	this means if you specify SO_ERROR (otherwise what's the point of it).
+ */
+int sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_getsockopt);
+
+#ifdef CONFIG_COMPAT
+int compat_sock_common_getsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->compat_getsockopt != NULL)
+ return sk->sk_prot->compat_getsockopt(sk, level, optname,
+ optval, optlen);
+ return sk->sk_prot->getsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_getsockopt);
+#endif
+
+int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+{
+ struct sock *sk = sock->sk;
+ int addr_len = 0;
+ int err;
+
+ err = sk->sk_prot->recvmsg(iocb, sk, msg, size, flags & MSG_DONTWAIT,
+ flags & ~MSG_DONTWAIT, &addr_len);
+ if (err >= 0)
+ msg->msg_namelen = addr_len;
+ return err;
+}
+
+EXPORT_SYMBOL(sock_common_recvmsg);
+
+/*
+ * Set socket options on an inet socket.
+ */
+int sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+
+EXPORT_SYMBOL(sock_common_setsockopt);
+
+#ifdef CONFIG_COMPAT
+int compat_sock_common_setsockopt(struct socket *sock, int level, int optname,
+ char __user *optval, int optlen)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk->sk_prot->compat_setsockopt != NULL)
+ return sk->sk_prot->compat_setsockopt(sk, level, optname,
+ optval, optlen);
+ return sk->sk_prot->setsockopt(sk, level, optname, optval, optlen);
+}
+EXPORT_SYMBOL(compat_sock_common_setsockopt);
+#endif
+
+void sk_common_release(struct sock *sk)
+{
+ if (sk->sk_prot->destroy)
+ sk->sk_prot->destroy(sk);
+
+ /*
+ * Observation: when sock_common_release is called, processes have
+	 * no access to the socket any more, but the network stack still does.
+ * Step one, detach it from networking:
+ *
+ * A. Remove from hash tables.
+ */
+
+ sk->sk_prot->unhash(sk);
+
+ /*
+	 * At this point the socket cannot receive new packets, but it is
+	 * possible that some packets are still in flight, because some CPU is
+	 * running the receiver and did the hash table lookup before we
+	 * unhashed the socket. They will reach the receive queue and be purged
+	 * by the socket destructor.
+	 *
+	 * Also we still have packets pending on the receive queue and probably
+	 * our own packets waiting in device queues. sock_destroy will drain the
+	 * receive queue, but transmitted packets will delay socket destruction
+	 * until the last reference is released.
+ */
+
+ sock_orphan(sk);
+
+ xfrm_sk_free_policy(sk);
+
+ sk_refcnt_debug_release(sk);
+ sock_put(sk);
+}
+
+EXPORT_SYMBOL(sk_common_release);
+
+static DEFINE_RWLOCK(proto_list_lock);
+static LIST_HEAD(proto_list);
+
+#ifdef CONFIG_PROC_FS
+#define PROTO_INUSE_NR 64 /* should be enough for the first time */
+struct prot_inuse {
+ int val[PROTO_INUSE_NR];
+};
+
+static DECLARE_BITMAP(proto_inuse_idx, PROTO_INUSE_NR);
+
+#ifdef CONFIG_NET_NS
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+ int cpu = smp_processor_id();
+ per_cpu_ptr(net->core.inuse, cpu)->val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+ int cpu, idx = prot->inuse_idx;
+ int res = 0;
+
+ for_each_possible_cpu(cpu)
+ res += per_cpu_ptr(net->core.inuse, cpu)->val[idx];
+
+ return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+
+static int sock_inuse_init_net(struct net *net)
+{
+ net->core.inuse = alloc_percpu(struct prot_inuse);
+ return net->core.inuse ? 0 : -ENOMEM;
+}
+
+static void sock_inuse_exit_net(struct net *net)
+{
+ free_percpu(net->core.inuse);
+}
+
+static struct pernet_operations net_inuse_ops = {
+ .init = sock_inuse_init_net,
+ .exit = sock_inuse_exit_net,
+};
+
+static __init int net_inuse_init(void)
+{
+ if (register_pernet_subsys(&net_inuse_ops))
+ panic("Cannot initialize net inuse counters");
+
+ return 0;
+}
+
+core_initcall(net_inuse_init);
+#else
+static DEFINE_PER_CPU(struct prot_inuse, prot_inuse);
+
+void sock_prot_inuse_add(struct net *net, struct proto *prot, int val)
+{
+ __get_cpu_var(prot_inuse).val[prot->inuse_idx] += val;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_add);
+
+int sock_prot_inuse_get(struct net *net, struct proto *prot)
+{
+ int cpu, idx = prot->inuse_idx;
+ int res = 0;
+
+ for_each_possible_cpu(cpu)
+ res += per_cpu(prot_inuse, cpu).val[idx];
+
+ return res >= 0 ? res : 0;
+}
+EXPORT_SYMBOL_GPL(sock_prot_inuse_get);
+#endif
+
+static void assign_proto_idx(struct proto *prot)
+{
+ prot->inuse_idx = find_first_zero_bit(proto_inuse_idx, PROTO_INUSE_NR);
+
+ if (unlikely(prot->inuse_idx == PROTO_INUSE_NR - 1)) {
+ printk(KERN_ERR "PROTO_INUSE_NR exhausted\n");
+ return;
+ }
+
+ set_bit(prot->inuse_idx, proto_inuse_idx);
+}
+
+static void release_proto_idx(struct proto *prot)
+{
+ if (prot->inuse_idx != PROTO_INUSE_NR - 1)
+ clear_bit(prot->inuse_idx, proto_inuse_idx);
+}
+#else
+static inline void assign_proto_idx(struct proto *prot)
+{
+}
+
+static inline void release_proto_idx(struct proto *prot)
+{
+}
+#endif
+
+int proto_register(struct proto *prot, int alloc_slab)
+{
+ if (alloc_slab) {
+ prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN | prot->slab_flags,
+ NULL);
+
+ if (prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create sock SLAB cache!\n",
+ prot->name);
+ goto out;
+ }
+
+ if (prot->rsk_prot != NULL) {
+ static const char mask[] = "request_sock_%s";
+
+ prot->rsk_prot->slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+ if (prot->rsk_prot->slab_name == NULL)
+ goto out_free_sock_slab;
+
+ sprintf(prot->rsk_prot->slab_name, mask, prot->name);
+ prot->rsk_prot->slab = kmem_cache_create(prot->rsk_prot->slab_name,
+ prot->rsk_prot->obj_size, 0,
+ SLAB_HWCACHE_ALIGN, NULL);
+
+ if (prot->rsk_prot->slab == NULL) {
+ printk(KERN_CRIT "%s: Can't create request sock SLAB cache!\n",
+ prot->name);
+ goto out_free_request_sock_slab_name;
+ }
+ }
+
+ if (prot->twsk_prot != NULL) {
+ static const char mask[] = "tw_sock_%s";
+
+ prot->twsk_prot->twsk_slab_name = kmalloc(strlen(prot->name) + sizeof(mask) - 1, GFP_KERNEL);
+
+ if (prot->twsk_prot->twsk_slab_name == NULL)
+ goto out_free_request_sock_slab;
+
+ sprintf(prot->twsk_prot->twsk_slab_name, mask, prot->name);
+ prot->twsk_prot->twsk_slab =
+ kmem_cache_create(prot->twsk_prot->twsk_slab_name,
+ prot->twsk_prot->twsk_obj_size,
+ 0,
+ SLAB_HWCACHE_ALIGN |
+ prot->slab_flags,
+ NULL);
+ if (prot->twsk_prot->twsk_slab == NULL)
+ goto out_free_timewait_sock_slab_name;
+ }
+ }
+
+ write_lock(&proto_list_lock);
+ list_add(&prot->node, &proto_list);
+ assign_proto_idx(prot);
+ write_unlock(&proto_list_lock);
+ return 0;
+
+out_free_timewait_sock_slab_name:
+ kfree(prot->twsk_prot->twsk_slab_name);
+out_free_request_sock_slab:
+ if (prot->rsk_prot && prot->rsk_prot->slab) {
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ prot->rsk_prot->slab = NULL;
+ }
+out_free_request_sock_slab_name:
+ kfree(prot->rsk_prot->slab_name);
+out_free_sock_slab:
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+out:
+ return -ENOBUFS;
+}
+
+EXPORT_SYMBOL(proto_register);
+
+void proto_unregister(struct proto *prot)
+{
+ write_lock(&proto_list_lock);
+ release_proto_idx(prot);
+ list_del(&prot->node);
+ write_unlock(&proto_list_lock);
+
+ if (prot->slab != NULL) {
+ kmem_cache_destroy(prot->slab);
+ prot->slab = NULL;
+ }
+
+ if (prot->rsk_prot != NULL && prot->rsk_prot->slab != NULL) {
+ kmem_cache_destroy(prot->rsk_prot->slab);
+ kfree(prot->rsk_prot->slab_name);
+ prot->rsk_prot->slab = NULL;
+ }
+
+ if (prot->twsk_prot != NULL && prot->twsk_prot->twsk_slab != NULL) {
+ kmem_cache_destroy(prot->twsk_prot->twsk_slab);
+ kfree(prot->twsk_prot->twsk_slab_name);
+ prot->twsk_prot->twsk_slab = NULL;
+ }
+}
+
+EXPORT_SYMBOL(proto_unregister);
+
+#ifdef CONFIG_PROC_FS
+static void *proto_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(proto_list_lock)
+{
+ read_lock(&proto_list_lock);
+ return seq_list_start_head(&proto_list, *pos);
+}
+
+static void *proto_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ return seq_list_next(v, &proto_list, pos);
+}
+
+static void proto_seq_stop(struct seq_file *seq, void *v)
+ __releases(proto_list_lock)
+{
+ read_unlock(&proto_list_lock);
+}
+
+static char proto_method_implemented(const void *method)
+{
+ return method == NULL ? 'n' : 'y';
+}
+
+static void proto_seq_printf(struct seq_file *seq, struct proto *proto)
+{
+ seq_printf(seq, "%-9s %4u %6d %6d %-3s %6u %-3s %-10s "
+ "%2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c %2c\n",
+ proto->name,
+ proto->obj_size,
+ sock_prot_inuse_get(seq_file_net(seq), proto),
+ proto->memory_allocated != NULL ? atomic_read(proto->memory_allocated) : -1,
+ proto->memory_pressure != NULL ? *proto->memory_pressure ? "yes" : "no" : "NI",
+ proto->max_header,
+ proto->slab == NULL ? "no" : "yes",
+ module_name(proto->owner),
+ proto_method_implemented(proto->close),
+ proto_method_implemented(proto->connect),
+ proto_method_implemented(proto->disconnect),
+ proto_method_implemented(proto->accept),
+ proto_method_implemented(proto->ioctl),
+ proto_method_implemented(proto->init),
+ proto_method_implemented(proto->destroy),
+ proto_method_implemented(proto->shutdown),
+ proto_method_implemented(proto->setsockopt),
+ proto_method_implemented(proto->getsockopt),
+ proto_method_implemented(proto->sendmsg),
+ proto_method_implemented(proto->recvmsg),
+ proto_method_implemented(proto->sendpage),
+ proto_method_implemented(proto->bind),
+ proto_method_implemented(proto->backlog_rcv),
+ proto_method_implemented(proto->hash),
+ proto_method_implemented(proto->unhash),
+ proto_method_implemented(proto->get_port),
+ proto_method_implemented(proto->enter_memory_pressure));
+}
+
+static int proto_seq_show(struct seq_file *seq, void *v)
+{
+ if (v == &proto_list)
+ seq_printf(seq, "%-9s %-4s %-8s %-6s %-5s %-7s %-4s %-10s %s",
+ "protocol",
+ "size",
+ "sockets",
+ "memory",
+ "press",
+ "maxhdr",
+ "slab",
+ "module",
+ "cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n");
+ else
+ proto_seq_printf(seq, list_entry(v, struct proto, node));
+ return 0;
+}
+
+static const struct seq_operations proto_seq_ops = {
+ .start = proto_seq_start,
+ .next = proto_seq_next,
+ .stop = proto_seq_stop,
+ .show = proto_seq_show,
+};
+
+static int proto_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open_net(inode, file, &proto_seq_ops,
+ sizeof(struct seq_net_private));
+}
+
+static const struct file_operations proto_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = proto_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_net,
+};
+
+static __net_init int proto_init_net(struct net *net)
+{
+ if (!proc_net_fops_create(net, "protocols", S_IRUGO, &proto_seq_fops))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static __net_exit void proto_exit_net(struct net *net)
+{
+ proc_net_remove(net, "protocols");
+}
+
+
+static __net_initdata struct pernet_operations proto_net_ops = {
+ .init = proto_init_net,
+ .exit = proto_exit_net,
+};
+
+static int __init proto_init(void)
+{
+ return register_pernet_subsys(&proto_net_ops);
+}
+
+subsys_initcall(proto_init);
+
+#endif /* PROC_FS */
+
+EXPORT_SYMBOL(sk_alloc);
+EXPORT_SYMBOL(sk_free);
+EXPORT_SYMBOL(sk_send_sigurg);
+EXPORT_SYMBOL(sock_alloc_send_skb);
+EXPORT_SYMBOL(sock_init_data);
+EXPORT_SYMBOL(sock_kfree_s);
+EXPORT_SYMBOL(sock_kmalloc);
+EXPORT_SYMBOL(sock_no_accept);
+EXPORT_SYMBOL(sock_no_bind);
+EXPORT_SYMBOL(sock_no_connect);
+EXPORT_SYMBOL(sock_no_getname);
+EXPORT_SYMBOL(sock_no_getsockopt);
+EXPORT_SYMBOL(sock_no_ioctl);
+EXPORT_SYMBOL(sock_no_listen);
+EXPORT_SYMBOL(sock_no_mmap);
+EXPORT_SYMBOL(sock_no_poll);
+EXPORT_SYMBOL(sock_no_recvmsg);
+EXPORT_SYMBOL(sock_no_sendmsg);
+EXPORT_SYMBOL(sock_no_sendpage);
+EXPORT_SYMBOL(sock_no_setsockopt);
+EXPORT_SYMBOL(sock_no_shutdown);
+EXPORT_SYMBOL(sock_no_socketpair);
+EXPORT_SYMBOL(sock_rfree);
+EXPORT_SYMBOL(sock_setsockopt);
+EXPORT_SYMBOL(sock_wfree);
+EXPORT_SYMBOL(sock_wmalloc);
+EXPORT_SYMBOL(sock_i_uid);
+EXPORT_SYMBOL(sock_i_ino);
+EXPORT_SYMBOL(sysctl_optmem_max);
diff --git a/libdde_linux26/contrib/net/ethernet/.svn/all-wcprops b/libdde_linux26/contrib/net/ethernet/.svn/all-wcprops
new file mode 100644
index 00000000..927d242c
--- /dev/null
+++ b/libdde_linux26/contrib/net/ethernet/.svn/all-wcprops
@@ -0,0 +1,11 @@
+K 25
+svn:wc:ra_dav:version-url
+V 71
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/ethernet
+END
+eth.c
+K 25
+svn:wc:ra_dav:version-url
+V 77
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/ethernet/eth.c
+END
diff --git a/libdde_linux26/contrib/net/ethernet/.svn/entries b/libdde_linux26/contrib/net/ethernet/.svn/entries
new file mode 100644
index 00000000..1a83a365
--- /dev/null
+++ b/libdde_linux26/contrib/net/ethernet/.svn/entries
@@ -0,0 +1,62 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/contrib/net/ethernet
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+eth.c
+file
+
+
+
+
+2009-11-15T17:16:35.000000Z
+9e32e52c590d3469dc0e13f209149807
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+11140
+
diff --git a/libdde_linux26/contrib/net/ethernet/.svn/format b/libdde_linux26/contrib/net/ethernet/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/contrib/net/ethernet/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/contrib/net/ethernet/.svn/text-base/eth.c.svn-base b/libdde_linux26/contrib/net/ethernet/.svn/text-base/eth.c.svn-base
new file mode 100644
index 00000000..280352ab
--- /dev/null
+++ b/libdde_linux26/contrib/net/ethernet/.svn/text-base/eth.c.svn-base
@@ -0,0 +1,407 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff.
+ * Alan Cox : MAC layer pointers/new format.
+ * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
+ * Alan Cox : Protect against forwarding explosions with
+ * older network drivers and IFF_ALLMULTI.
+ * Christer Weinigel : Better rebuild header message.
+ * Andrew Morton : 26Feb01: kill ether_setup() - use netdev_boot_setup().
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/if_ether.h>
+#include <net/dst.h>
+#include <net/arp.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+#include <net/dsa.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+__setup("ether=", netdev_boot_setup);
+
+/**
+ * eth_header - create the Ethernet header
+ * @skb: buffer to alter
+ * @dev: source device
+ * @type: Ethernet type field
+ * @daddr: destination address (NULL leave destination address)
+ * @saddr: source address (NULL use device source address)
+ * @len: packet length (<= skb->len)
+ *
+ *
+ * Set the protocol type. For a packet of type ETH_P_802_3 we put the length
+ * in here instead. It is up to the 802.2 layer to carry protocol information.
+ */
+int eth_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned len)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+
+ if (type != ETH_P_802_3)
+ eth->h_proto = htons(type);
+ else
+ eth->h_proto = htons(len);
+
+ /*
+ * Set the source hardware address.
+ */
+
+ if (!saddr)
+ saddr = dev->dev_addr;
+ memcpy(eth->h_source, saddr, ETH_ALEN);
+
+ if (daddr) {
+ memcpy(eth->h_dest, daddr, ETH_ALEN);
+ return ETH_HLEN;
+ }
+
+ /*
+ * Anyway, the loopback-device should never use this function...
+ */
+
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ memset(eth->h_dest, 0, ETH_ALEN);
+ return ETH_HLEN;
+ }
+
+ return -ETH_HLEN;
+}
+EXPORT_SYMBOL(eth_header);
+
+/**
+ * eth_rebuild_header- rebuild the Ethernet MAC header.
+ * @skb: socket buffer to update
+ *
+ * This is called after an ARP (or IPV6 ndisc) has completed resolution on this
+ * sk_buff. We now let protocol (ARP) fill in the other fields.
+ *
+ * This routine CANNOT use cached dst->neigh!
+ * Really, it is used only when dst->neigh is wrong.
+ */
+int eth_rebuild_header(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct net_device *dev = skb->dev;
+
+ switch (eth->h_proto) {
+#ifdef CONFIG_INET
+ case htons(ETH_P_IP):
+ return arp_find(eth->h_dest, skb);
+#endif
+ default:
+ printk(KERN_DEBUG
+ "%s: unable to resolve type %X addresses.\n",
+ dev->name, (int)eth->h_proto);
+
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(eth_rebuild_header);
+
+/**
+ * eth_type_trans - determine the packet's protocol ID.
+ * @skb: received socket data
+ * @dev: receiving network device
+ *
+ * The rule here is that we
+ * assume 802.3 if the type field is short enough to be a length.
+ * This is normal practice and works for any 'now in use' protocol.
+ */
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethhdr *eth;
+ unsigned char *rawp;
+
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb_pull(skb, ETH_HLEN);
+ eth = eth_hdr(skb);
+
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ }
+
+ /*
+ * This ALLMULTI check should be redundant by 1.4
+ * so don't forget to remove it.
+ *
+	 *      It seems you forgot to remove it. All silly devices
+	 *      seem to set IFF_PROMISC.
+ */
+
+ else if (1 /*dev->flags&IFF_PROMISC */ ) {
+ if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+
+ /*
+ * Some variants of DSA tagging don't have an ethertype field
+ * at all, so we check here whether one of those tagging
+ * variants has been configured on the receiving interface,
+ * and if so, set skb->protocol without looking at the packet.
+ */
+ if (netdev_uses_dsa_tags(dev))
+ return htons(ETH_P_DSA);
+ if (netdev_uses_trailer_tags(dev))
+ return htons(ETH_P_TRAILER);
+
+ if (ntohs(eth->h_proto) >= 1536)
+ return eth->h_proto;
+
+ rawp = skb->data;
+
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell breaks
+ * the protocol design and runs IPX over 802.3 without an 802.2 LLC
+ * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+ * won't work for fault tolerant netware but does for the rest.
+ */
+ if (*(unsigned short *)rawp == 0xFFFF)
+ return htons(ETH_P_802_3);
+
+ /*
+ * Real 802.2 LLC
+ */
+ return htons(ETH_P_802_2);
+}
+EXPORT_SYMBOL(eth_type_trans);
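+/*
+ * Editorial example of the classification above: a frame whose h_proto field
+ * is htons(0x0800) (>= 1536) is returned as-is, i.e. ETH_P_IP, while a value
+ * such as htons(100) is treated as an 802.3 length, so the payload is probed
+ * for the 0xFFFF IPX marker and otherwise reported as ETH_P_802_2.
+ */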
+
+/**
+ * eth_header_parse - extract hardware address from packet
+ * @skb: packet to extract header from
+ * @haddr: destination buffer
+ */
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+ memcpy(haddr, eth->h_source, ETH_ALEN);
+ return ETH_ALEN;
+}
+EXPORT_SYMBOL(eth_header_parse);
+
+/**
+ * eth_header_cache - fill cache entry from neighbour
+ * @neigh: source neighbour
+ * @hh: destination cache entry
+ * Create an Ethernet header template from the neighbour.
+ */
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
+{
+ __be16 type = hh->hh_type;
+ struct ethhdr *eth;
+ const struct net_device *dev = neigh->dev;
+
+ eth = (struct ethhdr *)
+ (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));
+
+ if (type == htons(ETH_P_802_3))
+ return -1;
+
+ eth->h_proto = type;
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+ hh->hh_len = ETH_HLEN;
+ return 0;
+}
+EXPORT_SYMBOL(eth_header_cache);
+
+/**
+ * eth_header_cache_update - update cache entry
+ * @hh: destination cache entry
+ * @dev: network device
+ * @haddr: new hardware address
+ *
+ * Called by Address Resolution module to notify changes in address.
+ */
+void eth_header_cache_update(struct hh_cache *hh,
+ const struct net_device *dev,
+ const unsigned char *haddr)
+{
+ memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
+ haddr, ETH_ALEN);
+}
+EXPORT_SYMBOL(eth_header_cache_update);
+
+/**
+ * eth_mac_addr - set new Ethernet hardware address
+ * @dev: network device
+ * @p: socket address
+ * Change hardware address of device.
+ *
+ * This doesn't change hardware matching, so needs to be overridden
+ * for most real devices.
+ */
+int eth_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ return 0;
+}
+EXPORT_SYMBOL(eth_mac_addr);
+
+/**
+ * eth_change_mtu - set new MTU size
+ * @dev: network device
+ * @new_mtu: new Maximum Transfer Unit
+ *
+ * Allow changing MTU size. Needs to be overridden for devices
+ * supporting jumbo frames.
+ */
+int eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL(eth_change_mtu);
+
+int eth_validate_addr(struct net_device *dev)
+{
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+EXPORT_SYMBOL(eth_validate_addr);
+
+const struct header_ops eth_header_ops ____cacheline_aligned = {
+ .create = eth_header,
+ .parse = eth_header_parse,
+ .rebuild = eth_rebuild_header,
+ .cache = eth_header_cache,
+ .cache_update = eth_header_cache_update,
+};
+
+/**
+ * ether_setup - setup Ethernet network device
+ * @dev: network device
+ * Fill in the fields of the device structure with Ethernet-generic values.
+ */
+void ether_setup(struct net_device *dev)
+{
+ dev->header_ops = &eth_header_ops;
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ dev->change_mtu = eth_change_mtu;
+ dev->set_mac_address = eth_mac_addr;
+ dev->validate_addr = eth_validate_addr;
+#endif
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = ETH_DATA_LEN;
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 1000; /* Ethernet wants good queues */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
+
+}
+EXPORT_SYMBOL(ether_setup);
+
+/**
+ * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * @sizeof_priv: Size of additional driver-private structure to be allocated
+ * for this Ethernet device
+ * @queue_count: The number of queues this device has.
+ *
+ * Fill in the fields of the device structure with Ethernet-generic
+ * values. Basically does everything except registering the device.
+ *
+ * Constructs a new net device, complete with a private data area of
+ * size (sizeof_priv). A 32-byte (not bit) alignment is enforced for
+ * this private data area.
+ */
+
+struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+{
+ return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+}
+EXPORT_SYMBOL(alloc_etherdev_mq);
+
+static size_t _format_mac_addr(char *buf, int buflen,
+ const unsigned char *addr, int len)
+{
+ int i;
+ char *cp = buf;
+
+ for (i = 0; i < len; i++) {
+ cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
+ if (i == len - 1)
+ break;
+ cp += strlcpy(cp, ":", buflen - (cp - buf));
+ }
+ return cp - buf;
+}
+
+ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
+{
+ size_t l;
+
+ l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
+ l += strlcpy(buf + l, "\n", PAGE_SIZE - l);
+ return ((ssize_t) l);
+}
+EXPORT_SYMBOL(sysfs_format_mac);
+
+char *print_mac(char *buf, const unsigned char *addr)
+{
+ _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN);
+ return buf;
+}
+EXPORT_SYMBOL(print_mac);
diff --git a/libdde_linux26/contrib/net/ethernet/eth.c b/libdde_linux26/contrib/net/ethernet/eth.c
new file mode 100644
index 00000000..280352ab
--- /dev/null
+++ b/libdde_linux26/contrib/net/ethernet/eth.c
@@ -0,0 +1,407 @@
+/*
+ * INET An implementation of the TCP/IP protocol suite for the LINUX
+ * operating system. INET is implemented using the BSD Socket
+ * interface as the means of communication with the user level.
+ *
+ * Ethernet-type device handling.
+ *
+ * Version: @(#)eth.c 1.0.7 05/25/93
+ *
+ * Authors: Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ * Mark Evans, <evansmp@uhura.aston.ac.uk>
+ * Florian La Roche, <rzsfl@rz.uni-sb.de>
+ * Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *
+ * Fixes:
+ * Mr Linux : Arp problems
+ * Alan Cox : Generic queue tidyup (very tiny here)
+ * Alan Cox : eth_header ntohs should be htons
+ * Alan Cox : eth_rebuild_header missing an htons and
+ * minor other things.
+ * Tegge : Arp bug fixes.
+ * Florian : Removed many unnecessary functions, code cleanup
+ * and changes for new arp and skbuff.
+ * Alan Cox : Redid header building to reflect new format.
+ * Alan Cox : ARP only when compiled with CONFIG_INET
+ * Greg Page : 802.2 and SNAP stuff.
+ * Alan Cox : MAC layer pointers/new format.
+ * Paul Gortmaker : eth_copy_and_sum shouldn't csum padding.
+ * Alan Cox : Protect against forwarding explosions with
+ * older network drivers and IFF_ALLMULTI.
+ * Christer Weinigel : Better rebuild header message.
+ * Andrew Morton : 26Feb01: kill ether_setup() - use netdev_boot_setup().
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/inet.h>
+#include <linux/ip.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/if_ether.h>
+#include <net/dst.h>
+#include <net/arp.h>
+#include <net/sock.h>
+#include <net/ipv6.h>
+#include <net/ip.h>
+#include <net/dsa.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+
+__setup("ether=", netdev_boot_setup);
+
+/**
+ * eth_header - create the Ethernet header
+ * @skb: buffer to alter
+ * @dev: source device
+ * @type: Ethernet type field
+ * @daddr: destination address (NULL leaves the destination address unset)
+ * @saddr: source address (NULL means use the device source address)
+ * @len: packet length (<= skb->len)
+ *
+ *
+ * Set the protocol type. For a packet of type ETH_P_802_3 we put the length
+ * in here instead. It is up to the 802.2 layer to carry protocol information.
+ */
+int eth_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr, unsigned len)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);
+
+ if (type != ETH_P_802_3)
+ eth->h_proto = htons(type);
+ else
+ eth->h_proto = htons(len);
+
+ /*
+ * Set the source hardware address.
+ */
+
+ if (!saddr)
+ saddr = dev->dev_addr;
+ memcpy(eth->h_source, saddr, ETH_ALEN);
+
+ if (daddr) {
+ memcpy(eth->h_dest, daddr, ETH_ALEN);
+ return ETH_HLEN;
+ }
+
+ /*
+ * Anyway, the loopback-device should never use this function...
+ */
+
+ if (dev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+ memset(eth->h_dest, 0, ETH_ALEN);
+ return ETH_HLEN;
+ }
+
+ return -ETH_HLEN;
+}
+EXPORT_SYMBOL(eth_header);
+
+/**
+ * eth_rebuild_header - rebuild the Ethernet MAC header.
+ * @skb: socket buffer to update
+ *
+ * This is called after an ARP or IPv6 ndisc has resolved on this
+ * sk_buff. We now let the protocol (ARP) fill in the other fields.
+ *
+ * This routine CANNOT use cached dst->neigh!
+ * Really, it is used only when dst->neigh is wrong.
+ */
+int eth_rebuild_header(struct sk_buff *skb)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct net_device *dev = skb->dev;
+
+ switch (eth->h_proto) {
+#ifdef CONFIG_INET
+ case htons(ETH_P_IP):
+ return arp_find(eth->h_dest, skb);
+#endif
+ default:
+ printk(KERN_DEBUG
+ "%s: unable to resolve type %X addresses.\n",
+ dev->name, (int)eth->h_proto);
+
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(eth_rebuild_header);
+
+/**
+ * eth_type_trans - determine the packet's protocol ID.
+ * @skb: received socket data
+ * @dev: receiving network device
+ *
+ * The rule here is that we
+ * assume 802.3 if the type field is short enough to be a length.
+ * This is normal practice and works for any 'now in use' protocol.
+ */
+__be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ethhdr *eth;
+ unsigned char *rawp;
+
+ skb->dev = dev;
+ skb_reset_mac_header(skb);
+ skb_pull(skb, ETH_HLEN);
+ eth = eth_hdr(skb);
+
+ if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
+ if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast))
+ skb->pkt_type = PACKET_BROADCAST;
+ else
+ skb->pkt_type = PACKET_MULTICAST;
+ }
+
+ /*
+ * This ALLMULTI check should be redundant by 1.4
+ * so don't forget to remove it.
+ *
+ * Seems, you forgot to remove it. All silly devices
+ * seem to set IFF_PROMISC.
+ */
+
+ else if (1 /*dev->flags&IFF_PROMISC */ ) {
+ if (unlikely(compare_ether_addr_64bits(eth->h_dest, dev->dev_addr)))
+ skb->pkt_type = PACKET_OTHERHOST;
+ }
+
+ /*
+ * Some variants of DSA tagging don't have an ethertype field
+ * at all, so we check here whether one of those tagging
+ * variants has been configured on the receiving interface,
+ * and if so, set skb->protocol without looking at the packet.
+ */
+ if (netdev_uses_dsa_tags(dev))
+ return htons(ETH_P_DSA);
+ if (netdev_uses_trailer_tags(dev))
+ return htons(ETH_P_TRAILER);
+
+ if (ntohs(eth->h_proto) >= 1536)
+ return eth->h_proto;
+
+ rawp = skb->data;
+
+ /*
+ * This is a magic hack to spot IPX packets. Older Novell breaks
+ * the protocol design and runs IPX over 802.3 without an 802.2 LLC
+ * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+ * won't work for fault tolerant netware but does for the rest.
+ */
+ if (*(unsigned short *)rawp == 0xFFFF)
+ return htons(ETH_P_802_3);
+
+ /*
+ * Real 802.2 LLC
+ */
+ return htons(ETH_P_802_2);
+}
+EXPORT_SYMBOL(eth_type_trans);
+
+/**
+ * eth_header_parse - extract hardware address from packet
+ * @skb: packet to extract header from
+ * @haddr: destination buffer
+ */
+int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ const struct ethhdr *eth = eth_hdr(skb);
+ memcpy(haddr, eth->h_source, ETH_ALEN);
+ return ETH_ALEN;
+}
+EXPORT_SYMBOL(eth_header_parse);
+
+/**
+ * eth_header_cache - fill cache entry from neighbour
+ * @neigh: source neighbour
+ * @hh: destination cache entry
+ * Create an Ethernet header template from the neighbour.
+ */
+int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
+{
+ __be16 type = hh->hh_type;
+ struct ethhdr *eth;
+ const struct net_device *dev = neigh->dev;
+
+ eth = (struct ethhdr *)
+ (((u8 *) hh->hh_data) + (HH_DATA_OFF(sizeof(*eth))));
+
+ if (type == htons(ETH_P_802_3))
+ return -1;
+
+ eth->h_proto = type;
+ memcpy(eth->h_source, dev->dev_addr, ETH_ALEN);
+ memcpy(eth->h_dest, neigh->ha, ETH_ALEN);
+ hh->hh_len = ETH_HLEN;
+ return 0;
+}
+EXPORT_SYMBOL(eth_header_cache);
+
+/**
+ * eth_header_cache_update - update cache entry
+ * @hh: destination cache entry
+ * @dev: network device
+ * @haddr: new hardware address
+ *
+ * Called by Address Resolution module to notify changes in address.
+ */
+void eth_header_cache_update(struct hh_cache *hh,
+ const struct net_device *dev,
+ const unsigned char *haddr)
+{
+ memcpy(((u8 *) hh->hh_data) + HH_DATA_OFF(sizeof(struct ethhdr)),
+ haddr, ETH_ALEN);
+}
+EXPORT_SYMBOL(eth_header_cache_update);
+
+/**
+ * eth_mac_addr - set new Ethernet hardware address
+ * @dev: network device
+ * @p: socket address
+ * Change hardware address of device.
+ *
+ * This doesn't change hardware matching, so needs to be overridden
+ * for most real devices.
+ */
+int eth_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (netif_running(dev))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+ memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+ return 0;
+}
+EXPORT_SYMBOL(eth_mac_addr);
+
+/**
+ * eth_change_mtu - set new MTU size
+ * @dev: network device
+ * @new_mtu: new Maximum Transfer Unit
+ *
+ * Allow changing MTU size. Needs to be overridden for devices
+ * supporting jumbo frames.
+ */
+int eth_change_mtu(struct net_device *dev, int new_mtu)
+{
+ if (new_mtu < 68 || new_mtu > ETH_DATA_LEN)
+ return -EINVAL;
+ dev->mtu = new_mtu;
+ return 0;
+}
+EXPORT_SYMBOL(eth_change_mtu);
+
+int eth_validate_addr(struct net_device *dev)
+{
+ if (!is_valid_ether_addr(dev->dev_addr))
+ return -EADDRNOTAVAIL;
+
+ return 0;
+}
+EXPORT_SYMBOL(eth_validate_addr);
+
+const struct header_ops eth_header_ops ____cacheline_aligned = {
+ .create = eth_header,
+ .parse = eth_header_parse,
+ .rebuild = eth_rebuild_header,
+ .cache = eth_header_cache,
+ .cache_update = eth_header_cache_update,
+};
+
+/**
+ * ether_setup - setup Ethernet network device
+ * @dev: network device
+ * Fill in the fields of the device structure with Ethernet-generic values.
+ */
+void ether_setup(struct net_device *dev)
+{
+ dev->header_ops = &eth_header_ops;
+#ifdef CONFIG_COMPAT_NET_DEV_OPS
+ dev->change_mtu = eth_change_mtu;
+ dev->set_mac_address = eth_mac_addr;
+ dev->validate_addr = eth_validate_addr;
+#endif
+ dev->type = ARPHRD_ETHER;
+ dev->hard_header_len = ETH_HLEN;
+ dev->mtu = ETH_DATA_LEN;
+ dev->addr_len = ETH_ALEN;
+ dev->tx_queue_len = 1000; /* Ethernet wants good queues */
+ dev->flags = IFF_BROADCAST|IFF_MULTICAST;
+
+ memset(dev->broadcast, 0xFF, ETH_ALEN);
+
+}
+EXPORT_SYMBOL(ether_setup);
+
+/**
+ * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * @sizeof_priv: Size of additional driver-private structure to be allocated
+ * for this Ethernet device
+ * @queue_count: The number of queues this device has.
+ *
+ * Fill in the fields of the device structure with Ethernet-generic
+ * values. Basically does everything except registering the device.
+ *
+ * Constructs a new net device, complete with a private data area of
+ * size (sizeof_priv). A 32-byte (not bit) alignment is enforced for
+ * this private data area.
+ */
+
+struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+{
+ return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+}
+EXPORT_SYMBOL(alloc_etherdev_mq);
+
+static size_t _format_mac_addr(char *buf, int buflen,
+ const unsigned char *addr, int len)
+{
+ int i;
+ char *cp = buf;
+
+ for (i = 0; i < len; i++) {
+ cp += scnprintf(cp, buflen - (cp - buf), "%02x", addr[i]);
+ if (i == len - 1)
+ break;
+ cp += strlcpy(cp, ":", buflen - (cp - buf));
+ }
+ return cp - buf;
+}
+
+ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
+{
+ size_t l;
+
+ l = _format_mac_addr(buf, PAGE_SIZE, addr, len);
+ l += strlcpy(buf + l, "\n", PAGE_SIZE - l);
+ return ((ssize_t) l);
+}
+EXPORT_SYMBOL(sysfs_format_mac);
+
+char *print_mac(char *buf, const unsigned char *addr)
+{
+ _format_mac_addr(buf, MAC_BUF_SIZE, addr, ETH_ALEN);
+ return buf;
+}
+EXPORT_SYMBOL(print_mac);
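The helpers above are consumed by Ethernet drivers rather than by protocol code. A minimal sketch of that pattern follows, assuming a hypothetical driver with a 16-byte private area; only alloc_etherdev_mq() (which applies ether_setup()) and eth_type_trans() come from this file, while register_netdev() and netif_rx() are the usual netdevice entry points.

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative sketch, not part of eth.c; "ex_" names are hypothetical. */
static int ex_probe(void)
{
	/* one TX queue; ether_setup() fills in the generic Ethernet fields */
	struct net_device *dev = alloc_etherdev_mq(16, 1);

	if (!dev)
		return -ENOMEM;
	/* the driver would set dev->netdev_ops, dev->dev_addr, ... here */
	return register_netdev(dev);
}

static void ex_rx(struct net_device *dev, struct sk_buff *skb)
{
	/* classify the frame, strip the header, then hand it to the stack */
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);
}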
diff --git a/libdde_linux26/contrib/net/netlink/.svn/all-wcprops b/libdde_linux26/contrib/net/netlink/.svn/all-wcprops
new file mode 100644
index 00000000..ce1fb1f4
--- /dev/null
+++ b/libdde_linux26/contrib/net/netlink/.svn/all-wcprops
@@ -0,0 +1,11 @@
+K 25
+svn:wc:ra_dav:version-url
+V 70
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/netlink
+END
+attr.c
+K 25
+svn:wc:ra_dav:version-url
+V 77
+/repos/tudos/!svn/ver/455/trunk/l4/pkg/dde/linux26/contrib/net/netlink/attr.c
+END
diff --git a/libdde_linux26/contrib/net/netlink/.svn/entries b/libdde_linux26/contrib/net/netlink/.svn/entries
new file mode 100644
index 00000000..266f7da2
--- /dev/null
+++ b/libdde_linux26/contrib/net/netlink/.svn/entries
@@ -0,0 +1,62 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/contrib/net/netlink
+http://svn.tudos.org/repos/tudos
+
+
+
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
+attr.c
+file
+
+
+
+
+2009-11-15T17:16:31.000000Z
+947f16bcde5e9f39a59a4ea48b74cc1b
+2009-05-20T14:32:55.606606Z
+455
+l4check
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+11827
+
diff --git a/libdde_linux26/contrib/net/netlink/.svn/format b/libdde_linux26/contrib/net/netlink/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/contrib/net/netlink/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/contrib/net/netlink/.svn/text-base/attr.c.svn-base b/libdde_linux26/contrib/net/netlink/.svn/text-base/attr.c.svn-base
new file mode 100644
index 00000000..56c3ce7f
--- /dev/null
+++ b/libdde_linux26/contrib/net/netlink/.svn/text-base/attr.c.svn-base
@@ -0,0 +1,473 @@
+/*
+ * NETLINK Netlink attributes
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/netlink.h>
+
+static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
+ [NLA_U8] = sizeof(u8),
+ [NLA_U16] = sizeof(u16),
+ [NLA_U32] = sizeof(u32),
+ [NLA_U64] = sizeof(u64),
+ [NLA_NESTED] = NLA_HDRLEN,
+};
+
+static int validate_nla(struct nlattr *nla, int maxtype,
+ const struct nla_policy *policy)
+{
+ const struct nla_policy *pt;
+ int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+
+ if (type <= 0 || type > maxtype)
+ return 0;
+
+ pt = &policy[type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+
+ switch (pt->type) {
+ case NLA_FLAG:
+ if (attrlen > 0)
+ return -ERANGE;
+ break;
+
+ case NLA_NUL_STRING:
+ if (pt->len)
+ minlen = min_t(int, attrlen, pt->len + 1);
+ else
+ minlen = attrlen;
+
+ if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
+ return -EINVAL;
+ /* fall through */
+
+ case NLA_STRING:
+ if (attrlen < 1)
+ return -ERANGE;
+
+ if (pt->len) {
+ char *buf = nla_data(nla);
+
+ if (buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ if (attrlen > pt->len)
+ return -ERANGE;
+ }
+ break;
+
+ case NLA_BINARY:
+ if (pt->len && attrlen > pt->len)
+ return -ERANGE;
+ break;
+
+ case NLA_NESTED_COMPAT:
+ if (attrlen < pt->len)
+ return -ERANGE;
+ if (attrlen < NLA_ALIGN(pt->len))
+ break;
+ if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
+ return -ERANGE;
+ nla = nla_data(nla) + NLA_ALIGN(pt->len);
+ if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
+ return -ERANGE;
+ break;
+ case NLA_NESTED:
+ /* a nested attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ default:
+ if (pt->len)
+ minlen = pt->len;
+ else if (pt->type != NLA_UNSPEC)
+ minlen = nla_attr_minlen[pt->type];
+
+ if (attrlen < minlen)
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/**
+ * nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Attributes with a type exceeding maxtype will be
+ * ignored. See the documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_validate(struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ nla_for_each_attr(nla, head, len, rem) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ err = 0;
+errout:
+ return err;
+}
+
+/**
+ * nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be silently ignored for backwards compatibility
+ * reasons. policy may be set to NULL if no validation is required.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
+ const struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+ nla_for_each_attr(nla, head, len, rem) {
+ u16 type = nla_type(nla);
+
+ if (type > 0 && type <= maxtype) {
+ if (policy) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ tb[type] = nla;
+ }
+ }
+
+ if (unlikely(rem > 0))
+ printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
+ "attributes.\n", rem);
+
+ err = 0;
+errout:
+ return err;
+}
+
+/**
+ * nla_find - Find a specific attribute in a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute in the stream matching the specified type.
+ */
+struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
+{
+ struct nlattr *nla;
+ int rem;
+
+ nla_for_each_attr(nla, head, len, rem)
+ if (nla_type(nla) == attrtype)
+ return nla;
+
+ return NULL;
+}
+
+/**
+ * nla_strlcpy - Copy string attribute payload into a sized buffer
+ * @dst: where to copy the string to
+ * @nla: attribute to copy the string from
+ * @dstsize: size of destination buffer
+ *
+ * Copies at most dstsize - 1 bytes into the destination buffer.
+ * The result is always a valid NUL-terminated string. Unlike
+ * strlcpy the destination buffer is always padded out.
+ *
+ * Returns the length of the source buffer.
+ */
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
+{
+ size_t srclen = nla_len(nla);
+ char *src = nla_data(nla);
+
+ if (srclen > 0 && src[srclen - 1] == '\0')
+ srclen--;
+
+ if (dstsize > 0) {
+ size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
+
+ memset(dst, 0, dstsize);
+ memcpy(dst, src, len);
+ }
+
+ return srclen;
+}
+
+/**
+ * nla_memcpy - Copy a netlink attribute into another memory area
+ * @dest: where to copy to
+ * @src: netlink attribute to copy from
+ * @count: size of the destination area
+ *
+ * Note: The number of bytes copied is limited by the length of
+ * the attribute's payload.
+ *
+ * Returns the number of bytes copied.
+ */
+int nla_memcpy(void *dest, const struct nlattr *src, int count)
+{
+ int minlen = min_t(int, count, nla_len(src));
+
+ memcpy(dest, nla_data(src), minlen);
+
+ return minlen;
+}
+
+/**
+ * nla_memcmp - Compare an attribute with sized memory area
+ * @nla: netlink attribute
+ * @data: memory area
+ * @size: size of memory area
+ */
+int nla_memcmp(const struct nlattr *nla, const void *data,
+ size_t size)
+{
+ int d = nla_len(nla) - size;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), data, size);
+
+ return d;
+}
+
+/**
+ * nla_strcmp - Compare a string attribute against a string
+ * @nla: netlink string attribute
+ * @str: another string
+ */
+int nla_strcmp(const struct nlattr *nla, const char *str)
+{
+ int len = strlen(str) + 1;
+ int d = nla_len(nla) - len;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
+ return d;
+}
+
+/**
+ * __nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ struct nlattr *nla;
+
+ nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+ nla->nla_type = attrtype;
+ nla->nla_len = nla_attr_size(attrlen);
+
+ memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
+
+ return nla;
+}
+
+/**
+ * __nla_reserve_nohdr - reserve room for attribute without header
+ * @skb: socket buffer to reserve room on
+ * @attrlen: length of attribute payload
+ *
+ * Reserves room for attribute payload without a header.
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the payload.
+ */
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
+{
+ void *start;
+
+ start = skb_put(skb, NLA_ALIGN(attrlen));
+ memset(start, 0, NLA_ALIGN(attrlen));
+
+ return start;
+}
+
+/**
+ * nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return NULL;
+
+ return __nla_reserve(skb, attrtype, attrlen);
+}
+
+/**
+ * nla_reserve_nohdr - reserve room for attribute without header
+ * @skb: socket buffer to reserve room on
+ * @attrlen: length of attribute payload
+ *
+ * Reserves room for attribute payload without a header.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return NULL;
+
+ return __nla_reserve_nohdr(skb, attrlen);
+}
+
+/**
+ * __nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data)
+{
+ struct nlattr *nla;
+
+ nla = __nla_reserve(skb, attrtype, attrlen);
+ memcpy(nla_data(nla), data, attrlen);
+}
+
+/**
+ * __nla_put_nohdr - Add a netlink attribute without header
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute payload.
+ */
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
+{
+ void *start;
+
+ start = __nla_reserve_nohdr(skb, attrlen);
+ memcpy(start, data, attrlen);
+}
+
+/**
+ * nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return -EMSGSIZE;
+
+ __nla_put(skb, attrtype, attrlen, data);
+ return 0;
+}
+
+/**
+ * nla_put_nohdr - Add a netlink attribute without header
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return -EMSGSIZE;
+
+ __nla_put_nohdr(skb, attrlen, data);
+ return 0;
+}
+
+/**
+ * nla_append - Add a netlink attribute without header or padding
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+int nla_append(struct sk_buff *skb, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return -EMSGSIZE;
+
+ memcpy(skb_put(skb, attrlen), data, attrlen);
+ return 0;
+}
+
+EXPORT_SYMBOL(nla_validate);
+EXPORT_SYMBOL(nla_parse);
+EXPORT_SYMBOL(nla_find);
+EXPORT_SYMBOL(nla_strlcpy);
+EXPORT_SYMBOL(__nla_reserve);
+EXPORT_SYMBOL(__nla_reserve_nohdr);
+EXPORT_SYMBOL(nla_reserve);
+EXPORT_SYMBOL(nla_reserve_nohdr);
+EXPORT_SYMBOL(__nla_put);
+EXPORT_SYMBOL(__nla_put_nohdr);
+EXPORT_SYMBOL(nla_put);
+EXPORT_SYMBOL(nla_put_nohdr);
+EXPORT_SYMBOL(nla_memcpy);
+EXPORT_SYMBOL(nla_memcmp);
+EXPORT_SYMBOL(nla_strcmp);
+EXPORT_SYMBOL(nla_append);
diff --git a/libdde_linux26/contrib/net/netlink/attr.c b/libdde_linux26/contrib/net/netlink/attr.c
new file mode 100644
index 00000000..56c3ce7f
--- /dev/null
+++ b/libdde_linux26/contrib/net/netlink/attr.c
@@ -0,0 +1,473 @@
+/*
+ * NETLINK Netlink attributes
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/netlink.h>
+
+static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
+ [NLA_U8] = sizeof(u8),
+ [NLA_U16] = sizeof(u16),
+ [NLA_U32] = sizeof(u32),
+ [NLA_U64] = sizeof(u64),
+ [NLA_NESTED] = NLA_HDRLEN,
+};
+
+static int validate_nla(struct nlattr *nla, int maxtype,
+ const struct nla_policy *policy)
+{
+ const struct nla_policy *pt;
+ int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla);
+
+ if (type <= 0 || type > maxtype)
+ return 0;
+
+ pt = &policy[type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+
+ switch (pt->type) {
+ case NLA_FLAG:
+ if (attrlen > 0)
+ return -ERANGE;
+ break;
+
+ case NLA_NUL_STRING:
+ if (pt->len)
+ minlen = min_t(int, attrlen, pt->len + 1);
+ else
+ minlen = attrlen;
+
+ if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL)
+ return -EINVAL;
+ /* fall through */
+
+ case NLA_STRING:
+ if (attrlen < 1)
+ return -ERANGE;
+
+ if (pt->len) {
+ char *buf = nla_data(nla);
+
+ if (buf[attrlen - 1] == '\0')
+ attrlen--;
+
+ if (attrlen > pt->len)
+ return -ERANGE;
+ }
+ break;
+
+ case NLA_BINARY:
+ if (pt->len && attrlen > pt->len)
+ return -ERANGE;
+ break;
+
+ case NLA_NESTED_COMPAT:
+ if (attrlen < pt->len)
+ return -ERANGE;
+ if (attrlen < NLA_ALIGN(pt->len))
+ break;
+ if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN)
+ return -ERANGE;
+ nla = nla_data(nla) + NLA_ALIGN(pt->len);
+ if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla))
+ return -ERANGE;
+ break;
+ case NLA_NESTED:
+ /* a nested attribute is allowed to be empty; if it's not,
+ * it must have a size of at least NLA_HDRLEN.
+ */
+ if (attrlen == 0)
+ break;
+ default:
+ if (pt->len)
+ minlen = pt->len;
+ else if (pt->type != NLA_UNSPEC)
+ minlen = nla_attr_minlen[pt->type];
+
+ if (attrlen < minlen)
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+/**
+ * nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Attributes with a type exceeding maxtype will be
+ * ignored. See the documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_validate(struct nlattr *head, int len, int maxtype,
+ const struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ nla_for_each_attr(nla, head, len, rem) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ err = 0;
+errout:
+ return err;
+}
+
+/**
+ * nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be silently ignored for backwards compatibility
+ * reasons. policy may be set to NULL if no validation is required.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
+ const struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+ nla_for_each_attr(nla, head, len, rem) {
+ u16 type = nla_type(nla);
+
+ if (type > 0 && type <= maxtype) {
+ if (policy) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ tb[type] = nla;
+ }
+ }
+
+ if (unlikely(rem > 0))
+ printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
+ "attributes.\n", rem);
+
+ err = 0;
+errout:
+ return err;
+}
+
+/**
+ * nla_find - Find a specific attribute in a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute in the stream matching the specified type.
+ */
+struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
+{
+ struct nlattr *nla;
+ int rem;
+
+ nla_for_each_attr(nla, head, len, rem)
+ if (nla_type(nla) == attrtype)
+ return nla;
+
+ return NULL;
+}
+
+/**
+ * nla_strlcpy - Copy string attribute payload into a sized buffer
+ * @dst: where to copy the string to
+ * @nla: attribute to copy the string from
+ * @dstsize: size of destination buffer
+ *
+ * Copies at most dstsize - 1 bytes into the destination buffer.
+ * The result is always a valid NUL-terminated string. Unlike
+ * strlcpy the destination buffer is always padded out.
+ *
+ * Returns the length of the source buffer.
+ */
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
+{
+ size_t srclen = nla_len(nla);
+ char *src = nla_data(nla);
+
+ if (srclen > 0 && src[srclen - 1] == '\0')
+ srclen--;
+
+ if (dstsize > 0) {
+ size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
+
+ memset(dst, 0, dstsize);
+ memcpy(dst, src, len);
+ }
+
+ return srclen;
+}
+
+/**
+ * nla_memcpy - Copy a netlink attribute into another memory area
+ * @dest: where to copy to
+ * @src: netlink attribute to copy from
+ * @count: size of the destination area
+ *
+ * Note: The number of bytes copied is limited by the length of
+ * the attribute's payload.
+ *
+ * Returns the number of bytes copied.
+ */
+int nla_memcpy(void *dest, const struct nlattr *src, int count)
+{
+ int minlen = min_t(int, count, nla_len(src));
+
+ memcpy(dest, nla_data(src), minlen);
+
+ return minlen;
+}
+
+/**
+ * nla_memcmp - Compare an attribute with sized memory area
+ * @nla: netlink attribute
+ * @data: memory area
+ * @size: size of memory area
+ */
+int nla_memcmp(const struct nlattr *nla, const void *data,
+ size_t size)
+{
+ int d = nla_len(nla) - size;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), data, size);
+
+ return d;
+}
+
+/**
+ * nla_strcmp - Compare a string attribute against a string
+ * @nla: netlink string attribute
+ * @str: another string
+ */
+int nla_strcmp(const struct nlattr *nla, const char *str)
+{
+ int len = strlen(str) + 1;
+ int d = nla_len(nla) - len;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
+ return d;
+}
+
+/**
+ * __nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ struct nlattr *nla;
+
+ nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+ nla->nla_type = attrtype;
+ nla->nla_len = nla_attr_size(attrlen);
+
+ memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
+
+ return nla;
+}
+
+/**
+ * __nla_reserve_nohdr - reserve room for attribute without header
+ * @skb: socket buffer to reserve room on
+ * @attrlen: length of attribute payload
+ *
+ * Reserves room for attribute payload without a header.
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the payload.
+ */
+void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
+{
+ void *start;
+
+ start = skb_put(skb, NLA_ALIGN(attrlen));
+ memset(start, 0, NLA_ALIGN(attrlen));
+
+ return start;
+}
+
+/**
+ * nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return NULL;
+
+ return __nla_reserve(skb, attrtype, attrlen);
+}
+
+/**
+ * nla_reserve_nohdr - reserve room for attribute without header
+ * @skb: socket buffer to reserve room on
+ * @attrlen: length of attribute payload
+ *
+ * Reserves room for attribute payload without a header.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return NULL;
+
+ return __nla_reserve_nohdr(skb, attrlen);
+}
+
+/**
+ * __nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data)
+{
+ struct nlattr *nla;
+
+ nla = __nla_reserve(skb, attrtype, attrlen);
+ memcpy(nla_data(nla), data, attrlen);
+}
+
+/**
+ * __nla_put_nohdr - Add a netlink attribute without header
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible to ensure that the skb provides enough
+ * tailroom for the attribute payload.
+ */
+void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
+{
+ void *start;
+
+ start = __nla_reserve_nohdr(skb, attrlen);
+ memcpy(start, data, attrlen);
+}
+
+/**
+ * nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return -EMSGSIZE;
+
+ __nla_put(skb, attrtype, attrlen, data);
+ return 0;
+}
+
+/**
+ * nla_put_nohdr - Add a netlink attribute without header
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return -EMSGSIZE;
+
+ __nla_put_nohdr(skb, attrlen, data);
+ return 0;
+}
+
+/**
+ * nla_append - Add a netlink attribute without header or padding
+ * @skb: socket buffer to add attribute to
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store
+ * the attribute payload.
+ */
+int nla_append(struct sk_buff *skb, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen)))
+ return -EMSGSIZE;
+
+ memcpy(skb_put(skb, attrlen), data, attrlen);
+ return 0;
+}
+
+EXPORT_SYMBOL(nla_validate);
+EXPORT_SYMBOL(nla_parse);
+EXPORT_SYMBOL(nla_find);
+EXPORT_SYMBOL(nla_strlcpy);
+EXPORT_SYMBOL(__nla_reserve);
+EXPORT_SYMBOL(__nla_reserve_nohdr);
+EXPORT_SYMBOL(nla_reserve);
+EXPORT_SYMBOL(nla_reserve_nohdr);
+EXPORT_SYMBOL(__nla_put);
+EXPORT_SYMBOL(__nla_put_nohdr);
+EXPORT_SYMBOL(nla_put);
+EXPORT_SYMBOL(nla_put_nohdr);
+EXPORT_SYMBOL(nla_memcpy);
+EXPORT_SYMBOL(nla_memcmp);
+EXPORT_SYMBOL(nla_strcmp);
+EXPORT_SYMBOL(nla_append);
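A short sketch of how the fill and parse halves of this API fit together. The EX_ATTR_* attribute space and ex_policy below are hypothetical; nla_put(), nla_parse() and the policy types come from the code above, and nla_data() is the accessor from <net/netlink.h>.

#include <net/netlink.h>

/* Illustrative sketch, not part of attr.c; EX_* names are hypothetical. */
enum { EX_ATTR_UNSPEC, EX_ATTR_MTU, __EX_ATTR_MAX };
#define EX_ATTR_MAX (__EX_ATTR_MAX - 1)

static const struct nla_policy ex_policy[EX_ATTR_MAX + 1] = {
	[EX_ATTR_MTU] = { .type = NLA_U32 },
};

static int ex_fill(struct sk_buff *skb, u32 mtu)
{
	/* fails with -EMSGSIZE when the skb tailroom is too small */
	return nla_put(skb, EX_ATTR_MTU, sizeof(mtu), &mtu);
}

static int ex_parse(struct nlattr *head, int len, u32 *mtu)
{
	struct nlattr *tb[EX_ATTR_MAX + 1];
	int err = nla_parse(tb, EX_ATTR_MAX, head, len, ex_policy);

	if (err < 0)
		return err;
	if (tb[EX_ATTR_MTU])
		*mtu = *(u32 *)nla_data(tb[EX_ATTR_MTU]);
	return 0;
}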
diff --git a/libdde_linux26/contrib/net/sched/.svn/all-wcprops b/libdde_linux26/contrib/net/sched/.svn/all-wcprops
new file mode 100644
index 00000000..1afccd56
--- /dev/null
+++ b/libdde_linux26/contrib/net/sched/.svn/all-wcprops
@@ -0,0 +1,5 @@
+K 25
+svn:wc:ra_dav:version-url
+V 68
+/repos/tudos/!svn/ver/174/trunk/l4/pkg/dde/linux26/contrib/net/sched
+END
diff --git a/libdde_linux26/contrib/net/sched/.svn/entries b/libdde_linux26/contrib/net/sched/.svn/entries
new file mode 100644
index 00000000..71f93f72
--- /dev/null
+++ b/libdde_linux26/contrib/net/sched/.svn/entries
@@ -0,0 +1,28 @@
+9
+
+dir
+465
+http://svn.tudos.org/repos/tudos/trunk/l4/pkg/dde/linux26/contrib/net/sched
+http://svn.tudos.org/repos/tudos
+
+
+
+2007-09-08T19:44:13.897747Z
+174
+l4check
+
+
+svn:special svn:externals svn:needs-lock
+
+
+
+
+
+
+
+
+
+
+
+a704ac0b-3a55-4d43-a2a9-7be6f07c34fb
+
diff --git a/libdde_linux26/contrib/net/sched/.svn/format b/libdde_linux26/contrib/net/sched/.svn/format
new file mode 100644
index 00000000..ec635144
--- /dev/null
+++ b/libdde_linux26/contrib/net/sched/.svn/format
@@ -0,0 +1 @@
+9
diff --git a/libdde_linux26/contrib/net/socket.c b/libdde_linux26/contrib/net/socket.c
new file mode 100644
index 00000000..35dd7371
--- /dev/null
+++ b/libdde_linux26/contrib/net/socket.c
@@ -0,0 +1,2406 @@
+/*
+ * NET An implementation of the SOCKET network access protocol.
+ *
+ * Version: @(#)socket.c 1.1.93 18/02/95
+ *
+ * Authors: Orest Zborowski, <obz@Kodak.COM>
+ * Ross Biro
+ * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *
+ * Fixes:
+ * Anonymous : NOTSOCK/BADF cleanup. Error fix in
+ * shutdown()
+ * Alan Cox : verify_area() fixes
+ * Alan Cox : Removed DDI
+ * Jonathan Kamens : SOCK_DGRAM reconnect bug
+ * Alan Cox : Moved a load of checks to the very
+ * top level.
+ * Alan Cox : Move address structures to/from user
+ * mode above the protocol layers.
+ * Rob Janssen : Allow 0 length sends.
+ * Alan Cox : Asynchronous I/O support (cribbed from the
+ * tty drivers).
+ * Niibe Yutaka : Asynchronous I/O for writes (4.4BSD style)
+ * Jeff Uphoff : Made max number of sockets command-line
+ * configurable.
+ * Matti Aarnio : Made the number of sockets dynamic,
+ * to be allocated when needed, and mr.
+ * Uphoff's max is used as max to be
+ * allowed to allocate.
+ * Linus : Argh. removed all the socket allocation
+ * altogether: it's in the inode now.
+ * Alan Cox : Made sock_alloc()/sock_release() public
+ * for NetROM and future kernel nfsd type
+ * stuff.
+ * Alan Cox : sendmsg/recvmsg basics.
+ * Tom Dyas : Export net symbols.
+ * Marcin Dalecki : Fixed problems with CONFIG_NET="n".
+ * Alan Cox : Added thread locking to sys_* calls
+ * for sockets. May have errors at the
+ * moment.
+ * Kevin Buhr : Fixed the dumb errors in the above.
+ * Andi Kleen : Some small cleanups, optimizations,
+ * and fixed a copy_from_user() bug.
+ * Tigran Aivazian : sys_send(args) calls sys_sendto(args, NULL, 0)
+ * Tigran Aivazian : Made listen(2) backlog sanity checks
+ * protocol-independent
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ *
+ * This module is effectively the top level interface to the BSD socket
+ * paradigm.
+ *
+ * Based upon Swansea University Computer Society NET3.039
+ */
+
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <linux/net.h>
+#include <linux/interrupt.h>
+#include <linux/thread_info.h>
+#include <linux/rcupdate.h>
+#include <linux/netdevice.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/mutex.h>
+#include <linux/wanrouter.h>
+#include <linux/if_bridge.h>
+#include <linux/if_frad.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/poll.h>
+#include <linux/cache.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/mount.h>
+#include <linux/security.h>
+#include <linux/syscalls.h>
+#include <linux/compat.h>
+#include <linux/kmod.h>
+#include <linux/audit.h>
+#include <linux/wireless.h>
+#include <linux/nsproxy.h>
+
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+
+#include <net/compat.h>
+#include <net/wext.h>
+
+#include <net/sock.h>
+#include <linux/netfilter.h>
+
+static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos);
+static int sock_mmap(struct file *file, struct vm_area_struct *vma);
+
+static int sock_close(struct inode *inode, struct file *file);
+static unsigned int sock_poll(struct file *file,
+ struct poll_table_struct *wait);
+static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+static long compat_sock_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg);
+#endif
+static int sock_fasync(int fd, struct file *filp, int on);
+static ssize_t sock_sendpage(struct file *file, struct page *page,
+ int offset, size_t size, loff_t *ppos, int more);
+static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags);
+
+/*
+ * Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
+ * in the operation structures but are done directly via the socketcall() multiplexor.
+ */
+
+static const struct file_operations socket_file_ops = {
+ .owner = THIS_MODULE,
+ .llseek = no_llseek,
+ .aio_read = sock_aio_read,
+ .aio_write = sock_aio_write,
+ .poll = sock_poll,
+ .unlocked_ioctl = sock_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = compat_sock_ioctl,
+#endif
+ .mmap = sock_mmap,
+ .open = sock_no_open, /* special open code to disallow open via /proc */
+ .release = sock_close,
+ .fasync = sock_fasync,
+ .sendpage = sock_sendpage,
+ .splice_write = generic_splice_sendpage,
+ .splice_read = sock_splice_read,
+};
+
+/*
+ * The protocol list. Each protocol is registered in here.
+ */
+
+static DEFINE_SPINLOCK(net_family_lock);
+static const struct net_proto_family *net_families[NPROTO] __read_mostly;
+
+/*
+ * Statistics counters of the socket lists
+ */
+
+static DEFINE_PER_CPU(int, sockets_in_use) = 0;
+
+/*
+ * Support routines.
+ * Move socket addresses back and forth across the kernel/user
+ * divide and look after the messy bits.
+ */
+
+#define MAX_SOCK_ADDR 128 /* 108 for Unix domain -
+ 16 for IP, 16 for IPX,
+ 24 for IPv6,
+ about 80 for AX.25
+ must be at least one bigger than
+ the AF_UNIX size (see net/unix/af_unix.c
+ :unix_mkname()).
+ */
+
+/**
+ * move_addr_to_kernel - copy a socket address into kernel space
+ * @uaddr: Address in user space
+ * @kaddr: Address in kernel space
+ * @ulen: Length in user space
+ *
+ * The address is copied into kernel space. If the provided address is
+ * too long, an error code of -EINVAL is returned. If the copy faults,
+ * -EFAULT is returned. On success 0 is returned.
+ */
+
+int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr)
+{
+ if (ulen < 0 || ulen > sizeof(struct sockaddr_storage))
+ return -EINVAL;
+ if (ulen == 0)
+ return 0;
+ if (copy_from_user(kaddr, uaddr, ulen))
+ return -EFAULT;
+ return audit_sockaddr(ulen, kaddr);
+}
+
+/**
+ * move_addr_to_user - copy an address to user space
+ * @kaddr: kernel space address
+ * @klen: length of address in kernel
+ * @uaddr: user space address
+ * @ulen: pointer to user length field
+ *
+ * The value pointed to by ulen on entry is the buffer length available.
+ * This is overwritten with the buffer space used. -EINVAL is returned
+ * if an overlong or negative buffer size is specified. -EFAULT
+ * is returned if either the buffer or the length field is not
+ * accessible.
+ * After copying the data up to the limit the user specifies, the true
+ * length of the data is written over the length limit the user
+ * specified. Zero is returned on success.
+ */
+
+int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr,
+ int __user *ulen)
+{
+ int err;
+ int len;
+
+ err = get_user(len, ulen);
+ if (err)
+ return err;
+ if (len > klen)
+ len = klen;
+ if (len < 0 || len > sizeof(struct sockaddr_storage))
+ return -EINVAL;
+ if (len) {
+ if (audit_sockaddr(klen, kaddr))
+ return -ENOMEM;
+ if (copy_to_user(uaddr, kaddr, len))
+ return -EFAULT;
+ }
+ /*
+ * "fromlen shall refer to the value before truncation.."
+ * 1003.1g
+ */
+ return __put_user(klen, ulen);
+}
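A sketch of the typical caller of the two helpers above, modelled on a bind()-style system call; the ex_ name is hypothetical, and sock->ops->bind() stands for whichever protocol hook finally consumes the kernel copy of the address.

/* Illustrative sketch, not part of socket.c. */
static int ex_bind(struct socket *sock, struct sockaddr __user *umyaddr,
		   int addrlen)
{
	struct sockaddr_storage address;
	int err;

	/* copy (and audit) the user sockaddr before touching it */
	err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
	if (err < 0)
		return err;
	return sock->ops->bind(sock, (struct sockaddr *)&address, addrlen);
}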
+
+#define SOCKFS_MAGIC 0x534F434B
+
+static struct kmem_cache *sock_inode_cachep __read_mostly;
+
+static struct inode *sock_alloc_inode(struct super_block *sb)
+{
+ struct socket_alloc *ei;
+
+ ei = kmem_cache_alloc(sock_inode_cachep, GFP_KERNEL);
+ if (!ei)
+ return NULL;
+ init_waitqueue_head(&ei->socket.wait);
+
+ ei->socket.fasync_list = NULL;
+ ei->socket.state = SS_UNCONNECTED;
+ ei->socket.flags = 0;
+ ei->socket.ops = NULL;
+ ei->socket.sk = NULL;
+ ei->socket.file = NULL;
+
+ return &ei->vfs_inode;
+}
+
+static void sock_destroy_inode(struct inode *inode)
+{
+ kmem_cache_free(sock_inode_cachep,
+ container_of(inode, struct socket_alloc, vfs_inode));
+}
+
+static void init_once(void *foo)
+{
+ struct socket_alloc *ei = (struct socket_alloc *)foo;
+
+ inode_init_once(&ei->vfs_inode);
+}
+
+static int init_inodecache(void)
+{
+ sock_inode_cachep = kmem_cache_create("sock_inode_cache",
+ sizeof(struct socket_alloc),
+ 0,
+ (SLAB_HWCACHE_ALIGN |
+ SLAB_RECLAIM_ACCOUNT |
+ SLAB_MEM_SPREAD),
+ init_once);
+ if (sock_inode_cachep == NULL)
+ return -ENOMEM;
+ return 0;
+}
+
+static struct super_operations sockfs_ops = {
+ .alloc_inode = sock_alloc_inode,
+ .destroy_inode =sock_destroy_inode,
+ .statfs = simple_statfs,
+};
+
+static int sockfs_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
+{
+ return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC,
+ mnt);
+}
+
+static struct vfsmount *sock_mnt __read_mostly;
+
+static struct file_system_type sock_fs_type = {
+ .name = "sockfs",
+ .get_sb = sockfs_get_sb,
+ .kill_sb = kill_anon_super,
+};
+
+static int sockfs_delete_dentry(struct dentry *dentry)
+{
+ /*
+ * At creation time, we pretended this dentry was hashed
+ * (by clearing DCACHE_UNHASHED bit in d_flags)
+ * At delete time, we restore the truth : not hashed.
+ * (so that dput() can proceed correctly)
+ */
+ dentry->d_flags |= DCACHE_UNHASHED;
+ return 0;
+}
+
+/*
+ * sockfs_dname() is called from d_path().
+ */
+static char *sockfs_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ return dynamic_dname(dentry, buffer, buflen, "socket:[%lu]",
+ dentry->d_inode->i_ino);
+}
+
+static struct dentry_operations sockfs_dentry_operations = {
+ .d_delete = sockfs_delete_dentry,
+ .d_dname = sockfs_dname,
+};
+
+/*
+ * Obtains the first available file descriptor and sets it up for use.
+ *
+ * These functions create file structures and map them into the fd space
+ * of the current process. On success they return the file descriptor,
+ * with the file struct implicitly stored in sock->file.
+ * Note that another thread may close the file descriptor before we return
+ * from this function. We rely on the fact that we do not refer to the
+ * socket after the mapping. If we ever need to, this function will have
+ * to take an extra reference on the file.
+ *
+ * In any case the returned fd may no longer be valid!
+ * This race condition is unavoidable with shared fd spaces; we cannot
+ * solve it inside the kernel, but we do take care of internal coherence.
+ */
+
+static int sock_alloc_fd(struct file **filep, int flags)
+{
+ int fd;
+
+ fd = get_unused_fd_flags(flags);
+ if (likely(fd >= 0)) {
+ struct file *file = get_empty_filp();
+
+ *filep = file;
+ if (unlikely(!file)) {
+ put_unused_fd(fd);
+ return -ENFILE;
+ }
+ } else
+ *filep = NULL;
+ return fd;
+}
+
+static int sock_attach_fd(struct socket *sock, struct file *file, int flags)
+{
+ struct dentry *dentry;
+ struct qstr name = { .name = "" };
+
+ dentry = d_alloc(sock_mnt->mnt_sb->s_root, &name);
+ if (unlikely(!dentry))
+ return -ENOMEM;
+
+ dentry->d_op = &sockfs_dentry_operations;
+ /*
+ * We don't want to push this dentry into the global dentry hash table.
+ * We pretend the dentry is already hashed, by unsetting DCACHE_UNHASHED.
+ * This permits a working /proc/$pid/fd/XXX on sockets.
+ */
+ dentry->d_flags &= ~DCACHE_UNHASHED;
+ d_instantiate(dentry, SOCK_INODE(sock));
+
+ sock->file = file;
+ init_file(file, sock_mnt, dentry, FMODE_READ | FMODE_WRITE,
+ &socket_file_ops);
+ SOCK_INODE(sock)->i_fop = &socket_file_ops;
+ file->f_flags = O_RDWR | (flags & O_NONBLOCK);
+ file->f_pos = 0;
+ file->private_data = sock;
+
+ return 0;
+}
+
+int sock_map_fd(struct socket *sock, int flags)
+{
+ struct file *newfile;
+ int fd = sock_alloc_fd(&newfile, flags);
+
+ if (likely(fd >= 0)) {
+ int err = sock_attach_fd(sock, newfile, flags);
+
+ if (unlikely(err < 0)) {
+ put_filp(newfile);
+ put_unused_fd(fd);
+ return err;
+ }
+ fd_install(fd, newfile);
+ }
+ return fd;
+}
+
+static struct socket *sock_from_file(struct file *file, int *err)
+{
+ if (file->f_op == &socket_file_ops)
+ return file->private_data; /* set in sock_map_fd */
+
+ *err = -ENOTSOCK;
+ return NULL;
+}
+
+/**
+ * sockfd_lookup - Go from a file number to its socket slot
+ * @fd: file handle
+ * @err: pointer to an error code return
+ *
+ * The file handle passed in is locked and the socket it is bound
+ * to is returned. If an error occurs the err pointer is overwritten
+ * with a negative errno code and NULL is returned. The function checks
+ * for both invalid handles and passing a handle which is not a socket.
+ *
+ * On a success the socket object pointer is returned.
+ */
+
+struct socket *sockfd_lookup(int fd, int *err)
+{
+ struct file *file;
+ struct socket *sock;
+
+ file = fget(fd);
+ if (!file) {
+ *err = -EBADF;
+ return NULL;
+ }
+
+ sock = sock_from_file(file, err);
+ if (!sock)
+ fput(file);
+ return sock;
+}
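A sketch of the reference discipline sockfd_lookup() imposes on its callers: the fget() it performs must be balanced with sockfd_put() (the <linux/net.h> wrapper around fput(sock->file)) once the socket is no longer needed. The ex_ name is hypothetical.

/* Illustrative sketch, not part of socket.c. */
static int ex_use_fd(int fd)
{
	int err;
	struct socket *sock = sockfd_lookup(fd, &err);

	if (!sock)
		return err;	/* -EBADF or -ENOTSOCK */
	/* ... operate on sock->ops / sock->sk here ... */
	sockfd_put(sock);	/* drop the reference taken above */
	return 0;
}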
+
+static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
+{
+ struct file *file;
+ struct socket *sock;
+
+ *err = -EBADF;
+ file = fget_light(fd, fput_needed);
+ if (file) {
+ sock = sock_from_file(file, err);
+ if (sock)
+ return sock;
+ fput_light(file, *fput_needed);
+ }
+ return NULL;
+}
+
+/**
+ * sock_alloc - allocate a socket
+ *
+ * Allocate a new inode and socket object. The two are bound together
+ * and initialised. The socket is then returned. If we are out of inodes
+ * NULL is returned.
+ */
+
+static struct socket *sock_alloc(void)
+{
+ struct inode *inode;
+ struct socket *sock;
+
+ inode = new_inode(sock_mnt->mnt_sb);
+ if (!inode)
+ return NULL;
+
+ sock = SOCKET_I(inode);
+
+ inode->i_mode = S_IFSOCK | S_IRWXUGO;
+ inode->i_uid = current_fsuid();
+ inode->i_gid = current_fsgid();
+
+ get_cpu_var(sockets_in_use)++;
+ put_cpu_var(sockets_in_use);
+ return sock;
+}
+
+/*
+ * In theory you can't get an open on this inode, but /proc provides
+ * a back door. Remember to keep it shut otherwise you'll let the
+ * creepy crawlies in.
+ */
+
+static int sock_no_open(struct inode *irrelevant, struct file *dontcare)
+{
+ return -ENXIO;
+}
+
+const struct file_operations bad_sock_fops = {
+ .owner = THIS_MODULE,
+ .open = sock_no_open,
+};
+
+/**
+ * sock_release - close a socket
+ * @sock: socket to close
+ *
+ * The socket is released from the protocol stack if it has a release
+ * callback, and the inode is then released if the socket is bound to
+ * an inode and not to a file.
+ */
+
+void sock_release(struct socket *sock)
+{
+ if (sock->ops) {
+ struct module *owner = sock->ops->owner;
+
+ sock->ops->release(sock);
+ sock->ops = NULL;
+ module_put(owner);
+ }
+
+ if (sock->fasync_list)
+ printk(KERN_ERR "sock_release: fasync list not empty!\n");
+
+ get_cpu_var(sockets_in_use)--;
+ put_cpu_var(sockets_in_use);
+ if (!sock->file) {
+ iput(SOCK_INODE(sock));
+ return;
+ }
+ sock->file = NULL;
+}
+
+static inline int __sock_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size)
+{
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
+ int err;
+
+ si->sock = sock;
+ si->scm = NULL;
+ si->msg = msg;
+ si->size = size;
+
+ err = security_socket_sendmsg(sock, msg, size);
+ if (err)
+ return err;
+
+ return sock->ops->sendmsg(iocb, sock, msg, size);
+}
+
+int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
+{
+ struct kiocb iocb;
+ struct sock_iocb siocb;
+ int ret;
+
+ init_sync_kiocb(&iocb, NULL);
+ iocb.private = &siocb;
+ ret = __sock_sendmsg(&iocb, sock, msg, size);
+ if (-EIOCBQUEUED == ret)
+ ret = wait_on_sync_kiocb(&iocb);
+ return ret;
+}
+
+int kernel_sendmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t size)
+{
+ mm_segment_t oldfs = get_fs();
+ int result;
+
+ set_fs(KERNEL_DS);
+ /*
+	 * the following is safe, since to the compiler the definitions of kvec
+	 * and iovec are identical, yielding the same in-core layout and alignment
+ */
+ msg->msg_iov = (struct iovec *)vec;
+ msg->msg_iovlen = num;
+ result = sock_sendmsg(sock, msg, size);
+ set_fs(oldfs);
+ return result;
+}
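+
+/*
+ * Editor's sketch (not part of the original socket.c): sending a kernel
+ * buffer with kernel_sendmsg().  The kvec points at kernel memory, which is
+ * why the set_fs(KERNEL_DS) switch above is needed; buf and len are
+ * hypothetical caller variables.
+ *
+ *	struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
+ *	struct kvec vec = { .iov_base = buf, .iov_len = len };
+ *	int sent;
+ *
+ *	sent = kernel_sendmsg(sock, &msg, &vec, 1, len);
+ *	if (sent < 0)
+ *		... handle error ...
+ */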
+
+/*
+ * called from sock_recv_timestamp() if sock_flag(sk, SOCK_RCVTSTAMP)
+ */
+void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
+ struct sk_buff *skb)
+{
+ ktime_t kt = skb->tstamp;
+
+ if (!sock_flag(sk, SOCK_RCVTSTAMPNS)) {
+ struct timeval tv;
+ /* Race occurred between timestamp enabling and packet
+ receiving. Fill in the current time for now. */
+ if (kt.tv64 == 0)
+ kt = ktime_get_real();
+ skb->tstamp = kt;
+ tv = ktime_to_timeval(kt);
+ put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP, sizeof(tv), &tv);
+ } else {
+ struct timespec ts;
+ /* Race occurred between timestamp enabling and packet
+ receiving. Fill in the current time for now. */
+ if (kt.tv64 == 0)
+ kt = ktime_get_real();
+ skb->tstamp = kt;
+ ts = ktime_to_timespec(kt);
+ put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS, sizeof(ts), &ts);
+ }
+}
+
+EXPORT_SYMBOL_GPL(__sock_recv_timestamp);
+
+static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size, int flags)
+{
+ int err;
+ struct sock_iocb *si = kiocb_to_siocb(iocb);
+
+ si->sock = sock;
+ si->scm = NULL;
+ si->msg = msg;
+ si->size = size;
+ si->flags = flags;
+
+ err = security_socket_recvmsg(sock, msg, size, flags);
+ if (err)
+ return err;
+
+ return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+}
+
+int sock_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t size, int flags)
+{
+ struct kiocb iocb;
+ struct sock_iocb siocb;
+ int ret;
+
+ init_sync_kiocb(&iocb, NULL);
+ iocb.private = &siocb;
+ ret = __sock_recvmsg(&iocb, sock, msg, size, flags);
+ if (-EIOCBQUEUED == ret)
+ ret = wait_on_sync_kiocb(&iocb);
+ return ret;
+}
+
+int kernel_recvmsg(struct socket *sock, struct msghdr *msg,
+ struct kvec *vec, size_t num, size_t size, int flags)
+{
+ mm_segment_t oldfs = get_fs();
+ int result;
+
+ set_fs(KERNEL_DS);
+ /*
+	 * the following is safe, since to the compiler the definitions of kvec
+	 * and iovec are identical, yielding the same in-core layout and alignment
+ */
+	msg->msg_iov = (struct iovec *)vec;
+	msg->msg_iovlen = num;
+ result = sock_recvmsg(sock, msg, size, flags);
+ set_fs(oldfs);
+ return result;
+}
+
+static void sock_aio_dtor(struct kiocb *iocb)
+{
+ kfree(iocb->private);
+}
+
+static ssize_t sock_sendpage(struct file *file, struct page *page,
+ int offset, size_t size, loff_t *ppos, int more)
+{
+ struct socket *sock;
+ int flags;
+
+ sock = file->private_data;
+
+ flags = !(file->f_flags & O_NONBLOCK) ? 0 : MSG_DONTWAIT;
+ if (more)
+ flags |= MSG_MORE;
+
+ return sock->ops->sendpage(sock, page, offset, size, flags);
+}
+
+static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
+ struct pipe_inode_info *pipe, size_t len,
+ unsigned int flags)
+{
+ struct socket *sock = file->private_data;
+
+ if (unlikely(!sock->ops->splice_read))
+ return -EINVAL;
+
+ return sock->ops->splice_read(sock, ppos, pipe, len, flags);
+}
+
+static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
+ struct sock_iocb *siocb)
+{
+ if (!is_sync_kiocb(iocb)) {
+ siocb = kmalloc(sizeof(*siocb), GFP_KERNEL);
+ if (!siocb)
+ return NULL;
+ iocb->ki_dtor = sock_aio_dtor;
+ }
+
+ siocb->kiocb = iocb;
+ iocb->private = siocb;
+ return siocb;
+}
+
+static ssize_t do_sock_read(struct msghdr *msg, struct kiocb *iocb,
+ struct file *file, const struct iovec *iov,
+ unsigned long nr_segs)
+{
+ struct socket *sock = file->private_data;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < nr_segs; i++)
+ size += iov[i].iov_len;
+
+ msg->msg_name = NULL;
+ msg->msg_namelen = 0;
+ msg->msg_control = NULL;
+ msg->msg_controllen = 0;
+ msg->msg_iov = (struct iovec *)iov;
+ msg->msg_iovlen = nr_segs;
+ msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+
+ return __sock_recvmsg(iocb, sock, msg, size, msg->msg_flags);
+}
+
+static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct sock_iocb siocb, *x;
+
+ if (pos != 0)
+ return -ESPIPE;
+
+ if (iocb->ki_left == 0) /* Match SYS5 behaviour */
+ return 0;
+
+
+ x = alloc_sock_iocb(iocb, &siocb);
+ if (!x)
+ return -ENOMEM;
+ return do_sock_read(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+}
+
+static ssize_t do_sock_write(struct msghdr *msg, struct kiocb *iocb,
+ struct file *file, const struct iovec *iov,
+ unsigned long nr_segs)
+{
+ struct socket *sock = file->private_data;
+ size_t size = 0;
+ int i;
+
+ for (i = 0; i < nr_segs; i++)
+ size += iov[i].iov_len;
+
+ msg->msg_name = NULL;
+ msg->msg_namelen = 0;
+ msg->msg_control = NULL;
+ msg->msg_controllen = 0;
+ msg->msg_iov = (struct iovec *)iov;
+ msg->msg_iovlen = nr_segs;
+ msg->msg_flags = (file->f_flags & O_NONBLOCK) ? MSG_DONTWAIT : 0;
+ if (sock->type == SOCK_SEQPACKET)
+ msg->msg_flags |= MSG_EOR;
+
+ return __sock_sendmsg(iocb, sock, msg, size);
+}
+
+static ssize_t sock_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t pos)
+{
+ struct sock_iocb siocb, *x;
+
+ if (pos != 0)
+ return -ESPIPE;
+
+ x = alloc_sock_iocb(iocb, &siocb);
+ if (!x)
+ return -ENOMEM;
+
+ return do_sock_write(&x->async_msg, iocb, iocb->ki_filp, iov, nr_segs);
+}
+
+/*
+ * Atomic setting of ioctl hooks to avoid race
+ * with module unload.
+ */
+
+static DEFINE_MUTEX(br_ioctl_mutex);
+static int (*br_ioctl_hook) (struct net *, unsigned int cmd, void __user *arg) = NULL;
+
+void brioctl_set(int (*hook) (struct net *, unsigned int, void __user *))
+{
+ mutex_lock(&br_ioctl_mutex);
+ br_ioctl_hook = hook;
+ mutex_unlock(&br_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(brioctl_set);
+
+static DEFINE_MUTEX(vlan_ioctl_mutex);
+static int (*vlan_ioctl_hook) (struct net *, void __user *arg);
+
+void vlan_ioctl_set(int (*hook) (struct net *, void __user *))
+{
+ mutex_lock(&vlan_ioctl_mutex);
+ vlan_ioctl_hook = hook;
+ mutex_unlock(&vlan_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(vlan_ioctl_set);
+
+static DEFINE_MUTEX(dlci_ioctl_mutex);
+static int (*dlci_ioctl_hook) (unsigned int, void __user *);
+
+void dlci_ioctl_set(int (*hook) (unsigned int, void __user *))
+{
+ mutex_lock(&dlci_ioctl_mutex);
+ dlci_ioctl_hook = hook;
+ mutex_unlock(&dlci_ioctl_mutex);
+}
+
+EXPORT_SYMBOL(dlci_ioctl_set);
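+
+/*
+ * Editor's sketch (not part of the original socket.c): how a module such as
+ * the bridge driver would typically install and remove its ioctl hook
+ * through the setters above (the handler name is hypothetical).
+ *
+ *	static int my_br_ioctl(struct net *net, unsigned int cmd,
+ *			       void __user *arg)
+ *	{ ... }
+ *
+ *	module init:	brioctl_set(my_br_ioctl);
+ *	module exit:	brioctl_set(NULL);
+ *
+ * Taking br_ioctl_mutex in the setter is what keeps sock_ioctl() from
+ * racing with module unload.
+ */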
+
+/*
+ * With an ioctl, arg may well be a user mode pointer, but we don't know
+ * what to do with it - that's up to the protocol still.
+ */
+
+static long sock_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+ struct socket *sock;
+ struct sock *sk;
+ void __user *argp = (void __user *)arg;
+ int pid, err;
+ struct net *net;
+
+ sock = file->private_data;
+ sk = sock->sk;
+ net = sock_net(sk);
+ if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) {
+ err = dev_ioctl(net, cmd, argp);
+ } else
+#ifdef CONFIG_WIRELESS_EXT
+ if (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) {
+ err = dev_ioctl(net, cmd, argp);
+ } else
+#endif /* CONFIG_WIRELESS_EXT */
+ switch (cmd) {
+ case FIOSETOWN:
+ case SIOCSPGRP:
+ err = -EFAULT;
+ if (get_user(pid, (int __user *)argp))
+ break;
+ err = f_setown(sock->file, pid, 1);
+ break;
+ case FIOGETOWN:
+ case SIOCGPGRP:
+ err = put_user(f_getown(sock->file),
+ (int __user *)argp);
+ break;
+ case SIOCGIFBR:
+ case SIOCSIFBR:
+ case SIOCBRADDBR:
+ case SIOCBRDELBR:
+ err = -ENOPKG;
+ if (!br_ioctl_hook)
+ request_module("bridge");
+
+ mutex_lock(&br_ioctl_mutex);
+ if (br_ioctl_hook)
+ err = br_ioctl_hook(net, cmd, argp);
+ mutex_unlock(&br_ioctl_mutex);
+ break;
+ case SIOCGIFVLAN:
+ case SIOCSIFVLAN:
+ err = -ENOPKG;
+ if (!vlan_ioctl_hook)
+ request_module("8021q");
+
+ mutex_lock(&vlan_ioctl_mutex);
+ if (vlan_ioctl_hook)
+ err = vlan_ioctl_hook(net, argp);
+ mutex_unlock(&vlan_ioctl_mutex);
+ break;
+ case SIOCADDDLCI:
+ case SIOCDELDLCI:
+ err = -ENOPKG;
+ if (!dlci_ioctl_hook)
+ request_module("dlci");
+
+ mutex_lock(&dlci_ioctl_mutex);
+ if (dlci_ioctl_hook)
+ err = dlci_ioctl_hook(cmd, argp);
+ mutex_unlock(&dlci_ioctl_mutex);
+ break;
+ default:
+ err = sock->ops->ioctl(sock, cmd, arg);
+
+ /*
+ * If this ioctl is unknown try to hand it down
+ * to the NIC driver.
+ */
+ if (err == -ENOIOCTLCMD)
+ err = dev_ioctl(net, cmd, argp);
+ break;
+ }
+ return err;
+}
+
+int sock_create_lite(int family, int type, int protocol, struct socket **res)
+{
+ int err;
+ struct socket *sock = NULL;
+
+ err = security_socket_create(family, type, protocol, 1);
+ if (err)
+ goto out;
+
+ sock = sock_alloc();
+ if (!sock) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sock->type = type;
+ err = security_socket_post_create(sock, family, type, protocol, 1);
+ if (err)
+ goto out_release;
+
+out:
+ *res = sock;
+ return err;
+out_release:
+ sock_release(sock);
+ sock = NULL;
+ goto out;
+}
+
+/* No kernel lock held - perfect */
+static unsigned int sock_poll(struct file *file, poll_table *wait)
+{
+ struct socket *sock;
+
+ /*
+ * We can't return errors to poll, so it's either yes or no.
+ */
+ sock = file->private_data;
+ return sock->ops->poll(file, sock, wait);
+}
+
+static int sock_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct socket *sock = file->private_data;
+
+ return sock->ops->mmap(file, sock, vma);
+}
+
+static int sock_close(struct inode *inode, struct file *filp)
+{
+ /*
+	 * It is possible the inode is NULL if we were
+	 * closing an unfinished socket.
+ */
+
+ if (!inode) {
+ printk(KERN_DEBUG "sock_close: NULL inode\n");
+ return 0;
+ }
+ sock_release(SOCKET_I(inode));
+ return 0;
+}
+
+/*
+ * Update the socket async list
+ *
+ * Fasync_list locking strategy.
+ *
+ * 1. fasync_list is modified only under process context socket lock
+ * i.e. under semaphore.
+ * 2. fasync_list is used under read_lock(&sk->sk_callback_lock)
+ * or under socket lock.
+ * 3. fasync_list can be used from softirq context, so that
+ *    modifications under the socket lock also have to take
+ * write_lock_bh(&sk->sk_callback_lock).
+ * --ANK (990710)
+ */
+
+static int sock_fasync(int fd, struct file *filp, int on)
+{
+ struct fasync_struct *fa, *fna = NULL, **prev;
+ struct socket *sock;
+ struct sock *sk;
+
+ if (on) {
+ fna = kmalloc(sizeof(struct fasync_struct), GFP_KERNEL);
+ if (fna == NULL)
+ return -ENOMEM;
+ }
+
+ sock = filp->private_data;
+
+ sk = sock->sk;
+ if (sk == NULL) {
+ kfree(fna);
+ return -EINVAL;
+ }
+
+ lock_sock(sk);
+
+ prev = &(sock->fasync_list);
+
+ for (fa = *prev; fa != NULL; prev = &fa->fa_next, fa = *prev)
+ if (fa->fa_file == filp)
+ break;
+
+ if (on) {
+ if (fa != NULL) {
+ write_lock_bh(&sk->sk_callback_lock);
+ fa->fa_fd = fd;
+ write_unlock_bh(&sk->sk_callback_lock);
+
+ kfree(fna);
+ goto out;
+ }
+ fna->fa_file = filp;
+ fna->fa_fd = fd;
+ fna->magic = FASYNC_MAGIC;
+ fna->fa_next = sock->fasync_list;
+ write_lock_bh(&sk->sk_callback_lock);
+ sock->fasync_list = fna;
+ write_unlock_bh(&sk->sk_callback_lock);
+ } else {
+ if (fa != NULL) {
+ write_lock_bh(&sk->sk_callback_lock);
+ *prev = fa->fa_next;
+ write_unlock_bh(&sk->sk_callback_lock);
+ kfree(fa);
+ }
+ }
+
+out:
+ release_sock(sock->sk);
+ return 0;
+}
+
+/* This function may be called only under socket lock or callback_lock */
+
+int sock_wake_async(struct socket *sock, int how, int band)
+{
+ if (!sock || !sock->fasync_list)
+ return -1;
+ switch (how) {
+ case SOCK_WAKE_WAITD:
+ if (test_bit(SOCK_ASYNC_WAITDATA, &sock->flags))
+ break;
+ goto call_kill;
+ case SOCK_WAKE_SPACE:
+ if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags))
+ break;
+ /* fall through */
+ case SOCK_WAKE_IO:
+call_kill:
+ __kill_fasync(sock->fasync_list, SIGIO, band);
+ break;
+ case SOCK_WAKE_URG:
+ __kill_fasync(sock->fasync_list, SIGURG, band);
+ }
+ return 0;
+}
+
+static int __sock_create(struct net *net, int family, int type, int protocol,
+ struct socket **res, int kern)
+{
+ int err;
+ struct socket *sock;
+ const struct net_proto_family *pf;
+
+ /*
+	 * Check that the family and type are in range
+ */
+ if (family < 0 || family >= NPROTO)
+ return -EAFNOSUPPORT;
+ if (type < 0 || type >= SOCK_MAX)
+ return -EINVAL;
+
+ /* Compatibility.
+
+ This uglymoron is moved from INET layer to here to avoid
+ deadlock in module load.
+ */
+ if (family == PF_INET && type == SOCK_PACKET) {
+ static int warned;
+ if (!warned) {
+ warned = 1;
+ printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
+ current->comm);
+ }
+ family = PF_PACKET;
+ }
+
+ err = security_socket_create(family, type, protocol, kern);
+ if (err)
+ return err;
+
+ /*
+	 * Allocate the socket and allow the family to set things up. If
+ * the protocol is 0, the family is instructed to select an appropriate
+ * default.
+ */
+ sock = sock_alloc();
+ if (!sock) {
+ if (net_ratelimit())
+ printk(KERN_WARNING "socket: no more sockets\n");
+		return -ENFILE;	/* Not exactly a match, but it's the
+				   closest POSIX thing */
+ }
+
+ sock->type = type;
+
+#ifdef CONFIG_MODULES
+ /* Attempt to load a protocol module if the find failed.
+ *
+	 * 12/09/1996 Marcin: But! this really only makes sense if the user
+ * requested real, full-featured networking support upon configuration.
+ * Otherwise module support will break!
+ */
+ if (net_families[family] == NULL)
+ request_module("net-pf-%d", family);
+#endif
+
+ rcu_read_lock();
+ pf = rcu_dereference(net_families[family]);
+ err = -EAFNOSUPPORT;
+ if (!pf)
+ goto out_release;
+
+ /*
+ * We will call the ->create function, that possibly is in a loadable
+ * module, so we have to bump that loadable module refcnt first.
+ */
+ if (!try_module_get(pf->owner))
+ goto out_release;
+
+ /* Now protected by module ref count */
+ rcu_read_unlock();
+
+ err = pf->create(net, sock, protocol);
+ if (err < 0)
+ goto out_module_put;
+
+ /*
+	 * Now take a reference on the [loadable] module that owns this
+	 * socket; at sock_release time we will decrement its refcnt.
+ */
+ if (!try_module_get(sock->ops->owner))
+ goto out_module_busy;
+
+ /*
+ * Now that we're done with the ->create function, the [loadable]
+ * module can have its refcnt decremented
+ */
+ module_put(pf->owner);
+ err = security_socket_post_create(sock, family, type, protocol, kern);
+ if (err)
+ goto out_sock_release;
+ *res = sock;
+
+ return 0;
+
+out_module_busy:
+ err = -EAFNOSUPPORT;
+out_module_put:
+ sock->ops = NULL;
+ module_put(pf->owner);
+out_sock_release:
+ sock_release(sock);
+ return err;
+
+out_release:
+ rcu_read_unlock();
+ goto out_sock_release;
+}
+
+int sock_create(int family, int type, int protocol, struct socket **res)
+{
+ return __sock_create(current->nsproxy->net_ns, family, type, protocol, res, 0);
+}
+
+int sock_create_kern(int family, int type, int protocol, struct socket **res)
+{
+ return __sock_create(&init_net, family, type, protocol, res, 1);
+}
+
+SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
+{
+ int retval;
+ struct socket *sock;
+ int flags;
+
+ /* Check the SOCK_* constants for consistency. */
+ BUILD_BUG_ON(SOCK_CLOEXEC != O_CLOEXEC);
+ BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
+ BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
+ BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
+
+ flags = type & ~SOCK_TYPE_MASK;
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+ type &= SOCK_TYPE_MASK;
+
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+ retval = sock_create(family, type, protocol, &sock);
+ if (retval < 0)
+ goto out;
+
+ retval = sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
+ if (retval < 0)
+ goto out_release;
+
+out:
+	/* It may already be another descriptor 8) Not a kernel problem. */
+ return retval;
+
+out_release:
+ sock_release(sock);
+ return retval;
+}
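+
+/*
+ * Editor's sketch (not part of the original socket.c): from user space the
+ * flag splitting above corresponds to calls like
+ *
+ *	int fd = socket(AF_INET, SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC, 0);
+ *
+ * where the SOCK_* flags are masked off into O_NONBLOCK/O_CLOEXEC before
+ * the descriptor is created.
+ */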
+
+/*
+ * Create a pair of connected sockets.
+ */
+
+SYSCALL_DEFINE4(socketpair, int, family, int, type, int, protocol,
+ int __user *, usockvec)
+{
+ struct socket *sock1, *sock2;
+ int fd1, fd2, err;
+ struct file *newfile1, *newfile2;
+ int flags;
+
+ flags = type & ~SOCK_TYPE_MASK;
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+ type &= SOCK_TYPE_MASK;
+
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+ /*
+ * Obtain the first socket and check if the underlying protocol
+ * supports the socketpair call.
+ */
+
+ err = sock_create(family, type, protocol, &sock1);
+ if (err < 0)
+ goto out;
+
+ err = sock_create(family, type, protocol, &sock2);
+ if (err < 0)
+ goto out_release_1;
+
+ err = sock1->ops->socketpair(sock1, sock2);
+ if (err < 0)
+ goto out_release_both;
+
+ fd1 = sock_alloc_fd(&newfile1, flags & O_CLOEXEC);
+ if (unlikely(fd1 < 0)) {
+ err = fd1;
+ goto out_release_both;
+ }
+
+ fd2 = sock_alloc_fd(&newfile2, flags & O_CLOEXEC);
+ if (unlikely(fd2 < 0)) {
+ err = fd2;
+ put_filp(newfile1);
+ put_unused_fd(fd1);
+ goto out_release_both;
+ }
+
+ err = sock_attach_fd(sock1, newfile1, flags & O_NONBLOCK);
+ if (unlikely(err < 0)) {
+ goto out_fd2;
+ }
+
+ err = sock_attach_fd(sock2, newfile2, flags & O_NONBLOCK);
+ if (unlikely(err < 0)) {
+ fput(newfile1);
+ goto out_fd1;
+ }
+
+ audit_fd_pair(fd1, fd2);
+ fd_install(fd1, newfile1);
+ fd_install(fd2, newfile2);
+	/* fd1 and fd2 may already refer to other descriptors.
+	 * Not a kernel problem.
+ */
+
+ err = put_user(fd1, &usockvec[0]);
+ if (!err)
+ err = put_user(fd2, &usockvec[1]);
+ if (!err)
+ return 0;
+
+ sys_close(fd2);
+ sys_close(fd1);
+ return err;
+
+out_release_both:
+ sock_release(sock2);
+out_release_1:
+ sock_release(sock1);
+out:
+ return err;
+
+out_fd2:
+ put_filp(newfile1);
+ sock_release(sock1);
+out_fd1:
+ put_filp(newfile2);
+ sock_release(sock2);
+ put_unused_fd(fd1);
+ put_unused_fd(fd2);
+ goto out;
+}
+
+/*
+ * Bind a name to a socket. Nothing much to do here since it's
+ * the protocol's responsibility to handle the local address.
+ *
+ * We move the socket address to kernel space before we call
+ * the protocol layer (having also checked the address is ok).
+ */
+
+SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+ err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
+ if (err >= 0) {
+ err = security_socket_bind(sock,
+ (struct sockaddr *)&address,
+ addrlen);
+ if (!err)
+ err = sock->ops->bind(sock,
+ (struct sockaddr *)
+ &address, addrlen);
+ }
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Perform a listen. Basically, we allow the protocol to do anything
+ * necessary for a listen, and if that works, we mark the socket as
+ * ready for listening.
+ */
+
+SYSCALL_DEFINE2(listen, int, fd, int, backlog)
+{
+ struct socket *sock;
+ int err, fput_needed;
+ int somaxconn;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock) {
+ somaxconn = sock_net(sock->sk)->core.sysctl_somaxconn;
+ if ((unsigned)backlog > somaxconn)
+ backlog = somaxconn;
+
+ err = security_socket_listen(sock, backlog);
+ if (!err)
+ err = sock->ops->listen(sock, backlog);
+
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * For accept, we attempt to create a new socket, set up the link
+ * with the client, wake up the client, then return the new
+ * connected fd. We collect the address of the connector in kernel
+ * space and move it to user at the very end. This is unclean because
+ * we open the socket then return an error.
+ *
+ * 1003.1g adds the ability to query connection-pending status via
+ * recvmsg(). We need to add that support in a way that's
+ * clean when we restructure accept as well.
+ */
+
+SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen, int, flags)
+{
+ struct socket *sock, *newsock;
+ struct file *newfile;
+ int err, len, newfd, fput_needed;
+ struct sockaddr_storage address;
+
+ if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+ return -EINVAL;
+
+ if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
+ flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ err = -ENFILE;
+ if (!(newsock = sock_alloc()))
+ goto out_put;
+
+ newsock->type = sock->type;
+ newsock->ops = sock->ops;
+
+ /*
+ * We don't need try_module_get here, as the listening socket (sock)
+ * has the protocol module (sock->ops->owner) held.
+ */
+ __module_get(newsock->ops->owner);
+
+ newfd = sock_alloc_fd(&newfile, flags & O_CLOEXEC);
+ if (unlikely(newfd < 0)) {
+ err = newfd;
+ sock_release(newsock);
+ goto out_put;
+ }
+
+ err = sock_attach_fd(newsock, newfile, flags & O_NONBLOCK);
+ if (err < 0)
+ goto out_fd_simple;
+
+ err = security_socket_accept(sock, newsock);
+ if (err)
+ goto out_fd;
+
+ err = sock->ops->accept(sock, newsock, sock->file->f_flags);
+ if (err < 0)
+ goto out_fd;
+
+ if (upeer_sockaddr) {
+ if (newsock->ops->getname(newsock, (struct sockaddr *)&address,
+ &len, 2) < 0) {
+ err = -ECONNABORTED;
+ goto out_fd;
+ }
+ err = move_addr_to_user((struct sockaddr *)&address,
+ len, upeer_sockaddr, upeer_addrlen);
+ if (err < 0)
+ goto out_fd;
+ }
+
+	/* File flags are not inherited via accept(), unlike on some other OSes. */
+
+ fd_install(newfd, newfile);
+ err = newfd;
+
+ security_socket_post_accept(sock, newsock);
+
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+out_fd_simple:
+ sock_release(newsock);
+ put_filp(newfile);
+ put_unused_fd(newfd);
+ goto out_put;
+out_fd:
+ fput(newfile);
+ put_unused_fd(newfd);
+ goto out_put;
+}
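+
+/*
+ * Editor's sketch (not part of the original socket.c): the bind/listen/accept
+ * syscalls above combine into the usual server pattern from user space
+ * (addresses and error handling omitted):
+ *
+ *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
+ *	listen(fd, 128);
+ *	client = accept4(fd, NULL, NULL, SOCK_CLOEXEC);
+ */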
+
+SYSCALL_DEFINE3(accept, int, fd, struct sockaddr __user *, upeer_sockaddr,
+ int __user *, upeer_addrlen)
+{
+ return sys_accept4(fd, upeer_sockaddr, upeer_addrlen, 0);
+}
+
+/*
+ * Attempt to connect to a socket with the server address. The address
+ * is in user space so we verify it is OK and move it to kernel space.
+ *
+ * For 1003.1g we need to add clean support for a bind to AF_UNSPEC to
+ * break bindings.
+ *
+ * NOTE: 1003.1g draft 6.3 is broken with respect to AX.25/NetROM and
+ * other SEQPACKET protocols that take time to connect() as it doesn't
+ * include the -EINPROGRESS status for such sockets.
+ */
+
+SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
+ int, addrlen)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+ err = move_addr_to_kernel(uservaddr, addrlen, (struct sockaddr *)&address);
+ if (err < 0)
+ goto out_put;
+
+ err =
+ security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
+ if (err)
+ goto out_put;
+
+ err = sock->ops->connect(sock, (struct sockaddr *)&address, addrlen,
+ sock->file->f_flags);
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Get the local address ('name') of a socket object. Move the obtained
+ * name to user space.
+ */
+
+SYSCALL_DEFINE3(getsockname, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int len, err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ err = security_socket_getsockname(sock);
+ if (err)
+ goto out_put;
+
+ err = sock->ops->getname(sock, (struct sockaddr *)&address, &len, 0);
+ if (err)
+ goto out_put;
+ err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr, usockaddr_len);
+
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Get the remote address ('name') of a socket object. Move the obtained
+ * name to user space.
+ */
+
+SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
+ int __user *, usockaddr_len)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int len, err, fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_getpeername(sock);
+ if (err) {
+ fput_light(sock->file, fput_needed);
+ return err;
+ }
+
+ err =
+ sock->ops->getname(sock, (struct sockaddr *)&address, &len,
+ 1);
+ if (!err)
+ err = move_addr_to_user((struct sockaddr *)&address, len, usockaddr,
+ usockaddr_len);
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Send a datagram to a given address. We move the address into kernel
+ * space and check the user space data area is readable before invoking
+ * the protocol.
+ */
+
+SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int, addr_len)
+{
+ struct socket *sock;
+ struct sockaddr_storage address;
+ int err;
+ struct msghdr msg;
+ struct iovec iov;
+ int fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ iov.iov_base = buff;
+ iov.iov_len = len;
+ msg.msg_name = NULL;
+ msg.msg_iov = &iov;
+ msg.msg_iovlen = 1;
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_namelen = 0;
+ if (addr) {
+ err = move_addr_to_kernel(addr, addr_len, (struct sockaddr *)&address);
+ if (err < 0)
+ goto out_put;
+ msg.msg_name = (struct sockaddr *)&address;
+ msg.msg_namelen = addr_len;
+ }
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ msg.msg_flags = flags;
+ err = sock_sendmsg(sock, &msg, len);
+
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Send a datagram down a socket.
+ */
+
+SYSCALL_DEFINE4(send, int, fd, void __user *, buff, size_t, len,
+ unsigned, flags)
+{
+ return sys_sendto(fd, buff, len, flags, NULL, 0);
+}
+
+/*
+ * Receive a frame from the socket and optionally record the address of the
+ * sender. We verify the buffers are writable and if needed move the
+ * sender address from kernel to user space.
+ */
+
+SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
+ unsigned, flags, struct sockaddr __user *, addr,
+ int __user *, addr_len)
+{
+ struct socket *sock;
+ struct iovec iov;
+ struct msghdr msg;
+ struct sockaddr_storage address;
+ int err, err2;
+ int fput_needed;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ msg.msg_control = NULL;
+ msg.msg_controllen = 0;
+ msg.msg_iovlen = 1;
+ msg.msg_iov = &iov;
+ iov.iov_len = size;
+ iov.iov_base = ubuf;
+ msg.msg_name = (struct sockaddr *)&address;
+ msg.msg_namelen = sizeof(address);
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg, size, flags);
+
+ if (err >= 0 && addr != NULL) {
+ err2 = move_addr_to_user((struct sockaddr *)&address,
+ msg.msg_namelen, addr, addr_len);
+ if (err2 < 0)
+ err = err2;
+ }
+
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+/*
+ * Receive a datagram from a socket.
+ */
+
+asmlinkage long sys_recv(int fd, void __user *ubuf, size_t size,
+ unsigned flags)
+{
+ return sys_recvfrom(fd, ubuf, size, flags, NULL, NULL);
+}
+
+/*
+ * Set a socket option. Because we don't know the option lengths we have
+ * to pass the user mode parameter for the protocols to sort out.
+ */
+
+SYSCALL_DEFINE5(setsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int, optlen)
+{
+ int err, fput_needed;
+ struct socket *sock;
+
+ if (optlen < 0)
+ return -EINVAL;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_setsockopt(sock, level, optname);
+ if (err)
+ goto out_put;
+
+ if (level == SOL_SOCKET)
+ err =
+ sock_setsockopt(sock, level, optname, optval,
+ optlen);
+ else
+ err =
+ sock->ops->setsockopt(sock, level, optname, optval,
+ optlen);
+out_put:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Get a socket option. Because we don't know the option lengths we have
+ * to pass a user mode parameter for the protocols to sort out.
+ */
+
+SYSCALL_DEFINE5(getsockopt, int, fd, int, level, int, optname,
+ char __user *, optval, int __user *, optlen)
+{
+ int err, fput_needed;
+ struct socket *sock;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_getsockopt(sock, level, optname);
+ if (err)
+ goto out_put;
+
+ if (level == SOL_SOCKET)
+ err =
+ sock_getsockopt(sock, level, optname, optval,
+ optlen);
+ else
+ err =
+ sock->ops->getsockopt(sock, level, optname, optval,
+ optlen);
+out_put:
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/*
+ * Shutdown a socket.
+ */
+
+SYSCALL_DEFINE2(shutdown, int, fd, int, how)
+{
+ int err, fput_needed;
+ struct socket *sock;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (sock != NULL) {
+ err = security_socket_shutdown(sock, how);
+ if (!err)
+ err = sock->ops->shutdown(sock, how);
+ fput_light(sock->file, fput_needed);
+ }
+ return err;
+}
+
+/* A couple of helpful macros for getting the address of the 32/64 bit
+ * fields which are the same type (int / unsigned) on our platforms.
+ */
+#define COMPAT_MSG(msg, member) ((MSG_CMSG_COMPAT & flags) ? &msg##_compat->member : &msg->member)
+#define COMPAT_NAMELEN(msg) COMPAT_MSG(msg, msg_namelen)
+#define COMPAT_FLAGS(msg) COMPAT_MSG(msg, msg_flags)
+
+/*
+ * BSD sendmsg interface
+ */
+
+SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags)
+{
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+ struct socket *sock;
+ struct sockaddr_storage address;
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ unsigned char ctl[sizeof(struct cmsghdr) + 20]
+ __attribute__ ((aligned(sizeof(__kernel_size_t))));
+ /* 20 is size of ipv6_pktinfo */
+ unsigned char *ctl_buf = ctl;
+ struct msghdr msg_sys;
+ int err, ctl_len, iov_size, total_len;
+ int fput_needed;
+
+ err = -EFAULT;
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(&msg_sys, msg_compat))
+ return -EFAULT;
+ }
+ else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+ return -EFAULT;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ /* do not move before msg_sys is valid */
+ err = -EMSGSIZE;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area */
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /* This will also move the address data into kernel space */
+ if (MSG_CMSG_COMPAT & flags) {
+ err = verify_compat_iovec(&msg_sys, iov,
+ (struct sockaddr *)&address,
+ VERIFY_READ);
+ } else
+ err = verify_iovec(&msg_sys, iov,
+ (struct sockaddr *)&address,
+ VERIFY_READ);
+ if (err < 0)
+ goto out_freeiov;
+ total_len = err;
+
+ err = -ENOBUFS;
+
+ if (msg_sys.msg_controllen > INT_MAX)
+ goto out_freeiov;
+ ctl_len = msg_sys.msg_controllen;
+ if ((MSG_CMSG_COMPAT & flags) && ctl_len) {
+ err =
+ cmsghdr_from_user_compat_to_kern(&msg_sys, sock->sk, ctl,
+ sizeof(ctl));
+ if (err)
+ goto out_freeiov;
+ ctl_buf = msg_sys.msg_control;
+ ctl_len = msg_sys.msg_controllen;
+ } else if (ctl_len) {
+ if (ctl_len > sizeof(ctl)) {
+ ctl_buf = sock_kmalloc(sock->sk, ctl_len, GFP_KERNEL);
+ if (ctl_buf == NULL)
+ goto out_freeiov;
+ }
+ err = -EFAULT;
+ /*
+ * Careful! Before this, msg_sys.msg_control contains a user pointer.
+ * Afterwards, it will be a kernel pointer. Thus the compiler-assisted
+ * checking falls down on this.
+ */
+ if (copy_from_user(ctl_buf, (void __user *)msg_sys.msg_control,
+ ctl_len))
+ goto out_freectl;
+ msg_sys.msg_control = ctl_buf;
+ }
+ msg_sys.msg_flags = flags;
+
+ if (sock->file->f_flags & O_NONBLOCK)
+ msg_sys.msg_flags |= MSG_DONTWAIT;
+ err = sock_sendmsg(sock, &msg_sys, total_len);
+
+out_freectl:
+ if (ctl_buf != ctl)
+ sock_kfree_s(sock->sk, ctl_buf, ctl_len);
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
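+
+/*
+ * Editor's sketch (not part of the original socket.c): from user space this
+ * path is reached through a plain sendmsg() call, e.g.
+ *
+ *	struct iovec iov = { .iov_base = buf, .iov_len = len };
+ *	struct msghdr m = { .msg_iov = &iov, .msg_iovlen = 1 };
+ *
+ *	sendmsg(fd, &m, 0);
+ *
+ * The control-buffer handling above only comes into play when
+ * msg_control/msg_controllen are set.
+ */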
+
+/*
+ * BSD recvmsg interface
+ */
+
+SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg,
+ unsigned int, flags)
+{
+ struct compat_msghdr __user *msg_compat =
+ (struct compat_msghdr __user *)msg;
+ struct socket *sock;
+ struct iovec iovstack[UIO_FASTIOV];
+ struct iovec *iov = iovstack;
+ struct msghdr msg_sys;
+ unsigned long cmsg_ptr;
+ int err, iov_size, total_len, len;
+ int fput_needed;
+
+ /* kernel mode address */
+ struct sockaddr_storage addr;
+
+ /* user mode address pointers */
+ struct sockaddr __user *uaddr;
+ int __user *uaddr_len;
+
+ if (MSG_CMSG_COMPAT & flags) {
+ if (get_compat_msghdr(&msg_sys, msg_compat))
+ return -EFAULT;
+ }
+ else if (copy_from_user(&msg_sys, msg, sizeof(struct msghdr)))
+ return -EFAULT;
+
+ sock = sockfd_lookup_light(fd, &err, &fput_needed);
+ if (!sock)
+ goto out;
+
+ err = -EMSGSIZE;
+ if (msg_sys.msg_iovlen > UIO_MAXIOV)
+ goto out_put;
+
+ /* Check whether to allocate the iovec area */
+ err = -ENOMEM;
+ iov_size = msg_sys.msg_iovlen * sizeof(struct iovec);
+ if (msg_sys.msg_iovlen > UIO_FASTIOV) {
+ iov = sock_kmalloc(sock->sk, iov_size, GFP_KERNEL);
+ if (!iov)
+ goto out_put;
+ }
+
+ /*
+ * Save the user-mode address (verify_iovec will change the
+ * kernel msghdr to use the kernel address space)
+ */
+
+ uaddr = (__force void __user *)msg_sys.msg_name;
+ uaddr_len = COMPAT_NAMELEN(msg);
+ if (MSG_CMSG_COMPAT & flags) {
+ err = verify_compat_iovec(&msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+ } else
+ err = verify_iovec(&msg_sys, iov,
+ (struct sockaddr *)&addr,
+ VERIFY_WRITE);
+ if (err < 0)
+ goto out_freeiov;
+ total_len = err;
+
+ cmsg_ptr = (unsigned long)msg_sys.msg_control;
+ msg_sys.msg_flags = flags & (MSG_CMSG_CLOEXEC|MSG_CMSG_COMPAT);
+
+ if (sock->file->f_flags & O_NONBLOCK)
+ flags |= MSG_DONTWAIT;
+ err = sock_recvmsg(sock, &msg_sys, total_len, flags);
+ if (err < 0)
+ goto out_freeiov;
+ len = err;
+
+ if (uaddr != NULL) {
+ err = move_addr_to_user((struct sockaddr *)&addr,
+ msg_sys.msg_namelen, uaddr,
+ uaddr_len);
+ if (err < 0)
+ goto out_freeiov;
+ }
+ err = __put_user((msg_sys.msg_flags & ~MSG_CMSG_COMPAT),
+ COMPAT_FLAGS(msg));
+ if (err)
+ goto out_freeiov;
+ if (MSG_CMSG_COMPAT & flags)
+ err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
+ &msg_compat->msg_controllen);
+ else
+ err = __put_user((unsigned long)msg_sys.msg_control - cmsg_ptr,
+ &msg->msg_controllen);
+ if (err)
+ goto out_freeiov;
+ err = len;
+
+out_freeiov:
+ if (iov != iovstack)
+ sock_kfree_s(sock->sk, iov, iov_size);
+out_put:
+ fput_light(sock->file, fput_needed);
+out:
+ return err;
+}
+
+#ifdef __ARCH_WANT_SYS_SOCKETCALL
+
+/* Argument list sizes for sys_socketcall */
+#define AL(x) ((x) * sizeof(unsigned long))
+static const unsigned char nargs[19]={
+ AL(0),AL(3),AL(3),AL(3),AL(2),AL(3),
+ AL(3),AL(3),AL(4),AL(4),AL(4),AL(6),
+ AL(6),AL(2),AL(5),AL(5),AL(3),AL(3),
+ AL(4)
+};
+
+#undef AL
+
+/*
+ * System call vectors.
+ *
+ * Argument checking cleaned up. Saved 20% in size.
+ * This function doesn't need to set the kernel lock because
+ * it is set by the callees.
+ */
+
+SYSCALL_DEFINE2(socketcall, int, call, unsigned long __user *, args)
+{
+ unsigned long a[6];
+ unsigned long a0, a1;
+ int err;
+
+ if (call < 1 || call > SYS_ACCEPT4)
+ return -EINVAL;
+
+ /* copy_from_user should be SMP safe. */
+ if (copy_from_user(a, args, nargs[call]))
+ return -EFAULT;
+
+ audit_socketcall(nargs[call] / sizeof(unsigned long), a);
+
+ a0 = a[0];
+ a1 = a[1];
+
+ switch (call) {
+ case SYS_SOCKET:
+ err = sys_socket(a0, a1, a[2]);
+ break;
+ case SYS_BIND:
+ err = sys_bind(a0, (struct sockaddr __user *)a1, a[2]);
+ break;
+ case SYS_CONNECT:
+ err = sys_connect(a0, (struct sockaddr __user *)a1, a[2]);
+ break;
+ case SYS_LISTEN:
+ err = sys_listen(a0, a1);
+ break;
+ case SYS_ACCEPT:
+ err = sys_accept4(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2], 0);
+ break;
+ case SYS_GETSOCKNAME:
+ err =
+ sys_getsockname(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2]);
+ break;
+ case SYS_GETPEERNAME:
+ err =
+ sys_getpeername(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2]);
+ break;
+ case SYS_SOCKETPAIR:
+ err = sys_socketpair(a0, a1, a[2], (int __user *)a[3]);
+ break;
+ case SYS_SEND:
+ err = sys_send(a0, (void __user *)a1, a[2], a[3]);
+ break;
+ case SYS_SENDTO:
+ err = sys_sendto(a0, (void __user *)a1, a[2], a[3],
+ (struct sockaddr __user *)a[4], a[5]);
+ break;
+ case SYS_RECV:
+ err = sys_recv(a0, (void __user *)a1, a[2], a[3]);
+ break;
+ case SYS_RECVFROM:
+ err = sys_recvfrom(a0, (void __user *)a1, a[2], a[3],
+ (struct sockaddr __user *)a[4],
+ (int __user *)a[5]);
+ break;
+ case SYS_SHUTDOWN:
+ err = sys_shutdown(a0, a1);
+ break;
+ case SYS_SETSOCKOPT:
+ err = sys_setsockopt(a0, a1, a[2], (char __user *)a[3], a[4]);
+ break;
+ case SYS_GETSOCKOPT:
+ err =
+ sys_getsockopt(a0, a1, a[2], (char __user *)a[3],
+ (int __user *)a[4]);
+ break;
+ case SYS_SENDMSG:
+ err = sys_sendmsg(a0, (struct msghdr __user *)a1, a[2]);
+ break;
+ case SYS_RECVMSG:
+ err = sys_recvmsg(a0, (struct msghdr __user *)a1, a[2]);
+ break;
+ case SYS_ACCEPT4:
+ err = sys_accept4(a0, (struct sockaddr __user *)a1,
+ (int __user *)a[2], a[3]);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ return err;
+}
+
+#endif /* __ARCH_WANT_SYS_SOCKETCALL */
+
+/**
+ * sock_register - add a socket protocol handler
+ * @ops: description of protocol
+ *
+ * This function is called by a protocol handler that wants to
+ * advertise its address family, and have it linked into the
+ * socket interface. The value ops->family corresponds to the
+ * socket system call protocol family.
+ */
+int sock_register(const struct net_proto_family *ops)
+{
+ int err;
+
+ if (ops->family >= NPROTO) {
+ printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
+ NPROTO);
+ return -ENOBUFS;
+ }
+
+ spin_lock(&net_family_lock);
+ if (net_families[ops->family])
+ err = -EEXIST;
+ else {
+ net_families[ops->family] = ops;
+ err = 0;
+ }
+ spin_unlock(&net_family_lock);
+
+ printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
+ return err;
+}
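+
+/*
+ * Editor's sketch (not part of the original socket.c): a protocol module
+ * registers its family once at init time; the structure, family constant
+ * and handler names here are hypothetical.
+ *
+ *	static struct net_proto_family my_family_ops = {
+ *		.family	= PF_MYPROTO,
+ *		.create	= my_create,
+ *		.owner	= THIS_MODULE,
+ *	};
+ *
+ *	module init:	sock_register(&my_family_ops);
+ *	module exit:	sock_unregister(PF_MYPROTO);
+ */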
+
+/**
+ * sock_unregister - remove a protocol handler
+ * @family: protocol family to remove
+ *
+ * This function is called by a protocol handler that wants to
+ *	remove its address family, so that it is no longer used for
+ *	new socket creation.
+ *
+ *	If the protocol handler is a module, then it can use module reference
+ *	counts to protect against new references. If the protocol handler is not
+ *	a module then it needs to provide its own protection in
+ * the ops->create routine.
+ */
+void sock_unregister(int family)
+{
+ BUG_ON(family < 0 || family >= NPROTO);
+
+ spin_lock(&net_family_lock);
+ net_families[family] = NULL;
+ spin_unlock(&net_family_lock);
+
+ synchronize_rcu();
+
+ printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
+}
+
+static int __init sock_init(void)
+{
+ /*
+ * Initialize sock SLAB cache.
+ */
+
+ sk_init();
+
+ /*
+ * Initialize skbuff SLAB cache
+ */
+ skb_init();
+
+ /*
+ * Initialize the protocols module.
+ */
+
+ init_inodecache();
+ register_filesystem(&sock_fs_type);
+ sock_mnt = kern_mount(&sock_fs_type);
+
+ /* The real protocol initialization is performed in later initcalls.
+ */
+
+#ifdef CONFIG_NETFILTER
+ netfilter_init();
+#endif
+
+ return 0;
+}
+
+core_initcall(sock_init); /* early initcall */
+
+#ifdef CONFIG_PROC_FS
+void socket_seq_show(struct seq_file *seq)
+{
+ int cpu;
+ int counter = 0;
+
+ for_each_possible_cpu(cpu)
+ counter += per_cpu(sockets_in_use, cpu);
+
+ /* It can be negative, by the way. 8) */
+ if (counter < 0)
+ counter = 0;
+
+ seq_printf(seq, "sockets: used %d\n", counter);
+}
+#endif /* CONFIG_PROC_FS */
+
+#ifdef CONFIG_COMPAT
+static long compat_sock_ioctl(struct file *file, unsigned cmd,
+ unsigned long arg)
+{
+ struct socket *sock = file->private_data;
+ int ret = -ENOIOCTLCMD;
+ struct sock *sk;
+ struct net *net;
+
+ sk = sock->sk;
+ net = sock_net(sk);
+
+ if (sock->ops->compat_ioctl)
+ ret = sock->ops->compat_ioctl(sock, cmd, arg);
+
+ if (ret == -ENOIOCTLCMD &&
+ (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST))
+ ret = compat_wext_handle_ioctl(net, cmd, arg);
+
+ return ret;
+}
+#endif
+
+int kernel_bind(struct socket *sock, struct sockaddr *addr, int addrlen)
+{
+ return sock->ops->bind(sock, addr, addrlen);
+}
+
+int kernel_listen(struct socket *sock, int backlog)
+{
+ return sock->ops->listen(sock, backlog);
+}
+
+int kernel_accept(struct socket *sock, struct socket **newsock, int flags)
+{
+ struct sock *sk = sock->sk;
+ int err;
+
+ err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
+ newsock);
+ if (err < 0)
+ goto done;
+
+ err = sock->ops->accept(sock, *newsock, flags);
+ if (err < 0) {
+ sock_release(*newsock);
+ *newsock = NULL;
+ goto done;
+ }
+
+ (*newsock)->ops = sock->ops;
+ __module_get((*newsock)->ops->owner);
+
+done:
+ return err;
+}
+
+int kernel_connect(struct socket *sock, struct sockaddr *addr, int addrlen,
+ int flags)
+{
+ return sock->ops->connect(sock, addr, addrlen, flags);
+}
+
+int kernel_getsockname(struct socket *sock, struct sockaddr *addr,
+ int *addrlen)
+{
+ return sock->ops->getname(sock, addr, addrlen, 0);
+}
+
+int kernel_getpeername(struct socket *sock, struct sockaddr *addr,
+ int *addrlen)
+{
+ return sock->ops->getname(sock, addr, addrlen, 1);
+}
+
+int kernel_getsockopt(struct socket *sock, int level, int optname,
+ char *optval, int *optlen)
+{
+ mm_segment_t oldfs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+ err = sock_getsockopt(sock, level, optname, optval, optlen);
+ else
+ err = sock->ops->getsockopt(sock, level, optname, optval,
+ optlen);
+ set_fs(oldfs);
+ return err;
+}
+
+int kernel_setsockopt(struct socket *sock, int level, int optname,
+ char *optval, int optlen)
+{
+ mm_segment_t oldfs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ if (level == SOL_SOCKET)
+ err = sock_setsockopt(sock, level, optname, optval, optlen);
+ else
+ err = sock->ops->setsockopt(sock, level, optname, optval,
+ optlen);
+ set_fs(oldfs);
+ return err;
+}
+
+int kernel_sendpage(struct socket *sock, struct page *page, int offset,
+ size_t size, int flags)
+{
+ if (sock->ops->sendpage)
+ return sock->ops->sendpage(sock, page, offset, size, flags);
+
+ return sock_no_sendpage(sock, page, offset, size, flags);
+}
+
+int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg)
+{
+ mm_segment_t oldfs = get_fs();
+ int err;
+
+ set_fs(KERNEL_DS);
+ err = sock->ops->ioctl(sock, cmd, arg);
+ set_fs(oldfs);
+
+ return err;
+}
+
+int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
+{
+ return sock->ops->shutdown(sock, how);
+}
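+
+/*
+ * Editor's sketch (not part of the original socket.c): the kernel_* wrappers
+ * above give in-kernel users the same bind/listen/accept flow as user space,
+ * e.g. for a hypothetical in-kernel TCP listener (error handling omitted):
+ *
+ *	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
+ *	kernel_bind(sock, (struct sockaddr *)&sin, sizeof(sin));
+ *	kernel_listen(sock, 5);
+ *	kernel_accept(sock, &newsock, 0);
+ *	...
+ *	sock_release(newsock);
+ *	sock_release(sock);
+ */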
+
+EXPORT_SYMBOL(sock_create);
+EXPORT_SYMBOL(sock_create_kern);
+EXPORT_SYMBOL(sock_create_lite);
+EXPORT_SYMBOL(sock_map_fd);
+EXPORT_SYMBOL(sock_recvmsg);
+EXPORT_SYMBOL(sock_register);
+EXPORT_SYMBOL(sock_release);
+EXPORT_SYMBOL(sock_sendmsg);
+EXPORT_SYMBOL(sock_unregister);
+EXPORT_SYMBOL(sock_wake_async);
+EXPORT_SYMBOL(sockfd_lookup);
+EXPORT_SYMBOL(kernel_sendmsg);
+EXPORT_SYMBOL(kernel_recvmsg);
+EXPORT_SYMBOL(kernel_bind);
+EXPORT_SYMBOL(kernel_listen);
+EXPORT_SYMBOL(kernel_accept);
+EXPORT_SYMBOL(kernel_connect);
+EXPORT_SYMBOL(kernel_getsockname);
+EXPORT_SYMBOL(kernel_getpeername);
+EXPORT_SYMBOL(kernel_getsockopt);
+EXPORT_SYMBOL(kernel_setsockopt);
+EXPORT_SYMBOL(kernel_sendpage);
+EXPORT_SYMBOL(kernel_sock_ioctl);
+EXPORT_SYMBOL(kernel_sock_shutdown);