author     Justus Winter <4winter@informatik.uni-hamburg.de>  2015-11-05 23:46:06 +0100
committer  Justus Winter <4winter@informatik.uni-hamburg.de>  2015-11-05 23:46:06 +0100
commit     a41984e726601b2f6cf61e3128aba84f7a85436c (patch)
tree       6d1156eb0d5779bda12592eca6f2c9ca93722a09 /libdde-linux26
parent     573055eb4f44e819f983f083c52716bdec8edf14 (diff)
sync with upstream packaging
Diffstat (limited to 'libdde-linux26')
-rw-r--r--  libdde-linux26/contrib/drivers/base/attribute_container.c | 2
-rw-r--r--  libdde-linux26/contrib/drivers/base/platform.c | 2
-rw-r--r--  libdde-linux26/contrib/drivers/base/sys.c | 2
-rw-r--r--  libdde-linux26/contrib/include/linux/compiler-gcc5.h | 56
-rw-r--r--  libdde-linux26/contrib/include/linux/etherdevice.h | 27
-rw-r--r--  libdde-linux26/contrib/include/linux/ethtool.h | 36
-rw-r--r--  libdde-linux26/contrib/include/linux/inotify.h | 4
-rw-r--r--  libdde-linux26/contrib/include/linux/kernel.h | 2
-rw-r--r--  libdde-linux26/contrib/include/linux/mdio.h | 356
-rw-r--r--  libdde-linux26/contrib/include/linux/netdevice.h | 90
-rw-r--r--  libdde-linux26/contrib/include/linux/pci.h | 1
-rw-r--r--  libdde-linux26/contrib/include/linux/pci_ids.h | 177
-rw-r--r--  libdde-linux26/contrib/include/linux/phy.h | 19
-rw-r--r--  libdde-linux26/contrib/include/linux/skbuff.h | 20
-rw-r--r--  libdde-linux26/contrib/include/linux/workqueue.h | 23
-rw-r--r--  libdde-linux26/contrib/include/net/ethoc.h | 22
-rw-r--r--  libdde-linux26/contrib/kernel/rcuclassic.c | 788
-rw-r--r--  libdde-linux26/contrib/kernel/rcupdate.c | 1
-rw-r--r--  libdde-linux26/contrib/lib/devres.c | 351
-rw-r--r--  libdde-linux26/contrib/lib/kobject.c | 2
-rw-r--r--  libdde-linux26/contrib/net/core/net-sysfs.c | 2
-rw-r--r--  libdde-linux26/contrib/net/core/skb_dma_map.c | 12
-rw-r--r--  libdde-linux26/include/linux/device.h | 5
-rw-r--r--  libdde-linux26/include/linux/kernel.h | 2
-rw-r--r--  libdde-linux26/lib/src/Makefile | 3
-rw-r--r--  libdde-linux26/lib/src/arch/l4/pci.c | 2
-rw-r--r--  libdde-linux26/lib/src/drivers/base/core.c | 2
-rw-r--r--  libdde-linux26/lib/src/net/core/dev.c | 389
-rw-r--r--  libdde-linux26/lib/src/net/sched/sch_generic.c | 40
29 files changed, 2268 insertions, 170 deletions
diff --git a/libdde-linux26/contrib/drivers/base/attribute_container.c b/libdde-linux26/contrib/drivers/base/attribute_container.c
index b9cda053..e0f7859f 100644
--- a/libdde-linux26/contrib/drivers/base/attribute_container.c
+++ b/libdde-linux26/contrib/drivers/base/attribute_container.c
@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
ic->classdev.parent = get_device(dev);
ic->classdev.class = cont->class;
cont->class->dev_release = attribute_container_release;
- dev_set_name(&ic->classdev, dev_name(dev));
+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
if (fn)
fn(cont, dev, &ic->classdev);
else
diff --git a/libdde-linux26/contrib/drivers/base/platform.c b/libdde-linux26/contrib/drivers/base/platform.c
index 349a1013..030d638c 100644
--- a/libdde-linux26/contrib/drivers/base/platform.c
+++ b/libdde-linux26/contrib/drivers/base/platform.c
@@ -244,7 +244,7 @@ int platform_device_add(struct platform_device *pdev)
if (pdev->id != -1)
dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
else
- dev_set_name(&pdev->dev, pdev->name);
+ dev_set_name(&pdev->dev, "%s", pdev->name);
for (i = 0; i < pdev->num_resources; i++) {
struct resource *p, *r = &pdev->resource[i];
diff --git a/libdde-linux26/contrib/drivers/base/sys.c b/libdde-linux26/contrib/drivers/base/sys.c
index b428c8c4..38e725c1 100644
--- a/libdde-linux26/contrib/drivers/base/sys.c
+++ b/libdde-linux26/contrib/drivers/base/sys.c
@@ -137,7 +137,7 @@ int sysdev_class_register(struct sysdev_class * cls)
cls->kset.kobj.parent = &system_kset->kobj;
cls->kset.kobj.ktype = &ktype_sysdev_class;
cls->kset.kobj.kset = system_kset;
- kobject_set_name(&cls->kset.kobj, cls->name);
+ kobject_set_name(&cls->kset.kobj, "%s", cls->name);
return kset_register(&cls->kset);
}
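The three hunks above apply the same hardening: a name that may itself contain '%' is no longer passed as the printf-style format string, but through an explicit "%s". A minimal sketch of the hazard, with a hypothetical caller that is not part of this commit:

/* If dev_name(parent) were e.g. "eth%d", the old form would try to expand
 * "%d" from a vararg that was never passed -- undefined behaviour. */
static void name_child(struct device *child, struct device *parent)
{
	/* dev_set_name(child, dev_name(parent));        old, unsafe form */
	dev_set_name(child, "%s", dev_name(parent));  /* name treated as data */
}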
diff --git a/libdde-linux26/contrib/include/linux/compiler-gcc5.h b/libdde-linux26/contrib/include/linux/compiler-gcc5.h
index 00cb5759..34c58615 100644
--- a/libdde-linux26/contrib/include/linux/compiler-gcc5.h
+++ b/libdde-linux26/contrib/include/linux/compiler-gcc5.h
@@ -2,9 +2,10 @@
#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
#endif
-#define __used __attribute__((__used__))
-#define __must_check __attribute__((warn_unused_result))
-#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a,b) __builtin_offsetof(a,b)
+#define __always_inline inline __attribute__((always_inline))
/*
* A trick to suppress uninitialized variable warning without generating any
@@ -18,56 +19,7 @@
like BUG(), printk(), panic() etc. [but let's keep them for now for
older compilers]
- Early snapshots of gcc 4.3 don't support this and we can't detect this
- in the preprocessor, but we can live with this because they're unreleased.
- Maketime probing would be overkill here.
-
gcc also has a __attribute__((__hot__)) to move hot functions into
a special section, but I don't see any sense in this right now in
the kernel context */
#define __cold __attribute__((__cold__))
-
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-
-/*
- * Mark a position in code as unreachable. This can be used to
- * suppress control flow warnings after asm blocks that transfer
- * control elsewhere.
- *
- * Early snapshots of gcc 4.5 don't support this and we can't detect
- * this in the preprocessor, but we can live with this because they're
- * unreleased. Really, we need to have autoconf for the kernel.
- */
-#define unreachable() __builtin_unreachable()
-
-/* Mark a function definition as prohibited from being cloned. */
-#define __noclone __attribute__((__noclone__))
-
-/*
- * Tell the optimizer that something else uses this function or variable.
- */
-#define __visible __attribute__((externally_visible))
-
-/*
- * GCC 'asm goto' miscompiles certain code sequences:
- *
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
- *
- * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
- *
- * (asm goto is automatically volatile - the naming reflects this.)
- */
-#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-
-#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
-#define __HAVE_BUILTIN_BSWAP32__
-#define __HAVE_BUILTIN_BSWAP64__
-#define __HAVE_BUILTIN_BSWAP16__
-#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
-
-#define KASAN_ABI_VERSION 4
diff --git a/libdde-linux26/contrib/include/linux/etherdevice.h b/libdde-linux26/contrib/include/linux/etherdevice.h
index 1cb0f0b9..fbb3836e 100644
--- a/libdde-linux26/contrib/include/linux/etherdevice.h
+++ b/libdde-linux26/contrib/include/linux/etherdevice.h
@@ -182,6 +182,33 @@ static inline unsigned compare_ether_addr_64bits(const u8 addr1[6+2],
return compare_ether_addr(addr1, addr2);
#endif
}
+
+/**
+ * is_etherdev_addr - Tell if given Ethernet address belongs to the device.
+ * @dev: Pointer to a device structure
+ * @addr: Pointer to a six-byte array containing the Ethernet address
+ *
+ * Compare passed address with all addresses of the device. Return true if the
+ * address if one of the device addresses.
+ *
+ * Note that this function calls compare_ether_addr_64bits() so take care of
+ * the right padding.
+ */
+static inline bool is_etherdev_addr(const struct net_device *dev,
+ const u8 addr[6 + 2])
+{
+ struct netdev_hw_addr *ha;
+ int res = 1;
+
+ rcu_read_lock();
+ for_each_dev_addr(dev, ha) {
+ res = compare_ether_addr_64bits(addr, ha->addr);
+ if (!res)
+ break;
+ }
+ rcu_read_unlock();
+ return !res;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_ETHERDEVICE_H */
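The kernel-doc above warns about padding because compare_ether_addr_64bits() reads the addresses as 8-byte quantities, so callers must pass buffers at least 6+2 bytes long. A minimal usage sketch with a hypothetical receive-path helper (frame_is_for_us is not part of this commit):

/* Ethernet headers in an skb have data following h_dest, so the 8-byte
 * read done by the 64-bit comparison stays inside the packet buffer. */
static bool frame_is_for_us(struct net_device *dev, struct sk_buff *skb)
{
	return is_etherdev_addr(dev, eth_hdr(skb)->h_dest);
}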
diff --git a/libdde-linux26/contrib/include/linux/ethtool.h b/libdde-linux26/contrib/include/linux/ethtool.h
index 27c67a54..45f34dcc 100644
--- a/libdde-linux26/contrib/include/linux/ethtool.h
+++ b/libdde-linux26/contrib/include/linux/ethtool.h
@@ -25,11 +25,14 @@ struct ethtool_cmd {
__u8 phy_address;
__u8 transceiver; /* Which transceiver to use */
__u8 autoneg; /* Enable or disable autonegotiation */
+ __u8 mdio_support;
__u32 maxtxpkt; /* Tx pkts before generating tx int */
__u32 maxrxpkt; /* Rx pkts before generating rx int */
__u16 speed_hi;
- __u16 reserved2;
- __u32 reserved[3];
+ __u8 eth_tp_mdix;
+ __u8 reserved2;
+ __u32 lp_advertising; /* Features the link partner advertises */
+ __u32 reserved[2];
};
static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep,
@@ -469,6 +472,13 @@ struct ethtool_ops {
#define ETHTOOL_SRXFH 0x0000002a /* Set RX flow hash configuration */
#define ETHTOOL_GGRO 0x0000002b /* Get GRO enable (ethtool_value) */
#define ETHTOOL_SGRO 0x0000002c /* Set GRO enable (ethtool_value) */
+#define ETHTOOL_GRXRINGS 0x0000002d /* Get RX rings available for LB */
+#define ETHTOOL_GRXCLSRLCNT 0x0000002e /* Get RX class rule count */
+#define ETHTOOL_GRXCLSRULE 0x0000002f /* Get RX classification rule */
+#define ETHTOOL_GRXCLSRLALL 0x00000030 /* Get all RX classification rule */
+#define ETHTOOL_SRXCLSRLDEL 0x00000031 /* Delete RX classification rule */
+#define ETHTOOL_SRXCLSRLINS 0x00000032 /* Insert RX classification rule */
+#define ETHTOOL_FLASHDEV 0x00000033 /* Flash firmware to device */
/* compatibility with older code */
#define SPARC_ETH_GSET ETHTOOL_GSET
@@ -491,6 +501,11 @@ struct ethtool_ops {
#define SUPPORTED_Pause (1 << 13)
#define SUPPORTED_Asym_Pause (1 << 14)
#define SUPPORTED_2500baseX_Full (1 << 15)
+#define SUPPORTED_Backplane (1 << 16)
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#define SUPPORTED_10000baseKX4_Full (1 << 18)
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#define SUPPORTED_10000baseR_FEC (1 << 20)
/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
@@ -509,6 +524,11 @@ struct ethtool_ops {
#define ADVERTISED_Pause (1 << 13)
#define ADVERTISED_Asym_Pause (1 << 14)
#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_Backplane (1 << 16)
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#define ADVERTISED_10000baseKX4_Full (1 << 18)
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#define ADVERTISED_10000baseR_FEC (1 << 20)
/* The following are all involved in forcing a particular link
* mode for the device for setting things. When getting the
@@ -533,6 +553,7 @@ struct ethtool_ops {
#define PORT_MII 0x02
#define PORT_FIBRE 0x03
#define PORT_BNC 0x04
+#define PORT_OTHER 0xff
/* Which transceiver to use. */
#define XCVR_INTERNAL 0x00
@@ -547,6 +568,11 @@ struct ethtool_ops {
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
+/* Mode MDI or MDI-X */
+#define ETH_TP_MDI_INVALID 0x00
+#define ETH_TP_MDI 0x01
+#define ETH_TP_MDI_X 0x02
+
/* Wake-On-Lan options. */
#define WAKE_PHY (1 << 0)
#define WAKE_UCAST (1 << 1)
@@ -565,6 +591,11 @@ struct ethtool_ops {
#define UDP_V6_FLOW 0x06
#define SCTP_V6_FLOW 0x07
#define AH_ESP_V6_FLOW 0x08
+#define AH_V4_FLOW 0x09
+#define ESP_V4_FLOW 0x0a
+#define AH_V6_FLOW 0x0b
+#define ESP_V6_FLOW 0x0c
+#define IP_USER_FLOW 0x0d
/* L3-L4 network traffic flow hash options */
#define RXH_DEV_PORT (1 << 0)
@@ -577,5 +608,6 @@ struct ethtool_ops {
#define RXH_L4_B_2_3 (1 << 7) /* dst port in case of TCP/UDP/SCTP */
#define RXH_DISCARD (1 << 31)
+#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
#endif /* _LINUX_ETHTOOL_H */
diff --git a/libdde-linux26/contrib/include/linux/inotify.h b/libdde-linux26/contrib/include/linux/inotify.h
index 37ea2894..782fd29a 100644
--- a/libdde-linux26/contrib/include/linux/inotify.h
+++ b/libdde-linux26/contrib/include/linux/inotify.h
@@ -230,12 +230,12 @@ static inline void put_inotify_watch(struct inotify_watch *watch)
{
}
-extern inline int pin_inotify_watch(struct inotify_watch *watch)
+static inline int pin_inotify_watch(struct inotify_watch *watch)
{
return 0;
}
-extern inline void unpin_inotify_watch(struct inotify_watch *watch)
+static inline void unpin_inotify_watch(struct inotify_watch *watch)
{
}
diff --git a/libdde-linux26/contrib/include/linux/kernel.h b/libdde-linux26/contrib/include/linux/kernel.h
index 7fa37189..0bded10b 100644
--- a/libdde-linux26/contrib/include/linux/kernel.h
+++ b/libdde-linux26/contrib/include/linux/kernel.h
@@ -353,6 +353,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
/* If you are writing a driver, please use dev_dbg instead */
#if defined(DEBUG)
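pr_cont() is the pr_*-style spelling of KERN_CONT: it appends to the printk line opened by a previous call instead of starting a new one. A minimal sketch (dump_bits is a hypothetical helper, not part of this commit):

static void dump_bits(unsigned long flags)
{
	int i;

	pr_info("flags:");                         /* opens the line */
	for (i = 0; i < 8; i++)
		pr_cont(" %lu", (flags >> i) & 1); /* keeps appending */
	pr_cont("\n");                             /* terminates it */
}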
diff --git a/libdde-linux26/contrib/include/linux/mdio.h b/libdde-linux26/contrib/include/linux/mdio.h
new file mode 100644
index 00000000..c779b49a
--- /dev/null
+++ b/libdde-linux26/contrib/include/linux/mdio.h
@@ -0,0 +1,356 @@
+/*
+ * linux/mdio.h: definitions for MDIO (clause 45) transceivers
+ * Copyright 2006-2009 Solarflare Communications Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation, incorporated herein by reference.
+ */
+
+#ifndef __LINUX_MDIO_H__
+#define __LINUX_MDIO_H__
+
+#include <linux/mii.h>
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment/
+ * Physical Medium Dependent */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136. */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* LASI (Link Alarm Status Interrupt) registers, defined by XENPAK MSA. */
+#define MDIO_PMA_LASI_RXCTRL 0x9000 /* RX_ALARM control */
+#define MDIO_PMA_LASI_TXCTRL 0x9001 /* TX_ALARM control */
+#define MDIO_PMA_LASI_CTRL 0x9002 /* LASI control */
+#define MDIO_PMA_LASI_RXSTAT 0x9003 /* RX_ALARM status */
+#define MDIO_PMA_LASI_TXSTAT 0x9004 /* TX_ALARM status */
+#define MDIO_PMA_LASI_STAT 0x9005 /* LASI status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+
+/* 10 Gb/s */
+#define MDIO_CTRL1_SPEED10G (MDIO_CTRL1_SPEEDSELEXT | 0x00)
+/* 10PASS-TS/2BASE-TL */
+#define MDIO_CTRL1_SPEED10P2B (MDIO_CTRL1_SPEEDSELEXT | 0x04)
+
+/* Status register 1. */
+#define MDIO_STAT1_LPOWERABLE 0x0002 /* Low-power ability */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+#define MDIO_STAT1_FAULT 0x0080 /* Fault */
+#define MDIO_AN_STAT1_LPABLE 0x0001 /* Link partner AN ability */
+#define MDIO_AN_STAT1_ABLE BMSR_ANEGCAPABLE
+#define MDIO_AN_STAT1_RFAULT BMSR_RFAULT
+#define MDIO_AN_STAT1_COMPLETE BMSR_ANEGCOMPLETE
+#define MDIO_AN_STAT1_PAGE 0x0040 /* Page received */
+#define MDIO_AN_STAT1_XNP 0x0080 /* Extended next page status */
+
+/* Speed register. */
+#define MDIO_SPEED_10G 0x0001 /* 10G capable */
+#define MDIO_PMA_SPEED_2B 0x0002 /* 2BASE-TL capable */
+#define MDIO_PMA_SPEED_10P 0x0004 /* 10PASS-TS capable */
+#define MDIO_PMA_SPEED_1000 0x0010 /* 1000M capable */
+#define MDIO_PMA_SPEED_100 0x0020 /* 100M capable */
+#define MDIO_PMA_SPEED_10 0x0040 /* 10M capable */
+#define MDIO_PCS_SPEED_10P2B 0x0002 /* 10PASS-TS/2BASE-TL capable */
+
+/* Device present registers. */
+#define MDIO_DEVS_PRESENT(devad) (1 << (devad))
+#define MDIO_DEVS_PMAPMD MDIO_DEVS_PRESENT(MDIO_MMD_PMAPMD)
+#define MDIO_DEVS_WIS MDIO_DEVS_PRESENT(MDIO_MMD_WIS)
+#define MDIO_DEVS_PCS MDIO_DEVS_PRESENT(MDIO_MMD_PCS)
+#define MDIO_DEVS_PHYXS MDIO_DEVS_PRESENT(MDIO_MMD_PHYXS)
+#define MDIO_DEVS_DTEXS MDIO_DEVS_PRESENT(MDIO_MMD_DTEXS)
+#define MDIO_DEVS_TC MDIO_DEVS_PRESENT(MDIO_MMD_TC)
+#define MDIO_DEVS_AN MDIO_DEVS_PRESENT(MDIO_MMD_AN)
+#define MDIO_DEVS_C22EXT MDIO_DEVS_PRESENT(MDIO_MMD_C22EXT)
+
+/* Control register 2. */
+#define MDIO_PMA_CTRL2_TYPE 0x000f /* PMA/PMD type selection */
+#define MDIO_PMA_CTRL2_10GBCX4 0x0000 /* 10GBASE-CX4 type */
+#define MDIO_PMA_CTRL2_10GBEW 0x0001 /* 10GBASE-EW type */
+#define MDIO_PMA_CTRL2_10GBLW 0x0002 /* 10GBASE-LW type */
+#define MDIO_PMA_CTRL2_10GBSW 0x0003 /* 10GBASE-SW type */
+#define MDIO_PMA_CTRL2_10GBLX4 0x0004 /* 10GBASE-LX4 type */
+#define MDIO_PMA_CTRL2_10GBER 0x0005 /* 10GBASE-ER type */
+#define MDIO_PMA_CTRL2_10GBLR 0x0006 /* 10GBASE-LR type */
+#define MDIO_PMA_CTRL2_10GBSR 0x0007 /* 10GBASE-SR type */
+#define MDIO_PMA_CTRL2_10GBLRM 0x0008 /* 10GBASE-LRM type */
+#define MDIO_PMA_CTRL2_10GBT 0x0009 /* 10GBASE-T type */
+#define MDIO_PMA_CTRL2_10GBKX4 0x000a /* 10GBASE-KX4 type */
+#define MDIO_PMA_CTRL2_10GBKR 0x000b /* 10GBASE-KR type */
+#define MDIO_PMA_CTRL2_1000BT 0x000c /* 1000BASE-T type */
+#define MDIO_PMA_CTRL2_1000BKX 0x000d /* 1000BASE-KX type */
+#define MDIO_PMA_CTRL2_100BTX 0x000e /* 100BASE-TX type */
+#define MDIO_PMA_CTRL2_10BT 0x000f /* 10BASE-T type */
+#define MDIO_PCS_CTRL2_TYPE 0x0003 /* PCS type selection */
+#define MDIO_PCS_CTRL2_10GBR 0x0000 /* 10GBASE-R type */
+#define MDIO_PCS_CTRL2_10GBX 0x0001 /* 10GBASE-X type */
+#define MDIO_PCS_CTRL2_10GBW 0x0002 /* 10GBASE-W type */
+#define MDIO_PCS_CTRL2_10GBT 0x0003 /* 10GBASE-T type */
+
+/* Status register 2. */
+#define MDIO_STAT2_RXFAULT 0x0400 /* Receive fault */
+#define MDIO_STAT2_TXFAULT 0x0800 /* Transmit fault */
+#define MDIO_STAT2_DEVPRST 0xc000 /* Device present */
+#define MDIO_STAT2_DEVPRST_VAL 0x8000 /* Device present value */
+#define MDIO_PMA_STAT2_LBABLE 0x0001 /* PMA loopback ability */
+#define MDIO_PMA_STAT2_10GBEW 0x0002 /* 10GBASE-EW ability */
+#define MDIO_PMA_STAT2_10GBLW 0x0004 /* 10GBASE-LW ability */
+#define MDIO_PMA_STAT2_10GBSW 0x0008 /* 10GBASE-SW ability */
+#define MDIO_PMA_STAT2_10GBLX4 0x0010 /* 10GBASE-LX4 ability */
+#define MDIO_PMA_STAT2_10GBER 0x0020 /* 10GBASE-ER ability */
+#define MDIO_PMA_STAT2_10GBLR 0x0040 /* 10GBASE-LR ability */
+#define MDIO_PMA_STAT2_10GBSR 0x0080 /* 10GBASE-SR ability */
+#define MDIO_PMD_STAT2_TXDISAB 0x0100 /* PMD TX disable ability */
+#define MDIO_PMA_STAT2_EXTABLE 0x0200 /* Extended abilities */
+#define MDIO_PMA_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */
+#define MDIO_PMA_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */
+#define MDIO_PCS_STAT2_10GBR 0x0001 /* 10GBASE-R capable */
+#define MDIO_PCS_STAT2_10GBX 0x0002 /* 10GBASE-X capable */
+#define MDIO_PCS_STAT2_10GBW 0x0004 /* 10GBASE-W capable */
+#define MDIO_PCS_STAT2_RXFLTABLE 0x1000 /* Receive fault ability */
+#define MDIO_PCS_STAT2_TXFLTABLE 0x2000 /* Transmit fault ability */
+
+/* Transmit disable register. */
+#define MDIO_PMD_TXDIS_GLOBAL 0x0001 /* Global PMD TX disable */
+#define MDIO_PMD_TXDIS_0 0x0002 /* PMD TX disable 0 */
+#define MDIO_PMD_TXDIS_1 0x0004 /* PMD TX disable 1 */
+#define MDIO_PMD_TXDIS_2 0x0008 /* PMD TX disable 2 */
+#define MDIO_PMD_TXDIS_3 0x0010 /* PMD TX disable 3 */
+
+/* Receive signal detect register. */
+#define MDIO_PMD_RXDET_GLOBAL 0x0001 /* Global PMD RX signal detect */
+#define MDIO_PMD_RXDET_0 0x0002 /* PMD RX signal detect 0 */
+#define MDIO_PMD_RXDET_1 0x0004 /* PMD RX signal detect 1 */
+#define MDIO_PMD_RXDET_2 0x0008 /* PMD RX signal detect 2 */
+#define MDIO_PMD_RXDET_3 0x0010 /* PMD RX signal detect 3 */
+
+/* Extended abilities register. */
+#define MDIO_PMA_EXTABLE_10GCX4 0x0001 /* 10GBASE-CX4 ability */
+#define MDIO_PMA_EXTABLE_10GBLRM 0x0002 /* 10GBASE-LRM ability */
+#define MDIO_PMA_EXTABLE_10GBT 0x0004 /* 10GBASE-T ability */
+#define MDIO_PMA_EXTABLE_10GBKX4 0x0008 /* 10GBASE-KX4 ability */
+#define MDIO_PMA_EXTABLE_10GBKR 0x0010 /* 10GBASE-KR ability */
+#define MDIO_PMA_EXTABLE_1000BT 0x0020 /* 1000BASE-T ability */
+#define MDIO_PMA_EXTABLE_1000BKX 0x0040 /* 1000BASE-KX ability */
+#define MDIO_PMA_EXTABLE_100BTX 0x0080 /* 100BASE-TX ability */
+#define MDIO_PMA_EXTABLE_10BT 0x0100 /* 10BASE-T ability */
+
+/* PHY XGXS lane state register. */
+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001
+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002
+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004
+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008
+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000
+
+/* PMA 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_SWAPPOL_ABNX 0x0001 /* Pair A/B uncrossed */
+#define MDIO_PMA_10GBT_SWAPPOL_CDNX 0x0002 /* Pair C/D uncrossed */
+#define MDIO_PMA_10GBT_SWAPPOL_AREV 0x0100 /* Pair A polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_BREV 0x0200 /* Pair B polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_CREV 0x0400 /* Pair C polarity reversed */
+#define MDIO_PMA_10GBT_SWAPPOL_DREV 0x0800 /* Pair D polarity reversed */
+
+/* PMA 10GBASE-T TX power register. */
+#define MDIO_PMA_10GBT_TXPWR_SHORT 0x0001 /* Short-reach mode */
+
+/* PMA 10GBASE-T SNR registers. */
+/* Value is SNR margin in dB, clamped to range [-127, 127], plus 0x8000. */
+#define MDIO_PMA_10GBT_SNR_BIAS 0x8000
+#define MDIO_PMA_10GBT_SNR_MAX 127
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+/* PCS 10GBASE-R/-T status register 1. */
+#define MDIO_PCS_10GBRT_STAT1_BLKLK 0x0001 /* Block lock attained */
+
+/* PCS 10GBASE-R/-T status register 2. */
+#define MDIO_PCS_10GBRT_STAT2_ERR 0x00ff
+#define MDIO_PCS_10GBRT_STAT2_BER 0x3f00
+
+/* AN 10GBASE-T control register. */
+#define MDIO_AN_10GBT_CTRL_ADV10G 0x1000 /* Advertise 10GBASE-T */
+
+/* AN 10GBASE-T status register. */
+#define MDIO_AN_10GBT_STAT_LPTRR 0x0200 /* LP training reset req. */
+#define MDIO_AN_10GBT_STAT_LPLTABLE 0x0400 /* LP loop timing ability */
+#define MDIO_AN_10GBT_STAT_LP10G 0x0800 /* LP is 10GBT capable */
+#define MDIO_AN_10GBT_STAT_REMOK 0x1000 /* Remote OK */
+#define MDIO_AN_10GBT_STAT_LOCOK 0x2000 /* Local OK */
+#define MDIO_AN_10GBT_STAT_MS 0x4000 /* Master/slave config */
+#define MDIO_AN_10GBT_STAT_MSFLT 0x8000 /* Master/slave config fault */
+
+/* LASI RX_ALARM control/status registers. */
+#define MDIO_PMA_LASI_RX_PHYXSLFLT 0x0001 /* PHY XS RX local fault */
+#define MDIO_PMA_LASI_RX_PCSLFLT 0x0008 /* PCS RX local fault */
+#define MDIO_PMA_LASI_RX_PMALFLT 0x0010 /* PMA/PMD RX local fault */
+#define MDIO_PMA_LASI_RX_OPTICPOWERFLT 0x0020 /* RX optical power fault */
+#define MDIO_PMA_LASI_RX_WISLFLT 0x0200 /* WIS local fault */
+
+/* LASI TX_ALARM control/status registers. */
+#define MDIO_PMA_LASI_TX_PHYXSLFLT 0x0001 /* PHY XS TX local fault */
+#define MDIO_PMA_LASI_TX_PCSLFLT 0x0008 /* PCS TX local fault */
+#define MDIO_PMA_LASI_TX_PMALFLT 0x0010 /* PMA/PMD TX local fault */
+#define MDIO_PMA_LASI_TX_LASERPOWERFLT 0x0080 /* Laser output power fault */
+#define MDIO_PMA_LASI_TX_LASERTEMPFLT 0x0100 /* Laser temperature fault */
+#define MDIO_PMA_LASI_TX_LASERBICURRFLT 0x0200 /* Laser bias current fault */
+
+/* LASI control/status registers. */
+#define MDIO_PMA_LASI_LSALARM 0x0001 /* LS_ALARM enable/status */
+#define MDIO_PMA_LASI_TXALARM 0x0002 /* TX_ALARM enable/status */
+#define MDIO_PMA_LASI_RXALARM 0x0004 /* RX_ALARM enable/status */
+
+/* Mapping between MDIO PRTAD/DEVAD and mii_ioctl_data::phy_id */
+
+#define MDIO_PHY_ID_C45 0x8000
+#define MDIO_PHY_ID_PRTAD 0x03e0
+#define MDIO_PHY_ID_DEVAD 0x001f
+#define MDIO_PHY_ID_C45_MASK \
+ (MDIO_PHY_ID_C45 | MDIO_PHY_ID_PRTAD | MDIO_PHY_ID_DEVAD)
+
+static inline __u16 mdio_phy_id_c45(int prtad, int devad)
+{
+ return MDIO_PHY_ID_C45 | (prtad << 5) | devad;
+}
+
+static inline bool mdio_phy_id_is_c45(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_C45) && !(phy_id & ~MDIO_PHY_ID_C45_MASK);
+}
+
+static inline __u16 mdio_phy_id_prtad(int phy_id)
+{
+ return (phy_id & MDIO_PHY_ID_PRTAD) >> 5;
+}
+
+static inline __u16 mdio_phy_id_devad(int phy_id)
+{
+ return phy_id & MDIO_PHY_ID_DEVAD;
+}
+
+#define MDIO_SUPPORTS_C22 1
+#define MDIO_SUPPORTS_C45 2
+
+#ifdef __KERNEL__
+
+/**
+ * struct mdio_if_info - Ethernet controller MDIO interface
+ * @prtad: PRTAD of the PHY (%MDIO_PRTAD_NONE if not present/unknown)
+ * @mmds: Mask of MMDs expected to be present in the PHY. This must be
+ * non-zero unless @prtad = %MDIO_PRTAD_NONE.
+ * @mode_support: MDIO modes supported. If %MDIO_SUPPORTS_C22 is set then
+ * MII register access will be passed through with @devad =
+ * %MDIO_DEVAD_NONE. If %MDIO_EMULATE_C22 is set then access to
+ * commonly used clause 22 registers will be translated into
+ * clause 45 registers.
+ * @dev: Net device structure
+ * @mdio_read: Register read function; returns value or negative error code
+ * @mdio_write: Register write function; returns 0 or negative error code
+ */
+struct mdio_if_info {
+ int prtad;
+ u32 mmds;
+ unsigned mode_support;
+
+ struct net_device *dev;
+ int (*mdio_read)(struct net_device *dev, int prtad, int devad,
+ u16 addr);
+ int (*mdio_write)(struct net_device *dev, int prtad, int devad,
+ u16 addr, u16 val);
+};
+
+#define MDIO_PRTAD_NONE (-1)
+#define MDIO_DEVAD_NONE (-1)
+#define MDIO_EMULATE_C22 4
+
+struct ethtool_cmd;
+struct ethtool_pauseparam;
+extern int mdio45_probe(struct mdio_if_info *mdio, int prtad);
+extern int mdio_set_flag(const struct mdio_if_info *mdio,
+ int prtad, int devad, u16 addr, int mask,
+ bool sense);
+extern int mdio45_links_ok(const struct mdio_if_info *mdio, u32 mmds);
+extern int mdio45_nway_restart(const struct mdio_if_info *mdio);
+extern void mdio45_ethtool_gset_npage(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd,
+ u32 npage_adv, u32 npage_lpa);
+extern void
+mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
+ const struct ethtool_pauseparam *ecmd);
+
+/**
+ * mdio45_ethtool_gset - get settings for ETHTOOL_GSET
+ * @mdio: MDIO interface
+ * @ecmd: Ethtool request structure
+ *
+ * Since the CSRs for auto-negotiation using next pages are not fully
+ * standardised, this function does not attempt to decode them. Use
+ * mdio45_ethtool_gset_npage() to specify advertisement bits from next
+ * pages.
+ */
+static inline void mdio45_ethtool_gset(const struct mdio_if_info *mdio,
+ struct ethtool_cmd *ecmd)
+{
+ mdio45_ethtool_gset_npage(mdio, ecmd, 0, 0);
+}
+
+extern int mdio_mii_ioctl(const struct mdio_if_info *mdio,
+ struct mii_ioctl_data *mii_data, int cmd);
+
+#endif /* __KERNEL__ */
+#endif /* __LINUX_MDIO_H__ */
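The mdio_phy_id_* helpers at the end of the new header pack a clause-45 port address (PRTAD, bits 9-5) and device address (DEVAD, bits 4-0) into the 16-bit phy_id of struct mii_ioctl_data, with bit 15 marking clause-45 addressing. A minimal round-trip sketch with hypothetical values (not part of this commit):

static void mdio_phy_id_example(void)
{
	__u16 phy_id = mdio_phy_id_c45(5, MDIO_MMD_PMAPMD);    /* 0x80a1 */

	if (mdio_phy_id_is_c45(phy_id)) {
		int prtad = mdio_phy_id_prtad(phy_id);   /* -> 5 */
		int devad = mdio_phy_id_devad(phy_id);   /* -> 1 (PMA/PMD) */
		/* these are the values mdio_mii_ioctl() recovers from phy_id */
	}
}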
diff --git a/libdde-linux26/contrib/include/linux/netdevice.h b/libdde-linux26/contrib/include/linux/netdevice.h
index 65936673..4414989d 100644
--- a/libdde-linux26/contrib/include/linux/netdevice.h
+++ b/libdde-linux26/contrib/include/linux/netdevice.h
@@ -37,6 +37,7 @@
#include <asm/byteorder.h>
#include <linux/device.h>
+#include <linux/rculist.h>
#include <linux/percpu.h>
#include <linux/dmaengine.h>
#include <linux/workqueue.h>
@@ -81,17 +82,19 @@ struct wireless_dev;
#define net_xmit_eval(e) ((e) == NET_XMIT_CN? 0 : (e))
#define net_xmit_errno(e) ((e) != NET_XMIT_CN ? -ENOBUFS : 0)
+/* Driver transmit return codes */
+enum netdev_tx {
+ NETDEV_TX_OK = 0, /* driver took care of packet */
+ NETDEV_TX_BUSY, /* driver tx path was busy*/
+ NETDEV_TX_LOCKED = -1, /* driver tx lock was already taken */
+};
+typedef enum netdev_tx netdev_tx_t;
+
#endif
#define MAX_ADDR_LEN 32 /* Largest hardware address length */
-/* Driver transmit return codes */
-#define NETDEV_TX_OK 0 /* driver took care of packet */
-#define NETDEV_TX_BUSY 1 /* driver tx path was busy*/
-#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
-
#ifdef __KERNEL__
-
/*
* Compute the worst case header length according to the protocols
* used.
@@ -209,6 +212,24 @@ struct dev_addr_list
#define dmi_users da_users
#define dmi_gusers da_gusers
+struct netdev_hw_addr {
+ struct list_head list;
+ unsigned char addr[MAX_ADDR_LEN];
+ unsigned char type;
+#define NETDEV_HW_ADDR_T_LAN 1
+#define NETDEV_HW_ADDR_T_SAN 2
+#define NETDEV_HW_ADDR_T_SLAVE 3
+#define NETDEV_HW_ADDR_T_UNICAST 4
+ int refcount;
+ bool synced;
+ struct rcu_head rcu_head;
+};
+
+struct netdev_hw_addr_list {
+ struct list_head list;
+ int count;
+};
+
struct hh_cache
{
struct hh_cache *hh_next; /* Next entry */
@@ -441,6 +462,10 @@ struct netdev_queue {
spinlock_t _xmit_lock;
int xmit_lock_owner;
struct Qdisc *qdisc_sleeping;
+ /*
+ * please use this field instead of dev->trans_start
+ */
+ unsigned long trans_start;
} ____cacheline_aligned_in_smp;
@@ -467,9 +492,11 @@ struct netdev_queue {
* This function is called when network device transistions to the down
* state.
*
- * int (*ndo_start_xmit)(struct sk_buff *skb, struct net_device *dev);
+ * netdev_tx_t (*ndo_start_xmit)(struct sk_buff *skb,
+ * struct net_device *dev);
* Called when a packet needs to be transmitted.
- * Must return NETDEV_TX_OK , NETDEV_TX_BUSY, or NETDEV_TX_LOCKED,
+ * Must return NETDEV_TX_OK , NETDEV_TX_BUSY.
+ * (can also return NETDEV_TX_LOCKED iff NETIF_F_LLTX)
* Required can not be NULL.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb);
@@ -540,7 +567,7 @@ struct net_device_ops {
void (*ndo_uninit)(struct net_device *dev);
int (*ndo_open)(struct net_device *dev);
int (*ndo_stop)(struct net_device *dev);
- int (*ndo_start_xmit) (struct sk_buff *skb,
+ netdev_tx_t (*ndo_start_xmit) (struct sk_buff *skb,
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb);
@@ -724,10 +751,10 @@ struct net_device
unsigned char addr_len; /* hardware address length */
unsigned short dev_id; /* for shared network cards */
- spinlock_t addr_list_lock;
- struct dev_addr_list *uc_list; /* Secondary unicast mac addresses */
- int uc_count; /* Number of installed ucasts */
+ struct netdev_hw_addr_list uc; /* Secondary unicast
+ mac addresses */
int uc_promisc;
+ spinlock_t addr_list_lock;
struct dev_addr_list *mc_list; /* Multicast mac addresses */
int mc_count; /* Number of installed mcasts */
unsigned int promiscuity;
@@ -753,8 +780,12 @@ struct net_device
*/
unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
- unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address, (before bcast
- because most packets are unicast) */
+ unsigned char *dev_addr; /* hw address, (before bcast
+ because most packets are
+ unicast) */
+
+ struct netdev_hw_addr_list dev_addrs; /* list of device
+ hw addresses */
unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
@@ -774,6 +805,11 @@ struct net_device
* One part is mostly used on xmit path (device)
*/
/* These may be needed for future network-power-down code. */
+
+ /*
+ * trans_start here is expensive for high speed devices on SMP,
+ * please use netdev_queue->trans_start instead.
+ */
unsigned long trans_start; /* Time (in jiffies) of last Tx */
int watchdog_timeo; /* used by dev_watchdog() */
@@ -1450,6 +1486,8 @@ static inline int netif_carrier_ok(const struct net_device *dev)
return !test_bit(__LINK_STATE_NOCARRIER, &dev->state);
}
+extern unsigned long dev_trans_start(struct net_device *dev);
+
extern void __netdev_watchdog_up(struct net_device *dev);
extern void netif_carrier_on(struct net_device *dev);
@@ -1764,6 +1802,13 @@ static inline void netif_addr_unlock_bh(struct net_device *dev)
spin_unlock_bh(&dev->addr_list_lock);
}
+/*
+ * dev_addrs walker. Should be used only for read access. Call with
+ * rcu_read_lock held.
+ */
+#define for_each_dev_addr(dev, ha) \
+ list_for_each_entry_rcu(ha, &dev->dev_addrs.list, list)
+
/* These functions live elsewhere (drivers/net/net_init.c, but related) */
extern void ether_setup(struct net_device *dev);
@@ -1776,11 +1821,24 @@ extern struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
alloc_netdev_mq(sizeof_priv, name, setup, 1)
extern int register_netdev(struct net_device *dev);
extern void unregister_netdev(struct net_device *dev);
+
+/* Functions used for device addresses handling */
+extern int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type);
+extern int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type);
+extern int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type);
+extern int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type);
+
/* Functions used for secondary unicast and multicast support */
extern void dev_set_rx_mode(struct net_device *dev);
extern void __dev_set_rx_mode(struct net_device *dev);
-extern int dev_unicast_delete(struct net_device *dev, void *addr, int alen);
-extern int dev_unicast_add(struct net_device *dev, void *addr, int alen);
+extern int dev_unicast_delete(struct net_device *dev, void *addr);
+extern int dev_unicast_add(struct net_device *dev, void *addr);
extern int dev_unicast_sync(struct net_device *to, struct net_device *from);
extern void dev_unicast_unsync(struct net_device *to, struct net_device *from);
extern int dev_mc_delete(struct net_device *dev, void *addr, int alen, int all);
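With the hunks above, ndo_start_xmit() returns the new enum netdev_tx_t instead of a plain int; a driver that cannot take the packet returns NETDEV_TX_BUSY (NETDEV_TX_LOCKED stays reserved for NETIF_F_LLTX drivers). A minimal driver-side sketch; my_priv, my_hw_queue_full and my_hw_tx are hypothetical placeholders, not part of this commit:

static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct my_priv *priv = netdev_priv(dev);

	if (my_hw_queue_full(priv)) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;   /* core requeues the skb */
	}

	my_hw_tx(priv, skb);
	dev->stats.tx_packets++;
	return NETDEV_TX_OK;
}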
diff --git a/libdde-linux26/contrib/include/linux/pci.h b/libdde-linux26/contrib/include/linux/pci.h
index 7bd624bf..c7fb6600 100644
--- a/libdde-linux26/contrib/include/linux/pci.h
+++ b/libdde-linux26/contrib/include/linux/pci.h
@@ -256,6 +256,7 @@ struct pci_dev {
unsigned int ari_enabled:1; /* ARI forwarding */
unsigned int is_managed:1;
unsigned int is_pcie:1;
+ unsigned int needs_freset:1; /* Dev requires fundamental reset */
unsigned int state_saved:1;
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
diff --git a/libdde-linux26/contrib/include/linux/pci_ids.h b/libdde-linux26/contrib/include/linux/pci_ids.h
index aca8c458..a3393863 100644
--- a/libdde-linux26/contrib/include/linux/pci_ids.h
+++ b/libdde-linux26/contrib/include/linux/pci_ids.h
@@ -2,6 +2,9 @@
* PCI Class, Vendor and Device IDs
*
* Please keep sorted.
+ *
+ * Do not add new entries to this file unless the definitions
+ * are shared between multiple drivers.
*/
/* Device classes and subclasses */
@@ -104,6 +107,7 @@
#define PCI_CLASS_SERIAL_USB_UHCI 0x0c0300
#define PCI_CLASS_SERIAL_USB_OHCI 0x0c0310
#define PCI_CLASS_SERIAL_USB_EHCI 0x0c0320
+#define PCI_CLASS_SERIAL_USB_XHCI 0x0c0330
#define PCI_CLASS_SERIAL_FIBER 0x0c04
#define PCI_CLASS_SERIAL_SMBUS 0x0c05
@@ -389,6 +393,9 @@
#define PCI_DEVICE_ID_VLSI_82C147 0x0105
#define PCI_DEVICE_ID_VLSI_VAS96011 0x0702
+/* AMD RD890 Chipset */
+#define PCI_DEVICE_ID_RD890_IOMMU 0x5a23
+
#define PCI_VENDOR_ID_ADL 0x1005
#define PCI_DEVICE_ID_ADL_2301 0x2301
@@ -478,6 +485,9 @@
#define PCI_DEVICE_ID_IBM_ICOM_V2_ONE_PORT_RVX_ONE_PORT_MDM_PCIE 0x0361
#define PCI_DEVICE_ID_IBM_ICOM_FOUR_PORT_MODEL 0x252
+#define PCI_SUBVENDOR_ID_IBM 0x1014
+#define PCI_SUBDEVICE_ID_IBM_SATURN_SERIAL_ONE_PORT 0x03d4
+
#define PCI_VENDOR_ID_UNISYS 0x1018
#define PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR 0x001C
@@ -526,6 +536,7 @@
#define PCI_DEVICE_ID_AMD_OPUS_7443 0x7443
#define PCI_DEVICE_ID_AMD_VIPER_7443 0x7443
#define PCI_DEVICE_ID_AMD_OPUS_7445 0x7445
+#define PCI_DEVICE_ID_AMD_8111_PCI 0x7460
#define PCI_DEVICE_ID_AMD_8111_LPC 0x7468
#define PCI_DEVICE_ID_AMD_8111_IDE 0x7469
#define PCI_DEVICE_ID_AMD_8111_SMBUS2 0x746a
@@ -535,6 +546,8 @@
#define PCI_DEVICE_ID_AMD_8131_BRIDGE 0x7450
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7451
#define PCI_DEVICE_ID_AMD_8132_BRIDGE 0x7458
+#define PCI_DEVICE_ID_AMD_HUDSON2_SMBUS 0x780b
+#define PCI_DEVICE_ID_AMD_CS5535_IDE 0x208F
#define PCI_DEVICE_ID_AMD_CS5536_ISA 0x2090
#define PCI_DEVICE_ID_AMD_CS5536_FLASH 0x2091
#define PCI_DEVICE_ID_AMD_CS5536_AUDIO 0x2093
@@ -543,9 +556,10 @@
#define PCI_DEVICE_ID_AMD_CS5536_UDC 0x2096
#define PCI_DEVICE_ID_AMD_CS5536_UOC 0x2097
#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
-
#define PCI_DEVICE_ID_AMD_LX_VIDEO 0x2081
#define PCI_DEVICE_ID_AMD_LX_AES 0x2082
+#define PCI_DEVICE_ID_AMD_HUDSON2_IDE 0x780c
+#define PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE 0x7800
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
@@ -591,6 +605,8 @@
#define PCI_DEVICE_ID_MATROX_G550 0x2527
#define PCI_DEVICE_ID_MATROX_VIA 0x4536
+#define PCI_VENDOR_ID_MOBILITY_ELECTRONICS 0x14f2
+
#define PCI_VENDOR_ID_CT 0x102c
#define PCI_DEVICE_ID_CT_69000 0x00c0
#define PCI_DEVICE_ID_CT_65545 0x00d8
@@ -766,6 +782,7 @@
#define PCI_DEVICE_ID_TI_X515 0x8036
#define PCI_DEVICE_ID_TI_XX12 0x8039
#define PCI_DEVICE_ID_TI_XX12_FM 0x803b
+#define PCI_DEVICE_ID_TI_XIO2000A 0x8231
#define PCI_DEVICE_ID_TI_1130 0xac12
#define PCI_DEVICE_ID_TI_1031 0xac13
#define PCI_DEVICE_ID_TI_1131 0xac15
@@ -834,6 +851,8 @@
#define PCI_DEVICE_ID_PROMISE_20276 0x5275
#define PCI_DEVICE_ID_PROMISE_20277 0x7275
+#define PCI_VENDOR_ID_FOXCONN 0x105b
+
#define PCI_VENDOR_ID_UMC 0x1060
#define PCI_DEVICE_ID_UMC_UM8673F 0x0101
#define PCI_DEVICE_ID_UMC_UM8886BF 0x673a
@@ -873,6 +892,7 @@
#define PCI_DEVICE_ID_APPLE_SH_SUNGEM 0x0051
#define PCI_DEVICE_ID_APPLE_U3L_AGP 0x0058
#define PCI_DEVICE_ID_APPLE_U3H_AGP 0x0059
+#define PCI_DEVICE_ID_APPLE_U4_PCIE 0x005b
#define PCI_DEVICE_ID_APPLE_IPID2_AGP 0x0066
#define PCI_DEVICE_ID_APPLE_IPID2_ATA 0x0069
#define PCI_DEVICE_ID_APPLE_IPID2_FW 0x006a
@@ -941,6 +961,32 @@
#define PCI_DEVICE_ID_SUN_TOMATILLO 0xa801
#define PCI_DEVICE_ID_SUN_CASSINI 0xabba
+#define PCI_VENDOR_ID_NI 0x1093
+#define PCI_DEVICE_ID_NI_PCI2322 0xd130
+#define PCI_DEVICE_ID_NI_PCI2324 0xd140
+#define PCI_DEVICE_ID_NI_PCI2328 0xd150
+#define PCI_DEVICE_ID_NI_PXI8422_2322 0xd190
+#define PCI_DEVICE_ID_NI_PXI8422_2324 0xd1a0
+#define PCI_DEVICE_ID_NI_PXI8420_2322 0xd1d0
+#define PCI_DEVICE_ID_NI_PXI8420_2324 0xd1e0
+#define PCI_DEVICE_ID_NI_PXI8420_2328 0xd1f0
+#define PCI_DEVICE_ID_NI_PXI8420_23216 0xd1f1
+#define PCI_DEVICE_ID_NI_PCI2322I 0xd250
+#define PCI_DEVICE_ID_NI_PCI2324I 0xd270
+#define PCI_DEVICE_ID_NI_PCI23216 0xd2b0
+#define PCI_DEVICE_ID_NI_PXI8430_2322 0x7080
+#define PCI_DEVICE_ID_NI_PCI8430_2322 0x70db
+#define PCI_DEVICE_ID_NI_PXI8430_2324 0x70dd
+#define PCI_DEVICE_ID_NI_PCI8430_2324 0x70df
+#define PCI_DEVICE_ID_NI_PXI8430_2328 0x70e2
+#define PCI_DEVICE_ID_NI_PCI8430_2328 0x70e4
+#define PCI_DEVICE_ID_NI_PXI8430_23216 0x70e6
+#define PCI_DEVICE_ID_NI_PCI8430_23216 0x70e7
+#define PCI_DEVICE_ID_NI_PXI8432_2322 0x70e8
+#define PCI_DEVICE_ID_NI_PCI8432_2322 0x70ea
+#define PCI_DEVICE_ID_NI_PXI8432_2324 0x70ec
+#define PCI_DEVICE_ID_NI_PCI8432_2324 0x70ee
+
#define PCI_VENDOR_ID_CMD 0x1095
#define PCI_DEVICE_ID_CMD_643 0x0643
#define PCI_DEVICE_ID_CMD_646 0x0646
@@ -976,6 +1022,7 @@
#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
#define PCI_DEVICE_ID_PLX_9030 0x9030
#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9056 0x9056
#define PCI_DEVICE_ID_PLX_9080 0x9080
#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
@@ -1037,8 +1084,6 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SMBUS 0x0034
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE 0x0035
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA 0x0036
-#define PCI_DEVICE_ID_NVIDIA_NVENET_10 0x0037
-#define PCI_DEVICE_ID_NVIDIA_NVENET_11 0x0038
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_SATA2 0x003e
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800_ULTRA 0x0040
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_6800 0x0041
@@ -1049,21 +1094,16 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE 0x0053
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA 0x0054
#define PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_SATA2 0x0055
-#define PCI_DEVICE_ID_NVIDIA_NVENET_8 0x0056
-#define PCI_DEVICE_ID_NVIDIA_NVENET_9 0x0057
#define PCI_DEVICE_ID_NVIDIA_CK804_AUDIO 0x0059
#define PCI_DEVICE_ID_NVIDIA_CK804_PCIE 0x005d
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_SMBUS 0x0064
#define PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE 0x0065
-#define PCI_DEVICE_ID_NVIDIA_NVENET_2 0x0066
#define PCI_DEVICE_ID_NVIDIA_MCP2_MODEM 0x0069
#define PCI_DEVICE_ID_NVIDIA_MCP2_AUDIO 0x006a
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SMBUS 0x0084
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE 0x0085
-#define PCI_DEVICE_ID_NVIDIA_NVENET_4 0x0086
#define PCI_DEVICE_ID_NVIDIA_MCP2S_MODEM 0x0089
#define PCI_DEVICE_ID_NVIDIA_CK8_AUDIO 0x008a
-#define PCI_DEVICE_ID_NVIDIA_NVENET_5 0x008c
#define PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA 0x008e
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GT 0x0090
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_7800_GTX 0x0091
@@ -1079,15 +1119,12 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE3 0x00d1
#define PCI_DEVICE_ID_NVIDIA_NFORCE3_SMBUS 0x00d4
#define PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE 0x00d5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_3 0x00d6
#define PCI_DEVICE_ID_NVIDIA_MCP3_MODEM 0x00d9
#define PCI_DEVICE_ID_NVIDIA_MCP3_AUDIO 0x00da
-#define PCI_DEVICE_ID_NVIDIA_NVENET_7 0x00df
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S 0x00e1
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA 0x00e3
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SMBUS 0x00e4
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE 0x00e5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_6 0x00e6
#define PCI_DEVICE_ID_NVIDIA_CK8S_AUDIO 0x00ea
#define PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2 0x00ee
#define PCIE_DEVICE_ID_NVIDIA_GEFORCE_6800_ALT1 0x00f0
@@ -1147,7 +1184,6 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_SMBUS 0x01b4
#define PCI_DEVICE_ID_NVIDIA_NFORCE_IDE 0x01bc
#define PCI_DEVICE_ID_NVIDIA_MCP1_MODEM 0x01c1
-#define PCI_DEVICE_ID_NVIDIA_NVENET_1 0x01c3
#define PCI_DEVICE_ID_NVIDIA_NFORCE2 0x01e0
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3 0x0200
#define PCI_DEVICE_ID_NVIDIA_GEFORCE3_1 0x0201
@@ -1170,8 +1206,6 @@
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE 0x036E
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA 0x037E
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_SATA2 0x037F
-#define PCI_DEVICE_ID_NVIDIA_NVENET_12 0x0268
-#define PCI_DEVICE_ID_NVIDIA_NVENET_13 0x0269
#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800 0x0280
#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800_8X 0x0281
#define PCI_DEVICE_ID_NVIDIA_GEFORCE4_TI_4800SE 0x0282
@@ -1218,42 +1252,22 @@
#define PCI_DEVICE_ID_NVIDIA_GEFORCE_FX_GO5700_2 0x0348
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_GO1000 0x034C
#define PCI_DEVICE_ID_NVIDIA_QUADRO_FX_1100 0x034E
-#define PCI_DEVICE_ID_NVIDIA_NVENET_14 0x0372
#define PCI_DEVICE_ID_NVIDIA_NVENET_15 0x0373
-#define PCI_DEVICE_ID_NVIDIA_NVENET_16 0x03E5
-#define PCI_DEVICE_ID_NVIDIA_NVENET_17 0x03E6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA 0x03E7
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SMBUS 0x03EB
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE 0x03EC
-#define PCI_DEVICE_ID_NVIDIA_NVENET_18 0x03EE
-#define PCI_DEVICE_ID_NVIDIA_NVENET_19 0x03EF
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA2 0x03F6
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_SATA3 0x03F7
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_SMBUS 0x0446
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE 0x0448
-#define PCI_DEVICE_ID_NVIDIA_NVENET_20 0x0450
-#define PCI_DEVICE_ID_NVIDIA_NVENET_21 0x0451
-#define PCI_DEVICE_ID_NVIDIA_NVENET_22 0x0452
-#define PCI_DEVICE_ID_NVIDIA_NVENET_23 0x0453
-#define PCI_DEVICE_ID_NVIDIA_NVENET_24 0x054C
-#define PCI_DEVICE_ID_NVIDIA_NVENET_25 0x054D
-#define PCI_DEVICE_ID_NVIDIA_NVENET_26 0x054E
-#define PCI_DEVICE_ID_NVIDIA_NVENET_27 0x054F
-#define PCI_DEVICE_ID_NVIDIA_NVENET_28 0x07DC
-#define PCI_DEVICE_ID_NVIDIA_NVENET_29 0x07DD
-#define PCI_DEVICE_ID_NVIDIA_NVENET_30 0x07DE
-#define PCI_DEVICE_ID_NVIDIA_NVENET_31 0x07DF
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_SMBUS 0x0542
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE 0x0560
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE 0x056C
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP78S_SMBUS 0x0752
#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE 0x0759
-#define PCI_DEVICE_ID_NVIDIA_NVENET_32 0x0760
-#define PCI_DEVICE_ID_NVIDIA_NVENET_33 0x0761
-#define PCI_DEVICE_ID_NVIDIA_NVENET_34 0x0762
-#define PCI_DEVICE_ID_NVIDIA_NVENET_35 0x0763
-#define PCI_DEVICE_ID_NVIDIA_NVENET_36 0x0AB0
-#define PCI_DEVICE_ID_NVIDIA_NVENET_37 0x0AB1
-#define PCI_DEVICE_ID_NVIDIA_NVENET_38 0x0AB2
-#define PCI_DEVICE_ID_NVIDIA_NVENET_39 0x0AB3
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_SMBUS 0x07D8
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP79_SMBUS 0x0AA2
+#define PCI_DEVICE_ID_NVIDIA_NFORCE_MCP89_SATA 0x0D85
#define PCI_VENDOR_ID_IMS 0x10e0
#define PCI_DEVICE_ID_IMS_TT128 0x9128
@@ -1281,6 +1295,13 @@
#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
+#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005
+#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b
+#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043
+#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000
#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
@@ -1373,7 +1394,7 @@
#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
#define PCI_DEVICE_ID_VIA_838X_1 0xB188
#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198
-#define PCI_DEVICE_ID_VIA_C409_IDE 0XC409
+#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409
#define PCI_DEVICE_ID_VIA_ANON 0xFFFF
#define PCI_VENDOR_ID_SIEMENS 0x110A
@@ -1473,6 +1494,7 @@
#define PCI_DEVICE_ID_SERVERWORKS_HT1000IDE 0x0214
#define PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2 0x0217
#define PCI_DEVICE_ID_SERVERWORKS_CSB6LPC 0x0227
+#define PCI_DEVICE_ID_SERVERWORKS_HT1100LD 0x0408
#define PCI_VENDOR_ID_SBE 0x1176
#define PCI_DEVICE_ID_SBE_WANXL100 0x0301
@@ -1516,6 +1538,8 @@
#define PCI_DEVICE_ID_ARTOP_ATP860R 0x0007
#define PCI_DEVICE_ID_ARTOP_ATP865 0x0008
#define PCI_DEVICE_ID_ARTOP_ATP865R 0x0009
+#define PCI_DEVICE_ID_ARTOP_ATP867A 0x000A
+#define PCI_DEVICE_ID_ARTOP_ATP867B 0x000B
#define PCI_DEVICE_ID_ARTOP_AEC7610 0x8002
#define PCI_DEVICE_ID_ARTOP_AEC7612UW 0x8010
#define PCI_DEVICE_ID_ARTOP_AEC7612U 0x8020
@@ -1813,6 +1837,10 @@
#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
+#define PCI_VENDOR_ID_DIGIGRAM 0x1369
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
+
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
@@ -1880,6 +1908,8 @@
#define PCI_SUBDEVICE_ID_CCD_SWYX4S 0xB540
#define PCI_SUBDEVICE_ID_CCD_JH4S20 0xB550
#define PCI_SUBDEVICE_ID_CCD_IOB8ST_1 0xB552
+#define PCI_SUBDEVICE_ID_CCD_JHSE1 0xB553
+#define PCI_SUBDEVICE_ID_CCD_JH8S 0xB55B
#define PCI_SUBDEVICE_ID_CCD_BN4S 0xB560
#define PCI_SUBDEVICE_ID_CCD_BN8S 0xB562
#define PCI_SUBDEVICE_ID_CCD_BNE1 0xB563
@@ -1932,6 +1962,8 @@
#define PCI_DEVICE_ID_LAVA_DSERIAL 0x0100 /* 2x 16550 */
#define PCI_DEVICE_ID_LAVA_QUATRO_A 0x0101 /* 2x 16550, half of 4 port */
#define PCI_DEVICE_ID_LAVA_QUATRO_B 0x0102 /* 2x 16550, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATTRO_A 0x0120 /* 2x 16550A, half of 4 port */
+#define PCI_DEVICE_ID_LAVA_QUATTRO_B 0x0121 /* 2x 16550A, half of 4 port */
#define PCI_DEVICE_ID_LAVA_OCTO_A 0x0180 /* 4x 16550A, half of 8 port */
#define PCI_DEVICE_ID_LAVA_OCTO_B 0x0181 /* 4x 16550A, half of 8 port */
#define PCI_DEVICE_ID_LAVA_PORT_PLUS 0x0200 /* 2x 16650 */
@@ -1962,15 +1994,21 @@
#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
+#define PCI_DEVICE_ID_OXSEMI_C950 0x950B
#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523
+#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001
#define PCI_VENDOR_ID_CHELSIO 0x1425
#define PCI_VENDOR_ID_SAMSUNG 0x144d
+#define PCI_VENDOR_ID_GIGABYTE 0x1458
+
+#define PCI_VENDOR_ID_AMBIT 0x1468
+
#define PCI_VENDOR_ID_MYRICOM 0x14c1
#define PCI_VENDOR_ID_TITAN 0x14D2
@@ -1998,6 +2036,7 @@
#define PCI_DEVICE_ID_AFAVLAB_P030 0x2182
#define PCI_SUBDEVICE_ID_AFAVLAB_P061 0x2150
+#define PCI_VENDOR_ID_BCM_GVC 0x14a4
#define PCI_VENDOR_ID_BROADCOM 0x14e4
#define PCI_DEVICE_ID_TIGON3_5752 0x1600
#define PCI_DEVICE_ID_TIGON3_5752M 0x1601
@@ -2047,7 +2086,6 @@
#define PCI_DEVICE_ID_TIGON3_5787M 0x1693
#define PCI_DEVICE_ID_TIGON3_5782 0x1696
#define PCI_DEVICE_ID_TIGON3_5784 0x1698
-#define PCI_DEVICE_ID_TIGON3_5785 0x1699
#define PCI_DEVICE_ID_TIGON3_5786 0x169a
#define PCI_DEVICE_ID_TIGON3_5787 0x169b
#define PCI_DEVICE_ID_TIGON3_5788 0x169c
@@ -2077,6 +2115,7 @@
#define PCI_VENDOR_ID_MAINPINE 0x1522
#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100
#define PCI_VENDOR_ID_ENE 0x1524
+#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510
#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750
@@ -2112,6 +2151,8 @@
#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c
#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274
+#define PCI_VENDOR_ID_DFI 0x15bd
+
#define PCI_VENDOR_ID_QUICKNET 0x15e2
#define PCI_DEVICE_ID_QUICKNET_XJ 0x0500
@@ -2131,6 +2172,10 @@
#define PCI_DEVICE_ID_ADDIDATA_APCI7420_3 0x700D
#define PCI_DEVICE_ID_ADDIDATA_APCI7300_3 0x700E
#define PCI_DEVICE_ID_ADDIDATA_APCI7800_3 0x700F
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7300 0x7010
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7420 0x7011
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7500 0x7012
+#define PCI_DEVICE_ID_ADDIDATA_APCIe7800 0x7013
#define PCI_VENDOR_ID_PDC 0x15e9
@@ -2215,10 +2260,20 @@
#define PCI_VENDOR_ID_TOPSPIN 0x1867
+#define PCI_VENDOR_ID_SILAN 0x1904
+
#define PCI_VENDOR_ID_TDI 0x192E
#define PCI_DEVICE_ID_TDI_EHCI 0x0101
#define PCI_VENDOR_ID_FREESCALE 0x1957
+#define PCI_DEVICE_ID_MPC8315E 0x00b4
+#define PCI_DEVICE_ID_MPC8315 0x00b5
+#define PCI_DEVICE_ID_MPC8314E 0x00b6
+#define PCI_DEVICE_ID_MPC8314 0x00b7
+#define PCI_DEVICE_ID_MPC8378E 0x00c4
+#define PCI_DEVICE_ID_MPC8378 0x00c5
+#define PCI_DEVICE_ID_MPC8377E 0x00c6
+#define PCI_DEVICE_ID_MPC8377 0x00c7
#define PCI_DEVICE_ID_MPC8548E 0x0012
#define PCI_DEVICE_ID_MPC8548 0x0013
#define PCI_DEVICE_ID_MPC8543E 0x0014
@@ -2226,6 +2281,8 @@
#define PCI_DEVICE_ID_MPC8547E 0x0018
#define PCI_DEVICE_ID_MPC8545E 0x0019
#define PCI_DEVICE_ID_MPC8545 0x001a
+#define PCI_DEVICE_ID_MPC8569E 0x0061
+#define PCI_DEVICE_ID_MPC8569 0x0060
#define PCI_DEVICE_ID_MPC8568E 0x0020
#define PCI_DEVICE_ID_MPC8568 0x0021
#define PCI_DEVICE_ID_MPC8567E 0x0022
@@ -2238,6 +2295,22 @@
#define PCI_DEVICE_ID_MPC8572 0x0041
#define PCI_DEVICE_ID_MPC8536E 0x0050
#define PCI_DEVICE_ID_MPC8536 0x0051
+#define PCI_DEVICE_ID_P2020E 0x0070
+#define PCI_DEVICE_ID_P2020 0x0071
+#define PCI_DEVICE_ID_P2010E 0x0078
+#define PCI_DEVICE_ID_P2010 0x0079
+#define PCI_DEVICE_ID_P1020E 0x0100
+#define PCI_DEVICE_ID_P1020 0x0101
+#define PCI_DEVICE_ID_P1011E 0x0108
+#define PCI_DEVICE_ID_P1011 0x0109
+#define PCI_DEVICE_ID_P1022E 0x0110
+#define PCI_DEVICE_ID_P1022 0x0111
+#define PCI_DEVICE_ID_P1013E 0x0118
+#define PCI_DEVICE_ID_P1013 0x0119
+#define PCI_DEVICE_ID_P4080E 0x0400
+#define PCI_DEVICE_ID_P4080 0x0401
+#define PCI_DEVICE_ID_P4040E 0x0408
+#define PCI_DEVICE_ID_P4040 0x0409
#define PCI_DEVICE_ID_MPC8641 0x7010
#define PCI_DEVICE_ID_MPC8641D 0x7011
#define PCI_DEVICE_ID_MPC8610 0x7018
@@ -2251,6 +2324,7 @@
#define PCI_VENDOR_ID_JMICRON 0x197B
#define PCI_DEVICE_ID_JMICRON_JMB360 0x2360
#define PCI_DEVICE_ID_JMICRON_JMB361 0x2361
+#define PCI_DEVICE_ID_JMICRON_JMB362 0x2362
#define PCI_DEVICE_ID_JMICRON_JMB363 0x2363
#define PCI_DEVICE_ID_JMICRON_JMB365 0x2365
#define PCI_DEVICE_ID_JMICRON_JMB366 0x2366
@@ -2263,6 +2337,10 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF0 0x1600
#define PCI_DEVICE_ID_KORENIX_JETCARDF1 0x16ff
+#define PCI_VENDOR_ID_QMI 0x1a32
+
+#define PCI_VENDOR_ID_AZWAVE 0x1a3b
+
#define PCI_VENDOR_ID_TEKRAM 0x1de1
#define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29
@@ -2342,6 +2420,9 @@
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_CPT_SMBUS 0x1c22
+#define PCI_DEVICE_ID_INTEL_CPT_LPC1 0x1c42
+#define PCI_DEVICE_ID_INTEL_CPT_LPC2 0x1c43
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
#define PCI_DEVICE_ID_INTEL_82801AA_1 0x2411
#define PCI_DEVICE_ID_INTEL_82801AA_3 0x2413
@@ -2373,6 +2454,7 @@
#define PCI_DEVICE_ID_INTEL_82801CA_12 0x248c
#define PCI_DEVICE_ID_INTEL_82801DB_0 0x24c0
#define PCI_DEVICE_ID_INTEL_82801DB_1 0x24c1
+#define PCI_DEVICE_ID_INTEL_82801DB_2 0x24c2
#define PCI_DEVICE_ID_INTEL_82801DB_3 0x24c3
#define PCI_DEVICE_ID_INTEL_82801DB_5 0x24c5
#define PCI_DEVICE_ID_INTEL_82801DB_6 0x24c6
@@ -2463,6 +2545,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
@@ -2476,6 +2560,16 @@
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF3 0x3713
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF4 0x3714
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF5 0x3715
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF6 0x3716
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF7 0x3717
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF8 0x3718
+#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
@@ -2606,6 +2700,7 @@
#define PCI_DEVICE_ID_NETMOS_9835 0x9835
#define PCI_DEVICE_ID_NETMOS_9845 0x9845
#define PCI_DEVICE_ID_NETMOS_9855 0x9855
+#define PCI_DEVICE_ID_NETMOS_9901 0x9901
#define PCI_VENDOR_ID_3COM_2 0xa727
diff --git a/libdde-linux26/contrib/include/linux/phy.h b/libdde-linux26/contrib/include/linux/phy.h
index d7e54d98..b1368b8f 100644
--- a/libdde-linux26/contrib/include/linux/phy.h
+++ b/libdde-linux26/contrib/include/linux/phy.h
@@ -79,7 +79,7 @@ typedef enum {
* Need to be a little smaller than phydev->dev.bus_id to leave room
* for the ":%02x"
*/
-#define MII_BUS_ID_SIZE (BUS_ID_SIZE - 3)
+#define MII_BUS_ID_SIZE (20 - 3)
/*
* The Bus class for PHYs. Devices which provide access to
@@ -315,8 +315,7 @@ struct phy_device {
/* Interrupt and Polling infrastructure */
struct work_struct phy_queue;
- struct work_struct state_queue;
- struct timer_list phy_timer;
+ struct delayed_work state_queue;
atomic_t irq_disable;
struct mutex lock;
@@ -389,6 +388,12 @@ struct phy_driver {
/* Enables or disables interrupts */
int (*config_intr)(struct phy_device *phydev);
+ /*
+ * Checks if the PHY generated an interrupt.
+ * For multi-PHY devices with a shared PHY interrupt pin.
+ */
+ int (*did_interrupt)(struct phy_device *phydev);
+
/* Clears up any memory if needed */
void (*remove)(struct phy_device *phydev);
@@ -402,7 +407,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[BUS_ID_SIZE];
+ char bus_id[20];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
@@ -439,10 +444,16 @@ static inline int phy_write(struct phy_device *phydev, u16 regnum, u16 val)
int get_phy_id(struct mii_bus *bus, int addr, u32 *phy_id);
struct phy_device* get_phy_device(struct mii_bus *bus, int addr);
+int phy_device_register(struct phy_device *phy);
int phy_clear_interrupt(struct phy_device *phydev);
int phy_config_interrupt(struct phy_device *phydev, u32 interrupts);
+int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
+ u32 flags, phy_interface_t interface);
struct phy_device * phy_attach(struct net_device *dev,
const char *bus_id, u32 flags, phy_interface_t interface);
+int phy_connect_direct(struct net_device *dev, struct phy_device *phydev,
+ void (*handler)(struct net_device *), u32 flags,
+ phy_interface_t interface);
struct phy_device * phy_connect(struct net_device *dev, const char *bus_id,
void (*handler)(struct net_device *), u32 flags,
phy_interface_t interface);
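
The new did_interrupt() hook lets the PHY state machine ask the driver whether this particular PHY actually raised a shared interrupt line before acting on it. A minimal sketch of a driver-side implementation, assuming a hypothetical status register MII_MYPHY_ISR and pending bit MYPHY_ISR_PENDING (neither is part of this patch):

/*
 * Hedged sketch only: MII_MYPHY_ISR and MYPHY_ISR_PENDING are made-up
 * names standing in for a real PHY's interrupt-status register.
 */
static int myphy_did_interrupt(struct phy_device *phydev)
{
	int isr = phy_read(phydev, MII_MYPHY_ISR);

	if (isr < 0)
		return 0;	/* MDIO read failed; assume it was not us */

	return (isr & MYPHY_ISR_PENDING) != 0;
}

A driver would then set .did_interrupt = myphy_did_interrupt in its struct phy_driver alongside .ack_interrupt and .config_intr.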
diff --git a/libdde-linux26/contrib/include/linux/skbuff.h b/libdde-linux26/contrib/include/linux/skbuff.h
index a9e9534d..745f6159 100644
--- a/libdde-linux26/contrib/include/linux/skbuff.h
+++ b/libdde-linux26/contrib/include/linux/skbuff.h
@@ -142,6 +142,9 @@ struct skb_shared_info {
atomic_t dataref;
unsigned short nr_frags;
unsigned short gso_size;
+#ifdef CONFIG_HAS_DMA
+ dma_addr_t dma_head;
+#endif
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
unsigned short gso_type;
@@ -152,7 +155,7 @@ struct skb_shared_info {
struct sk_buff *frag_list;
skb_frag_t frags[MAX_SKB_FRAGS];
#ifdef CONFIG_HAS_DMA
- dma_addr_t dma_maps[MAX_SKB_FRAGS + 1];
+ dma_addr_t dma_maps[MAX_SKB_FRAGS];
#endif
};
@@ -1900,6 +1903,21 @@ static inline void skb_copy_queue_mapping(struct sk_buff *to, const struct sk_bu
to->queue_mapping = from->queue_mapping;
}
+static inline void skb_record_rx_queue(struct sk_buff *skb, u16 rx_queue)
+{
+ skb->queue_mapping = rx_queue + 1;
+}
+
+static inline u16 skb_get_rx_queue(struct sk_buff *skb)
+{
+ return skb->queue_mapping - 1;
+}
+
+static inline bool skb_rx_queue_recorded(struct sk_buff *skb)
+{
+ return (skb->queue_mapping != 0);
+}
+
#ifdef CONFIG_XFRM
static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
{
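
skb_record_rx_queue() stores the receive queue index plus one in queue_mapping, so a value of zero keeps meaning "no queue recorded"; skb_get_rx_queue() removes the offset and skb_rx_queue_recorded() simply tests for non-zero. A hedged sketch of the intended round trip in a multiqueue driver (struct example_rx_ring and its index field are illustrative, not from this patch):

/* Illustrative only: struct example_rx_ring is not defined by this patch. */
static void example_rx(struct example_rx_ring *ring, struct sk_buff *skb)
{
	skb_record_rx_queue(skb, ring->index);	/* stores index + 1 */
	netif_receive_skb(skb);
}

static u16 example_select_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
	if (skb_rx_queue_recorded(skb))		/* queue_mapping != 0 */
		return skb_get_rx_queue(skb) % dev->real_num_tx_queues;
	return 0;
}

The simple_tx_hash() change in net/core/dev.c later in this patch uses the same pair to derive a consistent transmit queue from the recorded receive queue.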
diff --git a/libdde-linux26/contrib/include/linux/workqueue.h b/libdde-linux26/contrib/include/linux/workqueue.h
index 3cd51e57..cf24c20d 100644
--- a/libdde-linux26/contrib/include/linux/workqueue.h
+++ b/libdde-linux26/contrib/include/linux/workqueue.h
@@ -41,6 +41,11 @@ struct delayed_work {
struct timer_list timer;
};
+static inline struct delayed_work *to_delayed_work(struct work_struct *work)
+{
+ return container_of(work, struct delayed_work, work);
+}
+
struct execute_work {
struct work_struct work;
};
@@ -89,7 +94,7 @@ struct execute_work {
/*
* initialize all of a work item in one go
*
- * NOTE! No point in using "atomic_long_set()": useing a direct
+ * NOTE! No point in using "atomic_long_set()": using a direct
* assignment of the work data initializer allows the compiler
* to generate better code.
*/
@@ -202,6 +207,7 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
extern void flush_workqueue(struct workqueue_struct *wq);
extern void flush_scheduled_work(void);
+extern void flush_delayed_work(struct delayed_work *work);
extern int schedule_work(struct work_struct *work);
extern int schedule_work_on(int cpu, struct work_struct *work);
@@ -235,6 +241,21 @@ static inline int cancel_delayed_work(struct delayed_work *work)
return ret;
}
+/*
+ * Like above, but uses del_timer() instead of del_timer_sync(). This means,
+ * if it returns 0 the timer function may be running and the queueing is in
+ * progress.
+ */
+static inline int __cancel_delayed_work(struct delayed_work *work)
+{
+ int ret;
+
+ ret = del_timer(&work->timer);
+ if (ret)
+ work_clear_pending(&work->work);
+ return ret;
+}
+
extern int cancel_delayed_work_sync(struct delayed_work *work);
/* Obsolete. use cancel_delayed_work_sync() */
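
to_delayed_work() recovers the containing struct delayed_work from the work_struct pointer passed to a handler, and __cancel_delayed_work() is the cheaper del_timer()-based variant whose return value of 0 means the timer function may already be running. A hedged sketch of a self-rearming handler, assuming a hypothetical struct my_dev that embeds the delayed work:

/* Hedged sketch; struct my_dev, my_dev_poll() and MY_POLL_JIFFIES are assumptions. */
static void my_poll_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct my_dev *md = container_of(dwork, struct my_dev, poll_work);

	my_dev_poll(md);
	schedule_delayed_work(dwork, MY_POLL_JIFFIES);	/* re-arm ourselves */
}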
diff --git a/libdde-linux26/contrib/include/net/ethoc.h b/libdde-linux26/contrib/include/net/ethoc.h
new file mode 100644
index 00000000..96f3789b
--- /dev/null
+++ b/libdde-linux26/contrib/include/net/ethoc.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/net/ethoc.h
+ *
+ * Copyright (C) 2008-2009 Avionic Design GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Written by Thierry Reding <thierry.reding@avionic-design.de>
+ */
+
+#ifndef LINUX_NET_ETHOC_H
+#define LINUX_NET_ETHOC_H 1
+
+struct ethoc_platform_data {
+ u8 hwaddr[IFHWADDRLEN];
+ s8 phy_id;
+};
+
+#endif /* !LINUX_NET_ETHOC_H */
+
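
ethoc_platform_data is how board code hands the OpenCores Ethernet MAC driver a fixed MAC address and a PHY address. A hedged board-file sketch (the address bytes and phy_id value are illustrative only):

/* Illustrative board data; the MAC bytes are assumptions, not from this patch. */
static struct ethoc_platform_data example_ethoc_pdata = {
	.hwaddr = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	.phy_id = -1,	/* let the driver probe the MDIO bus for the PHY */
};

The structure would typically be attached to the platform device through its dev.platform_data pointer before registration.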
diff --git a/libdde-linux26/contrib/kernel/rcuclassic.c b/libdde-linux26/contrib/kernel/rcuclassic.c
new file mode 100644
index 00000000..654c640a
--- /dev/null
+++ b/libdde-linux26/contrib/kernel/rcuclassic.c
@@ -0,0 +1,788 @@
+/*
+ * Read-Copy Update mechanism for mutual exclusion
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright IBM Corporation, 2001
+ *
+ * Authors: Dipankar Sarma <dipankar@in.ibm.com>
+ * Manfred Spraul <manfred@colorfullife.com>
+ *
+ * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
+ * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
+ * Papers:
+ * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
+ * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
+ *
+ * For detailed explanation of Read-Copy Update mechanism see -
+ * Documentation/RCU
+ *
+ */
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/smp.h>
+#include <linux/rcupdate.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <asm/atomic.h>
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/completion.h>
+#include <linux/moduleparam.h>
+#include <linux/percpu.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/mutex.h>
+#include <linux/time.h>
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+ STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
+
+/* Definition for rcupdate control block. */
+static struct rcu_ctrlblk rcu_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .pending = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
+ .cpumask = CPU_BITS_NONE,
+};
+static struct rcu_ctrlblk rcu_bh_ctrlblk = {
+ .cur = -300,
+ .completed = -300,
+ .pending = -300,
+ .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
+ .cpumask = CPU_BITS_NONE,
+};
+
+DEFINE_PER_CPU(struct rcu_data, rcu_data) = { 0L };
+DEFINE_PER_CPU(struct rcu_data, rcu_bh_data) = { 0L };
+
+static int blimit = 10;
+static int qhimark = 10000;
+static int qlowmark = 100;
+
+#ifdef CONFIG_SMP
+static void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ unsigned long flags;
+
+ set_need_resched();
+ spin_lock_irqsave(&rcp->lock, flags);
+ if (unlikely(!rcp->signaled)) {
+ rcp->signaled = 1;
+ /*
+ * Don't send IPI to itself. With irqs disabled,
+ * rdp->cpu is the current cpu.
+ *
+ * cpu_online_mask is updated by the _cpu_down()
+ * using __stop_machine(). Since we're in irqs disabled
+ * section, __stop_machine() is not executing, hence
+ * the cpu_online_mask is stable.
+ *
+ * However, a cpu might have been offlined _just_ before
+ * we disabled irqs while entering here.
+ * And rcu subsystem might not yet have handled the CPU_DEAD
+ * notification, leading to the offlined cpu's bit
+ * being set in the rcp->cpumask.
+ *
+ * Hence cpumask = (rcp->cpumask & cpu_online_mask) to prevent
+ * sending smp_reschedule() to an offlined CPU.
+ */
+ for_each_cpu_and(cpu,
+ to_cpumask(rcp->cpumask), cpu_online_mask) {
+ if (cpu != rdp->cpu)
+ smp_send_reschedule(cpu);
+ }
+ }
+ spin_unlock_irqrestore(&rcp->lock, flags);
+}
+#else
+static inline void force_quiescent_state(struct rcu_data *rdp,
+ struct rcu_ctrlblk *rcp)
+{
+ set_need_resched();
+}
+#endif
+
+static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ long batch;
+
+ head->next = NULL;
+ smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
+
+ /*
+ * Determine the batch number of this callback.
+ *
+ * Using ACCESS_ONCE to avoid the following error when gcc eliminates
+ * local variable "batch" and emits codes like this:
+ * 1) rdp->batch = rcp->cur + 1 # gets old value
+ * ......
+ * 2)rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
+ * then [*nxttail[0], *nxttail[1]) may contain callbacks
+ * that batch# = rdp->batch, see the comment of struct rcu_data.
+ */
+ batch = ACCESS_ONCE(rcp->cur) + 1;
+
+ if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
+ /* process callbacks */
+ rdp->nxttail[0] = rdp->nxttail[1];
+ rdp->nxttail[1] = rdp->nxttail[2];
+ if (rcu_batch_after(batch - 1, rdp->batch))
+ rdp->nxttail[0] = rdp->nxttail[2];
+ }
+
+ rdp->batch = batch;
+ *rdp->nxttail[2] = head;
+ rdp->nxttail[2] = &head->next;
+
+ if (unlikely(++rdp->qlen > qhimark)) {
+ rdp->blimit = INT_MAX;
+ force_quiescent_state(rdp, &rcu_ctrlblk);
+ }
+}
+
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+
+static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
+{
+ rcp->gp_start = jiffies;
+ rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
+}
+
+static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+ int cpu;
+ long delta;
+ unsigned long flags;
+
+ /* Only let one CPU complain about others per time interval. */
+
+ spin_lock_irqsave(&rcp->lock, flags);
+ delta = jiffies - rcp->jiffies_stall;
+ if (delta < 2 || rcp->cur != rcp->completed) {
+ spin_unlock_irqrestore(&rcp->lock, flags);
+ return;
+ }
+ rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rcp->lock, flags);
+
+ /* OK, time to rat on our buddy... */
+
+ printk(KERN_ERR "INFO: RCU detected CPU stalls:");
+ for_each_possible_cpu(cpu) {
+ if (cpumask_test_cpu(cpu, to_cpumask(rcp->cpumask)))
+ printk(" %d", cpu);
+ }
+ printk(" (detected by %d, t=%ld jiffies)\n",
+ smp_processor_id(), (long)(jiffies - rcp->gp_start));
+}
+
+static void print_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+ unsigned long flags;
+
+ printk(KERN_ERR "INFO: RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
+ smp_processor_id(), jiffies,
+ jiffies - rcp->gp_start);
+ dump_stack();
+ spin_lock_irqsave(&rcp->lock, flags);
+ if ((long)(jiffies - rcp->jiffies_stall) >= 0)
+ rcp->jiffies_stall =
+ jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
+ spin_unlock_irqrestore(&rcp->lock, flags);
+ set_need_resched(); /* kick ourselves to get things going. */
+}
+
+static void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+ long delta;
+
+ delta = jiffies - rcp->jiffies_stall;
+ if (cpumask_test_cpu(smp_processor_id(), to_cpumask(rcp->cpumask)) &&
+ delta >= 0) {
+
+ /* We haven't checked in, so go dump stack. */
+ print_cpu_stall(rcp);
+
+ } else if (rcp->cur != rcp->completed && delta >= 2) {
+
+ /* They had two seconds to dump stack, so complain. */
+ print_other_cpu_stall(rcp);
+ }
+}
+
+#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
+{
+}
+
+static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
+{
+}
+
+#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+
+/**
+ * call_rcu - Queue an RCU callback for invocation after a grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. RCU read-side critical
+ * sections are delimited by rcu_read_lock() and rcu_read_unlock(),
+ * and may be nested.
+ */
+void call_rcu(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+
+ head->func = func;
+ local_irq_save(flags);
+ __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu);
+
+/**
+ * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period.
+ * @head: structure to be used for queueing the RCU updates.
+ * @func: actual update function to be invoked after the grace period
+ *
+ * The update function will be invoked some time after a full grace
+ * period elapses, in other words after all currently executing RCU
+ * read-side critical sections have completed. call_rcu_bh() assumes
+ * that the read-side critical sections end on completion of a softirq
+ * handler. This means that read-side critical sections in process
+ * context must not be interrupted by softirqs. This interface is to be
+ * used when most of the read-side critical sections are in softirq context.
+ * RCU read-side critical sections are delimited by rcu_read_lock() and
+ * rcu_read_unlock() if in interrupt context, or rcu_read_lock_bh()
+ * and rcu_read_unlock_bh(), if in process context. These may be nested.
+ */
+void call_rcu_bh(struct rcu_head *head,
+ void (*func)(struct rcu_head *rcu))
+{
+ unsigned long flags;
+
+ head->func = func;
+ local_irq_save(flags);
+ __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(call_rcu_bh);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed(void)
+{
+ return rcu_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed);
+
+/*
+ * Return the number of RCU batches processed thus far. Useful
+ * for debug and statistics.
+ */
+long rcu_batches_completed_bh(void)
+{
+ return rcu_bh_ctrlblk.completed;
+}
+EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
+
+/* Raises the softirq for processing rcu_callbacks. */
+static inline void raise_rcu_softirq(void)
+{
+ raise_softirq(RCU_SOFTIRQ);
+}
+
+/*
+ * Invoke the completed RCU callbacks. They are expected to be in
+ * a per-cpu list.
+ */
+static void rcu_do_batch(struct rcu_data *rdp)
+{
+ unsigned long flags;
+ struct rcu_head *next, *list;
+ int count = 0;
+
+ list = rdp->donelist;
+ while (list) {
+ next = list->next;
+ prefetch(next);
+ list->func(list);
+ list = next;
+ if (++count >= rdp->blimit)
+ break;
+ }
+ rdp->donelist = list;
+
+ local_irq_save(flags);
+ rdp->qlen -= count;
+ local_irq_restore(flags);
+ if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
+ rdp->blimit = blimit;
+
+ if (!rdp->donelist)
+ rdp->donetail = &rdp->donelist;
+ else
+ raise_rcu_softirq();
+}
+
+/*
+ * Grace period handling:
+ * The grace period handling consists out of two steps:
+ * - A new grace period is started.
+ * This is done by rcu_start_batch. The start is not broadcasted to
+ * all cpus, they must pick this up by comparing rcp->cur with
+ * rdp->quiescbatch. All cpus are recorded in the
+ * rcu_ctrlblk.cpumask bitmap.
+ * - All cpus must go through a quiescent state.
+ * Since the start of the grace period is not broadcasted, at least two
+ * calls to rcu_check_quiescent_state are required:
+ * The first call just notices that a new grace period is running. The
+ * following calls check if there was a quiescent state since the beginning
+ * of the grace period. If so, it updates rcu_ctrlblk.cpumask. If
+ * the bitmap is empty, then the grace period is completed.
+ * rcu_check_quiescent_state calls rcu_start_batch() to start the next grace
+ * period (if necessary).
+ */
+
+/*
+ * Register a new batch of callbacks, and start it up if there is currently no
+ * active batch and the batch to be registered has not already occurred.
+ * Caller must hold rcu_ctrlblk.lock.
+ */
+static void rcu_start_batch(struct rcu_ctrlblk *rcp)
+{
+ if (rcp->cur != rcp->pending &&
+ rcp->completed == rcp->cur) {
+ rcp->cur++;
+ record_gp_stall_check_time(rcp);
+
+ /*
+ * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
+ * barrier. Otherwise it can cause tickless idle CPUs to be
+ * included in rcp->cpumask, which will extend grace periods
+ * unnecessarily.
+ */
+ smp_mb();
+ cpumask_andnot(to_cpumask(rcp->cpumask),
+ cpu_online_mask, nohz_cpu_mask);
+
+ rcp->signaled = 0;
+ }
+}
+
+/*
+ * cpu went through a quiescent state since the beginning of the grace period.
+ * Clear it from the cpu mask and complete the grace period if it was the last
+ * cpu. Start another grace period if someone has further entries pending
+ */
+static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
+{
+ cpumask_clear_cpu(cpu, to_cpumask(rcp->cpumask));
+ if (cpumask_empty(to_cpumask(rcp->cpumask))) {
+ /* batch completed ! */
+ rcp->completed = rcp->cur;
+ rcu_start_batch(rcp);
+ }
+}
+
+/*
+ * Check if the cpu has gone through a quiescent state (say context
+ * switch). If so and if it already hasn't done so in this RCU
+ * quiescent cycle, then indicate that it has done so.
+ */
+static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ if (rdp->quiescbatch != rcp->cur) {
+ /* start new grace period: */
+ rdp->qs_pending = 1;
+ rdp->passed_quiesc = 0;
+ rdp->quiescbatch = rcp->cur;
+ return;
+ }
+
+ /* Grace period already completed for this cpu?
+ * qs_pending is checked instead of the actual bitmap to avoid
+ * cacheline thrashing.
+ */
+ if (!rdp->qs_pending)
+ return;
+
+ /*
+ * Was there a quiescent state since the beginning of the grace
+ * period? If no, then exit and wait for the next call.
+ */
+ if (!rdp->passed_quiesc)
+ return;
+ rdp->qs_pending = 0;
+
+ spin_lock_irqsave(&rcp->lock, flags);
+ /*
+ * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
+ * during cpu startup. Ignore the quiescent state.
+ */
+ if (likely(rdp->quiescbatch == rcp->cur))
+ cpu_quiet(rdp->cpu, rcp);
+
+ spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* warning! helper for rcu_offline_cpu. do not use elsewhere without reviewing
+ * locking requirements, the list it's pulling from has to belong to a cpu
+ * which is dead and hence not processing interrupts.
+ */
+static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
+ struct rcu_head **tail, long batch)
+{
+ unsigned long flags;
+
+ if (list) {
+ local_irq_save(flags);
+ this_rdp->batch = batch;
+ *this_rdp->nxttail[2] = list;
+ this_rdp->nxttail[2] = tail;
+ local_irq_restore(flags);
+ }
+}
+
+static void __rcu_offline_cpu(struct rcu_data *this_rdp,
+ struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ /*
+ * if the cpu going offline owns the grace period
+ * we can block indefinitely waiting for it, so flush
+ * it here
+ */
+ spin_lock_irqsave(&rcp->lock, flags);
+ if (rcp->cur != rcp->completed)
+ cpu_quiet(rdp->cpu, rcp);
+ rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
+ rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
+ spin_unlock(&rcp->lock);
+
+ this_rdp->qlen += rdp->qlen;
+ local_irq_restore(flags);
+}
+
+static void rcu_offline_cpu(int cpu)
+{
+ struct rcu_data *this_rdp = &get_cpu_var(rcu_data);
+ struct rcu_data *this_bh_rdp = &get_cpu_var(rcu_bh_data);
+
+ __rcu_offline_cpu(this_rdp, &rcu_ctrlblk,
+ &per_cpu(rcu_data, cpu));
+ __rcu_offline_cpu(this_bh_rdp, &rcu_bh_ctrlblk,
+ &per_cpu(rcu_bh_data, cpu));
+ put_cpu_var(rcu_data);
+ put_cpu_var(rcu_bh_data);
+}
+
+#else
+
+static void rcu_offline_cpu(int cpu)
+{
+}
+
+#endif
+
+/*
+ * This does the RCU processing work from softirq context.
+ */
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ unsigned long flags;
+ long completed_snap;
+
+ if (rdp->nxtlist) {
+ local_irq_save(flags);
+ completed_snap = ACCESS_ONCE(rcp->completed);
+
+ /*
+ * move the other grace-period-completed entries to
+ * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
+ */
+ if (!rcu_batch_before(completed_snap, rdp->batch))
+ rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
+ else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
+ rdp->nxttail[0] = rdp->nxttail[1];
+
+ /*
+ * the grace period for entries in
+ * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
+ * move these entries to donelist
+ */
+ if (rdp->nxttail[0] != &rdp->nxtlist) {
+ *rdp->donetail = rdp->nxtlist;
+ rdp->donetail = rdp->nxttail[0];
+ rdp->nxtlist = *rdp->nxttail[0];
+ *rdp->donetail = NULL;
+
+ if (rdp->nxttail[1] == rdp->nxttail[0])
+ rdp->nxttail[1] = &rdp->nxtlist;
+ if (rdp->nxttail[2] == rdp->nxttail[0])
+ rdp->nxttail[2] = &rdp->nxtlist;
+ rdp->nxttail[0] = &rdp->nxtlist;
+ }
+
+ local_irq_restore(flags);
+
+ if (rcu_batch_after(rdp->batch, rcp->pending)) {
+ unsigned long flags2;
+
+ /* and start it/schedule start if it's a new batch */
+ spin_lock_irqsave(&rcp->lock, flags2);
+ if (rcu_batch_after(rdp->batch, rcp->pending)) {
+ rcp->pending = rdp->batch;
+ rcu_start_batch(rcp);
+ }
+ spin_unlock_irqrestore(&rcp->lock, flags2);
+ }
+ }
+
+ rcu_check_quiescent_state(rcp, rdp);
+ if (rdp->donelist)
+ rcu_do_batch(rdp);
+}
+
+static void rcu_process_callbacks(struct softirq_action *unused)
+{
+ /*
+ * Memory references from any prior RCU read-side critical sections
+ * executed by the interrupted code must be seen before any RCU
+ * grace-period manipulations below.
+ */
+
+ smp_mb(); /* See above block comment. */
+
+ __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
+ __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
+
+ /*
+ * Memory references from any later RCU read-side critical sections
+ * executed by the interrupted code must be seen after any RCU
+ * grace-period manipulations above.
+ */
+
+ smp_mb(); /* See above block comment. */
+}
+
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+ /* Check for CPU stalls, if enabled. */
+ check_cpu_stall(rcp);
+
+ if (rdp->nxtlist) {
+ long completed_snap = ACCESS_ONCE(rcp->completed);
+
+ /*
+ * This cpu has pending rcu entries and the grace period
+ * for them has completed.
+ */
+ if (!rcu_batch_before(completed_snap, rdp->batch))
+ return 1;
+ if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
+ rdp->nxttail[0] != rdp->nxttail[1])
+ return 1;
+ if (rdp->nxttail[0] != &rdp->nxtlist)
+ return 1;
+
+ /*
+ * This cpu has pending rcu entries and the new batch
+ * for them hasn't been started nor scheduled to start
+ */
+ if (rcu_batch_after(rdp->batch, rcp->pending))
+ return 1;
+ }
+
+ /* This cpu has finished callbacks to invoke */
+ if (rdp->donelist)
+ return 1;
+
+ /* The rcu core waits for a quiescent state from the cpu */
+ if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+ return 1;
+
+ /* nothing to do */
+ return 0;
+}
+
+/*
+ * Check to see if there is any immediate RCU-related work to be done
+ * by the current CPU, returning 1 if so. This function is part of the
+ * RCU implementation; it is -not- an exported member of the RCU API.
+ */
+int rcu_pending(int cpu)
+{
+ return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+ __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
+
+/*
+ * Check to see if any future RCU-related work will need to be done
+ * by the current CPU, even if none need be done immediately, returning
+ * 1 if so. This function is part of the RCU implementation; it is -not-
+ * an exported member of the RCU API.
+ */
+int rcu_needs_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
+
+ return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
+}
+
+/*
+ * Top-level function driving RCU grace-period detection, normally
+ * invoked from the scheduler-clock interrupt. This function simply
+ * increments counters that are read only from softirq by this same
+ * CPU, so there are no memory barriers required.
+ */
+void rcu_check_callbacks(int cpu, int user)
+{
+ if (user ||
+ (idle_cpu(cpu) && rcu_scheduler_active &&
+ !in_softirq() && hardirq_count() <= (1 << HARDIRQ_SHIFT))) {
+
+ /*
+ * Get here if this CPU took its interrupt from user
+ * mode or from the idle loop, and if this is not a
+ * nested interrupt. In this case, the CPU is in
+ * a quiescent state, so count it.
+ *
+ * Also do a memory barrier. This is needed to handle
+ * the case where writes from a preempt-disable section
+ * of code get reordered into schedule() by this CPU's
+ * write buffer. The memory barrier makes sure that
+ * the rcu_qsctr_inc() and rcu_bh_qsctr_inc() are seen
+ * by other CPUs to happen after any such write.
+ */
+
+ smp_mb(); /* See above block comment. */
+ rcu_qsctr_inc(cpu);
+ rcu_bh_qsctr_inc(cpu);
+
+ } else if (!in_softirq()) {
+
+ /*
+ * Get here if this CPU did not take its interrupt from
+ * softirq, in other words, if it is not interrupting
+ * a rcu_bh read-side critical section. This is an _bh
+ * critical section, so count it. The memory barrier
+ * is needed for the same reason as is the above one.
+ */
+
+ smp_mb(); /* See above block comment. */
+ rcu_bh_qsctr_inc(cpu);
+ }
+ raise_rcu_softirq();
+}
+
+static void __cpuinit rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
+ struct rcu_data *rdp)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&rcp->lock, flags);
+ memset(rdp, 0, sizeof(*rdp));
+ rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
+ rdp->donetail = &rdp->donelist;
+ rdp->quiescbatch = rcp->completed;
+ rdp->qs_pending = 0;
+ rdp->cpu = cpu;
+ rdp->blimit = blimit;
+ spin_unlock_irqrestore(&rcp->lock, flags);
+}
+
+static void __cpuinit rcu_online_cpu(int cpu)
+{
+ struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
+ struct rcu_data *bh_rdp = &per_cpu(rcu_bh_data, cpu);
+
+ rcu_init_percpu_data(cpu, &rcu_ctrlblk, rdp);
+ rcu_init_percpu_data(cpu, &rcu_bh_ctrlblk, bh_rdp);
+ open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ rcu_online_cpu(cpu);
+ break;
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ rcu_offline_cpu(cpu);
+ break;
+ default:
+ break;
+ }
+ return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata rcu_nb = {
+ .notifier_call = rcu_cpu_notify,
+};
+
+/*
+ * Initializes rcu mechanism. Assumed to be called early.
+ * That is, before the local timer (SMP) or jiffies timer (uniprocessor) is set up.
+ * Note that rcu_qsctr and friends are implicitly
+ * initialized due to the choice of ``0'' for RCU_CTR_INVALID.
+ */
+void __init __rcu_init(void)
+{
+#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
+ printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
+#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
+ rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
+ (void *)(long)smp_processor_id());
+ /* Register notifier for non-boot CPUs */
+ register_cpu_notifier(&rcu_nb);
+}
+
+module_param(blimit, int, 0);
+module_param(qhimark, int, 0);
+module_param(qlowmark, int, 0);
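
The exported interface above follows the classic RCU pattern: readers bracket their accesses with rcu_read_lock()/rcu_read_unlock(), while updaters publish a new version and pass the old one to call_rcu(), which __call_rcu() queues on the per-CPU nxtlist for a later batch. A hedged usage sketch (struct my_item and the current_item pointer are illustrative, not part of this file):

/* Illustrative updater; my_item and current_item are assumptions. */
struct my_item {
	int value;
	struct rcu_head rcu;
};

static struct my_item *current_item;

static void my_item_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_item, rcu));
}

static void my_item_replace(struct my_item *new_item)
{
	struct my_item *old = current_item;

	rcu_assign_pointer(current_item, new_item);
	if (old)
		call_rcu(&old->rcu, my_item_free);	/* freed after a grace period */
}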
diff --git a/libdde-linux26/contrib/kernel/rcupdate.c b/libdde-linux26/contrib/kernel/rcupdate.c
index cae8a059..c6bfa1af 100644
--- a/libdde-linux26/contrib/kernel/rcupdate.c
+++ b/libdde-linux26/contrib/kernel/rcupdate.c
@@ -180,6 +180,7 @@ void __init rcu_init(void)
{
__rcu_init();
}
+core_initcall(rcu_init);
void rcu_scheduler_starting(void)
{
diff --git a/libdde-linux26/contrib/lib/devres.c b/libdde-linux26/contrib/lib/devres.c
new file mode 100644
index 00000000..72c89090
--- /dev/null
+++ b/libdde-linux26/contrib/lib/devres.c
@@ -0,0 +1,351 @@
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+void devm_ioremap_release(struct device *dev, void *res)
+{
+ iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+ unsigned long size)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioremap_nocache(offset, size);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+ iounmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+ (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+#ifdef CONFIG_HAS_IOPORT
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+ ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+ void *match_data)
+{
+ return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+ unsigned int nr)
+{
+ void __iomem **ptr, *addr;
+
+ ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return NULL;
+
+ addr = ioport_map(port, nr);
+ if (addr) {
+ *ptr = addr;
+ devres_add(dev, ptr);
+ } else
+ devres_free(ptr);
+
+ return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap(). @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+ ioport_unmap(addr);
+ WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+ devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+ void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+ struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+ struct pcim_iomap_devres *this = res;
+ int i;
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (this->table[i])
+ pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @pdev. If iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+ struct pcim_iomap_devres *dr, *new_dr;
+
+ dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+ if (dr)
+ return dr->table;
+
+ new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+ if (!new_dr)
+ return NULL;
+ dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+ return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+ void __iomem **tbl;
+
+ BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
+ return NULL;
+
+ tbl[bar] = pci_iomap(pdev, bar, maxlen);
+ return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ void __iomem **tbl;
+ int i;
+
+ pci_iounmap(pdev, addr);
+
+ tbl = (void __iomem **)pcim_iomap_table(pdev);
+ BUG_ON(!tbl);
+
+ for (i = 0; i < PCIM_IOMAP_MAX; i++)
+ if (tbl[i] == addr) {
+ tbl[i] = NULL;
+ return;
+ }
+ WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+ void __iomem * const *iomap;
+ int i, rc;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return -ENOMEM;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ unsigned long len;
+
+ if (!(mask & (1 << i)))
+ continue;
+
+ rc = -EINVAL;
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ goto err_inval;
+
+ rc = pci_request_region(pdev, i, name);
+ if (rc)
+ goto err_inval;
+
+ rc = -ENOMEM;
+ if (!pcim_iomap(pdev, i, 0))
+ goto err_region;
+ }
+
+ return 0;
+
+ err_region:
+ pci_release_region(pdev, i);
+ err_inval:
+ while (--i >= 0) {
+ if (!(mask & (1 << i)))
+ continue;
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+
+/**
+ * pcim_iomap_regions_request_all - Request all BARs and iomap specified ones
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to iomap
+ * @name: Name used when requesting regions
+ *
+ * Request all PCI BARs and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions_request_all(struct pci_dev *pdev, u16 mask,
+ const char *name)
+{
+ int request_mask = ((1 << 6) - 1) & ~mask;
+ int rc;
+
+ rc = pci_request_selected_regions(pdev, request_mask, name);
+ if (rc)
+ return rc;
+
+ rc = pcim_iomap_regions(pdev, mask, name);
+ if (rc)
+ pci_release_selected_regions(pdev, request_mask);
+ return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions_request_all);
+
+/**
+ * pcim_iounmap_regions - Unmap and release PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to unmap and release
+ *
+ * Unmap and release regions specified by @mask.
+ */
+void pcim_iounmap_regions(struct pci_dev *pdev, u16 mask)
+{
+ void __iomem * const *iomap;
+ int i;
+
+ iomap = pcim_iomap_table(pdev);
+ if (!iomap)
+ return;
+
+ for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+ if (!(mask & (1 << i)))
+ continue;
+
+ pcim_iounmap(pdev, iomap[i]);
+ pci_release_region(pdev, i);
+ }
+}
+EXPORT_SYMBOL(pcim_iounmap_regions);
+#endif
+#endif
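
The point of the devm_*/pcim_* wrappers is that every mapping recorded through devres is torn down automatically when the driver detaches, so probe() error paths and remove() need no matching iounmap calls. A hedged probe() sketch using devm_ioremap() (the driver name and resource layout are illustrative only):

/* Illustrative platform probe; the mydrv names are assumptions, not from this file. */
static int mydrv_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *regs;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	regs = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!regs)
		return -ENOMEM;

	/* use regs; the mapping is released automatically on driver detach */
	return 0;
}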
diff --git a/libdde-linux26/contrib/lib/kobject.c b/libdde-linux26/contrib/lib/kobject.c
index 0487d1f6..a1682ef4 100644
--- a/libdde-linux26/contrib/lib/kobject.c
+++ b/libdde-linux26/contrib/lib/kobject.c
@@ -794,7 +794,7 @@ static struct kset *kset_create(const char *name,
kset = kzalloc(sizeof(*kset), GFP_KERNEL);
if (!kset)
return NULL;
- kobject_set_name(&kset->kobj, name);
+ kobject_set_name(&kset->kobj, "%s", name);
kset->uevent_ops = uevent_ops;
kset->kobj.parent = parent_kobj;
diff --git a/libdde-linux26/contrib/net/core/net-sysfs.c b/libdde-linux26/contrib/net/core/net-sysfs.c
index 484f5875..2da59a0a 100644
--- a/libdde-linux26/contrib/net/core/net-sysfs.c
+++ b/libdde-linux26/contrib/net/core/net-sysfs.c
@@ -498,7 +498,7 @@ int netdev_register_kobject(struct net_device *net)
dev->groups = groups;
BUILD_BUG_ON(BUS_ID_SIZE < IFNAMSIZ);
- dev_set_name(dev, net->name);
+ dev_set_name(dev, "%s", net->name);
#ifdef CONFIG_SYSFS
*groups++ = &netstat_group;
diff --git a/libdde-linux26/contrib/net/core/skb_dma_map.c b/libdde-linux26/contrib/net/core/skb_dma_map.c
index 86234923..07d4ac5e 100644
--- a/libdde-linux26/contrib/net/core/skb_dma_map.c
+++ b/libdde-linux26/contrib/net/core/skb_dma_map.c
@@ -20,7 +20,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
if (dma_mapping_error(dev, map))
goto out_err;
- sp->dma_maps[0] = map;
+ sp->dma_head = map;
for (i = 0; i < sp->nr_frags; i++) {
skb_frag_t *fp = &sp->frags[i];
@@ -28,7 +28,7 @@ int skb_dma_map(struct device *dev, struct sk_buff *skb,
fp->size, dir);
if (dma_mapping_error(dev, map))
goto unwind;
- sp->dma_maps[i + 1] = map;
+ sp->dma_maps[i] = map;
}
sp->num_dma_maps = i + 1;
@@ -38,10 +38,10 @@ unwind:
while (--i >= 0) {
skb_frag_t *fp = &sp->frags[i];
- dma_unmap_page(dev, sp->dma_maps[i + 1],
+ dma_unmap_page(dev, sp->dma_maps[i],
fp->size, dir);
}
- dma_unmap_single(dev, sp->dma_maps[0],
+ dma_unmap_single(dev, sp->dma_head,
skb_headlen(skb), dir);
out_err:
return -ENOMEM;
@@ -54,12 +54,12 @@ void skb_dma_unmap(struct device *dev, struct sk_buff *skb,
struct skb_shared_info *sp = skb_shinfo(skb);
int i;
- dma_unmap_single(dev, sp->dma_maps[0],
+ dma_unmap_single(dev, sp->dma_head,
skb_headlen(skb), dir);
for (i = 0; i < sp->nr_frags; i++) {
skb_frag_t *fp = &sp->frags[i];
- dma_unmap_page(dev, sp->dma_maps[i + 1],
+ dma_unmap_page(dev, sp->dma_maps[i],
fp->size, dir);
}
}
diff --git a/libdde-linux26/include/linux/device.h b/libdde-linux26/include/linux/device.h
index 41b4227d..3dc6e94e 100644
--- a/libdde-linux26/include/linux/device.h
+++ b/libdde-linux26/include/linux/device.h
@@ -553,7 +553,10 @@ extern const char *dev_driver_string(const struct device *dev);
printk(level "%s %s: " format , dev_driver_string(dev) , \
dev_name(dev) , ## arg)
#else
-#define dev_printk(level, dev, format, arg...)
+#include <stdio.h>
+#define dev_printk(level, dev, format, arg...) \
+ fprintf(stderr, level "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
#endif
#define dev_emerg(dev, format, arg...) \
diff --git a/libdde-linux26/include/linux/kernel.h b/libdde-linux26/include/linux/kernel.h
index 573ed07c..63549398 100644
--- a/libdde-linux26/include/linux/kernel.h
+++ b/libdde-linux26/include/linux/kernel.h
@@ -363,6 +363,8 @@ static inline char *pack_hex_byte(char *buf, u8 byte)
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)
+#define pr_cont(fmt, ...) \
+ printk(KERN_CONT fmt, ##__VA_ARGS__)
#if defined(DEBUG)
#ifndef DDE_LINUX
diff --git a/libdde-linux26/lib/src/Makefile b/libdde-linux26/lib/src/Makefile
index 4f1ec099..abcc533e 100644
--- a/libdde-linux26/lib/src/Makefile
+++ b/libdde-linux26/lib/src/Makefile
@@ -91,6 +91,8 @@ SRC_C_libdde_linux26.o.a += \
kernel/kthread.c \
kernel/mutex.c \
kernel/notifier.c \
+ kernel/rcupdate.c \
+ kernel/rcuclassic.c \
kernel/resource.c \
kernel/rwsem.c \
kernel/sched.c \
@@ -106,6 +108,7 @@ SRC_C_libdde_linux26.o.a += \
lib/crc32.c \
lib/ctype.c \
lib/cpumask.c \
+ lib/devres.c \
lib/find_next_bit.c \
lib/hexdump.c \
lib/idr.c \
diff --git a/libdde-linux26/lib/src/arch/l4/pci.c b/libdde-linux26/lib/src/arch/l4/pci.c
index b50a7353..6dd8e879 100644
--- a/libdde-linux26/lib/src/arch/l4/pci.c
+++ b/libdde-linux26/lib/src/arch/l4/pci.c
@@ -171,7 +171,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
return pcibios_enable_irq(dev);
#endif
WARN_UNIMPL;
- return -1;
+ return 0;
}
/*******************************************************************************************
diff --git a/libdde-linux26/lib/src/drivers/base/core.c b/libdde-linux26/lib/src/drivers/base/core.c
index e3800714..c6363dd7 100644
--- a/libdde-linux26/lib/src/drivers/base/core.c
+++ b/libdde-linux26/lib/src/drivers/base/core.c
@@ -1253,7 +1253,7 @@ struct device *__root_device_register(const char *name, struct module *owner)
if (!root)
return ERR_PTR(err);
- err = dev_set_name(&root->dev, name);
+ err = dev_set_name(&root->dev, "%s", name);
if (err) {
kfree(root);
return ERR_PTR(err);
diff --git a/libdde-linux26/lib/src/net/core/dev.c b/libdde-linux26/lib/src/net/core/dev.c
index cf036525..3c3a1d13 100644
--- a/libdde-linux26/lib/src/net/core/dev.c
+++ b/libdde-linux26/lib/src/net/core/dev.c
@@ -1731,6 +1731,13 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
simple_tx_hashrnd_initialized = 1;
}
+ if (skb_rx_queue_recorded(skb)) {
+ u32 val = skb_get_rx_queue(skb);
+
+ hash = jhash_1word(val, simple_tx_hashrnd);
+ goto out;
+ }
+
switch (skb->protocol) {
case htons(ETH_P_IP):
if (!(ip_hdr(skb)->frag_off & htons(IP_MF | IP_OFFSET)))
@@ -1768,6 +1775,7 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
hash = jhash_3words(addr1, addr2, ports, simple_tx_hashrnd);
+out:
return (u16) (((u64) hash * dev->real_num_tx_queues) >> 32);
}
@@ -3377,10 +3385,10 @@ void __dev_set_rx_mode(struct net_device *dev)
/* Unicast addresses changes may only happen under the rtnl,
* therefore calling __dev_set_promiscuity here is safe.
*/
- if (dev->uc_count > 0 && !dev->uc_promisc) {
+ if (dev->uc.count > 0 && !dev->uc_promisc) {
__dev_set_promiscuity(dev, 1);
dev->uc_promisc = 1;
- } else if (dev->uc_count == 0 && dev->uc_promisc) {
+ } else if (dev->uc.count == 0 && dev->uc_promisc) {
__dev_set_promiscuity(dev, -1);
dev->uc_promisc = 0;
}
@@ -3397,6 +3405,316 @@ void dev_set_rx_mode(struct net_device *dev)
netif_addr_unlock_bh(dev);
}
+/* hw addresses list handling functions */
+
+static int __hw_addr_add(struct netdev_hw_addr_list *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ struct netdev_hw_addr *ha;
+ int alloc_size;
+
+ if (addr_len > MAX_ADDR_LEN)
+ return -EINVAL;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (!memcmp(ha->addr, addr, addr_len) &&
+ ha->type == addr_type) {
+ ha->refcount++;
+ return 0;
+ }
+ }
+
+
+ alloc_size = sizeof(*ha);
+ if (alloc_size < L1_CACHE_BYTES)
+ alloc_size = L1_CACHE_BYTES;
+ ha = kmalloc(alloc_size, GFP_ATOMIC);
+ if (!ha)
+ return -ENOMEM;
+ memcpy(ha->addr, addr, addr_len);
+ ha->type = addr_type;
+ ha->refcount = 1;
+ ha->synced = false;
+ list_add_tail_rcu(&ha->list, &list->list);
+ list->count++;
+ return 0;
+}
+
+static void ha_rcu_free(struct rcu_head *head)
+{
+ struct netdev_hw_addr *ha;
+
+ ha = container_of(head, struct netdev_hw_addr, rcu_head);
+ kfree(ha);
+}
+
+static int __hw_addr_del(struct netdev_hw_addr_list *list, unsigned char *addr,
+ int addr_len, unsigned char addr_type)
+{
+ struct netdev_hw_addr *ha;
+
+ list_for_each_entry(ha, &list->list, list) {
+ if (!memcmp(ha->addr, addr, addr_len) &&
+ (ha->type == addr_type || !addr_type)) {
+ if (--ha->refcount)
+ return 0;
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ list->count--;
+ return 0;
+ }
+ }
+ return -ENOENT;
+}
+
+static int __hw_addr_add_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len,
+ unsigned char addr_type)
+{
+ int err;
+ struct netdev_hw_addr *ha, *ha2;
+ unsigned char type;
+
+ list_for_each_entry(ha, &from_list->list, list) {
+ type = addr_type ? addr_type : ha->type;
+ err = __hw_addr_add(to_list, ha->addr, addr_len, type);
+ if (err)
+ goto unroll;
+ }
+ return 0;
+
+unroll:
+ list_for_each_entry(ha2, &from_list->list, list) {
+ if (ha2 == ha)
+ break;
+ type = addr_type ? addr_type : ha2->type;
+ __hw_addr_del(to_list, ha2->addr, addr_len, type);
+ }
+ return err;
+}
+
+static void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len,
+ unsigned char addr_type)
+{
+ struct netdev_hw_addr *ha;
+ unsigned char type;
+
+ list_for_each_entry(ha, &from_list->list, list) {
+ type = addr_type ? addr_type : ha->type;
+ __hw_addr_del(to_list, ha->addr, addr_len, type);
+ }
+}
+
+static int __hw_addr_sync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
+{
+ int err = 0;
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+ if (!ha->synced) {
+ err = __hw_addr_add(to_list, ha->addr,
+ addr_len, ha->type);
+ if (err)
+ break;
+ ha->synced = true;
+ ha->refcount++;
+ } else if (ha->refcount == 1) {
+ __hw_addr_del(to_list, ha->addr, addr_len, ha->type);
+ __hw_addr_del(from_list, ha->addr, addr_len, ha->type);
+ }
+ }
+ return err;
+}
+
+static void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,
+ struct netdev_hw_addr_list *from_list,
+ int addr_len)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &from_list->list, list) {
+ if (ha->synced) {
+ __hw_addr_del(to_list, ha->addr,
+ addr_len, ha->type);
+ ha->synced = false;
+ __hw_addr_del(from_list, ha->addr,
+ addr_len, ha->type);
+ }
+ }
+}
+
+static void __hw_addr_flush(struct netdev_hw_addr_list *list)
+{
+ struct netdev_hw_addr *ha, *tmp;
+
+ list_for_each_entry_safe(ha, tmp, &list->list, list) {
+ list_del_rcu(&ha->list);
+ call_rcu(&ha->rcu_head, ha_rcu_free);
+ }
+ list->count = 0;
+}
+
+static void __hw_addr_init(struct netdev_hw_addr_list *list)
+{
+ INIT_LIST_HEAD(&list->list);
+ list->count = 0;
+}
+
+/* Device addresses handling functions */
+
+static void dev_addr_flush(struct net_device *dev)
+{
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_flush(&dev->dev_addrs);
+ dev->dev_addr = NULL;
+}
+
+static int dev_addr_init(struct net_device *dev)
+{
+ unsigned char addr[MAX_ADDR_LEN];
+ struct netdev_hw_addr *ha;
+ int err;
+
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_init(&dev->dev_addrs);
+ memset(addr, 0, sizeof(addr));
+ err = __hw_addr_add(&dev->dev_addrs, addr, sizeof(addr),
+ NETDEV_HW_ADDR_T_LAN);
+ if (!err) {
+ /*
+ * Get the first (previously created) address from the list
+ * and set dev_addr pointer to this location.
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ dev->dev_addr = ha->addr;
+ }
+ return err;
+}
+
+/**
+ * dev_addr_add - Add a device address
+ * @dev: device
+ * @addr: address to add
+ * @addr_type: address type
+ *
+ * Add a device address to the device or increase the reference count if
+ * it already exists.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ err = __hw_addr_add(&dev->dev_addrs, addr, dev->addr_len, addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add);
+
+/**
+ * dev_addr_del - Release a device address.
+ * @dev: device
+ * @addr: address to delete
+ * @addr_type: address type
+ *
+ * Release reference to a device address and remove it from the device
+ * if the reference count drops to zero.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del(struct net_device *dev, unsigned char *addr,
+ unsigned char addr_type)
+{
+ int err;
+ struct netdev_hw_addr *ha;
+
+ ASSERT_RTNL();
+
+ /*
+ * We can not remove the first address from the list because
+ * dev->dev_addr points to that.
+ */
+ ha = list_first_entry(&dev->dev_addrs.list,
+ struct netdev_hw_addr, list);
+ if (ha->addr == dev->dev_addr && ha->refcount == 1)
+ return -ENOENT;
+
+ err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len,
+ addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_del);
+
+/**
+ * dev_addr_add_multiple - Add device addresses from another device
+ * @to_dev: device to which addresses will be added
+ * @from_dev: device from which addresses will be added
+ * @addr_type: address type - 0 means type will be used from from_dev
+ *
+ * Add device addresses of one device to another.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_add_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ int err;
+
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ err = __hw_addr_add_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+ to_dev->addr_len, addr_type);
+ if (!err)
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return err;
+}
+EXPORT_SYMBOL(dev_addr_add_multiple);
+
+/**
+ * dev_addr_del_multiple - Delete device addresses by another device
+ * @to_dev: device where the addresses will be deleted
+ * @from_dev: device supplying the addresses to be deleted
+ * @addr_type: address type - 0 means type will be used from from_dev
+ *
+ * Deletes the addresses in the to device that are listed in the from device.
+ *
+ * The caller must hold the rtnl_mutex.
+ */
+int dev_addr_del_multiple(struct net_device *to_dev,
+ struct net_device *from_dev,
+ unsigned char addr_type)
+{
+ ASSERT_RTNL();
+
+ if (from_dev->addr_len != to_dev->addr_len)
+ return -EINVAL;
+ __hw_addr_del_multiple(&to_dev->dev_addrs, &from_dev->dev_addrs,
+ to_dev->addr_len, addr_type);
+ call_netdevice_notifiers(NETDEV_CHANGEADDR, to_dev);
+ return 0;
+}
+EXPORT_SYMBOL(dev_addr_del_multiple);
+
+/* multicast addresses handling functions */
+
int __dev_addr_delete(struct dev_addr_list **list, int *count,
void *addr, int alen, int glbl)
{
@@ -3459,24 +3777,22 @@ int __dev_addr_add(struct dev_addr_list **list, int *count,
* dev_unicast_delete - Release secondary unicast address.
* @dev: device
* @addr: address to delete
- * @alen: length of @addr
*
* Release reference to a secondary unicast address and remove it
* from the device if the reference count drops to zero.
*
* The caller must hold the rtnl_mutex.
*/
-int dev_unicast_delete(struct net_device *dev, void *addr, int alen)
+int dev_unicast_delete(struct net_device *dev, void *addr)
{
int err;
ASSERT_RTNL();
- netif_addr_lock_bh(dev);
- err = __dev_addr_delete(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+ err = __hw_addr_del(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST);
if (!err)
__dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_unicast_delete);
@@ -3485,24 +3801,22 @@ EXPORT_SYMBOL(dev_unicast_delete);
* dev_unicast_add - add a secondary unicast address
* @dev: device
* @addr: address to add
- * @alen: length of @addr
*
* Add a secondary unicast address to the device or increase
* the reference count if it already exists.
*
* The caller must hold the rtnl_mutex.
*/
-int dev_unicast_add(struct net_device *dev, void *addr, int alen)
+int dev_unicast_add(struct net_device *dev, void *addr)
{
int err;
ASSERT_RTNL();
- netif_addr_lock_bh(dev);
- err = __dev_addr_add(&dev->uc_list, &dev->uc_count, addr, alen, 0);
+ err = __hw_addr_add(&dev->uc, addr, dev->addr_len,
+ NETDEV_HW_ADDR_T_UNICAST);
if (!err)
__dev_set_rx_mode(dev);
- netif_addr_unlock_bh(dev);
return err;
}
EXPORT_SYMBOL(dev_unicast_add);
@@ -3559,8 +3873,7 @@ void __dev_addr_unsync(struct dev_addr_list **to, int *to_count,
* @from: source device
*
* Add newly added addresses to the destination device and release
- * addresses that have no users left. The source device must be
- * locked by netif_addr_lock_bh.
+ * addresses that have no users left.
*
* This function is intended to be called from the dev->set_rx_mode
* function of layered software devices.
@@ -3569,12 +3882,14 @@ int dev_unicast_sync(struct net_device *to, struct net_device *from)
{
int err = 0;
- netif_addr_lock_bh(to);
- err = __dev_addr_sync(&to->uc_list, &to->uc_count,
- &from->uc_list, &from->uc_count);
+ ASSERT_RTNL();
+
+ if (to->addr_len != from->addr_len)
+ return -EINVAL;
+
+ err = __hw_addr_sync(&to->uc, &from->uc, to->addr_len);
if (!err)
__dev_set_rx_mode(to);
- netif_addr_unlock_bh(to);
return err;
}
EXPORT_SYMBOL(dev_unicast_sync);
@@ -3590,18 +3905,31 @@ EXPORT_SYMBOL(dev_unicast_sync);
*/
void dev_unicast_unsync(struct net_device *to, struct net_device *from)
{
- netif_addr_lock_bh(from);
- netif_addr_lock(to);
+ ASSERT_RTNL();
- __dev_addr_unsync(&to->uc_list, &to->uc_count,
- &from->uc_list, &from->uc_count);
- __dev_set_rx_mode(to);
+ if (to->addr_len != from->addr_len)
+ return;
- netif_addr_unlock(to);
- netif_addr_unlock_bh(from);
+ __hw_addr_unsync(&to->uc, &from->uc, to->addr_len);
+ __dev_set_rx_mode(to);
}
EXPORT_SYMBOL(dev_unicast_unsync);
+static void dev_unicast_flush(struct net_device *dev)
+{
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_flush(&dev->uc);
+}
+
+static void dev_unicast_init(struct net_device *dev)
+{
+ /* rtnl_mutex must be held here */
+
+ __hw_addr_init(&dev->uc);
+}
+
+
static void __dev_addr_discard(struct dev_addr_list **list)
{
struct dev_addr_list *tmp;
@@ -3620,9 +3948,6 @@ static void dev_addr_discard(struct net_device *dev)
{
netif_addr_lock_bh(dev);
- __dev_addr_discard(&dev->uc_list);
- dev->uc_count = 0;
-
__dev_addr_discard(&dev->mc_list);
dev->mc_count = 0;
@@ -4213,6 +4538,7 @@ static void rollback_registered(struct net_device *dev)
/*
* Flush the unicast and multicast chains
*/
+ dev_unicast_flush(dev);
dev_addr_discard(dev);
if (dev->netdev_ops->ndo_uninit)
@@ -4729,6 +5055,8 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev = (struct net_device *)
(((long)p + NETDEV_ALIGN_CONST) & ~NETDEV_ALIGN_CONST);
dev->padded = (char *)dev - (char *)p;
+ dev_unicast_init(dev);
+
dev_net_set(dev, &init_net);
dev->_tx = tx;
@@ -4737,6 +5065,7 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
dev->gso_max_size = GSO_MAX_SIZE;
+ dev_addr_init(dev);
netdev_init_queues(dev);
INIT_LIST_HEAD(&dev->napi_list);
@@ -4762,6 +5091,9 @@ void free_netdev(struct net_device *dev)
kfree(dev->_tx);
+ /* Flush device addresses */
+ dev_addr_flush(dev);
+
list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
netif_napi_del(p);
@@ -4923,6 +5255,7 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/*
* Flush the unicast and multicast chains
*/
+ dev_unicast_flush(dev);
dev_addr_discard(dev);
netdev_unregister_kobject(dev);
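
The new netdev_hw_addr_list code reference-counts each (address, type) pair under the rtnl lock: dev_addr_init() seeds dev_addrs with a zeroed entry so dev->dev_addr keeps pointing at stable storage, and dev_unicast_add()/dev_unicast_delete() now take the address length from dev->addr_len instead of an explicit alen argument. A hedged sketch of a caller adopting the new signatures (the vnet names are illustrative only):

/* Illustrative caller; vnet/lower_dev are assumptions, not from this patch. */
static int vnet_enable_filter(struct net_device *lower_dev, unsigned char *mac)
{
	int err;

	rtnl_lock();
	err = dev_unicast_add(lower_dev, mac);	/* length comes from lower_dev->addr_len */
	if (err)
		printk(KERN_WARNING "vnet: could not add unicast address\n");
	rtnl_unlock();
	return err;
}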
diff --git a/libdde-linux26/lib/src/net/sched/sch_generic.c b/libdde-linux26/lib/src/net/sched/sch_generic.c
index a2acd6c4..252ac987 100644
--- a/libdde-linux26/lib/src/net/sched/sch_generic.c
+++ b/libdde-linux26/lib/src/net/sched/sch_generic.c
@@ -200,6 +200,21 @@ void __qdisc_run(struct Qdisc *q)
clear_bit(__QDISC_STATE_RUNNING, &q->state);
}
+unsigned long dev_trans_start(struct net_device *dev)
+{
+ unsigned long val, res = dev->trans_start;
+ unsigned int i;
+
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ val = netdev_get_tx_queue(dev, i)->trans_start;
+ if (val && time_after(val, res))
+ res = val;
+ }
+ dev->trans_start = res;
+ return res;
+}
+EXPORT_SYMBOL(dev_trans_start);
+
static void dev_watchdog(unsigned long arg)
{
struct net_device *dev = (struct net_device *)arg;
@@ -209,25 +224,30 @@ static void dev_watchdog(unsigned long arg)
if (netif_device_present(dev) &&
netif_running(dev) &&
netif_carrier_ok(dev)) {
- int some_queue_stopped = 0;
+ int some_queue_timedout = 0;
unsigned int i;
+ unsigned long trans_start;
for (i = 0; i < dev->num_tx_queues; i++) {
struct netdev_queue *txq;
txq = netdev_get_tx_queue(dev, i);
- if (netif_tx_queue_stopped(txq)) {
- some_queue_stopped = 1;
+ /*
+ * old device drivers set dev->trans_start
+ */
+ trans_start = txq->trans_start ? : dev->trans_start;
+ if (netif_tx_queue_stopped(txq) &&
+ time_after(jiffies, (trans_start +
+ dev->watchdog_timeo))) {
+ some_queue_timedout = 1;
break;
}
}
- if (some_queue_stopped &&
- time_after(jiffies, (dev->trans_start +
- dev->watchdog_timeo))) {
+ if (some_queue_timedout) {
char drivername[64];
- WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit timed out\n",
- dev->name, netdev_drivername(dev, drivername, 64));
+ WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n",
+ dev->name, netdev_drivername(dev, drivername, 64), i);
dev->netdev_ops->ndo_tx_timeout(dev);
}
if (!mod_timer(&dev->watchdog_timer,
@@ -612,8 +632,10 @@ static void transition_one_qdisc(struct net_device *dev,
clear_bit(__QDISC_STATE_DEACTIVATED, &new_qdisc->state);
rcu_assign_pointer(dev_queue->qdisc, new_qdisc);
- if (need_watchdog_p && new_qdisc != &noqueue_qdisc)
+ if (need_watchdog_p && new_qdisc != &noqueue_qdisc) {
+ dev_queue->trans_start = 0;
*need_watchdog_p = 1;
+ }
}
void dev_activate(struct net_device *dev)