Diffstat (limited to 'pfinet/linux-src/net/ipv4')
-rw-r--r--  pfinet/linux-src/net/ipv4/af_inet.c   116
-rw-r--r--  pfinet/linux-src/net/ipv4/arp.c        97
-rw-r--r--  pfinet/linux-src/net/ipv4/devinet.c    97
-rw-r--r--  pfinet/linux-src/net/ipv4/tcp.c       202
-rw-r--r--  pfinet/linux-src/net/ipv4/udp.c       104
5 files changed, 320 insertions, 296 deletions
diff --git a/pfinet/linux-src/net/ipv4/af_inet.c b/pfinet/linux-src/net/ipv4/af_inet.c
index e37eb6bd..04e05107 100644
--- a/pfinet/linux-src/net/ipv4/af_inet.c
+++ b/pfinet/linux-src/net/ipv4/af_inet.c
@@ -23,10 +23,10 @@
* when accept() ed
* Alan Cox : Semantics of SO_LINGER aren't state moved
* to close when you look carefully. With
- * this fixed and the accept bug fixed
+ * this fixed and the accept bug fixed
* some RPC stuff seems happier.
* Niibe Yutaka : 4.4BSD style write async I/O
- * Alan Cox,
+ * Alan Cox,
* Tony Gale : Fixed reuse semantics.
* Alan Cox : bind() shouldn't abort existing but dead
* sockets. Stops FTP netin:.. I hope.
@@ -141,7 +141,7 @@ int (*rarp_ioctl_hook)(unsigned int,void*) = NULL;
/*
* Destroy an AF_INET socket
*/
-
+
static __inline__ void kill_sk_queues(struct sock *sk)
{
struct sk_buff *skb;
@@ -177,12 +177,12 @@ static __inline__ void kill_sk_later(struct sock *sk)
{
/* this should never happen. */
/* actually it can if an ack has just been sent. */
- /*
+ /*
* It's more normal than that...
* It can happen because a skb is still in the device queues
* [PR]
*/
-
+
NETDEBUG(printk(KERN_DEBUG "Socket destroy delayed (r=%d w=%d)\n",
atomic_read(&sk->rmem_alloc),
atomic_read(&sk->wmem_alloc)));
@@ -223,12 +223,12 @@ void destroy_sock(struct sock *sk)
* socket object. Mostly it punts to the subprotocols of IP to do
* the work.
*/
-
+
/*
* Set socket options on an inet socket.
*/
-
+
int inet_setsockopt(struct socket *sock, int level, int optname,
char *optval, int optlen)
{
@@ -275,7 +275,7 @@ static int inet_autobind(struct sock *sk)
/*
* Move a socket into listening state.
*/
-
+
int inet_listen(struct socket *sock, int backlog)
{
struct sock *sk = sock->sk;
@@ -330,7 +330,7 @@ static int inet_create(struct socket *sock, int protocol)
/* Compatibility */
if (sock->type == SOCK_PACKET) {
- static int warned;
+ static int warned;
if (net_families[PF_PACKET]==NULL)
{
#if defined(CONFIG_KMOD) && defined(CONFIG_PACKET_MODULE)
@@ -348,7 +348,7 @@ static int inet_create(struct socket *sock, int protocol)
sock->state = SS_UNCONNECTED;
sk = sk_alloc(PF_INET, GFP_KERNEL, 1);
- if (sk == NULL)
+ if (sk == NULL)
goto do_oom;
switch (sock->type) {
@@ -392,13 +392,13 @@ static int inet_create(struct socket *sock, int protocol)
}
sock_init_data(sock,sk);
-
+
sk->destruct = NULL;
sk->zapped=0;
#ifdef CONFIG_TCP_NAGLE_OFF
sk->nonagle = 1;
-#endif
+#endif
sk->family = PF_INET;
sk->protocol = protocol;
@@ -414,7 +414,7 @@ static int inet_create(struct socket *sock, int protocol)
sk->ip_mc_ttl=1;
sk->ip_mc_index=0;
sk->ip_mc_list=NULL;
-
+
if (sk->num) {
/* It assumes that any protocol which allows
* the user to assign a number at socket
@@ -459,7 +459,7 @@ do_oom:
* function we are destroying the object and from then on nobody
* should refer to it.
*/
-
+
int inet_release(struct socket *sock, struct socket *peersock)
{
struct sock *sk = sock->sk;
@@ -505,13 +505,13 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* If the socket has its own bind function then use it. (RAW) */
if(sk->prot->bind)
return sk->prot->bind(sk, uaddr, addr_len);
-
+
/* Check these errors (active socket, bad address length, double bind). */
if ((sk->state != TCP_CLOSE) ||
(addr_len < sizeof(struct sockaddr_in)) ||
(sk->num != 0))
return -EINVAL;
-
+
chk_addr_ret = inet_addr_type(addr->sin_addr.s_addr);
if (addr->sin_addr.s_addr != 0 && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST) {
@@ -538,10 +538,10 @@ static int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
/* The kernel masquerader needs some ports. */
if((snum >= PORT_MASQ_BEGIN) && (snum <= PORT_MASQ_END))
return -EADDRINUSE;
-#endif
+#endif
if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
return(-EACCES);
-
+
/* Make sure we are allowed to bind here. */
if (sk->prot->get_port(sk, snum) != 0)
return -EADDRINUSE;
@@ -564,10 +564,10 @@ int inet_dgram_connect(struct socket *sock, struct sockaddr * uaddr,
if (inet_autobind(sk) != 0)
return(-EAGAIN);
- if (sk->prot->connect == NULL)
+ if (sk->prot->connect == NULL)
return(-EOPNOTSUPP);
err = sk->prot->connect(sk, (struct sockaddr *)uaddr, addr_len);
- if (err < 0)
+ if (err < 0)
return(err);
return(0);
}
@@ -594,7 +594,7 @@ static void inet_wait_for_connect(struct sock *sk)
* Connect to a remote host. There is regrettably still a little
* TCP 'magic' in here.
*/
-
+
int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
int addr_len, int flags)
{
@@ -609,7 +609,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
if(sock->state == SS_CONNECTING) {
/* Note: tcp_connected contains SYN_RECV, which may cause
- bogus results here. -AK */
+ bogus results here. -AK */
if(tcp_connected(sk->state)) {
sock->state = SS_CONNECTED;
return 0;
@@ -619,7 +619,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
if (flags & O_NONBLOCK)
return -EALREADY;
} else {
- if (sk->prot->connect == NULL)
+ if (sk->prot->connect == NULL)
return(-EOPNOTSUPP);
/* We may need to bind the socket. */
@@ -635,11 +635,11 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
return(err);
sock->state = SS_CONNECTING;
}
-
+
if (sk->state > TCP_FIN_WAIT2 && sock->state == SS_CONNECTING)
goto sock_error;
- if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
+ if (sk->state != TCP_ESTABLISHED && (flags & O_NONBLOCK))
return (-EINPROGRESS);
if (sk->state == TCP_SYN_SENT || sk->state == TCP_SYN_RECV) {
@@ -650,15 +650,15 @@ int inet_stream_connect(struct socket *sock, struct sockaddr * uaddr,
sock->state = SS_CONNECTED;
if ((sk->state != TCP_ESTABLISHED) && sk->err)
- goto sock_error;
+ goto sock_error;
return(0);
-sock_error:
+sock_error:
/* This is ugly but needed to fix a race in the ICMP error handler */
- if (sk->zapped && sk->state != TCP_CLOSE) {
- lock_sock(sk);
+ if (sk->zapped && sk->state != TCP_CLOSE) {
+ lock_sock(sk);
tcp_set_state(sk, TCP_CLOSE);
- release_sock(sk);
+ release_sock(sk);
sk->zapped = 0;
}
sock->state = SS_UNCONNECTED;
@@ -735,16 +735,16 @@ do_err:
/*
* This does both peername and sockname.
*/
-
+
static int inet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk = sock->sk;
struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
-
+
sin->sin_family = AF_INET;
if (peer) {
- if (!tcp_connected(sk->state))
+ if (!tcp_connected(sk->state))
return(-ENOTCONN);
sin->sin_port = sk->dport;
sin->sin_addr.s_addr = sk->daddr;
@@ -767,10 +767,10 @@ int inet_recvmsg(struct socket *sock, struct msghdr *msg, int size,
struct sock *sk = sock->sk;
int addr_len = 0;
int err;
-
+
if (sock->flags & SO_ACCEPTCON)
return(-EINVAL);
- if (sk->prot->recvmsg == NULL)
+ if (sk->prot->recvmsg == NULL)
return(-EOPNOTSUPP);
/* We may need to bind the socket. */
if (inet_autobind(sk) != 0)
@@ -793,7 +793,7 @@ int inet_sendmsg(struct socket *sock, struct msghdr *msg, int size,
send_sig(SIGPIPE, current, 1);
return(-EPIPE);
}
- if (sk->prot->sendmsg == NULL)
+ if (sk->prot->sendmsg == NULL)
return(-EOPNOTSUPP);
if(sk->err)
return sock_error(sk);
@@ -822,7 +822,7 @@ int inet_shutdown(struct socket *sock, int how)
return(-ENOTCONN);
if (sock->state == SS_CONNECTING && sk->state == TCP_ESTABLISHED)
sock->state = SS_CONNECTED;
- if (!tcp_connected(sk->state))
+ if (!tcp_connected(sk->state))
return(-ENOTCONN);
sk->shutdown |= how;
if (sk->prot->shutdown)
@@ -842,6 +842,10 @@ unsigned int inet_poll(struct file * file, struct socket *sock, poll_table *wait
return sk->prot->poll(file, sock, wait);
}
+#ifdef _HURD_
+#define inet_ioctl 0
+#else
+
/*
* ioctl() calls you can issue on an INET socket. Most of these are
* device configuration and stuff and very rarely used. Some ioctls
@@ -858,14 +862,14 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
int err;
int pid;
- switch(cmd)
+ switch(cmd)
{
case FIOSETOWN:
case SIOCSPGRP:
err = get_user(pid, (int *) arg);
if (err)
- return err;
- if (current->pid != pid && current->pgrp != -pid &&
+ return err;
+ if (current->pid != pid && current->pgrp != -pid &&
!capable(CAP_NET_ADMIN))
return -EPERM;
sk->proc = pid;
@@ -905,18 +909,18 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
case SIOCSIFNETMASK:
case SIOCGIFDSTADDR:
case SIOCSIFDSTADDR:
- case SIOCSIFPFLAGS:
- case SIOCGIFPFLAGS:
+ case SIOCSIFPFLAGS:
+ case SIOCGIFPFLAGS:
case SIOCSIFFLAGS:
return(devinet_ioctl(cmd,(void *) arg));
case SIOCGIFBR:
case SIOCSIFBR:
-#ifdef CONFIG_BRIDGE
+#ifdef CONFIG_BRIDGE
return(br_ioctl(cmd,(void *) arg));
#else
return -ENOPKG;
-#endif
-
+#endif
+
case SIOCADDDLCI:
case SIOCDELDLCI:
#ifdef CONFIG_DLCI
@@ -946,13 +950,15 @@ static int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
#endif
if (sk->prot->ioctl==NULL || (err=sk->prot->ioctl(sk, cmd, arg))==-ENOIOCTLCMD)
- return(dev_ioctl(cmd,(void *) arg));
+ return(dev_ioctl(cmd,(void *) arg));
return err;
}
/*NOTREACHED*/
return(0);
}
+#endif
+
struct proto_ops inet_stream_ops = {
PF_INET,
@@ -962,7 +968,7 @@ struct proto_ops inet_stream_ops = {
inet_stream_connect,
sock_no_socketpair,
inet_accept,
- inet_getname,
+ inet_getname,
inet_poll,
inet_ioctl,
inet_listen,
@@ -983,7 +989,7 @@ struct proto_ops inet_dgram_ops = {
inet_dgram_connect,
sock_no_socketpair,
sock_no_accept,
- inet_getname,
+ inet_getname,
datagram_poll,
inet_ioctl,
sock_no_listen,
@@ -1053,9 +1059,9 @@ extern void tcp_v4_init(struct net_proto_family *);
/*
- * Called by socket.c on kernel startup.
+ * Called by socket.c on kernel startup.
*/
-
+
__initfunc(void inet_proto_init(struct net_proto *pro))
{
struct sk_buff *dummy_skb;
@@ -1070,17 +1076,17 @@ __initfunc(void inet_proto_init(struct net_proto *pro))
}
/*
- * Tell SOCKET that we are alive...
+ * Tell SOCKET that we are alive...
*/
-
+
(void) sock_register(&inet_family_ops);
/*
- * Add all the protocols.
+ * Add all the protocols.
*/
printk(KERN_INFO "IP Protocols: ");
- for(p = inet_protocol_base; p != NULL;)
+ for(p = inet_protocol_base; p != NULL;)
{
struct inet_protocol *tmp = (struct inet_protocol *) p->next;
inet_add_protocol(p);
@@ -1132,7 +1138,7 @@ __initfunc(void inet_proto_init(struct net_proto *pro))
#ifdef CONFIG_IP_MASQUERADE
ip_masq_init();
#endif
-
+
/*
* Initialise the multicast router
*/
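
[Note on the af_inet.c hunks above: under _HURD_ the patch defines inet_ioctl to 0, so the ioctl slot in the proto_ops tables becomes a null pointer instead of the Linux dispatcher. The sketch below only illustrates the convention the surrounding code already follows (inet_ioctl's own NULL check on sk->prot->ioctl); do_protocol_ioctl is a hypothetical wrapper, not part of the patch.]

/* Call a protocol's ioctl hook only when one is provided; a stubbed-out
 * hook (0, as in the _HURD_ build) is treated as "no per-protocol
 * handler" so the caller can fall back to the generic path. */
static int do_protocol_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	if (sk->prot->ioctl == NULL)
		return -ENOIOCTLCMD;	/* let the generic layer handle it */
	return sk->prot->ioctl(sk, cmd, arg);
}
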
diff --git a/pfinet/linux-src/net/ipv4/arp.c b/pfinet/linux-src/net/ipv4/arp.c
index d81c1bee..508142d4 100644
--- a/pfinet/linux-src/net/ipv4/arp.c
+++ b/pfinet/linux-src/net/ipv4/arp.c
@@ -1,6 +1,6 @@
/* linux/net/inet/arp.c
*
- * Version: $Id: arp.c,v 1.77.2.4 1999/09/23 19:03:36 davem Exp $
+ * Version: $Id: arp.c,v 1.77.2.1 1999/06/28 10:39:23 davem Exp $
*
* Copyright (C) 1994 by Florian La Roche
*
@@ -15,9 +15,9 @@
* 2 of the License, or (at your option) any later version.
*
* Fixes:
- * Alan Cox : Removed the Ethernet assumptions in
+ * Alan Cox : Removed the Ethernet assumptions in
* Florian's code
- * Alan Cox : Fixed some small errors in the ARP
+ * Alan Cox : Fixed some small errors in the ARP
* logic
* Alan Cox : Allow >4K in /proc
* Alan Cox : Make ARP add its own protocol entry
@@ -39,18 +39,18 @@
* Jonathan Naylor : Only lookup the hardware address for
* the correct hardware type.
* Germano Caronni : Assorted subtle races.
- * Craig Schlenter : Don't modify permanent entry
+ * Craig Schlenter : Don't modify permanent entry
* during arp_rcv.
* Russ Nelson : Tidied up a few bits.
* Alexey Kuznetsov: Major changes to caching and behaviour,
- * eg intelligent arp probing and
+ * eg intelligent arp probing and
* generation
* of host down events.
* Alan Cox : Missing unlock in device events.
* Eckes : ARP ioctl control errors.
* Alexey Kuznetsov: Arp free fix.
* Manuel Rodriguez: Gratuitous ARP.
- * Jonathan Layes : Added arpd support through kerneld
+ * Jonathan Layes : Added arpd support through kerneld
* message queue (960314)
* Mike Shaver : /proc/sys/net/ipv4/arp_* support
* Mike McLagan : Routing by source
@@ -65,8 +65,6 @@
* clean up the APFDDI & gen. FDDI bits.
* Alexey Kuznetsov: new arp state machine;
* now it is in net/core/neighbour.c.
- * Julian Anastasov: "hidden" flag: hide the
- * interface and don't reply for it
*/
/* RFC1122 Status:
@@ -79,7 +77,7 @@
unresolved IP address. (OK)
950727 -- MS
*/
-
+
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
@@ -264,7 +262,7 @@ static int arp_constructor(struct neighbour *neigh)
switch (dev->type) {
default:
break;
- case ARPHRD_ROSE:
+ case ARPHRD_ROSE:
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
case ARPHRD_AX25:
#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
@@ -310,15 +308,10 @@ static void arp_solicit(struct neighbour *neigh, struct sk_buff *skb)
u32 saddr;
u8 *dst_ha = NULL;
struct device *dev = neigh->dev;
- struct device *dev2;
- struct in_device *in_dev2;
u32 target = *(u32*)neigh->primary_key;
int probes = neigh->probes;
- if (skb &&
- (dev2 = ip_dev_find(skb->nh.iph->saddr)) != NULL &&
- (in_dev2 = dev2->ip_ptr) != NULL &&
- !IN_DEV_HIDDEN(in_dev2))
+ if (skb && inet_addr_type(skb->nh.iph->saddr) == RTN_LOCAL)
saddr = skb->nh.iph->saddr;
else
saddr = inet_select_addr(dev, target, RT_SCOPE_LINK);
@@ -431,8 +424,8 @@ int arp_bind_neighbour(struct dst_entry *dst)
* message.
*/
-void arp_send(int type, int ptype, u32 dest_ip,
- struct device *dev, u32 src_ip,
+void arp_send(int type, int ptype, u32 dest_ip,
+ struct device *dev, u32 src_ip,
unsigned char *dest_hw, unsigned char *src_hw,
unsigned char *target_hw)
{
@@ -443,14 +436,14 @@ void arp_send(int type, int ptype, u32 dest_ip,
/*
* No arp on this interface.
*/
-
+
if (dev->flags&IFF_NOARP)
return;
/*
* Allocate a buffer
*/
-
+
skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
+ dev->hard_header_len + 15, GFP_ATOMIC);
if (skb == NULL)
@@ -556,10 +549,10 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
* of the device. Similarly, the hardware types should match. The
* device should be ARP-able. Also, if pln is not 4, then the lookup
* is not from an IP number. We can't currently handle this, so toss
- * it.
- */
+ * it.
+ */
if (in_dev == NULL ||
- arp->ar_hln != dev->addr_len ||
+ arp->ar_hln != dev->addr_len ||
dev->flags & IFF_NOARP ||
skb->pkt_type == PACKET_OTHERHOST ||
skb->pkt_type == PACKET_LOOPBACK ||
@@ -567,7 +560,7 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
goto out;
switch (dev_type) {
- default:
+ default:
if (arp->ar_pro != __constant_htons(ETH_P_IP))
goto out;
if (htons(dev_type) != arp->ar_hrd)
@@ -634,7 +627,7 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
tha=arp_ptr;
arp_ptr += dev->addr_len;
memcpy(&tip, arp_ptr, 4);
-/*
+/*
* Check for bad requests for 127.x.x.x and requests for multicast
* addresses. If this is one such, delete it.
*/
@@ -645,29 +638,23 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
* Process entry. The idea here is we want to send a reply if it is a
* request for us or if it is a request for someone else that we hold
* a proxy for. We want to add an entry to our cache if it is a reply
- * to us or if it is a request for our address.
- * (The assumption for this last is that if someone is requesting our
- * address, they are probably intending to talk to us, so it saves time
- * if we cache their address. Their address is also probably not in
+ * to us or if it is a request for our address.
+ * (The assumption for this last is that if someone is requesting our
+ * address, they are probably intending to talk to us, so it saves time
+ * if we cache their address. Their address is also probably not in
* our cache, since ours is not in their cache.)
- *
+ *
* Putting this another way, we only care about replies if they are to
* us, in which case we add them to the cache. For requests, we care
* about those for us and those for our proxies. We reply to both,
- * and in the case of requests for us we add the requester to the arp
+ * and in the case of requests for us we add the requester to the arp
* cache.
*/
/* Special case: IPv4 duplicate address detection packet (RFC2131) */
if (sip == 0) {
- struct device *dev2;
- struct in_device *in_dev2;
-
if (arp->ar_op == __constant_htons(ARPOP_REQUEST) &&
- (dev2 = ip_dev_find(tip)) != NULL &&
- (dev2 == dev ||
- ((in_dev2 = dev2->ip_ptr) != NULL &&
- !IN_DEV_HIDDEN(in_dev2))))
+ inet_addr_type(tip) == RTN_LOCAL)
arp_send(ARPOP_REPLY,ETH_P_ARP,tip,dev,tip,sha,dev->dev_addr,dev->dev_addr);
goto out;
}
@@ -681,20 +668,6 @@ int arp_rcv(struct sk_buff *skb, struct device *dev, struct packet_type *pt)
if (addr_type == RTN_LOCAL) {
n = neigh_event_ns(&arp_tbl, sha, &sip, dev);
if (n) {
- if (ipv4_devconf.hidden &&
- skb->pkt_type != PACKET_HOST) {
- struct device *dev2;
- struct in_device *in_dev2;
-
- if ((dev2 = ip_dev_find(tip)) != NULL &&
- dev2 != dev &&
- (in_dev2 = dev2->ip_ptr) != NULL &&
- IN_DEV_HIDDEN(in_dev2)) {
- neigh_release(n);
- goto out;
- }
- }
-
arp_send(ARPOP_REPLY,ETH_P_ARP,sip,dev,tip,sha,dev->dev_addr,sha);
neigh_release(n);
}
@@ -813,7 +786,7 @@ int arp_req_set(struct arpreq *r, struct device * dev)
if (!dev)
return -EINVAL;
}
- if (r->arp_ha.sa_family != dev->type)
+ if (r->arp_ha.sa_family != dev->type)
return -EINVAL;
err = -ENOBUFS;
@@ -910,6 +883,9 @@ int arp_req_delete(struct arpreq *r, struct device * dev)
return err;
}
+#ifdef _HURD_
+#define arp_ioctl 0
+#else
/*
* Handle an ARP layer I/O control request.
*/
@@ -977,6 +953,7 @@ out:
rtnl_unlock();
return err;
}
+#endif
/*
* Write the contents of the ARP cache to a PROCfs file.
@@ -1030,7 +1007,7 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
hbuffer[k++]=':';
}
hbuffer[--k]=0;
-
+
#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
}
#endif
@@ -1047,7 +1024,7 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
"%-17s0x%-10x0x%-10x%s",
in_ntoa(*(u32*)n->primary_key),
hatype,
- arp_state_to_flags(n),
+ arp_state_to_flags(n),
hbuffer);
size += sprintf(buffer+len+size,
" %-17s %s\n",
@@ -1055,7 +1032,7 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
len += size;
pos += size;
-
+
if (pos <= offset)
len=0;
if (pos >= offset+length)
@@ -1081,7 +1058,7 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
len += size;
pos += size;
-
+
if (pos <= offset)
len=0;
if (pos >= offset+length)
@@ -1091,7 +1068,7 @@ int arp_get_info(char *buffer, char **start, off_t offset, int length, int dummy
done:
neigh_table_unlock(&arp_tbl);
-
+
*start = buffer+len-(pos-offset); /* Start of wanted data */
len = pos-offset; /* Start slop */
if (len>length)
@@ -1165,14 +1142,14 @@ char *ax2asc(ax25_address *a)
if (c != ' ') *s++ = c;
}
-
+
*s++ = '-';
if ((n = ((a->ax25_call[6] >> 1) & 0x0F)) > 9) {
*s++ = '1';
n -= 10;
}
-
+
*s++ = n + '0';
*s++ = '\0';
diff --git a/pfinet/linux-src/net/ipv4/devinet.c b/pfinet/linux-src/net/ipv4/devinet.c
index 41b42a52..d980631b 100644
--- a/pfinet/linux-src/net/ipv4/devinet.c
+++ b/pfinet/linux-src/net/ipv4/devinet.c
@@ -23,7 +23,7 @@
*/
#include <linux/config.h>
-
+
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
@@ -370,8 +370,8 @@ inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
#endif
-/*
- * Determine a default network mask, based on the IP address.
+/*
+ * Determine a default network mask, based on the IP address.
*/
static __inline__ int inet_abc_len(u32 addr)
@@ -380,21 +380,79 @@ static __inline__ int inet_abc_len(u32 addr)
return 0;
addr = ntohl(addr);
- if (IN_CLASSA(addr))
+ if (IN_CLASSA(addr))
return 8;
- if (IN_CLASSB(addr))
+ if (IN_CLASSB(addr))
return 16;
- if (IN_CLASSC(addr))
+ if (IN_CLASSC(addr))
return 24;
/*
- * Something else, probably a multicast.
+ * Something else, probably a multicast.
*/
-
+
return -1;
}
+#ifdef _HURD_
+
+#define devinet_ioctl 0
+
+error_t
+configure_device (struct device *dev,
+ uint32_t addr, uint32_t netmask)
+{
+ struct in_device *in_dev = dev->ip_ptr;
+ struct in_ifaddr *ifa = in_dev ? in_dev->ifa_list : 0;
+
+ if (ifa)
+ {
+ inet_del_ifa (in_dev, &in_dev->ifa_list, 0);
+ ifa->ifa_broadcast = 0;
+ ifa->ifa_anycast = 0;
+ }
+ else
+ {
+ ifa = inet_alloc_ifa ();
+ if (!ifa)
+ return ENOBUFS;
+ memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
+ }
+
+ if (addr != INADDR_NONE)
+ ifa->ifa_address = ifa->ifa_local = addr;
+ if (netmask != INADDR_NONE)
+ {
+ ifa->ifa_mask = netmask;
+ ifa->ifa_prefixlen = inet_mask_len (ifa->ifa_mask);
+ if ((dev->flags&IFF_BROADCAST) && ifa->ifa_prefixlen < 31)
+ ifa->ifa_broadcast = ifa->ifa_address|~ifa->ifa_mask;
+ else
+ ifa->ifa_broadcast = 0;
+ }
+
+ return - inet_set_ifa (dev, ifa);
+}
+
+void
+inquire_device (struct device *dev,
+ uint32_t *addr, uint32_t *netmask)
+{
+ struct in_device *in_dev = dev->ip_ptr;
+ struct in_ifaddr *ifa = in_dev ? in_dev->ifa_list : 0;
+
+ if (ifa)
+ {
+ *addr = ifa->ifa_local;
+ *netmask = ifa->ifa_mask;
+ }
+ else
+ *addr = *netmask = INADDR_NONE;
+}
+
+#else
+
int devinet_ioctl(unsigned int cmd, void *arg)
{
struct ifreq ifr;
@@ -514,7 +572,7 @@ int devinet_ioctl(unsigned int cmd, void *arg)
#endif
ret = dev_change_flags(dev, ifr.ifr_flags);
break;
-
+
case SIOCSIFADDR: /* Set interface address (and family) */
if (inet_abc_len(sin->sin_addr.s_addr) < 0) {
ret = -EINVAL;
@@ -563,7 +621,7 @@ int devinet_ioctl(unsigned int cmd, void *arg)
inet_insert_ifa(in_dev, ifa);
}
break;
-
+
case SIOCSIFDSTADDR: /* Set the destination address */
if (ifa->ifa_address != sin->sin_addr.s_addr) {
if (inet_abc_len(sin->sin_addr.s_addr) < 0) {
@@ -605,6 +663,8 @@ rarok:
return 0;
}
+#endif
+
static int
inet_gifconf(struct device *dev, char *buf, int len)
{
@@ -657,8 +717,8 @@ u32 inet_select_addr(struct device *dev, u32 dst, int scope)
if (!addr)
addr = ifa->ifa_local;
} endfor_ifa(in_dev);
-
- if (addr)
+
+ if (addr || scope >= RT_SCOPE_LINK)
return addr;
/* Not loopback addresses on loopback should be preferred
@@ -670,9 +730,7 @@ u32 inet_select_addr(struct device *dev, u32 dst, int scope)
continue;
for_primary_ifa(in_dev) {
- if (!IN_DEV_HIDDEN(in_dev) &&
- ifa->ifa_scope <= scope &&
- ifa->ifa_scope != RT_SCOPE_LINK)
+ if (ifa->ifa_scope <= scope)
return ifa->ifa_local;
} endfor_ifa(in_dev);
}
@@ -693,7 +751,7 @@ int unregister_inetaddr_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&inetaddr_chain,nb);
}
-
+
static int inetdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct device *dev = ptr;
@@ -727,7 +785,7 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, void
case NETDEV_DOWN:
ip_mc_down(in_dev);
break;
- case NETDEV_CHANGEMTU:
+ case NETDEV_CHANGEMTU:
if (dev->mtu >= 68)
break;
/* MTU falled under minimal IP mtu. Disable IP. */
@@ -925,7 +983,7 @@ int devinet_sysctl_forward(ctl_table *ctl, int write, struct file * filp,
static struct devinet_sysctl_table
{
struct ctl_table_header *sysctl_header;
- ctl_table devinet_vars[13];
+ ctl_table devinet_vars[12];
ctl_table devinet_dev[2];
ctl_table devinet_conf_dir[2];
ctl_table devinet_proto_dir[2];
@@ -965,9 +1023,6 @@ static struct devinet_sysctl_table
{NET_IPV4_CONF_LOG_MARTIANS, "log_martians",
&ipv4_devconf.log_martians, sizeof(int), 0644, NULL,
&proc_dointvec},
- {NET_IPV4_CONF_HIDDEN, "hidden",
- &ipv4_devconf.hidden, sizeof(int), 0644, NULL,
- &proc_dointvec},
{0}},
{{NET_PROTO_CONF_ALL, "all", NULL, 0, 0555, devinet_sysctl.devinet_vars},{0}},
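
[Note on the devinet.c hunks above: configure_device and inquire_device give the Hurd side a direct way to set and read an interface's address and netmask without going through devinet_ioctl. The sketch below is a hedged usage example; set_interface_address and its calling convention are assumptions, only the two helpers themselves come from the patch.]

/* Reassign an interface's address while preserving whatever netmask is
 * currently configured; INADDR_NONE tells configure_device to leave a
 * field untouched, which is exactly what inquire_device reports when
 * nothing is configured yet. */
error_t
set_interface_address (struct device *dev, uint32_t addr)
{
  uint32_t old_addr, old_mask;

  inquire_device (dev, &old_addr, &old_mask);
  return configure_device (dev, addr, old_mask);
}
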
diff --git a/pfinet/linux-src/net/ipv4/tcp.c b/pfinet/linux-src/net/ipv4/tcp.c
index 68b7e5e7..89e1bbbf 100644
--- a/pfinet/linux-src/net/ipv4/tcp.c
+++ b/pfinet/linux-src/net/ipv4/tcp.c
@@ -5,7 +5,7 @@
*
* Implementation of the Transmission Control Protocol(TCP).
*
- * Version: $Id: tcp.c,v 1.140.2.5 1999/09/23 19:21:16 davem Exp $
+ * Version: $Id: tcp.c,v 1.140.2.4 1999/08/09 03:13:12 davem Exp $
*
* Authors: Ross Biro, <bir7@leland.Stanford.Edu>
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
@@ -202,7 +202,7 @@
* Eric Schenk : Fix fast close down bug with
* shutdown() followed by close().
* Andi Kleen : Make poll agree with SIGIO
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -249,8 +249,8 @@
* for violations and the like. tcp.c is just too big... If I say something
* "does?" or "doesn't?", it means I'm not sure, and will have to hash it out
* with Alan. -- MS 950903
- * [Note: Most of the TCP code has been rewriten/redesigned since this
- * RFC1122 check. It is probably not correct anymore. It should be redone
+ * [Note: Most of the TCP code has been rewriten/redesigned since this
+ * RFC1122 check. It is probably not correct anymore. It should be redone
* before 2.2. -AK]
*
* Use of PSH (4.2.2.2)
@@ -384,14 +384,14 @@
*
* ICMP messages (4.2.3.9)
* MUST act on ICMP errors. (does)
- * MUST slow transmission upon receipt of a Source Quench. (doesn't anymore
+ * MUST slow transmission upon receipt of a Source Quench. (doesn't anymore
* because that is deprecated now by the IETF, can be turned on)
* MUST NOT abort connection upon receipt of soft Destination
* Unreachables (0, 1, 5), Time Exceededs and Parameter
* Problems. (doesn't)
* SHOULD report soft Destination Unreachables etc. to the
* application. (does, except during SYN_RECV and may drop messages
- * in some rare cases before accept() - ICMP is unreliable)
+ * in some rare cases before accept() - ICMP is unreliable)
* SHOULD abort connection upon receipt of hard Destination Unreachable
* messages (2, 3, 4). (does, but see above)
*
@@ -435,20 +435,20 @@ kmem_cache_t *tcp_timewait_cachep;
* the socket locked or with interrupts disabled
*/
-static struct open_request *tcp_find_established(struct tcp_opt *tp,
+static struct open_request *tcp_find_established(struct tcp_opt *tp,
struct open_request **prevp)
{
struct open_request *req = tp->syn_wait_queue;
- struct open_request *prev = (struct open_request *)&tp->syn_wait_queue;
+ struct open_request *prev = (struct open_request *)&tp->syn_wait_queue;
while(req) {
- if (req->sk &&
+ if (req->sk &&
((1 << req->sk->state) &
~(TCPF_SYN_SENT|TCPF_SYN_RECV)))
break;
- prev = req;
+ prev = req;
req = req->dl_next;
}
- *prevp = prev;
+ *prevp = prev;
return req;
}
@@ -539,7 +539,7 @@ static unsigned int tcp_listen_poll(struct sock *sk, poll_table *wait)
}
/*
- * Compute minimal free write space needed to queue new packets.
+ * Compute minimal free write space needed to queue new packets.
*/
#define tcp_min_write_space(__sk) \
(atomic_read(&(__sk)->wmem_alloc) / 2)
@@ -605,12 +605,12 @@ unsigned int tcp_poll(struct file * file, struct socket *sock, poll_table *wait)
/*
* Socket write_space callback.
- * This (or rather the sock_wake_async) should agree with poll.
+ * This (or rather the sock_wake_async) should agree with poll.
*/
void tcp_write_space(struct sock *sk)
{
if (sk->dead)
- return;
+ return;
wake_up_interruptible(sk->sleep);
if (sock_wspace(sk) >=
@@ -619,6 +619,23 @@ void tcp_write_space(struct sock *sk)
}
+#ifdef _HURD_
+
+#define tcp_ioctl 0
+
+error_t
+tcp_tiocinq(struct sock *sk, mach_msg_type_number_t *amount)
+{
+ if (sk->state == TCP_LISTEN)
+ return EINVAL;
+ lock_sock(sk);
+ *amount = tcp_readable(sk);
+ release_sock(sk);
+ return 0;
+}
+
+#else
+
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
int answ;
@@ -652,6 +669,8 @@ int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
return put_user(answ, (int *)arg);
}
+#endif
+
/*
* Wait for a socket to get into the connected state
*
@@ -727,20 +746,20 @@ static void wait_for_tcp_memory(struct sock * sk)
/*
* Wait for a buffer.
- */
-static int wait_for_buffer(struct sock *sk)
-{
- struct wait_queue wait = { current, NULL };
-
- release_sock(sk);
- add_wait_queue(sk->sleep, &wait);
- current->state = TASK_INTERRUPTIBLE;
- schedule();
- current->state = TASK_RUNNING;
+ */
+static int wait_for_buffer(struct sock *sk)
+{
+ struct wait_queue wait = { current, NULL };
+
+ release_sock(sk);
+ add_wait_queue(sk->sleep, &wait);
+ current->state = TASK_INTERRUPTIBLE;
+ schedule();
+ current->state = TASK_RUNNING;
remove_wait_queue(sk->sleep, &wait);
- lock_sock(sk);
- return 0;
-}
+ lock_sock(sk);
+ return 0;
+}
/* When all user supplied data has been queued set the PSH bit */
#define PSH_NEEDED (seglen == 0 && iovlen == 0)
@@ -781,7 +800,7 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
iovlen = msg->msg_iovlen;
iov = msg->msg_iov;
copied = 0;
-
+
while(--iovlen >= 0) {
int seglen=iov->iov_len;
unsigned char * from=iov->iov_base;
@@ -801,7 +820,7 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
/* Make sure that we are established. */
if (sk->shutdown & SEND_SHUTDOWN)
goto do_shutdown;
-
+
/* Now we need to check if we have a half
* built packet we can tack some data onto.
*/
@@ -809,7 +828,7 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
skb = sk->write_queue.prev;
copy = skb->len;
/* If the remote does SWS avoidance we should
- * queue the best we can if not we should in
+ * queue the best we can if not we should in
* fact send multiple packets...
* A method for detecting this would be most
* welcome.
@@ -819,21 +838,21 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
tp->snd_nxt < TCP_SKB_CB(skb)->end_seq) {
int last_byte_was_odd = (copy % 4);
- /*
+ /*
* Check for parallel writers sleeping in user access.
- */
- if (tp->partial_writers++ > 0) {
+ */
+ if (tp->partial_writers++ > 0) {
wait_for_buffer(sk);
tp->partial_writers--;
- continue;
+ continue;
}
-
+
copy = mss_now - copy;
if(copy > skb_tailroom(skb))
copy = skb_tailroom(skb);
if(copy > seglen)
copy = seglen;
-
+
if(last_byte_was_odd) {
if(copy_from_user(skb_put(skb, copy),
from, copy))
@@ -846,7 +865,7 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
from, skb_put(skb, copy),
copy, skb->csum, &err);
}
-
+
/*
* FIXME: the *_user functions should
* return how much data was
@@ -867,8 +886,8 @@ int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg)
if (PSH_NEEDED)
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
- if (--tp->partial_writers > 0)
- wake_up_interruptible(sk->sleep);
+ if (--tp->partial_writers > 0)
+ wake_up_interruptible(sk->sleep);
continue;
}
@@ -1012,7 +1031,7 @@ out:
* this with tcp_send_ack().
* This is called for delayed acks also.
*/
-
+
void tcp_read_wakeup(struct sock *sk)
{
/* If we're closed, don't send an ack, or we'll get a RST
@@ -1028,7 +1047,7 @@ void tcp_read_wakeup(struct sock *sk)
*/
static int tcp_recv_urg(struct sock * sk, int nonblock,
- struct msghdr *msg, int len, int flags,
+ struct msghdr *msg, int len, int flags,
int *addr_len)
{
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
@@ -1050,15 +1069,15 @@ static int tcp_recv_urg(struct sock * sk, int nonblock,
lock_sock(sk);
if (tp->urg_data & URG_VALID) {
- int err = 0;
+ int err = 0;
char c = tp->urg_data;
if (!(flags & MSG_PEEK))
tp->urg_data = URG_READ;
-
+
if(msg->msg_name)
tp->af_specific->addr2sockaddr(sk, (struct sockaddr *)
- msg->msg_name);
+ msg->msg_name);
if(addr_len)
*addr_len = tp->af_specific->sockaddr_len;
@@ -1075,8 +1094,8 @@ static int tcp_recv_urg(struct sock * sk, int nonblock,
}
else
msg->msg_flags|=MSG_TRUNC;
-
- /* N.B. Is this right?? If len == 0 we didn't read any data */
+
+ /* N.B. Is this right?? If len == 0 we didn't read any data */
return err ? -EFAULT : 1;
}
release_sock(sk);
@@ -1111,7 +1130,7 @@ static inline void tcp_eat_skb(struct sock *sk, struct sk_buff * skb)
static void cleanup_rbuf(struct sock *sk, int copied)
{
struct sk_buff *skb;
-
+
/* NOTE! The socket must be locked, so that we don't get
* a messed-up receive queue.
*/
@@ -1143,9 +1162,9 @@ static void cleanup_rbuf(struct sock *sk, int copied)
/*
- * This routine copies from a sock struct into the user buffer.
+ * This routine copies from a sock struct into the user buffer.
*/
-
+
int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
int len, int nonblock, int flags, int *addr_len)
{
@@ -1155,7 +1174,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
u32 peek_seq;
volatile u32 *seq; /* So gcc doesn't overoptimise */
unsigned long used;
- int err = 0;
+ int err = 0;
int target = 1; /* Read at least this many bytes */
if (sk->err)
@@ -1176,20 +1195,20 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
seq = &tp->copied_seq;
if (flags & MSG_PEEK)
seq = &peek_seq;
-
+
/* Handle the POSIX bogosity MSG_WAITALL. */
if (flags & MSG_WAITALL)
target=len;
add_wait_queue(sk->sleep, &wait);
lock_sock(sk);
-
+
/*
* BUG BUG BUG
- * This violates 1003.1g compliance. We must wait for
+ * This violates 1003.1g compliance. We must wait for
* data to exist even if we read none!
*/
-
+
while (len > 0) {
struct sk_buff * skb;
u32 offset;
@@ -1219,7 +1238,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
if (!skb)
break;
- /* Now that we have two receive queues this
+ /* Now that we have two receive queues this
* shouldn't happen.
*/
if (before(*seq, TCP_SKB_CB(skb)->seq)) {
@@ -1366,7 +1385,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg,
if(copied >= 0 && msg->msg_name) {
tp->af_specific->addr2sockaddr(sk, (struct sockaddr *)
- msg->msg_name);
+ msg->msg_name);
if(addr_len)
*addr_len = tp->af_specific->sockaddr_len;
}
@@ -1483,13 +1502,13 @@ static void tcp_close_pending (struct sock *sk)
while(req) {
struct open_request *iter;
-
+
if (req->sk)
tcp_close(req->sk, 0);
iter = req;
req = req->dl_next;
-
+
(*iter->class->destructor)(iter);
tcp_dec_slow_timer(TCP_SLT_SYNACK);
sk->ack_backlog--;
@@ -1583,7 +1602,7 @@ void tcp_close(struct sock *sk, long timeout)
tsk->state = TASK_RUNNING;
remove_wait_queue(sk->sleep, &wait);
-
+
lock_sock(sk);
}
@@ -1613,7 +1632,7 @@ static struct open_request * wait_for_connect(struct sock * sk,
schedule();
lock_sock(sk);
req = tcp_find_established(&(sk->tp_pinfo.af_tcp), pprev);
- if (req)
+ if (req)
break;
if (signal_pending(current))
break;
@@ -1636,7 +1655,7 @@ struct sock *tcp_accept(struct sock *sk, int flags)
struct sock *newsk = NULL;
int error;
- lock_sock(sk);
+ lock_sock(sk);
/* We need to make sure that this socket is listening,
* and that it has something pending.
@@ -1652,10 +1671,10 @@ struct sock *tcp_accept(struct sock *sk, int flags)
error = EAGAIN;
if (flags & O_NONBLOCK)
goto out;
-
+
error = ERESTARTSYS;
req = wait_for_connect(sk, &prev);
- if (!req)
+ if (!req)
goto out;
}
@@ -1663,7 +1682,7 @@ struct sock *tcp_accept(struct sock *sk, int flags)
newsk = req->sk;
req->class->destructor(req);
tcp_openreq_free(req);
- sk->ack_backlog--;
+ sk->ack_backlog--;
if(sk->keepopen)
tcp_inc_slow_timer(TCP_SLT_KEEPALIVE);
@@ -1673,26 +1692,26 @@ struct sock *tcp_accept(struct sock *sk, int flags)
out:
/* sk should be in LISTEN state, thus accept can use sk->err for
* internal purposes without stomping one anyone's feed.
- */
- sk->err = error;
+ */
+ sk->err = error;
release_sock(sk);
return newsk;
}
/*
- * Socket option code for TCP.
+ * Socket option code for TCP.
*/
-
-int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
+
+int tcp_setsockopt(struct sock *sk, int level, int optname, char *optval,
int optlen)
{
struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
int val;
if (level != SOL_TCP)
- return tp->af_specific->setsockopt(sk, level, optname,
+ return tp->af_specific->setsockopt(sk, level, optname,
optval, optlen);
-
+
if(optlen<sizeof(int))
return -EINVAL;
@@ -1798,8 +1817,6 @@ extern void __skb_cb_too_small_for_tcp(int, int);
void __init tcp_init(void)
{
struct sk_buff *skb = NULL;
- unsigned long goal;
- int order;
if(sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
@@ -1825,43 +1842,4 @@ void __init tcp_init(void)
NULL, NULL);
if(!tcp_timewait_cachep)
panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
-
- /* Size and allocate TCP hash tables. */
- goal = num_physpages >> (20 - PAGE_SHIFT);
- for (order = 0; (1UL << order) < goal; order++)
- ;
- do {
- tcp_ehash_size = (1UL << order) * PAGE_SIZE /
- sizeof(struct sock *);
- tcp_ehash = (struct sock **)
- __get_free_pages(GFP_ATOMIC, order);
- } while (tcp_ehash == NULL && --order >= 0);
-
- if (!tcp_ehash)
- panic("Failed to allocate TCP established hash table\n");
- memset(tcp_ehash, 0, tcp_ehash_size * sizeof(struct sock *));
-
- goal = (((1UL << order) * PAGE_SIZE) / sizeof(struct tcp_bind_bucket *));
- if (goal > (64 * 1024)) {
- /* Don't size the bind-hash larger than the port
- * space, that is just silly.
- */
- goal = (((64 * 1024) * sizeof(struct tcp_bind_bucket *)) / PAGE_SIZE);
- for (order = 0; (1UL << order) < goal; order++)
- ;
- }
-
- do {
- tcp_bhash_size = (1UL << order) * PAGE_SIZE /
- sizeof(struct tcp_bind_bucket *);
- tcp_bhash = (struct tcp_bind_bucket **)
- __get_free_pages(GFP_ATOMIC, order);
- } while (tcp_bhash == NULL && --order >= 0);
-
- if (!tcp_bhash)
- panic("Failed to allocate TCP bind hash table\n");
- memset(tcp_bhash, 0, tcp_bhash_size * sizeof(struct tcp_bind_bucket *));
-
- printk("TCP: Hash tables configured (ehash %d bhash %d)\n",
- tcp_ehash_size, tcp_bhash_size);
}
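
[Note on the tcp.c hunks above: tcp_tiocinq replaces the TIOCINQ branch of tcp_ioctl in the _HURD_ build and reports how many bytes are queued for reading. The sketch below shows one way a caller might wrap it; tcp_readable_bytes is a hypothetical name and the surrounding pfinet glue is assumed.]

/* Report the number of readable bytes on a TCP socket; tcp_tiocinq
 * itself returns EINVAL while the socket is still listening. */
error_t
tcp_readable_bytes(struct sock *sk, mach_msg_type_number_t *amount)
{
  if (sk == 0)
    return EOPNOTSUPP;
  return tcp_tiocinq(sk, amount);
}
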
diff --git a/pfinet/linux-src/net/ipv4/udp.c b/pfinet/linux-src/net/ipv4/udp.c
index 909e858f..87ff84ce 100644
--- a/pfinet/linux-src/net/ipv4/udp.c
+++ b/pfinet/linux-src/net/ipv4/udp.c
@@ -19,8 +19,8 @@
* for udp at least is 'valid'.
* Alan Cox : Fixed icmp handling properly
* Alan Cox : Correct error for oversized datagrams
- * Alan Cox : Tidied select() semantics.
- * Alan Cox : udp_err() fixed properly, also now
+ * Alan Cox : Tidied select() semantics.
+ * Alan Cox : udp_err() fixed properly, also now
* select and read wake correctly on errors
* Alan Cox : udp_send verify_area moved to avoid mem leak
* Alan Cox : UDP can count its memory
@@ -55,7 +55,7 @@
* does have a high hit rate.
* Olaf Kirch : Don't linearise iovec on sendmsg.
* Andi Kleen : Some cleanups, cache destination entry
- * for connect.
+ * for connect.
* Vitaly E. Lavrov : Transparent proxy revived after year coma.
* Melvin Smith : Check msg_name not msg_namelen in sendto(),
* return ENOTCONN for unconnected sockets (POSIX)
@@ -68,10 +68,10 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-
+
/* RFC1122 Status:
4.1.3.1 (Ports):
- SHOULD send ICMP_PORT_UNREACHABLE in response to datagrams to
+ SHOULD send ICMP_PORT_UNREACHABLE in response to datagrams to
an un-listened port. (OK)
4.1.3.2 (IP Options)
MUST pass IP options from IP -> application (OK)
@@ -399,7 +399,7 @@ static inline struct sock *udp_v4_mcast_next(struct sock *sk,
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
* be closed and the error returned to the user. If err > 0
- * it's just the icmp type << 8 | icmp code.
+ * it's just the icmp type << 8 | icmp code.
* Header points to the ip header of the error packet. We move
* on past this. Then (as it used to claim before adjustment)
* header points to the first 8 bytes of the udp header. We need
@@ -463,16 +463,16 @@ void udp_err(struct sk_buff *skb, unsigned char *dp, int len)
}
/*
- * Various people wanted BSD UDP semantics. Well they've come
+ * Various people wanted BSD UDP semantics. Well they've come
* back out because they slow down response to stuff like dead
* or unreachable name servers and they screw term users something
- * chronic. Oh and it violates RFC1122. So basically fix your
+ * chronic. Oh and it violates RFC1122. So basically fix your
* client code people.
*/
-
+
/*
- * RFC1122: OK. Passes ICMP errors back to application, as per
- * 4.1.3.3. After the comment above, that should be no surprise.
+ * RFC1122: OK. Passes ICMP errors back to application, as per
+ * 4.1.3.3. After the comment above, that should be no surprise.
*/
if (!harderr && !sk->ip_recverr)
@@ -497,7 +497,7 @@ static unsigned short udp_check(struct udphdr *uh, int len, unsigned long saddr,
return(csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base));
}
-struct udpfakehdr
+struct udpfakehdr
{
struct udphdr uh;
u32 saddr;
@@ -512,8 +512,8 @@ struct udpfakehdr
* card and provide an additional callback mode for direct user->board I/O
* transfers. That one will be fun.
*/
-
-static int udp_getfrag(const void *p, char * to, unsigned int offset, unsigned int fraglen)
+
+static int udp_getfrag(const void *p, char * to, unsigned int offset, unsigned int fraglen)
{
struct udpfakehdr *ufh = (struct udpfakehdr *)p;
if (offset==0) {
@@ -522,7 +522,7 @@ static int udp_getfrag(const void *p, char * to, unsigned int offset, unsigned i
return -EFAULT;
ufh->wcheck = csum_partial((char *)ufh, sizeof(struct udphdr),
ufh->wcheck);
- ufh->uh.check = csum_tcpudp_magic(ufh->saddr, ufh->daddr,
+ ufh->uh.check = csum_tcpudp_magic(ufh->saddr, ufh->daddr,
ntohs(ufh->uh.len),
IPPROTO_UDP, ufh->wcheck);
if (ufh->uh.check == 0)
@@ -542,8 +542,8 @@ static int udp_getfrag(const void *p, char * to, unsigned int offset, unsigned i
* CONFIG_FAST_NET set for >10Mb/second boards to activate this sort of coding.
* Timing needed to verify if this is a valid decision.
*/
-
-static int udp_getfrag_nosum(const void *p, char * to, unsigned int offset, unsigned int fraglen)
+
+static int udp_getfrag_nosum(const void *p, char * to, unsigned int offset, unsigned int fraglen)
{
struct udpfakehdr *ufh = (struct udpfakehdr *)p;
@@ -582,7 +582,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
if (len < 0 || len > 0xFFFF)
return -EMSGSIZE;
- /*
+ /*
* Check the flags.
*/
@@ -600,9 +600,9 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
#endif
/*
- * Get and verify the address.
+ * Get and verify the address.
*/
-
+
if (msg->msg_name) {
struct sockaddr_in * usin = (struct sockaddr_in*)msg->msg_name;
if (msg->msg_namelen < sizeof(*usin))
@@ -673,7 +673,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
connected = 0;
}
tos = RT_TOS(sk->ip_tos);
- if (sk->localroute || (msg->msg_flags&MSG_DONTROUTE) ||
+ if (sk->localroute || (msg->msg_flags&MSG_DONTROUTE) ||
(ipc.opt && ipc.opt->is_strictroute)) {
tos |= RTO_ONLINK;
connected = 0;
@@ -703,11 +703,11 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, int len)
(msg->msg_flags&MSG_PROXY ? RTO_TPROXY : 0) |
#endif
tos, ipc.oif);
- if (err)
+ if (err)
goto out;
err = -EACCES;
- if (rt->rt_flags&RTCF_BROADCAST && !sk->broadcast)
+ if (rt->rt_flags&RTCF_BROADCAST && !sk->broadcast)
goto out;
if (connected && sk->dst_cache == NULL)
sk->dst_cache = dst_clone(&rt->u.dst);
@@ -739,13 +739,19 @@ out:
return err;
}
+#ifdef _HURD_
+
+#define udp_ioctl 0
+
+#else
+
/*
* IOCTL requests applicable to the UDP protocol
*/
-
+
int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
- switch(cmd)
+ switch(cmd)
{
case TIOCOUTQ:
{
@@ -782,6 +788,8 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
return(0);
}
+#endif
+
#ifndef HAVE_CSUM_COPY_USER
#undef CONFIG_UDP_DELAY_CSUM
#endif
@@ -809,7 +817,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
-
+
copied = skb->len - sizeof(struct udphdr);
if (copied > len) {
copied = len;
@@ -824,7 +832,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
} else if (copied > msg->msg_iov[0].iov_len || (msg->msg_flags&MSG_TRUNC)) {
- if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum)))
+ if ((unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum)))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
@@ -833,11 +841,11 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
err = 0;
csum = csum_partial(skb->h.raw, sizeof(struct udphdr), skb->csum);
- csum = csum_and_copy_to_user((char*)&skb->h.uh[1], msg->msg_iov[0].iov_base,
+ csum = csum_and_copy_to_user((char*)&skb->h.uh[1], msg->msg_iov[0].iov_base,
copied, csum, &err);
if (err)
goto out_free;
- if ((unsigned short)csum_fold(csum))
+ if ((unsigned short)csum_fold(csum))
goto csum_copy_err;
}
#endif
@@ -851,7 +859,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
/*
* Check any passed addresses
*/
- if (addr_len)
+ if (addr_len)
*addr_len=sizeof(*sin);
sin->sin_family = AF_INET;
@@ -878,7 +886,7 @@ int udp_recvmsg(struct sock *sk, struct msghdr *msg, int len,
if (sk->ip_cmsg_flags)
ip_cmsg_recv(msg, skb);
err = copied;
-
+
out_free:
skb_free_datagram(sk, skb);
out:
@@ -889,11 +897,11 @@ csum_copy_err:
udp_statistics.UdpInErrors++;
skb_free_datagram(sk, skb);
- /*
+ /*
* Error for blocking case is chosen to masquerade
* as some normal condition.
*/
- return (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
+ return (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
#endif
}
@@ -903,14 +911,14 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
struct rtable *rt;
int err;
-
- if (addr_len < sizeof(*usin))
+
+ if (addr_len < sizeof(*usin))
return(-EINVAL);
/*
* 1003.1g - break association.
*/
-
+
if (usin->sin_family==AF_UNSPEC)
{
sk->saddr=INADDR_ANY;
@@ -922,7 +930,7 @@ int udp_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
return 0;
}
- if (usin->sin_family && usin->sin_family != AF_INET)
+ if (usin->sin_family && usin->sin_family != AF_INET)
return(-EAFNOSUPPORT);
dst_release(xchg(&sk->dst_cache, NULL));
@@ -1056,9 +1064,9 @@ int udp_chkaddr(struct sk_buff *skb)
#endif
/*
- * All we need to do is get the socket, and then do a checksum.
+ * All we need to do is get the socket, and then do a checksum.
*/
-
+
int udp_rcv(struct sk_buff *skb, unsigned short len)
{
struct sock *sk;
@@ -1076,7 +1084,7 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
/*
* Get the header.
*/
-
+
uh = skb->h.uh;
__skb_pull(skb, skb->h.raw - skb->data);
@@ -1085,7 +1093,7 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
/*
* Validate the packet and the UDP length.
*/
-
+
ulen = ntohs(uh->len);
if (ulen > len || ulen < sizeof(*uh)) {
@@ -1100,13 +1108,13 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
if (uh->check &&
(((skb->ip_summed==CHECKSUM_HW)&&udp_check(uh,ulen,saddr,daddr,skb->csum)) ||
((skb->ip_summed==CHECKSUM_NONE) &&
- (udp_check(uh,ulen,saddr,daddr, csum_partial((char*)uh, ulen, 0))))))
+ (udp_check(uh,ulen,saddr,daddr, csum_partial((char*)uh, ulen, 0))))))
goto csum_error;
#else
if (uh->check==0)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else if (skb->ip_summed==CHECKSUM_HW) {
- if (udp_check(uh,ulen,saddr,daddr,skb->csum))
+ if (udp_check(uh,ulen,saddr,daddr,skb->csum))
goto csum_error;
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (skb->ip_summed != CHECKSUM_UNNECESSARY)
@@ -1124,11 +1132,11 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
else
#endif
sk = udp_v4_lookup(saddr, uh->source, daddr, uh->dest, skb->dev->ifindex);
-
+
if (sk == NULL) {
#ifdef CONFIG_UDP_DELAY_CSUM
if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
- (unsigned short)csum_fold(csum_partial((char*)uh, ulen, skb->csum)))
+ (unsigned short)csum_fold(csum_partial((char*)uh, ulen, skb->csum)))
goto csum_error;
#endif
udp_statistics.UdpNoPorts++;
@@ -1145,9 +1153,9 @@ int udp_rcv(struct sk_buff *skb, unsigned short len)
return 0;
csum_error:
- /*
- * RFC1122: OK. Discards the bad packet silently (as far as
- * the network is concerned, anyway) as per 4.1.3.4 (MUST).
+ /*
+ * RFC1122: OK. Discards the bad packet silently (as far as
+ * the network is concerned, anyway) as per 4.1.3.4 (MUST).
*/
NETDEBUG(printk(KERN_DEBUG "UDP: bad checksum. From %d.%d.%d.%d:%d to %d.%d.%d.%d:%d ulen %d\n",
NIPQUAD(saddr),