summaryrefslogtreecommitdiff
path: root/ipc
diff options
context:
space:
mode:
authorThomas Bushnell <thomas@gnu.org>1997-02-25 21:28:37 +0000
committerThomas Bushnell <thomas@gnu.org>1997-02-25 21:28:37 +0000
commitf07a4c844da9f0ecae5bbee1ab94be56505f26f7 (patch)
tree12b07c7e578fc1a5f53dbfde2632408491ff2a70 /ipc
Initial source
Diffstat (limited to 'ipc')
-rw-r--r--ipc/fipc.c795
-rw-r--r--ipc/fipc.h95
-rw-r--r--ipc/ipc_entry.c858
-rw-r--r--ipc/ipc_entry.h158
-rw-r--r--ipc/ipc_hash.c626
-rw-r--r--ipc/ipc_hash.h94
-rw-r--r--ipc/ipc_init.c139
-rw-r--r--ipc/ipc_init.h58
-rw-r--r--ipc/ipc_kmsg.c3484
-rw-r--r--ipc/ipc_kmsg.h291
-rw-r--r--ipc/ipc_kmsg_queue.h31
-rwxr-xr-xipc/ipc_machdep.h40
-rw-r--r--ipc/ipc_marequest.c485
-rw-r--r--ipc/ipc_marequest.h98
-rw-r--r--ipc/ipc_mqueue.c754
-rw-r--r--ipc/ipc_mqueue.h108
-rw-r--r--ipc/ipc_notify.c593
-rw-r--r--ipc/ipc_notify.h72
-rw-r--r--ipc/ipc_object.c1346
-rw-r--r--ipc/ipc_object.h192
-rw-r--r--ipc/ipc_port.c1545
-rw-r--r--ipc/ipc_port.h407
-rw-r--r--ipc/ipc_pset.c349
-rw-r--r--ipc/ipc_pset.h95
-rw-r--r--ipc/ipc_right.c2762
-rw-r--r--ipc/ipc_right.h124
-rw-r--r--ipc/ipc_space.c317
-rw-r--r--ipc/ipc_space.h164
-rw-r--r--ipc/ipc_splay.c920
-rw-r--r--ipc/ipc_splay.h114
-rw-r--r--ipc/ipc_table.c205
-rw-r--r--ipc/ipc_table.h138
-rw-r--r--ipc/ipc_target.c78
-rw-r--r--ipc/ipc_target.h68
-rw-r--r--ipc/ipc_thread.c107
-rw-r--r--ipc/ipc_thread.h123
-rw-r--r--ipc/ipc_types.h31
-rw-r--r--ipc/mach_debug.c618
-rw-r--r--ipc/mach_msg.c2279
-rw-r--r--ipc/mach_msg.h68
-rw-r--r--ipc/mach_port.c2505
-rw-r--r--ipc/mach_port.srv27
-rw-r--r--ipc/mach_rpc.c148
-rw-r--r--ipc/port.h90
44 files changed, 23599 insertions, 0 deletions
diff --git a/ipc/fipc.c b/ipc/fipc.c
new file mode 100644
index 0000000..ebab640
--- /dev/null
+++ b/ipc/fipc.c
@@ -0,0 +1,795 @@
+/*
+ * Copyright (c) 1994-1996 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: fipc.c 1.1 96/2/29$
+ * Author: Linus Kamb
+ */
+
+#ifdef FIPC
+
+#include <mach/kern_return.h>
+
+#include <device/device_types.h>
+#include <device/device.h>
+#include <device/dev_hdr.h>
+#include <device/device_port.h>
+#include <device/io_req.h>
+#include <device/if_ether.h>
+#include <net_io.h>
+#include <spl.h>
+#include <kern/lock.h>
+
+#include "fipc.h"
+
+void fipc_packet();
+void allocate_fipc_buffers(boolean_t);
+int fipc_lookup(unsigned short port);
+int fipc_lookup_table_enter(unsigned short port);
+int fipc_lookup_table_remove(unsigned short port);
+int f_lookup_hash(unsigned short port);
+int fipc_done(io_req_t ior);
+
+
+/********************************************************************/
+/* fipc variables */
+/********************************************************************/
+
+fipc_port_t fports[N_MAX_OPEN_FIPC_PORTS];
+fipc_lookup_table_ent fipc_lookup_table[N_MAX_OPEN_FIPC_PORTS];
+
+int n_free_recv_bufs = 0;
+int n_free_send_bufs = 0;
+int n_fipc_recv_ports_used = 0;
+
+int fipc_sends = 0;
+int fipc_recvs =0;
+
+fipc_stat_t fipc_stats;
+
+char *fipc_recv_free_list = NULL;
+char *fipc_recv_free_list_tail = NULL;
+char *fipc_send_free_list = NULL;
+char *fipc_send_free_list_tail = NULL;
+
+/* fipc locks */
+decl_simple_lock_data(, fipc_lock);
+decl_simple_lock_data(, fipc_buf_q_lock);
+
+
+/*
+ * Routine: fipc_init(): initializes the fipc data structures.
+ */
+
+void fipc_init(void)
+{
+ int i;
+
+ allocate_fipc_buffers(TRUE); /* recv buffers */
+ allocate_fipc_buffers(FALSE); /* send buffers */
+
+ fipc_stats.dropped_msgs = 0;
+
+ bzero (&fports, sizeof(fports));
+ for (i=0; i<N_MAX_OPEN_FIPC_PORTS; i++)
+ {
+ simple_lock_init(&(fports[i].lock));
+ fipc_lookup_table[i].fpt_num = INVALID;
+ fipc_lookup_table[i].fipc_port = INVALID;
+ }
+}
+
+
+/*
+ * Routine: allocate_fipc_buffers(): allocate more buffers
+ * Currently we are only allocating 1500 byte (ETHERMTU) buffers.
+ * We use the first word in the buffer as the pointer to the next.
+ */
+
+void allocate_fipc_buffers(boolean_t r_buf)
+{
+ char *new_pg;
+ char **free_list, **free_list_tail;
+ int *free_count, min_count, max_count;
+ int total_buffer_size;
+
+ if (r_buf)
+ {
+ free_count = &n_free_recv_bufs;
+ min_count = N_MIN_RECV_BUFS;
+ max_count = N_MAX_RECV_BUFS;
+ free_list = &fipc_recv_free_list;
+ free_list_tail = &fipc_recv_free_list_tail;
+ total_buffer_size = (N_MAX_RECV_BUFS * FIPC_BUFFER_SIZE);
+ total_buffer_size = round_page(total_buffer_size);
+ }
+ else
+ {
+ free_count = &n_free_send_bufs;
+ min_count = N_MIN_SEND_BUFS;
+ max_count = N_MAX_SEND_BUFS;
+ free_list = &fipc_send_free_list;
+ free_list_tail = &fipc_send_free_list_tail;
+ total_buffer_size = (N_MAX_SEND_BUFS * FIPC_BUFFER_SIZE);
+ total_buffer_size = round_page(total_buffer_size);
+ }
+
+ if (!(*free_count)) /* empty buffer pool */
+ {
+#ifdef FI_DEBUG
+ printf ("Allocating new fipc ");
+ if (r_buf)
+ printf ("recv buffer pool.\n");
+ else
+ printf ("send buffer pool.\n");
+#endif
+ *free_list = (char*)kalloc (total_buffer_size);
+ if (!*free_list) /* bummer */
+ panic("allocate_fipc_buffers: no memory");
+ *free_list_tail = *free_list;
+ for (*free_count=1; *free_count<max_count; (*free_count)++)
+ {
+ *(char**)*free_list_tail = *free_list_tail + FIPC_BUFFER_SIZE;
+ *free_list_tail += FIPC_BUFFER_SIZE;
+ }
+ *(char**)*free_list_tail = NULL;
+ }
+ else /* Request to grow the buffer pool. */
+ {
+#ifdef FI_DEBUG
+ printf ("Growing fipc ");
+ if (r_buf)
+ printf ("recv buffer pool.\n");
+ else
+ printf ("send buffer pool.\n");
+#endif
+
+#define GROW_SIZE 8192
+ new_pg = (char*)kalloc (round_page(GROW_SIZE));
+ if (new_pg)
+ {
+ int new_cnt, n_new = GROW_SIZE / FIPC_BUFFER_SIZE;
+
+ if (*free_list_tail != NULL)
+ *(char**)*free_list_tail = new_pg;
+ for ( new_cnt =0; new_cnt<n_new; new_cnt++)
+ {
+ *(char**)*free_list_tail = *free_list_tail + FIPC_BUFFER_SIZE;
+ *free_list_tail += FIPC_BUFFER_SIZE;
+ }
+ *(char**)*free_list_tail = NULL;
+ *free_count +=new_cnt;
+ }
+#ifdef FI_DEBUG
+ else
+ printf ("### kalloc failed in allocate_fipc_buffers()\n");
+#endif
+
+ }
+}
+
+
+/*
+ * Routine: get_fipc_buffer (): returns a free buffer
+ * Takes a size (currently not used), a boolean flag to tell if it is a
+ * receive buffer, and a boolean flag if the request is coming at interrupt
+ * level.
+ */
+
+inline
+char* get_fipc_buffer(int size, boolean_t r_buf, boolean_t at_int_lvl)
+{
+ /* we currently don't care about size, since there is only one
+ * buffer pool. */
+
+ char* head;
+ char **free_list;
+ int *free_count, min_count;
+
+ if (r_buf)
+ {
+ free_count = &n_free_recv_bufs;
+ free_list = &fipc_recv_free_list;
+ min_count = N_MIN_RECV_BUFS;
+ }
+ else
+ {
+ free_count = &n_free_send_bufs;
+ free_list = &fipc_send_free_list;
+ min_count = N_MIN_SEND_BUFS;
+ }
+
+ /*
+ * Since we currently allocate a full complement of receive buffers,
+ * there is no need to allocate more receive buffers. But that is likely
+ * to change, I'm sure.
+ */
+
+ if (*free_count < min_count)
+ {
+ if (!at_int_lvl)
+ allocate_fipc_buffers(r_buf);
+ }
+
+ if (*free_count)
+ {
+ head = *free_list;
+ *free_list = *(char**)*free_list;
+ (*free_count)--;
+ return head;
+ }
+ else
+ return NULL;
+}
+
+
+/*
+ * Routine: return_fipc_buffer (): puts a used buffer back in the pool.
+ */
+
+inline
+void return_fipc_buffer(char* buf, int size,
+ boolean_t r_buf, boolean_t at_int_lvl)
+{
+ /* return the buffer to the free pool */
+ char **free_list, **free_list_tail;
+ int *free_count, min_count;
+
+ if (r_buf)
+ {
+ free_count = &n_free_recv_bufs;
+ free_list = &fipc_recv_free_list;
+ free_list_tail = &fipc_recv_free_list_tail;
+ min_count = N_MIN_RECV_BUFS;
+ }
+ else
+ {
+ free_count = &n_free_send_bufs;
+ free_list = &fipc_send_free_list;
+ free_list_tail = &fipc_send_free_list_tail;
+ min_count = N_MIN_SEND_BUFS;
+ }
+
+#ifdef FI_SECURE
+ bzero(buf, FIPC_BUFFER_SIZE);
+#endif
+
+ if (*free_list_tail != NULL)
+ *(char**)*free_list_tail = buf;
+ *free_list_tail = buf;
+ (*free_count)++;
+ *(char**)buf = NULL;
+
+ if (!at_int_lvl)
+ if (*free_count < min_count)
+ allocate_fipc_buffers(r_buf);
+
+ return;
+}
+
+inline
+int f_lookup_hash(unsigned short port)
+{
+ /* Ok, so it's not really a hash function */
+ int bail=0;
+ int chk=0;
+
+ if (n_fipc_recv_ports_used == N_MAX_OPEN_FIPC_PORTS ||
+ port > MAX_FIPC_PORT_NUM)
+ return INVALID;
+
+ while (fipc_lookup_table[chk].fipc_port != port &&
+ fipc_lookup_table[chk].fpt_num != INVALID &&
+ bail < N_MAX_OPEN_FIPC_PORTS)
+ {
+ chk = (chk+1) % N_MAX_OPEN_FIPC_PORTS;
+ bail++;
+ }
+
+	/* This is redundant, but better safe than sorry */
+ if (bail<N_MAX_OPEN_FIPC_PORTS)
+ return chk;
+ else
+ return INVALID;
+}
+
+inline
+int fipc_lookup_table_enter(unsigned short port)
+{
+ int cfpn = n_fipc_recv_ports_used;
+ int lu_tbl_num = f_lookup_hash(port);
+
+ if (lu_tbl_num == INVALID)
+ return INVALID;
+
+ fipc_lookup_table[lu_tbl_num].fipc_port = port;
+ fipc_lookup_table[lu_tbl_num].fpt_num = cfpn;
+ n_fipc_recv_ports_used += 1;
+ return cfpn;
+}
+
+inline
+int fipc_lookup(unsigned short port)
+{
+ int chk = f_lookup_hash(port);
+
+ if (chk == INVALID)
+ return INVALID;
+
+ if (fipc_lookup_table[chk].fpt_num == INVALID)
+ return fipc_lookup_table_enter(port);
+ else
+ return fipc_lookup_table[chk].fpt_num;
+}
+
+inline
+int fipc_lookup_table_remove(unsigned short port)
+{
+ int chk = f_lookup_hash(port);
+
+ if (chk == INVALID)
+ return 0;
+
+ if (fipc_lookup_table[chk].fipc_port == port)
+ {
+ fports[fipc_lookup_table[chk].fpt_num].valid_msg = 0;
+ fports[fipc_lookup_table[chk].fpt_num].bound = FALSE;
+ fipc_lookup_table[chk].fpt_num = INVALID;
+ fipc_lookup_table[chk].fipc_port = INVALID;
+ n_fipc_recv_ports_used -=1;
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Routine: fipc_packet(): handles incoming fipc packets.
+ * does some simple packet handling and wakes up receiving thread, if any.
+ * called by device controller (currently, nerecv only.)
+ * called at interrupt level and splimp.
+ * Messages are dropped if the recv queue is full.
+ */
+
+ void fipc_packet( char* msg_buf, struct ether_header sender)
+ {
+ int to_port = ((fipc_header_t*)msg_buf)->dest_port;
+ int from_port = ((fipc_header_t*)msg_buf)->send_port;
+ int f_tbl_num;
+ fipc_port_t *cfp;
+ fipc_buffer_q_ent *crqe;
+ int *tail;
+
+#ifdef FI_DEBUG
+ printf ("fipc_packet :(0x%x) %s", msg_buf,
+ msg_buf+sizeof(fipc_header_t));
+#endif
+
+ f_tbl_num = fipc_lookup(to_port);
+ if (f_tbl_num == INVALID)
+ {
+#ifdef FI_DEBUG
+ printf ("Lookup failed.\n");
+#endif
+ fipc_stats.dropped_msgs += 1;
+ return_fipc_buffer (msg_buf, FIPC_BUFFER_SIZE, TRUE, TRUE);
+ return;
+ }
+
+ cfp = &fports[f_tbl_num];
+ tail = &cfp->rq_tail;
+ crqe = &cfp->recv_q[*tail];
+
+ if (cfp->valid_msg == FIPC_RECV_Q_SIZE)
+ {
+ /* Queue full.
+ * Drop packet, return buffer, and return. */
+#ifdef FI_DEBUG
+ printf ("Port %d queue is full: valid_msg count: %d\n",
+ to_port, cfp->valid_msg);
+#endif
+ fipc_stats.dropped_msgs += 1;
+ return_fipc_buffer (msg_buf, FIPC_BUFFER_SIZE, TRUE, TRUE);
+ return;
+ }
+
+ /* "enqueue" at "tail" */
+ crqe->buffer = msg_buf;
+ crqe->size = ((fipc_header_t*)msg_buf)->msg_size;
+ /* This could certainly be done faster... */
+ bcopy(&(sender.ether_shost), &(crqe->sender.hwaddr), ETHER_HWADDR_SIZE);
+ /* This is actually useless, since there _is_ no sender port.. duh. */
+ crqe->sender.port = from_port;
+
+ *tail = ((*tail)+1) % FIPC_RECV_Q_SIZE;
+
+ if (cfp->bound)
+ thread_wakeup(&(cfp->valid_msg));
+ cfp->valid_msg++;
+#ifdef FI_DEBUG
+ printf ("valid_msg: %d\n", cfp->valid_msg);
+#endif
+
+ return;
+ }
+
+
+/*
+ * loopback(): for fipc_sends to the local host.
+ */
+
+inline
+kern_return_t loopback(char *packet)
+{
+ fipc_packet(packet+sizeof(struct ether_header),
+ *(struct ether_header*)packet);
+ return KERN_SUCCESS;
+}
+
+
+/********************************************************************/
+/* Routine: fipc_send */
+/********************************************************************/
+
+kern_return_t syscall_fipc_send(fipc_endpoint_t dest,
+ char *user_buffer, int len)
+{
+#ifdef i386
+ static mach_device_t eth_device = 0;
+#else
+ static device_t eth_device = 0;
+#endif
+ static unsigned char hwaddr[ETHER_HWADDR_SIZE+2];
+
+ io_return_t rc;
+ kern_return_t open_res, kr;
+ dev_mode_t mode = D_WRITE;
+ /* register */ io_req_t ior = NULL;
+ struct ether_header *ehdr;
+ fipc_header_t *fhdr;
+ int *d_addr;
+ int data_count;
+ char *fipc_buf, *data_buffer;
+#ifdef FIPC_LOOPBACK
+ boolean_t local_send = FALSE;
+#endif
+
+#ifdef FI_DEBUG
+ printf("fipc_send(dest: %s, port:%d, len:%d, buf:x%x) !!!\n",
+ ether_sprintf(dest.hwaddr), dest.port, len, user_buffer);
+#endif
+
+ if (dest.port > MAX_FIPC_PORT_NUM ||
+ len > FIPC_MSG_SIZE)
+ {
+#ifdef FI_DEBUG
+ printf ("len: %d, dest.port: %u\n", len, dest.port);
+#endif
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ /* We should only need to probe the device once. */
+
+ if (!eth_device)
+ {
+ unsigned char net_hwaddr[ETHER_HWADDR_SIZE+2];
+ int stat_count = sizeof(net_hwaddr)/sizeof(int);
+
+ /* XXX Automatic lookup for ne0 or ne1 was failing... */
+ eth_device = device_lookup(ETHER_DEVICE_NAME);
+#ifdef i386
+ if (eth_device == (mach_device_t) DEVICE_NULL ||
+ eth_device == (mach_device_t)D_NO_SUCH_DEVICE)
+#else
+ if (eth_device == DEVICE_NULL ||
+ eth_device == (device_t)D_NO_SUCH_DEVICE)
+#endif
+ {
+#ifdef FI_DEBUG
+ printf ("FIPC: Couldn't find ethernet device %s.\n",
+ ETHER_DEVICE_NAME);
+#endif
+ return (KERN_FAILURE);
+ }
+
+ /* The device should be open! */
+ if (eth_device->state != DEV_STATE_OPEN)
+ {
+#ifdef FI_DEBUG
+ printf ("Opening ethernet device.\n");
+#endif
+
+ io_req_alloc (ior, 0);
+
+ io_req_alloc (ior, 0);
+
+ ior->io_device = eth_device;
+ ior->io_unit = eth_device->dev_number;
+ ior->io_op = IO_OPEN | IO_CALL;
+ ior->io_mode = mode;
+ ior->io_error = 0;
+ ior->io_done = 0;
+ ior->io_reply_port = MACH_PORT_NULL;
+ ior->io_reply_port_type = 0;
+
+ /* open the device */
+ open_res =
+ (*eth_device->dev_ops->d_open)
+ (eth_device->dev_number,
+ (int)mode, ior);
+ if (ior->io_error != D_SUCCESS)
+ {
+#ifdef FI_DEBUG
+ printf ("Failed to open device ne0\n");
+#endif
+ return open_res;
+ }
+ }
+#ifdef i386
+ rc = mach_device_get_status(eth_device, NET_ADDRESS,
+ net_hwaddr, &stat_count);
+#else
+ rc = ds_device_get_status(eth_device, NET_ADDRESS, net_hwaddr,
+ &stat_count);
+#endif
+ if (rc != D_SUCCESS)
+ {
+#ifdef FI_DEBUG
+ printf("FIPC: Couldn't determine hardware ethernet address: %d\n",
+ rc);
+#endif
+ return KERN_FAILURE;
+ }
+ *(int*)hwaddr = ntohl(*(int*)net_hwaddr);
+ *(int*)(hwaddr+4) = ntohl(*(int*)(net_hwaddr+4));
+#ifdef FI_DEBUG
+ printf ("host: %s\n", ether_sprintf(hwaddr));
+#endif
+ }
+
+#ifdef FIPC_LOOPBACK
+ if (!memcmp(dest.hwaddr, hwaddr, ETHER_HWADDR_SIZE))
+/*
+ if ((*(int*)dest.hwaddr == *(int*)hwaddr) &&
+ ((*(int*)(((char*)dest.hwaddr+4) >> 16)) ==
+ ((*(int*)(((char*)hwaddr+4) >> 16)))))
+*/
+ {
+ local_send = TRUE;
+#ifdef FI_DEBUG
+ printf ("loopback: \n");
+ printf ("host: %s, ", ether_sprintf(hwaddr));
+ printf ("dest: %s\n", ether_sprintf(dest.hwaddr));
+#endif
+ }
+#endif
+
+ data_count = len + sizeof (struct ether_header)
+ + sizeof (fipc_header_t);
+
+#ifdef FIPC_LOOPBACK
+ fipc_buf = get_fipc_buffer(data_count, local_send, FALSE) ;
+#else
+ fipc_buf = get_fipc_buffer(data_count, FALSE, FALSE) ;
+#endif
+
+ if (fipc_buf == NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ ehdr = (struct ether_header *)fipc_buf;
+ d_addr = (int *)ehdr->ether_dhost;
+
+ *(int *)ehdr->ether_dhost = *(int*)dest.hwaddr;
+ *(int *)(ehdr->ether_dhost+4) = *(int*)(dest.hwaddr+4);
+ *(int *)ehdr->ether_shost = *(int *)hwaddr;
+ *(int *)(ehdr->ether_shost+4) = *(int *)(hwaddr+4);
+ ehdr->ether_type = 0x1234; /* Yep. */
+
+#ifdef FIPC_LOOPBACK
+ if (!local_send)
+ {
+#endif
+ if (!ior)
+ io_req_alloc (ior, 0);
+
+ /* Set up the device information. */
+ ior->io_device = eth_device;
+ ior->io_unit = eth_device->dev_number;
+ ior->io_op = IO_WRITE | IO_INBAND | IO_INTERNAL;
+ ior->io_mode = D_WRITE;
+ ior->io_recnum = 0;
+ ior->io_data = fipc_buf;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = fipc_done;
+ ior->io_reply_port = MACH_PORT_NULL;
+ ior->io_reply_port_type = 0;
+ ior->io_copy = VM_MAP_COPY_NULL;
+#ifdef FIPC_LOOPBACK
+ }
+#endif
+
+#ifdef FI_DEBUG
+ printf("sending from %s ", ether_sprintf(ehdr->ether_shost));
+ printf("to %s, type x%x, user_port x%x\n",
+ ether_sprintf(ehdr->ether_dhost),
+ (int)ehdr->ether_type,
+ (int)dest.port);
+#endif
+
+ if (len <= FIPC_MSG_SIZE)
+ {
+ fhdr = (fipc_header_t*)(fipc_buf+sizeof(struct ether_header));
+ fhdr->dest_port = dest.port;
+ fhdr->msg_size = len;
+ data_buffer = (char*)fhdr+sizeof(fipc_header_t);
+
+ copyin (user_buffer, data_buffer,
+ min (FIPC_BUFFER_SIZE-sizeof(fipc_header_t), len));
+
+#ifdef FIPC_LOOPBACK
+ /*
+ * Sending to same node. Queue on dest.port of this node.
+ * We just call fipc_packet after setting up the necessary info
+ * and return. fipc_packet queues the packet on the receive
+ * queue for the destination port.
+ */
+ if (local_send)
+ return (loopback(fipc_buf));
+#endif
+
+ /* Now write to the device */
+ /* d_port_death has been co-opted for fipc stuff.
+ * It maps to nefoutput(). */
+
+ rc = (*eth_device->dev_ops->d_port_death) /* that's the one */
+ (eth_device->dev_number, ior);
+ }
+#ifdef FI_DEBUG
+ else /* len > ETHERMTU: multi-packet request */
+ printf ("### multi-packet messages are not supported.\n");
+#endif
+
+ if (rc == D_IO_QUEUED)
+ return KERN_SUCCESS;
+ else
+ return KERN_FAILURE;
+}
+
+#ifdef FIPC_LOOPBACK
+ if (!local_send)
+ {
+#endif
+ if (!ior)
+ io_req_alloc (ior, 0);
+
+ /* Set up the device information. */
+ ior->io_device = eth_device;
+ ior->io_unit = eth_device->dev_number;
+ ior->io_op = IO_WRITE | IO_INBAND | IO_INTERNAL;
+ ior->io_mode = D_WRITE;
+ ior->io_recnum = 0;
+ ior->io_data = fipc_buf;
+ ior->io_count = data_count;
+ ior->io_total = data_count;
+ ior->io_alloc_size = 0;
+ ior->io_residual = 0;
+ ior->io_error = 0;
+ ior->io_done = fipc_done;
+ ior->io_reply_port = MACH_PORT_NULL;
+ ior->io_reply_port_type = 0;
+ ior->io_copy = VM_MAP_COPY_NULL;
+#ifdef FIPC_LOOPBACK
+ }
+#endif
+
+/********************************************************************/
+/* syscall_fipc_recv()                                              */
+/*                                                                  */
+/********************************************************************/
+
+kern_return_t syscall_fipc_recv(unsigned short user_port,
+ char *user_buffer, int *user_size, fipc_endpoint_t *user_sender)
+{
+ char* f_buffer;
+ fipc_port_t *cfp;
+ fipc_buffer_q_ent *crqe;
+ int *head;
+ int msg_size;
+ int fport_num = fipc_lookup(user_port);
+ spl_t spl;
+
+#ifdef FI_DEBUG
+ printf("fipc_recv(0x%x, 0x%x) !!!\n", user_port, user_buffer);
+#endif
+
+ if (user_port > MAX_FIPC_PORT_NUM)
+ {
+#ifdef FI_DEBUG
+ printf ("Invalid FIPC port: %u\n", user_port);
+#endif
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (fport_num == INVALID)
+ return KERN_RESOURCE_SHORTAGE;
+
+ cfp = &fports[fport_num];
+ head = &cfp->rq_head;
+ crqe = &cfp->recv_q[*head];
+
+ if (cfp->bound != FALSE)
+ {
+#ifdef FI_DEBUG
+ printf ("FIPC Port %u is currently bound.\n", user_port);
+#endif
+ return KERN_RESOURCE_SHORTAGE;
+ }
+
+ copyin(user_size, &msg_size, sizeof(int));
+
+ spl = splimp();
+
+ cfp->bound = TRUE;
+ while (!(cfp->valid_msg))
+ {
+ assert_wait(&(cfp->valid_msg), TRUE);
+ splx(spl);
+ thread_block ((void(*)())0);
+ if (current_thread()->wait_result != THREAD_AWAKENED)
+ {
+ cfp->bound = FALSE;
+ return KERN_FAILURE;
+ }
+ spl = splimp();
+ }
+
+ cfp->valid_msg--;
+ f_buffer = crqe->buffer;
+ msg_size = min (crqe->size, msg_size);
+
+ crqe->buffer = NULL;
+ crqe->size = 0;
+ *head = ((*head)+1) % FIPC_RECV_Q_SIZE;
+ cfp->bound = FALSE;
+
+ splx(spl);
+
+ copyout(f_buffer+sizeof(fipc_header_t), user_buffer, msg_size);
+ copyout(&(crqe->sender), user_sender, sizeof(fipc_endpoint_t));
+ copyout(&msg_size, user_size, sizeof(msg_size));
+
+ return_fipc_buffer(f_buffer, FIPC_BUFFER_SIZE, TRUE, FALSE);
+
+ return KERN_SUCCESS;
+}
+
+
+/*
+ * Final clean-up after the packet has been sent off.
+ */
+int fipc_done(io_req_t ior)
+{
+ return_fipc_buffer(ior->io_data, FIPC_BUFFER_SIZE, FALSE, FALSE);
+
+ return 1;
+}
+
+#endif /* FIPC */
diff --git a/ipc/fipc.h b/ipc/fipc.h
new file mode 100644
index 0000000..2b545c4
--- /dev/null
+++ b/ipc/fipc.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1994-1996 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Utah $Hdr: fipc.h 1.1 96/2/29$
+ * Author: Linus Kamb
+ */
+
+#include <kern/lock.h>
+#include <device/if_ether.h>
+
+
+#define N_MAX_OPEN_FIPC_PORTS 32 /* In practice,
+ * this should be much larger */
+#define MAX_FIPC_PORT_NUM 4095 /* ditto */
+
+#define FIPC_MSG_TYPE 0x1234
+
+#define FIPC_BUFFER_SIZE ETHERMTU
+#define FIPC_MSG_SIZE (FIPC_BUFFER_SIZE - sizeof(fipc_header_t))
+
+#define FIPC_RECV_Q_SIZE 4
+#define N_MIN_RECV_BUFS 5 /* 2 pages worth */
+#define N_MAX_RECV_BUFS (N_MAX_OPEN_FIPC_PORTS * FIPC_RECV_Q_SIZE)
+#define N_MIN_SEND_BUFS 2
+#define N_MAX_SEND_BUFS 5
+
+#define INVALID -1
+
+#define ETHER_HWADDR_SIZE 6
+#define ETHER_DEVICE_NAME "ne0"
+
+typedef struct fipc_endpoint_structure
+{
+ unsigned char hwaddr[ETHER_HWADDR_SIZE];
+ unsigned short port;
+} fipc_endpoint_t;
+
+typedef struct fipc_buffer_structure
+{
+ char *buffer;
+ unsigned short size;
+ fipc_endpoint_t sender;
+} fipc_buffer_q_ent;
+
+typedef struct fipc_port_structure
+{
+ simple_lock_data_t lock;
+ boolean_t bound;
+ int valid_msg;
+ fipc_buffer_q_ent recv_q[FIPC_RECV_Q_SIZE];
+ int rq_head, rq_tail;
+} fipc_port_t;
+
+typedef struct fipc_header_structure
+{
+ unsigned short dest_port;
+ unsigned short send_port;
+ unsigned int msg_size;
+} fipc_header_t;
+
+typedef struct fipc_lookup_table_ent_structure
+{
+ int fipc_port;
+ int fpt_num; /* f_ports[] entry number */
+} fipc_lookup_table_ent;
+
+typedef struct fipc_stat_structure
+{
+ int dropped_msgs;
+} fipc_stat_t;
+
+#define min(a,b) (((a)<=(b)?(a):(b)))
+
+char* get_fipc_buffer(int, boolean_t, boolean_t);
+void fipc_packet(char*, struct ether_header);
+
+extern int fipc_sends;
+extern int fipc_recvs;
+
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
new file mode 100644
index 0000000..305c98e
--- /dev/null
+++ b/ipc/ipc_entry.c
@@ -0,0 +1,858 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_entry.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Primitive functions to manipulate translation entries.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/sched_prim.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_splay.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_object.h>
+
+zone_t ipc_tree_entry_zone;
+
+/*
+ * Routine: ipc_entry_tree_collision
+ * Purpose:
+ * Checks if "name" collides with an allocated name
+ * in the space's tree. That is, returns TRUE
+ * if the splay tree contains a name with the same
+ * index as "name".
+ * Conditions:
+ * The space is locked (read or write) and active.
+ */
+
+boolean_t
+ipc_entry_tree_collision(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ mach_port_index_t index;
+ mach_port_t lower, upper;
+
+ assert(space->is_active);
+
+ /*
+ * Check if we collide with the next smaller name
+ * or the next larger name.
+ */
+
+ ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
+
+ index = MACH_PORT_INDEX(name);
+ return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
+ ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
+}
+
+/*
+ * Routine: ipc_entry_lookup
+ * Purpose:
+ * Searches for an entry, given its name.
+ * Conditions:
+ * The space must be read or write locked throughout.
+ * The space must be active.
+ */
+
+ipc_entry_t
+ipc_entry_lookup(space, name)
+ ipc_space_t space;
+ mach_port_t name;
+{
+ mach_port_index_t index;
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+
+ index = MACH_PORT_INDEX(name);
+ if (index < space->is_table_size) {
+ entry = &space->is_table[index];
+ if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
+ if (entry->ie_bits & IE_BITS_COLLISION) {
+ assert(space->is_tree_total > 0);
+ goto tree_lookup;
+ } else
+ entry = IE_NULL;
+ else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ entry = IE_NULL;
+ } else if (space->is_tree_total == 0)
+ entry = IE_NULL;
+ else
+ tree_lookup:
+ entry = (ipc_entry_t)
+ ipc_splay_tree_lookup(&space->is_tree, name);
+
+ assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
+ return entry;
+}
+
+/*
+ * Routine: ipc_entry_get
+ * Purpose:
+ * Tries to allocate an entry out of the space.
+ * Conditions:
+ * The space is write-locked and active throughout.
+ * An object may be locked. Will not allocate memory.
+ * Returns:
+ * KERN_SUCCESS A free entry was found.
+ * KERN_NO_SPACE No entry allocated.
+ */
+
+kern_return_t
+ipc_entry_get(space, namep, entryp)
+ ipc_space_t space;
+ mach_port_t *namep;
+ ipc_entry_t *entryp;
+{
+ ipc_entry_t table;
+ mach_port_index_t first_free;
+ mach_port_t new_name;
+ ipc_entry_t free_entry;
+
+ assert(space->is_active);
+
+ table = space->is_table;
+ first_free = table->ie_next;
+
+ if (first_free == 0)
+ return KERN_NO_SPACE;
+
+ free_entry = &table[first_free];
+ table->ie_next = free_entry->ie_next;
+
+ /*
+ * Initialize the new entry. We need only
+ * increment the generation number and clear ie_request.
+ */
+
+ {
+ mach_port_gen_t gen;
+
+ assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+ free_entry->ie_bits = gen;
+ free_entry->ie_request = 0;
+ new_name = MACH_PORT_MAKE(first_free, gen);
+ }
+
+ /*
+ * The new name can't be MACH_PORT_NULL because index
+ * is non-zero. It can't be MACH_PORT_DEAD because
+ * the table isn't allowed to grow big enough.
+ * (See comment in ipc/ipc_table.h.)
+ */
+
+ assert(MACH_PORT_VALID(new_name));
+ assert(free_entry->ie_object == IO_NULL);
+
+ *namep = new_name;
+ *entryp = free_entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_entry_alloc
+ * Purpose:
+ * Allocate an entry out of the space.
+ * Conditions:
+ * The space is not locked before, but it is write-locked after
+ * if the call is successful. May allocate memory.
+ * Returns:
+ * KERN_SUCCESS An entry was allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory for an entry.
+ */
+
+kern_return_t
+ipc_entry_alloc(
+ ipc_space_t space,
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+{
+ kern_return_t kr;
+
+ is_write_lock(space);
+
+ for (;;) {
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return KERN_INVALID_TASK;
+ }
+
+ kr = ipc_entry_get(space, namep, entryp);
+ if (kr == KERN_SUCCESS)
+ return kr;
+
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS)
+ return kr; /* space is unlocked */
+ }
+}
+
+/*
+ *	Routine:	ipc_entry_alloc_name
+ *	Purpose:
+ *		Allocates/finds an entry with a specific name.
+ *		If an existing entry is returned, its type will be nonzero.
+ *	Conditions:
+ *		The space is not locked before, but it is write-locked after
+ *		if the call is successful.  May allocate memory.
+ *	Returns:
+ *		KERN_SUCCESS		Found existing entry with same name.
+ *		KERN_SUCCESS		Allocated a new entry.
+ *		KERN_INVALID_TASK	The space is dead.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_entry_alloc_name(
+	ipc_space_t	space,
+	mach_port_t	name,
+	ipc_entry_t	*entryp)
+{
+	mach_port_index_t index = MACH_PORT_INDEX(name);
+	mach_port_gen_t gen = MACH_PORT_GEN(name);
+	/*
+	 *	tree_entry holds a splay-tree node preallocated while the
+	 *	space was unlocked (bottom of the loop); every return path
+	 *	must either insert it into the tree or ite_free() it.
+	 */
+	ipc_tree_entry_t tree_entry = ITE_NULL;
+
+	assert(MACH_PORT_VALID(name));
+
+
+	is_write_lock(space);
+
+	/*
+	 *	Retry loop: any path that drops the space lock (growing the
+	 *	table, allocating a tree entry) restarts here, because the
+	 *	space may have changed or died while unlocked.
+	 */
+	for (;;) {
+		ipc_entry_t entry;
+		ipc_tree_entry_t tentry;
+		ipc_table_size_t its;
+
+		if (!space->is_active) {
+			is_write_unlock(space);
+			if (tree_entry) ite_free(tree_entry);
+			return KERN_INVALID_TASK;
+		}
+
+		/*
+		 *	If we are under the table cutoff,
+		 *	there are three cases:
+		 *	1) The entry is inuse, for the same name
+		 *	2) The entry is inuse, for a different name
+		 *	3) The entry is free
+		 */
+
+		if ((0 < index) && (index < space->is_table_size)) {
+			ipc_entry_t table = space->is_table;
+
+			entry = &table[index];
+
+			if (IE_BITS_TYPE(entry->ie_bits)) {
+				/* case 1 vs 2: same index, compare generations */
+				if (IE_BITS_GEN(entry->ie_bits) == gen) {
+					*entryp = entry;
+					if (tree_entry) ite_free(tree_entry);
+					return KERN_SUCCESS;
+				}
+			} else {
+				mach_port_index_t free_index, next_index;
+
+				/*
+				 *	Rip the entry out of the free list.
+				 *	(The list is singly linked through
+				 *	ie_next, headed at table[0], so we
+				 *	walk it to find our predecessor.)
+				 */
+
+				for (free_index = 0;
+				     (next_index = table[free_index].ie_next)
+							!= index;
+				     free_index = next_index)
+					continue;
+
+				table[free_index].ie_next =
+					table[next_index].ie_next;
+
+				/* bring the entry to life at the caller's
+				   requested generation */
+				entry->ie_bits = gen;
+				assert(entry->ie_object == IO_NULL);
+				entry->ie_request = 0;
+
+				*entryp = entry;
+				if (tree_entry) ite_free(tree_entry);
+				return KERN_SUCCESS;
+			}
+		}
+
+		/*
+		 *	Before trying to allocate any memory,
+		 *	check if the entry already exists in the tree.
+		 *	This avoids spurious resource errors.
+		 *	The splay tree makes a subsequent lookup/insert
+		 *	of the same name cheap, so this costs little.
+		 */
+
+		if ((space->is_tree_total > 0) &&
+		    ((tentry = ipc_splay_tree_lookup(&space->is_tree, name))
+						!= ITE_NULL)) {
+			assert(tentry->ite_space == space);
+			assert(IE_BITS_TYPE(tentry->ite_bits));
+
+			*entryp = &tentry->ite_entry;
+			if (tree_entry) ite_free(tree_entry);
+			return KERN_SUCCESS;
+		}
+
+		its = space->is_table_next;
+
+		/*
+		 *	Check if the table should be grown.
+		 *
+		 *	Note that if space->is_table_size == its->its_size,
+		 *	then we won't ever try to grow the table.
+		 *
+		 *	Note that we are optimistically assuming that name
+		 *	doesn't collide with any existing names.  (So if
+		 *	it were entered into the tree, is_tree_small would
+		 *	be incremented.)  This is OK, because even in that
+		 *	case, we don't lose memory by growing the table.
+		 */
+
+		if ((space->is_table_size <= index) &&
+		    (index < its->its_size) &&
+		    (((its->its_size - space->is_table_size) *
+		      sizeof(struct ipc_entry)) <
+		     ((space->is_tree_small + 1) *
+		      sizeof(struct ipc_tree_entry)))) {
+			kern_return_t kr;
+
+			/*
+			 *	Can save space by growing the table.
+			 *	Because the space will be unlocked,
+			 *	we must restart.
+			 */
+
+			kr = ipc_entry_grow_table(space);
+			assert(kr != KERN_NO_SPACE);
+			if (kr != KERN_SUCCESS) {
+				/* space is unlocked */
+				if (tree_entry) ite_free(tree_entry);
+				return kr;
+			}
+
+			continue;
+		}
+
+		/*
+		 *	If a splay-tree entry was allocated previously,
+		 *	go ahead and insert it into the tree.
+		 */
+
+		if (tree_entry != ITE_NULL) {
+			space->is_tree_total++;
+
+			/* keep the collision bookkeeping consistent:
+			   mark the table slot, or count a small entry */
+			if (index < space->is_table_size)
+				space->is_table[index].ie_bits |=
+					IE_BITS_COLLISION;
+			else if ((index < its->its_size) &&
+				 !ipc_entry_tree_collision(space, name))
+				space->is_tree_small++;
+
+			ipc_splay_tree_insert(&space->is_tree,
+					      name, tree_entry);
+
+			tree_entry->ite_bits = 0;
+			tree_entry->ite_object = IO_NULL;
+			tree_entry->ite_request = 0;
+			tree_entry->ite_space = space;
+			*entryp = &tree_entry->ite_entry;
+			return KERN_SUCCESS;
+		}
+
+		/*
+		 *	Allocate a tree entry and try again.
+		 *	(Allocation must happen unlocked; the loop
+		 *	revalidates everything after relocking.)
+		 */
+
+		is_write_unlock(space);
+		tree_entry = ite_alloc();
+		if (tree_entry == ITE_NULL)
+			return KERN_RESOURCE_SHORTAGE;
+		is_write_lock(space);
+	}
+}
+
+/*
+ *	Routine:	ipc_entry_dealloc
+ *	Purpose:
+ *		Deallocates an entry from a space.
+ *	Conditions:
+ *		The space must be write-locked throughout.
+ *		The space must be active.
+ */
+
+void
+ipc_entry_dealloc(
+	ipc_space_t	space,
+	mach_port_t	name,
+	ipc_entry_t	entry)
+{
+	ipc_entry_t table;
+	ipc_entry_num_t size;
+	mach_port_index_t index;
+
+	assert(space->is_active);
+	assert(entry->ie_object == IO_NULL);
+	assert(entry->ie_request == 0);
+
+	index = MACH_PORT_INDEX(name);
+	table = space->is_table;
+	size = space->is_table_size;
+
+	if ((index < size) && (entry == &table[index])) {
+		assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
+
+		if (entry->ie_bits & IE_BITS_COLLISION) {
+			struct ipc_splay_tree small, collisions;
+			ipc_tree_entry_t tentry;
+			mach_port_t tname;
+			boolean_t pick;
+			ipc_entry_bits_t bits;
+			ipc_object_t obj;
+
+			/* must move an entry from tree to table */
+
+			/*
+			 *	Cut the splay tree so that `collisions'
+			 *	holds exactly the tree entries whose index
+			 *	equals this slot, and `small' holds all
+			 *	entries with smaller names.  (The split
+			 *	primitive moves entries below the cut name
+			 *	into the new tree; cf. ipc_entry_grow_table.)
+			 */
+			ipc_splay_tree_split(&space->is_tree,
+					     MACH_PORT_MAKE(index+1, 0),
+					     &collisions);
+			ipc_splay_tree_split(&collisions,
+					     MACH_PORT_MAKE(index, 0),
+					     &small);
+
+			pick = ipc_splay_tree_pick(&collisions,
+						   &tname, &tentry);
+			assert(pick);
+			assert(MACH_PORT_INDEX(tname) == index);
+
+			/* promote the picked tree entry into the table slot */
+			bits = tentry->ite_bits;
+			entry->ie_bits = bits | MACH_PORT_GEN(tname);
+			entry->ie_object = obj = tentry->ite_object;
+			entry->ie_request = tentry->ite_request;
+			assert(tentry->ite_space == space);
+
+			if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND) {
+				/* pure send rights live in the reverse hash
+				   tables; move this one from global to local */
+				ipc_hash_global_delete(space, obj,
+						       tname, tentry);
+				ipc_hash_local_insert(space, obj,
+						      index, entry);
+			}
+
+			ipc_splay_tree_delete(&collisions, tname, tentry);
+
+			assert(space->is_tree_total > 0);
+			space->is_tree_total--;
+
+			/* check if collision bit should still be on */
+
+			pick = ipc_splay_tree_pick(&collisions,
+						   &tname, &tentry);
+			if (pick) {
+				entry->ie_bits |= IE_BITS_COLLISION;
+				ipc_splay_tree_join(&space->is_tree,
+						    &collisions);
+			}
+
+			ipc_splay_tree_join(&space->is_tree, &small);
+		} else {
+			/* no colliding tree entries: clear everything but
+			   the generation and push the slot onto the free
+			   list headed at table[0] */
+			entry->ie_bits &= IE_BITS_GEN_MASK;
+			entry->ie_next = table->ie_next;
+			table->ie_next = index;
+		}
+	} else {
+		/* the entry lives in the splay tree, not the table */
+		ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;
+
+		assert(tentry->ite_space == space);
+
+		ipc_splay_tree_delete(&space->is_tree, name, tentry);
+
+		assert(space->is_tree_total > 0);
+		space->is_tree_total--;
+
+		if (index < size) {
+			ipc_entry_t ientry = &table[index];
+
+			assert(ientry->ie_bits & IE_BITS_COLLISION);
+
+			/* drop the collision bit if this was the last
+			   tree entry sharing the table slot's index */
+			if (!ipc_entry_tree_collision(space, name))
+				ientry->ie_bits &= ~IE_BITS_COLLISION;
+		} else if ((index < space->is_table_next->its_size) &&
+			   !ipc_entry_tree_collision(space, name)) {
+			assert(space->is_tree_small > 0);
+			space->is_tree_small--;
+		}
+	}
+}
+
+/*
+ *	Routine:	ipc_entry_grow_table
+ *	Purpose:
+ *		Grows the table in a space.
+ *	Conditions:
+ *		The space must be write-locked and active before.
+ *		If successful, it is also returned locked.
+ *		Allocates memory.
+ *	Returns:
+ *		KERN_SUCCESS		Grew the table.
+ *		KERN_SUCCESS		Somebody else grew the table.
+ *		KERN_SUCCESS		The space died.
+ *		KERN_NO_SPACE		Table has maximum size already.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate a new table.
+ */
+
+kern_return_t
+ipc_entry_grow_table(space)
+	ipc_space_t space;
+{
+	ipc_entry_num_t osize, size, nsize;
+
+	/*
+	 *	May iterate: after one grow, enough splay-tree entries can
+	 *	fall under the new cutoff that growing again saves memory
+	 *	(see the while condition at the bottom).
+	 */
+	do {
+		ipc_entry_t otable, table;
+		ipc_table_size_t oits, its, nits;
+		mach_port_index_t i, free_index;
+
+		assert(space->is_active);
+
+		if (space->is_growing) {
+			/*
+			 *	Somebody else is growing the table.
+			 *	We just wait for them to finish.
+			 */
+
+			assert_wait((event_t) space, FALSE);
+			is_write_unlock(space);
+			thread_block((void (*)()) 0);
+			is_write_lock(space);
+			return KERN_SUCCESS;
+		}
+
+		/*
+		 *	The ipc_table_size descriptors are consecutive
+		 *	array elements: its-1 describes the current table,
+		 *	its the one we grow to, its+1 the one after that.
+		 */
+		otable = space->is_table;
+		its = space->is_table_next;
+		size = its->its_size;
+		oits = its - 1;
+		osize = oits->its_size;
+		nits = its + 1;
+		nsize = nits->its_size;
+
+		if (osize == size) {
+			is_write_unlock(space);
+			return KERN_NO_SPACE;
+		}
+
+		assert((osize < size) && (size <= nsize));
+
+		/*
+		 *	OK, we'll attempt to grow the table.
+		 *	The realloc requires that the old table
+		 *	remain in existence.  is_growing keeps other
+		 *	threads from racing us while we are unlocked.
+		 */
+
+		space->is_growing = TRUE;
+		is_write_unlock(space);
+		if (it_entries_reallocable(oits))
+			table = it_entries_realloc(oits, otable, its);
+		else
+			table = it_entries_alloc(its);
+		is_write_lock(space);
+		space->is_growing = FALSE;
+
+		/*
+		 *	We need to do a wakeup on the space,
+		 *	to rouse waiting threads.  We defer
+		 *	this until the space is unlocked,
+		 *	because we don't want them to spin.
+		 */
+
+		if (table == IE_NULL) {
+			is_write_unlock(space);
+			thread_wakeup((event_t) space);
+			return KERN_RESOURCE_SHORTAGE;
+		}
+
+		if (!space->is_active) {
+			/*
+			 *	The space died while it was unlocked.
+			 */
+
+			is_write_unlock(space);
+			thread_wakeup((event_t) space);
+			it_entries_free(its, table);
+			is_write_lock(space);
+			return KERN_SUCCESS;
+		}
+
+		assert(space->is_table == otable);
+		assert(space->is_table_next == its);
+		assert(space->is_table_size == osize);
+
+		space->is_table = table;
+		space->is_table_size = size;
+		space->is_table_next = nits;
+
+		/*
+		 *	If we did a realloc, it remapped the data.
+		 *	Otherwise we copy by hand first.  Then we have
+		 *	to clear the index fields in the old part and
+		 *	zero the new part.
+		 */
+
+		if (!it_entries_reallocable(oits))
+			(void) memcpy((void *) table, (const void *) otable,
+			      osize * sizeof(struct ipc_entry));
+
+		for (i = 0; i < osize; i++)
+			table[i].ie_index = 0;
+
+		(void) memset((void *) (table + osize), 0,
+		      (size - osize) * sizeof(struct ipc_entry));
+
+		/*
+		 *	Put old entries into the reverse hash table.
+		 *	(Only pure send rights are hashed; the ie_index
+		 *	slots were cleared just above.)
+		 */
+
+		for (i = 0; i < osize; i++) {
+			ipc_entry_t entry = &table[i];
+
+			if (IE_BITS_TYPE(entry->ie_bits) ==
+						MACH_PORT_TYPE_SEND)
+				ipc_hash_local_insert(space, entry->ie_object,
+						      i, entry);
+		}
+
+		/*
+		 *	If there are entries in the splay tree,
+		 *	then we have work to do:
+		 *	1) transfer entries to the table
+		 *	2) update is_tree_small
+		 */
+
+		if (space->is_tree_total > 0) {
+			mach_port_index_t index;
+			boolean_t delete;
+			struct ipc_splay_tree ignore;
+			struct ipc_splay_tree move;
+			struct ipc_splay_tree small;
+			ipc_entry_num_t nosmall;
+			ipc_tree_entry_t tentry;
+
+			/*
+			 *	The splay tree divides into four regions,
+			 *	based on the index of the entries:
+			 *	1) 0 <= index < osize
+			 *	2) osize <= index < size
+			 *	3) size <= index < nsize
+			 *	4) nsize <= index
+			 *
+			 *	Entries in the first part are ignored.
+			 *	Entries in the second part, that don't
+			 *	collide, are moved into the table.
+			 *	Entries in the third part, that don't
+			 *	collide, are counted for is_tree_small.
+			 *	Entries in the fourth part are ignored.
+			 */
+
+			ipc_splay_tree_split(&space->is_tree,
+					     MACH_PORT_MAKE(nsize, 0),
+					     &small);
+			ipc_splay_tree_split(&small,
+					     MACH_PORT_MAKE(size, 0),
+					     &move);
+			ipc_splay_tree_split(&move,
+					     MACH_PORT_MAKE(osize, 0),
+					     &ignore);
+
+			/* move entries into the table */
+
+			for (tentry = ipc_splay_traverse_start(&move);
+			     tentry != ITE_NULL;
+			     tentry = ipc_splay_traverse_next(&move, delete)) {
+				mach_port_t name;
+				mach_port_gen_t gen;
+				mach_port_type_t type;
+				ipc_entry_bits_t bits;
+				ipc_object_t obj;
+				ipc_entry_t entry;
+
+				/* `delete' tells the traversal whether to
+				   unlink the entry just visited */
+
+				name = tentry->ite_name;
+				gen = MACH_PORT_GEN(name);
+				index = MACH_PORT_INDEX(name);
+
+				assert(tentry->ite_space == space);
+				assert((osize <= index) && (index < size));
+
+				entry = &table[index];
+
+				/* collision with previously moved entry? */
+
+				bits = entry->ie_bits;
+				if (bits != 0) {
+					assert(IE_BITS_TYPE(bits));
+					assert(IE_BITS_GEN(bits) != gen);
+
+					entry->ie_bits =
+						bits | IE_BITS_COLLISION;
+					delete = FALSE;
+					continue;
+				}
+
+				bits = tentry->ite_bits;
+				type = IE_BITS_TYPE(bits);
+				assert(type != MACH_PORT_TYPE_NONE);
+
+				entry->ie_bits = bits | gen;
+				entry->ie_object = obj = tentry->ite_object;
+				entry->ie_request = tentry->ite_request;
+
+				if (type == MACH_PORT_TYPE_SEND) {
+					ipc_hash_global_delete(space, obj,
+							       name, tentry);
+					ipc_hash_local_insert(space, obj,
+							      index, entry);
+				}
+
+				space->is_tree_total--;
+				delete = TRUE;
+			}
+			ipc_splay_traverse_finish(&move);
+
+			/* count entries for is_tree_small */
+
+			nosmall = 0; index = 0;
+			for (tentry = ipc_splay_traverse_start(&small);
+			     tentry != ITE_NULL;
+			     tentry = ipc_splay_traverse_next(&small, FALSE)) {
+				mach_port_index_t nindex;
+
+				nindex = MACH_PORT_INDEX(tentry->ite_name);
+
+				/* entries sharing an index count once */
+				if (nindex != index) {
+					nosmall++;
+					index = nindex;
+				}
+			}
+			ipc_splay_traverse_finish(&small);
+
+			assert(nosmall <= (nsize - size));
+			assert(nosmall <= space->is_tree_total);
+			space->is_tree_small = nosmall;
+
+			/* put the splay tree back together */
+
+			ipc_splay_tree_join(&space->is_tree, &small);
+			ipc_splay_tree_join(&space->is_tree, &move);
+			ipc_splay_tree_join(&space->is_tree, &ignore);
+		}
+
+		/*
+		 *	Add entries in the new part which still aren't used
+		 *	to the free list.  Add them in reverse order,
+		 *	and set the generation number to -1, so that
+		 *	early allocations produce "natural" names.
+		 */
+
+		free_index = table[0].ie_next;
+		for (i = size-1; i >= osize; --i) {
+			ipc_entry_t entry = &table[i];
+
+			if (entry->ie_bits == 0) {
+				entry->ie_bits = IE_BITS_GEN_MASK;
+				entry->ie_next = free_index;
+				free_index = i;
+			}
+		}
+		table[0].ie_next = free_index;
+
+		/*
+		 *	Now we need to free the old table.
+		 *	If the space dies or grows while unlocked,
+		 *	then we can quit here.
+		 */
+
+		is_write_unlock(space);
+		thread_wakeup((event_t) space);
+		it_entries_free(oits, otable);
+		is_write_lock(space);
+		if (!space->is_active || (space->is_table_next != nits))
+			return KERN_SUCCESS;
+
+		/*
+		 *	We might have moved enough entries from
+		 *	the splay tree into the table that
+		 *	the table can be profitably grown again.
+		 *
+		 *	Note that if size == nsize, then
+		 *	space->is_tree_small == 0.
+		 */
+	} while ((space->is_tree_small > 0) &&
+		 (((nsize - size) * sizeof(struct ipc_entry)) <
+		  (space->is_tree_small * sizeof(struct ipc_tree_entry))));
+
+	return KERN_SUCCESS;
+}
+
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+#define	printf	kdbprintf
+
+ipc_entry_t db_ipc_object_by_name(
+	task_t		task,
+	mach_port_t	name);
+
+
+/*
+ *	Debugger helper: translate (task, name) into the underlying IPC
+ *	object pointer, printing the translation when the name resolves.
+ *	Returns the object pointer (cast to ipc_entry_t) or IE_NULL.
+ */
+ipc_entry_t
+db_ipc_object_by_name(
+	task_t		task,
+	mach_port_t	name)
+{
+        ipc_space_t space = task->itk_space;
+        ipc_entry_t entry;
+
+
+        entry = ipc_entry_lookup(space, name);
+        if(entry != IE_NULL) {
+		/*
+		 *	Fix: the format has three %x conversions, but only
+		 *	entry->ie_object was passed — reading the two missing
+		 *	varargs is undefined behavior and printed garbage.
+		 */
+		iprintf("(task 0x%x, name 0x%x) ==> object 0x%x",
+			task, name, entry->ie_object);
+		return (ipc_entry_t) entry->ie_object;
+        }
+        return entry;
+}
+#endif	/* MACH_KDB */
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
new file mode 100644
index 0000000..ea0c0a2
--- /dev/null
+++ b/ipc/ipc_entry.h
@@ -0,0 +1,158 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_entry.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for translation entries, which represent
+ * tasks' capabilities for ports and port sets.
+ */
+
+#ifndef	_IPC_IPC_ENTRY_H_
+#define	_IPC_IPC_ENTRY_H_
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_table.h>
+
+/*
+ *	Spaces hold capabilities for ipc_object_t's (ports and port sets).
+ *	Each ipc_entry_t records a capability.  Most capabilities have
+ *	small names, and the entries are elements of a table.
+ *	Capabilities can have large names, and a splay tree holds
+ *	those entries.  The cutoff point between the table and the tree
+ *	is adjusted dynamically to minimize memory consumption.
+ *
+ *	The ie_index field of entries in the table implements
+ *	a ordered hash table with open addressing and linear probing.
+ *	This hash table converts (space, object) -> name.
+ *	It is used independently of the other fields.
+ *
+ *	Free (unallocated) entries in the table have null ie_object
+ *	fields.  The ie_bits field is zero except for IE_BITS_GEN.
+ *	The ie_next (ie_request) field links free entries into a free list.
+ *
+ *	The first entry in the table (index 0) is always free.
+ *	It is used as the head of the free list.
+ */
+
+typedef unsigned int ipc_entry_bits_t;
+typedef ipc_table_elems_t ipc_entry_num_t;	/* number of entries */
+
+typedef struct ipc_entry {
+	ipc_entry_bits_t ie_bits;
+	struct ipc_object *ie_object;
+	union {
+		mach_port_index_t next;
+		/*XXX ipc_port_request_index_t request;*/
+		unsigned int request;
+	} index;
+	union {
+		mach_port_index_t table;
+		struct ipc_tree_entry *tree;
+	} hash;
+} *ipc_entry_t;
+
+#define	IE_NULL	((ipc_entry_t) 0)
+
+#define	ie_request	index.request
+#define	ie_next		index.next
+#define	ie_index	hash.table
+
+#define	IE_BITS_UREFS_MASK	0x0000ffff	/* 16 bits of user-reference */
+#define	IE_BITS_UREFS(bits)	((bits) & IE_BITS_UREFS_MASK)
+
+#define	IE_BITS_TYPE_MASK	0x001f0000	/* 5 bits of capability type */
+#define	IE_BITS_TYPE(bits)	((bits) & IE_BITS_TYPE_MASK)
+
+#define	IE_BITS_MAREQUEST	0x00200000	/* 1 bit for msg-accepted */
+
+#define	IE_BITS_COMPAT		0x00400000	/* 1 bit for compatibility */
+
+#define	IE_BITS_COLLISION	0x00800000	/* 1 bit for collisions */
+#define	IE_BITS_RIGHT_MASK	0x007fffff	/* relevant to the right */
+
+#if	PORT_GENERATIONS
+#define	IE_BITS_GEN_MASK	0xff000000U	/* 8 bits for generation */
+#define	IE_BITS_GEN(bits)	((bits) & IE_BITS_GEN_MASK)
+#define	IE_BITS_GEN_ONE		0x01000000	/* low bit of generation */
+#else
+#define	IE_BITS_GEN_MASK	0
+#define	IE_BITS_GEN(bits)	0
+#define	IE_BITS_GEN_ONE		0
+#endif	/* PORT_GENERATIONS */
+
+
+typedef struct ipc_tree_entry {
+	struct ipc_entry ite_entry;
+	mach_port_t ite_name;
+	struct ipc_space *ite_space;
+	struct ipc_tree_entry *ite_lchild;
+	struct ipc_tree_entry *ite_rchild;
+} *ipc_tree_entry_t;
+
+#define	ITE_NULL	((ipc_tree_entry_t) 0)
+
+#define	ite_bits	ite_entry.ie_bits
+#define	ite_object	ite_entry.ie_object
+#define	ite_request	ite_entry.ie_request
+#define	ite_next	ite_entry.hash.tree
+
+extern zone_t ipc_tree_entry_zone;
+
+#define ite_alloc()	((ipc_tree_entry_t) zalloc(ipc_tree_entry_zone))
+#define	ite_free(ite)	zfree(ipc_tree_entry_zone, (vm_offset_t) (ite))
+
+
+extern ipc_entry_t
+ipc_entry_lookup(/* ipc_space_t space, mach_port_t name */);
+
+extern kern_return_t
+ipc_entry_get(/* ipc_space_t space,
+		 mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern kern_return_t
+ipc_entry_alloc(/* ipc_space_t space,
+		   mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern kern_return_t
+ipc_entry_alloc_name(/* ipc_space_t space, mach_port_t name,
+			ipc_entry_t *entryp */);
+
+extern void
+ipc_entry_dealloc(/* ipc_space_t space, mach_port_t name,
+		     ipc_entry_t entry */);
+
+extern kern_return_t
+ipc_entry_grow_table(/* ipc_space_t space */);
+
+/* Fix: a bare identifier after #endif is non-standard C; comment it out. */
+#endif	/* _IPC_IPC_ENTRY_H_ */
diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
new file mode 100644
index 0000000..50024b5
--- /dev/null
+++ b/ipc/ipc_hash.c
@@ -0,0 +1,626 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_hash.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Entry hash table operations.
+ */
+
+#include <mach/boolean.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/kalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_init.h>
+#include <ipc/ipc_types.h>
+
+#include <mach_ipc_debug.h>
+#if MACH_IPC_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#endif
+
+
+
+/*
+ *	Routine:	ipc_hash_lookup
+ *	Purpose:
+ *		Converts (space, obj) -> (name, entry).
+ *		Returns TRUE if an entry was found.
+ *	Conditions:
+ *		The space must be locked (read or write) throughout.
+ */
+
+boolean_t
+ipc_hash_lookup(
+	ipc_space_t	space,
+	ipc_object_t	obj,
+	mach_port_t	*namep,
+	ipc_entry_t	*entryp)
+{
+	/*
+	 *	Try the space-local table hash first; fall back to the
+	 *	global (splay-tree) hash only if this space actually has
+	 *	hashed tree entries.  (Converted from K&R to an ANSI
+	 *	definition for consistency with the rest of this file.)
+	 */
+	return (ipc_hash_local_lookup(space, obj, namep, entryp) ||
+		((space->is_tree_hash > 0) &&
+		 ipc_hash_global_lookup(space, obj, namep,
+					(ipc_tree_entry_t *) entryp)));
+}
+
+/*
+ *	Routine:	ipc_hash_insert
+ *	Purpose:
+ *		Inserts an entry into the appropriate reverse hash table,
+ *		so that ipc_hash_lookup will find it.
+ *	Conditions:
+ *		The space must be write-locked.
+ */
+
+void
+ipc_hash_insert(
+	ipc_space_t	space,
+	ipc_object_t	obj,
+	mach_port_t	name,
+	ipc_entry_t	entry)
+{
+	mach_port_index_t index = MACH_PORT_INDEX(name);
+	boolean_t in_table;
+
+	/* table entries use the local hash; splay-tree entries the global */
+	in_table = (index < space->is_table_size) &&
+		   (entry == &space->is_table[index]);
+
+	if (in_table)
+		ipc_hash_local_insert(space, obj, index, entry);
+	else
+		ipc_hash_global_insert(space, obj, name,
+				       (ipc_tree_entry_t) entry);
+}
+
+/*
+ *	Routine:	ipc_hash_delete
+ *	Purpose:
+ *		Deletes an entry from the appropriate reverse hash table.
+ *	Conditions:
+ *		The space must be write-locked.
+ */
+
+void
+ipc_hash_delete(
+	ipc_space_t	space,
+	ipc_object_t	obj,
+	mach_port_t	name,
+	ipc_entry_t	entry)
+{
+	mach_port_index_t index = MACH_PORT_INDEX(name);
+	boolean_t in_table;
+
+	/* table entries use the local hash; splay-tree entries the global */
+	in_table = (index < space->is_table_size) &&
+		   (entry == &space->is_table[index]);
+
+	if (in_table)
+		ipc_hash_local_delete(space, obj, index, entry);
+	else
+		ipc_hash_global_delete(space, obj, name,
+				       (ipc_tree_entry_t) entry);
+}
+
+/*
+ *	The global reverse hash table holds splay tree entries.
+ *	It is a simple open-chaining hash table with singly-linked buckets.
+ *	Each bucket is locked separately, with an exclusive lock.
+ *	Within each bucket, move-to-front is used.
+ */
+
+typedef natural_t ipc_hash_index_t;
+
+/* Bucket count (forced to a power of two) and index mask;
+   both are set up by ipc_hash_init. */
+ipc_hash_index_t ipc_hash_global_size;
+ipc_hash_index_t ipc_hash_global_mask;
+
+/* Mix the space and object pointers into a bucket index.  The >>4/>>6
+   shifts discard low pointer bits — presumably always zero due to
+   allocation alignment (TODO confirm against zalloc/kalloc). */
+#define IH_GLOBAL_HASH(space, obj)					\
+	(((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) +		\
+	  (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) &		\
+	 ipc_hash_global_mask)
+
+typedef struct ipc_hash_global_bucket {
+	decl_simple_lock_data(, ihgb_lock_data)
+	ipc_tree_entry_t ihgb_head;
+} *ipc_hash_global_bucket_t;
+
+#define	IHGB_NULL	((ipc_hash_global_bucket_t) 0)
+
+#define	ihgb_lock_init(ihgb)	simple_lock_init(&(ihgb)->ihgb_lock_data)
+#define	ihgb_lock(ihgb)		simple_lock(&(ihgb)->ihgb_lock_data)
+#define	ihgb_unlock(ihgb)	simple_unlock(&(ihgb)->ihgb_lock_data)
+
+/* The bucket array itself, allocated in ipc_hash_init. */
+ipc_hash_global_bucket_t ipc_hash_global_table;
+
+/*
+ *	Routine:	ipc_hash_global_lookup
+ *	Purpose:
+ *		Converts (space, obj) -> (name, entry).
+ *		Looks in the global table, for splay tree entries.
+ *		Returns TRUE if an entry was found.
+ *	Conditions:
+ *		The space must be locked (read or write) throughout.
+ */
+
+boolean_t
+ipc_hash_global_lookup(
+	ipc_space_t		space,
+	ipc_object_t		obj,
+	mach_port_t		*namep,
+	ipc_tree_entry_t	*entryp)
+{
+	ipc_hash_global_bucket_t bucket;
+	ipc_tree_entry_t this, *last;
+
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+
+	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+	ihgb_lock(bucket);
+
+	if ((this = bucket->ihgb_head) != ITE_NULL) {
+		if ((this->ite_object == obj) &&
+		    (this->ite_space == space)) {
+			/* found it at front; no need to move */
+
+			*namep = this->ite_name;
+			*entryp = this;
+		} else for (last = &this->ite_next;
+			    (this = *last) != ITE_NULL;
+			    last = &this->ite_next) {
+			/* `last' points at the predecessor's link field,
+			   so the match can be unlinked in O(1) */
+			if ((this->ite_object == obj) &&
+			    (this->ite_space == space)) {
+				/* found it; move to front */
+
+				*last = this->ite_next;
+				this->ite_next = bucket->ihgb_head;
+				bucket->ihgb_head = this;
+
+				*namep = this->ite_name;
+				*entryp = this;
+				break;
+			}
+		}
+	}
+
+	ihgb_unlock(bucket);
+	/* `this' is ITE_NULL exactly when no match was found:
+	   either the bucket was empty or the scan ran off the chain */
+	return this != ITE_NULL;
+}
+
+/*
+ *	Routine:	ipc_hash_global_insert
+ *	Purpose:
+ *		Inserts an entry into the global reverse hash table.
+ *	Conditions:
+ *		The space must be write-locked.
+ */
+
+void
+ipc_hash_global_insert(
+	ipc_space_t	space,
+	ipc_object_t	obj,
+	mach_port_t	name,
+	ipc_tree_entry_t entry)
+{
+	ipc_hash_global_bucket_t b;
+
+	/* sanity: the entry must already describe (space, name, obj) */
+	assert(entry->ite_name == name);
+	assert(space != IS_NULL);
+	assert(entry->ite_space == space);
+	assert(obj != IO_NULL);
+	assert(entry->ite_object == obj);
+
+	/* account for one more hashed tree entry in this space */
+	space->is_tree_hash++;
+	assert(space->is_tree_hash <= space->is_tree_total);
+
+	b = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+
+	/* push onto the front of the bucket's singly-linked chain */
+	ihgb_lock(b);
+	entry->ite_next = b->ihgb_head;
+	b->ihgb_head = entry;
+	ihgb_unlock(b);
+}
+
+/*
+ *	Routine:	ipc_hash_global_delete
+ *	Purpose:
+ *		Deletes an entry from the global reverse hash table.
+ *	Conditions:
+ *		The space must be write-locked.
+ */
+
+void
+ipc_hash_global_delete(
+	ipc_space_t	space,
+	ipc_object_t	obj,
+	mach_port_t	name,
+	ipc_tree_entry_t entry)
+{
+	ipc_hash_global_bucket_t bucket;
+	ipc_tree_entry_t this, *last;
+
+	assert(entry->ite_name == name);
+	assert(space != IS_NULL);
+	assert(entry->ite_space == space);
+	assert(obj != IO_NULL);
+	assert(entry->ite_object == obj);
+
+	assert(space->is_tree_hash > 0);
+	space->is_tree_hash--;
+
+	bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
+	ihgb_lock(bucket);
+
+	/* walk the chain through predecessor link fields so the
+	   match can be unlinked without a separate prev pointer */
+	for (last = &bucket->ihgb_head;
+	     (this = *last) != ITE_NULL;
+	     last = &this->ite_next) {
+		if (this == entry) {
+			/* found it; remove from bucket */
+
+			*last = this->ite_next;
+			break;
+		}
+	}
+	/* the entry must have been in its bucket */
+	assert(this != ITE_NULL);
+
+	ihgb_unlock(bucket);
+}
+
+/*
+ *	Each space has a local reverse hash table, which holds
+ *	entries from the space's table.  In fact, the hash table
+ *	just uses a field (ie_index) in the table itself.
+ *
+ *	The local hash table is an open-addressing hash table,
+ *	which means that when a collision occurs, instead of
+ *	throwing the entry into a bucket, the entry is rehashed
+ *	to another position in the table.  In this case the rehash
+ *	is very simple: linear probing (ie, just increment the position).
+ *	This simple rehash makes deletions tractable (they're still a pain),
+ *	but it means that collisions tend to build up into clumps.
+ *
+ *	Because at least one entry in the table (index 0) is always unused,
+ *	there will always be room in the reverse hash table.  If a table
+ *	with n slots gets completely full, the reverse hash table will
+ *	have one giant clump of n-1 slots and one free slot somewhere.
+ *	Because entries are only entered into the reverse table if they
+ *	are pure send rights (not receive, send-once, port-set,
+ *	or dead-name rights), and free entries of course aren't entered,
+ *	I expect the reverse hash table won't get unreasonably full.
+ *
+ *	Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
+ *	pp. 135-142.) may be desirable here.  They can dramatically help
+ *	unsuccessful lookups.  But unsuccessful lookups are almost always
+ *	followed by insertions, and those slow down somewhat.  They
+ *	also can help deletions somewhat.  Successful lookups aren't affected.
+ *	So possibly a small win; probably nothing significant.
+ */
+
+/* Hash an object pointer into [0, size).  The >>6 discards low pointer
+   bits — presumably zero due to allocation alignment (TODO confirm). */
+#define	IH_LOCAL_HASH(obj, size)				\
+		((((mach_port_index_t) (obj)) >> 6) % (size))
+
+/*
+ *	Routine:	ipc_hash_local_lookup
+ *	Purpose:
+ *		Converts (space, obj) -> (name, entry).
+ *		Looks in the space's local table, for table entries.
+ *		Returns TRUE if an entry was found.
+ *	Conditions:
+ *		The space must be locked (read or write) throughout.
+ */
+
+boolean_t
+ipc_hash_local_lookup(
+	ipc_space_t	space,
+	ipc_object_t	obj,
+	mach_port_t	*namep,
+	ipc_entry_t	*entryp)
+{
+	ipc_entry_t table;
+	ipc_entry_num_t size;
+	mach_port_index_t hindex, index;
+
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+
+	table = space->is_table;
+	size = space->is_table_size;
+	hindex = IH_LOCAL_HASH(obj, size);
+
+	/*
+	 *	Ideally, table[hindex].ie_index is the name we want.
+	 *	However, must check ie_object to verify this,
+	 *	because collisions can happen.  In case of a collision,
+	 *	search farther along in the clump.
+	 *	(Termination: index 0 is always free, so some slot has
+	 *	ie_index == 0 and the probe cannot loop forever.)
+	 */
+
+	while ((index = table[hindex].ie_index) != 0) {
+		ipc_entry_t entry = &table[index];
+
+		if (entry->ie_object == obj) {
+			/* rebuild the full name from index + generation */
+			*namep = MACH_PORT_MAKEB(index, entry->ie_bits);
+			*entryp = entry;
+			return TRUE;
+		}
+
+		/* linear probe, wrapping at the end of the table */
+		if (++hindex == size)
+			hindex = 0;
+	}
+
+	return FALSE;
+}
+
+/*
+ *	Routine:	ipc_hash_local_insert
+ *	Purpose:
+ *		Inserts an entry into the space's reverse hash table.
+ *	Conditions:
+ *		The space must be write-locked.
+ */
+
+void
+ipc_hash_local_insert(
+	ipc_space_t		space,
+	ipc_object_t		obj,
+	mach_port_index_t	index,
+	ipc_entry_t		entry)
+{
+	ipc_entry_t table;
+	ipc_entry_num_t size;
+	mach_port_index_t probe;
+
+	assert(index != 0);
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+
+	table = space->is_table;
+	size = space->is_table_size;
+	probe = IH_LOCAL_HASH(obj, size);
+
+	assert(entry == &table[index]);
+	assert(entry->ie_object == obj);
+
+	/*
+	 *	Linear-probe from the natural slot to the first free
+	 *	slot (ie_index == 0), wrapping at the end of the table,
+	 *	and record the entry's index there.  Index 0 is always
+	 *	unallocated, so a free slot is guaranteed to exist and
+	 *	the scan terminates.
+	 */
+
+	for (; table[probe].ie_index != 0; ) {
+		if (++probe == size)
+			probe = 0;
+	}
+
+	table[probe].ie_index = index;
+}
+
+/*
+ *	Routine:	ipc_hash_local_delete
+ *	Purpose:
+ *		Deletes an entry from the space's reverse hash table.
+ *	Conditions:
+ *		The space must be write-locked.
+ */
+
+void
+ipc_hash_local_delete(
+	ipc_space_t		space,
+	ipc_object_t		obj,
+	mach_port_index_t	index,
+	ipc_entry_t		entry)
+{
+	ipc_entry_t table;
+	ipc_entry_num_t size;
+	mach_port_index_t hindex, dindex;
+
+	assert(index != MACH_PORT_NULL);
+	assert(space != IS_NULL);
+	assert(obj != IO_NULL);
+
+	table = space->is_table;
+	size = space->is_table_size;
+	hindex = IH_LOCAL_HASH(obj, size);
+
+	assert(entry == &table[index]);
+	assert(entry->ie_object == obj);
+
+	/*
+	 *	First check we have the right hindex for this index.
+	 *	In case of collision, we have to search farther
+	 *	along in this clump.
+	 */
+
+	while (table[hindex].ie_index != index) {
+		if (table[hindex].ie_index == 0)
+		{
+			/* entry missing from the hash: log once and bail
+			   rather than corrupting the table (defensive;
+			   indicates an upstream bookkeeping bug) */
+			static int gak = 0;
+			if (gak == 0)
+			{
+				printf("gak! entry wasn't in hash table!\n");
+				gak = 1;
+			}
+			return;
+		}
+		if (++hindex == size)
+			hindex = 0;
+	}
+
+	/*
+	 *	Now we want to set table[hindex].ie_index = 0.
+	 *	But if we aren't the last index in a clump,
+	 *	this might cause problems for lookups of objects
+	 *	farther along in the clump that are displaced
+	 *	due to collisions.  Searches for them would fail
+	 *	at hindex instead of succeeding.
+	 *
+	 *	So we must check the clump after hindex for objects
+	 *	that are so displaced, and move one up to the new hole.
+	 *
+	 *	hindex - index of new hole in the clump
+	 *	dindex - index we are checking for a displaced object
+	 *
+	 *	When we move a displaced object up into the hole,
+	 *	it creates a new hole, and we have to repeat the process
+	 *	until we get to the end of the clump.
+	 */
+
+	/* outer loop: fill the current hole (possibly with 0 at the
+	   clump's end); inner loop: find what should fill it */
+	for (dindex = hindex; index != 0; hindex = dindex) {
+		for (;;) {
+			mach_port_index_t tindex;
+			ipc_object_t tobj;
+
+			if (++dindex == size)
+				dindex = 0;
+			assert(dindex != hindex);
+
+			/* are we at the end of the clump? */
+
+			index = table[dindex].ie_index;
+			if (index == 0)
+				break;
+
+			/* is this a displaced object? */
+
+			tobj = table[index].ie_object;
+			assert(tobj != IO_NULL);
+			tindex = IH_LOCAL_HASH(tobj, size);
+
+			/* displaced iff its natural slot tindex lies
+			   outside (hindex, dindex] modulo wraparound */
+			if ((dindex < hindex) ?
+			    ((dindex < tindex) && (tindex <= hindex)) :
+			    ((dindex < tindex) || (tindex <= hindex)))
+				break;
+		}
+
+		table[hindex].ie_index = index;
+	}
+}
+
+/*
+ *	Routine:	ipc_hash_init
+ *	Purpose:
+ *		Initialize the reverse hash table implementation.
+ */
+
+void
+ipc_hash_init(void)
+{
+	ipc_hash_index_t i;
+
+	/* if not configured, initialize ipc_hash_global_size */
+
+	if (ipc_hash_global_size == 0) {
+		ipc_hash_global_size = ipc_tree_entry_max >> 8;
+		if (ipc_hash_global_size < 32)
+			ipc_hash_global_size = 32;
+	}
+
+	/* make sure it is a power of two */
+
+	ipc_hash_global_mask = ipc_hash_global_size - 1;
+	if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
+		natural_t bit;
+
+		/* round up to closest power of two */
+
+		/* OR successive low bits into the mask until
+		   mask+1 is a power of two covering the old size */
+		for (bit = 1;; bit <<= 1) {
+			ipc_hash_global_mask |= bit;
+			ipc_hash_global_size = ipc_hash_global_mask + 1;
+
+			if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
+				break;
+		}
+	}
+
+	/* allocate ipc_hash_global_table */
+
+	ipc_hash_global_table = (ipc_hash_global_bucket_t)
+		kalloc((vm_size_t) (ipc_hash_global_size *
+				    sizeof(struct ipc_hash_global_bucket)));
+	/* NOTE(review): allocation failure is caught only by this assert,
+	   which presumably compiles away in non-debug kernels — confirm */
+	assert(ipc_hash_global_table != IHGB_NULL);
+
+	/* and initialize it */
+
+	for (i = 0; i < ipc_hash_global_size; i++) {
+		ipc_hash_global_bucket_t bucket;
+
+		bucket = &ipc_hash_global_table[i];
+		ihgb_lock_init(bucket);
+		bucket->ihgb_head = ITE_NULL;
+	}
+}
+
+#if	MACH_IPC_DEBUG
+
+/*
+ *	Routine:	ipc_hash_info
+ *	Purpose:
+ *		Return information about the global reverse hash table.
+ *		Fills the buffer with as much information as possible
+ *		and returns the desired size of the buffer.
+ *	Conditions:
+ *		Nothing locked.  The caller should provide
+ *		possibly-pageable memory.
+ */
+
+ipc_hash_index_t
+ipc_hash_info(
+	hash_info_bucket_t	*info,
+	mach_msg_type_number_t	count)
+{
+	ipc_hash_index_t i;
+
+	/* never report more buckets than exist */
+	if (count > ipc_hash_global_size)
+		count = ipc_hash_global_size;
+
+	for (i = 0; i < count; i++) {
+		ipc_hash_global_bucket_t bucket;
+		ipc_tree_entry_t ite;
+		unsigned int chain_len;
+
+		bucket = &ipc_hash_global_table[i];
+		chain_len = 0;
+
+		ihgb_lock(bucket);
+		for (ite = bucket->ihgb_head;
+		     ite != ITE_NULL;
+		     ite = ite->ite_next)
+			chain_len++;
+		ihgb_unlock(bucket);
+
+		/* don't touch pageable memory while holding locks */
+		info[i].hib_count = chain_len;
+	}
+
+	return ipc_hash_global_size;
+}
+
+#endif	/* MACH_IPC_DEBUG */
diff --git a/ipc/ipc_hash.h b/ipc/ipc_hash.h
new file mode 100644
index 0000000..f4c2f55
--- /dev/null
+++ b/ipc/ipc_hash.h
@@ -0,0 +1,94 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_hash.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of entry hash table operations.
+ */
+
+#ifndef	_IPC_IPC_HASH_H_
+#define	_IPC_IPC_HASH_H_
+
+#include <mach_ipc_debug.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+
+extern void
+ipc_hash_init();
+
+#if	MACH_IPC_DEBUG
+
+extern unsigned int
+ipc_hash_info(/* hash_info_bucket_t *, unsigned int */);
+
+#endif	/* MACH_IPC_DEBUG */
+
+extern boolean_t
+ipc_hash_lookup(/* ipc_space_t space, ipc_object_t obj,
+		   mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern void
+ipc_hash_insert(/* ipc_space_t space, ipc_object_t obj,
+		   mach_port_t name, ipc_entry_t entry */);
+
+extern void
+ipc_hash_delete(/* ipc_space_t space, ipc_object_t obj,
+		   mach_port_t name, ipc_entry_t entry */);
+
+/*
+ *	For use by functions that know what they're doing:
+ *	the global primitives, for splay tree entries,
+ *	and the local primitives, for table entries.
+ */
+
+extern boolean_t
+ipc_hash_global_lookup(/* ipc_space_t space, ipc_object_t obj,
+			  mach_port_t *namep, ipc_tree_entry_t *entryp */);
+
+extern void
+ipc_hash_global_insert(/* ipc_space_t space, ipc_object_t obj,
+			  mach_port_t name, ipc_tree_entry_t entry */);
+
+extern void
+ipc_hash_global_delete(/* ipc_space_t space, ipc_object_t obj,
+			  mach_port_t name, ipc_tree_entry_t entry */);
+
+extern boolean_t
+ipc_hash_local_lookup(/* ipc_space_t space, ipc_object_t obj,
+			 mach_port_t *namep, ipc_entry_t *entryp */);
+
+extern void
+ipc_hash_local_insert(/* ipc_space_t space, ipc_object_t obj,
+			 mach_port_index_t index, ipc_entry_t entry */);
+
+extern void
+ipc_hash_local_delete(/* ipc_space_t space, ipc_object_t obj,
+			 mach_port_index_t index, ipc_entry_t entry */);
+
+#endif	/* _IPC_IPC_HASH_H_ */
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
new file mode 100644
index 0000000..29b0819
--- /dev/null
+++ b/ipc/ipc_init.c
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_init.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to initialize the IPC system.
+ */
+
+#include <mach/kern_return.h>
+#include <kern/mach_param.h>
+#include <kern/ipc_host.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_init.h>
+
+
+
+/* kernel submap for IPC use, created by ipc_init() via kmem_suballoc */
+vm_map_t ipc_kernel_map;
+vm_size_t ipc_kernel_map_size = 1024 * 1024;
+
+/* upper bounds used in ipc_bootstrap to size the IPC object zones;
+   defaults presumably come from kern/mach_param.h -- confirm */
+int ipc_space_max = SPACE_MAX;
+int ipc_tree_entry_max = ITE_MAX;
+int ipc_port_max = PORT_MAX;
+int ipc_pset_max = SET_MAX;
+
+/*
+ * Routine: ipc_bootstrap
+ * Purpose:
+ * Initialization needed before the kernel task
+ * can be created.
+ */
+
+void
+ipc_bootstrap(void)
+{
+	kern_return_t kr;
+
+	/* initialize the global port locks */
+	ipc_port_multiple_lock_init();
+
+	ipc_port_timestamp_lock_init();
+	ipc_port_timestamp_data = 0;
+
+	/* create the zones that back all IPC object allocation */
+	ipc_space_zone = zinit(sizeof(struct ipc_space),
+			       ipc_space_max * sizeof(struct ipc_space),
+			       sizeof(struct ipc_space),
+			       IPC_ZONE_TYPE, "ipc spaces");
+
+	ipc_tree_entry_zone =
+		zinit(sizeof(struct ipc_tree_entry),
+			ipc_tree_entry_max * sizeof(struct ipc_tree_entry),
+			sizeof(struct ipc_tree_entry),
+			IPC_ZONE_TYPE, "ipc tree entries");
+
+	ipc_object_zones[IOT_PORT] =
+		zinit(sizeof(struct ipc_port),
+		      ipc_port_max * sizeof(struct ipc_port),
+		      sizeof(struct ipc_port),
+		      ZONE_EXHAUSTIBLE, "ipc ports");
+
+	ipc_object_zones[IOT_PORT_SET] =
+		zinit(sizeof(struct ipc_pset),
+		      ipc_pset_max * sizeof(struct ipc_pset),
+		      sizeof(struct ipc_pset),
+		      IPC_ZONE_TYPE, "ipc port sets");
+
+	/* create special spaces */
+
+	kr = ipc_space_create_special(&ipc_space_kernel);
+	assert(kr == KERN_SUCCESS);
+
+	kr = ipc_space_create_special(&ipc_space_reply);
+	assert(kr == KERN_SUCCESS);
+
+#if	NORMA_IPC
+	kr = ipc_space_create_special(&ipc_space_remote);
+	assert(kr == KERN_SUCCESS);
+#endif	/* NORMA_IPC */
+
+	/* initialize modules with hidden data structures */
+
+	ipc_table_init();
+	ipc_notify_init();
+	ipc_hash_init();
+	ipc_marequest_init();
+}
+
+/*
+ * Routine: ipc_init
+ * Purpose:
+ * Final initialization of the IPC system.
+ */
+
+void
+ipc_init(void)
+{
+	vm_offset_t min, max;
+
+	/* carve the IPC submap out of the kernel map; min/max receive
+	   the submap's bounds and are not otherwise used here */
+	ipc_kernel_map = kmem_suballoc(kernel_map, &min, &max,
+				       ipc_kernel_map_size, TRUE);
+
+	ipc_host_init();
+}
diff --git a/ipc/ipc_init.h b/ipc/ipc_init.h
new file mode 100644
index 0000000..b2f1dd4
--- /dev/null
+++ b/ipc/ipc_init.h
@@ -0,0 +1,58 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_init.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of functions to initialize the IPC system.
+ */
+
+#ifndef	_IPC_IPC_INIT_H_
+#define _IPC_IPC_INIT_H_
+
+/* all IPC zones should be exhaustible */
+#define IPC_ZONE_TYPE	ZONE_EXHAUSTIBLE
+
+/* zone-sizing limits, defined in ipc_init.c (defaults presumably
+   from kern/mach_param.h -- SPACE_MAX et al.; confirm) */
+extern int ipc_space_max;
+extern int ipc_tree_entry_max;
+extern int ipc_port_max;
+extern int ipc_pset_max;
+
+/*
+ * Exported interfaces
+ */
+
+/* IPC initialization needed before creation of kernel task */
+extern void ipc_bootstrap(void);
+
+/* Remaining IPC initialization */
+extern void ipc_init(void);
+
+#endif	/* _IPC_IPC_INIT_H_ */
diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
new file mode 100644
index 0000000..d860fd1
--- /dev/null
+++ b/ipc/ipc_kmsg.c
@@ -0,0 +1,3484 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_kmsg.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Operations on kernel messages.
+ */
+
+#include <cpus.h>
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+#include <norma_vm.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/kalloc.h>
+#include <vm/vm_map.h>
+#include <vm/vm_object.h>
+#include <vm/vm_kern.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+
+#include <ipc/ipc_machdep.h>
+
+extern int copyinmap();
+extern int copyoutmap();
+void ipc_msg_print();	/* forward */
+
+/* pointer-alignment helpers used when parsing in-line message bodies */
+#define is_misaligned(x)	( ((vm_offset_t)(x)) & (sizeof(vm_offset_t)-1) )
+#define ptr_align(x)	\
+	( ( ((vm_offset_t)(x)) + (sizeof(vm_offset_t)-1) ) & ~(sizeof(vm_offset_t)-1) )
+
+/* per-cpu one-deep cache of kmsg buffers; filled by ipc_kmsg_put,
+   consumed by ipc_kmsg_get via ikm_cache() */
+ipc_kmsg_t ipc_kmsg_cache[NCPUS];
+
+/*
+ * Routine: ipc_kmsg_enqueue
+ * Purpose:
+ * Enqueue a kmsg.
+ */
+
+void
+ipc_kmsg_enqueue(
+	ipc_kmsg_queue_t	queue,
+	ipc_kmsg_t		kmsg)
+{
+	/* out-of-line (function) form of ipc_kmsg_enqueue_macro */
+	ipc_kmsg_enqueue_macro(queue, kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_dequeue
+ * Purpose:
+ * Dequeue and return a kmsg.
+ */
+
+ipc_kmsg_t
+ipc_kmsg_dequeue(
+	ipc_kmsg_queue_t	queue)
+{
+	/* peek at the head; an empty queue yields IKM_NULL */
+	ipc_kmsg_t head = ipc_kmsg_queue_first(queue);
+
+	if (head == IKM_NULL)
+		return IKM_NULL;
+
+	ipc_kmsg_rmqueue_first_macro(queue, head);
+	return head;
+}
+
+/*
+ * Routine: ipc_kmsg_rmqueue
+ * Purpose:
+ * Pull a kmsg out of a queue.
+ */
+
+void
+ipc_kmsg_rmqueue(
+	ipc_kmsg_queue_t	queue,
+	ipc_kmsg_t		kmsg)
+{
+	ipc_kmsg_t next, prev;
+
+	assert(queue->ikmq_base != IKM_NULL);
+
+	next = kmsg->ikm_next;
+	prev = kmsg->ikm_prev;
+
+	if (next == kmsg) {
+		/* kmsg is the only element of the circular list */
+		assert(prev == kmsg);
+		assert(queue->ikmq_base == kmsg);
+
+		queue->ikmq_base = IKM_NULL;
+	} else {
+		/* if removing the head, advance it first */
+		if (queue->ikmq_base == kmsg)
+			queue->ikmq_base = next;
+
+		/* splice kmsg out of the doubly-linked ring */
+		next->ikm_prev = prev;
+		prev->ikm_next = next;
+	}
+	/* XXX	Temporary debug logic */
+	/* poison the links so a stale re-use is caught early */
+	kmsg->ikm_next = IKM_BOGUS;
+	kmsg->ikm_prev = IKM_BOGUS;
+}
+
+/*
+ * Routine: ipc_kmsg_queue_next
+ * Purpose:
+ * Return the kmsg following the given kmsg.
+ * (Or IKM_NULL if it is the last one in the queue.)
+ */
+
+ipc_kmsg_t
+ipc_kmsg_queue_next(
+	ipc_kmsg_queue_t	queue,
+	ipc_kmsg_t		kmsg)
+{
+	ipc_kmsg_t successor;
+
+	assert(queue->ikmq_base != IKM_NULL);
+
+	/* the list is circular: wrapping back to the base means
+	   kmsg was the last element */
+	successor = kmsg->ikm_next;
+	return (successor == queue->ikmq_base) ? IKM_NULL : successor;
+}
+
+/*
+ * Routine: ipc_kmsg_destroy
+ * Purpose:
+ * Destroys a kernel message. Releases all rights,
+ * references, and memory held by the message.
+ * Frees the message.
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_destroy(
+	ipc_kmsg_t	kmsg)
+{
+	ipc_kmsg_queue_t queue;
+	boolean_t empty;
+
+	/*
+	 *	ipc_kmsg_clean can cause more messages to be destroyed.
+	 *	Curtail recursion by queueing messages.  If a message
+	 *	is already queued, then this is a recursive call.
+	 */
+
+	queue = &current_thread()->ith_messages;
+	empty = ipc_kmsg_queue_empty(queue);
+	ipc_kmsg_enqueue(queue, kmsg);
+
+	if (empty) {
+		/* must leave kmsg in queue while cleaning it */
+		/* outermost call: drain everything that accumulates,
+		   including messages queued by the recursive calls
+		   triggered inside ipc_kmsg_clean */
+
+		while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
+			ipc_kmsg_clean(kmsg);
+			ipc_kmsg_rmqueue(queue, kmsg);
+			ikm_free(kmsg);
+		}
+	}
+	/* else: a non-recursive caller further up the stack frees it */
+}
+
+/*
+ * Routine: ipc_kmsg_clean_body
+ * Purpose:
+ * Cleans the body of a kernel message.
+ * Releases all rights, references, and memory.
+ *
+ * The last type/data pair might stretch past eaddr.
+ * (See the usage in ipc_kmsg_copyout.)
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_clean_body(saddr, eaddr)
+	vm_offset_t saddr;
+	vm_offset_t eaddr;
+{
+	/* walk the type/data pairs between saddr and eaddr,
+	   destroying every port right and every out-of-line region */
+	while (saddr < eaddr) {
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, is_port;
+		vm_size_t length;
+
+		type = (mach_msg_type_long_t *) saddr;
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		if (((mach_msg_type_t*)type)->msgt_longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				/* skip the padding and re-read the descriptor */
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			/* short-form descriptor carries the fields itself */
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		length = ((number * size) + 7) >> 3;
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		if (is_port) {
+			ipc_object_t *objects;
+			mach_msg_type_number_t i;
+
+			if (is_inline) {
+				objects = (ipc_object_t *) saddr;
+				/* sanity check */
+				/* clamp number so we never read past eaddr
+				   (the last pair may stretch past eaddr) */
+				while (eaddr < (vm_offset_t)&objects[number]) number--;
+			} else {
+				/* out-of-line: saddr holds a pointer to the array */
+				objects = (ipc_object_t *)
+					* (vm_offset_t *) saddr;
+			}
+
+			/* destroy port rights carried in the message */
+
+			for (i = 0; i < number; i++) {
+				ipc_object_t object = objects[i];
+
+				if (!IO_VALID(object))
+					continue;
+
+				ipc_object_destroy(object, name);
+			}
+		}
+
+		if (is_inline) {
+			/* inline data sizes round up to int boundaries */
+
+			saddr += (length + 3) &~ 3;
+		} else {
+			vm_offset_t data = * (vm_offset_t *) saddr;
+
+			/* destroy memory carried in the message */
+
+			if (length == 0)
+				assert(data == 0);
+			else if (is_port)
+				/* port arrays were kalloc'ed on copyin */
+				kfree(data, length);
+			else
+				/* other OOL data lives in a vm_map copy object */
+				vm_map_copy_discard((vm_map_copy_t) data);
+
+			saddr += sizeof(vm_offset_t);
+		}
+	}
+}
+
+/*
+ * Routine: ipc_kmsg_clean
+ * Purpose:
+ * Cleans a kernel message. Releases all rights,
+ * references, and memory held by the message.
+ * Conditions:
+ * No locks held.
+ */
+
+void
+ipc_kmsg_clean(kmsg)
+	ipc_kmsg_t kmsg;
+{
+	ipc_marequest_t marequest;
+	ipc_object_t object;
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+
+	/* release the marequest attached to this message, if any */
+	marequest = kmsg->ikm_marequest;
+	if (marequest != IMAR_NULL)
+		ipc_marequest_destroy(marequest);
+
+	/* destroy the rights carried in the header's port fields */
+	object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	if (IO_VALID(object))
+		ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+
+	object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	if (IO_VALID(object))
+		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
+
+	/* complex messages also carry rights and memory in the body */
+	if (mbits & MACH_MSGH_BITS_COMPLEX) {
+		vm_offset_t saddr, eaddr;
+
+		saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+		eaddr = (vm_offset_t) &kmsg->ikm_header +
+				kmsg->ikm_header.msgh_size;
+
+		ipc_kmsg_clean_body(saddr, eaddr);
+	}
+}
+
+/*
+ * Routine: ipc_kmsg_clean_partial
+ * Purpose:
+ * Cleans a partially-acquired kernel message.
+ * eaddr is the address of the type specification
+ * in the body of the message that contained the error.
+ * If dolast, the memory and port rights in this last
+ * type spec are also cleaned. In that case, number
+ * specifies the number of port rights to clean.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_clean_partial(kmsg, eaddr, dolast, number)
+	ipc_kmsg_t kmsg;
+	vm_offset_t eaddr;
+	boolean_t dolast;
+	mach_msg_type_number_t number;
+{
+	ipc_object_t object;
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+	vm_offset_t saddr;
+
+	/* a partially-copied-in message never has a marequest yet */
+	assert(kmsg->ikm_marequest == IMAR_NULL);
+
+	/* header copyin already succeeded, so dest must be valid */
+	object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	assert(IO_VALID(object));
+	ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
+
+	object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	if (IO_VALID(object))
+		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
+
+	/* clean the fully-copied-in part of the body, up to eaddr */
+	saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+	ipc_kmsg_clean_body(saddr, eaddr);
+
+	if (dolast) {
+		/* also clean the last, partially-processed type spec;
+		   `number' is the count of port rights already acquired */
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t rnumber;
+		boolean_t is_inline, is_port;
+		vm_size_t length;
+
+xxx:		type = (mach_msg_type_long_t *) eaddr;
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		if (((mach_msg_type_t*)type)->msgt_longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				/* skip padding, then re-read the descriptor */
+				eaddr = ptr_align(eaddr);
+				goto xxx;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			rnumber = type->msgtl_number;
+			eaddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			rnumber = ((mach_msg_type_t*)type)->msgt_number;
+			eaddr += sizeof(mach_msg_type_t);
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			eaddr = ptr_align(eaddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		/* note: length uses the declared count (rnumber),
+		   while the rights loop below uses the acquired
+		   count passed in as `number' */
+		length = ((rnumber * size) + 7) >> 3;
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		if (is_port) {
+			ipc_object_t *objects;
+			mach_msg_type_number_t i;
+
+			objects = (ipc_object_t *)
+				(is_inline ? eaddr : * (vm_offset_t *) eaddr);
+
+			/* destroy port rights carried in the message */
+
+			for (i = 0; i < number; i++) {
+				ipc_object_t obj = objects[i];
+
+				if (!IO_VALID(obj))
+					continue;
+
+				ipc_object_destroy(obj, name);
+			}
+		}
+
+		if (!is_inline) {
+			vm_offset_t data = * (vm_offset_t *) eaddr;
+
+			/* destroy memory carried in the message */
+
+			if (length == 0)
+				assert(data == 0);
+			else if (is_port)
+				kfree(data, length);
+			else
+				vm_map_copy_discard((vm_map_copy_t) data);
+		}
+	}
+}
+
+/*
+ * Routine: ipc_kmsg_free
+ * Purpose:
+ * Free a kernel message buffer.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_free(kmsg)
+	ipc_kmsg_t kmsg;
+{
+	vm_size_t size = kmsg->ikm_size;
+
+	/*
+	 *	The recorded size tells us which allocator the buffer
+	 *	came from; return it to the matching one.
+	 */
+	switch (size) {
+#if	NORMA_IPC
+	case IKM_SIZE_NORMA:
+		/* return it to the norma ipc code */
+		norma_kmsg_put(kmsg);
+		break;
+#endif	/* NORMA_IPC */
+
+	case IKM_SIZE_NETWORK:
+		/* return it to the network code */
+		net_kmsg_put(kmsg);
+		break;
+
+	default:
+		/* ordinary kalloc'ed buffer */
+		kfree((vm_offset_t) kmsg, size);
+		break;
+	}
+}
+
+/*
+ * Routine: ipc_kmsg_get
+ * Purpose:
+ * Allocates a kernel message buffer.
+ * Copies a user message to the message buffer.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Acquired a message buffer.
+ * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
+ * MACH_SEND_MSG_TOO_SMALL Message size not long-word multiple.
+ * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
+ * MACH_SEND_INVALID_DATA Couldn't copy message data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_get(msg, size, kmsgp)
+	mach_msg_header_t *msg;
+	mach_msg_size_t size;
+	ipc_kmsg_t *kmsgp;
+{
+	ipc_kmsg_t kmsg;
+
+	/* reject messages smaller than a header or not long-word aligned */
+	if ((size < sizeof(mach_msg_header_t)) || (size & 3))
+		return MACH_SEND_MSG_TOO_SMALL;
+
+	if (size <= IKM_SAVED_MSG_SIZE) {
+		/* small message: try the per-cpu one-deep buffer cache */
+		kmsg = ikm_cache();
+		if (kmsg != IKM_NULL) {
+			ikm_cache() = IKM_NULL;
+			ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
+		} else {
+			kmsg = ikm_alloc(IKM_SAVED_MSG_SIZE);
+			if (kmsg == IKM_NULL)
+				return MACH_SEND_NO_BUFFER;
+			ikm_init(kmsg, IKM_SAVED_MSG_SIZE);
+		}
+	} else {
+		/* large message: always allocate an exact-size buffer */
+		kmsg = ikm_alloc(size);
+		if (kmsg == IKM_NULL)
+			return MACH_SEND_NO_BUFFER;
+		ikm_init(kmsg, size);
+	}
+
+	/* copy the user's message in; free the buffer on fault */
+	if (copyinmsg((char *) msg, (char *) &kmsg->ikm_header, size)) {
+		ikm_free(kmsg);
+		return MACH_SEND_INVALID_DATA;
+	}
+
+	kmsg->ikm_header.msgh_size = size;
+	*kmsgp = kmsg;
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_get_from_kernel
+ * Purpose:
+ * Allocates a kernel message buffer.
+ * Copies a kernel message to the message buffer.
+ * Only resource errors are allowed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Acquired a message buffer.
+ * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
+ */
+
+/* NOTE(review): `extern' on a function definition is unusual but
+   harmless; consider dropping it for consistency with the rest
+   of the file. */
+extern mach_msg_return_t
+ipc_kmsg_get_from_kernel(msg, size, kmsgp)
+	mach_msg_header_t *msg;
+	mach_msg_size_t size;
+	ipc_kmsg_t *kmsgp;
+{
+	ipc_kmsg_t kmsg;
+
+	/* kernel callers must supply a well-formed size */
+	assert(size >= sizeof(mach_msg_header_t));
+	assert((size & 3) == 0);
+
+	kmsg = ikm_alloc(size);
+	if (kmsg == IKM_NULL)
+		return MACH_SEND_NO_BUFFER;
+	ikm_init(kmsg, size);
+
+	/* source is kernel memory, so a plain bcopy suffices */
+	bcopy((char *) msg, (char *) &kmsg->ikm_header, size);
+
+	kmsg->ikm_header.msgh_size = size;
+	*kmsgp = kmsg;
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_put
+ * Purpose:
+ * Copies a message buffer to a user message.
+ * Copies only the specified number of bytes.
+ * Frees the message buffer.
+ * Conditions:
+ * Nothing locked. The message buffer must have clean
+ * header (ikm_marequest) fields.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied data out of message buffer.
+ * MACH_RCV_INVALID_DATA Couldn't copy to user message.
+ */
+
+mach_msg_return_t
+ipc_kmsg_put(msg, kmsg, size)
+	mach_msg_header_t *msg;
+	ipc_kmsg_t kmsg;
+	mach_msg_size_t size;
+{
+	mach_msg_return_t mr;
+
+	ikm_check_initialized(kmsg, kmsg->ikm_size);
+
+	/* copy out to the user; report (but don't abort on) faults */
+	if (copyoutmsg((char *) &kmsg->ikm_header, (char *) msg, size))
+		mr = MACH_RCV_INVALID_DATA;
+	else
+		mr = MACH_MSG_SUCCESS;
+
+	/* recycle a standard-size buffer into the per-cpu cache
+	   if the cache slot is free; otherwise release it */
+	if ((kmsg->ikm_size == IKM_SAVED_KMSG_SIZE) &&
+	    (ikm_cache() == IKM_NULL))
+		ikm_cache() = kmsg;
+	else
+		ikm_free(kmsg);
+
+	return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_put_to_kernel
+ * Purpose:
+ * Copies a message buffer to a kernel message.
+ * Frees the message buffer.
+ * No errors allowed.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_put_to_kernel(
+	mach_msg_header_t	*msg,
+	ipc_kmsg_t		kmsg,
+	mach_msg_size_t		size)
+{
+#if	DIPC
+	assert(!KMSG_IN_DIPC(kmsg));
+#endif	/* DIPC */
+
+	/* destination is kernel memory: the copy cannot fault,
+	   so there is no status to report */
+	memcpy((void *) msg, (const void *) &kmsg->ikm_header, size);
+
+	ikm_free(kmsg);
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_header
+ * Purpose:
+ * "Copy-in" port rights in the header of a message.
+ * Operates atomically; if it doesn't succeed the
+ * message header and the space are left untouched.
+ * If it does succeed the remote/local port fields
+ * contain object pointers instead of port names,
+ * and the bits field is updated. The destination port
+ * will be a valid port pointer.
+ *
+ * The notify argument implements the MACH_SEND_CANCEL option.
+ * If it is not MACH_PORT_NULL, it should name a receive right.
+ * If the processing of the destination port would generate
+ * a port-deleted notification (because the right for the
+ * destination port is destroyed and it had a request for
+ * a dead-name notification registered), and the port-deleted
+ * notification would be sent to the named receive right,
+ * then it isn't sent and the send-once right for the notify
+ * port is quietly destroyed.
+ *
+ * [MACH_IPC_COMPAT] There is an atomicity problem if the
+ * reply port is a compat entry and dies at an inopportune
+ * time. This doesn't have any serious consequences
+ * (an observant user task might conceivably notice that
+ * the destination and reply ports were handled inconsistently),
+ * only happens in compat mode, and is extremely unlikely.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_DEST The space is dead.
+ * MACH_SEND_INVALID_NOTIFY
+ * Notify is non-null and doesn't name a receive right.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin_header(msg, space, notify)
+ mach_msg_header_t *msg;
+ ipc_space_t space;
+ mach_port_t notify;
+{
+ mach_msg_bits_t mbits = msg->msgh_bits &~ MACH_MSGH_BITS_CIRCULAR;
+ mach_port_t dest_name = msg->msgh_remote_port;
+ mach_port_t reply_name = msg->msgh_local_port;
+ kern_return_t kr;
+
+#ifndef MIGRATING_THREADS
+ /* first check for common cases */
+
+ if (notify == MACH_PORT_NULL) switch (MACH_MSGH_BITS_PORTS(mbits)) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0): {
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+
+ /* sending an asynchronous message */
+
+ if (reply_name != MACH_PORT_NULL)
+ break;
+
+ is_read_lock(space);
+ if (!space->is_active)
+ goto abort_async;
+
+ /* optimized ipc_entry_lookup */
+
+ {
+ mach_port_index_t index = MACH_PORT_INDEX(dest_name);
+ mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
+
+ if (index >= space->is_table_size)
+ goto abort_async;
+
+ entry = &space->is_table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+ (gen | MACH_PORT_TYPE_SEND))
+ goto abort_async;
+ }
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ /* can unlock space now without compromising atomicity */
+ is_read_unlock(space);
+
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ break;
+ }
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+ ip_unlock(dest_port);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_async:
+ is_read_unlock(space);
+ break;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+ ipc_entry_num_t size;
+ ipc_entry_t table;
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port, reply_port;
+
+ /* sending a request message */
+
+ is_read_lock(space);
+ if (!space->is_active)
+ goto abort_request;
+
+ size = space->is_table_size;
+ table = space->is_table;
+
+ /* optimized ipc_entry_lookup of dest_name */
+
+ {
+ mach_port_index_t index = MACH_PORT_INDEX(dest_name);
+ mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
+
+ if (index >= size)
+ goto abort_request;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+ (gen | MACH_PORT_TYPE_SEND))
+ goto abort_request;
+ }
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ /* optimized ipc_entry_lookup of reply_name */
+
+ {
+ mach_port_index_t index = MACH_PORT_INDEX(reply_name);
+ mach_port_gen_t gen = MACH_PORT_GEN(reply_name);
+
+ if (index >= size)
+ goto abort_request;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_RECEIVE)) !=
+ (gen | MACH_PORT_TYPE_RECEIVE))
+ goto abort_request;
+ }
+
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+
+ /*
+ * To do an atomic copyin, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) || !ip_lock_try(reply_port)) {
+ ip_unlock(dest_port);
+ goto abort_request;
+ }
+ /* can unlock space now without compromising atomicity */
+ is_read_unlock(space);
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+ ip_unlock(dest_port);
+
+ assert(ip_active(reply_port));
+ assert(reply_port->ip_receiver_name == reply_name);
+ assert(reply_port->ip_receiver == space);
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+ ip_unlock(reply_port);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ msg->msgh_local_port = (mach_port_t) reply_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_request:
+ is_read_unlock(space);
+ break;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+ mach_port_index_t index;
+ mach_port_gen_t gen;
+ ipc_entry_t table;
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ ipc_port_t dest_port;
+
+ /* sending a reply message */
+
+ if (reply_name != MACH_PORT_NULL)
+ break;
+
+ is_write_lock(space);
+ if (!space->is_active)
+ goto abort_reply;
+
+ /* optimized ipc_entry_lookup */
+
+ table = space->is_table;
+
+ index = MACH_PORT_INDEX(dest_name);
+ gen = MACH_PORT_GEN(dest_name);
+
+ if (index >= space->is_table_size)
+ goto abort_reply;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number, collision bit, and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|IE_BITS_COLLISION|
+ MACH_PORT_TYPE_SEND_ONCE)) !=
+ (gen | MACH_PORT_TYPE_SEND_ONCE))
+ goto abort_reply;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ if (entry->ie_request != 0)
+ goto abort_reply;
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ goto abort_reply;
+ }
+
+ assert(dest_port->ip_sorights > 0);
+ ip_unlock(dest_port);
+
+ /* optimized ipc_entry_dealloc */
+
+ entry->ie_next = table->ie_next;
+ table->ie_next = index;
+ entry->ie_bits = gen;
+ entry->ie_object = IO_NULL;
+ is_write_unlock(space);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ 0));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ return MACH_MSG_SUCCESS;
+
+ abort_reply:
+ is_write_unlock(space);
+ break;
+ }
+
+ default:
+ /* don't bother optimizing */
+ break;
+ }
+#endif /* MIGRATING_THREADS */
+
+ {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ ipc_object_t dest_port, reply_port;
+ ipc_port_t dest_soright, reply_soright;
+ ipc_port_t notify_port = 0; /* '=0' to quiet gcc warnings */
+
+ if (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type))
+ return MACH_SEND_INVALID_HEADER;
+
+ if ((reply_type == 0) ?
+ (reply_name != MACH_PORT_NULL) :
+ !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))
+ return MACH_SEND_INVALID_HEADER;
+
+ is_write_lock(space);
+ if (!space->is_active)
+ goto invalid_dest;
+
+ if (notify != MACH_PORT_NULL) {
+ ipc_entry_t entry;
+
+ if (((entry = ipc_entry_lookup(space, notify)) == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)) {
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_NOTIFY;
+ }
+
+ notify_port = (ipc_port_t) entry->ie_object;
+ }
+
+ if (dest_name == reply_name) {
+ ipc_entry_t entry;
+ mach_port_t name = dest_name;
+
+ /*
+ * Destination and reply ports are the same!
+ * This is a little tedious to make atomic, because
+ * there are 25 combinations of dest_type/reply_type.
+ * However, most are easy. If either is move-sonce,
+ * then there must be an error. If either are
+ * make-send or make-sonce, then we must be looking
+ * at a receive right so the port can't die.
+ * The hard cases are the combinations of
+ * copy-send and make-send.
+ */
+
+ entry = ipc_entry_lookup(space, name);
+ if (entry == IE_NULL)
+ goto invalid_dest;
+
+ assert(reply_type != 0); /* because name not null */
+
+ if (!ipc_right_copyin_check(space, name, entry, reply_type))
+ goto invalid_reply;
+
+ if ((dest_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) ||
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE)) {
+ /*
+ * Why must there be an error? To get a valid
+ * destination, this entry must name a live
+ * port (not a dead name or dead port). However
+ * a successful move-sonce will destroy a
+ * live entry. Therefore the other copyin,
+ * whatever it is, would fail. We've already
+ * checked for reply port errors above,
+ * so report a destination error.
+ */
+
+ goto invalid_dest;
+ } else if ((dest_type == MACH_MSG_TYPE_MAKE_SEND) ||
+ (dest_type == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
+ (reply_type == MACH_MSG_TYPE_MAKE_SEND) ||
+ (reply_type == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
+ kr = ipc_right_copyin(space, name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /*
+ * Either dest or reply needs a receive right.
+ * We know the receive right is there, because
+ * of the copyin_check and copyin calls. Hence
+ * the port is not in danger of dying. If dest
+ * used the receive right, then the right needed
+ * by reply (and verified by copyin_check) will
+ * still be there.
+ */
+
+ assert(IO_VALID(dest_port));
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(dest_soright == IP_NULL);
+
+ kr = ipc_right_copyin(space, name, entry,
+ reply_type, TRUE,
+ &reply_port, &reply_soright);
+
+ assert(kr == KERN_SUCCESS);
+ assert(reply_port == dest_port);
+ assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+ assert(reply_soright == IP_NULL);
+ } else if ((dest_type == MACH_MSG_TYPE_COPY_SEND) &&
+ (reply_type == MACH_MSG_TYPE_COPY_SEND)) {
+ /*
+ * To make this atomic, just do one copy-send,
+ * and dup the send right we get out.
+ */
+
+ kr = ipc_right_copyin(space, name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
+ assert(dest_soright == IP_NULL);
+
+ /*
+ * It's OK if the port we got is dead now,
+ * so reply_port is IP_DEAD, because the msg
+ * won't go anywhere anyway.
+ */
+
+ reply_port = (ipc_object_t)
+ ipc_port_copy_send((ipc_port_t) dest_port);
+ reply_soright = IP_NULL;
+ } else if ((dest_type == MACH_MSG_TYPE_MOVE_SEND) &&
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND)) {
+ /*
+ * This is an easy case. Just use our
+ * handy-dandy special-purpose copyin call
+ * to get two send rights for the price of one.
+ */
+
+ kr = ipc_right_copyin_two(space, name, entry,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ reply_port = dest_port;
+ reply_soright = IP_NULL;
+ } else {
+ ipc_port_t soright;
+
+ assert(((dest_type == MACH_MSG_TYPE_COPY_SEND) &&
+ (reply_type == MACH_MSG_TYPE_MOVE_SEND)) ||
+ ((dest_type == MACH_MSG_TYPE_MOVE_SEND) &&
+ (reply_type == MACH_MSG_TYPE_COPY_SEND)));
+
+ /*
+ * To make this atomic, just do a move-send,
+ * and dup the send right we get out.
+ */
+
+ kr = ipc_right_copyin(space, name, entry,
+ MACH_MSG_TYPE_MOVE_SEND, FALSE,
+ &dest_port, &soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, name, entry);
+
+ /*
+ * It's OK if the port we got is dead now,
+ * so reply_port is IP_DEAD, because the msg
+ * won't go anywhere anyway.
+ */
+
+ reply_port = (ipc_object_t)
+ ipc_port_copy_send((ipc_port_t) dest_port);
+
+ if (dest_type == MACH_MSG_TYPE_MOVE_SEND) {
+ dest_soright = soright;
+ reply_soright = IP_NULL;
+ } else {
+ dest_soright = IP_NULL;
+ reply_soright = soright;
+ }
+ }
+ } else if (!MACH_PORT_VALID(reply_name)) {
+ ipc_entry_t entry;
+
+ /*
+ * No reply port! This is an easy case
+ * to make atomic. Just copyin the destination.
+ */
+
+ entry = ipc_entry_lookup(space, dest_name);
+ if (entry == IE_NULL)
+ goto invalid_dest;
+
+ kr = ipc_right_copyin(space, dest_name, entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ /* the entry might need to be deallocated */
+
+ if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, dest_name, entry);
+
+ reply_port = (ipc_object_t) reply_name;
+ reply_soright = IP_NULL;
+ } else {
+ ipc_entry_t dest_entry, reply_entry;
+ ipc_port_t saved_reply;
+
+ /*
+ * This is the tough case to make atomic.
+ * The difficult problem is serializing with port death.
+ * At the time we copyin dest_port, it must be alive.
+ * If reply_port is alive when we copyin it, then
+ * we are OK, because we serialize before the death
+ * of both ports. Assume reply_port is dead at copyin.
+ * Then if dest_port dies/died after reply_port died,
+ * we are OK, because we serialize between the death
+ * of the two ports. So the bad case is when dest_port
+ * dies after its copyin, reply_port dies before its
+ * copyin, and dest_port dies before reply_port. Then
+ * the copyins operated as if dest_port was alive
+ * and reply_port was dead, which shouldn't have happened
+ * because they died in the other order.
+ *
+ * We handle the bad case by undoing the copyins
+ * (which is only possible because the ports are dead)
+ * and failing with MACH_SEND_INVALID_DEST, serializing
+ * after the death of the ports.
+ *
+ * Note that it is easy for a user task to tell if
+ * a copyin happened before or after a port died.
+ * For example, suppose both dest and reply are
+ * send-once rights (types are both move-sonce) and
+ * both rights have dead-name requests registered.
+ * If a port dies before copyin, a dead-name notification
+ * is generated and the dead name's urefs are incremented,
+ * and if the copyin happens first, a port-deleted
+ * notification is generated.
+ *
+ * Note that although the entries are different,
+ * dest_port and reply_port might still be the same.
+ */
+
+ dest_entry = ipc_entry_lookup(space, dest_name);
+ if (dest_entry == IE_NULL)
+ goto invalid_dest;
+
+ reply_entry = ipc_entry_lookup(space, reply_name);
+ if (reply_entry == IE_NULL)
+ goto invalid_reply;
+
+ assert(dest_entry != reply_entry); /* names are not equal */
+ assert(reply_type != 0); /* because reply_name not null */
+
+ if (!ipc_right_copyin_check(space, reply_name, reply_entry,
+ reply_type))
+ goto invalid_reply;
+
+ kr = ipc_right_copyin(space, dest_name, dest_entry,
+ dest_type, FALSE,
+ &dest_port, &dest_soright);
+ if (kr != KERN_SUCCESS)
+ goto invalid_dest;
+
+ assert(IO_VALID(dest_port));
+
+ saved_reply = (ipc_port_t) reply_entry->ie_object;
+ /* might be IP_NULL, if this is a dead name */
+ if (saved_reply != IP_NULL)
+ ipc_port_reference(saved_reply);
+
+ kr = ipc_right_copyin(space, reply_name, reply_entry,
+ reply_type, TRUE,
+ &reply_port, &reply_soright);
+#if MACH_IPC_COMPAT
+ if (kr != KERN_SUCCESS) {
+ assert(kr == KERN_INVALID_NAME);
+
+ /*
+ * Oops. This must have been a compat entry
+ * and the port died after the check above.
+ * We should back out the copyin of dest_port,
+ * and report MACH_SEND_INVALID_REPLY, but
+ * if dest_port is alive we can't always do that.
+ * Punt and pretend we got IO_DEAD, skipping
+ * further hairy atomicity problems.
+ */
+
+ reply_port = IO_DEAD;
+ reply_soright = IP_NULL;
+ goto skip_reply_checks;
+ }
+#else MACH_IPC_COMPAT
+ assert(kr == KERN_SUCCESS);
+#endif MACH_IPC_COMPAT
+
+ if ((saved_reply != IP_NULL) && (reply_port == IO_DEAD)) {
+ ipc_port_t dest = (ipc_port_t) dest_port;
+ ipc_port_timestamp_t timestamp;
+ boolean_t must_undo;
+
+ /*
+ * The reply port died before copyin.
+ * Check if dest port died before reply.
+ */
+
+ ip_lock(saved_reply);
+ assert(!ip_active(saved_reply));
+ timestamp = saved_reply->ip_timestamp;
+ ip_unlock(saved_reply);
+
+ ip_lock(dest);
+ must_undo = (!ip_active(dest) &&
+ IP_TIMESTAMP_ORDER(dest->ip_timestamp,
+ timestamp));
+ ip_unlock(dest);
+
+ if (must_undo) {
+ /*
+ * Our worst nightmares are realized.
+ * Both destination and reply ports
+ * are dead, but in the wrong order,
+ * so we must undo the copyins and
+ * possibly generate a dead-name notif.
+ */
+
+ ipc_right_copyin_undo(
+ space, dest_name, dest_entry,
+ dest_type, dest_port,
+ dest_soright);
+ /* dest_entry may be deallocated now */
+
+ ipc_right_copyin_undo(
+ space, reply_name, reply_entry,
+ reply_type, reply_port,
+ reply_soright);
+ /* reply_entry may be deallocated now */
+
+ is_write_unlock(space);
+
+ if (dest_soright != IP_NULL)
+ ipc_notify_dead_name(dest_soright,
+ dest_name);
+ assert(reply_soright == IP_NULL);
+
+ ipc_port_release(saved_reply);
+ return MACH_SEND_INVALID_DEST;
+ }
+ }
+
+ /* the entries might need to be deallocated */
+
+ if (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, reply_name, reply_entry);
+
+#if MACH_IPC_COMPAT
+ skip_reply_checks:
+ /*
+ * We jump here if the reply entry was a compat entry
+ * and the port died on us. In this case, the copyin
+ * code already deallocated reply_entry.
+ */
+#endif MACH_IPC_COMPAT
+
+ if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ ipc_entry_dealloc(space, dest_name, dest_entry);
+
+ if (saved_reply != IP_NULL)
+ ipc_port_release(saved_reply);
+ }
+
+ /*
+ * At this point, dest_port, reply_port,
+ * dest_soright, reply_soright are all initialized.
+ * Any defunct entries have been deallocated.
+ * The space is still write-locked, and we need to
+ * make the MACH_SEND_CANCEL check. The notify_port pointer
+ * is still usable, because the copyin code above won't ever
+ * deallocate a receive right, so its entry still exists
+ * and holds a ref. Note notify_port might even equal
+ * dest_port or reply_port.
+ */
+
+ if ((notify != MACH_PORT_NULL) &&
+ (dest_soright == notify_port)) {
+ ipc_port_release_sonce(dest_soright);
+ dest_soright = IP_NULL;
+ }
+
+ is_write_unlock(space);
+
+ if (dest_soright != IP_NULL)
+ ipc_notify_port_deleted(dest_soright, dest_name);
+
+ if (reply_soright != IP_NULL)
+ ipc_notify_port_deleted(reply_soright, reply_name);
+
+ dest_type = ipc_object_copyin_type(dest_type);
+ reply_type = ipc_object_copyin_type(reply_type);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(dest_type, reply_type));
+ msg->msgh_remote_port = (mach_port_t) dest_port;
+ msg->msgh_local_port = (mach_port_t) reply_port;
+ }
+
+ return MACH_MSG_SUCCESS;
+
+ invalid_dest:
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_DEST;
+
+ invalid_reply:
+ is_write_unlock(space);
+ return MACH_SEND_INVALID_REPLY;
+}
+
+/*
+ *	Routine:	ipc_kmsg_copyin_body
+ *	Purpose:
+ *		"Copy-in" port rights and out-of-line memory
+ *		in the body of a message.  The destination port
+ *		in the header must already be translated, because
+ *		its kernel-object type selects whether out-of-line
+ *		memory is copied via page lists and/or stolen pages.
+ *
+ *		On failure, the partially-copied message is cleaned
+ *		with ipc_kmsg_clean_partial; the buffer itself is
+ *		not deallocated.
+ *	Conditions:
+ *		Nothing locked.
+ *	Returns:
+ *		MACH_MSG_SUCCESS	Successful copyin.
+ *		MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ *		MACH_SEND_INVALID_RIGHT	Can't copyin port right in body.
+ *		MACH_SEND_INVALID_TYPE	Bad type specification.
+ *		MACH_SEND_MSG_TOO_SMALL	Body is too small for types/data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin_body(kmsg, space, map)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+	vm_map_t map;
+{
+	ipc_object_t dest;
+	vm_offset_t saddr, eaddr;
+	boolean_t complex;
+	boolean_t use_page_lists, steal_pages;
+
+	dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	complex = FALSE;
+	use_page_lists = ipc_kobject_vm_page_list(ip_kotype((ipc_port_t)dest));
+	steal_pages = ipc_kobject_vm_page_steal(ip_kotype((ipc_port_t)dest));
+
+#if	NORMA_IPC
+	if (IP_NORMA_IS_PROXY((ipc_port_t) dest)) {
+		use_page_lists = TRUE;
+		steal_pages = TRUE;
+	}
+#endif	/* NORMA_IPC */
+
+	saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+	eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+	/* walk the type descriptors in the body */
+
+	while (saddr < eaddr) {
+		vm_offset_t taddr = saddr;
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, longform, dealloc, is_port;
+		vm_offset_t data;
+		vm_size_t length;
+		kern_return_t kr;
+
+		type = (mach_msg_type_long_t *) saddr;
+
+		if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+		    ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+		     ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+			ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+			return MACH_SEND_MSG_TOO_SMALL;
+		}
+
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+		if (longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		/* reject malformed descriptors: bad port size, non-zero
+		   longform header fields, unused bits set, or a request
+		   to deallocate inline data */
+
+		if ((is_port && (size != PORT_T_SIZE_IN_BITS)) ||
+		    (longform && ((type->msgtl_header.msgt_name != 0) ||
+				  (type->msgtl_header.msgt_size != 0) ||
+				  (type->msgtl_header.msgt_number != 0))) ||
+		    (((mach_msg_type_t*)type)->msgt_unused != 0) ||
+		    (dealloc && is_inline)) {
+			ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+			return MACH_SEND_INVALID_TYPE;
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		length = ((number * size) + 7) >> 3;
+
+		if (is_inline) {
+			vm_size_t amount;
+
+			/* inline data sizes round up to int boundaries */
+
+			amount = (length + 3) &~ 3;
+			if ((eaddr - saddr) < amount) {
+				ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+				return MACH_SEND_MSG_TOO_SMALL;
+			}
+
+			data = saddr;
+			saddr += amount;
+		} else {
+			vm_offset_t addr;
+
+			if (sizeof(vm_offset_t) > sizeof(mach_msg_type_t))
+				saddr = ptr_align(saddr);
+
+			if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+				ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+				return MACH_SEND_MSG_TOO_SMALL;
+			}
+
+			/* grab the out-of-line data */
+
+			addr = * (vm_offset_t *) saddr;
+
+			if (length == 0)
+				data = 0;
+			else if (is_port) {
+				/* out-of-line port arrays are copied into
+				   kernel-allocated memory, not a vm copy */
+
+				data = kalloc(length);
+				if (data == 0)
+					goto invalid_memory;
+
+				if (copyinmap(map, (char *) addr,
+					      (char *) data, length) ||
+				    (dealloc &&
+				     (vm_deallocate(map, addr, length) !=
+				      KERN_SUCCESS))) {
+					kfree(data, length);
+					goto invalid_memory;
+				}
+			} else {
+				vm_map_copy_t copy;
+
+				if (use_page_lists) {
+					kr = vm_map_copyin_page_list(map,
+						addr, length, dealloc,
+						steal_pages, &copy, FALSE);
+				} else {
+					kr = vm_map_copyin(map, addr, length,
+							   dealloc, &copy);
+				}
+				if (kr != KERN_SUCCESS) {
+				    invalid_memory:
+					ipc_kmsg_clean_partial(kmsg, taddr,
+							       FALSE, 0);
+					return MACH_SEND_INVALID_MEMORY;
+				}
+
+				data = (vm_offset_t) copy;
+			}
+
+			* (vm_offset_t *) saddr = data;
+			saddr += sizeof(vm_offset_t);
+			complex = TRUE;
+		}
+
+		if (is_port) {
+			mach_msg_type_name_t newname =
+					ipc_object_copyin_type(name);
+			ipc_object_t *objects = (ipc_object_t *) data;
+			mach_msg_type_number_t i;
+
+			/* rewrite the descriptor with the translated name */
+
+			if (longform)
+				type->msgtl_name = newname;
+			else
+				((mach_msg_type_t*)type)->msgt_name = newname;
+
+			for (i = 0; i < number; i++) {
+				mach_port_t port = (mach_port_t) objects[i];
+				ipc_object_t object;
+
+				if (!MACH_PORT_VALID(port))
+					continue;
+
+				kr = ipc_object_copyin(space, port,
+						       name, &object);
+				if (kr != KERN_SUCCESS) {
+					ipc_kmsg_clean_partial(kmsg, taddr,
+							       TRUE, i);
+					return MACH_SEND_INVALID_RIGHT;
+				}
+
+				if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+				    ipc_port_check_circularity(
+						(ipc_port_t) object,
+						(ipc_port_t) dest))
+					kmsg->ikm_header.msgh_bits |=
+						MACH_MSGH_BITS_CIRCULAR;
+
+				objects[i] = object;
+			}
+
+			complex = TRUE;
+		}
+	}
+
+	if (!complex)
+		kmsg->ikm_header.msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
+
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyin
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in the message.
+ *
+ * In all failure cases, the message is left holding
+ * no rights or memory. However, the message buffer
+ * is not deallocated. If successful, the message
+ * contains a valid destination port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin(kmsg, space, map, notify)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+	vm_map_t map;
+	mach_port_t notify;
+{
+	mach_msg_return_t mr;
+
+	/* translate the port rights in the header first */
+
+	mr = ipc_kmsg_copyin_header(&kmsg->ikm_header, space, notify);
+	if (mr != MACH_MSG_SUCCESS)
+		return mr;
+
+	/* only complex messages carry body elements to copy in */
+
+	if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX)
+		return ipc_kmsg_copyin_body(kmsg, space, map);
+
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyin_from_kernel
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in a message sent from the kernel.
+ *
+ * Because the message comes from the kernel,
+ * the implementation assumes there are no errors
+ * or peculiarities in the message.
+ *
+ *	Sets MACH_MSGH_BITS_CIRCULAR in the message
+ *	header if queueing the message would result
+ *	in a circularity.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_copyin_from_kernel(
+	ipc_kmsg_t kmsg)
+{
+	mach_msg_bits_t bits = kmsg->ikm_header.msgh_bits;
+	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
+	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
+	ipc_object_t remote = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	ipc_object_t local = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	vm_offset_t saddr, eaddr;
+
+	/* translate the destination and reply ports */
+
+	ipc_object_copyin_from_kernel(remote, rname);
+	if (IO_VALID(local))
+		ipc_object_copyin_from_kernel(local, lname);
+
+	/*
+	 * The common case is a complex message with no reply port,
+	 * because that is what the memory_object interface uses.
+	 */
+
+	if (bits == (MACH_MSGH_BITS_COMPLEX |
+		     MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
+		/* fast path: rewrite the bits directly for this case */
+		bits = (MACH_MSGH_BITS_COMPLEX |
+			MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+
+		kmsg->ikm_header.msgh_bits = bits;
+	} else {
+		/* general case: translate both type names in the bits */
+		bits = (MACH_MSGH_BITS_OTHER(bits) |
+			MACH_MSGH_BITS(ipc_object_copyin_type(rname),
+				       ipc_object_copyin_type(lname)));
+
+		kmsg->ikm_header.msgh_bits = bits;
+		if ((bits & MACH_MSGH_BITS_COMPLEX) == 0)
+			return;
+	}
+
+	/* body lies between the end of the header and msgh_size */
+
+	saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+	eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+	/*
+	 * Walk the type descriptors in the body, translating any
+	 * port rights in place.  No validity checks are made here:
+	 * the message comes from the kernel and is assumed well-formed
+	 * (see the routine comment above).
+	 */
+
+	while (saddr < eaddr) {
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, longform, is_port;
+		vm_offset_t data;
+		vm_size_t length;
+
+		type = (mach_msg_type_long_t *) saddr;
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		longform = ((mach_msg_type_t*)type)->msgt_longform;
+		/* type->msgtl_header.msgt_deallocate not used */
+		if (longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		length = ((number * size) + 7) >> 3;
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		if (is_inline) {
+			/* inline data sizes round up to int boundaries */
+
+			data = saddr;
+			saddr += (length + 3) &~ 3;
+		} else {
+			/*
+			 * The sender should supply ready-made memory
+			 * for us, so we don't need to do anything.
+			 */
+
+			data = * (vm_offset_t *) saddr;
+			saddr += sizeof(vm_offset_t);
+		}
+
+		if (is_port) {
+			mach_msg_type_name_t newname =
+					ipc_object_copyin_type(name);
+			ipc_object_t *objects = (ipc_object_t *) data;
+			mach_msg_type_number_t i;
+
+			/* rewrite the descriptor with the translated name */
+
+			if (longform)
+				type->msgtl_name = newname;
+			else
+				((mach_msg_type_t*)type)->msgt_name = newname;
+			for (i = 0; i < number; i++) {
+				ipc_object_t object = objects[i];
+
+				if (!IO_VALID(object))
+					continue;
+
+				ipc_object_copyin_from_kernel(object, name);
+
+				/* moving a receive right into dest could
+				   create a cycle; mark the msg if so */
+
+				if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+				    ipc_port_check_circularity(
+						(ipc_port_t) object,
+						(ipc_port_t) remote))
+					kmsg->ikm_header.msgh_bits |=
+						MACH_MSGH_BITS_CIRCULAR;
+			}
+		}
+	}
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_header
+ * Purpose:
+ * "Copy-out" port rights in the header of a message.
+ * Operates atomically; if it doesn't succeed the
+ * message header and the space are left untouched.
+ * If it does succeed the remote/local port fields
+ * contain port names instead of object pointers,
+ * and the bits field is updated.
+ *
+ * The notify argument implements the MACH_RCV_NOTIFY option.
+ * If it is not MACH_PORT_NULL, it should name a receive right.
+ * If the process of receiving the reply port creates a
+ * new right in the receiving task, then the new right is
+ * automatically registered for a dead-name notification,
+ * with the notify port supplying the send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out port rights.
+ * MACH_RCV_INVALID_NOTIFY
+ * Notify is non-null and doesn't name a receive right.
+ * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
+ * The space is dead.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
+ * No room in space for another name.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
+ * Couldn't allocate memory for the reply port.
+ * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
+ * Couldn't allocate memory for the dead-name request.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_header(msg, space, notify)
+ mach_msg_header_t *msg;
+ ipc_space_t space;
+ mach_port_t notify;
+{
+ mach_msg_bits_t mbits = msg->msgh_bits;
+ ipc_port_t dest = (ipc_port_t) msg->msgh_remote_port;
+
+ assert(IP_VALID(dest));
+
+#ifndef MIGRATING_THREADS
+ /* first check for common cases */
+
+ if (notify == MACH_PORT_NULL) switch (MACH_MSGH_BITS_PORTS(mbits)) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0): {
+ mach_port_t dest_name;
+ ipc_port_t nsrequest;
+
+ /* receiving an asynchronous message */
+
+ ip_lock(dest);
+ if (!ip_active(dest)) {
+ ip_unlock(dest);
+ break;
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_srights > 0);
+ ip_release(dest);
+
+ if (dest->ip_receiver == space)
+ dest_name = dest->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+ mach_port_mscount_t mscount;
+
+ dest->ip_nsrequest = IP_NULL;
+ mscount = dest->ip_mscount;
+ ip_unlock(dest);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = MACH_PORT_NULL;
+ return MACH_MSG_SUCCESS;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+ ipc_entry_t table;
+ mach_port_index_t index;
+ ipc_entry_t entry;
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_t dest_name, reply_name;
+ ipc_port_t nsrequest;
+
+ /* receiving a request message */
+
+ if (!IP_VALID(reply))
+ break;
+
+ is_write_lock(space);
+ if (!space->is_active ||
+ ((index = (table = space->is_table)->ie_next) == 0)) {
+ is_write_unlock(space);
+ break;
+ }
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space. If
+ * dest == reply, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest);
+ if (!ip_active(dest) || !ip_lock_try(reply)) {
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ if (!ip_active(reply)) {
+ ip_unlock(reply);
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
+
+ assert(reply->ip_sorights > 0);
+ ip_unlock(reply);
+
+ /* optimized ipc_entry_get */
+
+ entry = &table[index];
+ table->ie_next = entry->ie_next;
+ entry->ie_request = 0;
+
+ {
+ mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ reply_name = MACH_PORT_MAKE(index, gen);
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ assert(MACH_PORT_VALID(reply_name));
+ entry->ie_object = (ipc_object_t) reply;
+ is_write_unlock(space);
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_srights > 0);
+ ip_release(dest);
+
+ if (dest->ip_receiver == space)
+ dest_name = dest->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+
+ if ((--dest->ip_srights == 0) &&
+ ((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
+ mach_port_mscount_t mscount;
+
+ dest->ip_nsrequest = IP_NULL;
+ mscount = dest->ip_mscount;
+ ip_unlock(dest);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = reply_name;
+ return MACH_MSG_SUCCESS;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ mach_port_t dest_name;
+
+ /* receiving a reply message */
+
+ ip_lock(dest);
+ if (!ip_active(dest)) {
+ ip_unlock(dest);
+ break;
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest->ip_sorights > 0);
+
+ if (dest->ip_receiver == space) {
+ ip_release(dest);
+ dest->ip_sorights--;
+ dest_name = dest->ip_receiver_name;
+ ip_unlock(dest);
+ } else {
+ ip_unlock(dest);
+
+ ipc_notify_send_once(dest);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = MACH_PORT_NULL;
+ return MACH_MSG_SUCCESS;
+ }
+
+ default:
+ /* don't bother optimizing */
+ break;
+ }
+#endif /* MIGRATING_THREADS */
+
+ {
+ mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+ mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+ ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
+ mach_port_t dest_name, reply_name;
+
+ if (IP_VALID(reply)) {
+ ipc_port_t notify_port;
+ ipc_entry_t entry;
+ kern_return_t kr;
+
+ /*
+ * Handling notify (for MACH_RCV_NOTIFY) is tricky.
+ * The problem is atomically making a send-once right
+ * from the notify port and installing it for a
+ * dead-name request in the new entry, because this
+ * requires two port locks (on the notify port and
+ * the reply port). However, we can safely make
+ * and consume send-once rights for the notify port
+ * as long as we hold the space locked. This isn't
+ * an atomicity problem, because the only way
+ * to detect that a send-once right has been created
+ * and then consumed if it wasn't needed is by getting
+ * at the receive right to look at ip_sorights, and
+ * because the space is write-locked status calls can't
+ * lookup the notify port receive right. When we make
+ * the send-once right, we lock the notify port,
+ * so any status calls in progress will be done.
+ */
+
+ is_write_lock(space);
+
+ for (;;) {
+ ipc_port_request_index_t request;
+
+ if (!space->is_active) {
+ is_write_unlock(space);
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
+ }
+
+ if (notify != MACH_PORT_NULL) {
+ notify_port = ipc_port_lookup_notify(space,
+ notify);
+ if (notify_port == IP_NULL) {
+ is_write_unlock(space);
+ return MACH_RCV_INVALID_NOTIFY;
+ }
+ } else
+ notify_port = IP_NULL;
+
+ if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, (ipc_object_t) reply,
+ &reply_name, &entry)) {
+ /* reply port is locked and active */
+
+ /*
+ * We don't need the notify_port
+ * send-once right, but we can't release
+ * it here because reply port is locked.
+ * Wait until after the copyout to
+ * release the notify port right.
+ */
+
+ assert(entry->ie_bits &
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ break;
+ }
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ ip_release(reply);
+ ip_check_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ ip_lock(dest);
+ is_write_unlock(space);
+
+ reply = IP_DEAD;
+ reply_name = MACH_PORT_DEAD;
+ goto copyout_dest;
+ }
+
+ kr = ipc_entry_get(space, &reply_name, &entry);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ /* space is locked */
+ kr = ipc_entry_grow_table(space);
+ if (kr != KERN_SUCCESS) {
+ /* space is unlocked */
+
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+ else
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
+ }
+ /* space is locked again; start over */
+
+ continue;
+ }
+
+ assert(IE_BITS_TYPE(entry->ie_bits)
+ == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ if (notify_port == IP_NULL) {
+ /* not making a dead-name request */
+
+ entry->ie_object = (ipc_object_t) reply;
+ break;
+ }
+
+ kr = ipc_port_dnrequest(reply, reply_name,
+ notify_port, &request);
+ if (kr != KERN_SUCCESS) {
+ ip_unlock(reply);
+
+ ipc_port_release_sonce(notify_port);
+
+ ipc_entry_dealloc(space, reply_name, entry);
+ is_write_unlock(space);
+
+ ip_lock(reply);
+ if (!ip_active(reply)) {
+ /* will fail next time around loop */
+
+ ip_unlock(reply);
+ is_write_lock(space);
+ continue;
+ }
+
+ kr = ipc_port_dngrow(reply);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+
+ is_write_lock(space);
+ continue;
+ }
+
+ notify_port = IP_NULL; /* don't release right below */
+
+ entry->ie_object = (ipc_object_t) reply;
+ entry->ie_request = request;
+ break;
+ }
+
+ /* space and reply port are locked and active */
+
+ ip_reference(reply); /* hold onto the reply port */
+
+ kr = ipc_right_copyout(space, reply_name, entry,
+ reply_type, TRUE, (ipc_object_t) reply);
+ /* reply port is unlocked */
+ assert(kr == KERN_SUCCESS);
+
+ if (notify_port != IP_NULL)
+ ipc_port_release_sonce(notify_port);
+
+ ip_lock(dest);
+ is_write_unlock(space);
+ } else {
+ /*
+ * No reply port! This is an easy case.
+ * We only need to have the space locked
+ * when checking notify and when locking
+ * the destination (to ensure atomicity).
+ */
+
+ is_read_lock(space);
+ if (!space->is_active) {
+ is_read_unlock(space);
+ return MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE;
+ }
+
+ if (notify != MACH_PORT_NULL) {
+ ipc_entry_t entry;
+
+ /* must check notify even though it won't be used */
+
+ if (((entry = ipc_entry_lookup(space, notify))
+ == IE_NULL) ||
+ ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)) {
+ is_read_unlock(space);
+ return MACH_RCV_INVALID_NOTIFY;
+ }
+ }
+
+ ip_lock(dest);
+ is_read_unlock(space);
+
+ reply_name = (mach_port_t) reply;
+ }
+
+ /*
+ * At this point, the space is unlocked and the destination
+ * port is locked. (Lock taken while space was locked.)
+ * reply_name is taken care of; we still need dest_name.
+ * We still hold a ref for reply (if it is valid).
+ *
+ * If the space holds receive rights for the destination,
+ * we return its name for the right. Otherwise the task
+ * managed to destroy or give away the receive right between
+ * receiving the message and this copyout. If the destination
+ * is dead, return MACH_PORT_DEAD, and if the receive right
+ * exists somewhere else (another space, in transit)
+ * return MACH_PORT_NULL.
+ *
+ * Making this copyout operation atomic with the previous
+ * copyout of the reply port is a bit tricky. If there was
+ * no real reply port (it wasn't IP_VALID) then this isn't
+ * an issue. If the reply port was dead at copyout time,
+ * then we are OK, because if dest is dead we serialize
+ * after the death of both ports and if dest is alive
+ * we serialize after reply died but before dest's (later) death.
+ * So assume reply was alive when we copied it out. If dest
+ * is alive, then we are OK because we serialize before
+ * the ports' deaths. So assume dest is dead when we look at it.
+ * If reply dies/died after dest, then we are OK because
+ * we serialize after dest died but before reply dies.
+ * So the hard case is when reply is alive at copyout,
+ * dest is dead at copyout, and reply died before dest died.
+ * In this case pretend that dest is still alive, so
+ * we serialize while both ports are alive.
+ *
+ * Because the space lock is held across the copyout of reply
+ * and locking dest, the receive right for dest can't move
+ * in or out of the space while the copyouts happen, so
+ * that isn't an atomicity problem. In the last hard case
+ * above, this implies that when dest is dead that the
+ * space couldn't have had receive rights for dest at
+ * the time reply was copied-out, so when we pretend
+ * that dest is still alive, we can return MACH_PORT_NULL.
+ *
+ * If dest == reply, then we have to make it look like
+ * either both copyouts happened before the port died,
+ * or both happened after the port died. This special
+ * case works naturally if the timestamp comparison
+ * is done correctly.
+ */
+
+ copyout_dest:
+
+ if (ip_active(dest)) {
+ ipc_object_copyout_dest(space, (ipc_object_t) dest,
+ dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ ipc_port_timestamp_t timestamp;
+
+ timestamp = dest->ip_timestamp;
+ ip_release(dest);
+ ip_check_unlock(dest);
+
+ if (IP_VALID(reply)) {
+ ip_lock(reply);
+ if (ip_active(reply) ||
+ IP_TIMESTAMP_ORDER(timestamp,
+ reply->ip_timestamp))
+ dest_name = MACH_PORT_DEAD;
+ else
+ dest_name = MACH_PORT_NULL;
+ ip_unlock(reply);
+ } else
+ dest_name = MACH_PORT_DEAD;
+ }
+
+ if (IP_VALID(reply))
+ ipc_port_release(reply);
+
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ msg->msgh_local_port = dest_name;
+ msg->msgh_remote_port = reply_name;
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_object
+ * Purpose:
+ * Copy-out a port right. Always returns a name,
+ * even for unsuccessful return codes. Always
+ * consumes the supplied object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS The space acquired the right
+ * (name is valid) or the object is dead (MACH_PORT_DEAD).
+ * MACH_MSG_IPC_SPACE No room in space for the right,
+ * or the space is dead. (Name is MACH_PORT_NULL.)
+ * MACH_MSG_IPC_KERNEL Kernel resource shortage.
+ * (Name is MACH_PORT_NULL.)
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_object(space, object, msgt_name, namep)
+	ipc_space_t space;
+	ipc_object_t object;
+	mach_msg_type_name_t msgt_name;
+	mach_port_t *namep;
+{
+	/*
+	 *	Invalid objects (IO_NULL/IO_DEAD) pass straight through:
+	 *	the object pointer itself doubles as the returned name
+	 *	(MACH_PORT_NULL or MACH_PORT_DEAD).
+	 */
+	if (!IO_VALID(object)) {
+		*namep = (mach_port_t) object;
+		return MACH_MSG_SUCCESS;
+	}
+
+#ifndef MIGRATING_THREADS
+	/*
+	 * Attempt quick copyout of send rights.  We optimize for a
+	 * live port for which the receiver holds send (and not
+	 * receive) rights in his local table.
+	 */
+
+	if (msgt_name != MACH_MSG_TYPE_PORT_SEND)
+		goto slow_copyout;
+
+	{
+		register ipc_port_t port = (ipc_port_t) object;
+		ipc_entry_t entry;
+
+		/* lock ordering: space before port */
+		is_write_lock(space);
+		if (!space->is_active) {
+			is_write_unlock(space);
+			goto slow_copyout;
+		}
+
+		/*
+		 *	Quick path requires a live port that already has
+		 *	a send-right entry in this space's reverse hash.
+		 */
+		ip_lock(port);
+		if (!ip_active(port) ||
+		    !ipc_hash_local_lookup(space, (ipc_object_t) port,
+					   namep, &entry)) {
+			ip_unlock(port);
+			is_write_unlock(space);
+			goto slow_copyout;
+		}
+
+		/*
+		 * Copyout the send right, incrementing urefs
+		 * unless it would overflow, and consume the right.
+		 */
+
+		/* the space holds a send right, so the message's right
+		   cannot be the last one; consuming it is safe */
+		assert(port->ip_srights > 1);
+		port->ip_srights--;
+		ip_release(port);
+		ip_unlock(port);
+
+		assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
+		assert(IE_BITS_UREFS(entry->ie_bits) > 0);
+		assert(IE_BITS_UREFS(entry->ie_bits) < MACH_PORT_UREFS_MAX);
+
+		{
+			register ipc_entry_bits_t bits = entry->ie_bits + 1;
+
+			/* bump urefs, but silently drop the increment if
+			   it would hit MACH_PORT_UREFS_MAX (saturation) */
+			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
+				entry->ie_bits = bits;
+		}
+
+		is_write_unlock(space);
+		return MACH_MSG_SUCCESS;
+	}
+
+    slow_copyout:
+#endif	/* MIGRATING_THREADS */
+
+	{
+		kern_return_t kr;
+
+		/* general-purpose path; on failure the right is destroyed
+		   so the object is always consumed, as promised */
+		kr = ipc_object_copyout(space, object, msgt_name, TRUE, namep);
+		if (kr != KERN_SUCCESS) {
+			ipc_object_destroy(object, msgt_name);
+
+			if (kr == KERN_INVALID_CAPABILITY)
+				*namep = MACH_PORT_DEAD;
+			else {
+				*namep = MACH_PORT_NULL;
+
+				if (kr == KERN_RESOURCE_SHORTAGE)
+					return MACH_MSG_IPC_KERNEL;
+				else
+					return MACH_MSG_IPC_SPACE;
+			}
+		}
+
+		return MACH_MSG_SUCCESS;
+	}
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_body
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the body of a message.
+ *
+ * The error codes are a combination of special bits.
+ * The copyout proceeds despite errors.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyout.
+ * MACH_MSG_IPC_SPACE No room for port right in name space.
+ * MACH_MSG_VM_SPACE No room for memory in address space.
+ * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
+ * MACH_MSG_VM_KERNEL Resource shortage handling memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_body(saddr, eaddr, space, map)
+	vm_offset_t saddr, eaddr;
+	ipc_space_t space;
+	vm_map_t map;
+{
+	mach_msg_return_t mr = MACH_MSG_SUCCESS;
+	kern_return_t kr;
+
+	/* walk each type descriptor + data region in [saddr, eaddr) */
+	while (saddr < eaddr) {
+		vm_offset_t taddr = saddr;
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, longform, is_port;
+		vm_size_t length;
+		vm_offset_t addr;	/* receiver-map address for OOL data;
+					   set in the port branch, consumed in
+					   the non-inline branch below */
+
+		type = (mach_msg_type_long_t *) saddr;
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		longform = ((mach_msg_type_t*)type)->msgt_longform;
+		if (longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+		/* NOTE(review): number * size is unchecked arithmetic;
+		   presumably bounded by msgh_size validation at copyin
+		   time — verify against the send path */
+
+		length = ((number * size) + 7) >> 3;
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		if (is_port) {
+			mach_port_t *objects;
+			mach_msg_type_number_t i;
+
+			if (!is_inline && (length != 0)) {
+				/* first allocate memory in the map */
+
+				kr = vm_allocate(map, &addr, length, TRUE);
+				if (kr != KERN_SUCCESS) {
+					ipc_kmsg_clean_body(taddr, saddr);
+					/* jumps into the OOL-memory branch
+					   below to record the failure */
+					goto vm_copyout_failure;
+				}
+			}
+
+			objects = (mach_port_t *)
+				(is_inline ? saddr : * (vm_offset_t *) saddr);
+
+			/* copyout port rights carried in the message */
+
+			for (i = 0; i < number; i++) {
+				ipc_object_t object =
+					(ipc_object_t) objects[i];
+
+				/* errors accumulate in mr; copyout of the
+				   remaining elements continues regardless */
+				mr |= ipc_kmsg_copyout_object(space, object,
+							      name, &objects[i]);
+			}
+		}
+
+		if (is_inline) {
+			/* inline data sizes round up to int boundaries */
+
+			((mach_msg_type_t*)type)->msgt_deallocate = FALSE;
+			saddr += (length + 3) &~ 3;
+		} else {
+			vm_offset_t data;
+
+			if (sizeof(vm_offset_t) > sizeof(mach_msg_type_t))
+				saddr = ptr_align(saddr);
+
+			data = * (vm_offset_t *) saddr;
+
+			/* copyout memory carried in the message */
+
+			if (length == 0) {
+				assert(data == 0);
+				addr = 0;
+			} else if (is_port) {
+				/* copyout to memory allocated above */
+
+				(void) copyoutmap(map, (char *) data,
+						  (char *) addr, length);
+				kfree(data, length);
+			} else {
+				vm_map_copy_t copy = (vm_map_copy_t) data;
+
+				kr = vm_map_copyout(map, &addr, copy);
+				if (kr != KERN_SUCCESS) {
+					vm_map_copy_discard(copy);
+
+					/* also reached by goto from the
+					   vm_allocate failure above */
+				    vm_copyout_failure:
+
+					addr = 0;
+					if (longform)
+						type->msgtl_size = 0;
+					else
+						((mach_msg_type_t*)type)->msgt_size = 0;
+
+					if (kr == KERN_RESOURCE_SHORTAGE)
+						mr |= MACH_MSG_VM_KERNEL;
+					else
+						mr |= MACH_MSG_VM_SPACE;
+				}
+			}
+
+			/* the receiver now owns the memory at addr */
+			((mach_msg_type_t*)type)->msgt_deallocate = TRUE;
+			* (vm_offset_t *) saddr = addr;
+			saddr += sizeof(vm_offset_t);
+		}
+	}
+
+	return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out all rights and memory.
+ * MACH_RCV_INVALID_NOTIFY Bad notify port.
+ * Rights and memory in the message are intact.
+ * MACH_RCV_HEADER_ERROR + special bits
+ * Rights and memory in the message are intact.
+ * MACH_RCV_BODY_ERROR + special bits
+ * The message header was successfully copied out.
+ * As much of the body was handled as possible.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout(kmsg, space, map, notify)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+	vm_map_t map;
+	mach_port_t notify;
+{
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+	mach_msg_return_t mr;
+
+	/* copyout the header first; leave the body untouched on failure */
+	mr = ipc_kmsg_copyout_header(&kmsg->ikm_header, space, notify);
+	if (mr != MACH_MSG_SUCCESS)
+		return mr;
+
+	/* simple messages carry no rights or memory in the body */
+	if ((mbits & MACH_MSGH_BITS_COMPLEX) == 0)
+		return mr;
+
+	{
+		vm_offset_t body_start, body_end;
+
+		body_start = (vm_offset_t) (&kmsg->ikm_header + 1);
+		body_end = (vm_offset_t) &kmsg->ikm_header +
+			kmsg->ikm_header.msgh_size;
+
+		mr = ipc_kmsg_copyout_body(body_start, body_end, space, map);
+	}
+
+	/* flag body trouble distinctly from header trouble */
+	return (mr == MACH_MSG_SUCCESS) ? mr : (mr | MACH_RCV_BODY_ERROR);
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_pseudo
+ * Purpose:
+ * Does a pseudo-copyout of the message.
+ * This is like a regular copyout, except
+ * that the ports in the header are handled
+ * as if they are in the body. They aren't reversed.
+ *
+ * The error codes are a combination of special bits.
+ * The copyout proceeds despite errors.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyout.
+ * MACH_MSG_IPC_SPACE No room for port right in name space.
+ * MACH_MSG_VM_SPACE No room for memory in address space.
+ * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
+ * MACH_MSG_VM_KERNEL Resource shortage handling memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_pseudo(
+	ipc_kmsg_t	kmsg,
+	ipc_space_t	space,
+	vm_map_t	map)
+{
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+	ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+	mach_port_t dest_name, reply_name;
+	mach_msg_return_t mr;
+
+	assert(IO_VALID(dest));
+
+	/*
+	 *	Header ports get body-style treatment: both are copied
+	 *	out with ipc_kmsg_copyout_object and are NOT reversed.
+	 *	Errors from the two copyouts accumulate in mr.
+	 */
+	mr = ipc_kmsg_copyout_object(space, dest, dest_type, &dest_name);
+	mr |= ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name);
+
+	kmsg->ikm_header.msgh_bits = mbits &~ MACH_MSGH_BITS_CIRCULAR;
+	kmsg->ikm_header.msgh_remote_port = dest_name;
+	kmsg->ikm_header.msgh_local_port = reply_name;
+
+	if (mbits & MACH_MSGH_BITS_COMPLEX) {
+		vm_offset_t body_start, body_end;
+
+		body_start = (vm_offset_t) (&kmsg->ikm_header + 1);
+		body_end = (vm_offset_t) &kmsg->ikm_header +
+			kmsg->ikm_header.msgh_size;
+
+		mr |= ipc_kmsg_copyout_body(body_start, body_end, space, map);
+	}
+
+	return mr;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_dest
+ * Purpose:
+ * Copies out the destination port in the message.
+ * Destroys all other rights and memory in the message.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_kmsg_copyout_dest(kmsg, space)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+{
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+	ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+	mach_port_t dest_name, reply_name;
+
+	assert(IO_VALID(dest));
+
+	io_lock(dest);
+	if (io_active(dest)) {
+		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+		/* dest is unlocked */
+	} else {
+		/* drop the message's reference; frees the object
+		   if that was the last one */
+		io_release(dest);
+		io_check_unlock(dest);
+		dest_name = MACH_PORT_DEAD;
+	}
+
+	/* any reply right is destroyed, not copied out */
+	if (IO_VALID(reply)) {
+		ipc_object_destroy(reply, reply_type);
+		reply_name = MACH_PORT_NULL;
+	} else
+		reply_name = (mach_port_t) reply;
+
+	/* swap local/remote: the header now reads as a received message */
+	kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+				      MACH_MSGH_BITS(reply_type, dest_type));
+	kmsg->ikm_header.msgh_local_port = dest_name;
+	kmsg->ikm_header.msgh_remote_port = reply_name;
+
+	if (mbits & MACH_MSGH_BITS_COMPLEX) {
+		vm_offset_t saddr, eaddr;
+
+		saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+		eaddr = (vm_offset_t) &kmsg->ikm_header +
+			kmsg->ikm_header.msgh_size;
+
+		/* destroy all rights and memory carried in the body */
+		ipc_kmsg_clean_body(saddr, eaddr);
+	}
+}
+
+#if NORMA_IPC || NORMA_VM
+/*
+ *	Routine:	ipc_kmsg_copyout_to_kernel
+ *	Purpose:
+ *		Copies out the destination and reply ports in the message.
+ *		Leaves all other rights and memory in the message alone.
+ *	Conditions:
+ *		Nothing locked.
+ *
+ *	Derived from ipc_kmsg_copyout_dest.
+ *	Use by mach_msg_rpc_from_kernel (which used to use copyout_dest).
+ *	We really do want to save rights and memory.
+ */
+
+void
+ipc_kmsg_copyout_to_kernel(kmsg, space)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+{
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+	ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+	mach_port_t dest_name, reply_name;
+
+	assert(IO_VALID(dest));
+
+	io_lock(dest);
+	if (io_active(dest)) {
+		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+		/* dest is unlocked */
+	} else {
+		/* drop the message's ref; frees dest if it was the last */
+		io_release(dest);
+		io_check_unlock(dest);
+		dest_name = MACH_PORT_DEAD;
+	}
+
+	/* the reply port is left untranslated: the kernel caller
+	   keeps the right, so the pointer itself is the "name" */
+	reply_name = (mach_port_t) reply;
+
+	kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+				      MACH_MSGH_BITS(reply_type, dest_type));
+	kmsg->ikm_header.msgh_local_port = dest_name;
+	kmsg->ikm_header.msgh_remote_port = reply_name;
+}
+#endif	/* NORMA_IPC || NORMA_VM */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_kmsg_copyin_compat
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in the message.
+ *
+ * In all failure cases, the message is left holding
+ * no rights or memory. However, the message buffer
+ * is not deallocated. If successful, the message
+ * contains a valid destination port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyin_compat(kmsg, space, map)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+	vm_map_t map;
+{
+	msg_header_t msg;
+	mach_port_t dest_name;
+	mach_port_t reply_name;
+	ipc_object_t dest, reply;
+	mach_msg_type_name_t dest_type, reply_type;
+	vm_offset_t saddr, eaddr;
+	boolean_t complex;
+	kern_return_t kr;
+	boolean_t use_page_lists, steal_pages;
+
+	/* snapshot the old-format header before overwriting it in place */
+	msg = * (msg_header_t *) &kmsg->ikm_header;
+	dest_name = (mach_port_t) msg.msg_remote_port;
+	reply_name = (mach_port_t) msg.msg_local_port;
+
+	/* translate the destination and reply ports */
+
+	kr = ipc_object_copyin_header(space, dest_name, &dest, &dest_type);
+	if (kr != KERN_SUCCESS)
+		return MACH_SEND_INVALID_DEST;
+
+	if (reply_name == MACH_PORT_NULL) {
+		reply = IO_NULL;
+		reply_type = 0;
+	} else {
+		kr = ipc_object_copyin_header(space, reply_name,
+					      &reply, &reply_type);
+		if (kr != KERN_SUCCESS) {
+			ipc_object_destroy(dest, dest_type);
+			return MACH_SEND_INVALID_REPLY;
+		}
+	}
+
+	/* rewrite the header in the new (mach_msg) format */
+	kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(dest_type, reply_type);
+	kmsg->ikm_header.msgh_size = (mach_msg_size_t) msg.msg_size;
+	kmsg->ikm_header.msgh_remote_port = (mach_port_t) dest;
+	kmsg->ikm_header.msgh_local_port = (mach_port_t) reply;
+	kmsg->ikm_header.msgh_seqno = (mach_msg_kind_t) msg.msg_type;
+	kmsg->ikm_header.msgh_id = (mach_msg_id_t) msg.msg_id;
+
+	if (msg.msg_simple)
+		return MACH_MSG_SUCCESS;
+
+	complex = FALSE;
+	use_page_lists = ipc_kobject_vm_page_list(ip_kotype((ipc_port_t)dest));
+	steal_pages = ipc_kobject_vm_page_steal(ip_kotype((ipc_port_t)dest));
+
+#if NORMA_IPC
+	if (IP_NORMA_IS_PROXY((ipc_port_t) dest)) {
+		use_page_lists = TRUE;
+		steal_pages = TRUE;
+	}
+#endif	/* NORMA_IPC */
+
+	saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+	eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+	/* walk each type descriptor + data region in the body */
+	while (saddr < eaddr) {
+		vm_offset_t taddr = saddr;
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, longform, dealloc, is_port;
+		vm_offset_t data;
+		vm_size_t length;
+
+		type = (mach_msg_type_long_t *) saddr;
+
+		if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+		    ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+		     ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+			ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+			return MACH_SEND_MSG_TOO_SMALL;
+		}
+
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+		if (longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		is_port = MSG_TYPE_PORT_ANY(name);
+
+		if (is_port && (size != PORT_T_SIZE_IN_BITS)) {
+			ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+			return MACH_SEND_INVALID_TYPE;
+		}
+
+		/*
+		 *	New IPC says these should be zero, but old IPC
+		 *	tasks often leave them with random values.  So
+		 *	we have to clear them.
+		 */
+
+		((mach_msg_type_t*)type)->msgt_unused = 0;
+		if (longform) {
+			type->msgtl_header.msgt_name = 0;
+			type->msgtl_header.msgt_size = 0;
+			type->msgtl_header.msgt_number = 0;
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		length = ((number * size) + 7) >> 3;
+
+		if (is_inline) {
+			vm_size_t amount;
+
+			/* inline data sizes round up to int boundaries */
+
+			amount = (length + 3) &~ 3;
+			if ((eaddr - saddr) < amount) {
+				ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+				return MACH_SEND_MSG_TOO_SMALL;
+			}
+
+			data = saddr;
+			saddr += amount;
+		} else {
+			vm_offset_t addr;
+
+			if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+				ipc_kmsg_clean_partial(kmsg, taddr, FALSE, 0);
+				return MACH_SEND_MSG_TOO_SMALL;
+			}
+
+			/* grab the out-of-line data */
+
+			addr = * (vm_offset_t *) saddr;
+
+			if (length == 0)
+				data = 0;
+			else if (is_port) {
+				/* OOL port arrays are copied into
+				   kernel memory for translation */
+				data = kalloc(length);
+				if (data == 0)
+					goto invalid_memory;
+
+				if (copyinmap(map, (char *) addr,
+					      (char *) data, length) ||
+				    (dealloc &&
+				     (vm_deallocate(map, addr, length) !=
+				      KERN_SUCCESS))) {
+					kfree(data, length);
+					goto invalid_memory;
+				}
+			} else {
+				vm_map_copy_t copy;
+
+				if (use_page_lists) {
+					kr = vm_map_copyin_page_list(map,
+						addr, length, dealloc,
+						steal_pages, &copy, FALSE);
+				} else {
+					kr = vm_map_copyin(map, addr, length,
+							   dealloc,
+							   &copy);
+				}
+				if (kr != KERN_SUCCESS) {
+				    invalid_memory:
+					ipc_kmsg_clean_partial(kmsg, taddr,
+							       FALSE, 0);
+					return MACH_SEND_INVALID_MEMORY;
+				}
+
+				data = (vm_offset_t) copy;
+			}
+
+			* (vm_offset_t *) saddr = data;
+			saddr += sizeof(vm_offset_t);
+			complex = TRUE;
+		}
+
+		if (is_port) {
+			mach_msg_type_name_t newname =
+				ipc_object_copyin_type(name);
+			ipc_object_t *objects = (ipc_object_t *) data;
+			mach_msg_type_number_t i;
+
+			/* rewrite the descriptor with the received-right
+			   type name */
+			if (longform)
+				type->msgtl_name = newname;
+			else
+				((mach_msg_type_t*)type)->msgt_name = newname;
+
+			for (i = 0; i < number; i++) {
+				mach_port_t port = (mach_port_t) objects[i];
+				ipc_object_t object;
+
+				if (!MACH_PORT_VALID(port))
+					continue;
+
+				kr = ipc_object_copyin_compat(space, port,
+							      name, dealloc, &object);
+				if (kr != KERN_SUCCESS) {
+					/* clean only the i rights already
+					   copied in from this array */
+					ipc_kmsg_clean_partial(kmsg, taddr,
+							       TRUE, i);
+					return MACH_SEND_INVALID_RIGHT;
+				}
+
+				if ((newname == MACH_MSG_TYPE_PORT_RECEIVE) &&
+				    ipc_port_check_circularity(
+							       (ipc_port_t) object,
+							       (ipc_port_t) dest))
+					kmsg->ikm_header.msgh_bits |=
+						MACH_MSGH_BITS_CIRCULAR;
+
+				objects[i] = object;
+			}
+
+			complex = TRUE;
+		}
+	}
+
+	if (complex)
+		kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_COMPLEX;
+
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_kmsg_copyout_compat
+ * Purpose:
+ * "Copy-out" port rights and out-of-line memory
+ * in the message, producing an old IPC message.
+ *
+ * Doesn't bother to handle the header atomically.
+ * Skips over errors. Problem ports produce MACH_PORT_NULL
+ * (MACH_PORT_DEAD is never produced), and problem memory
+ * produces a zero address.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Copied out rights and memory.
+ */
+
+mach_msg_return_t
+ipc_kmsg_copyout_compat(kmsg, space, map)
+	ipc_kmsg_t kmsg;
+	ipc_space_t space;
+	vm_map_t map;
+{
+	msg_header_t msg;
+	mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+	ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+	ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+	mach_port_t dest_name, reply_name;
+	vm_offset_t saddr, eaddr;
+	kern_return_t kr;
+
+	assert(IO_VALID(dest));
+
+	io_lock(dest);
+	if (io_active(dest)) {
+		mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
+
+		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+		/* dest is unlocked */
+	} else {
+		io_release(dest);
+		io_check_unlock(dest);
+		/* old IPC never produces MACH_PORT_DEAD */
+		dest_name = MACH_PORT_NULL;
+	}
+
+	if (IO_VALID(reply)) {
+		mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
+
+		kr = ipc_object_copyout_compat(space, reply, reply_type,
+					       &reply_name);
+		if (kr != KERN_SUCCESS) {
+			/* skip over errors: problem ports become NULL */
+			ipc_object_destroy(reply, reply_type);
+			reply_name = MACH_PORT_NULL;
+		}
+	} else
+		reply_name = MACH_PORT_NULL;
+
+	/* build the old-format header (note local/remote swap) */
+	msg.msg_unused = 0;
+	msg.msg_simple = (mbits & MACH_MSGH_BITS_COMPLEX) ? FALSE : TRUE;
+	msg.msg_size = (msg_size_t) kmsg->ikm_header.msgh_size;
+	msg.msg_type = (integer_t) kmsg->ikm_header.msgh_seqno;
+	msg.msg_local_port = (port_name_t) dest_name;
+	msg.msg_remote_port = (port_name_t) reply_name;
+	msg.msg_id = (integer_t) kmsg->ikm_header.msgh_id;
+	* (msg_header_t *) &kmsg->ikm_header = msg;
+
+	if (msg.msg_simple)
+		return MACH_MSG_SUCCESS;
+
+	saddr = (vm_offset_t) (&kmsg->ikm_header + 1);
+	eaddr = (vm_offset_t) &kmsg->ikm_header + kmsg->ikm_header.msgh_size;
+
+	/* walk each type descriptor + data region in the body */
+	while (saddr < eaddr) {
+		vm_offset_t taddr = saddr;
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, longform, is_port;
+		vm_size_t length;
+		vm_offset_t addr;	/* receiver-map address for OOL data;
+					   set in the port branch, consumed in
+					   the non-inline branch below */
+
+		type = (mach_msg_type_long_t *) saddr;
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		longform = ((mach_msg_type_t*)type)->msgt_longform;
+		if (longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		length = ((number * size) + 7) >> 3;
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		if (is_port) {
+			mach_port_t *objects;
+			mach_msg_type_number_t i;
+			mach_msg_type_name_t newname;
+
+			if (!is_inline && (length != 0)) {
+				/* first allocate memory in the map */
+
+				kr = vm_allocate(map, &addr, length, TRUE);
+				if (kr != KERN_SUCCESS) {
+					ipc_kmsg_clean_body(taddr, saddr);
+					/* jumps into the OOL branch below */
+					goto vm_copyout_failure;
+				}
+			}
+
+			/* rewrite the descriptor with the old-IPC name */
+			newname = ipc_object_copyout_type_compat(name);
+			if (longform)
+				type->msgtl_name = newname;
+			else
+				((mach_msg_type_t*)type)->msgt_name = newname;
+
+			objects = (mach_port_t *)
+				(is_inline ? saddr : * (vm_offset_t *) saddr);
+
+			/* copyout port rights carried in the message */
+
+			for (i = 0; i < number; i++) {
+				ipc_object_t object =
+					(ipc_object_t) objects[i];
+
+				if (!IO_VALID(object)) {
+					objects[i] = MACH_PORT_NULL;
+					continue;
+				}
+
+				kr = ipc_object_copyout_compat(space, object,
+							       name, &objects[i]);
+				if (kr != KERN_SUCCESS) {
+					ipc_object_destroy(object, name);
+					objects[i] = MACH_PORT_NULL;
+				}
+			}
+		}
+
+		if (is_inline) {
+			/* inline data sizes round up to int boundaries */
+
+			saddr += (length + 3) &~ 3;
+		} else {
+			vm_offset_t data = * (vm_offset_t *) saddr;
+
+			/* copyout memory carried in the message */
+
+			if (length == 0) {
+				assert(data == 0);
+				addr = 0;
+			} else if (is_port) {
+				/* copyout to memory allocated above */
+
+				(void) copyoutmap(map, (char *) data,
+						  (char *) addr, length);
+				kfree(data, length);
+			} else {
+				vm_map_copy_t copy = (vm_map_copy_t) data;
+
+				kr = vm_map_copyout(map, &addr, copy);
+				if (kr != KERN_SUCCESS) {
+					vm_map_copy_discard(copy);
+
+					/* also reached by goto from the
+					   vm_allocate failure above;
+					   problem memory becomes address 0 */
+				    vm_copyout_failure:
+
+					addr = 0;
+				}
+			}
+
+			* (vm_offset_t *) saddr = addr;
+			saddr += sizeof(vm_offset_t);
+		}
+	}
+
+	return MACH_MSG_SUCCESS;
+}
+
+#endif	/* MACH_IPC_COMPAT */
+
+#include <mach_kdb.h>
+#if MACH_KDB
+
+char *
+ipc_type_name(type_name, received)
+	int type_name;
+	boolean_t received;
+{
+	/*
+	 *	Map a message type name to a printable string, or return
+	 *	a null pointer for unknown types.  The three "move"
+	 *	rights print differently once a message has been
+	 *	received, since by then the right has already moved.
+	 */
+	switch (type_name) {
+	case MACH_MSG_TYPE_BOOLEAN:
+		return "boolean";
+	case MACH_MSG_TYPE_INTEGER_16:
+		return "short";
+	case MACH_MSG_TYPE_INTEGER_32:
+		return "int32";
+	case MACH_MSG_TYPE_INTEGER_64:
+		return "int64";
+	case MACH_MSG_TYPE_CHAR:
+		return "char";
+	case MACH_MSG_TYPE_BYTE:
+		return "byte";
+	case MACH_MSG_TYPE_REAL:
+		return "real";
+	case MACH_MSG_TYPE_STRING:
+		return "string";
+	case MACH_MSG_TYPE_PORT_NAME:
+		return "port_name";
+	case MACH_MSG_TYPE_MOVE_RECEIVE:
+		return received ? "port_receive" : "move_receive";
+	case MACH_MSG_TYPE_MOVE_SEND:
+		return received ? "port_send" : "move_send";
+	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+		return received ? "port_send_once" : "move_send_once";
+	case MACH_MSG_TYPE_COPY_SEND:
+		return "copy_send";
+	case MACH_MSG_TYPE_MAKE_SEND:
+		return "make_send";
+	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+		return "make_send_once";
+	default:
+		return (char *) 0;
+	}
+}
+
+void
+ipc_print_type_name(
+	int type_name)
+{
+	char *name;
+
+	/* fall back to the numeric type for names we cannot decode */
+	name = ipc_type_name(type_name, TRUE);
+	if (name == (char *) 0)
+		printf("type%d", type_name);
+	else
+		printf("%s", name);
+}
+
+/*
+ * ipc_kmsg_print [ debug ]
+ */
+void
+ipc_kmsg_print(kmsg)
+	ipc_kmsg_t kmsg;
+{
+	db_printf("kmsg=0x%x\n", kmsg);
+	db_printf("ikm_next=0x%x,prev=0x%x,size=%d,marequest=0x%x",
+		  kmsg->ikm_next,
+		  kmsg->ikm_prev,
+		  kmsg->ikm_size,
+		  kmsg->ikm_marequest);
+#if NORMA_IPC
+	/* NORMA kmsgs carry extra page/copy bookkeeping fields */
+	db_printf(",page=0x%x,copy=0x%x\n",
+		  kmsg->ikm_page,
+		  kmsg->ikm_copy);
+#else	/* NORMA_IPC */
+	db_printf("\n");
+#endif	/* NORMA_IPC */
+	ipc_msg_print(&kmsg->ikm_header);
+}
+
+/*
+ * ipc_msg_print [ debug ]
+ */
+void
+ipc_msg_print(msgh)
+	mach_msg_header_t *msgh;
+{
+	vm_offset_t saddr, eaddr;
+
+	/* decode and print the header bits */
+	db_printf("msgh_bits=0x%x: ", msgh->msgh_bits);
+	if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+		db_printf("complex,");
+	}
+	if (msgh->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+		db_printf("circular,");
+	}
+	if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX_PORTS) {
+		db_printf("complex_ports,");
+	}
+	if (msgh->msgh_bits & MACH_MSGH_BITS_COMPLEX_DATA) {
+		db_printf("complex_data,");
+	}
+	if (msgh->msgh_bits & MACH_MSGH_BITS_MIGRATED) {
+		db_printf("migrated,");
+	}
+	if (msgh->msgh_bits & MACH_MSGH_BITS_UNUSED) {
+		db_printf("unused=0x%x,",
+			  msgh->msgh_bits & MACH_MSGH_BITS_UNUSED);
+	}
+	db_printf("l=0x%x,r=0x%x\n",
+		  MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
+		  MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+
+	db_printf("msgh_id=%d,size=%d,seqno=%d,",
+		  msgh->msgh_id,
+		  msgh->msgh_size,
+		  msgh->msgh_seqno);
+
+	if (msgh->msgh_remote_port) {
+		db_printf("remote=0x%x(", msgh->msgh_remote_port);
+		ipc_print_type_name(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+		db_printf("),");
+	} else {
+		db_printf("remote=null,\n");
+	}
+
+	if (msgh->msgh_local_port) {
+		db_printf("local=0x%x(", msgh->msgh_local_port);
+		ipc_print_type_name(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
+		db_printf(")\n");
+	} else {
+		db_printf("local=null\n");
+	}
+
+	saddr = (vm_offset_t) (msgh + 1);
+	eaddr = (vm_offset_t) msgh + msgh->msgh_size;
+
+	/* walk and print each type descriptor + data region */
+	while (saddr < eaddr) {
+		mach_msg_type_long_t *type;
+		mach_msg_type_name_t name;
+		mach_msg_type_size_t size;
+		mach_msg_type_number_t number;
+		boolean_t is_inline, longform, dealloc, is_port;
+		vm_size_t length;
+
+		type = (mach_msg_type_long_t *) saddr;
+
+		if (((eaddr - saddr) < sizeof(mach_msg_type_t)) ||
+		    ((longform = ((mach_msg_type_t*)type)->msgt_longform) &&
+		     ((eaddr - saddr) < sizeof(mach_msg_type_long_t)))) {
+			db_printf("*** msg too small\n");
+			return;
+		}
+
+		is_inline = ((mach_msg_type_t*)type)->msgt_inline;
+		dealloc = ((mach_msg_type_t*)type)->msgt_deallocate;
+		if (longform) {
+			/* This must be aligned */
+			if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+			    (is_misaligned(type))) {
+				saddr = ptr_align(saddr);
+				continue;
+			}
+			name = type->msgtl_name;
+			size = type->msgtl_size;
+			number = type->msgtl_number;
+			saddr += sizeof(mach_msg_type_long_t);
+		} else {
+			name = ((mach_msg_type_t*)type)->msgt_name;
+			size = ((mach_msg_type_t*)type)->msgt_size;
+			number = ((mach_msg_type_t*)type)->msgt_number;
+			saddr += sizeof(mach_msg_type_t);
+		}
+
+		db_printf("-- type=");
+		ipc_print_type_name(name);
+		if (! is_inline) {
+			db_printf(",ool");
+		}
+		if (dealloc) {
+			db_printf(",dealloc");
+		}
+		if (longform) {
+			db_printf(",longform");
+		}
+		db_printf(",size=%d,number=%d,addr=0x%x\n",
+			  size,
+			  number,
+			  saddr);
+
+		is_port = MACH_MSG_TYPE_PORT_ANY(name);
+
+		/* sanity-check the descriptor before printing data */
+		if ((is_port && (size != PORT_T_SIZE_IN_BITS)) ||
+		    (longform && ((type->msgtl_header.msgt_name != 0) ||
+				  (type->msgtl_header.msgt_size != 0) ||
+				  (type->msgtl_header.msgt_number != 0))) ||
+		    (((mach_msg_type_t*)type)->msgt_unused != 0) ||
+		    (dealloc && is_inline)) {
+			db_printf("*** invalid type\n");
+			return;
+		}
+
+		/* padding (ptrs and ports) ? */
+		if ((sizeof(natural_t) > sizeof(mach_msg_type_t)) &&
+		    ((size >> 3) == sizeof(natural_t)))
+			saddr = ptr_align(saddr);
+
+		/* calculate length of data in bytes, rounding up */
+
+		length = ((number * size) + 7) >> 3;
+
+		if (is_inline) {
+			vm_size_t amount;
+			int i, numwords;
+
+			/* inline data sizes round up to int boundaries */
+			amount = (length + 3) &~ 3;
+			if ((eaddr - saddr) < amount) {
+				db_printf("*** too small\n");
+				return;
+			}
+			/* print at most the first 8 words of inline data */
+			numwords = amount / sizeof(int);
+			if (numwords > 8) {
+				numwords = 8;
+			}
+			for (i = 0; i < numwords; i++) {
+				db_printf("0x%x\n", ((int *) saddr)[i]);
+			}
+			if (numwords < amount / sizeof(int)) {
+				db_printf("...\n");
+			}
+			saddr += amount;
+		} else {
+			if ((eaddr - saddr) < sizeof(vm_offset_t)) {
+				db_printf("*** too small\n");
+				return;
+			}
+			db_printf("0x%x\n", * (vm_offset_t *) saddr);
+			saddr += sizeof(vm_offset_t);
+		}
+	}
+}
+#endif	/* MACH_KDB */
diff --git a/ipc/ipc_kmsg.h b/ipc/ipc_kmsg.h
new file mode 100644
index 0000000..8fdbeb5
--- /dev/null
+++ b/ipc/ipc_kmsg.h
@@ -0,0 +1,291 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_kmsg.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for kernel messages.
+ */
+
+#ifndef _IPC_IPC_KMSG_H_
+#define _IPC_IPC_KMSG_H_
+
+#include <cpus.h>
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/machine/vm_types.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include "cpu_number.h"
+#include <kern/macro_help.h>
+#include <kern/kalloc.h>
+#include <ipc/ipc_marequest.h>
+#if NORMA_IPC
+#include <vm/vm_page.h>
+#include <vm/vm_map.h>
+#endif NORMA_IPC
+
+/*
+ * This structure is only the header for a kmsg buffer;
+ * the actual buffer is normally larger. The rest of the buffer
+ * holds the body of the message.
+ *
+ * In a kmsg, the port fields hold pointers to ports instead
+ * of port names. These pointers hold references.
+ *
+ * The ikm_header.msgh_remote_port field is the destination
+ * of the message.
+ */
+
+typedef struct ipc_kmsg {
+	/* links in a circular, doubly-linked kmsg queue
+	   (see ipc_kmsg_enqueue_macro below) */
+	struct ipc_kmsg *ikm_next, *ikm_prev;
+	/* total allocated size including IKM_OVERHEAD; non-positive
+	   values mark specially-freed buffers (see IKM_SIZE_* and
+	   ikm_free below) */
+	vm_size_t ikm_size;
+	/* pending msg-accepted request, or IMAR_NULL (set by
+	   ikm_init_special) */
+	ipc_marequest_t ikm_marequest;
+#if	NORMA_IPC
+	/* NOTE(review): NORMA-only bookkeeping; roles not visible in
+	   this chunk — confirm against the NORMA IPC code */
+	vm_page_t ikm_page;
+	vm_map_copy_t ikm_copy;
+	unsigned long ikm_source_node;
+#endif	NORMA_IPC
+	/* message header; the body follows it in the same allocation */
+	mach_msg_header_t ikm_header;
+} *ipc_kmsg_t;
+
+#define IKM_NULL ((ipc_kmsg_t) 0)
+
+#define IKM_OVERHEAD \
+ (sizeof(struct ipc_kmsg) - sizeof(mach_msg_header_t))
+
+#define ikm_plus_overhead(size) ((vm_size_t)((size) + IKM_OVERHEAD))
+#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD))
+
+/*
+ * XXX For debugging.
+ */
+#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10)
+
+/*
+ * We keep a per-processor cache of kernel message buffers.
+ * The cache saves the overhead/locking of using kalloc/kfree.
+ * The per-processor cache seems to miss less than a per-thread cache,
+ * and it also uses less memory. Access to the cache doesn't
+ * require locking.
+ */
+
+extern ipc_kmsg_t ipc_kmsg_cache[NCPUS];
+
+#define ikm_cache() ipc_kmsg_cache[cpu_number()]
+
+/*
+ * The size of the kernel message buffers that will be cached.
+ * IKM_SAVED_KMSG_SIZE includes overhead; IKM_SAVED_MSG_SIZE doesn't.
+ */
+
+#define IKM_SAVED_KMSG_SIZE ((vm_size_t) 256)
+#define IKM_SAVED_MSG_SIZE ikm_less_overhead(IKM_SAVED_KMSG_SIZE)
+
+/* Allocate a kmsg able to hold a message body of "size" bytes. */
+#define	ikm_alloc(size) \
+	((ipc_kmsg_t) kalloc(ikm_plus_overhead(size)))
+
+/* Initialize a fresh kmsg given its message (overhead-less) size. */
+#define	ikm_init(kmsg, size) \
+MACRO_BEGIN \
+	ikm_init_special((kmsg), ikm_plus_overhead(size)); \
+MACRO_END
+
+/* Initialize a kmsg given its total allocated size;
+   clears any msg-accepted request pointer. */
+#define	ikm_init_special(kmsg, size) \
+MACRO_BEGIN \
+	(kmsg)->ikm_size = (size); \
+	(kmsg)->ikm_marequest = IMAR_NULL; \
+MACRO_END
+
+/* Debug check: kmsg has the expected size and no pending marequest. */
+#define	ikm_check_initialized(kmsg, size) \
+MACRO_BEGIN \
+	assert((kmsg)->ikm_size == (size)); \
+	assert((kmsg)->ikm_marequest == IMAR_NULL); \
+MACRO_END
+
+/*
+ * Non-positive message sizes are special. They indicate that
+ * the message buffer doesn't come from ikm_alloc and
+ * requires some special handling to free.
+ *
+ * ipc_kmsg_free is the non-macro form of ikm_free.
+ * It frees kmsgs of all varieties.
+ */
+
+/* Special ikm_size values for buffers not obtained from ikm_alloc. */
+#define	IKM_SIZE_NORMA		0
+#define	IKM_SIZE_NETWORK	-1
+
+/* Free a kmsg.  The signed cast distinguishes ordinary kalloc'd
+   buffers (positive size) from the special non-positive sizes above,
+   which must go through ipc_kmsg_free for custom teardown. */
+#define	ikm_free(kmsg) \
+MACRO_BEGIN \
+	register vm_size_t _size = (kmsg)->ikm_size; \
+ \
+	if ((integer_t)_size > 0) \
+		kfree((vm_offset_t) (kmsg), _size); \
+	else \
+		ipc_kmsg_free(kmsg); \
+MACRO_END
+
+/*
+ * struct ipc_kmsg_queue is defined in kern/thread.h instead of here,
+ * so that kern/thread.h doesn't have to include ipc/ipc_kmsg.h.
+ */
+
+#include <ipc/ipc_kmsg_queue.h>
+
+typedef struct ipc_kmsg_queue *ipc_kmsg_queue_t;
+
+#define IKMQ_NULL ((ipc_kmsg_queue_t) 0)
+
+
+#define ipc_kmsg_queue_init(queue) \
+MACRO_BEGIN \
+ (queue)->ikmq_base = IKM_NULL; \
+MACRO_END
+
+#define ipc_kmsg_queue_empty(queue) ((queue)->ikmq_base == IKM_NULL)
+
+/* Enqueue a kmsg */
+extern void ipc_kmsg_enqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+/* Dequeue and return a kmsg */
+extern ipc_kmsg_t ipc_kmsg_dequeue(
+ ipc_kmsg_queue_t queue);
+
+/* Pull a kmsg out of a queue */
+extern void ipc_kmsg_rmqueue(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+#define ipc_kmsg_queue_first(queue) ((queue)->ikmq_base)
+
+/* Return the kmsg following the given kmsg */
+extern ipc_kmsg_t ipc_kmsg_queue_next(
+ ipc_kmsg_queue_t queue,
+ ipc_kmsg_t kmsg);
+
+/*
+ * Remove "kmsg", asserted to be the head element, from "queue"
+ * (a circular doubly-linked list rooted at ikmq_base).  The links of
+ * the removed kmsg are poisoned with IKM_BOGUS for debugging.
+ * Fix: the two debug stores now parenthesize the "kmsg" argument,
+ * matching every other use in this macro, so the macro remains safe
+ * for non-identifier argument expressions.
+ */
+#define	ipc_kmsg_rmqueue_first_macro(queue, kmsg)		\
+MACRO_BEGIN							\
+	register ipc_kmsg_t _next;				\
+								\
+	assert((queue)->ikmq_base == (kmsg));			\
+								\
+	_next = (kmsg)->ikm_next;				\
+	if (_next == (kmsg)) {					\
+		/* sole element: queue becomes empty */		\
+		assert((kmsg)->ikm_prev == (kmsg));		\
+		(queue)->ikmq_base = IKM_NULL;			\
+	} else {						\
+		register ipc_kmsg_t _prev = (kmsg)->ikm_prev;	\
+								\
+		(queue)->ikmq_base = _next;			\
+		_next->ikm_prev = _prev;			\
+		_prev->ikm_next = _next;			\
+	}							\
+	/* XXX Debug paranoia */				\
+	(kmsg)->ikm_next = IKM_BOGUS;				\
+	(kmsg)->ikm_prev = IKM_BOGUS;				\
+MACRO_END
+
+/* Append "kmsg" at the tail of "queue": either it becomes the sole
+   (self-linked) element, or it is spliced in just before the current
+   head, i.e. at the tail of the circular list. */
+#define	ipc_kmsg_enqueue_macro(queue, kmsg) \
+MACRO_BEGIN \
+	register ipc_kmsg_t _first = (queue)->ikmq_base; \
+ \
+	if (_first == IKM_NULL) { \
+		(queue)->ikmq_base = (kmsg); \
+		(kmsg)->ikm_next = (kmsg); \
+		(kmsg)->ikm_prev = (kmsg); \
+	} else { \
+		register ipc_kmsg_t _last = _first->ikm_prev; \
+ \
+		(kmsg)->ikm_next = _first; \
+		(kmsg)->ikm_prev = _last; \
+		_first->ikm_prev = (kmsg); \
+		_last->ikm_next = (kmsg); \
+	} \
+MACRO_END
+
+extern void
+ipc_kmsg_destroy(/* ipc_kmsg_t */);
+
+extern void
+ipc_kmsg_clean(/* ipc_kmsg_t */);
+
+extern void
+ipc_kmsg_free(/* ipc_kmsg_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_get(/* mach_msg_header_t *, mach_msg_size_t, ipc_kmsg_t * */);
+
+extern mach_msg_return_t
+ipc_kmsg_get_from_kernel(/* mach_msg_header_t *, mach_msg_size_t,
+ ipc_kmsg_t * */);
+
+extern mach_msg_return_t
+ipc_kmsg_put(/* mach_msg_header_t *, ipc_kmsg_t, mach_msg_size_t */);
+
+extern void
+ipc_kmsg_put_to_kernel(/* mach_msg_header_t *, ipc_kmsg_t, mach_msg_size_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyin_header(/* mach_msg_header_t *, ipc_space_t, mach_port_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyin(/* ipc_kmsg_t, ipc_space_t, vm_map_t, mach_port_t */);
+
+extern void
+ipc_kmsg_copyin_from_kernel(/* ipc_kmsg_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_header(/* mach_msg_header_t *, ipc_space_t, mach_port_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_object(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t * */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_body(/* vm_offset_t, vm_offset_t, ipc_space_t, vm_map_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout(/* ipc_kmsg_t, ipc_space_t, vm_map_t, mach_port_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_pseudo(/* ipc_kmsg_t, ipc_space_t, vm_map_t */);
+
+extern void
+ipc_kmsg_copyout_dest(/* ipc_kmsg_t, ipc_space_t */);
+
+#if MACH_IPC_COMPAT
+
+extern mach_msg_return_t
+ipc_kmsg_copyin_compat(/* ipc_kmsg_t, ipc_space_t, vm_map_t */);
+
+extern mach_msg_return_t
+ipc_kmsg_copyout_compat(/* ipc_kmsg_t, ipc_space_t, vm_map_t */);
+
+#endif MACH_IPC_COMPAT
+#endif _IPC_IPC_KMSG_H_
diff --git a/ipc/ipc_kmsg_queue.h b/ipc/ipc_kmsg_queue.h
new file mode 100644
index 0000000..51ccbe2
--- /dev/null
+++ b/ipc/ipc_kmsg_queue.h
@@ -0,0 +1,31 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+#ifndef _IPC_KMSG_QUEUE_H_
+#define _IPC_KMSG_QUEUE_H_
+/* A kmsg queue is just the head pointer of a circular list of kmsgs
+   (IKM_NULL when empty).  Kept in its own tiny header so that
+   kern/thread.h need not include ipc/ipc_kmsg.h. */
+struct ipc_kmsg_queue {
+	struct ipc_kmsg *ikmq_base; };
+#endif
+
diff --git a/ipc/ipc_machdep.h b/ipc/ipc_machdep.h
new file mode 100755
index 0000000..e864c4b
--- /dev/null
+++ b/ipc/ipc_machdep.h
@@ -0,0 +1,40 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * At times, we need to know the size of a port in bits
+ */
+
+/* 64 bit machines */
+#if defined(__alpha)
+#define PORT_T_SIZE_IN_BITS 64
+#endif
+
+/* default, 32 bit machines */
+#if !defined(PORT_T_SIZE_IN_BITS)
+#define PORT_T_SIZE_IN_BITS 32
+#endif
+
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
new file mode 100644
index 0000000..6ddffa0
--- /dev/null
+++ b/ipc/ipc_marequest.c
@@ -0,0 +1,485 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_marequest.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to handle msg-accepted requests.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/message.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/mach_param.h>
+#include <kern/kalloc.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_init.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_notify.h>
+
+#include <mach_ipc_debug.h>
+#if MACH_IPC_DEBUG
+#include <mach/kern_return.h>
+#include <mach_debug/hash_info.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#endif
+
+
+zone_t ipc_marequest_zone;
+int ipc_marequest_max = IMAR_MAX;
+
+#define imar_alloc() ((ipc_marequest_t) zalloc(ipc_marequest_zone))
+#define imar_free(imar) zfree(ipc_marequest_zone, (vm_offset_t) (imar))
+
+typedef unsigned int ipc_marequest_index_t;
+
+ipc_marequest_index_t ipc_marequest_size;
+ipc_marequest_index_t ipc_marequest_mask;
+
+/* Hash a (space, name) pair to a marequest table index; mask keeps
+   the result within the power-of-two table size.
+   NOTE(review): the "space" argument is used unparenthesized in the
+   cast below — safe for the plain-identifier call sites in this file,
+   but fragile for expression arguments; confirm before reuse. */
+#define	IMAR_HASH(space, name) \
+		((((ipc_marequest_index_t)((vm_offset_t)space) >> 4) + \
+		  MACH_PORT_INDEX(name) + MACH_PORT_NGEN(name)) & \
+		 ipc_marequest_mask)
+
+/* One hash bucket: a simple lock plus a singly-linked request chain. */
+typedef struct ipc_marequest_bucket {
+	decl_simple_lock_data(, imarb_lock_data)
+	ipc_marequest_t imarb_head;
+} *ipc_marequest_bucket_t;
+
+#define	IMARB_NULL	((ipc_marequest_bucket_t) 0)
+
+#define	imarb_lock_init(imarb)	simple_lock_init(&(imarb)->imarb_lock_data)
+#define	imarb_lock(imarb)	simple_lock(&(imarb)->imarb_lock_data)
+#define	imarb_unlock(imarb)	simple_unlock(&(imarb)->imarb_lock_data)
+
+ipc_marequest_bucket_t ipc_marequest_table;
+
+
+
+/*
+ * Routine: ipc_marequest_init
+ * Purpose:
+ * Initialize the msg-accepted request module.
+ */
+
+void
+ipc_marequest_init()
+{
+	ipc_marequest_index_t i;
+
+	/* if not configured, initialize ipc_marequest_size */
+
+	if (ipc_marequest_size == 0) {
+		/* default: max/256 buckets, but never fewer than 16 */
+		ipc_marequest_size = ipc_marequest_max >> 8;
+		if (ipc_marequest_size < 16)
+			ipc_marequest_size = 16;
+	}
+
+	/* make sure it is a power of two */
+
+	ipc_marequest_mask = ipc_marequest_size - 1;
+	if ((ipc_marequest_size & ipc_marequest_mask) != 0) {
+		unsigned int bit;
+
+		/* round up to closest power of two */
+
+		/* OR in successive low bits; the loop maintains
+		   size == mask + 1 and stops once size is a power
+		   of two, leaving mask == size - 1 as IMAR_HASH needs */
+		for (bit = 1;; bit <<= 1) {
+			ipc_marequest_mask |= bit;
+			ipc_marequest_size = ipc_marequest_mask + 1;
+
+			if ((ipc_marequest_size & ipc_marequest_mask) == 0)
+				break;
+		}
+	}
+
+	/* allocate ipc_marequest_table */
+
+	ipc_marequest_table = (ipc_marequest_bucket_t)
+		kalloc((vm_size_t) (ipc_marequest_size *
+				    sizeof(struct ipc_marequest_bucket)));
+	assert(ipc_marequest_table != IMARB_NULL);
+
+	/* and initialize it */
+
+	for (i = 0; i < ipc_marequest_size; i++) {
+		ipc_marequest_bucket_t bucket;
+
+		bucket = &ipc_marequest_table[i];
+		imarb_lock_init(bucket);
+		bucket->imarb_head = IMAR_NULL;
+	}
+
+	/* zone sized for at most ipc_marequest_max outstanding requests */
+	ipc_marequest_zone =
+		zinit(sizeof(struct ipc_marequest),
+		      ipc_marequest_max * sizeof(struct ipc_marequest),
+		      sizeof(struct ipc_marequest),
+		      IPC_ZONE_TYPE, "ipc msg-accepted requests");
+}
+
+/*
+ * Routine: ipc_marequest_create
+ * Purpose:
+ * Create a msg-accepted request, because
+ * a sender is forcing a message with MACH_SEND_NOTIFY.
+ *
+ * The "notify" argument should name a receive right
+ * that is used to create the send-once notify port.
+ *
+ * [MACH_IPC_COMPAT] If "notify" is MACH_PORT_NULL,
+ * then an old-style msg-accepted request is created.
+ * Conditions:
+ * Nothing locked; refs held for space and port.
+ * Returns:
+ * MACH_MSG_SUCCESS Msg-accepted request created.
+ * MACH_SEND_INVALID_NOTIFY The space is dead.
+ * MACH_SEND_INVALID_NOTIFY The notify port is bad.
+ * MACH_SEND_NOTIFY_IN_PROGRESS
+ * This space has already forced a message to this port.
+ * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
+ */
+
+mach_msg_return_t
+ipc_marequest_create(space, port, notify, marequestp)
+	ipc_space_t space;
+	ipc_port_t port;
+	mach_port_t notify;
+	ipc_marequest_t *marequestp;
+{
+	mach_port_t name;
+	ipc_entry_t entry;
+	ipc_port_t soright;
+	ipc_marequest_t marequest;
+	ipc_marequest_bucket_t bucket;
+
+#if	!MACH_IPC_COMPAT
+	assert(notify != MACH_PORT_NULL);
+#endif	!MACH_IPC_COMPAT
+
+	/* allocate up front so the failure paths below need only free it */
+	marequest = imar_alloc();
+	if (marequest == IMAR_NULL)
+		return MACH_SEND_NO_NOTIFY;
+
+	/*
+	 * Delay creating the send-once right until
+	 * we know there will be no errors. Otherwise,
+	 * we would have to worry about disposing of it
+	 * when it turned out it wasn't needed.
+	 */
+
+	is_write_lock(space);
+	if (!space->is_active) {
+		is_write_unlock(space);
+		imar_free(marequest);
+		return MACH_SEND_INVALID_NOTIFY;
+	}
+
+	/* does the sender's space hold a right naming this port? */
+	if (ipc_right_reverse(space, (ipc_object_t) port, &name, &entry)) {
+		ipc_entry_bits_t bits;
+
+		/* port is locked and active */
+		ip_unlock(port);
+		bits = entry->ie_bits;
+
+		assert(port == (ipc_port_t) entry->ie_object);
+		assert(bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+		/* only one forced message per (space, port) at a time */
+		if (bits & IE_BITS_MAREQUEST) {
+			is_write_unlock(space);
+			imar_free(marequest);
+			return MACH_SEND_NOTIFY_IN_PROGRESS;
+		}
+
+#if	MACH_IPC_COMPAT
+		if (notify == MACH_PORT_NULL)
+			soright = IP_NULL;
+		else
+#endif	MACH_IPC_COMPAT
+		if ((soright = ipc_port_lookup_notify(space, notify))
+							== IP_NULL) {
+			is_write_unlock(space);
+			imar_free(marequest);
+			return MACH_SEND_INVALID_NOTIFY;
+		}
+
+		/* mark the entry blocked and hash the request under its name */
+		entry->ie_bits = bits | IE_BITS_MAREQUEST;
+
+		is_reference(space);
+		marequest->imar_space = space;
+		marequest->imar_name = name;
+		marequest->imar_soright = soright;
+
+		bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+		imarb_lock(bucket);
+
+		marequest->imar_next = bucket->imarb_head;
+		bucket->imarb_head = marequest;
+
+		imarb_unlock(bucket);
+	} else {
+#if	MACH_IPC_COMPAT
+		if (notify == MACH_PORT_NULL)
+			soright = IP_NULL;
+		else
+#endif	MACH_IPC_COMPAT
+		if ((soright = ipc_port_lookup_notify(space, notify))
+							== IP_NULL) {
+			is_write_unlock(space);
+			imar_free(marequest);
+			return MACH_SEND_INVALID_NOTIFY;
+		}
+
+		/* no right for the port: request is unnamed and not hashed */
+		is_reference(space);
+		marequest->imar_space = space;
+		marequest->imar_name = MACH_PORT_NULL;
+		marequest->imar_soright = soright;
+	}
+
+	is_write_unlock(space);
+	*marequestp = marequest;
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_marequest_cancel
+ * Purpose:
+ * Cancel a msg-accepted request, because
+ * the space's entry is being destroyed.
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
+void
+ipc_marequest_cancel(space, name)
+	ipc_space_t space;
+	mach_port_t name;
+{
+	ipc_marequest_bucket_t bucket;
+	ipc_marequest_t marequest, *last;
+
+	assert(space->is_active);
+
+	bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+	imarb_lock(bucket);
+
+	/* walk the chain keeping a pointer-to-link so unchaining is O(1) */
+	for (last = &bucket->imarb_head;
+	     (marequest = *last) != IMAR_NULL;
+	     last = &marequest->imar_next)
+		if ((marequest->imar_space == space) &&
+		    (marequest->imar_name == name))
+			break;
+
+	/* caller guarantees the entry had IE_BITS_MAREQUEST set */
+	assert(marequest != IMAR_NULL);
+	*last = marequest->imar_next;
+	imarb_unlock(bucket);
+
+	/* a null name tells ipc_marequest_destroy the right is gone */
+	marequest->imar_name = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_marequest_rename
+ * Purpose:
+ * Rename a msg-accepted request, because the entry
+ * in the space is being renamed.
+ * Conditions:
+ * The space is write-locked and active.
+ */
+
+void
+ipc_marequest_rename(space, old, new)
+	ipc_space_t space;
+	mach_port_t old, new;
+{
+	ipc_marequest_bucket_t bucket;
+	ipc_marequest_t marequest, *last;
+
+	assert(space->is_active);
+
+	/* unchain from the bucket hashed under the old name */
+	bucket = &ipc_marequest_table[IMAR_HASH(space, old)];
+	imarb_lock(bucket);
+
+	for (last = &bucket->imarb_head;
+	     (marequest = *last) != IMAR_NULL;
+	     last = &marequest->imar_next)
+		if ((marequest->imar_space == space) &&
+		    (marequest->imar_name == old))
+			break;
+
+	assert(marequest != IMAR_NULL);
+	*last = marequest->imar_next;
+	imarb_unlock(bucket);
+
+	/* imar_name may only change while the request is out of a bucket */
+	marequest->imar_name = new;
+
+	/* rechain under the new name's bucket */
+	bucket = &ipc_marequest_table[IMAR_HASH(space, new)];
+	imarb_lock(bucket);
+
+	marequest->imar_next = bucket->imarb_head;
+	bucket->imarb_head = marequest;
+
+	imarb_unlock(bucket);
+}
+
+/*
+ * Routine: ipc_marequest_destroy
+ * Purpose:
+ * Destroy a msg-accepted request, because
+ * the kernel message is being received/destroyed.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_marequest_destroy(marequest)
+	ipc_marequest_t marequest;
+{
+	ipc_space_t space = marequest->imar_space;
+	mach_port_t name;
+	ipc_port_t soright;
+#if	MACH_IPC_COMPAT
+	ipc_port_t sright = IP_NULL;
+#endif	MACH_IPC_COMPAT
+
+	is_write_lock(space);
+
+	name = marequest->imar_name;
+	soright = marequest->imar_soright;
+
+	/* still named => still hashed; unchain it from its bucket */
+	if (name != MACH_PORT_NULL) {
+		ipc_marequest_bucket_t bucket;
+		ipc_marequest_t this, *last;
+
+		bucket = &ipc_marequest_table[IMAR_HASH(space, name)];
+		imarb_lock(bucket);
+
+		for (last = &bucket->imarb_head;
+		     (this = *last) != IMAR_NULL;
+		     last = &this->imar_next)
+			if ((this->imar_space == space) &&
+			    (this->imar_name == name))
+				break;
+
+		assert(this == marequest);
+		*last = this->imar_next;
+		imarb_unlock(bucket);
+
+		if (space->is_active) {
+			ipc_entry_t entry;
+
+			/* unblock the entry so MACH_SEND_NOTIFY works again */
+			entry = ipc_entry_lookup(space, name);
+			assert(entry != IE_NULL);
+			assert(entry->ie_bits & IE_BITS_MAREQUEST);
+			assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+			entry->ie_bits &= ~IE_BITS_MAREQUEST;
+
+#if	MACH_IPC_COMPAT
+			/* compat: no send-once right was given, so notify
+			   via the space's notify port instead */
+			if (soright == IP_NULL)
+				sright = ipc_space_make_notify(space);
+#endif	MACH_IPC_COMPAT
+		} else
+			/* dead space: report a null name in the notification */
+			name = MACH_PORT_NULL;
+	}
+
+	is_write_unlock(space);
+	is_release(space);	/* drop the ref taken by ipc_marequest_create */
+
+	imar_free(marequest);
+
+#if	MACH_IPC_COMPAT
+	if (soright == IP_NULL) {
+		if (IP_VALID(sright)) {
+			assert(name != MACH_PORT_NULL);
+			ipc_notify_msg_accepted_compat(sright, name);
+		}
+
+		return;
+	}
+	assert(sright == IP_NULL);
+#endif	MACH_IPC_COMPAT
+
+	/* normal path: consume the send-once right with the notification */
+	assert(soright != IP_NULL);
+	ipc_notify_msg_accepted(soright, name);
+}
+
+#if MACH_IPC_DEBUG
+
+
+/*
+ * Routine: ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+ * Fills the buffer with as much information as possible
+ * and returns the desired size of the buffer.
+ * Conditions:
+ * Nothing locked. The caller should provide
+ * possibly-pageable memory.
+ */
+
+unsigned int
+ipc_marequest_info(maxp, info, count)
+	unsigned int *maxp;
+	hash_info_bucket_t *info;
+	unsigned int count;
+{
+	ipc_marequest_index_t i;
+
+	/* report no more buckets than the table actually has */
+	if (ipc_marequest_size < count)
+		count = ipc_marequest_size;
+
+	for (i = 0; i < count; i++) {
+		ipc_marequest_bucket_t bucket = &ipc_marequest_table[i];
+		unsigned int bucket_count = 0;
+		ipc_marequest_t marequest;
+
+		/* count chain length under the bucket lock */
+		imarb_lock(bucket);
+		for (marequest = bucket->imarb_head;
+		     marequest != IMAR_NULL;
+		     marequest = marequest->imar_next)
+			bucket_count++;
+		imarb_unlock(bucket);
+
+		/* don't touch pageable memory while holding locks */
+		info[i].hib_count = bucket_count;
+	}
+
+	/* tell the caller the table size so it can size its buffer */
+	*maxp = ipc_marequest_max;
+	return ipc_marequest_size;
+}
+
+#endif MACH_IPC_DEBUG
diff --git a/ipc/ipc_marequest.h b/ipc/ipc_marequest.h
new file mode 100644
index 0000000..0e0380e
--- /dev/null
+++ b/ipc/ipc_marequest.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_marequest.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for msg-accepted requests.
+ */
+
+#ifndef _IPC_IPC_MAREQUEST_H_
+#define _IPC_IPC_MAREQUEST_H_
+
+#include <mach_ipc_debug.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+
+/*
+ * A msg-accepted request is made when MACH_SEND_NOTIFY is used
+ * to force a message to a send right. The IE_BITS_MAREQUEST bit
+ * in an entry indicates the entry is blocked because MACH_SEND_NOTIFY
+ * has already been used to force a message. The kmsg holds
+ * a pointer to the marequest; it is destroyed when the kmsg
+ * is received/destroyed. (If the send right is destroyed,
+ * this just changes imar_name. If the space is destroyed,
+ * the marequest is left unchanged.)
+ *
+ * Locking considerations: The imar_space field is read-only and
+ * points to the space which locks the imar_name field. imar_soright
+ * is read-only. Normally it is a non-null send-once right for
+ * the msg-accepted notification, but in compat mode it is null
+ * and the notification goes to the space's notify port. Normally
+ * imar_name is non-null, but if the send right is destroyed then
+ * it is changed to be null. imar_next is locked by a bucket lock;
+ * imar_name is read-only when the request is in a bucket. (So lookups
+ * in the bucket can safely check imar_space and imar_name.)
+ * imar_space and imar_soright both hold references.
+ */
+
+typedef struct ipc_marequest {
+	struct ipc_space *imar_space;	/* sender's space; read-only, holds a ref */
+	mach_port_t imar_name;		/* send-right name; null once the right dies */
+	struct ipc_port *imar_soright;	/* send-once right for the notification
+					   (null in compat mode) */
+	struct ipc_marequest *imar_next;	/* hash-bucket chain link */
+} *ipc_marequest_t;
+
+#define IMAR_NULL ((ipc_marequest_t) 0)
+
+
+extern void
+ipc_marequest_init();
+
+#if MACH_IPC_DEBUG
+
+extern unsigned int
+ipc_marequest_info(/* unsigned int *, hash_info_bucket_t *, unsigned int */);
+
+#endif MACH_IPC_DEBUG
+
+extern mach_msg_return_t
+ipc_marequest_create(/* ipc_space_t space, mach_port_t name,
+ ipc_port_t soright, ipc_marequest_t *marequestp */);
+
+extern void
+ipc_marequest_cancel(/* ipc_space_t space, mach_port_t name */);
+
+extern void
+ipc_marequest_rename(/* ipc_space_t space,
+ mach_port_t old, mach_port_t new */);
+
+extern void
+ipc_marequest_destroy(/* ipc_marequest_t marequest */);
+
+#endif _IPC_IPC_MAREQUEST_H_
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
new file mode 100644
index 0000000..5447c49
--- /dev/null
+++ b/ipc/ipc_mqueue.c
@@ -0,0 +1,754 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_mqueue.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC message queues.
+ */
+
+#include <norma_ipc.h>
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_marequest.h>
+
+
+
+#if NORMA_IPC
+extern ipc_mqueue_t norma_ipc_handoff_mqueue;
+extern ipc_kmsg_t norma_ipc_handoff_msg;
+extern mach_msg_size_t norma_ipc_handoff_max_size;
+extern mach_msg_size_t norma_ipc_handoff_msg_size;
+extern ipc_kmsg_t norma_ipc_kmsg_accept();
+#endif NORMA_IPC
+
+/*
+ * Routine: ipc_mqueue_init
+ * Purpose:
+ * Initialize a newly-allocated message queue.
+ */
+
+void
+ipc_mqueue_init(
+	ipc_mqueue_t mqueue)
+{
+	/* start unlocked, with no queued messages and no waiting receivers */
+	imq_lock_init(mqueue);
+	ipc_kmsg_queue_init(&mqueue->imq_messages);
+	ipc_thread_queue_init(&mqueue->imq_threads);
+}
+
+/*
+ * Routine: ipc_mqueue_move
+ * Purpose:
+ * Move messages from one queue (source) to another (dest).
+ * Only moves messages sent to the specified port.
+ * Conditions:
+ * Both queues must be locked.
+ * (This is sufficient to manipulate port->ip_seqno.)
+ */
+
+void
+ipc_mqueue_move(
+	ipc_mqueue_t dest,
+	ipc_mqueue_t source,
+	ipc_port_t port)
+{
+	ipc_kmsg_queue_t oldq, newq;
+	ipc_thread_queue_t blockedq;
+	ipc_kmsg_t kmsg, next;
+	ipc_thread_t th;
+
+	oldq = &source->imq_messages;
+	newq = &dest->imq_messages;
+	blockedq = &dest->imq_threads;
+
+	/* walk source's messages; `next' is fetched before any unlinking */
+	for (kmsg = ipc_kmsg_queue_first(oldq);
+	     kmsg != IKM_NULL; kmsg = next) {
+		next = ipc_kmsg_queue_next(oldq, kmsg);
+
+		/* only move messages sent to port */
+
+		if (kmsg->ikm_header.msgh_remote_port != (mach_port_t) port)
+			continue;
+
+		ipc_kmsg_rmqueue(oldq, kmsg);
+
+		/* before adding kmsg to newq, check for a blocked receiver */
+
+		while ((th = ipc_thread_dequeue(blockedq)) != ITH_NULL) {
+			assert(ipc_kmsg_queue_empty(newq));
+
+			thread_go(th);
+
+			/* check if the receiver can handle the message */
+
+			if (kmsg->ikm_header.msgh_size <= th->ith_msize) {
+				/* hand the kmsg directly to this receiver;
+				   sequence number is taken under both locks
+				   (see Conditions above) */
+				th->ith_state = MACH_MSG_SUCCESS;
+				th->ith_kmsg = kmsg;
+				th->ith_seqno = port->ip_seqno++;
+
+				goto next_kmsg;
+			}
+
+			/* receiver's buffer is too small: it wakes with
+			   MACH_RCV_TOO_LARGE and the needed size in ith_msize */
+			th->ith_state = MACH_RCV_TOO_LARGE;
+			th->ith_msize = kmsg->ikm_header.msgh_size;
+		}
+
+		/* didn't find a receiver to handle the message */
+
+		ipc_kmsg_enqueue(newq, kmsg);
+	    next_kmsg:;
+	}
+}
+
+/*
+ * Routine: ipc_mqueue_changed
+ * Purpose:
+ * Wake up receivers waiting in a message queue.
+ * Conditions:
+ * The message queue is locked.
+ */
+
+void
+ipc_mqueue_changed(
+	ipc_mqueue_t mqueue,
+	mach_msg_return_t mr)
+{
+	ipc_thread_t th;
+
+	/* every blocked receiver is dequeued and resumed with `mr'
+	   (e.g. MACH_RCV_PORT_DIED / MACH_RCV_PORT_CHANGED) as its
+	   receive result */
+	while ((th = ipc_thread_dequeue(&mqueue->imq_threads)) != ITH_NULL) {
+		th->ith_state = mr;
+		thread_go(th);
+	}
+}
+
+/*
+ * Routine: ipc_mqueue_send
+ * Purpose:
+ * Send a message to a port. The message holds a reference
+ * for the destination port in the msgh_remote_port field.
+ *
+ * If unsuccessful, the caller still has possession of
+ * the message and must do something with it. If successful,
+ * the message is queued, given to a receiver, destroyed,
+ * or handled directly by the kernel via mach_msg.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS The message was accepted.
+ * MACH_SEND_TIMED_OUT Caller still has message.
+ * MACH_SEND_INTERRUPTED Caller still has message.
+ */
+
+mach_msg_return_t
+ipc_mqueue_send(kmsg, option, time_out)
+	ipc_kmsg_t kmsg;		/* message; holds a ref for the dest port */
+	mach_msg_option_t option;	/* MACH_SEND_TIMEOUT / MACH_SEND_ALWAYS */
+	mach_msg_timeout_t time_out;	/* used only with MACH_SEND_TIMEOUT */
+{
+	ipc_port_t port;
+
+	port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+
+	/* fast path: message addressed to a kernel object is handled
+	   synchronously by the kernel server, no queueing */
+	if (port->ip_receiver == ipc_space_kernel) {
+		ipc_kmsg_t reply;
+
+		/*
+		 * We can check ip_receiver == ipc_space_kernel
+		 * before checking that the port is active because
+		 * ipc_port_dealloc_kernel clears ip_receiver
+		 * before destroying a kernel port.
+		 */
+
+		assert(ip_active(port));
+		ip_unlock(port);
+
+		reply = ipc_kobject_server(kmsg);
+		if (reply != IKM_NULL)
+			ipc_mqueue_send_always(reply);
+
+		return MACH_MSG_SUCCESS;
+	}
+
+#if	NORMA_IPC
+	if (IP_NORMA_IS_PROXY(port)) {
+		mach_msg_return_t mr;
+
+		mr = norma_ipc_send(kmsg);
+		ip_unlock(port);
+		return mr;
+	}
+#endif	NORMA_IPC
+
+	/* loop: either break out to deliver, or block on a full queue
+	   and retest everything after waking up */
+	for (;;) {
+		ipc_thread_t self;
+
+		/*
+		 * Can't deliver to a dead port.
+		 * However, we can pretend it got sent
+		 * and was then immediately destroyed.
+		 */
+
+		if (!ip_active(port)) {
+			/*
+			 * We can't let ipc_kmsg_destroy deallocate
+			 * the port right, because we might end up
+			 * in an infinite loop trying to deliver
+			 * a send-once notification.
+			 */
+
+			ip_release(port);
+			ip_check_unlock(port);
+			kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+#if	NORMA_IPC
+			/* XXX until ipc_kmsg_destroy is fixed... */
+			norma_ipc_finish_receiving(&kmsg);
+#endif	NORMA_IPC
+			ipc_kmsg_destroy(kmsg);
+			return MACH_MSG_SUCCESS;
+		}
+
+		/*
+		 * Don't block if:
+		 *	1) We're under the queue limit.
+		 *	2) Caller used the MACH_SEND_ALWAYS internal option.
+		 *	3) Message is sent to a send-once right.
+		 */
+
+		if ((port->ip_msgcount < port->ip_qlimit) ||
+		    (option & MACH_SEND_ALWAYS) ||
+		    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+						MACH_MSG_TYPE_PORT_SEND_ONCE))
+			break;
+
+		/* must block waiting for queue to clear */
+
+		self = current_thread();
+
+		if (option & MACH_SEND_TIMEOUT) {
+			if (time_out == 0) {
+				ip_unlock(port);
+				return MACH_SEND_TIMED_OUT;
+			}
+
+			thread_will_wait_with_timeout(self, time_out);
+		} else
+			thread_will_wait(self);
+
+		ipc_thread_enqueue(&port->ip_blocked, self);
+		self->ith_state = MACH_SEND_IN_PROGRESS;
+
+		ip_unlock(port);
+		counter(c_ipc_mqueue_send_block++);
+		thread_block((void (*)(void)) 0);
+		ip_lock(port);
+
+		/* why did we wake up? */
+
+		/* MACH_MSG_SUCCESS here means a receiver made room and
+		   removed us from ip_blocked; retry delivery from the top */
+		if (self->ith_state == MACH_MSG_SUCCESS)
+			continue;
+		assert(self->ith_state == MACH_SEND_IN_PROGRESS);
+
+		/* take ourselves off blocked queue */
+
+		ipc_thread_rmqueue(&port->ip_blocked, self);
+
+		/*
+		 * Thread wakeup-reason field tells us why
+		 * the wait was interrupted.
+		 */
+
+		switch (self->ith_wait_result) {
+		    case THREAD_INTERRUPTED:
+			/* send was interrupted - give up */
+
+			ip_unlock(port);
+			return MACH_SEND_INTERRUPTED;
+
+		    case THREAD_TIMED_OUT:
+			/* timeout expired */
+
+			assert(option & MACH_SEND_TIMEOUT);
+			/* retry once more; time_out == 0 makes the next
+			   full-queue test return MACH_SEND_TIMED_OUT */
+			time_out = 0;
+			break;
+
+		    case THREAD_RESTART:
+		    default:
+#if MACH_ASSERT
+			assert(!"ipc_mqueue_send");
+#else
+			panic("ipc_mqueue_send");
+#endif
+		}
+	}
+
+	if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+		ip_unlock(port);
+
+		/* don't allow the creation of a circular loop */
+
+#if	NORMA_IPC
+		/* XXX until ipc_kmsg_destroy is fixed... */
+		norma_ipc_finish_receiving(&kmsg);
+#endif	NORMA_IPC
+		ipc_kmsg_destroy(kmsg);
+		return MACH_MSG_SUCCESS;
+	}
+
+	{
+		ipc_mqueue_t mqueue;
+		ipc_pset_t pset;
+		ipc_thread_t receiver;
+		ipc_thread_queue_t receivers;
+
+		port->ip_msgcount++;
+		assert(port->ip_msgcount > 0);
+
+		/* deliver via the port-set's queue when the port is in a set */
+		pset = port->ip_pset;
+		if (pset == IPS_NULL)
+			mqueue = &port->ip_messages;
+		else
+			mqueue = &pset->ips_messages;
+
+		imq_lock(mqueue);
+		receivers = &mqueue->imq_threads;
+
+		/*
+		 * Can unlock the port now that the msg queue is locked
+		 * and we know the port is active.  While the msg queue
+		 * is locked, we have control of the kmsg, so the ref in
+		 * it for the port is still good.  If the msg queue is in
+		 * a set (dead or alive), then we're OK because the port
+		 * is still a member of the set and the set won't go away
+		 * until the port is taken out, which tries to lock the
+		 * set's msg queue to remove the port's msgs.
+		 */
+
+		ip_unlock(port);
+
+		/* check for a receiver for the message */
+
+#if	NORMA_IPC
+		if (mqueue == norma_ipc_handoff_mqueue) {
+			norma_ipc_handoff_msg = kmsg;
+			if (kmsg->ikm_header.msgh_size <= norma_ipc_handoff_max_size) {
+				imq_unlock(mqueue);
+				return MACH_MSG_SUCCESS;
+			}
+			norma_ipc_handoff_msg_size = kmsg->ikm_header.msgh_size;
+		}
+#endif	NORMA_IPC
+		for (;;) {
+			receiver = ipc_thread_queue_first(receivers);
+			if (receiver == ITH_NULL) {
+				/* no receivers; queue kmsg */
+
+				ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
+				imq_unlock(mqueue);
+				break;
+			}
+
+			ipc_thread_rmqueue_first_macro(receivers, receiver);
+			assert(ipc_kmsg_queue_empty(&mqueue->imq_messages));
+
+			if (kmsg->ikm_header.msgh_size <= receiver->ith_msize) {
+				/* got a successful receiver */
+
+				receiver->ith_state = MACH_MSG_SUCCESS;
+				receiver->ith_kmsg = kmsg;
+				receiver->ith_seqno = port->ip_seqno++;
+				imq_unlock(mqueue);
+
+				thread_go(receiver);
+				break;
+			}
+
+			/* this receiver's buffer is too small; wake it
+			   with MACH_RCV_TOO_LARGE and try the next one */
+			receiver->ith_state = MACH_RCV_TOO_LARGE;
+			receiver->ith_msize = kmsg->ikm_header.msgh_size;
+			thread_go(receiver);
+		}
+	}
+
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_mqueue_copyin
+ * Purpose:
+ * Convert a name in a space to a message queue.
+ * Conditions:
+ * Nothing locked. If successful, the message queue
+ * is returned locked and caller gets a ref for the object.
+ * This ref ensures the continued existence of the queue.
+ * Returns:
+ * MACH_MSG_SUCCESS Found a message queue.
+ * MACH_RCV_INVALID_NAME The space is dead.
+ * MACH_RCV_INVALID_NAME The name doesn't denote a right.
+ * MACH_RCV_INVALID_NAME
+ * The denoted right is not receive or port set.
+ * MACH_RCV_IN_SET Receive right is a member of a set.
+ */
+
+mach_msg_return_t
+ipc_mqueue_copyin(
+	ipc_space_t space,		/* space to translate name in */
+	mach_port_t name,		/* receive right or port-set name */
+	ipc_mqueue_t *mqueuep,		/* out: locked message queue */
+	ipc_object_t *objectp)		/* out: referenced port/pset object */
+{
+	ipc_entry_t entry;
+	ipc_entry_bits_t bits;
+	ipc_object_t object;
+	ipc_mqueue_t mqueue;
+
+	is_read_lock(space);
+	if (!space->is_active) {
+		is_read_unlock(space);
+		return MACH_RCV_INVALID_NAME;
+	}
+
+	entry = ipc_entry_lookup(space, name);
+	if (entry == IE_NULL) {
+		is_read_unlock(space);
+		return MACH_RCV_INVALID_NAME;
+	}
+
+	bits = entry->ie_bits;
+	object = entry->ie_object;
+
+	if (bits & MACH_PORT_TYPE_RECEIVE) {
+		ipc_port_t port;
+		ipc_pset_t pset;
+
+		port = (ipc_port_t) object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+		is_read_unlock(space);
+
+		pset = port->ip_pset;
+		if (pset != IPS_NULL) {
+			ips_lock(pset);
+			if (ips_active(pset)) {
+				/* can't receive directly from a port
+				   that is a member of a live set */
+				ips_unlock(pset);
+				ip_unlock(port);
+				return MACH_RCV_IN_SET;
+			}
+
+			/* set is dead: lazily remove the port from it */
+			ipc_pset_remove(pset, port);
+			ips_check_unlock(pset);
+			assert(port->ip_pset == IPS_NULL);
+		}
+
+		mqueue = &port->ip_messages;
+	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
+		ipc_pset_t pset;
+
+		pset = (ipc_pset_t) object;
+		assert(pset != IPS_NULL);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+		assert(pset->ips_local_name == name);
+		is_read_unlock(space);
+
+		mqueue = &pset->ips_messages;
+	} else {
+		/* name denotes neither a receive right nor a port set */
+		is_read_unlock(space);
+		return MACH_RCV_INVALID_NAME;
+	}
+
+	/*
+	 * At this point, the object is locked and active,
+	 * the space is unlocked, and mqueue is initialized.
+	 */
+
+	/* take the caller's ref, then trade the object lock
+	   for the message-queue lock */
+	io_reference(object);
+	imq_lock(mqueue);
+	io_unlock(object);
+
+	*objectp = object;
+	*mqueuep = mqueue;
+	return MACH_MSG_SUCCESS;
+}
+
+/*
+ * Routine: ipc_mqueue_receive
+ * Purpose:
+ * Receive a message from a message queue.
+ *
+ * If continuation is non-zero, then we might discard
+ * our kernel stack when we block. We will continue
+ * after unblocking by executing continuation.
+ *
+ * If resume is true, then we are resuming a receive
+ * operation after a blocked receive discarded our stack.
+ * Conditions:
+ * The message queue is locked; it will be returned unlocked.
+ *
+ * Our caller must hold a reference for the port or port set
+ * to which this queue belongs, to keep the queue
+ * from being deallocated. Furthermore, the port or set
+ * must have been active when the queue was locked.
+ *
+ * The kmsg is returned with clean header fields
+ * and with the circular bit turned off.
+ * Returns:
+ * MACH_MSG_SUCCESS Message returned in kmsgp.
+ * MACH_RCV_TOO_LARGE Message size returned in kmsgp.
+ * MACH_RCV_TIMED_OUT No message obtained.
+ * MACH_RCV_INTERRUPTED No message obtained.
+ * MACH_RCV_PORT_DIED Port/set died; no message.
+ * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
+ *
+ */
+
+mach_msg_return_t
+ipc_mqueue_receive(
+	ipc_mqueue_t mqueue,		/* locked on entry, unlocked on return */
+	mach_msg_option_t option,	/* MACH_RCV_TIMEOUT */
+	mach_msg_size_t max_size,	/* receiver's buffer size */
+	mach_msg_timeout_t time_out,	/* used only with MACH_RCV_TIMEOUT */
+	boolean_t resume,		/* resuming after a stack-discarding block? */
+	void (*continuation)(void),	/* non-null: may discard stack on block */
+	ipc_kmsg_t *kmsgp,		/* out: kmsg, or msg size on RCV_TOO_LARGE */
+	mach_port_seqno_t *seqnop)	/* out: message sequence number */
+{
+	ipc_port_t port;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+
+    {
+	ipc_kmsg_queue_t kmsgs = &mqueue->imq_messages;
+	ipc_thread_t self = current_thread();
+
+	/* a resumed receive re-enters just after its thread_block */
+	if (resume)
+		goto after_thread_block;
+
+	for (;;) {
+		kmsg = ipc_kmsg_queue_first(kmsgs);
+#if	NORMA_IPC
+		/*
+		 * It may be possible to make this work even when a timeout
+		 * is specified.
+		 *
+		 * Netipc_replenish should be moved somewhere else.
+		 */
+		if (kmsg == IKM_NULL && ! (option & MACH_RCV_TIMEOUT)) {
+			netipc_replenish(FALSE);
+			*kmsgp = IKM_NULL;
+			kmsg = norma_ipc_kmsg_accept(mqueue, max_size,
+						     (mach_msg_size_t *)kmsgp);
+			if (kmsg != IKM_NULL) {
+				port = (ipc_port_t)
+					kmsg->ikm_header.msgh_remote_port;
+				seqno = port->ip_seqno++;
+				break;
+			}
+			if (*kmsgp) {
+				imq_unlock(mqueue);
+				return MACH_RCV_TOO_LARGE;
+			}
+		}
+#endif	NORMA_IPC
+		if (kmsg != IKM_NULL) {
+			/* check space requirements */
+
+			if (kmsg->ikm_header.msgh_size > max_size) {
+				/* kmsgp doubles as the size out-parameter
+				   for the TOO_LARGE case */
+				* (mach_msg_size_t *) kmsgp =
+					kmsg->ikm_header.msgh_size;
+				imq_unlock(mqueue);
+				return MACH_RCV_TOO_LARGE;
+			}
+
+			ipc_kmsg_rmqueue_first_macro(kmsgs, kmsg);
+			port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+			seqno = port->ip_seqno++;
+			break;
+		}
+
+		/* must block waiting for a message */
+
+		if (option & MACH_RCV_TIMEOUT) {
+			if (time_out == 0) {
+				imq_unlock(mqueue);
+				return MACH_RCV_TIMED_OUT;
+			}
+
+			thread_will_wait_with_timeout(self, time_out);
+		} else
+			thread_will_wait(self);
+
+		ipc_thread_enqueue_macro(&mqueue->imq_threads, self);
+		self->ith_state = MACH_RCV_IN_PROGRESS;
+		self->ith_msize = max_size;
+
+		imq_unlock(mqueue);
+		if (continuation != (void (*)(void)) 0) {
+			counter(c_ipc_mqueue_receive_block_user++);
+		} else {
+			counter(c_ipc_mqueue_receive_block_kernel++);
+		}
+		thread_block(continuation);
+	after_thread_block:
+		imq_lock(mqueue);
+
+		/* why did we wake up? */
+
+		if (self->ith_state == MACH_MSG_SUCCESS) {
+			/* pick up the message that was handed to us */
+
+			kmsg = self->ith_kmsg;
+			seqno = self->ith_seqno;
+			port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+			break;
+		}
+
+		switch (self->ith_state) {
+		    case MACH_RCV_TOO_LARGE:
+			/* pick up size of the too-large message */
+
+			* (mach_msg_size_t *) kmsgp = self->ith_msize;
+			/* fall-through */
+
+		    case MACH_RCV_PORT_DIED:
+		    case MACH_RCV_PORT_CHANGED:
+			/* something bad happened to the port/set */
+
+			imq_unlock(mqueue);
+			return self->ith_state;
+
+		    case MACH_RCV_IN_PROGRESS:
+			/*
+			 * Awakened for other than IPC completion.
+			 * Remove ourselves from the waiting queue,
+			 * then check the wakeup cause.
+			 */
+
+			ipc_thread_rmqueue(&mqueue->imq_threads, self);
+
+			switch (self->ith_wait_result) {
+			    case THREAD_INTERRUPTED:
+				/* receive was interrupted - give up */
+
+				imq_unlock(mqueue);
+				return MACH_RCV_INTERRUPTED;
+
+			    case THREAD_TIMED_OUT:
+				/* timeout expired */
+
+				assert(option & MACH_RCV_TIMEOUT);
+				/* loop once more; time_out == 0 then yields
+				   MACH_RCV_TIMED_OUT if still no message */
+				time_out = 0;
+				break;
+
+			    case THREAD_RESTART:
+			    default:
+#if MACH_ASSERT
+				assert(!"ipc_mqueue_receive");
+#else
+				panic("ipc_mqueue_receive");
+#endif
+			}
+			break;
+
+		    default:
+#if MACH_ASSERT
+			assert(!"ipc_mqueue_receive: strange ith_state");
+#else
+			panic("ipc_mqueue_receive: strange ith_state");
+#endif
+		}
+	}
+
+	/* we have a kmsg; unlock the msg queue */
+
+	imq_unlock(mqueue);
+	assert(kmsg->ikm_header.msgh_size <= max_size);
+    }
+
+    {
+	ipc_marequest_t marequest;
+
+	/* the message was accepted: fire any pending msg-accepted request */
+	marequest = kmsg->ikm_marequest;
+	if (marequest != IMAR_NULL) {
+		ipc_marequest_destroy(marequest);
+		kmsg->ikm_marequest = IMAR_NULL;
+	}
+	assert((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) == 0);
+
+	assert(port == (ipc_port_t) kmsg->ikm_header.msgh_remote_port);
+	ip_lock(port);
+
+	if (ip_active(port)) {
+		ipc_thread_queue_t senders;
+		ipc_thread_t sender;
+
+		assert(port->ip_msgcount > 0);
+		port->ip_msgcount--;
+
+		/* we made room in the queue: release one blocked sender */
+		senders = &port->ip_blocked;
+		sender = ipc_thread_queue_first(senders);
+
+		if ((sender != ITH_NULL) &&
+		    (port->ip_msgcount < port->ip_qlimit)) {
+			ipc_thread_rmqueue(senders, sender);
+			sender->ith_state = MACH_MSG_SUCCESS;
+			thread_go(sender);
+		}
+	}
+
+	ip_unlock(port);
+    }
+
+#if	NORMA_IPC
+	norma_ipc_finish_receiving(&kmsg);
+#endif	NORMA_IPC
+	*kmsgp = kmsg;
+	*seqnop = seqno;
+	return MACH_MSG_SUCCESS;
+}
diff --git a/ipc/ipc_mqueue.h b/ipc/ipc_mqueue.h
new file mode 100644
index 0000000..690fe28
--- /dev/null
+++ b/ipc/ipc_mqueue.h
@@ -0,0 +1,108 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_mqueue.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for message queues.
+ */
+
+#ifndef _IPC_IPC_MQUEUE_H_
+#define _IPC_IPC_MQUEUE_H_
+
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_thread.h>
+
+/* A message queue: pending kmsgs plus the receivers blocked on them.
+   Invariant (see ipc_mqueue.c): imq_messages and imq_threads are never
+   both non-empty at the same time. */
+typedef struct ipc_mqueue {
+	decl_simple_lock_data(, imq_lock_data)	/* protects both queues */
+	struct ipc_kmsg_queue imq_messages;	/* queued messages */
+	struct ipc_thread_queue imq_threads;	/* blocked receivers */
+} *ipc_mqueue_t;
+
+#define	IMQ_NULL		((ipc_mqueue_t) 0)
+
+#define	imq_lock_init(mq)	simple_lock_init(&(mq)->imq_lock_data)
+#define	imq_lock(mq)		simple_lock(&(mq)->imq_lock_data)
+#define	imq_lock_try(mq)	simple_lock_try(&(mq)->imq_lock_data)
+#define	imq_unlock(mq)		simple_unlock(&(mq)->imq_lock_data)
+
+extern void
+ipc_mqueue_init(/* ipc_mqueue_t */);
+
+extern void
+ipc_mqueue_move(/* ipc_mqueue_t, ipc_mqueue_t, ipc_port_t */);
+
+extern void
+ipc_mqueue_changed(/* ipc_mqueue_t, mach_msg_return_t */);
+
+extern mach_msg_return_t
+ipc_mqueue_send(/* ipc_kmsg_t, mach_msg_option_t, mach_msg_timeout_t */);
+
+#define IMQ_NULL_CONTINUE ((void (*)()) 0)
+
+extern mach_msg_return_t
+ipc_mqueue_receive(/* ipc_mqueue_t, mach_msg_option_t,
+ mach_msg_size_t, mach_msg_timeout_t,
+ boolean_t, void (*)(),
+ ipc_kmsg_t *, mach_port_seqno_t * */);
+
+/*
+ * extern void
+ * ipc_mqueue_send_always(ipc_kmsg_t);
+ *
+ * Unfortunately, to avoid warnings/lint about unused variables
+ * when assertions are turned off, we need two versions of this.
+ */
+
+#include <kern/assert.h>
+
+#if	MACH_ASSERT
+
+/* assertion build: check that the forced send really succeeded */
+#define ipc_mqueue_send_always(kmsg)					\
+MACRO_BEGIN								\
+	mach_msg_return_t mr;						\
+									\
+	mr = ipc_mqueue_send((kmsg), MACH_SEND_ALWAYS,			\
+			     MACH_MSG_TIMEOUT_NONE);			\
+	assert(mr == MACH_MSG_SUCCESS);					\
+MACRO_END
+
+#else	/* MACH_ASSERT */
+
+/* non-assertion build: discard the (always-successful) return value */
+#define ipc_mqueue_send_always(kmsg)					\
+MACRO_BEGIN								\
+	(void) ipc_mqueue_send((kmsg), MACH_SEND_ALWAYS,		\
+			       MACH_MSG_TIMEOUT_NONE);			\
+MACRO_END
+
+#endif /* _IPC_IPC_MQUEUE_H_ */
diff --git a/ipc/ipc_notify.c b/ipc/ipc_notify.c
new file mode 100644
index 0000000..870f301
--- /dev/null
+++ b/ipc/ipc_notify.c
@@ -0,0 +1,593 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_notify.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Notification-sending functions.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/port.h>
+#include <mach/message.h>
+#include <mach/notify.h>
+#include <kern/assert.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+
+#include <ipc/ipc_machdep.h>
+
+mach_port_deleted_notification_t ipc_notify_port_deleted_template;
+mach_msg_accepted_notification_t ipc_notify_msg_accepted_template;
+mach_port_destroyed_notification_t ipc_notify_port_destroyed_template;
+mach_no_senders_notification_t ipc_notify_no_senders_template;
+mach_send_once_notification_t ipc_notify_send_once_template;
+mach_dead_name_notification_t ipc_notify_dead_name_template;
+
+#if MACH_IPC_COMPAT
+/*
+ * When notification messages are received via the old
+ * msg_receive trap, the msg_type field should contain
+ * MSG_TYPE_EMERGENCY. We arrange for this by putting
+ * MSG_TYPE_EMERGENCY into msgh_seqno, which
+ * ipc_kmsg_copyout_compat copies to msg_type.
+ */
+
+#define NOTIFY_MSGH_SEQNO MSG_TYPE_EMERGENCY
+#else MACH_IPC_COMPAT
+#define NOTIFY_MSGH_SEQNO 0
+#endif MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_notify_init_port_deleted
+ * Purpose:
+ * Initialize a template for port-deleted notifications.
+ */
+
+void
+ipc_notify_init_port_deleted(n)
+	mach_port_deleted_notification_t *n;	/* template to fill in */
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_PORT_DELETED;
+
+	/* single inline port name; senders fill in not_port later */
+	t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+	t->msgt_size = PORT_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_msg_accepted
+ * Purpose:
+ * Initialize a template for msg-accepted notifications.
+ */
+
+void
+ipc_notify_init_msg_accepted(n)
+	mach_msg_accepted_notification_t *n;	/* template to fill in */
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_MSG_ACCEPTED;
+
+	/* single inline port name; senders fill in not_port later */
+	t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+	t->msgt_size = PORT_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_port_destroyed
+ * Purpose:
+ * Initialize a template for port-destroyed notifications.
+ */
+
+void
+ipc_notify_init_port_destroyed(
+	mach_port_destroyed_notification_t *n)	/* template to fill in */
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	/* COMPLEX: this notification carries an actual receive right,
+	   not just a name, so the body holds a port descriptor */
+	m->msgh_bits = MACH_MSGH_BITS_COMPLEX |
+		MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_PORT_DESTROYED;
+
+	t->msgt_name = MACH_MSG_TYPE_PORT_RECEIVE;
+	t->msgt_size = PORT_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init_no_senders
+ * Purpose:
+ * Initialize a template for no-senders notifications.
+ */
+
+void
+ipc_notify_init_no_senders(
+	mach_no_senders_notification_t *n)	/* template to fill in */
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_NO_SENDERS;
+
+	/* NOTE(review): the payload here is a 32-bit mscount, yet msgt_size
+	   reuses PORT_T_SIZE_IN_BITS; correct only while port names are
+	   32 bits wide — confirm if PORT_T_SIZE_IN_BITS ever changes */
+	t->msgt_name = MACH_MSG_TYPE_INTEGER_32;
+	t->msgt_size = PORT_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_count = 0;
+}
+
+/*
+ * Routine: ipc_notify_init_send_once
+ * Purpose:
+ * Initialize a template for send-once notifications.
+ */
+
+void
+ipc_notify_init_send_once(
+	mach_send_once_notification_t *n)	/* template to fill in */
+{
+	mach_msg_header_t *m = &n->not_header;
+
+	/* header-only message: a send-once notification carries no body */
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_SEND_ONCE;
+}
+
+/*
+ * Routine: ipc_notify_init_dead_name
+ * Purpose:
+ * Initialize a template for dead-name notifications.
+ */
+
+void
+ipc_notify_init_dead_name(
+	mach_dead_name_notification_t *n)	/* template to fill in */
+{
+	mach_msg_header_t *m = &n->not_header;
+	mach_msg_type_t *t = &n->not_type;
+
+	m->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0);
+	m->msgh_size = sizeof *n;
+	m->msgh_seqno = NOTIFY_MSGH_SEQNO;
+	m->msgh_local_port = MACH_PORT_NULL;
+	m->msgh_remote_port = MACH_PORT_NULL;
+	m->msgh_id = MACH_NOTIFY_DEAD_NAME;
+
+	/* single inline port name; senders fill in not_port later */
+	t->msgt_name = MACH_MSG_TYPE_PORT_NAME;
+	t->msgt_size = PORT_T_SIZE_IN_BITS;
+	t->msgt_number = 1;
+	t->msgt_inline = TRUE;
+	t->msgt_longform = FALSE;
+	t->msgt_deallocate = FALSE;
+	t->msgt_unused = 0;
+
+	n->not_port = MACH_PORT_NULL;
+}
+
+/*
+ * Routine: ipc_notify_init
+ * Purpose:
+ * Initialize the notification subsystem.
+ */
+
+void
+ipc_notify_init(void)
+{
+	/* build all six notification templates once at boot; the send
+	   routines below copy a template and patch in the variable fields */
+	ipc_notify_init_port_deleted(&ipc_notify_port_deleted_template);
+	ipc_notify_init_msg_accepted(&ipc_notify_msg_accepted_template);
+	ipc_notify_init_port_destroyed(&ipc_notify_port_destroyed_template);
+	ipc_notify_init_no_senders(&ipc_notify_no_senders_template);
+	ipc_notify_init_send_once(&ipc_notify_send_once_template);
+	ipc_notify_init_dead_name(&ipc_notify_dead_name_template);
+}
+
+/*
+ * Routine: ipc_notify_port_deleted
+ * Purpose:
+ * Send a port-deleted notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_port_deleted(port, name)
+	ipc_port_t port;	/* send-once right for the notification; consumed */
+	mach_port_t name;	/* name of the deleted right */
+{
+	ipc_kmsg_t kmsg;
+	mach_port_deleted_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of kmsgs: drop the notification but still
+		   consume the send-once right */
+		printf("dropped port-deleted (0x%08x, 0x%x)\n", port, name);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	/* stamp the template, then patch in destination and name */
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_port_deleted_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_port_deleted_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_msg_accepted
+ * Purpose:
+ * Send a msg-accepted notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_msg_accepted(port, name)
+	ipc_port_t port;	/* send-once right for the notification; consumed */
+	mach_port_t name;	/* name of the send right whose msg was accepted */
+{
+	ipc_kmsg_t kmsg;
+	mach_msg_accepted_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of kmsgs: drop the notification but still
+		   consume the send-once right */
+		printf("dropped msg-accepted (0x%08x, 0x%x)\n", port, name);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	/* stamp the template, then patch in destination and name */
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_msg_accepted_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_msg_accepted_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_port_destroyed
+ * Purpose:
+ * Send a port-destroyed notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ * Consumes a ref for right, which should be a receive right
+ * prepped for placement into a message. (In-transit,
+ * or in-limbo if a circularity was detected.)
+ */
+
+void
+ipc_notify_port_destroyed(port, right)
+	ipc_port_t port;	/* send-once right for the notification; consumed */
+	ipc_port_t right;	/* receive right being transferred; consumed */
+{
+	ipc_kmsg_t kmsg;
+	mach_port_destroyed_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of kmsgs: drop the notification but still consume
+		   both rights (the receive right is destroyed) */
+		printf("dropped port-destroyed (0x%08x, 0x%08x)\n",
+		       port, right);
+		ipc_port_release_sonce(port);
+		ipc_port_release_receive(right);
+		return;
+	}
+
+	/* stamp the template, then patch in destination and payload;
+	   the receive right rides in the message body */
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_port_destroyed_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = (mach_port_t) right;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_no_senders
+ * Purpose:
+ * Send a no-senders notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_no_senders(port, mscount)
+	ipc_port_t port;		/* send-once right; consumed */
+	mach_port_mscount_t mscount;	/* make-send count at notification time */
+{
+	ipc_kmsg_t kmsg;
+	mach_no_senders_notification_t *n;
+
+#if	NORMA_IPC
+	/* a no-senders proxy forwards the notification to a remote node */
+	if (ip_nsproxyp(port)) {
+		assert(mscount == 0);
+		norma_ipc_notify_no_senders(ip_nsproxy(port));
+		return;
+	}
+#endif	NORMA_IPC
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of kmsgs: drop the notification but still
+		   consume the send-once right */
+		printf("dropped no-senders (0x%08x, %u)\n", port, mscount);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	/* stamp the template, then patch in destination and count */
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_no_senders_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_no_senders_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_count = mscount;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_send_once
+ * Purpose:
+ * Send a send-once notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_send_once(port)
+	ipc_port_t port;	/* send-once right for the notification; consumed */
+{
+	ipc_kmsg_t kmsg;
+	mach_send_once_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* out of kmsgs: drop the notification but still
+		   consume the send-once right */
+		printf("dropped send-once (0x%08x)\n", port);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	/* stamp the (body-less) template, then patch in the destination */
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_send_once_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_send_once_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_dead_name
+ * Purpose:
+ * Send a dead-name notification.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/soright for port.
+ */
+
+void
+ipc_notify_dead_name(port, name)
+	ipc_port_t port;	/* send-once right: notification destination */
+	mach_port_t name;	/* name that has become a dead name */
+{
+	ipc_kmsg_t kmsg;
+	mach_dead_name_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* Can't send; still consume the send-once right. */
+		printf("dropped dead-name (0x%08x, 0x%x)\n", port, name);
+		ipc_port_release_sonce(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_dead_name_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_dead_name_template;
+
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_notify_port_deleted_compat
+ * Purpose:
+ * Send a port-deleted notification.
+ * Sends it to a send right instead of a send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/sright for port.
+ */
+
+void
+ipc_notify_port_deleted_compat(port, name)
+	ipc_port_t port;	/* send right (not send-once): destination */
+	mach_port_t name;	/* name of the deleted right */
+{
+	ipc_kmsg_t kmsg;
+	mach_port_deleted_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* Can't send; still consume the send right. */
+		printf("dropped port-deleted-compat (0x%08x, 0x%x)\n",
+		       port, name);
+		ipc_port_release_send(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_port_deleted_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_port_deleted_template;
+
+	/* The template is built for a send-once destination; override
+	   msgh_bits since this compat path targets a send right. */
+	n->not_header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_msg_accepted_compat
+ * Purpose:
+ * Send a msg-accepted notification.
+ * Sends it to a send right instead of a send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/sright for port.
+ */
+
+void
+ipc_notify_msg_accepted_compat(port, name)
+	ipc_port_t port;	/* send right (not send-once): destination */
+	mach_port_t name;	/* name of the accepted message's port */
+{
+	ipc_kmsg_t kmsg;
+	mach_msg_accepted_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* Can't send; still consume the send right. */
+		printf("dropped msg-accepted-compat (0x%08x, 0x%x)\n",
+		       port, name);
+		ipc_port_release_send(port);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_msg_accepted_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_msg_accepted_template;
+
+	/* Override the template's send-once msgh_bits for a send right. */
+	n->not_header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = name;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+/*
+ * Routine: ipc_notify_port_destroyed_compat
+ * Purpose:
+ * Send a port-destroyed notification.
+ * Sends it to a send right instead of a send-once right.
+ * Conditions:
+ * Nothing locked.
+ * Consumes a ref/sright for port.
+ * Consumes a ref for right, which should be a receive right
+ * prepped for placement into a message. (In-transit,
+ * or in-limbo if a circularity was detected.)
+ */
+
+void
+ipc_notify_port_destroyed_compat(port, right)
+	ipc_port_t port;	/* send right (not send-once): destination */
+	ipc_port_t right;	/* receive right carried in the message body */
+{
+	ipc_kmsg_t kmsg;
+	mach_port_destroyed_notification_t *n;
+
+	kmsg = ikm_alloc(sizeof *n);
+	if (kmsg == IKM_NULL) {
+		/* Can't send; release both rights so nothing leaks. */
+		printf("dropped port-destroyed-compat (0x%08x, 0x%08x)\n",
+		       port, right);
+		ipc_port_release_send(port);
+		ipc_port_release_receive(right);
+		return;
+	}
+
+	ikm_init(kmsg, sizeof *n);
+	n = (mach_port_destroyed_notification_t *) &kmsg->ikm_header;
+	*n = ipc_notify_port_destroyed_template;
+
+	/* Override template bits: send-right destination, and COMPLEX
+	   because the message transfers a port right in its body. */
+	n->not_header.msgh_bits = MACH_MSGH_BITS_COMPLEX |
+		MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0);
+	n->not_header.msgh_remote_port = (mach_port_t) port;
+	n->not_port = (mach_port_t) right;
+
+	ipc_mqueue_send_always(kmsg);
+}
+
+#endif MACH_IPC_COMPAT
diff --git a/ipc/ipc_notify.h b/ipc/ipc_notify.h
new file mode 100644
index 0000000..66e0633
--- /dev/null
+++ b/ipc/ipc_notify.h
@@ -0,0 +1,72 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_notify.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of notification-sending functions.
+ */
+
+#ifndef _IPC_IPC_NOTIFY_H_
+#define _IPC_IPC_NOTIFY_H_
+
+#include <mach_ipc_compat.h>
+
+/* Presumably initializes the notification-message templates used by
+   the routines below -- see ipc_notify.c. */
+extern void
+ipc_notify_init();
+
+/*
+ * Each routine sends one kernel-generated notification message.
+ * All of them consume the caller's reference/right for the first
+ * (destination) port argument, even when the send fails.
+ */
+
+extern void
+ipc_notify_port_deleted(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_msg_accepted(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_port_destroyed(/* ipc_port_t, ipc_port_t */);
+
+extern void
+ipc_notify_no_senders(/* ipc_port_t, mach_port_mscount_t */);
+
+extern void
+ipc_notify_send_once(/* ipc_port_t */);
+
+extern void
+ipc_notify_dead_name(/* ipc_port_t, mach_port_t */);
+
+#if MACH_IPC_COMPAT
+
+/* Compat variants: same notifications, but sent to a send right
+   instead of a send-once right. */
+
+extern void
+ipc_notify_port_deleted_compat(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_msg_accepted_compat(/* ipc_port_t, mach_port_t */);
+
+extern void
+ipc_notify_port_destroyed_compat(/* ipc_port_t, ipc_port_t */);
+
+#endif MACH_IPC_COMPAT
+#endif _IPC_IPC_NOTIFY_H_
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
new file mode 100644
index 0000000..cdef3cd
--- /dev/null
+++ b/ipc/ipc_object.c
@@ -0,0 +1,1346 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_object.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC objects.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <ipc/port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_pset.h>
+
+zone_t ipc_object_zones[IOT_NUMBER];
+
+
+
+/*
+ * Routine: ipc_object_reference
+ * Purpose:
+ * Take a reference to an object.
+ */
+
+void
+ipc_object_reference(
+	ipc_object_t object)
+{
+	io_lock(object);
+	assert(object->io_references > 0);	/* caller must already hold a ref */
+	io_reference(object);
+	io_unlock(object);
+}
+
+/*
+ * Routine: ipc_object_release
+ * Purpose:
+ * Release a reference to an object.
+ */
+
+void
+ipc_object_release(
+	ipc_object_t object)
+{
+	io_lock(object);
+	assert(object->io_references > 0);
+	io_release(object);
+	/* io_check_unlock drops the lock and presumably reclaims the
+	   object when the reference count has hit zero -- see ipc_object.h. */
+	io_check_unlock(object);
+}
+
+/*
+ * Routine: ipc_object_translate
+ * Purpose:
+ * Look up an object in a space.
+ * Conditions:
+ * Nothing locked before. If successful, the object
+ * is returned locked. The caller doesn't get a ref.
+ * Returns:
+ * KERN_SUCCESS Objected returned locked.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote the correct right.
+ */
+
+kern_return_t
+ipc_object_translate(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_right_t right,	/* right type the name must denote */
+	ipc_object_t *objectp)	/* out: object, locked, no ref for caller */
+{
+	ipc_entry_t entry;
+	ipc_object_t object;
+	kern_return_t kr;
+
+	kr = ipc_right_lookup_read(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is read-locked and active */
+
+	/* The entry must carry the specific right type requested. */
+	if ((entry->ie_bits & MACH_PORT_TYPE(right)) == (mach_port_right_t) 0) {
+		is_read_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	object = entry->ie_object;
+	assert(object != IO_NULL);
+
+	/* Lock the object before releasing the space lock, so the entry
+	   can't be torn down while we hand the object back. */
+	io_lock(object);
+	is_read_unlock(space);
+
+	*objectp = object;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_dead
+ * Purpose:
+ * Allocate a dead-name entry.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The dead name is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_dead(
+	ipc_space_t space,
+	mach_port_t *namep)	/* out: name chosen for the dead-name entry */
+{
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+
+	kr = ipc_entry_alloc(space, namep, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked */
+
+	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
+
+	assert(entry->ie_object == IO_NULL);
+	/* The low-order "| 1" is the single user reference. */
+	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
+
+	is_write_unlock(space);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_dead_name
+ * Purpose:
+ * Allocate a dead-name entry, with a specific name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The dead name is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_dead_name(
+	ipc_space_t space,
+	mach_port_t name)	/* exact name to use for the dead-name entry */
+{
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+
+	kr = ipc_entry_alloc_name(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked */
+
+	/* On TRUE, ipc_right_inuse has unlocked the space
+	   (cf. the usage in ipc_object_rename below). */
+	if (ipc_right_inuse(space, name, entry))
+		return KERN_NAME_EXISTS;
+
+	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
+
+	assert(entry->ie_object == IO_NULL);
+	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
+
+	is_write_unlock(space);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc
+ * Purpose:
+ * Allocate an object.
+ * Conditions:
+ * Nothing locked. If successful, the object is returned locked.
+ * The caller doesn't get a reference for the object.
+ * Returns:
+ * KERN_SUCCESS The object is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc(
+	ipc_space_t space,
+	ipc_object_type_t otype,	/* IOT_PORT or IOT_PORT_SET */
+	mach_port_type_t type,	/* rights bits for the new entry */
+	mach_port_urefs_t urefs,	/* initial user-reference count */
+	mach_port_t *namep,	/* out: name allocated for the object */
+	ipc_object_t *objectp)	/* out: object, locked, ref held by entry */
+{
+	ipc_object_t object;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	assert(otype < IOT_NUMBER);
+	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
+	assert(type != MACH_PORT_TYPE_NONE);
+	assert(urefs <= MACH_PORT_UREFS_MAX);
+
+	object = io_alloc(otype);
+	if (object == IO_NULL)
+		return KERN_RESOURCE_SHORTAGE;
+
+	/* Start new ports and port sets fully zeroed. */
+	if (otype == IOT_PORT) {
+		ipc_port_t port = (ipc_port_t)object;
+
+		bzero((char *)port, sizeof(*port));
+	} else if (otype == IOT_PORT_SET) {
+		ipc_pset_t pset = (ipc_pset_t)object;
+
+		bzero((char *)pset, sizeof(*pset));
+	}
+	/* Seed *namep with the object address -- presumably a name hint
+	   for ipc_entry_alloc; TODO confirm against ipc_entry.c. */
+	*namep = (mach_port_t)object;
+	kr = ipc_entry_alloc(space, namep, &entry);
+	if (kr != KERN_SUCCESS) {
+		io_free(otype, object);
+		return kr;
+	}
+	/* space is write-locked */
+
+	entry->ie_bits |= type | urefs;
+	entry->ie_object = object;
+
+	/* Hand the object back locked; the entry holds the only ref. */
+	io_lock_init(object);
+	io_lock(object);
+	is_write_unlock(space);
+
+	object->io_references = 1; /* for entry, not caller */
+	object->io_bits = io_makebits(TRUE, otype, 0);
+
+	*objectp = object;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_alloc_name
+ * Purpose:
+ * Allocate an object, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the object is returned locked.
+ * The caller doesn't get a reference for the object.
+ * Returns:
+ * KERN_SUCCESS The object is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_object_alloc_name(
+	ipc_space_t space,
+	ipc_object_type_t otype,	/* IOT_PORT or IOT_PORT_SET */
+	mach_port_type_t type,	/* rights bits for the new entry */
+	mach_port_urefs_t urefs,	/* initial user-reference count */
+	mach_port_t name,	/* exact name to use */
+	ipc_object_t *objectp)	/* out: object, locked, ref held by entry */
+{
+	ipc_object_t object;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	assert(otype < IOT_NUMBER);
+	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
+	assert(type != MACH_PORT_TYPE_NONE);
+	assert(urefs <= MACH_PORT_UREFS_MAX);
+
+	object = io_alloc(otype);
+	if (object == IO_NULL)
+		return KERN_RESOURCE_SHORTAGE;
+
+	/* Start new ports and port sets fully zeroed. */
+	if (otype == IOT_PORT) {
+		ipc_port_t port = (ipc_port_t)object;
+
+		bzero((char *)port, sizeof(*port));
+	} else if (otype == IOT_PORT_SET) {
+		ipc_pset_t pset = (ipc_pset_t)object;
+
+		bzero((char *)pset, sizeof(*pset));
+	}
+
+	kr = ipc_entry_alloc_name(space, name, &entry);
+	if (kr != KERN_SUCCESS) {
+		io_free(otype, object);
+		return kr;
+	}
+	/* space is write-locked */
+
+	/* On TRUE, ipc_right_inuse has unlocked the space. */
+	if (ipc_right_inuse(space, name, entry)) {
+		io_free(otype, object);
+		return KERN_NAME_EXISTS;
+	}
+
+	entry->ie_bits |= type | urefs;
+	entry->ie_object = object;
+
+	/* Hand the object back locked; the entry holds the only ref. */
+	io_lock_init(object);
+	io_lock(object);
+	is_write_unlock(space);
+
+	object->io_references = 1; /* for entry, not caller */
+	object->io_bits = io_makebits(TRUE, otype, 0);
+
+	*objectp = object;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_object_copyin_type
+ * Purpose:
+ * Convert a send type name to a received type name.
+ */
+
+mach_msg_type_name_t
+ipc_object_copyin_type(
+	mach_msg_type_name_t msgt_name)
+{
+	/* Map a copyin disposition (MOVE/MAKE/COPY) to the name used
+	   for the right once it is carried in a message (PORT_*). */
+	switch (msgt_name) {
+	case 0:
+		return 0;
+
+	case MACH_MSG_TYPE_MOVE_RECEIVE:
+		return MACH_MSG_TYPE_PORT_RECEIVE;
+
+	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+		return MACH_MSG_TYPE_PORT_SEND_ONCE;
+
+	case MACH_MSG_TYPE_MOVE_SEND:
+	case MACH_MSG_TYPE_MAKE_SEND:
+	case MACH_MSG_TYPE_COPY_SEND:
+		return MACH_MSG_TYPE_PORT_SEND;
+
+#if MACH_IPC_COMPAT
+	/* Old-interface type names. */
+	case MSG_TYPE_PORT:
+		return MACH_MSG_TYPE_PORT_SEND;
+
+	case MSG_TYPE_PORT_ALL:
+		return MACH_MSG_TYPE_PORT_RECEIVE;
+#endif MACH_IPC_COMPAT
+
+	default:
+#if MACH_ASSERT
+		assert(!"ipc_object_copyin_type: strange rights");
+#else
+		panic("ipc_object_copyin_type: strange rights");
+#endif
+		return 0; /* in case assert/panic returns */
+	}
+}
+
+/*
+ * Routine: ipc_object_copyin
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, unless it is IO_DEAD.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_msg_type_name_t msgt_name,	/* copyin disposition (MOVE/MAKE/COPY) */
+	ipc_object_t *objectp)	/* out: object with a ref, or IO_DEAD */
+{
+	ipc_entry_t entry;
+	ipc_port_t soright;
+	kern_return_t kr;
+
+	/*
+	 * Could first try a read lock when doing
+	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
+	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
+	 */
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	kr = ipc_right_copyin(space, name, entry,
+			      msgt_name, TRUE,
+			      objectp, &soright);
+	/* The copyin may have drained the entry; reclaim it if so. */
+	if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+		ipc_entry_dealloc(space, name, entry);
+	is_write_unlock(space);
+
+	/* soright, when returned, is a send-once right for a pending
+	   port-deleted notification; fire it after unlocking. */
+	if ((kr == KERN_SUCCESS) && (soright != IP_NULL))
+		ipc_notify_port_deleted(soright, name);
+
+	return kr;
+}
+
+/*
+ * Routine: ipc_object_copyin_from_kernel
+ * Purpose:
+ * Copyin a naked capability from the kernel.
+ *
+ * MACH_MSG_TYPE_MOVE_RECEIVE
+ * The receiver must be ipc_space_kernel.
+ * Consumes the naked receive right.
+ * MACH_MSG_TYPE_COPY_SEND
+ * A naked send right must be supplied.
+ * The port gains a reference, and a send right
+ * if the port is still active.
+ * MACH_MSG_TYPE_MAKE_SEND
+ * The receiver must be ipc_space_kernel.
+ * The port gains a reference and a send right.
+ * MACH_MSG_TYPE_MOVE_SEND
+ * Consumes a naked send right.
+ * MACH_MSG_TYPE_MAKE_SEND_ONCE
+ * The receiver must be ipc_space_kernel.
+ * The port gains a reference and a send-once right.
+ * MACH_MSG_TYPE_MOVE_SEND_ONCE
+ * Consumes a naked send-once right.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_object_copyin_from_kernel(
+	ipc_object_t object,
+	mach_msg_type_name_t msgt_name)
+{
+	assert(IO_VALID(object));
+
+	switch (msgt_name) {
+	case MACH_MSG_TYPE_MOVE_RECEIVE: {
+		ipc_port_t port = (ipc_port_t) object;
+
+		/* Only the kernel space may move its own receive right. */
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name != MACH_PORT_NULL);
+		assert(port->ip_receiver == ipc_space_kernel);
+
+		/* relevant part of ipc_port_clear_receiver */
+		ipc_port_set_mscount(port, 0);
+
+		/* Detach the port from the kernel space: no receiver name,
+		   no destination, i.e. the right is now in transit. */
+		port->ip_receiver_name = MACH_PORT_NULL;
+		port->ip_destination = IP_NULL;
+		ip_unlock(port);
+		break;
+	}
+
+	case MACH_MSG_TYPE_COPY_SEND: {
+		ipc_port_t port = (ipc_port_t) object;
+
+		/* Duplicate the supplied send right; a dead port just
+		   gains a reference, no send right. */
+		ip_lock(port);
+		if (ip_active(port)) {
+			assert(port->ip_srights > 0);
+			port->ip_srights++;
+		}
+		ip_reference(port);
+		ip_unlock(port);
+		break;
+	}
+
+	case MACH_MSG_TYPE_MAKE_SEND: {
+		ipc_port_t port = (ipc_port_t) object;
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name != MACH_PORT_NULL);
+		assert(port->ip_receiver == ipc_space_kernel);
+
+		/* Mint a fresh send right from the kernel's receive right;
+		   bump the make-send count accordingly. */
+		ip_reference(port);
+		port->ip_mscount++;
+		port->ip_srights++;
+		ip_unlock(port);
+		break;
+	}
+
+	case MACH_MSG_TYPE_MOVE_SEND:
+		/* move naked send right into the message */
+		break;
+
+	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
+		ipc_port_t port = (ipc_port_t) object;
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name != MACH_PORT_NULL);
+		assert(port->ip_receiver == ipc_space_kernel);
+
+		/* Mint a fresh send-once right. */
+		ip_reference(port);
+		port->ip_sorights++;
+		ip_unlock(port);
+		break;
+	}
+
+	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+		/* move naked send-once right into the message */
+		break;
+
+	default:
+#if MACH_ASSERT
+		assert(!"ipc_object_copyin_from_kernel: strange rights");
+#else
+		panic("ipc_object_copyin_from_kernel: strange rights");
+#endif
+	}
+}
+
+/*
+ * Routine: ipc_object_destroy
+ * Purpose:
+ * Destroys a naked capability.
+ * Consumes a ref for the object.
+ *
+ * A receive right should be in limbo or in transit.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_object_destroy(
+	ipc_object_t object,
+	mach_msg_type_name_t msgt_name)	/* which kind of right to consume */
+{
+	assert(IO_VALID(object));
+	assert(io_otype(object) == IOT_PORT);
+
+	switch (msgt_name) {
+	case MACH_MSG_TYPE_PORT_SEND:
+		ipc_port_release_send((ipc_port_t) object);
+		break;
+
+	case MACH_MSG_TYPE_PORT_SEND_ONCE:
+		/* A send-once right can't just be dropped: its holder is
+		   owed a message, so send the send-once notification. */
+		ipc_notify_send_once((ipc_port_t) object);
+		break;
+
+	case MACH_MSG_TYPE_PORT_RECEIVE:
+		ipc_port_release_receive((ipc_port_t) object);
+		break;
+
+	default:
+		panic("ipc_object_destroy: strange rights");
+	}
+}
+
+/*
+ * Routine: ipc_object_copyout
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * If successful, consumes a ref for the object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_NO_SPACE No room in space for another right.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded
+ * and overflow wasn't specified.
+ */
+
+kern_return_t
+ipc_object_copyout(
+	ipc_space_t space,
+	ipc_object_t object,
+	mach_msg_type_name_t msgt_name,	/* carried right type (PORT_*) */
+	boolean_t overflow,	/* TRUE: silently cap urefs at the limit */
+	mach_port_t *namep)	/* out: name given to the right */
+{
+	mach_port_t name;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	assert(IO_VALID(object));
+	assert(io_otype(object) == IOT_PORT);
+
+	is_write_lock(space);
+
+	/* Retry loop: growing the entry table drops the space lock. */
+	for (;;) {
+		if (!space->is_active) {
+			is_write_unlock(space);
+			return KERN_INVALID_TASK;
+		}
+
+		/* If the space already holds send/receive rights for this
+		   object, reuse that entry/name.  Send-once rights always
+		   get a fresh name. */
+		if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+		    ipc_right_reverse(space, object, &name, &entry)) {
+			/* object is locked and active */
+
+			assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+			break;
+		}
+
+		kr = ipc_entry_get(space, &name, &entry);
+		if (kr != KERN_SUCCESS) {
+			/* unlocks/locks space, so must start again */
+
+			kr = ipc_entry_grow_table(space);
+			if (kr != KERN_SUCCESS)
+				return kr; /* space is unlocked */
+
+			continue;
+		}
+
+		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+		assert(entry->ie_object == IO_NULL);
+
+		io_lock(object);
+		if (!io_active(object)) {
+			/* Object died in transit: undo the entry. */
+			io_unlock(object);
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+			return KERN_INVALID_CAPABILITY;
+		}
+
+		entry->ie_object = object;
+		break;
+	}
+
+	/* space is write-locked and active, object is locked and active */
+
+	kr = ipc_right_copyout(space, name, entry,
+			       msgt_name, overflow, object);
+	/* object is unlocked */
+	is_write_unlock(space);
+
+	if (kr == KERN_SUCCESS)
+		*namep = name;
+	return kr;
+}
+
+#if 0
+/* XXX same, but don't check for already-existing send rights */
+/* NOTE(review): disabled code -- identical to ipc_object_copyout except
+   that it skips ipc_right_reverse, so every copyout gets a fresh name. */
+kern_return_t
+ipc_object_copyout_multiname(space, object, namep)
+	ipc_space_t space;
+	ipc_object_t object;
+	mach_port_t *namep;
+{
+	mach_port_t name;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	assert(IO_VALID(object));
+	assert(io_otype(object) == IOT_PORT);
+
+	is_write_lock(space);
+
+	/* Retry loop: growing the entry table drops the space lock. */
+	for (;;) {
+		if (!space->is_active) {
+			is_write_unlock(space);
+			return KERN_INVALID_TASK;
+		}
+
+		kr = ipc_entry_get(space, &name, &entry);
+		if (kr != KERN_SUCCESS) {
+			/* unlocks/locks space, so must start again */
+
+			kr = ipc_entry_grow_table(space);
+			if (kr != KERN_SUCCESS)
+				return kr; /* space is unlocked */
+
+			continue;
+		}
+
+		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+		assert(entry->ie_object == IO_NULL);
+
+		io_lock(object);
+		if (!io_active(object)) {
+			io_unlock(object);
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+			return KERN_INVALID_CAPABILITY;
+		}
+
+		entry->ie_object = object;
+		break;
+	}
+
+	/* space is write-locked and active, object is locked and active */
+
+	kr = ipc_right_copyout_multiname(space, name, entry, object);
+	/* object is unlocked */
+	is_write_unlock(space);
+
+	if (kr == KERN_SUCCESS)
+		*namep = name;
+	return kr;
+}
+#endif 0
+
+/*
+ * Routine: ipc_object_copyout_name
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * The specified name is used for the capability.
+ * If successful, consumes a ref for the object.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_UREFS_OVERFLOW Urefs limit exceeded
+ * and overflow wasn't specified.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_NAME_EXISTS Name is already used.
+ */
+
+kern_return_t
+ipc_object_copyout_name(
+	ipc_space_t space,
+	ipc_object_t object,
+	mach_msg_type_name_t msgt_name,	/* carried right type (PORT_*) */
+	boolean_t overflow,	/* TRUE: silently cap urefs at the limit */
+	mach_port_t name)	/* exact name the right must receive */
+{
+	mach_port_t oname;
+	ipc_entry_t oentry;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	assert(IO_VALID(object));
+	assert(io_otype(object) == IOT_PORT);
+
+	kr = ipc_entry_alloc_name(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	/* If the space already holds send/receive rights for this object,
+	   they must live under the very name the caller requested. */
+	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+	    ipc_right_reverse(space, object, &oname, &oentry)) {
+		/* object is locked and active */
+
+		if (name != oname) {
+			io_unlock(object);
+
+			/* Undo the speculative entry allocation. */
+			if (IE_BITS_TYPE(entry->ie_bits)
+					== MACH_PORT_TYPE_NONE)
+				ipc_entry_dealloc(space, name, entry);
+
+			is_write_unlock(space);
+			return KERN_RIGHT_EXISTS;
+		}
+
+		assert(entry == oentry);
+		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
+	} else {
+		/* On TRUE, ipc_right_inuse has unlocked the space. */
+		if (ipc_right_inuse(space, name, entry))
+			return KERN_NAME_EXISTS;
+
+		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+		assert(entry->ie_object == IO_NULL);
+
+		io_lock(object);
+		if (!io_active(object)) {
+			/* Object died in transit: undo the entry. */
+			io_unlock(object);
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+			return KERN_INVALID_CAPABILITY;
+		}
+
+		entry->ie_object = object;
+	}
+
+	/* space is write-locked and active, object is locked and active */
+
+	kr = ipc_right_copyout(space, name, entry,
+			       msgt_name, overflow, object);
+	/* object is unlocked */
+	is_write_unlock(space);
+	return kr;
+}
+
+/*
+ * Routine: ipc_object_copyout_dest
+ * Purpose:
+ * Translates/consumes the destination right of a message.
+ * This is unlike normal copyout because the right is consumed
+ * in a funny way instead of being given to the receiving space.
+ * The receiver gets his name for the port, if he has receive
+ * rights, otherwise MACH_PORT_NULL.
+ * Conditions:
+ * The object is locked and active. Nothing else locked.
+ * The object is unlocked and loses a reference.
+ */
+
+void
+ipc_object_copyout_dest(
+	ipc_space_t space,
+	ipc_object_t object,	/* locked, active; one ref is consumed */
+	mach_msg_type_name_t msgt_name,
+	mach_port_t *namep)	/* out: receiver's name or MACH_PORT_NULL */
+{
+	mach_port_t name;
+
+	assert(IO_VALID(object));
+	assert(io_active(object));
+
+	/* Consume the caller's ref up front; we still hold the lock. */
+	io_release(object);
+
+	/*
+	 *	If the space is the receiver/owner of the object,
+	 *	then we quietly consume the right and return
+	 *	the space's name for the object.  Otherwise
+	 *	we destroy the right and return MACH_PORT_NULL.
+	 */
+
+	switch (msgt_name) {
+	case MACH_MSG_TYPE_PORT_SEND: {
+		ipc_port_t port = (ipc_port_t) object;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		/* Dropping the last send right arms the no-senders
+		   notification, sent after the port is unlocked. */
+		assert(port->ip_srights > 0);
+		if (--port->ip_srights == 0) {
+			nsrequest = port->ip_nsrequest;
+			if (nsrequest != IP_NULL) {
+				port->ip_nsrequest = IP_NULL;
+				mscount = port->ip_mscount;
+			}
+		}
+
+		if (port->ip_receiver == space)
+			name = port->ip_receiver_name;
+		else
+			name = MACH_PORT_NULL;
+
+		ip_unlock(port);
+
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		break;
+	}
+
+	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
+		ipc_port_t port = (ipc_port_t) object;
+
+		assert(port->ip_sorights > 0);
+
+		if (port->ip_receiver == space) {
+			/* quietly consume the send-once right */
+
+			port->ip_sorights--;
+			name = port->ip_receiver_name;
+			ip_unlock(port);
+		} else {
+			/*
+			 *	A very bizarre case.  The message
+			 *	was received, but before this copyout
+			 *	happened the space lost receive rights.
+			 *	We can't quietly consume the soright
+			 *	out from underneath some other task,
+			 *	so generate a send-once notification.
+			 */
+
+			ip_reference(port); /* restore ref */
+			ip_unlock(port);
+
+			ipc_notify_send_once(port);
+			name = MACH_PORT_NULL;
+		}
+
+		break;
+	}
+
+	default:
+#if MACH_ASSERT
+		assert(!"ipc_object_copyout_dest: strange rights");
+#else
+		panic("ipc_object_copyout_dest: strange rights");
+#endif
+
+	}
+
+	*namep = name;
+}
+
+/*
+ * Routine: ipc_object_rename
+ * Purpose:
+ * Rename an entry in a space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Renamed the entry.
+ * KERN_INVALID_TASK The space was dead.
+ * KERN_INVALID_NAME oname didn't denote an entry.
+ * KERN_NAME_EXISTS nname already denoted an entry.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate new entry.
+ */
+
+kern_return_t
+ipc_object_rename(
+	ipc_space_t space,
+	mach_port_t oname,	/* existing name */
+	mach_port_t nname)	/* desired new name */
+{
+	ipc_entry_t oentry, nentry;
+	kern_return_t kr;
+
+	/* Reserve the new name first, before looking up the old one. */
+	kr = ipc_entry_alloc_name(space, nname, &nentry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	if (ipc_right_inuse(space, nname, nentry)) {
+		/* space is unlocked */
+		return KERN_NAME_EXISTS;
+	}
+
+	/* don't let ipc_entry_lookup see the uninitialized new entry */
+
+	/* oname == nname would find the fresh entry itself, so it is
+	   rejected here along with a genuinely missing old name. */
+	if ((oname == nname) ||
+	    ((oentry = ipc_entry_lookup(space, oname)) == IE_NULL)) {
+		ipc_entry_dealloc(space, nname, nentry);
+		is_write_unlock(space);
+		return KERN_INVALID_NAME;
+	}
+
+	kr = ipc_right_rename(space, oname, oentry, nname, nentry);
+	/* space is unlocked */
+	return kr;
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: ipc_object_copyout_type_compat
+ * Purpose:
+ * Convert a carried type name to an old type name.
+ */
+
+mach_msg_type_name_t
+ipc_object_copyout_type_compat(msgt_name)
+	mach_msg_type_name_t msgt_name;
+{
+	/* Map new-interface carried right names to the old interface's
+	   MSG_TYPE_* names. */
+	switch (msgt_name) {
+	case MACH_MSG_TYPE_PORT_SEND:
+	case MACH_MSG_TYPE_PORT_SEND_ONCE:
+		return MSG_TYPE_PORT;
+
+	case MACH_MSG_TYPE_PORT_RECEIVE:
+		return MSG_TYPE_PORT_ALL;
+
+	default:
+#if MACH_ASSERT
+		assert(!"ipc_object_copyout_type_compat: strange rights");
+#else
+		panic("ipc_object_copyout_type_compat: strange rights");
+#endif
+	/* NOTE(review): no return after assert/panic here, unlike the
+	   sibling switches above -- relies on panic not returning. */
+	}
+}
+
+/*
+ * Routine: ipc_object_copyin_compat
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, which is always valid.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired a valid object.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin_compat(space, name, msgt_name, dealloc, objectp)
+	ipc_space_t space;
+	mach_port_t name;
+	mach_msg_type_name_t msgt_name;	/* old-interface type name */
+	boolean_t dealloc;	/* TRUE: deallocate the entry as well */
+	ipc_object_t *objectp;	/* out: object with a ref, always valid */
+{
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	/* ipc_right_copyin_compat does the real work and unlocks. */
+	kr = ipc_right_copyin_compat(space, name, entry,
+				     msgt_name, dealloc, objectp);
+	/* space is unlocked */
+	return kr;
+}
+
+/*
+ * Routine: ipc_object_copyin_header
+ * Purpose:
+ * Copyin a capability from a space.
+ * If successful, the caller gets a ref
+ * for the resulting object, which is always valid.
+ * The type of the acquired capability is returned.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Acquired a valid object.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_object_copyin_header(space, name, objectp, msgt_namep)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_object_t *objectp;	/* out: object with a ref, always valid */
+	mach_msg_type_name_t *msgt_namep;	/* out: type of acquired right */
+{
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	/* ipc_right_copyin_header does the real work and unlocks. */
+	kr = ipc_right_copyin_header(space, name, entry,
+				     objectp, msgt_namep);
+	/* space is unlocked */
+	return kr;
+}
+
/*
 *	Routine:	ipc_object_copyout_compat
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		If successful, consumes a ref for the object.
 *
 *		Marks new entries with IE_BITS_COMPAT and registers a
 *		dead-name request against the port, so that compat-style
 *		notifications fire when the port dies.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_NO_SPACE		No room in space for another right.
 *		KERN_RESOURCE_SHORTAGE	No memory available.
 */

kern_return_t
ipc_object_copyout_compat(space, object, msgt_name, namep)
	ipc_space_t space;
	ipc_object_t object;
	mach_msg_type_name_t msgt_name;
	mach_port_t *namep;
{
	mach_port_t name;
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	port = (ipc_port_t) object;

	is_write_lock(space);

	/*
	 *	Retry loop: growing either the space's entry table or the
	 *	port's dnrequest table drops locks, so each failure path
	 *	restarts from the top with the space write-locked.
	 */
	for (;;) {
		ipc_port_request_index_t request;

		if (!space->is_active) {
			is_write_unlock(space);
			return KERN_INVALID_TASK;
		}

		/*
		 *	If the space already holds send/receive rights for
		 *	this port (and we aren't adding a send-once right),
		 *	reuse that entry instead of allocating a new one.
		 */
		if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
		    ipc_right_reverse(space, (ipc_object_t) port,
				      &name, &entry)) {
			/* port is locked and active */

			assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
			break;
		}

		kr = ipc_entry_get(space, &name, &entry);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space);
			if (kr != KERN_SUCCESS)
				return kr; /* space is unlocked */

			continue;
		}

		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
		assert(entry->ie_object == IO_NULL);

		ip_lock(port);
		if (!ip_active(port)) {
			ip_unlock(port);
			ipc_entry_dealloc(space, name, entry);
			is_write_unlock(space);
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 *	Register a dead-name request, marking the soright
		 *	slot with the space itself (ipr_spacem) as the
		 *	compat convention.
		 */
		kr = ipc_port_dnrequest(port, name, ipr_spacem(space),
					&request);
		if (kr != KERN_SUCCESS) {
			ipc_entry_dealloc(space, name, entry);
			is_write_unlock(space);

			kr = ipc_port_dngrow(port);
			/* port is unlocked */
			if (kr != KERN_SUCCESS)
				return kr;

			is_write_lock(space);
			continue;
		}

		is_reference(space);	/* for dnrequest */
		entry->ie_object = (ipc_object_t) port;
		entry->ie_request = request;
		entry->ie_bits |= IE_BITS_COMPAT;
		break;
	}

	/* space is write-locked and active, port is locked and active */

	kr = ipc_right_copyout(space, name, entry,
			       msgt_name, TRUE, (ipc_object_t) port);
	/* object is unlocked */
	is_write_unlock(space);

	if (kr == KERN_SUCCESS)
		*namep = name;
	return kr;
}
+
+/*
+ * Routine: ipc_object_copyout_name_compat
+ * Purpose:
+ * Copyout a capability, placing it into a space.
+ * The specified name is used for the capability.
+ * If successful, consumes a ref for the object.
+ *
+ * Like ipc_object_copyout_name, except that
+ * the name can't be in use at all, even for the same
+ * port, and IE_BITS_COMPAT gets turned on.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Copied out object, consumed ref.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_CAPABILITY The object is dead.
+ * KERN_RESOURCE_SHORTAGE No memory available.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_NAME_EXISTS Name is already used.
+ */
+
+kern_return_t
+ipc_object_copyout_name_compat(space, object, msgt_name, name)
+ ipc_space_t space;
+ ipc_object_t object;
+ mach_msg_type_name_t msgt_name;
+ mach_port_t name;
+{
+ ipc_entry_t entry;
+ ipc_port_t port;
+ kern_return_t kr;
+
+ assert(IO_VALID(object));
+ assert(io_otype(object) == IOT_PORT);
+ port = (ipc_port_t) object;
+
+ for (;;) {
+ mach_port_t oname;
+ ipc_entry_t oentry;
+ ipc_port_request_index_t request;
+
+ kr = ipc_entry_alloc_name(space, name, &entry);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* space is write-locked and active */
+
+ if (ipc_right_inuse(space, name, entry))
+ return KERN_NAME_EXISTS; /* space is unlocked */
+
+ assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
+ assert(entry->ie_object == IO_NULL);
+
+ if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
+ ipc_right_reverse(space, (ipc_object_t) port,
+ &oname, &oentry)) {
+ /* port is locked and active */
+
+ ip_unlock(port);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_RIGHT_EXISTS;
+ }
+
+ ip_lock(port);
+ if (!ip_active(port)) {
+ ip_unlock(port);
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+ return KERN_INVALID_CAPABILITY;
+ }
+
+ kr = ipc_port_dnrequest(port, name, ipr_spacem(space),
+ &request);
+ if (kr != KERN_SUCCESS) {
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ kr = ipc_port_dngrow(port);
+ /* port is unlocked */
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ continue;
+ }
+
+ is_reference(space); /* for dnrequest */
+ entry->ie_object = (ipc_object_t) port;
+ entry->ie_request = request;
+ entry->ie_bits |= IE_BITS_COMPAT;
+ break;
+ }
+
+ /* space is write-locked and active, port is locked and active */
+
+ kr = ipc_right_copyout(space, name, entry,
+ msgt_name, TRUE, (ipc_object_t) port);
+ /* object is unlocked */
+ is_write_unlock(space);
+
+ assert(kr == KERN_SUCCESS);
+ return kr;
+}
+
+#endif MACH_IPC_COMPAT
+
+#include <mach_kdb.h>
+
+
+#if MACH_KDB
+#define printf kdbprintf
+
/*
 *	Routine:	ipc_object_print
 *	Purpose:
 *		Pretty-print an object for kdb.
 */

/*
 *	Printable names for kernel-object types, indexed by IKOT_* value.
 *	The final "(UNKNOWN)" slot is a catchall used for any kotype
 *	outside [0, IKOT_MAX_TYPE).
 */
char *ikot_print_array[IKOT_MAX_TYPE] = {
	"(NONE)             ",
	"(THREAD)           ",
	"(TASK)             ",
	"(HOST)             ",
	"(HOST_PRIV)        ",
	"(PROCESSOR)        ",
	"(PSET)             ",
	"(PSET_NAME)        ",
	"(PAGER)            ",
	"(PAGER_REQUEST)    ",
	"(DEVICE)           ",	/* 10 */
	"(XMM_OBJECT)       ",
	"(XMM_PAGER)        ",
	"(XMM_KERNEL)       ",
	"(XMM_REPLY)        ",
	"(PAGER_TERMINATING)",
	"(PAGING_NAME)      ",
	"(HOST_SECURITY)    ",
	"(LEDGER)           ",
	"(MASTER_DEVICE)    ",
	"(ACTIVATION)       ",	/* 20 */
	"(SUBSYSTEM)        ",
	"(IO_DONE_QUEUE)    ",
	"(SEMAPHORE)        ",
	"(LOCK_SET)         ",
	"(CLOCK)            ",
	"(CLOCK_CTRL)       ",	/* 26 */
				/* << new entries here	*/
	"(UNKNOWN)          "	/* magic catchall	*/
};				/* Please keep in sync with kern/ipc_kobject.h	*/
+
+void
+ipc_object_print(
+ ipc_object_t object)
+{
+ int kotype;
+
+ iprintf("%s", io_active(object) ? "active" : "dead");
+ printf(", refs=%d", object->io_references);
+ printf(", otype=%d", io_otype(object));
+ kotype = io_kotype(object);
+ if (kotype >= 0 && kotype < IKOT_MAX_TYPE)
+ printf(", kotype=%d %s\n", io_kotype(object),
+ ikot_print_array[kotype]);
+ else
+ printf(", kotype=0x%x %s\n", io_kotype(object),
+ ikot_print_array[IKOT_UNKNOWN]);
+}
+
+#endif /* MACH_KDB */
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
new file mode 100644
index 0000000..dccec59
--- /dev/null
+++ b/ipc/ipc_object.h
@@ -0,0 +1,192 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_object.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for IPC objects, for which tasks have capabilities.
+ */
+
+#ifndef _IPC_IPC_OBJECT_H_
+#define _IPC_IPC_OBJECT_H_
+
+#include <mach_ipc_compat.h>
+
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <kern/zalloc.h>
+
+typedef unsigned int ipc_object_refs_t;
+typedef unsigned int ipc_object_bits_t;
+typedef unsigned int ipc_object_type_t;
+
+typedef struct ipc_object {
+ decl_simple_lock_data(,io_lock_data)
+ ipc_object_refs_t io_references;
+ ipc_object_bits_t io_bits;
+} *ipc_object_t;
+
+#define IO_NULL ((ipc_object_t) 0)
+#define IO_DEAD ((ipc_object_t) -1)
+
+#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD))
+
+#define IO_BITS_KOTYPE 0x0000ffff /* used by the object */
+#define IO_BITS_OTYPE 0x7fff0000 /* determines a zone */
+#define IO_BITS_ACTIVE 0x80000000U /* is object alive? */
+
+#define io_active(io) ((int)(io)->io_bits < 0) /* hack */
+
+#define io_otype(io) (((io)->io_bits & IO_BITS_OTYPE) >> 16)
+#define io_kotype(io) ((io)->io_bits & IO_BITS_KOTYPE)
+
+#define io_makebits(active, otype, kotype) \
+ (((active) ? IO_BITS_ACTIVE : 0) | ((otype) << 16) | (kotype))
+
+/*
+ * Object types: ports, port sets, kernel-loaded ports
+ */
+#define IOT_PORT 0
+#define IOT_PORT_SET 1
+#define IOT_NUMBER 2 /* number of types used */
+
+extern zone_t ipc_object_zones[IOT_NUMBER];
+
+#define io_alloc(otype) \
+ ((ipc_object_t) zalloc(ipc_object_zones[(otype)]))
+
+#define io_free(otype, io) \
+ zfree(ipc_object_zones[(otype)], (vm_offset_t) (io))
+
+#define io_lock_init(io) simple_lock_init(&(io)->io_lock_data)
+#define io_lock(io) simple_lock(&(io)->io_lock_data)
+#define io_lock_try(io) simple_lock_try(&(io)->io_lock_data)
+#define io_unlock(io) simple_unlock(&(io)->io_lock_data)
+
+#define io_check_unlock(io) \
+MACRO_BEGIN \
+ ipc_object_refs_t _refs = (io)->io_references; \
+ \
+ io_unlock(io); \
+ if (_refs == 0) \
+ io_free(io_otype(io), io); \
+MACRO_END
+
+#define io_reference(io) \
+MACRO_BEGIN \
+ (io)->io_references++; \
+MACRO_END
+
+#define io_release(io) \
+MACRO_BEGIN \
+ (io)->io_references--; \
+MACRO_END
+
+extern void
+ipc_object_reference(/* ipc_object_t */);
+
+extern void
+ipc_object_release(/* ipc_object_t */);
+
+extern kern_return_t
+ipc_object_translate(/* ipc_space_t, mach_port_t,
+ mach_port_right_t, ipc_object_t * */);
+
+extern kern_return_t
+ipc_object_alloc_dead(/* ipc_space_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_alloc_dead_name(/* ipc_space_t, mach_port_t */);
+
+extern kern_return_t
+ipc_object_alloc(/* ipc_space_t, ipc_object_type_t,
+ mach_port_type_t, mach_port_urefs_t,
+ mach_port_t *, ipc_object_t * */);
+
+extern kern_return_t
+ipc_object_alloc_name(/* ipc_space_t, ipc_object_type_t,
+ mach_port_type_t, mach_port_urefs_t,
+ mach_port_t, ipc_object_t * */);
+
+extern mach_msg_type_name_t
+ipc_object_copyin_type(/* mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_object_copyin(/* ipc_space_t, mach_port_t,
+ mach_msg_type_name_t, ipc_object_t * */);
+
+extern void
+ipc_object_copyin_from_kernel(/* ipc_object_t, mach_msg_type_name_t */);
+
+extern void
+ipc_object_destroy(/* ipc_object_t, mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_object_copyout(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, boolean_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_copyout_name(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, boolean_t, mach_port_t */);
+
+extern void
+ipc_object_copyout_dest(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_rename(/* ipc_space_t, mach_port_t, mach_port_t */);
+
+#if MACH_IPC_COMPAT
+
+extern mach_msg_type_name_t
+ipc_object_copyout_type_compat(/* mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_object_copyin_compat(/* ipc_space_t, mach_port_t,
+ mach_msg_type_name_t, boolean_t,
+ ipc_object_t * */);
+
+extern kern_return_t
+ipc_object_copyin_header(/* ipc_space_t, mach_port_t,
+ ipc_object_t *, mach_msg_type_name_t * */);
+
+extern kern_return_t
+ipc_object_copyout_compat(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t * */);
+
+extern kern_return_t
+ipc_object_copyout_name_compat(/* ipc_space_t, ipc_object_t,
+ mach_msg_type_name_t, mach_port_t */);
+
+#endif MACH_IPC_COMPAT
+
+extern void
+ipc_object_print(/* ipc_object_t */);
+
+#endif _IPC_IPC_OBJECT_H_
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
new file mode 100644
index 0000000..770e780
--- /dev/null
+++ b/ipc/ipc_port.c
@@ -0,0 +1,1545 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * File: ipc/ipc_port.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC ports.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <kern/lock.h>
+#include <kern/ipc_sched.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_notify.h>
+#if NORMA_IPC
+#include <norma/ipc_node.h>
+#endif NORMA_IPC
+
+
+
/* Serializes walks over chains of in-transit ports. */
decl_simple_lock_data(, ipc_port_multiple_lock_data)

/* Protects the global timestamp counter below. */
decl_simple_lock_data(, ipc_port_timestamp_lock_data)
ipc_port_timestamp_t ipc_port_timestamp_data;

/*
 *	Routine:	ipc_port_timestamp
 *	Purpose:
 *		Retrieve a timestamp value: a monotonically increasing
 *		counter, each call returning a distinct value.
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	ipc_port_timestamp_t timestamp;

	ipc_port_timestamp_lock();
	timestamp = ipc_port_timestamp_data++;
	ipc_port_timestamp_unlock();

	return timestamp;
}
+
/*
 *	Routine:	ipc_port_dnrequest
 *	Purpose:
 *		Try to allocate a dead-name request slot in the port's
 *		dnrequest table.  On success the slot index is returned
 *		through indexp.
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		KERN_SUCCESS		A request index was found.
 *		KERN_NO_SPACE		No index allocated: the table is
 *					absent or full (the caller may
 *					grow it with ipc_port_dngrow).
 */

kern_return_t
ipc_port_dnrequest(port, name, soright, indexp)
	ipc_port_t port;
	mach_port_t name;
	ipc_port_t soright;
	ipc_port_request_index_t *indexp;
{
	ipc_port_request_t ipr, table;
	ipc_port_request_index_t index;

	assert(ip_active(port));
	assert(name != MACH_PORT_NULL);
	assert(soright != IP_NULL);

	table = port->ip_dnrequests;
	if (table == IPR_NULL)
		return KERN_NO_SPACE;

	/* Slot 0 is the free-list head; ipr_next chains free slots. */
	index = table->ipr_next;
	if (index == 0)
		return KERN_NO_SPACE;

	ipr = &table[index];
	assert(ipr->ipr_name == MACH_PORT_NULL);

	/* Pop the slot off the free list and fill it in. */
	table->ipr_next = ipr->ipr_next;
	ipr->ipr_name = name;
	ipr->ipr_soright = soright;

	*indexp = index;
	return KERN_SUCCESS;
}
+
/*
 *	Routine:	ipc_port_dngrow
 *	Purpose:
 *		Grow a port's table of dead-name requests.
 *	Conditions:
 *		The port must be locked and active.
 *		Nothing else locked; will allocate memory.
 *		Upon return the port is unlocked.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The port died.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate new table.
 */

kern_return_t
ipc_port_dngrow(port)
	ipc_port_t port;
{
	ipc_table_size_t its;
	ipc_port_request_t otable, ntable;

	assert(ip_active(port));

	/*
	 *	ipr_size points into the ipc_table_dnrequests size
	 *	vector; "+ 1" selects the next larger table size.
	 */
	otable = port->ip_dnrequests;
	if (otable == IPR_NULL)
		its = &ipc_table_dnrequests[0];
	else
		its = otable->ipr_size + 1;

	/* Hold a port ref across the unlocked allocation. */
	ip_reference(port);
	ip_unlock(port);

	/* its_size == 0 terminates the size vector: no larger table. */
	if ((its->its_size == 0) ||
	    ((ntable = it_dnrequests_alloc(its)) == IPR_NULL)) {
		ipc_port_release(port);
		return KERN_RESOURCE_SHORTAGE;
	}

	ip_lock(port);
	ip_release(port);

	/*
	 *	Check that port is still active and that nobody else
	 *	has slipped in and grown the table on us.  Note that
	 *	just checking port->ip_dnrequests == otable isn't
	 *	sufficient; must check ipr_size.
	 */

	if (ip_active(port) &&
	    (port->ip_dnrequests == otable) &&
	    ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
		ipc_table_size_t oits = 0; /* '=0' to shut up lint */
		ipc_table_elems_t osize, nsize;
		ipc_port_request_index_t free, i;

		/* copy old table to new table */

		if (otable != IPR_NULL) {
			oits = otable->ipr_size;
			osize = oits->its_size;
			free = otable->ipr_next;

			/* slot 0 is the header; copy only the payload slots */
			bcopy((char *)(otable + 1), (char *)(ntable + 1),
			      (osize - 1) * sizeof(struct ipc_port_request));
		} else {
			osize = 1;
			free = 0;
		}

		nsize = its->its_size;
		assert(nsize > osize);

		/* add new elements to the new table's free list */

		for (i = osize; i < nsize; i++) {
			ipc_port_request_t ipr = &ntable[i];

			ipr->ipr_name = MACH_PORT_NULL;
			ipr->ipr_next = free;
			free = i;
		}

		ntable->ipr_next = free;
		ntable->ipr_size = its;
		port->ip_dnrequests = ntable;
		ip_unlock(port);

		if (otable != IPR_NULL)
			it_dnrequests_free(oits, otable);
	} else {
		/* lost the race (or the port died): discard our table */
		ip_check_unlock(port);
		it_dnrequests_free(its, ntable);
	}

	return KERN_SUCCESS;
}
+
/*
 *	Routine:	ipc_port_dncancel
 *	Purpose:
 *		Cancel a dead-name request and return the send-once right.
 *	Conditions:
 *		The port must be locked and active.
 */

ipc_port_t
ipc_port_dncancel(
	ipc_port_t			port,
	mach_port_t			name,
	ipc_port_request_index_t	index)
{
	ipc_port_request_t ipr, table;
	ipc_port_t dnrequest;

	assert(ip_active(port));
	assert(name != MACH_PORT_NULL);
	assert(index != 0);

	table = port->ip_dnrequests;
	assert(table != IPR_NULL);

	ipr = &table[index];
	dnrequest = ipr->ipr_soright;
	assert(ipr->ipr_name == name);

	/* return ipr to the free list inside the table */

	ipr->ipr_name = MACH_PORT_NULL;
	ipr->ipr_next = table->ipr_next;
	table->ipr_next = index;

	return dnrequest;
}
+
+/*
+ * Routine: ipc_port_pdrequest
+ * Purpose:
+ * Make a port-deleted request, returning the
+ * previously registered send-once right.
+ * Just cancels the previous request if notify is IP_NULL.
+ * Conditions:
+ * The port is locked and active. It is unlocked.
+ * Consumes a ref for notify (if non-null), and
+ * returns previous with a ref (if non-null).
+ */
+
+void
+ipc_port_pdrequest(
+ ipc_port_t port,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+
+ assert(ip_active(port));
+
+ previous = port->ip_pdrequest;
+ port->ip_pdrequest = notify;
+ ip_unlock(port);
+
+ *previousp = previous;
+}
+
+/*
+ * Routine: ipc_port_nsrequest
+ * Purpose:
+ * Make a no-senders request, returning the
+ * previously registered send-once right.
+ * Just cancels the previous request if notify is IP_NULL.
+ * Conditions:
+ * The port is locked and active. It is unlocked.
+ * Consumes a ref for notify (if non-null), and
+ * returns previous with a ref (if non-null).
+ */
+
+void
+ipc_port_nsrequest(
+ ipc_port_t port,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
+{
+ ipc_port_t previous;
+ mach_port_mscount_t mscount;
+
+ assert(ip_active(port));
+
+ previous = port->ip_nsrequest;
+ mscount = port->ip_mscount;
+
+ if ((port->ip_srights == 0) &&
+ (sync <= mscount) &&
+ (notify != IP_NULL)) {
+ port->ip_nsrequest = IP_NULL;
+ ip_unlock(port);
+ ipc_notify_no_senders(notify, mscount);
+ } else {
+ port->ip_nsrequest = notify;
+ ip_unlock(port);
+ }
+
+ *previousp = previous;
+}
+
/*
 *	Routine:	ipc_port_set_qlimit
 *	Purpose:
 *		Changes a port's queue limit; the maximum number
 *		of messages which may be queued to the port.
 *	Conditions:
 *		The port is locked and active.
 */

void
ipc_port_set_qlimit(
	ipc_port_t		port,
	mach_port_msgcount_t	qlimit)
{
	assert(ip_active(port));

	/* wake up senders allowed by the new qlimit */

	if (qlimit > port->ip_qlimit) {
		mach_port_msgcount_t i, wakeup;

		/* caution: wakeup, qlimit are unsigned */

		wakeup = qlimit - port->ip_qlimit;

		/* unblock one sender per newly-available queue slot */
		for (i = 0; i < wakeup; i++) {
			ipc_thread_t th;

			th = ipc_thread_dequeue(&port->ip_blocked);
			if (th == ITH_NULL)
				break;

			th->ith_state = MACH_MSG_SUCCESS;
			thread_go(th);
		}
	}

	port->ip_qlimit = qlimit;
}
+
/*
 *	Routine:	ipc_port_lock_mqueue
 *	Purpose:
 *		Locks and returns the message queue that the port is using.
 *		The message queue may be in the port or in its port set.
 *	Conditions:
 *		The port is locked and active.
 *		Port set, message queue locks may be taken.
 */

ipc_mqueue_t
ipc_port_lock_mqueue(port)
	ipc_port_t port;
{
	if (port->ip_pset != IPS_NULL) {
		ipc_pset_t pset = port->ip_pset;

		ips_lock(pset);
		if (ips_active(pset)) {
			/* pset lock can be dropped once its mqueue is held */
			imq_lock(&pset->ips_messages);
			ips_unlock(pset);
			return &pset->ips_messages;
		}

		/* the pset died; detach the port and fall back below */
		ipc_pset_remove(pset, port);
		ips_check_unlock(pset);
	}

	imq_lock(&port->ip_messages);
	return &port->ip_messages;
}
+
+/*
+ * Routine: ipc_port_set_seqno
+ * Purpose:
+ * Changes a port's sequence number.
+ * Conditions:
+ * The port is locked and active.
+ * Port set, message queue locks may be taken.
+ */
+
+void
+ipc_port_set_seqno(port, seqno)
+ ipc_port_t port;
+ mach_port_seqno_t seqno;
+{
+ ipc_mqueue_t mqueue;
+
+ mqueue = ipc_port_lock_mqueue(port);
+ port->ip_seqno = seqno;
+ imq_unlock(mqueue);
+}
+
/*
 *	Routine:	ipc_port_clear_receiver
 *	Purpose:
 *		Prepares a receive right for transmission/destruction:
 *		detaches the port from its port set (if any) or wakes
 *		pending receivers, then resets make-send count and
 *		sequence number.
 *	Conditions:
 *		The port is locked and active.
 */

void
ipc_port_clear_receiver(
	ipc_port_t	port)
{
	ipc_pset_t pset;

	assert(ip_active(port));

	pset = port->ip_pset;
	if (pset != IPS_NULL) {
		/* No threads receiving from port, but must remove from set. */

		ips_lock(pset);
		ipc_pset_remove(pset, port);
		ips_check_unlock(pset);
	} else {
		/* Else, wake up all receivers, indicating why. */

		imq_lock(&port->ip_messages);
		ipc_mqueue_changed(&port->ip_messages, MACH_RCV_PORT_DIED);
		imq_unlock(&port->ip_messages);
	}

	ipc_port_set_mscount(port, 0);
	/* the mqueue lock protects ip_seqno */
	imq_lock(&port->ip_messages);
	port->ip_seqno = 0;
	imq_unlock(&port->ip_messages);
}
+
/*
 *	Routine:	ipc_port_init
 *	Purpose:
 *		Initializes a newly-allocated port: receiver space,
 *		right counts, notification requests, queueing state,
 *		and (if configured) NORMA distributed-IPC fields.
 *		Doesn't touch the ip_object fields.
 */

void
ipc_port_init(
	ipc_port_t	port,
	ipc_space_t	space,
	mach_port_t	name)
{
	/* port->ip_kobject doesn't have to be initialized */

	ipc_target_init(&port->ip_target, name);

	port->ip_receiver = space;

	/* no send/send-once rights exist yet */
	port->ip_mscount = 0;
	port->ip_srights = 0;
	port->ip_sorights = 0;

	/* no notification requests registered yet */
	port->ip_nsrequest = IP_NULL;
	port->ip_pdrequest = IP_NULL;
	port->ip_dnrequests = IPR_NULL;

	port->ip_pset = IPS_NULL;
	port->ip_cur_target = &port->ip_target;
	port->ip_seqno = 0;
	port->ip_msgcount = 0;
	port->ip_qlimit = MACH_PORT_QLIMIT_DEFAULT;

#if	NORMA_IPC
	port->ip_norma_uid = 0;
	port->ip_norma_dest_node = 0;
	port->ip_norma_stransit = 0;
	port->ip_norma_sotransit = 0;
	port->ip_norma_xmm_object_refs = 0;
	port->ip_norma_is_proxy = FALSE;
	port->ip_norma_is_special = FALSE;
	port->ip_norma_atrium = IP_NULL;
	/* the norma queue/next links are self-referential when unqueued */
	port->ip_norma_queue_next = port;
	port->ip_norma_xmm_object = IP_NULL;
	port->ip_norma_next = port;
	port->ip_norma_spare1 = 0L;
	port->ip_norma_spare2 = 0L;
	port->ip_norma_spare3 = 0L;
	port->ip_norma_spare4 = 0L;
#endif	NORMA_IPC

	ipc_mqueue_init(&port->ip_messages);
	ipc_thread_queue_init(&port->ip_blocked);
}
+
+/*
+ * Routine: ipc_port_alloc
+ * Purpose:
+ * Allocate a port.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NO_SPACE No room for an entry in the space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc(
+ ipc_space_t space,
+ mach_port_t *namep,
+ ipc_port_t *portp)
+{
+ ipc_port_t port;
+ mach_port_t name;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc(space, IOT_PORT,
+ MACH_PORT_TYPE_RECEIVE, 0,
+ &name, (ipc_object_t *) &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+
+ /* port is locked */
+
+ ipc_port_init(port, space, name);
+
+ *namep = name;
+ *portp = port;
+
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_port_alloc_name
+ * Purpose:
+ * Allocate a port, with a specific name.
+ * Conditions:
+ * Nothing locked. If successful, the port is returned
+ * locked. (The caller doesn't have a reference.)
+ * Returns:
+ * KERN_SUCCESS The port is allocated.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc_name(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_port_t *portp)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ kr = ipc_object_alloc_name(space, IOT_PORT,
+ MACH_PORT_TYPE_RECEIVE, 0,
+ name, (ipc_object_t *) &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked */
+
+ ipc_port_init(port, space, name);
+
+ *portp = port;
+ return KERN_SUCCESS;
+}
+
#if	MACH_IPC_COMPAT
/*
 *	Routine:	ipc_port_delete_compat
 *	Purpose:
 *		Find and destroy a compat entry for a dead port.
 *		If successful, generate a port-deleted notification.
 *	Conditions:
 *		Nothing locked; the port is dead.
 *		Frees a ref for the space.
 */

void
ipc_port_delete_compat(port, space, name)
	ipc_port_t port;
	ipc_space_t space;
	mach_port_t name;
{
	ipc_entry_t entry;
	kern_return_t kr;

	assert(!ip_active(port));

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr == KERN_SUCCESS) {
		ipc_port_t sright;

		/* space is write-locked and active */

		/* only act if the entry still refers to this dead port */
		if ((ipc_port_t) entry->ie_object == port) {
			assert(entry->ie_bits & IE_BITS_COMPAT);

			sright = ipc_space_make_notify(space);

			kr = ipc_right_destroy(space, name, entry);
			/* space is unlocked */
			assert(kr == KERN_INVALID_NAME);
		} else {
			is_write_unlock(space);
			sright = IP_NULL;
		}

		if (IP_VALID(sright))
			ipc_notify_port_deleted_compat(sright, name);
	}

	is_release(space);
}
#endif	/* MACH_IPC_COMPAT */
+
+/*
+ * Routine: ipc_port_destroy
+ * Purpose:
+ * Destroys a port. Cleans up queued messages.
+ *
+ * If the port has a backup, it doesn't get destroyed,
+ * but is sent in a port-destroyed notification to the backup.
+ * Conditions:
+ * The port is locked and alive; nothing else locked.
+ * The caller has a reference, which is consumed.
+ * Afterwards, the port is unlocked and dead.
+ */
+
+void
+ipc_port_destroy(
+ ipc_port_t port)
+{
+ ipc_port_t pdrequest, nsrequest;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_queue_t kmqueue;
+ ipc_kmsg_t kmsg;
+ ipc_thread_t sender;
+ ipc_port_request_t dnrequests;
+
+ assert(ip_active(port));
+ /* port->ip_receiver_name is garbage */
+ /* port->ip_receiver/port->ip_destination is garbage */
+ assert(port->ip_pset == IPS_NULL);
+ assert(port->ip_mscount == 0);
+ assert(port->ip_seqno == 0);
+
+ /* first check for a backup port */
+
+ pdrequest = port->ip_pdrequest;
+ if (pdrequest != IP_NULL) {
+ /* we assume the ref for pdrequest */
+ port->ip_pdrequest = IP_NULL;
+
+ /* make port be in limbo */
+ port->ip_receiver_name = MACH_PORT_NULL;
+ port->ip_destination = IP_NULL;
+ ip_unlock(port);
+
+#if MACH_IPC_COMPAT
+ /*
+ * pdrequest might actually be a send right instead
+ * of a send-once right, indicated by the low bit
+ * of the pointer value. If this is the case,
+ * we must use ipc_notify_port_destroyed_compat.
+ */
+
+ if (ip_pdsendp(pdrequest)) {
+ ipc_port_t sright = ip_pdsend(pdrequest);
+
+ if (!ipc_port_check_circularity(port, sright)) {
+ /* consumes our refs for port and sright */
+ ipc_notify_port_destroyed_compat(sright, port);
+ return;
+ } else {
+ /* consume sright and destroy port */
+ ipc_port_release_send(sright);
+ }
+ } else
+#endif MACH_IPC_COMPAT
+
+ if (!ipc_port_check_circularity(port, pdrequest)) {
+ /* consumes our refs for port and pdrequest */
+ ipc_notify_port_destroyed(pdrequest, port);
+ return;
+ } else {
+ /* consume pdrequest and destroy port */
+ ipc_port_release_sonce(pdrequest);
+ }
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_pset == IPS_NULL);
+ assert(port->ip_mscount == 0);
+ assert(port->ip_seqno == 0);
+ assert(port->ip_pdrequest == IP_NULL);
+ assert(port->ip_receiver_name == MACH_PORT_NULL);
+ assert(port->ip_destination == IP_NULL);
+
+ /* fall through and destroy the port */
+ }
+
+#if NORMA_IPC
+ /*
+ * destroy any NORMA_IPC state associated with port
+ */
+ norma_ipc_port_destroy(port);
+#endif NORMA_IPC
+
+ /*
+ * rouse all blocked senders
+ *
+ * This must be done with the port locked, because
+ * ipc_mqueue_send can play with the ip_blocked queue
+ * of a dead port.
+ */
+
+ while ((sender = ipc_thread_dequeue(&port->ip_blocked)) != ITH_NULL) {
+ sender->ith_state = MACH_MSG_SUCCESS;
+ thread_go(sender);
+ }
+
+ /* once port is dead, we don't need to keep it locked */
+
+ port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
+ port->ip_timestamp = ipc_port_timestamp();
+ ip_unlock(port);
+
+ /* throw away no-senders request */
+
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL)
+ ipc_notify_send_once(nsrequest); /* consumes ref */
+
+ /* destroy any queued messages */
+
+ mqueue = &port->ip_messages;
+ imq_lock(mqueue);
+ assert(ipc_thread_queue_empty(&mqueue->imq_threads));
+ kmqueue = &mqueue->imq_messages;
+
+ while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
+ imq_unlock(mqueue);
+
+ assert(kmsg->ikm_header.msgh_remote_port ==
+ (mach_port_t) port);
+
+ ipc_port_release(port);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(kmsg);
+
+ imq_lock(mqueue);
+ }
+
+ imq_unlock(mqueue);
+
+ /* generate dead-name notifications */
+
+ dnrequests = port->ip_dnrequests;
+ if (dnrequests != IPR_NULL) {
+ ipc_table_size_t its = dnrequests->ipr_size;
+ ipc_table_elems_t size = its->its_size;
+ ipc_port_request_index_t index;
+
+ for (index = 1; index < size; index++) {
+ ipc_port_request_t ipr = &dnrequests[index];
+ mach_port_t name = ipr->ipr_name;
+ ipc_port_t soright;
+
+ if (name == MACH_PORT_NULL)
+ continue;
+
+ soright = ipr->ipr_soright;
+ assert(soright != IP_NULL);
+
+#if MACH_IPC_COMPAT
+ if (ipr_spacep(soright)) {
+ ipc_port_delete_compat(port,
+ ipr_space(soright), name);
+ continue;
+ }
+#endif MACH_IPC_COMPAT
+
+ ipc_notify_dead_name(soright, name);
+ }
+
+ it_dnrequests_free(its, dnrequests);
+ }
+
+ if (ip_kotype(port) != IKOT_NONE)
+ ipc_kobject_destroy(port);
+
+ /* Common destruction for the IPC target. */
+ ipc_target_terminate(&port->ip_target);
+
+ ipc_port_release(port); /* consume caller's ref */
+}
+
/*
 *	Routine:	ipc_port_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_port_check_circularity(
	ipc_port_t	port,
	ipc_port_t	dest)
{
	ipc_port_t base;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	if (port == dest)
		return TRUE;
	base = dest;

	/*
	 *	First try a quick check that can run in parallel.
	 *	No circularity if dest is not in transit.
	 */

	ip_lock(port);
	if (ip_lock_try(dest)) {
		if (!ip_active(dest) ||
		    (dest->ip_receiver_name != MACH_PORT_NULL) ||
		    (dest->ip_destination == IP_NULL))
			goto not_circular;	/* holds both locks */

		/* dest is in transit; further checking necessary */

		ip_unlock(dest);
	}
	ip_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 *	Search for the end of the chain (a port not in transit),
	 *	acquiring locks along the way.
	 */

	for (;;) {
		ip_lock(base);

		if (!ip_active(base) ||
		    (base->ip_receiver_name != MACH_PORT_NULL) ||
		    (base->ip_destination == IP_NULL))
			break;

		base = base->ip_destination;
	}

	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */

		assert(ip_active(port));
		assert(port->ip_receiver_name == MACH_PORT_NULL);
		assert(port->ip_destination == IP_NULL);

		/* unwind: unlock every port locked by the chain walk */
		while (dest != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */

			assert(ip_active(dest));
			assert(dest->ip_receiver_name == MACH_PORT_NULL);

			next = dest->ip_destination;
			ip_unlock(dest);
			dest = next;
		}

		return TRUE;
	}

	/*
	 *	The guarantee:  lock port while the entire chain is locked.
	 *	Once port is locked, we can take a reference to dest,
	 *	add port to the chain, and unlock everything.
	 */

	ip_lock(port);
	ipc_port_multiple_unlock();

    not_circular:

	/* port is in limbo */

	assert(ip_active(port));
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	assert(port->ip_destination == IP_NULL);

	ip_reference(dest);
	port->ip_destination = dest;

	/* now unlock chain */

	while (port != base) {
		ipc_port_t next;

		/* port is in transit */

		assert(ip_active(port));
		assert(port->ip_receiver_name == MACH_PORT_NULL);
		assert(port->ip_destination != IP_NULL);

		next = port->ip_destination;
		ip_unlock(port);
		port = next;
	}

	/* base is not in transit */

	assert(!ip_active(base) ||
	       (base->ip_receiver_name != MACH_PORT_NULL) ||
	       (base->ip_destination == IP_NULL));
	ip_unlock(base);

	return FALSE;
}
+
+/*
+ *	Routine:	ipc_port_lookup_notify
+ *	Purpose:
+ *		Make a send-once notify port from a receive right.
+ *		Returns IP_NULL if name doesn't denote a receive right.
+ *	Conditions:
+ *		The space must be locked (read or write) and active.
+ */
+
+ipc_port_t
+ipc_port_lookup_notify(
+	ipc_space_t	space,
+	mach_port_t	name)
+{
+	ipc_port_t port;
+	ipc_entry_t entry;
+
+	assert(space->is_active);
+
+	/* translate the name; fail unless it denotes a receive right */
+	entry = ipc_entry_lookup(space, name);
+	if (entry == IE_NULL)
+		return IP_NULL;
+
+	if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
+		return IP_NULL;
+
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	ip_lock(port);
+	assert(ip_active(port));
+	assert(port->ip_receiver_name == name);
+	assert(port->ip_receiver == space);
+
+	/* the returned naked send-once right carries a port ref */
+	ip_reference(port);
+	port->ip_sorights++;
+	ip_unlock(port);
+
+	return port;
+}
+
+/*
+ *	Routine:	ipc_port_make_send
+ *	Purpose:
+ *		Make a naked send right from a receive right.
+ *		The returned right carries a reference for the port.
+ *	Conditions:
+ *		The port is not locked but it is active.
+ */
+
+ipc_port_t
+ipc_port_make_send(
+	ipc_port_t	port)
+{
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+	assert(ip_active(port));
+	/* bump make-send count and send-right count together */
+	port->ip_mscount++;
+	port->ip_srights++;
+	ip_reference(port);
+	ip_unlock(port);
+
+	return port;
+}
+
+/*
+ *	Routine:	ipc_port_copy_send
+ *	Purpose:
+ *		Make a naked send right from another naked send right.
+ *			IP_NULL		-> IP_NULL
+ *			IP_DEAD		-> IP_DEAD
+ *			dead port	-> IP_DEAD
+ *			live port	-> port + ref
+ *	Conditions:
+ *		Nothing locked except possibly a space.
+ */
+
+ipc_port_t
+ipc_port_copy_send(
+	ipc_port_t	port)
+{
+	ipc_port_t sright;
+
+	/* IP_NULL and IP_DEAD pass through unchanged */
+	if (!IP_VALID(port))
+		return port;
+
+	ip_lock(port);
+	if (ip_active(port)) {
+		assert(port->ip_srights > 0);
+
+		/* duplicate the send right: new ref, new sright count */
+		ip_reference(port);
+		port->ip_srights++;
+		sright = port;
+	} else
+		sright = IP_DEAD;	/* port died since the right was made */
+	ip_unlock(port);
+
+	return sright;
+}
+
+/*
+ *	Routine:	ipc_port_copyout_send
+ *	Purpose:
+ *		Copyout a naked send right (possibly null/dead),
+ *		or if that fails, destroy the right.
+ *		Returns the name in "space" that denotes the right,
+ *		or MACH_PORT_NULL/MACH_PORT_DEAD on failure.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+mach_port_t
+ipc_port_copyout_send(
+	ipc_port_t	sright,
+	ipc_space_t	space)
+{
+	mach_port_t name;
+
+	if (IP_VALID(sright)) {
+		kern_return_t kr;
+
+		kr = ipc_object_copyout(space, (ipc_object_t) sright,
+					MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
+		if (kr != KERN_SUCCESS) {
+			/* copyout failed: consume the right instead */
+			ipc_port_release_send(sright);
+
+			if (kr == KERN_INVALID_CAPABILITY)
+				name = MACH_PORT_DEAD;
+			else
+				name = MACH_PORT_NULL;
+		}
+	} else
+		name = (mach_port_t) sright;	/* IP_NULL/IP_DEAD map directly */
+
+	return name;
+}
+
+/*
+ *	Routine:	ipc_port_release_send
+ *	Purpose:
+ *		Release a (valid) naked send right.
+ *		Consumes a ref for the port.
+ *		May generate a no-senders notification if this was
+ *		the last send right and one was requested.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+void
+ipc_port_release_send(
+	ipc_port_t	port)
+{
+	ipc_port_t nsrequest = IP_NULL;
+	mach_port_mscount_t mscount;
+
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+	ip_release(port);
+
+	if (!ip_active(port)) {
+		/* dead port: just drop the ref; may free the port */
+		ip_check_unlock(port);
+		return;
+	}
+
+	assert(port->ip_srights > 0);
+
+	if (--port->ip_srights == 0) {
+		/* last send right gone: claim any no-senders request */
+		nsrequest = port->ip_nsrequest;
+		if (nsrequest != IP_NULL) {
+			port->ip_nsrequest = IP_NULL;
+			/* mscount is only read below when nsrequest != IP_NULL,
+			   so it is always initialized before use */
+			mscount = port->ip_mscount;
+		}
+	}
+
+	ip_unlock(port);
+
+	/* send the notification after dropping the port lock */
+	if (nsrequest != IP_NULL)
+		ipc_notify_no_senders(nsrequest, mscount);
+}
+
+/*
+ *	Routine:	ipc_port_make_sonce
+ *	Purpose:
+ *		Make a naked send-once right from a receive right.
+ *		The returned right carries a reference for the port.
+ *	Conditions:
+ *		The port is not locked but it is active.
+ */
+
+ipc_port_t
+ipc_port_make_sonce(
+	ipc_port_t	port)
+{
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+	assert(ip_active(port));
+	port->ip_sorights++;
+	ip_reference(port);
+	ip_unlock(port);
+
+	return port;
+}
+
+/*
+ *	Routine:	ipc_port_release_sonce
+ *	Purpose:
+ *		Release a naked send-once right.
+ *		Consumes a ref for the port.
+ *
+ *		In normal situations, this is never used.
+ *		Send-once rights are only consumed when
+ *		a message (possibly a send-once notification)
+ *		is sent to them.
+ *	Conditions:
+ *		Nothing locked except possibly a space.
+ */
+
+void
+ipc_port_release_sonce(
+	ipc_port_t	port)
+{
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+
+	assert(port->ip_sorights > 0);
+
+	port->ip_sorights--;
+
+	ip_release(port);
+
+	if (!ip_active(port)) {
+		/* dropping the ref may have left the port free-able */
+		ip_check_unlock(port);
+		return;
+	}
+
+	ip_unlock(port);
+}
+
+/*
+ *	Routine:	ipc_port_release_receive
+ *	Purpose:
+ *		Release a naked (in limbo or in transit) receive right.
+ *		Consumes a ref for the port; destroys the port.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+void
+ipc_port_release_receive(
+	ipc_port_t	port)
+{
+	ipc_port_t dest;
+
+	assert(IP_VALID(port));
+
+	ip_lock(port);
+	assert(ip_active(port));
+	/* in limbo or in transit: not held by any space */
+	assert(port->ip_receiver_name == MACH_PORT_NULL);
+	dest = port->ip_destination;
+
+	ipc_port_destroy(port); /* consumes ref, unlocks */
+
+	/* drop the ref the ip_destination link held (in-transit case) */
+	if (dest != IP_NULL)
+		ipc_port_release(dest);
+}
+
+/*
+ *	Routine:	ipc_port_alloc_special
+ *	Purpose:
+ *		Allocate a port in a special space
+ *		(e.g. ipc_space_kernel or ipc_space_reply).
+ *		The new port is returned with one ref.
+ *		If unsuccessful, IP_NULL is returned.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+ipc_port_t
+ipc_port_alloc_special(space)
+	ipc_space_t space;
+{
+#if	NORMA_IPC
+#if	i386
+	/* NOTE(review): debugging aid -- peeks up the stack frame for the
+	   caller's return address; i386-ABI-specific, fragile by design. */
+	int ret = (&ret)[2];	/* where we were called from */
+#else
+	int ret = (int) ipc_port_alloc_special;
+#endif
+	extern int input_msgh_id;
+#endif	NORMA_IPC
+	ipc_port_t port;
+
+	port = (ipc_port_t) io_alloc(IOT_PORT);
+	if (port == IP_NULL)
+		return IP_NULL;
+
+	io_lock_init(&port->ip_object);
+	port->ip_references = 1;
+	port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
+
+	/*
+	 * The actual values of ip_receiver_name aren't important,
+	 * as long as they are valid (not null/dead).
+	 *
+	 * Mach4: we set it to the internal port structure address
+	 * so we can always just pass on ip_receiver_name during
+	 * an rpc regardless of whether the destination is user or
+	 * kernel (i.e. no special-casing code for the kernel along
+	 * the fast rpc path).
+	 */
+
+	ipc_port_init(port, space, (mach_port_t)port);
+
+#if	NORMA_IPC
+	/* stash allocation-site breadcrumbs in the NORMA spare fields */
+	port->ip_norma_spare1 = ret;
+	port->ip_norma_spare2 = input_msgh_id;
+#endif	NORMA_IPC
+	return port;
+}
+
+/*
+ *	Routine:	ipc_port_dealloc_special
+ *	Purpose:
+ *		Deallocate a port in a special space.
+ *		Consumes one ref for the port.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+void
+ipc_port_dealloc_special(
+	ipc_port_t	port,
+	ipc_space_t	space)
+{
+	ip_lock(port);
+	assert(ip_active(port));
+	assert(port->ip_receiver_name != MACH_PORT_NULL);
+	assert(port->ip_receiver == space);
+
+	/*
+	 * We clear ip_receiver_name and ip_receiver to simplify
+	 * the ipc_space_kernel check in ipc_mqueue_send.
+	 */
+
+	port->ip_receiver_name = MACH_PORT_NULL;
+	port->ip_receiver = IS_NULL;
+
+	/*
+	 * For ipc_space_kernel, all ipc_port_clear_receiver does
+	 * is clean things up for the assertions in ipc_port_destroy.
+	 * For ipc_space_reply, there might be a waiting receiver.
+	 */
+
+	ipc_port_clear_receiver(port);
+	ipc_port_destroy(port);	/* consumes our ref, unlocks */
+}
+
+#if	MACH_IPC_COMPAT
+
+/*
+ *	Routine:	ipc_port_alloc_compat
+ *	Purpose:
+ *		Allocate a port.
+ *	Conditions:
+ *		Nothing locked.  If successful, the port is returned
+ *		locked.  (The caller doesn't have a reference.)
+ *
+ *		Like ipc_port_alloc, except that the new entry
+ *		is IE_BITS_COMPAT.
+ *	Returns:
+ *		KERN_SUCCESS		The port is allocated.
+ *		KERN_INVALID_TASK	The space is dead.
+ *		KERN_NO_SPACE		No room for an entry in the space.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_port_alloc_compat(space, namep, portp)
+	ipc_space_t space;
+	mach_port_t *namep;
+	ipc_port_t *portp;
+{
+	ipc_port_t port;
+	ipc_entry_t entry;
+	mach_port_t name;
+	ipc_table_size_t its;
+	ipc_port_request_t table;
+	ipc_table_elems_t size;
+	ipc_port_request_index_t free, i;
+	kern_return_t kr;
+
+	/* pre-allocate the port and a dead-name request table,
+	   so the space lock isn't held across allocations */
+	port = ip_alloc();
+	if (port == IP_NULL)
+		return KERN_RESOURCE_SHORTAGE;
+
+	its = &ipc_table_dnrequests[0];
+	table = it_dnrequests_alloc(its);
+	if (table == IPR_NULL) {
+		ip_free(port);
+		return KERN_RESOURCE_SHORTAGE;
+	}
+
+	kr = ipc_entry_alloc(space, &name, &entry);
+	if (kr != KERN_SUCCESS) {
+		ip_free(port);
+		it_dnrequests_free(its, table);
+		return kr;
+	}
+	/* space is write-locked */
+
+	entry->ie_object = (ipc_object_t) port;
+	entry->ie_request = 1;	/* index of the compat dnrequest below */
+	entry->ie_bits |= IE_BITS_COMPAT|MACH_PORT_TYPE_RECEIVE;
+
+	ip_lock_init(port);
+	ip_lock(port);
+	is_write_unlock(space);
+
+	port->ip_references = 1; /* for entry, not caller */
+	port->ip_bits = io_makebits(TRUE, IOT_PORT, 0);
+
+	ipc_port_init(port, space, name);
+
+	/*
+	 * Build the table's free list.  Slot 0 is the header
+	 * (free-list head and size), slot 1 is taken below for
+	 * the implicit compat notification, so the free list
+	 * threads slots 2..size-1.
+	 */
+	size = its->its_size;
+	assert(size > 1);
+	free = 0;
+
+	for (i = 2; i < size; i++) {
+		ipc_port_request_t ipr = &table[i];
+
+		ipr->ipr_name = MACH_PORT_NULL;
+		ipr->ipr_next = free;
+		free = i;
+	}
+
+	table->ipr_next = free;
+	table->ipr_size = its;
+	port->ip_dnrequests = table;
+
+	/* slot 1: a space-marked dnrequest (see ipr_spacem) standing in
+	   for the compat-style implicit notification; holds a space ref */
+	table[1].ipr_name = name;
+	table[1].ipr_soright = ipr_spacem(space);
+	is_reference(space);
+
+	*namep = name;
+	*portp = port;
+	return KERN_SUCCESS;
+}
+
+/*
+ *	Routine:	ipc_port_copyout_send_compat
+ *	Purpose:
+ *		Copyout a naked send right (possibly null/dead),
+ *		or if that fails, destroy the right.
+ *		Like ipc_port_copyout_send, except that if a
+ *		new translation is created it has the compat bit.
+ *	Conditions:
+ *		Nothing locked.
+ */
+
+mach_port_t
+ipc_port_copyout_send_compat(sright, space)
+	ipc_port_t sright;
+	ipc_space_t space;
+{
+	mach_port_t name;
+
+	if (IP_VALID(sright)) {
+		kern_return_t kr;
+
+		kr = ipc_object_copyout_compat(space, (ipc_object_t) sright,
+					       MACH_MSG_TYPE_PORT_SEND, &name);
+		if (kr != KERN_SUCCESS) {
+			/* copyout failed: consume the right instead */
+			ipc_port_release_send(sright);
+			name = MACH_PORT_NULL;
+		}
+	} else
+		name = (mach_port_t) sright;	/* IP_NULL/IP_DEAD map directly */
+
+	return name;
+}
+
+/*
+ *	Routine:	ipc_port_copyout_receiver
+ *	Purpose:
+ *		Copyout a port reference (possibly null)
+ *		by giving the caller his name for the port,
+ *		if he is the receiver.
+ *		Returns MACH_PORT_NULL otherwise.
+ *	Conditions:
+ *		Nothing locked.  Consumes a ref for the port.
+ */
+
+mach_port_t
+ipc_port_copyout_receiver(port, space)
+	ipc_port_t port;
+	ipc_space_t space;
+{
+	mach_port_t name;
+
+	if (!IP_VALID(port))
+		return MACH_PORT_NULL;
+
+	ip_lock(port);
+	if (port->ip_receiver == space) {
+		name = port->ip_receiver_name;
+		assert(MACH_PORT_VALID(name));
+	} else
+		name = MACH_PORT_NULL;
+
+	/* consume the caller's ref; may free a dead port */
+	ip_release(port);
+	ip_check_unlock(port);
+
+	return name;
+}
+
+#endif	MACH_IPC_COMPAT
+
+#include <mach_kdb.h>
+
+
+#if	MACH_KDB
+#define	printf	kdbprintf
+
+/*
+ *	Routine:	ipc_port_print
+ *	Purpose:
+ *		Pretty-print a port for kdb.
+ *		Dumps object header, receiver, right counts,
+ *		notification requests, and queue state.
+ */
+
+void
+ipc_port_print(port)
+	ipc_port_t port;
+{
+	extern int indent;
+
+	printf("port 0x%x\n", port);
+
+	indent += 2;
+
+	ipc_object_print(&port->ip_object);
+	iprintf("receiver=0x%x", port->ip_receiver);
+	printf(", receiver_name=0x%x\n", port->ip_receiver_name);
+
+	iprintf("mscount=%d", port->ip_mscount);
+	printf(", srights=%d", port->ip_srights);
+	printf(", sorights=%d\n", port->ip_sorights);
+
+	iprintf("nsrequest=0x%x", port->ip_nsrequest);
+	printf(", pdrequest=0x%x", port->ip_pdrequest);
+	printf(", dnrequests=0x%x\n", port->ip_dnrequests);
+
+	iprintf("pset=0x%x", port->ip_pset);
+	printf(", seqno=%d", port->ip_seqno);
+	printf(", msgcount=%d", port->ip_msgcount);
+	printf(", qlimit=%d\n", port->ip_qlimit);
+
+	iprintf("kmsgs=0x%x", port->ip_messages.imq_messages.ikmq_base);
+	printf(", rcvrs=0x%x", port->ip_messages.imq_threads.ithq_base);
+	printf(", sndrs=0x%x", port->ip_blocked.ithq_base);
+	printf(", kobj=0x%x\n", port->ip_kobject);
+
+#if	NORMA_IPC
+	iprintf("norma_uid=%x", port->ip_norma_uid);
+	printf(", dest_node=%d", port->ip_norma_dest_node);
+	printf(", stransit=%d", port->ip_norma_stransit);
+	printf(", xorefs=%d", port->ip_norma_xmm_object_refs);
+	printf(", sotransit=%d\n", port->ip_norma_sotransit);
+
+	iprintf("norma_is_proxy=%d", port->ip_norma_is_proxy);
+	printf(", is_special=%d\n", port->ip_norma_is_special);
+
+	iprintf("norma_atrium=0x%x", port->ip_norma_atrium);
+	printf(", queue_next=0x%x", port->ip_norma_queue_next);
+	printf(", xmm_object=0x%x", port->ip_norma_xmm_object);
+	printf(", next=0x%x\n", port->ip_norma_next);
+
+	iprintf("norma_spare1=0x%x", port->ip_norma_spare1);
+	printf(", norma_spare2=0x%x", port->ip_norma_spare2);
+	printf(", norma_spare3=0x%x", port->ip_norma_spare3);
+	printf(", norma_spare4=0x%x\n", port->ip_norma_spare4);
+#endif	NORMA_IPC
+
+	indent -=2;
+}
+
+#endif	MACH_KDB
diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
new file mode 100644
index 0000000..21d4309
--- /dev/null
+++ b/ipc/ipc_port.h
@@ -0,0 +1,407 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_port.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for ports.
+ */
+
+#ifndef _IPC_IPC_PORT_H_
+#define _IPC_IPC_PORT_H_
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/lock.h>
+#include <kern/macro_help.h>
+#include <kern/ipc_kobject.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_thread.h>
+#include "ipc_target.h"
+#include <mach/rpc.h>
+
+/*
+ * A receive right (port) can be in four states:
+ *	1) dead (not active, ip_timestamp has death time)
+ *	2) in a space (ip_receiver_name != 0, ip_receiver points
+ *	to the space but doesn't hold a ref for it)
+ *	3) in transit (ip_receiver_name == 0, ip_destination points
+ *	to the destination port and holds a ref for it)
+ *	4) in limbo (ip_receiver_name == 0, ip_destination == IP_NULL)
+ *
+ * If the port is active, and ip_receiver points to some space,
+ * then ip_receiver_name != 0, and that space holds receive rights.
+ * If the port is not active, then ip_timestamp contains a timestamp
+ * taken when the port was destroyed.
+ */
+
+typedef unsigned int ipc_port_timestamp_t;
+
+struct ipc_port {
+	/* Embedded common target: object header, name, message queue
+	   (see the ip_object/ip_receiver_name/ip_messages aliases below). */
+	struct ipc_target ip_target;
+
+	/* This points to the ip_target above if this port isn't on a port set;
+	   otherwise it points to the port set's ips_target. */
+	struct ipc_target *ip_cur_target;
+
+	/* Exactly one of these is meaningful, per the four states above. */
+	union {
+		struct ipc_space *receiver;
+		struct ipc_port *destination;
+		ipc_port_timestamp_t timestamp;
+	} data;
+
+	ipc_kobject_t ip_kobject;	/* kernel object this port names, if any */
+
+	mach_port_mscount_t ip_mscount;	/* make-send count */
+	mach_port_rights_t ip_srights;	/* outstanding send rights */
+	mach_port_rights_t ip_sorights;	/* outstanding send-once rights */
+
+	struct ipc_port *ip_nsrequest;	/* no-senders notification port */
+	struct ipc_port *ip_pdrequest;	/* port-deleted notification port */
+	struct ipc_port_request *ip_dnrequests;	/* dead-name request table */
+
+	struct ipc_pset *ip_pset;	/* containing port set, or IPS_NULL */
+	mach_port_seqno_t ip_seqno;		/* locked by message queue */
+	mach_port_msgcount_t ip_msgcount;	/* messages queued */
+	mach_port_msgcount_t ip_qlimit;		/* queue limit */
+	struct ipc_thread_queue ip_blocked;	/* senders blocked on qlimit */
+
+#if	NORMA_IPC
+	/* NORMA distributed-IPC bookkeeping; semantics live in the norma
+	   code, not visible here. */
+	unsigned long ip_norma_uid;
+	unsigned long ip_norma_dest_node;
+	long ip_norma_stransit;
+	long ip_norma_sotransit;
+	long ip_norma_xmm_object_refs;
+	boolean_t ip_norma_is_proxy;
+	boolean_t ip_norma_is_special;
+	struct ipc_port *ip_norma_atrium;
+	struct ipc_port *ip_norma_queue_next;
+	struct ipc_port *ip_norma_xmm_object;
+	struct ipc_port *ip_norma_next;
+	long ip_norma_spare1;
+	long ip_norma_spare2;
+	long ip_norma_spare3;
+	long ip_norma_spare4;
+#endif	NORMA_IPC
+};
+
+/* Field aliases into the embedded ipc_target / ipc_object. */
+#define ip_object		ip_target.ipt_object
+#define ip_receiver_name	ip_target.ipt_name
+#define ip_messages		ip_target.ipt_messages
+#define ip_references		ip_object.io_references
+#define ip_bits			ip_object.io_bits
+#define ip_receiver		data.receiver
+#define ip_destination		data.destination
+#define ip_timestamp		data.timestamp
+
+#define	IP_NULL			((ipc_port_t) IO_NULL)
+#define	IP_DEAD			((ipc_port_t) IO_DEAD)
+
+#define	IP_VALID(port)		IO_VALID(&(port)->ip_object)
+
+/* Lock/reference operations, delegated to the underlying ipc_object. */
+#define	ip_active(port)		io_active(&(port)->ip_object)
+#define	ip_lock_init(port)	io_lock_init(&(port)->ip_object)
+#define	ip_lock(port)		io_lock(&(port)->ip_object)
+#define	ip_lock_try(port)	io_lock_try(&(port)->ip_object)
+#define	ip_unlock(port)		io_unlock(&(port)->ip_object)
+#define	ip_check_unlock(port)	io_check_unlock(&(port)->ip_object)
+#define	ip_reference(port)	io_reference(&(port)->ip_object)
+#define	ip_release(port)	io_release(&(port)->ip_object)
+
+#define	ip_alloc()		((ipc_port_t) io_alloc(IOT_PORT))
+#define	ip_free(port)		io_free(IOT_PORT, &(port)->ip_object)
+
+#define	ip_kotype(port)		io_kotype(&(port)->ip_object)
+
+typedef ipc_table_index_t ipc_port_request_index_t;
+
+/*
+ * One slot of a port's dead-name request table (ip_dnrequests).
+ * An in-use slot holds a name and a send-once right; a free slot
+ * threads the free list through the "index" member.  Slot 0 is a
+ * header (free-list head and table size).
+ */
+typedef struct ipc_port_request {
+	union {
+		struct ipc_port *port;
+		ipc_port_request_index_t index;
+	} notify;
+
+	union {
+		mach_port_t name;
+		struct ipc_table_size *size;
+	} name;
+} *ipc_port_request_t;
+
+#define	ipr_next		notify.index
+#define	ipr_size		name.size
+
+#define	ipr_soright		notify.port
+#define	ipr_name		name.name
+
+#define	IPR_NULL		((ipc_port_request_t) 0)
+
+#if	MACH_IPC_COMPAT
+/*
+ * For backwards compatibility, the ip_pdrequest field can hold a
+ * send right instead of a send-once right.  This is indicated by
+ * the low bit of the pointer.  This works because the zone package
+ * guarantees that the two low bits of port pointers are zero.
+ */
+
+#define	ip_pdsendp(soright)	((unsigned int)(soright) & 1)
+#define	ip_pdsend(soright)	((ipc_port_t)((unsigned int)(soright) &~ 1))
+#define	ip_pdsendm(sright)	((ipc_port_t)((unsigned int)(sright) | 1))
+
+/*
+ * For backwards compatibility, the ipr_soright field can hold
+ * a space pointer.  This is indicated by the low bit of the pointer.
+ * This works because the zone package guarantees that the two low
+ * bits of port and space pointers are zero.
+ */
+
+#define	ipr_spacep(soright)	((unsigned int)(soright) & 1)
+#define	ipr_space(soright)	((ipc_space_t)((unsigned int)(soright) &~ 1))
+#define	ipr_spacem(space)	((ipc_port_t)((unsigned int)(space) | 1))
+#endif	MACH_IPC_COMPAT
+
+/*
+ * Taking the ipc_port_multiple lock grants the privilege
+ * to lock multiple ports at once.  No ports may be locked
+ * when it is taken.
+ */
+
+decl_simple_lock_data(extern, ipc_port_multiple_lock_data)
+
+#define	ipc_port_multiple_lock_init()					\
+		simple_lock_init(&ipc_port_multiple_lock_data)
+
+#define	ipc_port_multiple_lock()					\
+		simple_lock(&ipc_port_multiple_lock_data)
+
+#define	ipc_port_multiple_unlock()					\
+		simple_unlock(&ipc_port_multiple_lock_data)
+
+/*
+ * The port timestamp facility provides timestamps
+ * for port destruction.  It is used to serialize
+ * mach_port_names with port death.
+ */
+
+decl_simple_lock_data(extern, ipc_port_timestamp_lock_data)
+extern ipc_port_timestamp_t ipc_port_timestamp_data;
+
+#define	ipc_port_timestamp_lock_init()					\
+		simple_lock_init(&ipc_port_timestamp_lock_data)
+
+#define	ipc_port_timestamp_lock()					\
+		simple_lock(&ipc_port_timestamp_lock_data)
+
+#define	ipc_port_timestamp_unlock()					\
+		simple_unlock(&ipc_port_timestamp_lock_data)
+
+extern ipc_port_timestamp_t
+ipc_port_timestamp();
+
+/*
+ * Compares two timestamps, and returns TRUE if one
+ * happened before two.  Note that this formulation
+ * works when the timestamp wraps around at 2^32,
+ * as long as one and two aren't too far apart.
+ */
+
+#define	IP_TIMESTAMP_ORDER(one, two)	((int) ((one) - (two)) < 0)
+
+/* Translate a name to a locked port, requiring the given right. */
+#define	ipc_port_translate_receive(space, name, portp)			\
+		ipc_object_translate((space), (name),			\
+				     MACH_PORT_RIGHT_RECEIVE,		\
+				     (ipc_object_t *) (portp))
+
+#define	ipc_port_translate_send(space, name, portp)			\
+		ipc_object_translate((space), (name),			\
+				     MACH_PORT_RIGHT_SEND,		\
+				     (ipc_object_t *) (portp))
+
+extern kern_return_t
+ipc_port_dnrequest(/* ipc_port_t, mach_port_t, ipc_port_t,
+		      ipc_port_request_index_t * */);
+
+extern kern_return_t
+ipc_port_dngrow(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_dncancel(/* ipc_port_t, mach_port_t, ipc_port_request_index_t */);
+
+/* Rename a dead-name request slot in place (port must be locked/active). */
+#define	ipc_port_dnrename(port, index, oname, nname)			\
+MACRO_BEGIN								\
+	ipc_port_request_t ipr, table;					\
+									\
+	assert(ip_active(port));					\
+									\
+	table = port->ip_dnrequests;					\
+	assert(table != IPR_NULL);					\
+									\
+	ipr = &table[index];						\
+	assert(ipr->ipr_name == oname);					\
+									\
+	ipr->ipr_name = nname;						\
+MACRO_END
+
+/* Make a port-deleted request */
+extern void ipc_port_pdrequest(
+	ipc_port_t	port,
+	ipc_port_t	notify,
+	ipc_port_t	*previousp);
+
+/* Make a no-senders request */
+extern void ipc_port_nsrequest(
+	ipc_port_t		port,
+	mach_port_mscount_t	sync,
+	ipc_port_t		notify,
+	ipc_port_t		*previousp);
+
+/* Change a port's queue limit */
+extern void ipc_port_set_qlimit(
+	ipc_port_t		port,
+	mach_port_msgcount_t	qlimit);
+
+/* Set a port's make-send count (port must be locked/active). */
+#define	ipc_port_set_mscount(port, mscount)				\
+MACRO_BEGIN								\
+	assert(ip_active(port));					\
+									\
+	(port)->ip_mscount = (mscount);					\
+MACRO_END
+
+extern struct ipc_mqueue *
+ipc_port_lock_mqueue(/* ipc_port_t */);
+
+extern void
+ipc_port_set_seqno(/* ipc_port_t, mach_port_seqno_t */);
+
+extern void
+ipc_port_clear_receiver(/* ipc_port_t */);
+
+extern void
+ipc_port_init(/* ipc_port_t, ipc_space_t, mach_port_t */);
+
+extern kern_return_t
+ipc_port_alloc(/* ipc_space_t, mach_port_t *, ipc_port_t * */);
+
+extern kern_return_t
+ipc_port_alloc_name(/* ipc_space_t, mach_port_t, ipc_port_t * */);
+
+extern void
+ipc_port_destroy(/* ipc_port_t */);
+
+extern boolean_t
+ipc_port_check_circularity(/* ipc_port_t, ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_lookup_notify(/* ipc_space_t, mach_port_t */);
+
+extern ipc_port_t
+ipc_port_make_send(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_copy_send(/* ipc_port_t */);
+
+extern mach_port_t
+ipc_port_copyout_send(/* ipc_port_t, ipc_space_t */);
+
+extern void
+ipc_port_release_send(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_make_sonce(/* ipc_port_t */);
+
+extern void
+ipc_port_release_sonce(/* ipc_port_t */);
+
+extern void
+ipc_port_release_receive(/* ipc_port_t */);
+
+extern ipc_port_t
+ipc_port_alloc_special(/* ipc_space_t */);
+
+extern void
+ipc_port_dealloc_special(/* ipc_port_t */);
+
+/* Convenience wrappers for the kernel and reply special spaces. */
+#define	ipc_port_alloc_kernel()		\
+		ipc_port_alloc_special(ipc_space_kernel)
+#define	ipc_port_dealloc_kernel(port)	\
+		ipc_port_dealloc_special((port), ipc_space_kernel)
+
+#define	ipc_port_alloc_reply()		\
+		ipc_port_alloc_special(ipc_space_reply)
+#define	ipc_port_dealloc_reply(port)	\
+		ipc_port_dealloc_special((port), ipc_space_reply)
+
+#define	ipc_port_reference(port)	\
+		ipc_object_reference(&(port)->ip_object)
+
+#define	ipc_port_release(port)		\
+		ipc_object_release(&(port)->ip_object)
+
+#if	MACH_IPC_COMPAT
+
+extern kern_return_t
+ipc_port_alloc_compat(/* ipc_space_t, mach_port_t *, ipc_port_t * */);
+
+extern mach_port_t
+ipc_port_copyout_send_compat(/* ipc_port_t, ipc_space_t */);
+
+extern mach_port_t
+ipc_port_copyout_receiver(/* ipc_port_t, ipc_space_t */);
+
+#endif	MACH_IPC_COMPAT
+
+extern void
+ipc_port_print(/* ipc_port_t */);
+
+#if	NORMA_IPC
+
+#define	IP_NORMA_IS_PROXY(port)	((port)->ip_norma_is_proxy)
+
+/*
+ * A proxy never has a real nsrequest, but it always has a fake
+ * nsrequest so that the norma ipc system is notified when there
+ * are no send rights for a proxy.  A fake nsrequest is indicated by
+ * the low bit of the pointer.  This works because the zone package
+ * guarantees that the two low bits of port pointers are zero.
+ */
+
+#define	ip_nsproxyp(nsrequest)	((unsigned int)(nsrequest) & 1)
+#define	ip_nsproxy(nsrequest)	((ipc_port_t)((unsigned int)(nsrequest) &~ 1))
+#define	ip_nsproxym(proxy)	((ipc_port_t)((unsigned int)(proxy) | 1))
+
+#endif	NORMA_IPC
+
+#endif	_IPC_IPC_PORT_H_
diff --git a/ipc/ipc_pset.c b/ipc/ipc_pset.c
new file mode 100644
index 0000000..57705d6
--- /dev/null
+++ b/ipc/ipc_pset.c
@@ -0,0 +1,349 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_pset.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC port sets.
+ */
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/message.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_space.h>
+
+
+/*
+ *	Routine:	ipc_pset_alloc
+ *	Purpose:
+ *		Allocate a port set.
+ *	Conditions:
+ *		Nothing locked.  If successful, the port set is returned
+ *		locked.  (The caller doesn't have a reference.)
+ *	Returns:
+ *		KERN_SUCCESS		The port set is allocated.
+ *		KERN_INVALID_TASK	The space is dead.
+ *		KERN_NO_SPACE		No room for an entry in the space.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_pset_alloc(
+	ipc_space_t	space,
+	mach_port_t	*namep,
+	ipc_pset_t	*psetp)
+{
+	ipc_pset_t pset;
+	mach_port_t name;
+	kern_return_t kr;
+
+	kr = ipc_object_alloc(space, IOT_PORT_SET,
+			      MACH_PORT_TYPE_PORT_SET, 0,
+			      &name, (ipc_object_t *) &pset);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* pset is locked */
+
+	/* set up the embedded IPC target (name + message queue) */
+	ipc_target_init(&pset->ips_target, name);
+
+	*namep = name;
+	*psetp = pset;
+	return KERN_SUCCESS;
+}
+
+/*
+ *	Routine:	ipc_pset_alloc_name
+ *	Purpose:
+ *		Allocate a port set, with a specific name.
+ *	Conditions:
+ *		Nothing locked.  If successful, the port set is returned
+ *		locked.  (The caller doesn't have a reference.)
+ *	Returns:
+ *		KERN_SUCCESS		The port set is allocated.
+ *		KERN_INVALID_TASK	The space is dead.
+ *		KERN_NAME_EXISTS	The name already denotes a right.
+ *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_pset_alloc_name(
+	ipc_space_t	space,
+	mach_port_t	name,
+	ipc_pset_t	*psetp)
+{
+	ipc_pset_t pset;
+	kern_return_t kr;
+
+
+	kr = ipc_object_alloc_name(space, IOT_PORT_SET,
+				   MACH_PORT_TYPE_PORT_SET, 0,
+				   name, (ipc_object_t *) &pset);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* pset is locked */
+
+	/* set up the embedded IPC target (name + message queue) */
+	ipc_target_init(&pset->ips_target, name);
+
+	*psetp = pset;
+	return KERN_SUCCESS;
+}
+
+/*
+ *	Routine:	ipc_pset_add
+ *	Purpose:
+ *		Puts a port into a port set.
+ *		The port set gains a reference.
+ *	Conditions:
+ *		Both port and port set are locked and active.
+ *		The port isn't already in a set.
+ *		The owner of the port set is also receiver for the port.
+ */
+
+void
+ipc_pset_add(
+	ipc_pset_t	pset,
+	ipc_port_t	port)
+{
+	assert(ips_active(pset));
+	assert(ip_active(port));
+	assert(port->ip_pset == IPS_NULL);
+
+	/* retarget the port: queueing now goes through the set */
+	port->ip_pset = pset;
+	port->ip_cur_target = &pset->ips_target;
+	ips_reference(pset);
+
+	imq_lock(&port->ip_messages);
+	imq_lock(&pset->ips_messages);
+
+	/* move messages from port's queue to the port set's queue */
+
+	ipc_mqueue_move(&pset->ips_messages, &port->ip_messages, port);
+	imq_unlock(&pset->ips_messages);
+	assert(ipc_kmsg_queue_empty(&port->ip_messages.imq_messages));
+
+	/* wake up threads waiting to receive from the port */
+
+	ipc_mqueue_changed(&port->ip_messages, MACH_RCV_PORT_CHANGED);
+	assert(ipc_thread_queue_empty(&port->ip_messages.imq_threads));
+	imq_unlock(&port->ip_messages);
+}
+
+/*
+ *	Routine:	ipc_pset_remove
+ *	Purpose:
+ *		Removes a port from a port set.
+ *		The port set loses a reference.
+ *	Conditions:
+ *		Both port and port set are locked.
+ *		The port must be active.
+ */
+
+void
+ipc_pset_remove(
+	ipc_pset_t	pset,
+	ipc_port_t	port)
+{
+	assert(ip_active(port));
+	assert(port->ip_pset == pset);
+
+	/* retarget the port back to its own queue */
+	port->ip_pset = IPS_NULL;
+	port->ip_cur_target = &port->ip_target;
+	ips_release(pset);
+
+	imq_lock(&port->ip_messages);
+	imq_lock(&pset->ips_messages);
+
+	/* move messages from port set's queue to the port's queue */
+
+	ipc_mqueue_move(&port->ip_messages, &pset->ips_messages, port);
+
+	imq_unlock(&pset->ips_messages);
+	imq_unlock(&port->ip_messages);
+}
+
+/*
+ *	Routine:	ipc_pset_move
+ *	Purpose:
+ *		If nset is IPS_NULL, removes port
+ *		from the port set it is in.  Otherwise, adds
+ *		port to nset, removing it from any set
+ *		it might already be in.
+ *	Conditions:
+ *		The space is read-locked (and is unlocked on return).
+ *	Returns:
+ *		KERN_SUCCESS		Moved the port.
+ *		KERN_NOT_IN_SET		nset is null and port isn't in a set.
+ */
+
+kern_return_t
+ipc_pset_move(
+	ipc_space_t	space,
+	ipc_port_t	port,
+	ipc_pset_t	nset)
+{
+	ipc_pset_t oset;
+
+	/*
+	 *	While we've got the space locked, it holds refs for
+	 *	the port and nset (because of the entries).  Also,
+	 *	they must be alive.  While we've got port locked, it
+	 *	holds a ref for oset, which might not be alive.
+	 */
+
+	ip_lock(port);
+	assert(ip_active(port));
+
+	oset = port->ip_pset;
+
+	if (oset == nset) {
+		/* the port is already in the new set:  a noop */
+
+		is_read_unlock(space);
+	} else if (oset == IPS_NULL) {
+		/* just add port to the new set */
+
+		ips_lock(nset);
+		assert(ips_active(nset));
+		is_read_unlock(space);
+
+		ipc_pset_add(nset, port);
+
+		ips_unlock(nset);
+	} else if (nset == IPS_NULL) {
+		/* just remove port from the old set */
+
+		is_read_unlock(space);
+		ips_lock(oset);
+
+		ipc_pset_remove(oset, port);
+
+		if (ips_active(oset))
+			ips_unlock(oset);
+		else {
+			/* oset died; dropping our ref may free it */
+			ips_check_unlock(oset);
+			oset = IPS_NULL; /* trigger KERN_NOT_IN_SET */
+		}
+	} else {
+		/* atomically move port from oset to nset */
+
+		/* lock both sets in address order to avoid deadlock */
+		if (oset < nset) {
+			ips_lock(oset);
+			ips_lock(nset);
+		} else {
+			ips_lock(nset);
+			ips_lock(oset);
+		}
+
+		is_read_unlock(space);
+		assert(ips_active(nset));
+
+		ipc_pset_remove(oset, port);
+		ipc_pset_add(nset, port);
+
+		ips_unlock(nset);
+		ips_check_unlock(oset);	/* KERN_NOT_IN_SET not a possibility */
+	}
+
+	ip_unlock(port);
+
+	return (((nset == IPS_NULL) && (oset == IPS_NULL)) ?
+		KERN_NOT_IN_SET : KERN_SUCCESS);
+}
+
+/*
+ *	Routine:	ipc_pset_destroy
+ *	Purpose:
+ *		Destroys a port_set.
+ *
+ *		Doesn't remove members from the port set;
+ *		that happens lazily.  As members are removed,
+ *		their messages are removed from the queue.
+ *	Conditions:
+ *		The port_set is locked and alive.
+ *		The caller has a reference, which is consumed.
+ *		Afterwards, the port_set is unlocked and dead.
+ */
+
+void
+ipc_pset_destroy(
+	ipc_pset_t	pset)
+{
+	assert(ips_active(pset));
+
+	/* mark dead first, so member ports see it lazily */
+	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;
+
+	/* wake any receivers blocked on the set's queue */
+	imq_lock(&pset->ips_messages);
+	ipc_mqueue_changed(&pset->ips_messages, MACH_RCV_PORT_DIED);
+	imq_unlock(&pset->ips_messages);
+
+	/* Common destruction for the IPC target. */
+	ipc_target_terminate(&pset->ips_target);
+
+	ips_release(pset);	/* consume the ref our caller gave us */
+	ips_check_unlock(pset);
+}
+
+#include <mach_kdb.h>
+
+
+#if	MACH_KDB
+#define	printf	kdbprintf
+
+/*
+ *	Routine:	ipc_pset_print
+ *	Purpose:
+ *		Pretty-print a port set for kdb.
+ */
+
+void
+ipc_pset_print(
+	ipc_pset_t	pset)
+{
+	extern int indent;
+
+	printf("pset 0x%x\n", pset);
+
+	indent += 2;
+
+	ipc_object_print(&pset->ips_object);
+	iprintf("local_name = 0x%x\n", pset->ips_local_name);
+	iprintf("kmsgs = 0x%x", pset->ips_messages.imq_messages.ikmq_base);
+	printf(",rcvrs = 0x%x\n", pset->ips_messages.imq_threads.ithq_base);
+
+	indent -=2;
+}
+
+#endif	MACH_KDB
diff --git a/ipc/ipc_pset.h b/ipc/ipc_pset.h
new file mode 100644
index 0000000..23e3e25
--- /dev/null
+++ b/ipc/ipc_pset.h
@@ -0,0 +1,95 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_pset.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for port sets.
+ */
+
+#ifndef _IPC_IPC_PSET_H_
+#define _IPC_IPC_PSET_H_
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_mqueue.h>
+#include "ipc_target.h"
+
+/*
+ * A port set is simply an ipc_target (object header, local name,
+ * and message queue -- see the ips_* shorthands below); it carries
+ * no additional fields of its own.
+ */
+typedef struct ipc_pset {
+	struct ipc_target ips_target;
+
+} *ipc_pset_t;
+
+/* Field shorthands into the embedded ipc_target. */
+#define	ips_object		ips_target.ipt_object
+#define	ips_local_name		ips_target.ipt_name
+#define	ips_messages		ips_target.ipt_messages
+#define	ips_references		ips_object.io_references
+
+#define	IPS_NULL		((ipc_pset_t) IO_NULL)
+
+/* Liveness, locking, and refcount ops delegate to the generic IPC object. */
+#define	ips_active(pset)	io_active(&(pset)->ips_object)
+#define	ips_lock(pset)		io_lock(&(pset)->ips_object)
+#define	ips_lock_try(pset)	io_lock_try(&(pset)->ips_object)
+#define	ips_unlock(pset)	io_unlock(&(pset)->ips_object)
+#define	ips_check_unlock(pset)	io_check_unlock(&(pset)->ips_object)
+#define	ips_reference(pset)	io_reference(&(pset)->ips_object)
+#define	ips_release(pset)	io_release(&(pset)->ips_object)
+
+extern kern_return_t
+ipc_pset_alloc(/* ipc_space_t, mach_port_t *, ipc_pset_t * */);
+
+extern kern_return_t
+ipc_pset_alloc_name(/* ipc_space_t, mach_port_t, ipc_pset_t * */);
+
+extern void
+ipc_pset_add(/* ipc_pset_t, ipc_port_t */);
+
+extern void
+ipc_pset_remove(/* ipc_pset_t, ipc_port_t */);
+
+extern kern_return_t
+ipc_pset_move(/* ipc_space_t, mach_port_t, mach_port_t */);
+
+extern void
+ipc_pset_destroy(/* ipc_pset_t */);
+
+#define ipc_pset_reference(pset) \
+ ipc_object_reference(&(pset)->ips_object)
+
+#define ipc_pset_release(pset) \
+ ipc_object_release(&(pset)->ips_object)
+
+extern void
+ipc_pset_print(/* ipc_pset_t */);
+
+#endif _IPC_IPC_PSET_H_
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
new file mode 100644
index 0000000..54cd99f
--- /dev/null
+++ b/ipc/ipc_right.c
@@ -0,0 +1,2762 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_right.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC capabilities.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_right.h>
+#include <ipc/ipc_notify.h>
+
+
+
+/*
+ * Routine: ipc_right_lookup_write
+ * Purpose:
+ * Finds an entry in a space, given the name.
+ * Conditions:
+ * Nothing locked. If successful, the space is write-locked.
+ * Returns:
+ * KERN_SUCCESS Found an entry.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ */
+
+kern_return_t
+ipc_right_lookup_write(
+	ipc_space_t	space,
+	mach_port_t	name,
+	ipc_entry_t	*entryp)
+{
+	ipc_entry_t entry;
+
+	assert(space != IS_NULL);
+
+	is_write_lock(space);
+
+	/* A dead space has no usable entries; drop the lock on failure. */
+	if (!space->is_active) {
+		is_write_unlock(space);
+		return KERN_INVALID_TASK;
+	}
+
+	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
+		is_write_unlock(space);
+		return KERN_INVALID_NAME;
+	}
+
+	/* Success: the space stays write-locked for the caller. */
+	*entryp = entry;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_reverse
+ * Purpose:
+ * Translate (space, object) -> (name, entry).
+ * Only finds send/receive rights.
+ * Returns TRUE if an entry is found; if so,
+ * the object is locked and active.
+ * Conditions:
+ * The space must be locked (read or write) and active.
+ * Nothing else locked.
+ */
+
+boolean_t
+ipc_right_reverse(
+	ipc_space_t	space,
+	ipc_object_t	object,
+	mach_port_t	*namep,
+	ipc_entry_t	*entryp)
+{
+	ipc_port_t port;
+	mach_port_t name;
+	ipc_entry_t entry;
+
+	/* would switch on io_otype to handle multiple types of object */
+
+	assert(space->is_active);
+	assert(io_otype(object) == IOT_PORT);
+
+	port = (ipc_port_t) object;
+
+	ip_lock(port);
+	if (!ip_active(port)) {
+		/* Dead port: no right can name it.  Port unlocked on FALSE. */
+		ip_unlock(port);
+
+		return FALSE;
+	}
+
+	/*
+	 *	Receive-right fast path: a port received in this space
+	 *	records its name directly, so no hash lookup is needed.
+	 *	On TRUE returns, the port stays locked (and active).
+	 */
+	if (port->ip_receiver == space) {
+		name = port->ip_receiver_name;
+		assert(name != MACH_PORT_NULL);
+
+		entry = ipc_entry_lookup(space, name);
+
+		assert(entry != IE_NULL);
+		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
+		assert(port == (ipc_port_t) entry->ie_object);
+
+		*namep = name;
+		*entryp = entry;
+		return TRUE;
+	}
+
+	/* Otherwise look for a pure send right via the reverse hash table. */
+	if (ipc_hash_lookup(space, (ipc_object_t) port, namep, entryp)) {
+		assert((entry = *entryp) != IE_NULL);
+		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
+		assert(port == (ipc_port_t) entry->ie_object);
+
+		return TRUE;
+	}
+
+	ip_unlock(port);
+	return FALSE;
+}
+
+/*
+ * Routine: ipc_right_dnrequest
+ * Purpose:
+ * Make a dead-name request, returning the previously
+ * registered send-once right. If notify is IP_NULL,
+ * just cancels the previously registered request.
+ *
+ * This interacts with the IE_BITS_COMPAT, because they
+ * both use ie_request. If this is a compat entry, then
+ * previous always gets IP_NULL. If notify is IP_NULL,
+ * then the entry remains a compat entry. Otherwise
+ * the real dead-name request is registered and the entry
+ * is no longer a compat entry.
+ * Conditions:
+ * Nothing locked. May allocate memory.
+ * Only consumes/returns refs if successful.
+ * Returns:
+ * KERN_SUCCESS Made/canceled dead-name request.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Name doesn't exist in space.
+ * KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
+ * KERN_INVALID_ARGUMENT Name denotes dead name, but
+ * immediate is FALSE or notify is IP_NULL.
+ * KERN_UREFS_OVERFLOW Name denotes dead name, but
+ * generating immediate notif. would overflow urefs.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_right_dnrequest(
+	ipc_space_t	space,
+	mach_port_t	name,
+	boolean_t	immediate,
+	ipc_port_t	notify,
+	ipc_port_t	*previousp)
+{
+	ipc_port_t previous;
+
+	/* Loop: restarted only when the port's dnrequest table must grow. */
+	for (;;) {
+		ipc_entry_t entry;
+		ipc_entry_bits_t bits;
+		kern_return_t kr;
+
+		kr = ipc_right_lookup_write(space, name, &entry);
+		if (kr != KERN_SUCCESS)
+			return kr;
+		/* space is write-locked and active */
+
+		bits = entry->ie_bits;
+		if (bits & MACH_PORT_TYPE_PORT_RIGHTS) {
+			ipc_port_t port;
+			ipc_port_request_index_t request;
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			if (!ipc_right_check(space, port, name, entry)) {
+				/* port is locked and active */
+
+				/* Cancellation request: just remove any
+				   previously registered soright. */
+				if (notify == IP_NULL) {
+#if	MACH_IPC_COMPAT
+					if (bits & IE_BITS_COMPAT) {
+						assert(entry->ie_request != 0);
+
+						previous = IP_NULL;
+					} else
+#endif	MACH_IPC_COMPAT
+					previous = ipc_right_dncancel_macro(
+						space, port, name, entry);
+
+					ip_unlock(port);
+					is_write_unlock(space);
+					break;
+				}
+
+				/*
+				 *	If a registered soright exists,
+				 *	want to atomically switch with it.
+				 *	If ipc_port_dncancel finds us a
+				 *	soright, then the following
+				 *	ipc_port_dnrequest will reuse
+				 *	that slot, so we are guaranteed
+				 *	not to unlock and retry.
+				 */
+
+				previous = ipc_right_dncancel_macro(space,
+						port, name, entry);
+
+				kr = ipc_port_dnrequest(port, name, notify,
+							&request);
+				if (kr != KERN_SUCCESS) {
+					/* Table full: grow it (unlocked)
+					   and retry from the top. */
+					assert(previous == IP_NULL);
+					is_write_unlock(space);
+
+					kr = ipc_port_dngrow(port);
+					/* port is unlocked */
+					if (kr != KERN_SUCCESS)
+						return kr;
+
+					continue;
+				}
+
+				assert(request != 0);
+				ip_unlock(port);
+
+				/* Record the slot; a real dnrequest clears
+				   compat status on the entry. */
+				entry->ie_request = request;
+#if	MACH_IPC_COMPAT
+				entry->ie_bits = bits &~ IE_BITS_COMPAT;
+#endif	MACH_IPC_COMPAT
+				is_write_unlock(space);
+				break;
+			}
+
+#if	MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT) {
+				is_write_unlock(space);
+				return KERN_INVALID_NAME;
+			}
+#endif	MACH_IPC_COMPAT
+
+			/* ipc_right_check turned the entry into a dead name */
+			bits = entry->ie_bits;
+			assert(bits & MACH_PORT_TYPE_DEAD_NAME);
+		}
+
+		/* Dead name with immediate notification requested:
+		   send the notification now, taking an extra uref. */
+		if ((bits & MACH_PORT_TYPE_DEAD_NAME) &&
+		    immediate && (notify != IP_NULL)) {
+			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
+
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+			assert(urefs > 0);
+
+			if (MACH_PORT_UREFS_OVERFLOW(urefs, 1)) {
+				is_write_unlock(space);
+				return KERN_UREFS_OVERFLOW;
+			}
+
+			entry->ie_bits = bits + 1; /* increment urefs */
+			is_write_unlock(space);
+
+			ipc_notify_dead_name(notify, name);
+			previous = IP_NULL;
+			break;
+		}
+
+		is_write_unlock(space);
+		if (bits & MACH_PORT_TYPE_PORT_OR_DEAD)
+			return KERN_INVALID_ARGUMENT;
+		else
+			return KERN_INVALID_RIGHT;
+	}
+
+	*previousp = previous;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_dncancel
+ * Purpose:
+ * Cancel a dead-name request and return the send-once right.
+ * Afterwards, entry->ie_request == 0.
+ * Conditions:
+ * The space must be write-locked; the port must be locked.
+ * The port must be active; the space doesn't have to be.
+ */
+
+ipc_port_t
+ipc_right_dncancel(
+	ipc_space_t	space,
+	ipc_port_t	port,
+	mach_port_t	name,
+	ipc_entry_t	entry)
+{
+	ipc_port_t dnrequest;
+
+	assert(ip_active(port));
+	assert(port == (ipc_port_t) entry->ie_object);
+
+	/* Remove the registration from the port and clear the entry's slot. */
+	dnrequest = ipc_port_dncancel(port, name, entry->ie_request);
+	entry->ie_request = 0;
+
+#if	MACH_IPC_COMPAT
+	assert(!ipr_spacep(dnrequest) == !(entry->ie_bits & IE_BITS_COMPAT));
+
+	/* if this is actually a space ptr, just release the ref */
+
+	if (entry->ie_bits & IE_BITS_COMPAT) {
+		assert(space == ipr_space(dnrequest));
+
+		is_release(space);
+		dnrequest = IP_NULL;
+	}
+#endif	MACH_IPC_COMPAT
+
+	/* Returns the previously registered send-once right (or IP_NULL). */
+	return dnrequest;
+}
+
+/*
+ * Routine: ipc_right_inuse
+ * Purpose:
+ * Check if an entry is being used.
+ * Returns TRUE if it is.
+ * Conditions:
+ * The space is write-locked and active.
+ * It is unlocked if the entry is inuse.
+ */
+
+boolean_t
+ipc_right_inuse(space, name, entry)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_entry_t entry;
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+#if	MACH_IPC_COMPAT
+		mach_port_type_t type = IE_BITS_TYPE(bits);
+
+		/*
+		 *	There is yet hope.  If the port has died, we
+		 *	must clean up the entry so it's as good as new.
+		 */
+
+		if ((bits & IE_BITS_COMPAT) &&
+		    ((type == MACH_PORT_TYPE_SEND) ||
+		     (type == MACH_PORT_TYPE_SEND_ONCE))) {
+			ipc_port_t port;
+			boolean_t active;
+
+			assert(IE_BITS_UREFS(bits) > 0);
+			assert(entry->ie_request != 0);
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			/* Only need the port lock to sample liveness. */
+			ip_lock(port);
+			active = ip_active(port);
+			ip_unlock(port);
+
+			if (!active) {
+				if (type == MACH_PORT_TYPE_SEND) {
+					/* clean up msg-accepted request */
+
+					if (bits & IE_BITS_MAREQUEST)
+						ipc_marequest_cancel(
+							space, name);
+
+					ipc_hash_delete(
+						space, (ipc_object_t) port,
+						name, entry);
+				} else {
+					assert(IE_BITS_UREFS(bits) == 1);
+					assert(!(bits & IE_BITS_MAREQUEST));
+				}
+
+				ipc_port_release(port);
+
+				/* Reset the entry so it reads as unused. */
+				entry->ie_request = 0;
+				entry->ie_object = IO_NULL;
+				entry->ie_bits &= ~IE_BITS_RIGHT_MASK;
+
+				return FALSE;
+			}
+		}
+#endif	MACH_IPC_COMPAT
+
+		/* In use: unlock the space, per this routine's contract. */
+		is_write_unlock(space);
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+/*
+ * Routine: ipc_right_check
+ * Purpose:
+ * Check if the port has died. If it has,
+ * clean up the entry and return TRUE.
+ * Conditions:
+ * The space is write-locked; the port is not locked.
+ * If returns FALSE, the port is also locked and active.
+ * Otherwise, entry is converted to a dead name, freeing
+ * a reference to port.
+ *
+ * [MACH_IPC_COMPAT] If the port is dead, and this is a
+ * compat mode entry, then the port reference is released
+ * and the entry is destroyed. The call returns TRUE,
+ * and the space is left locked.
+ */
+
+boolean_t
+ipc_right_check(space, port, name, entry)
+	ipc_space_t space;
+	ipc_port_t port;
+	mach_port_t name;
+	ipc_entry_t entry;
+{
+	ipc_entry_bits_t bits;
+
+	assert(space->is_active);
+	assert(port == (ipc_port_t) entry->ie_object);
+
+	/* If the port is still alive, return FALSE with it locked. */
+	ip_lock(port);
+	if (ip_active(port))
+		return FALSE;
+	ip_unlock(port);
+
+	/* this was either a pure send right or a send-once right */
+
+	bits = entry->ie_bits;
+	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
+	assert(IE_BITS_UREFS(bits) > 0);
+
+	if (bits & MACH_PORT_TYPE_SEND) {
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+
+		/* clean up msg-accepted request */
+
+		if (bits & IE_BITS_MAREQUEST) {
+			bits &= ~IE_BITS_MAREQUEST;
+
+			ipc_marequest_cancel(space, name);
+		}
+
+		/* Send rights live in the reverse hash; remove this one. */
+		ipc_hash_delete(space, (ipc_object_t) port, name, entry);
+	} else {
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+	}
+
+	/* The entry's reference on the dead port is given up. */
+	ipc_port_release(port);
+
+#if	MACH_IPC_COMPAT
+	if (bits & IE_BITS_COMPAT) {
+		assert(entry->ie_request != 0);
+		entry->ie_request = 0;
+
+		/* Compat entries are destroyed outright, not converted. */
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+
+		return TRUE;
+	}
+#endif	MACH_IPC_COMPAT
+
+	/* convert entry to dead name */
+
+	bits = (bits &~ IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;
+
+	/* A pending dead-name request is traded for an extra uref. */
+	if (entry->ie_request != 0) {
+		assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+
+		entry->ie_request = 0;
+		bits++;		/* increment urefs */
+	}
+
+	entry->ie_bits = bits;
+	entry->ie_object = IO_NULL;
+
+	return TRUE;
+}
+
+/*
+ * Routine: ipc_right_clean
+ * Purpose:
+ * Cleans up an entry in a dead space.
+ * The entry isn't deallocated or removed
+ * from reverse hash tables.
+ * Conditions:
+ * The space is dead and unlocked.
+ */
+
+void
+ipc_right_clean(
+	ipc_space_t	space,
+	mach_port_t	name,
+	ipc_entry_t	entry)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(!space->is_active);
+
+	/*
+	 *	We can't clean up IE_BITS_MAREQUEST when the space is dead.
+	 *	This is because ipc_marequest_destroy can't turn off
+	 *	the bit if the space is dead.  Hence, it might be on
+	 *	even though the marequest has been destroyed.  It's OK
+	 *	not to cancel the marequest, because ipc_marequest_destroy
+	 *	cancels for us if the space is dead.
+	 *
+	 *	IE_BITS_COMPAT/ipc_right_dncancel doesn't have this
+	 *	problem, because we check that the port is active.  If
+	 *	we didn't cancel IE_BITS_COMPAT, ipc_port_destroy
+	 *	would still work, but dead space refs would accumulate
+	 *	in ip_dnrequests.  They would use up slots in
+	 *	ip_dnrequests and keep the spaces from being freed.
+	 */
+
+	switch (type) {
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		/* Nothing to release: dead names hold no object. */
+		assert(entry->ie_request == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		break;
+
+	    case MACH_PORT_TYPE_PORT_SET: {
+		ipc_pset_t pset = (ipc_pset_t) entry->ie_object;
+
+		assert(entry->ie_request == 0);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		assert(pset != IPS_NULL);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+
+		ipc_pset_destroy(pset);	/* consumes ref, unlocks */
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND:
+	    case MACH_PORT_TYPE_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port = (ipc_port_t) entry->ie_object;
+		ipc_port_t dnrequest;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		assert(port != IP_NULL);
+		ip_lock(port);
+
+		if (!ip_active(port)) {
+			/* Port already dead: just drop the entry's ref. */
+			ip_release(port);
+			ip_check_unlock(port);
+			break;
+		}
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+
+		/* Dropping the last send right arms a no-senders notif. */
+		if (type & MACH_PORT_TYPE_SEND) {
+			assert(port->ip_srights > 0);
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+		}
+
+		if (type & MACH_PORT_TYPE_RECEIVE) {
+			assert(port->ip_receiver_name == name);
+			assert(port->ip_receiver == space);
+
+			ipc_port_clear_receiver(port);
+			ipc_port_destroy(port); /* consumes our ref, unlocks */
+		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
+			assert(port->ip_sorights > 0);
+			ip_unlock(port);
+
+			ipc_notify_send_once(port); /* consumes our ref */
+		} else {
+			assert(port->ip_receiver != space);
+
+			ip_release(port);
+			ip_unlock(port); /* port is active */
+		}
+
+		/* Notifications are sent with no locks held. */
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_clean: strange type");
+#else
+		panic("ipc_right_clean: strange type");
+#endif
+	}
+}
+
+/*
+ * Routine: ipc_right_destroy
+ * Purpose:
+ * Destroys an entry in a space.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS The entry was destroyed.
+ */
+
+kern_return_t
+ipc_right_destroy(
+	ipc_space_t	space,
+	mach_port_t	name,
+	ipc_entry_t	entry)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(space->is_active);
+
+	switch (type) {
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		/* Dead names hold nothing; just free the entry. */
+		assert(entry->ie_request == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+		break;
+
+	    case MACH_PORT_TYPE_PORT_SET: {
+		ipc_pset_t pset = (ipc_pset_t) entry->ie_object;
+
+		assert(entry->ie_request == 0);
+		assert(pset != IPS_NULL);
+
+		/* Free the entry first, then destroy the set itself. */
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+		is_write_unlock(space);
+
+		ipc_pset_destroy(pset); /* consumes ref, unlocks */
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND:
+	    case MACH_PORT_TYPE_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port = (ipc_port_t) entry->ie_object;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+		ipc_port_t dnrequest;
+
+		assert(port != IP_NULL);
+
+		if (bits & IE_BITS_MAREQUEST) {
+			assert(type & MACH_PORT_TYPE_SEND_RECEIVE);
+
+			ipc_marequest_cancel(space, name);
+		}
+
+		/* Pure send rights live in the reverse hash table. */
+		if (type == MACH_PORT_TYPE_SEND)
+			ipc_hash_delete(space, (ipc_object_t) port,
+					name, entry);
+
+		ip_lock(port);
+
+		if (!ip_active(port)) {
+			/* Port already died; just reclaim the entry. */
+			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
+
+			ip_release(port);
+			ip_check_unlock(port);
+
+			entry->ie_request = 0;
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+
+#if	MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				return KERN_INVALID_NAME;
+#endif	MACH_IPC_COMPAT
+			break;
+		}
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+
+		/* Dropping the last send right arms a no-senders notif. */
+		if (type & MACH_PORT_TYPE_SEND) {
+			assert(port->ip_srights > 0);
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+		}
+
+		if (type & MACH_PORT_TYPE_RECEIVE) {
+			assert(ip_active(port));
+			assert(port->ip_receiver == space);
+
+			ipc_port_clear_receiver(port);
+			ipc_port_destroy(port); /* consumes our ref, unlocks */
+		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
+			assert(port->ip_sorights > 0);
+			ip_unlock(port);
+
+			ipc_notify_send_once(port); /* consumes our ref */
+		} else {
+			assert(port->ip_receiver != space);
+
+			ip_release(port);
+			ip_unlock(port);
+		}
+
+		/* Notifications are sent with no locks held. */
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_destroy: strange type");
+#else
+		panic("ipc_right_destroy: strange type");
+#endif
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_right_dealloc
+ * Purpose:
+ * Releases a send/send-once/dead-name user ref.
+ * Like ipc_right_delta with a delta of -1,
+ * but looks at the entry to determine the right.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS A user ref was released.
+ * KERN_INVALID_RIGHT Entry has wrong type.
+ * KERN_INVALID_NAME [MACH_IPC_COMPAT]
+ * Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_dealloc(space, name, entry)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_entry_t entry;
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(space->is_active);
+
+	switch (type) {
+	    case MACH_PORT_TYPE_DEAD_NAME: {
+	    dead_name:
+		/* Also reached via goto when ipc_right_check below
+		   turns a send/send-once entry into a dead name. */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+		assert(entry->ie_request == 0);
+		assert(entry->ie_object == IO_NULL);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		if (IE_BITS_UREFS(bits) == 1)
+			ipc_entry_dealloc(space, name, entry);
+		else
+			entry->ie_bits = bits-1; /* decrement urefs */
+
+		is_write_unlock(space);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port, dnrequest;
+
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			/* Port died: the entry is now a dead name. */
+#if	MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+#endif	MACH_IPC_COMPAT
+
+			bits = entry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+			goto dead_name;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_sorights > 0);
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+		ip_unlock(port);
+
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+		is_write_unlock(space);
+
+		/* A deallocated send-once right generates a notification. */
+		ipc_notify_send_once(port);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			/* Port died: the entry is now a dead name. */
+#if	MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+#endif	MACH_IPC_COMPAT
+
+			bits = entry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+			goto dead_name;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_srights > 0);
+
+		if (IE_BITS_UREFS(bits) == 1) {
+			/* Last uref: drop the send right itself. */
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			ipc_hash_delete(space, (ipc_object_t) port,
+					name, entry);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			ip_release(port);
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+		} else
+			entry->ie_bits = bits-1; /* decrement urefs */
+
+		ip_unlock(port); /* even if dropped a ref, port is active */
+		is_write_unlock(space);
+
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND_RECEIVE: {
+		ipc_port_t port;
+		ipc_port_t nsrequest = IP_NULL;
+		mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+		assert(port->ip_srights > 0);
+
+		if (IE_BITS_UREFS(bits) == 1) {
+			/* Last send uref: entry keeps only the receive right. */
+			if (--port->ip_srights == 0) {
+				nsrequest = port->ip_nsrequest;
+				if (nsrequest != IP_NULL) {
+					port->ip_nsrequest = IP_NULL;
+					mscount = port->ip_mscount;
+				}
+			}
+
+			entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|
+						  MACH_PORT_TYPE_SEND);
+		} else
+			entry->ie_bits = bits-1; /* decrement urefs */
+
+		ip_unlock(port);
+		is_write_unlock(space);
+
+		if (nsrequest != IP_NULL)
+			ipc_notify_no_senders(nsrequest, mscount);
+		break;
+	    }
+
+	    default:
+		is_write_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	return KERN_SUCCESS;
+
+#if	MACH_IPC_COMPAT
+    invalid_name:
+	is_write_unlock(space);
+	return KERN_INVALID_NAME;
+#endif	MACH_IPC_COMPAT
+}
+
+/*
+ * Routine: ipc_right_delta
+ * Purpose:
+ * Modifies the user-reference count for a right.
+ * May deallocate the right, if the count goes to zero.
+ * Conditions:
+ * The space is write-locked, and is unlocked upon return.
+ * The space must be active.
+ * Returns:
+ * KERN_SUCCESS Count was modified.
+ * KERN_INVALID_RIGHT Entry has wrong type.
+ * KERN_INVALID_VALUE Bad delta for the right.
+ * KERN_UREFS_OVERFLOW OK delta, except would overflow.
+ * KERN_INVALID_NAME [MACH_IPC_COMPAT]
+ * Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_delta(space, name, entry, right, delta)
+ ipc_space_t space;
+ mach_port_t name;
+ ipc_entry_t entry;
+ mach_port_right_t right;
+ mach_port_delta_t delta;
+{
+ ipc_entry_bits_t bits = entry->ie_bits;
+
+ assert(space->is_active);
+ assert(right < MACH_PORT_RIGHT_NUMBER);
+
+ /* Rights-specific restrictions and operations. */
+
+ switch (right) {
+ case MACH_PORT_RIGHT_PORT_SET: {
+ ipc_pset_t pset;
+
+ if ((bits & MACH_PORT_TYPE_PORT_SET) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
+ assert(IE_BITS_UREFS(bits) == 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(entry->ie_request == 0);
+
+ if (delta == 0)
+ goto success;
+
+ if (delta != -1)
+ goto invalid_value;
+
+ pset = (ipc_pset_t) entry->ie_object;
+ assert(pset != IPS_NULL);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+
+ ips_lock(pset);
+ assert(ips_active(pset));
+ is_write_unlock(space);
+
+ ipc_pset_destroy(pset); /* consumes ref, unlocks */
+ break;
+ }
+
+ case MACH_PORT_RIGHT_RECEIVE: {
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+
+ if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+ goto invalid_right;
+
+ if (delta == 0)
+ goto success;
+
+ if (delta != -1)
+ goto invalid_value;
+
+ if (bits & IE_BITS_MAREQUEST) {
+ bits &= ~IE_BITS_MAREQUEST;
+
+ ipc_marequest_cancel(space, name);
+ }
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ /*
+ * The port lock is needed for ipc_right_dncancel;
+ * otherwise, we wouldn't have to take the lock
+ * until just before dropping the space lock.
+ */
+
+ ip_lock(port);
+ assert(ip_active(port));
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT) {
+ assert(entry->ie_request != 0);
+ dnrequest = ipc_right_dncancel(space, port,
+ name, entry);
+ assert(dnrequest == IP_NULL);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ } else
+#endif MACH_IPC_COMPAT
+ if (bits & MACH_PORT_TYPE_SEND) {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+ assert(port->ip_srights > 0);
+
+ /*
+ * The remaining send right turns into a
+ * dead name. Notice we don't decrement
+ * ip_srights, generate a no-senders notif,
+ * or use ipc_right_dncancel, because the
+ * port is destroyed "first".
+ */
+
+ bits &= ~IE_BITS_TYPE_MASK;
+ bits |= MACH_PORT_TYPE_DEAD_NAME;
+
+ if (entry->ie_request != 0) {
+ entry->ie_request = 0;
+ bits++; /* increment urefs */
+ }
+
+ entry->ie_bits = bits;
+ entry->ie_object = IO_NULL;
+ } else {
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+ assert(IE_BITS_UREFS(bits) == 0);
+
+ dnrequest = ipc_right_dncancel_macro(space, port,
+ name, entry);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ }
+ is_write_unlock(space);
+
+ ipc_port_clear_receiver(port);
+ ipc_port_destroy(port); /* consumes ref, unlocks */
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_SEND_ONCE: {
+ ipc_port_t port, dnrequest;
+
+ if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(bits) == 1);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+
+ if ((delta > 0) || (delta < -1))
+ goto invalid_value;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif MACH_IPC_COMPAT
+
+ assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_sorights > 0);
+
+ if (delta == 0) {
+ ip_unlock(port);
+ goto success;
+ }
+
+ dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+ ip_unlock(port);
+
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ is_write_unlock(space);
+
+ ipc_notify_send_once(port);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_DEAD_NAME: {
+ mach_port_urefs_t urefs;
+
+ if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+ ipc_port_t port;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (!ipc_right_check(space, port, name, entry)) {
+ /* port is locked and active */
+ ip_unlock(port);
+ goto invalid_right;
+ }
+
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif MACH_IPC_COMPAT
+
+ bits = entry->ie_bits;
+ } else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0)
+ goto invalid_right;
+
+ assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+ assert(IE_BITS_UREFS(bits) > 0);
+ assert((bits & IE_BITS_MAREQUEST) == 0);
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_request == 0);
+
+ urefs = IE_BITS_UREFS(bits);
+ if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+ goto invalid_value;
+ if (MACH_PORT_UREFS_OVERFLOW(urefs, delta))
+ goto urefs_overflow;
+
+ if ((urefs + delta) == 0)
+ ipc_entry_dealloc(space, name, entry);
+ else
+ entry->ie_bits = bits + delta;
+
+ is_write_unlock(space);
+ break;
+ }
+
+ case MACH_PORT_RIGHT_SEND: {
+ mach_port_urefs_t urefs;
+ ipc_port_t port;
+ ipc_port_t dnrequest = IP_NULL;
+ ipc_port_t nsrequest = IP_NULL;
+ mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+ if ((bits & MACH_PORT_TYPE_SEND) == 0)
+ goto invalid_right;
+
+ /* maximum urefs for send is MACH_PORT_UREFS_MAX-1 */
+
+ urefs = IE_BITS_UREFS(bits);
+ if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta))
+ goto invalid_value;
+ if (MACH_PORT_UREFS_OVERFLOW(urefs+1, delta))
+ goto urefs_overflow;
+
+ port = (ipc_port_t) entry->ie_object;
+ assert(port != IP_NULL);
+
+ if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+ if (bits & IE_BITS_COMPAT)
+ goto invalid_name;
+#endif MACH_IPC_COMPAT
+
+ assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
+ goto invalid_right;
+ }
+ /* port is locked and active */
+
+ assert(port->ip_srights > 0);
+
+ if ((urefs + delta) == 0) {
+ if (--port->ip_srights == 0) {
+ nsrequest = port->ip_nsrequest;
+ if (nsrequest != IP_NULL) {
+ port->ip_nsrequest = IP_NULL;
+ mscount = port->ip_mscount;
+ }
+ }
+
+ if (bits & MACH_PORT_TYPE_RECEIVE) {
+ assert(port->ip_receiver_name == name);
+ assert(port->ip_receiver == space);
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND_RECEIVE);
+
+ entry->ie_bits = bits &~ (IE_BITS_UREFS_MASK|
+ MACH_PORT_TYPE_SEND);
+ } else {
+ assert(IE_BITS_TYPE(bits) ==
+ MACH_PORT_TYPE_SEND);
+
+ dnrequest = ipc_right_dncancel_macro(
+ space, port, name, entry);
+
+ ipc_hash_delete(space, (ipc_object_t) port,
+ name, entry);
+
+ if (bits & IE_BITS_MAREQUEST)
+ ipc_marequest_cancel(space, name);
+
+ ip_release(port);
+ entry->ie_object = IO_NULL;
+ ipc_entry_dealloc(space, name, entry);
+ }
+ } else
+ entry->ie_bits = bits + delta;
+
+ ip_unlock(port); /* even if dropped a ref, port is active */
+ is_write_unlock(space);
+
+ if (nsrequest != IP_NULL)
+ ipc_notify_no_senders(nsrequest, mscount);
+
+ if (dnrequest != IP_NULL)
+ ipc_notify_port_deleted(dnrequest, name);
+ break;
+ }
+
+ default:
+#if MACH_ASSERT
+ assert(!"ipc_right_delta: strange right");
+#else
+ panic("ipc_right_delta: strange right");
+#endif
+ }
+
+ return KERN_SUCCESS;
+
+ success:
+ is_write_unlock(space);
+ return KERN_SUCCESS;
+
+ invalid_right:
+ is_write_unlock(space);
+ return KERN_INVALID_RIGHT;
+
+ invalid_value:
+ is_write_unlock(space);
+ return KERN_INVALID_VALUE;
+
+ urefs_overflow:
+ is_write_unlock(space);
+ return KERN_UREFS_OVERFLOW;
+
+#if MACH_IPC_COMPAT
+ invalid_name:
+ is_write_unlock(space);
+ return KERN_INVALID_NAME;
+#endif MACH_IPC_COMPAT
+}
+
+/*
+ *	Routine:	ipc_right_info
+ *	Purpose:
+ *		Retrieves information about the right.
+ *	Conditions:
+ *		The space is write-locked, and is unlocked upon return
+ *		if the call is unsuccessful.  The space must be active.
+ *	Returns:
+ *		KERN_SUCCESS		Retrieved info; space still locked.
+ */
+
+kern_return_t
+ipc_right_info(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_entry_t entry,
+	mach_port_type_t *typep,
+	mach_port_urefs_t *urefsp)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	ipc_port_request_index_t request;
+	mach_port_type_t type;
+
+	/*
+	 *	If the entry holds send-class rights, the port may have
+	 *	died; ipc_right_check then converts the entry to a dead
+	 *	name (returning TRUE, with the port left unlocked).
+	 */
+	if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+		ipc_port_t port = (ipc_port_t) entry->ie_object;
+
+		if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT) {
+				is_write_unlock(space);
+				return KERN_INVALID_NAME;
+			}
+#endif /* MACH_IPC_COMPAT */
+
+			bits = entry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+		} else
+			ip_unlock(port);
+	}
+
+	type = IE_BITS_TYPE(bits);
+	request = entry->ie_request;
+
+	/* note the else-less dangling #if arm: COMPAT suppresses DNREQUEST */
+#if MACH_IPC_COMPAT
+	if (bits & IE_BITS_COMPAT)
+		type |= MACH_PORT_TYPE_COMPAT;
+	else
+#endif /* MACH_IPC_COMPAT */
+	if (request != 0)
+		type |= MACH_PORT_TYPE_DNREQUEST;
+	if (bits & IE_BITS_MAREQUEST)
+		type |= MACH_PORT_TYPE_MAREQUEST;
+
+	*typep = type;
+	*urefsp = IE_BITS_UREFS(bits);
+	return KERN_SUCCESS;
+}
+
+/*
+ *	Routine:	ipc_right_copyin_check
+ *	Purpose:
+ *		Check if a subsequent ipc_right_copyin would succeed.
+ *	Conditions:
+ *		The space is locked (read or write) and active.
+ */
+
+boolean_t
+ipc_right_copyin_check(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	switch (msgt_name) {
+	    case MACH_MSG_TYPE_MAKE_SEND:
+	    case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+	    case MACH_MSG_TYPE_MOVE_RECEIVE:
+		/* all three require a receive right in this space */
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			return FALSE;
+
+		break;
+
+	    case MACH_MSG_TYPE_COPY_SEND:
+	    case MACH_MSG_TYPE_MOVE_SEND:
+	    case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
+		ipc_port_t port;
+		boolean_t active;
+
+		/* an existing dead name is acceptable to copyin */
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			break;
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			return FALSE;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/*
+		 *	Only peek at liveness here; the entry is not
+		 *	updated (this is just a predictive check).
+		 */
+		ip_lock(port);
+		active = ip_active(port);
+		ip_unlock(port);
+
+		if (!active) {
+#if MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				return FALSE;
+#endif /* MACH_IPC_COMPAT */
+
+			break;
+		}
+
+		if (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
+			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0)
+				return FALSE;
+		} else {
+			if ((bits & MACH_PORT_TYPE_SEND) == 0)
+				return FALSE;
+		}
+
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyin_check: strange rights");
+#else
+		panic("ipc_right_copyin_check: strange rights");
+#endif
+	}
+
+	return TRUE;
+}
+
+/*
+ *	Routine:	ipc_right_copyin
+ *	Purpose:
+ *		Copyin a capability from a space.
+ *		If successful, the caller gets a ref
+ *		for the resulting object, unless it is IO_DEAD,
+ *		and possibly a send-once right which should
+ *		be used in a port-deleted notification.
+ *
+ *		If deadok is not TRUE, the copyin operation
+ *		will fail instead of producing IO_DEAD.
+ *
+ *		The entry is never deallocated (except
+ *		when KERN_INVALID_NAME), so the caller
+ *		should deallocate the entry if its type
+ *		is MACH_PORT_TYPE_NONE.
+ *	Conditions:
+ *		The space is write-locked and active.
+ *	Returns:
+ *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
+ *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_right_copyin(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name,
+	boolean_t deadok,
+	ipc_object_t *objectp,
+	ipc_port_t *sorightp)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	switch (msgt_name) {
+	    case MACH_MSG_TYPE_MAKE_SEND: {
+		ipc_port_t port;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/* we hold the receive right, so the port must be alive */
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		port->ip_mscount++;
+		port->ip_srights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = IP_NULL;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
+		ipc_port_t port;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		port->ip_sorights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = IP_NULL;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MOVE_RECEIVE: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+
+		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+			goto invalid_right;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+
+		if (bits & MACH_PORT_TYPE_SEND) {
+			assert(IE_BITS_TYPE(bits) ==
+					MACH_PORT_TYPE_SEND_RECEIVE);
+			assert(IE_BITS_UREFS(bits) > 0);
+			assert(port->ip_srights > 0);
+
+			/*
+			 *	The entry keeps the send right, which now
+			 *	needs a reverse-hash mapping (it no longer
+			 *	holds the receive right).
+			 */
+			ipc_hash_insert(space, (ipc_object_t) port,
+					name, entry);
+
+			ip_reference(port);
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+			assert(IE_BITS_UREFS(bits) == 0);
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			entry->ie_object = IO_NULL;
+		}
+		entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE;
+
+		ipc_port_clear_receiver(port);
+
+		port->ip_receiver_name = MACH_PORT_NULL;
+		port->ip_destination = IP_NULL;
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = dnrequest;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_COPY_SEND: {
+		ipc_port_t port;
+
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			goto copy_dead;
+
+		/* allow for dead send-once rights */
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+			bits = entry->ie_bits;
+			goto copy_dead;
+		}
+		/* port is locked and active */
+
+		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+			assert(port->ip_sorights > 0);
+
+			ip_unlock(port);
+			goto invalid_right;
+		}
+
+		assert(port->ip_srights > 0);
+
+		port->ip_srights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = IP_NULL;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MOVE_SEND: {
+		ipc_port_t port;
+		ipc_port_t dnrequest = IP_NULL;
+
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			goto move_dead;
+
+		/* allow for dead send-once rights */
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+			bits = entry->ie_bits;
+			goto move_dead;
+		}
+		/* port is locked and active */
+
+		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+			assert(port->ip_sorights > 0);
+
+			ip_unlock(port);
+			goto invalid_right;
+		}
+
+		assert(port->ip_srights > 0);
+
+		if (IE_BITS_UREFS(bits) == 1) {
+			/* moving the last uref: the entry loses the right */
+			if (bits & MACH_PORT_TYPE_RECEIVE) {
+				assert(port->ip_receiver_name == name);
+				assert(port->ip_receiver == space);
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND_RECEIVE);
+
+				ip_reference(port);
+			} else {
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND);
+
+				dnrequest = ipc_right_dncancel_macro(
+						space, port, name, entry);
+
+				ipc_hash_delete(space, (ipc_object_t) port,
+						name, entry);
+
+				if (bits & IE_BITS_MAREQUEST)
+					ipc_marequest_cancel(space, name);
+
+				entry->ie_object = IO_NULL;
+			}
+			entry->ie_bits = bits &~
+				(IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
+		} else {
+			port->ip_srights++;
+			ip_reference(port);
+			entry->ie_bits = bits-1; /* decrement urefs */
+		}
+
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = dnrequest;
+		break;
+	    }
+
+	    case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
+		ipc_port_t port;
+		ipc_port_t dnrequest;
+
+		if (bits & MACH_PORT_TYPE_DEAD_NAME)
+			goto move_dead;
+
+		/* allow for dead send rights */
+
+		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0)
+			goto invalid_right;
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+			bits = entry->ie_bits;
+			goto move_dead;
+		}
+		/* port is locked and active */
+
+		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
+			assert(bits & MACH_PORT_TYPE_SEND);
+			assert(port->ip_srights > 0);
+
+			ip_unlock(port);
+			goto invalid_right;
+		}
+
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+		assert(port->ip_sorights > 0);
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+		ip_unlock(port);
+
+		/* the entry's existing ref moves with the send-once right */
+		entry->ie_object = IO_NULL;
+		entry->ie_bits = bits &~ MACH_PORT_TYPE_SEND_ONCE;
+
+		*objectp = (ipc_object_t) port;
+		*sorightp = dnrequest;
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyin: strange rights");
+#else
+		panic("ipc_right_copyin: strange rights");
+#endif
+	}
+
+	return KERN_SUCCESS;
+
+    copy_dead:
+	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+	assert(IE_BITS_UREFS(bits) > 0);
+	assert((bits & IE_BITS_MAREQUEST) == 0);
+	assert(entry->ie_request == 0);
+	assert(entry->ie_object == 0);
+
+	if (!deadok)
+		goto invalid_right;
+
+	*objectp = IO_DEAD;
+	*sorightp = IP_NULL;
+	return KERN_SUCCESS;
+
+    move_dead:
+	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+	assert(IE_BITS_UREFS(bits) > 0);
+	assert((bits & IE_BITS_MAREQUEST) == 0);
+	assert(entry->ie_request == 0);
+	assert(entry->ie_object == 0);
+
+	if (!deadok)
+		goto invalid_right;
+
+	if (IE_BITS_UREFS(bits) == 1)
+		entry->ie_bits = bits &~ MACH_PORT_TYPE_DEAD_NAME;
+	else
+		entry->ie_bits = bits-1; /* decrement urefs */
+
+	*objectp = IO_DEAD;
+	*sorightp = IP_NULL;
+	return KERN_SUCCESS;
+
+    invalid_right:
+	return KERN_INVALID_RIGHT;
+
+#if MACH_IPC_COMPAT
+    invalid_name:
+	return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+}
+
+/*
+ *	Routine:	ipc_right_copyin_undo
+ *	Purpose:
+ *		Undoes the effects of an ipc_right_copyin
+ *		of a send/send-once right that is dead.
+ *		(Object is either IO_DEAD or a dead port.)
+ *	Conditions:
+ *		The space is write-locked and active.
+ */
+
+void
+ipc_right_copyin_undo(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name,
+	ipc_object_t object,
+	ipc_port_t soright)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+	       (msgt_name == MACH_MSG_TYPE_COPY_SEND) ||
+	       (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+
+	if (soright != IP_NULL) {
+		/*
+		 *	The copyin emptied the entry and captured a
+		 *	dead-name request; re-create the name as a dead
+		 *	name.  (Urefs set to 2 — presumably one for the
+		 *	returned right plus one for the dnrequest; confirm
+		 *	against ipc_right_copyin.)
+		 */
+		assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+		       (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+		assert(entry->ie_object == IO_NULL);
+		assert(object != IO_DEAD);
+
+		entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) |
+				  MACH_PORT_TYPE_DEAD_NAME | 2);
+	} else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE) {
+		/* copyin emptied the entry; restore one dead-name uref */
+		assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+		       (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE));
+		assert(entry->ie_object == IO_NULL);
+
+		entry->ie_bits = ((bits &~ IE_BITS_RIGHT_MASK) |
+				  MACH_PORT_TYPE_DEAD_NAME | 1);
+	} else if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME) {
+		assert(entry->ie_object == IO_NULL);
+		assert(object == IO_DEAD);
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		/* a move consumed a uref; a copy did not */
+		if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
+			assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX);
+
+			entry->ie_bits = bits+1; /* increment urefs */
+		}
+	} else {
+		assert((msgt_name == MACH_MSG_TYPE_MOVE_SEND) ||
+		       (msgt_name == MACH_MSG_TYPE_COPY_SEND));
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+		assert(object != IO_DEAD);
+		assert(entry->ie_object == object);
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		if (msgt_name != MACH_MSG_TYPE_COPY_SEND) {
+			assert(IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX-1);
+
+			entry->ie_bits = bits+1; /* increment urefs */
+		}
+
+		/*
+		 *	May as well convert the entry to a dead name.
+		 *	(Or if it is a compat entry, destroy it.)
+		 */
+
+		(void) ipc_right_check(space, (ipc_port_t) object,
+				       name, entry);
+		/* object is dead so it is not locked */
+	}
+
+	/* release the reference acquired by copyin */
+
+	if (object != IO_DEAD)
+		ipc_object_release(object);
+}
+
+/*
+ *	Routine:	ipc_right_copyin_two
+ *	Purpose:
+ *		Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
+ *		and deadok == FALSE, except that this moves two
+ *		send rights at once.
+ *	Conditions:
+ *		The space is write-locked and active.
+ *		The object is returned with two refs/send rights.
+ *	Returns:
+ *		KERN_SUCCESS		Acquired an object.
+ *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
+ */
+
+kern_return_t
+ipc_right_copyin_two(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_entry_t entry,
+	ipc_object_t *objectp,
+	ipc_port_t *sorightp)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_urefs_t urefs;
+	ipc_port_t port;
+	ipc_port_t dnrequest = IP_NULL;
+
+	assert(space->is_active);
+
+	if ((bits & MACH_PORT_TYPE_SEND) == 0)
+		goto invalid_right;
+
+	/* moving two rights needs at least two user references */
+	urefs = IE_BITS_UREFS(bits);
+	if (urefs < 2)
+		goto invalid_right;
+
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	if (ipc_right_check(space, port, name, entry)) {
+#if MACH_IPC_COMPAT
+		if (bits & IE_BITS_COMPAT)
+			goto invalid_name;
+#endif /* MACH_IPC_COMPAT */
+
+		goto invalid_right;
+	}
+	/* port is locked and active */
+
+	assert(port->ip_srights > 0);
+
+	if (urefs == 2) {
+		/* both urefs move out; the entry loses the send right */
+		if (bits & MACH_PORT_TYPE_RECEIVE) {
+			assert(port->ip_receiver_name == name);
+			assert(port->ip_receiver == space);
+			assert(IE_BITS_TYPE(bits) ==
+					MACH_PORT_TYPE_SEND_RECEIVE);
+
+			port->ip_srights++;
+			ip_reference(port);
+			ip_reference(port);
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			ipc_hash_delete(space, (ipc_object_t) port,
+					name, entry);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			port->ip_srights++;
+			ip_reference(port);
+			entry->ie_object = IO_NULL;
+		}
+		entry->ie_bits = bits &~
+			(IE_BITS_UREFS_MASK|MACH_PORT_TYPE_SEND);
+	} else {
+		port->ip_srights += 2;
+		ip_reference(port);
+		ip_reference(port);
+		entry->ie_bits = bits-2; /* decrement urefs */
+	}
+	ip_unlock(port);
+
+	*objectp = (ipc_object_t) port;
+	*sorightp = dnrequest;
+	return KERN_SUCCESS;
+
+    invalid_right:
+	return KERN_INVALID_RIGHT;
+
+#if MACH_IPC_COMPAT
+    invalid_name:
+	return KERN_INVALID_NAME;
+#endif /* MACH_IPC_COMPAT */
+}
+
+/*
+ *	Routine:	ipc_right_copyout
+ *	Purpose:
+ *		Copyout a capability to a space.
+ *		If successful, consumes a ref for the object.
+ *
+ *		Always succeeds when given a newly-allocated entry,
+ *		because user-reference overflow isn't a possibility.
+ *
+ *		If copying out the object would cause the user-reference
+ *		count in the entry to overflow, and overflow is TRUE,
+ *		then instead the user-reference count is left pegged
+ *		to its maximum value and the copyout succeeds anyway.
+ *	Conditions:
+ *		The space is write-locked and active.
+ *		The object is locked and active.
+ *		The object is unlocked; the space isn't.
+ *	Returns:
+ *		KERN_SUCCESS		Copied out capability.
+ *		KERN_UREFS_OVERFLOW	User-refs would overflow;
+ *			guaranteed not to happen with a fresh entry
+ *			or if overflow=TRUE was specified.
+ */
+
+kern_return_t
+ipc_right_copyout(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_entry_t entry,
+	mach_msg_type_name_t msgt_name,
+	boolean_t overflow,
+	ipc_object_t object)
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	ipc_port_t port;
+
+	assert(IO_VALID(object));
+	assert(io_otype(object) == IOT_PORT);
+	assert(io_active(object));
+	assert(entry->ie_object == object);
+
+	port = (ipc_port_t) object;
+
+	switch (msgt_name) {
+	    case MACH_MSG_TYPE_PORT_SEND_ONCE:
+
+		/* a send-once right always lands in a fresh entry */
+		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+		assert(port->ip_sorights > 0);
+
+		/* transfer send-once right and ref to entry */
+		ip_unlock(port);
+
+		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1);
+		break;
+
+	    case MACH_MSG_TYPE_PORT_SEND:
+		assert(port->ip_srights > 0);
+
+		if (bits & MACH_PORT_TYPE_SEND) {
+			/* entry already holds send rights: coalesce urefs */
+			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
+
+			assert(port->ip_srights > 1);
+			assert(urefs > 0);
+			assert(urefs < MACH_PORT_UREFS_MAX);
+
+			if (urefs+1 == MACH_PORT_UREFS_MAX) {
+				if (overflow) {
+					/* leave urefs pegged to maximum */
+
+					port->ip_srights--;
+					ip_release(port);
+					ip_unlock(port);
+					return KERN_SUCCESS;
+				}
+
+				ip_unlock(port);
+				return KERN_UREFS_OVERFLOW;
+			}
+
+			port->ip_srights--;
+			ip_release(port);
+			ip_unlock(port);
+		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
+			assert(IE_BITS_UREFS(bits) == 0);
+
+			/* transfer send right to entry */
+			ip_release(port);
+			ip_unlock(port);
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+			assert(IE_BITS_UREFS(bits) == 0);
+
+			/* transfer send right and ref to entry */
+			ip_unlock(port);
+
+			/* entry is locked holding ref, so can use port */
+
+			/* a pure send entry needs a reverse-hash mapping */
+			ipc_hash_insert(space, (ipc_object_t) port,
+					name, entry);
+		}
+
+		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+		break;
+
+	    case MACH_MSG_TYPE_PORT_RECEIVE: {
+		ipc_port_t dest;
+
+		assert(port->ip_mscount == 0);
+		assert(port->ip_receiver_name == MACH_PORT_NULL);
+		dest = port->ip_destination;
+
+		/* the port becomes homed in this space under this name */
+		port->ip_receiver_name = name;
+		port->ip_receiver = space;
+
+		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
+
+		if (bits & MACH_PORT_TYPE_SEND) {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
+			assert(IE_BITS_UREFS(bits) > 0);
+			assert(port->ip_srights > 0);
+
+			ip_release(port);
+			ip_unlock(port);
+
+			/* entry is locked holding ref, so can use port */
+
+			/* entry no longer a pure send right: unhash it */
+			ipc_hash_delete(space, (ipc_object_t) port,
+					name, entry);
+		} else {
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+			assert(IE_BITS_UREFS(bits) == 0);
+
+			/* transfer ref to entry */
+			ip_unlock(port);
+		}
+
+		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
+
+		if (dest != IP_NULL)
+			ipc_port_release(dest);
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyout: strange rights");
+#else
+		panic("ipc_right_copyout: strange rights");
+#endif
+	}
+
+	return KERN_SUCCESS;
+}
+
+#if 0
+/*XXX same, but allows multiple duplicate send rights */
+/*
+ *	NOTE: disabled (dead) code.  Unlike ipc_right_copyout, this
+ *	variant assumes a fresh entry and does NOT insert the entry
+ *	into the reverse hash table, so several names may denote
+ *	send rights for the same port.
+ */
+kern_return_t
+ipc_right_copyout_multiname(space, name, entry, object)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_entry_t entry;
+	ipc_object_t object;
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	ipc_port_t port;
+
+	assert(IO_VALID(object));
+	assert(io_otype(object) == IOT_PORT);
+	assert(io_active(object));
+	assert(entry->ie_object == object);
+
+	port = (ipc_port_t) object;
+
+	assert(port->ip_srights > 0);
+
+	/* entry must be newly allocated: no existing rights or urefs */
+	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
+	assert(IE_BITS_UREFS(bits) == 0);
+
+	/* transfer send right and ref to entry */
+	ip_unlock(port);
+
+	/* entry is locked holding ref, so can use port */
+
+	entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
+
+	return KERN_SUCCESS;
+}
+#endif
+
+/*
+ *	Routine:	ipc_right_rename
+ *	Purpose:
+ *		Transfer an entry from one name to another.
+ *		The old entry is deallocated.
+ *	Conditions:
+ *		The space is write-locked and active.
+ *		The new entry is unused.  Upon return,
+ *		the space is unlocked.
+ *	Returns:
+ *		KERN_SUCCESS		Moved entry to new name.
+ */
+
+kern_return_t
+ipc_right_rename(
+	ipc_space_t space,
+	mach_port_t oname,
+	ipc_entry_t oentry,
+	mach_port_t nname,
+	ipc_entry_t nentry)
+{
+	ipc_entry_bits_t bits = oentry->ie_bits;
+	ipc_port_request_index_t request = oentry->ie_request;
+	ipc_object_t object = oentry->ie_object;
+
+	assert(space->is_active);
+	assert(oname != nname);
+
+	/*
+	 *	If IE_BITS_COMPAT, we can't allow the entry to be renamed
+	 *	if the port is dead.  (This would foil ipc_port_destroy.)
+	 *	Instead we should fail because oentry shouldn't exist.
+	 *	Note IE_BITS_COMPAT implies ie_request != 0.
+	 */
+
+	if (request != 0) {
+		ipc_port_t port;
+
+		assert(bits & MACH_PORT_TYPE_PORT_RIGHTS);
+		port = (ipc_port_t) object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, oname, oentry)) {
+#if MACH_IPC_COMPAT
+			if (bits & IE_BITS_COMPAT) {
+				ipc_entry_dealloc(space, nname, nentry);
+				is_write_unlock(space);
+				return KERN_INVALID_NAME;
+			}
+#endif /* MACH_IPC_COMPAT */
+
+			/* port died: oentry became a dead name */
+			bits = oentry->ie_bits;
+			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
+			assert(oentry->ie_request == 0);
+			request = 0;
+			assert(oentry->ie_object == IO_NULL);
+			object = IO_NULL;
+		} else {
+			/* port is locked and active */
+
+			ipc_port_dnrename(port, request, oname, nname);
+			ip_unlock(port);
+			oentry->ie_request = 0;
+		}
+	}
+
+	if (bits & IE_BITS_MAREQUEST) {
+		assert(bits & MACH_PORT_TYPE_SEND_RECEIVE);
+
+		ipc_marequest_rename(space, oname, nname);
+	}
+
+	/* initialize nentry before letting ipc_hash_insert see it */
+
+	assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0);
+	nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK;
+	nentry->ie_request = request;
+	nentry->ie_object = object;
+
+	/* fix up any name recorded in the underlying object */
+	switch (IE_BITS_TYPE(bits)) {
+	    case MACH_PORT_TYPE_SEND: {
+		ipc_port_t port;
+
+		port = (ipc_port_t) object;
+		assert(port != IP_NULL);
+
+		ipc_hash_delete(space, (ipc_object_t) port, oname, oentry);
+		ipc_hash_insert(space, (ipc_object_t) port, nname, nentry);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_RECEIVE: {
+		ipc_port_t port;
+
+		port = (ipc_port_t) object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == oname);
+		assert(port->ip_receiver == space);
+
+		port->ip_receiver_name = nname;
+		ip_unlock(port);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_PORT_SET: {
+		ipc_pset_t pset;
+
+		pset = (ipc_pset_t) object;
+		assert(pset != IPS_NULL);
+
+		ips_lock(pset);
+		assert(ips_active(pset));
+		assert(pset->ips_local_name == oname);
+
+		pset->ips_local_name = nname;
+		ips_unlock(pset);
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND_ONCE:
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		break;
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_rename: strange rights");
+#else
+		panic("ipc_right_rename: strange rights");
+#endif
+	}
+
+	assert(oentry->ie_request == 0);
+	oentry->ie_object = IO_NULL;
+	ipc_entry_dealloc(space, oname, oentry);
+	is_write_unlock(space);
+
+	return KERN_SUCCESS;
+}
+
+#if MACH_IPC_COMPAT
+
+/*
+ *	Routine:	ipc_right_copyin_compat
+ *	Purpose:
+ *		Copyin a capability from a space.
+ *		If successful, the caller gets a ref
+ *		for the resulting object, which is always valid.
+ *	Conditions:
+ *		The space is write-locked, and is unlocked upon return.
+ *		The space must be active.
+ *	Returns:
+ *		KERN_SUCCESS		Acquired a valid object.
+ *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
+ *		KERN_INVALID_NAME	[MACH_IPC_COMPAT]
+ *			Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_copyin_compat(space, name, entry, msgt_name, dealloc, objectp)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_entry_t entry;
+	mach_msg_type_name_t msgt_name;
+	boolean_t dealloc;
+	ipc_object_t *objectp;
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+
+	assert(space->is_active);
+
+	switch (msgt_name) {
+	    case MSG_TYPE_PORT:
+		if (dealloc) {
+			ipc_port_t port;
+			ipc_port_t dnrequest;
+
+			/*
+			 * Pulls a send right out of the space,
+			 * leaving the space with no rights.
+			 * Not allowed to destroy the port,
+			 * so the space can't have receive rights.
+			 * Doesn't operate on dead names.
+			 */
+
+			if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_SEND)
+				goto invalid_right;
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			if (ipc_right_check(space, port, name, entry)) {
+				if (bits & IE_BITS_COMPAT)
+					goto invalid_name;
+
+				goto invalid_right;
+			}
+			/* port is locked and active */
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			assert(port->ip_srights > 0);
+			ip_unlock(port);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			/* the entry's ref moves out with the send right */
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+
+			if (dnrequest != IP_NULL)
+				ipc_notify_port_deleted(dnrequest, name);
+
+			*objectp = (ipc_object_t) port;
+			break;
+		} else {
+			ipc_port_t port;
+
+			/*
+			 * Pulls a send right out of the space,
+			 * making a send right if necessary.
+			 * Doesn't operate on dead names.
+			 */
+
+			if ((bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0)
+				goto invalid_right;
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			if (ipc_right_check(space, port, name, entry)) {
+				if (bits & IE_BITS_COMPAT)
+					goto invalid_name;
+
+				goto invalid_right;
+			}
+			/* port is locked and active */
+
+			is_write_unlock(space);
+
+			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+				/* receive-only entry: manufacture a send right */
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_RECEIVE);
+				assert(IE_BITS_UREFS(bits) == 0);
+
+				port->ip_mscount++;
+			}
+
+			port->ip_srights++;
+			ip_reference(port);
+			ip_unlock(port);
+
+			*objectp = (ipc_object_t) port;
+			break;
+		}
+
+	    case MSG_TYPE_PORT_ALL:
+		if (dealloc) {
+			ipc_port_t port;
+			ipc_port_t dnrequest = IP_NULL;
+			ipc_port_t nsrequest = IP_NULL;
+			mach_port_mscount_t mscount = 0; /* '=0' to shut up lint */
+
+			/*
+			 * Like MACH_MSG_TYPE_MOVE_RECEIVE, except that
+			 * the space is always left without rights,
+			 * so we kill send rights if necessary.
+			 */
+
+			if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+				goto invalid_right;
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			ip_lock(port);
+			assert(ip_active(port));
+			assert(port->ip_receiver_name == name);
+			assert(port->ip_receiver == space);
+
+			dnrequest = ipc_right_dncancel_macro(space, port,
+							     name, entry);
+
+			if (bits & IE_BITS_MAREQUEST)
+				ipc_marequest_cancel(space, name);
+
+			entry->ie_object = IO_NULL;
+			ipc_entry_dealloc(space, name, entry);
+			is_write_unlock(space);
+
+			if (bits & MACH_PORT_TYPE_SEND) {
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_SEND_RECEIVE);
+				assert(IE_BITS_UREFS(bits) > 0);
+				assert(port->ip_srights > 0);
+
+				/* killing our send right may trigger no-senders */
+				if (--port->ip_srights == 0) {
+					nsrequest = port->ip_nsrequest;
+					if (nsrequest != IP_NULL) {
+						port->ip_nsrequest = IP_NULL;
+						mscount = port->ip_mscount;
+					}
+				}
+			}
+
+			ipc_port_clear_receiver(port);
+
+			port->ip_receiver_name = MACH_PORT_NULL;
+			port->ip_destination = IP_NULL;
+			ip_unlock(port);
+
+			if (nsrequest != IP_NULL)
+				ipc_notify_no_senders(nsrequest, mscount);
+
+			if (dnrequest != IP_NULL)
+				ipc_notify_port_deleted(dnrequest, name);
+
+			*objectp = (ipc_object_t) port;
+			break;
+		} else {
+			ipc_port_t port;
+
+			/*
+			 * Like MACH_MSG_TYPE_MOVE_RECEIVE, except that
+			 * the space is always left with send rights,
+			 * so we make a send right if necessary.
+			 */
+
+			if ((bits & MACH_PORT_TYPE_RECEIVE) == 0)
+				goto invalid_right;
+
+			port = (ipc_port_t) entry->ie_object;
+			assert(port != IP_NULL);
+
+			ip_lock(port);
+			assert(ip_active(port));
+			assert(port->ip_receiver_name == name);
+			assert(port->ip_receiver == space);
+
+			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
+				assert(IE_BITS_TYPE(bits) ==
+						MACH_PORT_TYPE_RECEIVE);
+				assert(IE_BITS_UREFS(bits) == 0);
+
+				/* ip_mscount will be cleared below */
+				port->ip_srights++;
+				bits |= MACH_PORT_TYPE_SEND | 1;
+			}
+
+			/* the entry keeps a send right: hash it */
+			ipc_hash_insert(space, (ipc_object_t) port,
+					name, entry);
+
+			entry->ie_bits = bits &~ MACH_PORT_TYPE_RECEIVE;
+			is_write_unlock(space);
+
+			ipc_port_clear_receiver(port); /* clears ip_mscount */
+
+			port->ip_receiver_name = MACH_PORT_NULL;
+			port->ip_destination = IP_NULL;
+			ip_reference(port);
+			ip_unlock(port);
+
+			*objectp = (ipc_object_t) port;
+			break;
+		}
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyin_compat: strange rights");
+#else
+		panic("ipc_right_copyin_compat: strange rights");
+#endif
+	}
+
+	return KERN_SUCCESS;
+
+    invalid_right:
+	is_write_unlock(space);
+	return KERN_INVALID_RIGHT;
+
+    invalid_name:
+	is_write_unlock(space);
+	return KERN_INVALID_NAME;
+}
+
+/*
+ *	Routine:	ipc_right_copyin_header
+ *	Purpose:
+ *		Copyin a capability from a space.
+ *		If successful, the caller gets a ref
+ *		for the resulting object, which is always valid.
+ *		The type of the acquired capability is returned.
+ *	Conditions:
+ *		The space is write-locked, and is unlocked upon return.
+ *		The space must be active.
+ *	Returns:
+ *		KERN_SUCCESS		Acquired a valid object.
+ *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
+ *		KERN_INVALID_NAME	[MACH_IPC_COMPAT]
+ *			Caller should pretend lookup of entry failed.
+ */
+
+kern_return_t
+ipc_right_copyin_header(space, name, entry, objectp, msgt_namep)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_entry_t entry;
+	ipc_object_t *objectp;
+	mach_msg_type_name_t *msgt_namep;
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	mach_port_type_t type = IE_BITS_TYPE(bits);
+
+	assert(space->is_active);
+
+	/* dispatch on the entry's right type, not a caller-supplied name */
+	switch (type) {
+	    case MACH_PORT_TYPE_PORT_SET:
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		goto invalid_right;
+
+	    case MACH_PORT_TYPE_RECEIVE: {
+		ipc_port_t port;
+
+		/*
+		 * Like MACH_MSG_TYPE_MAKE_SEND.
+		 */
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		assert(ip_active(port));
+		assert(port->ip_receiver_name == name);
+		assert(port->ip_receiver == space);
+		is_write_unlock(space);
+
+		port->ip_mscount++;
+		port->ip_srights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*msgt_namep = MACH_MSG_TYPE_PORT_SEND;
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND:
+	    case MACH_PORT_TYPE_SEND_RECEIVE: {
+		ipc_port_t port;
+
+		/*
+		 * Like MACH_MSG_TYPE_COPY_SEND,
+		 * except that the port must be alive.
+		 */
+
+		assert(IE_BITS_UREFS(bits) > 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+
+			goto invalid_right;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_srights > 0);
+		is_write_unlock(space);
+
+		port->ip_srights++;
+		ip_reference(port);
+		ip_unlock(port);
+
+		*objectp = (ipc_object_t) port;
+		*msgt_namep = MACH_MSG_TYPE_PORT_SEND;
+		break;
+	    }
+
+	    case MACH_PORT_TYPE_SEND_ONCE: {
+		ipc_port_t port;
+		ipc_port_t dnrequest, notify;
+
+		/*
+		 * Like MACH_MSG_TYPE_MOVE_SEND_ONCE,
+		 * except that the port must be alive
+		 * and a port-deleted notification is generated.
+		 */
+
+		assert(IE_BITS_UREFS(bits) == 1);
+		assert((bits & IE_BITS_MAREQUEST) == 0);
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		if (ipc_right_check(space, port, name, entry)) {
+			if (bits & IE_BITS_COMPAT)
+				goto invalid_name;
+
+			goto invalid_right;
+		}
+		/* port is locked and active */
+
+		assert(port->ip_sorights > 0);
+
+		dnrequest = ipc_right_dncancel_macro(space, port, name, entry);
+		ip_unlock(port);
+
+		/* the entry's ref moves out with the send-once right */
+		entry->ie_object = IO_NULL;
+		ipc_entry_dealloc(space, name, entry);
+
+		/* compat-style notification port, made before unlocking */
+		notify = ipc_space_make_notify(space);
+		is_write_unlock(space);
+
+		if (dnrequest != IP_NULL)
+			ipc_notify_port_deleted(dnrequest, name);
+
+		if (IP_VALID(notify))
+			ipc_notify_port_deleted_compat(notify, name);
+
+		*objectp = (ipc_object_t) port;
+		*msgt_namep = MACH_MSG_TYPE_PORT_SEND_ONCE;
+		break;
+	    }
+
+	    default:
+#if MACH_ASSERT
+		assert(!"ipc_right_copyin_header: strange rights");
+#else
+		panic("ipc_right_copyin_header: strange rights");
+#endif
+	}
+
+	return KERN_SUCCESS;
+
+    invalid_right:
+	is_write_unlock(space);
+	return KERN_INVALID_RIGHT;
+
+    invalid_name:
+	is_write_unlock(space);
+	return KERN_INVALID_NAME;
+}
+
+#endif MACH_IPC_COMPAT
diff --git a/ipc/ipc_right.h b/ipc/ipc_right.h
new file mode 100644
index 0000000..7c0f2a3
--- /dev/null
+++ b/ipc/ipc_right.h
@@ -0,0 +1,124 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_right.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of functions to manipulate IPC capabilities.
+ */
+
+#ifndef	_IPC_IPC_RIGHT_H_
+#define	_IPC_IPC_RIGHT_H_
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <ipc/ipc_port.h>
+
+/*
+ *	Spaces are protected by an exclusive lock, so a read lookup
+ *	is implemented with the write-lookup routine.
+ */
+#define	ipc_right_lookup_read		ipc_right_lookup_write
+
+extern kern_return_t
+ipc_right_lookup_write(/* ipc_space_t, mach_port_t, ipc_entry_t * */);
+
+extern boolean_t
+ipc_right_reverse(/* ipc_space_t, ipc_object_t,
+		     mach_port_t *, ipc_entry_t * */);
+
+extern kern_return_t
+ipc_right_dnrequest(/* ipc_space_t, mach_port_t, boolean_t,
+		       ipc_port_t, ipc_port_t * */);
+
+extern ipc_port_t
+ipc_right_dncancel(/* ipc_space_t, ipc_port_t, mach_port_t, ipc_entry_t */);
+
+/*
+ *	Fast path: avoid the function call entirely when no
+ *	dead-name request is registered on the entry.
+ */
+#define	ipc_right_dncancel_macro(space, port, name, entry)		\
+		(((entry)->ie_request == 0) ? IP_NULL :			\
+		 ipc_right_dncancel((space), (port), (name), (entry)))
+
+extern boolean_t
+ipc_right_inuse(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern boolean_t
+ipc_right_check(/* ipc_space_t, mach_port_t, ipc_entry_t, ipc_port_t */);
+
+extern void
+ipc_right_clean(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern kern_return_t
+ipc_right_destroy(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern kern_return_t
+ipc_right_dealloc(/* ipc_space_t, mach_port_t, ipc_entry_t */);
+
+extern kern_return_t
+ipc_right_delta(/* ipc_space_t, mach_port_t, ipc_entry_t,
+		   mach_port_right_t, mach_port_delta_t */);
+
+extern kern_return_t
+ipc_right_info(/* ipc_space_t, mach_port_t, ipc_entry_t,
+		  mach_port_type_t *, mach_port_urefs_t * */);
+
+extern boolean_t
+ipc_right_copyin_check(/* ipc_space_t, mach_port_t, ipc_entry_t,
+			  mach_msg_type_name_t */);
+
+extern kern_return_t
+ipc_right_copyin(/* ipc_space_t, mach_port_t, ipc_entry_t,
+		    mach_msg_type_name_t, boolean_t,
+		    ipc_object_t *, ipc_port_t * */);
+
+extern void
+ipc_right_copyin_undo(/* ipc_space_t, mach_port_t, ipc_entry_t,
+			 mach_msg_type_name_t, ipc_object_t, ipc_port_t */);
+
+extern kern_return_t
+ipc_right_copyin_two(/* ipc_space_t, mach_port_t, ipc_entry_t,
+			ipc_object_t *, ipc_port_t * */);
+
+extern kern_return_t
+ipc_right_copyout(/* ipc_space_t, mach_port_t, ipc_entry_t,
+		     mach_msg_type_name_t, boolean_t, ipc_object_t */);
+
+extern kern_return_t
+ipc_right_rename(/* ipc_space_t, mach_port_t, ipc_entry_t,
+		    mach_port_t, ipc_entry_t */);
+
+#if	MACH_IPC_COMPAT
+
+extern kern_return_t
+ipc_right_copyin_compat(/* ipc_space_t, mach_port_t, ipc_entry_t,
+			   mach_msg_type_name_t, boolean_t, ipc_object_t * */);
+
+extern kern_return_t
+ipc_right_copyin_header(/* ipc_space_t, mach_port_t, ipc_entry_t,
+			   ipc_object_t *, mach_msg_type_name_t * */);
+
+#endif	/* MACH_IPC_COMPAT */
+#endif	/* _IPC_IPC_RIGHT_H_ */
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
new file mode 100644
index 0000000..7e3cba9
--- /dev/null
+++ b/ipc/ipc_space.c
@@ -0,0 +1,317 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_space.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate IPC capability spaces.
+ */
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/sched_prim.h>
+#include <kern/zalloc.h>
+#include <ipc/port.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_splay.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_right.h>
+
+
+
+/* Zone from which ipc_space structures are allocated (see is_alloc). */
+zone_t ipc_space_zone;
+/* Special spaces for kernel-held and reply-port receive rights. */
+ipc_space_t ipc_space_kernel;
+ipc_space_t ipc_space_reply;
+#if	NORMA_IPC
+ipc_space_t ipc_space_remote;
+#endif	/* NORMA_IPC */
+
+/*
+ * Routine: ipc_space_reference
+ * Routine: ipc_space_release
+ * Purpose:
+ * Function versions of the IPC space macros.
+ * The "is_" cover macros can be defined to use the
+ * macros or the functions, as desired.
+ */
+
+/* Out-of-line form of ipc_space_reference_macro (ipc_space.h). */
+void
+ipc_space_reference(
+	ipc_space_t	space)
+{
+	/* takes the ref lock and increments is_references */
+	ipc_space_reference_macro(space);
+}
+
+/* Out-of-line form of ipc_space_release_macro (ipc_space.h). */
+void
+ipc_space_release(
+	ipc_space_t	space)
+{
+	/* drops a reference; frees the space when the count hits zero */
+	ipc_space_release_macro(space);
+}
+
+/*
+ * Routine: ipc_space_create
+ * Purpose:
+ * Creates a new IPC space.
+ *
+ * The new space has two references, one for the caller
+ * and one because it is active.
+ * Conditions:
+ * Nothing locked. Allocates memory.
+ * Returns:
+ * KERN_SUCCESS Created a space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_space_create(
+	ipc_table_size_t	initial,
+	ipc_space_t		*spacep)
+{
+	ipc_space_t space;
+	ipc_entry_t table;
+	ipc_entry_num_t new_size;
+	mach_port_index_t index;
+
+	space = is_alloc();
+	if (space == IS_NULL)
+		return KERN_RESOURCE_SHORTAGE;
+
+	table = it_entries_alloc(initial);
+	if (table == IE_NULL) {
+		is_free(space);
+		return KERN_RESOURCE_SHORTAGE;
+	}
+
+	new_size = initial->its_size;
+	memset((void *) table, 0, new_size * sizeof(struct ipc_entry));
+
+	/*
+	 *	Initialize the free list in the table.
+	 *	Add the entries in reverse order, and
+	 *	set the generation number to -1, so that
+	 *	initial allocations produce "natural" names.
+	 */
+
+	for (index = 0; index < new_size; index++) {
+		ipc_entry_t entry = &table[index];
+
+		entry->ie_bits = IE_BITS_GEN_MASK;
+		entry->ie_next = index+1;
+	}
+	/* the last entry terminates the free list (0 == no next free) */
+	table[new_size-1].ie_next = 0;
+
+	/* one reference for the caller, one because the space is active */
+	is_ref_lock_init(space);
+	space->is_references = 2;
+
+	is_lock_init(space);
+	space->is_active = TRUE;
+	space->is_growing = FALSE;
+	space->is_table = table;
+	space->is_table_size = new_size;
+	space->is_table_next = initial+1;
+
+	/* the overflow splay tree starts out empty */
+	ipc_splay_tree_init(&space->is_tree);
+	space->is_tree_total = 0;
+	space->is_tree_small = 0;
+	space->is_tree_hash = 0;
+
+#if	MACH_IPC_COMPAT
+	{
+		mach_port_t name;
+		ipc_port_t port;
+		kern_return_t kr;
+
+		/*
+		 *	ipc_port_alloc_compat probably won't look at is_notify,
+		 *	but make sure all fields have sane values anyway.
+		 */
+
+		space->is_notify = IP_NULL;
+
+		kr = ipc_port_alloc_compat(space, &name, &port);
+		if (kr != KERN_SUCCESS) {
+			ipc_space_destroy(space);
+			is_release(space);
+			return kr;
+		}
+
+		/* keep a send right to the notification port in the space */
+		ip_reference(port);
+		port->ip_srights++;
+		ip_unlock(port);
+		space->is_notify = port;
+	}
+#endif	/* MACH_IPC_COMPAT */
+
+	*spacep = space;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_space_create_special
+ * Purpose:
+ * Create a special space. A special space
+ * doesn't hold rights in the normal way.
+ * Instead it is place-holder for holding
+ * disembodied (naked) receive rights.
+ * See ipc_port_alloc_special/ipc_port_dealloc_special.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Created a space.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+ipc_space_create_special(
+	ipc_space_t	*spacep)
+{
+	ipc_space_t space;
+
+	space = is_alloc();
+	if (space == IS_NULL)
+		return KERN_RESOURCE_SHORTAGE;
+
+	/* only the caller's reference; a special space is never active,
+	   so there is no "active" reference and no entry table */
+	is_ref_lock_init(space);
+	space->is_references = 1;
+
+	is_lock_init(space);
+	space->is_active = FALSE;
+
+	*spacep = space;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_space_destroy
+ * Purpose:
+ * Marks the space as dead and cleans up the entries.
+ * Does nothing if the space is already dead.
+ * Conditions:
+ * Nothing locked.
+ */
+
+void
+ipc_space_destroy(
+	ipc_space_t	space)
+{
+	ipc_tree_entry_t tentry;
+	ipc_entry_t table;
+	ipc_entry_num_t size;
+	mach_port_index_t index;
+	boolean_t active;
+
+	assert(space != IS_NULL);
+
+	/* atomically mark the space dead; remember whether we did the kill */
+	is_write_lock(space);
+	active = space->is_active;
+	space->is_active = FALSE;
+	is_write_unlock(space);
+
+	if (!active)
+		return;
+
+	/*
+	 *	If somebody is trying to grow the table,
+	 *	we must wait until they finish and figure
+	 *	out the space died.
+	 */
+
+	is_read_lock(space);
+	while (space->is_growing) {
+		assert_wait((event_t) space, FALSE);
+		is_read_unlock(space);
+		thread_block((void (*)(void)) 0);
+		is_read_lock(space);
+	}
+	is_read_unlock(space);
+
+	/*
+	 *	Now we can futz with it	without having it locked.
+	 */
+
+	table = space->is_table;
+	size = space->is_table_size;
+
+	/* release every right still held in the table */
+	for (index = 0; index < size; index++) {
+		ipc_entry_t entry = &table[index];
+		mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);
+
+		if (type != MACH_PORT_TYPE_NONE) {
+			mach_port_t name =
+				MACH_PORT_MAKEB(index, entry->ie_bits);
+
+			ipc_right_clean(space, name, entry);
+		}
+	}
+
+	/* is_table_next-1 is the ipc_table_size describing the table */
+	it_entries_free(space->is_table_next-1, table);
+
+	/* release every right held in the overflow splay tree */
+	for (tentry = ipc_splay_traverse_start(&space->is_tree);
+	     tentry != ITE_NULL;
+	     tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
+		mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
+		mach_port_t name = tentry->ite_name;
+
+		assert(type != MACH_PORT_TYPE_NONE);
+
+		/* use object before ipc_right_clean releases ref */
+
+		if (type == MACH_PORT_TYPE_SEND)
+			ipc_hash_global_delete(space, tentry->ite_object,
+					       name, tentry);
+
+		ipc_right_clean(space, name, &tentry->ite_entry);
+	}
+	ipc_splay_traverse_finish(&space->is_tree);
+
+#if	MACH_IPC_COMPAT
+	if (IP_VALID(space->is_notify))
+		ipc_port_release_send(space->is_notify);
+#endif	/* MACH_IPC_COMPAT */
+
+	/*
+	 *	Because the space is now dead,
+	 *	we must release the "active" reference for it.
+	 *	Our caller still has his reference.
+	 */
+
+	is_release(space);
+}
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
new file mode 100644
index 0000000..430971f
--- /dev/null
+++ b/ipc/ipc_space.h
@@ -0,0 +1,164 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_space.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for IPC spaces of capabilities.
+ */
+
+#ifndef	_IPC_IPC_SPACE_H_
+#define _IPC_IPC_SPACE_H_
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/boolean.h>
+#include <mach/kern_return.h>
+#include <kern/macro_help.h>
+#include <kern/lock.h>
+#include <ipc/ipc_splay.h>
+
+/*
+ *	Every task has a space of IPC capabilities.
+ *	IPC operations like send and receive use this space.
+ *	IPC kernel calls manipulate the space of the target task.
+ *
+ *	Every space has a non-NULL is_table with is_table_size entries.
+ *	A space may have a NULL is_tree.  is_tree_small records the
+ *	number of entries in the tree that, if the table were to grow
+ *	to the next larger size, would move from the tree to the table.
+ *
+ *	is_growing marks when the table is in the process of growing.
+ *	When the table is growing, it can't be freed or grown by another
+ *	thread, because of krealloc/kmem_realloc's requirements.
+ */
+
+typedef unsigned int ipc_space_refs_t;
+
+struct ipc_space {
+	decl_simple_lock_data(,is_ref_lock_data)
+	ipc_space_refs_t is_references;
+
+	decl_simple_lock_data(,is_lock_data)
+	boolean_t is_active;		/* is the space alive? */
+	boolean_t is_growing;		/* is the space growing? */
+	ipc_entry_t is_table;		/* an array of entries */
+	ipc_entry_num_t is_table_size;	/* current size of table */
+	struct ipc_table_size *is_table_next; /* info for larger table */
+	struct ipc_splay_tree is_tree;	/* a splay tree of entries */
+	ipc_entry_num_t is_tree_total;	/* number of entries in the tree */
+	ipc_entry_num_t is_tree_small;	/* # of small entries in the tree */
+	ipc_entry_num_t is_tree_hash;	/* # of hashed entries in the tree */
+
+#if	MACH_IPC_COMPAT
+	struct ipc_port *is_notify;	/* notification port */
+#endif	/* MACH_IPC_COMPAT */
+};
+
+#define	IS_NULL			((ipc_space_t) 0)
+
+extern zone_t ipc_space_zone;
+
+#define is_alloc()		((ipc_space_t) zalloc(ipc_space_zone))
+#define	is_free(is)		zfree(ipc_space_zone, (vm_offset_t) (is))
+
+extern struct ipc_space *ipc_space_kernel;
+extern struct ipc_space *ipc_space_reply;
+#if	NORMA_IPC
+extern struct ipc_space *ipc_space_remote;
+#endif	/* NORMA_IPC */
+
+#define	is_ref_lock_init(is)	simple_lock_init(&(is)->is_ref_lock_data)
+
+#define	ipc_space_reference_macro(is)					\
+MACRO_BEGIN								\
+	simple_lock(&(is)->is_ref_lock_data);				\
+	assert((is)->is_references > 0);				\
+	(is)->is_references++;						\
+	simple_unlock(&(is)->is_ref_lock_data);				\
+MACRO_END
+
+#define	ipc_space_release_macro(is)					\
+MACRO_BEGIN								\
+	ipc_space_refs_t _refs;						\
+									\
+	simple_lock(&(is)->is_ref_lock_data);				\
+	assert((is)->is_references > 0);				\
+	_refs = --(is)->is_references;					\
+	simple_unlock(&(is)->is_ref_lock_data);				\
+									\
+	if (_refs == 0)							\
+		is_free(is);						\
+MACRO_END
+
+#define	is_lock_init(is)	simple_lock_init(&(is)->is_lock_data)
+
+/*
+ *	Spaces are protected by a single exclusive simple lock,
+ *	so "read" and "write" locking are currently the same
+ *	operation, and is_write_to_read_lock is a no-op.
+ */
+#define	is_read_lock(is)	simple_lock(&(is)->is_lock_data)
+#define is_read_unlock(is)	simple_unlock(&(is)->is_lock_data)
+
+#define	is_write_lock(is)	simple_lock(&(is)->is_lock_data)
+#define	is_write_lock_try(is)	simple_lock_try(&(is)->is_lock_data)
+#define is_write_unlock(is)	simple_unlock(&(is)->is_lock_data)
+
+#define	is_write_to_read_lock(is)
+
+extern void ipc_space_reference(struct ipc_space *space);
+extern void ipc_space_release(struct ipc_space *space);
+
+#define	is_reference(is)	ipc_space_reference(is)
+#define	is_release(is)		ipc_space_release(is)
+
+kern_return_t	ipc_space_create(/* ipc_table_size_t, ipc_space_t * */);
+kern_return_t	ipc_space_create_special(struct ipc_space **);
+void		ipc_space_destroy(struct ipc_space *);
+
+#if	MACH_IPC_COMPAT
+
+/*
+ *	Routine:	ipc_space_make_notify
+ *	Purpose:
+ *		Given a space, return a send right for a notification.
+ *		May return IP_NULL/IP_DEAD.
+ *	Conditions:
+ *		The space is locked (read or write) and active.
+ *
+ *	ipc_port_t
+ *	ipc_space_make_notify(space)
+ *	ipc_space_t space;
+ */
+
+#define ipc_space_make_notify(space)					\
+		ipc_port_copy_send(space->is_notify)
+
+#endif	/* MACH_IPC_COMPAT */
+#endif	/* _IPC_IPC_SPACE_H_ */
diff --git a/ipc/ipc_splay.c b/ipc/ipc_splay.c
new file mode 100644
index 0000000..6fb5bcb
--- /dev/null
+++ b/ipc/ipc_splay.c
@@ -0,0 +1,920 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_splay.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Primitive splay tree operations.
+ */
+
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_splay.h>
+
+/*
+ * Splay trees are self-adjusting binary search trees.
+ * They have the following attractive properties:
+ * 1) Space efficient; only two pointers per entry.
+ * 2) Robust performance; amortized O(log n) per operation.
+ * 3) Recursion not needed.
+ * This makes them a good fall-back data structure for those
+ * entries that don't fit into the lookup table.
+ *
+ * The paper by Sleator and Tarjan, JACM v. 32, no. 3, pp. 652-686,
+ * describes the splaying operation. ipc_splay_prim_lookup
+ * and ipc_splay_prim_assemble implement the top-down splay
+ * described on p. 669.
+ *
+ * The tree is stored in an unassembled form. If ist_root is null,
+ * then the tree has no entries. Otherwise, ist_name records
+ * the value used for the last lookup. ist_root points to the
+ * middle tree obtained from the top-down splay. ist_ltree and
+ * ist_rtree point to left and right subtrees, whose entries
+ * are all smaller (larger) than those in the middle tree.
+ * ist_ltreep and ist_rtreep are pointers to fields in the
+ * left and right subtrees. ist_ltreep points to the rchild field
+ * of the largest entry in ltree, and ist_rtreep points to the
+ * lchild field of the smallest entry in rtree. The pointed-to
+ * fields aren't initialized. If the left (right) subtree is null,
+ * then ist_ltreep (ist_rtreep) points to the ist_ltree (ist_rtree)
+ * field in the splay structure itself.
+ *
+ * The primary advantage of the unassembled form is that repeated
+ * unsuccessful lookups are efficient. In particular, an unsuccessful
+ * lookup followed by an insert only requires one splaying operation.
+ *
+ * The traversal algorithm works via pointer inversion.
+ * When descending down the tree, child pointers are reversed
+ * to point back to the parent entry. When ascending,
+ * the pointers are restored to their original value.
+ *
+ * The biggest potential problem with the splay tree implementation
+ * is that the operations, even lookup, require an exclusive lock.
+ * If IPC spaces are protected with exclusive locks, then
+ * the splay tree doesn't require its own lock, and ist_lock/ist_unlock
+ * needn't do anything. If IPC spaces are protected with read/write
+ * locks then ist_lock/ist_unlock should provide exclusive access.
+ *
+ * If it becomes important to let lookups run in parallel,
+ * or if the restructuring makes lookups too expensive, then
+ * there is hope. Use a read/write lock on the splay tree.
+ * Keep track of the number of entries in the tree. When doing
+ * a lookup, first try a non-restructuring lookup with a read lock held,
+ * with a bound (based on log of size of the tree) on the number of
+ * entries to traverse. If the lookup runs up against the bound,
+ * then take a write lock and do a reorganizing lookup.
+ * This way, if lookups only access roughly balanced parts
+ * of the tree, then lookups run in parallel and do no restructuring.
+ *
+ * The traversal algorithm currently requires an exclusive lock.
+ * If that is a problem, the tree could be changed from an lchild/rchild
+ * representation to a leftmost child/right sibling representation.
+ * In conjunction with non-restructing lookups, this would let
+ * lookups and traversals all run in parallel. But this representation
+ * is more complicated and would slow down the operations.
+ */
+
+/*
+ * Boundary values to hand to ipc_splay_prim_lookup:
+ */
+
+#define MACH_PORT_SMALLEST ((mach_port_t) 0)
+#define MACH_PORT_LARGEST ((mach_port_t) ~0)
+
+/*
+ * Routine: ipc_splay_prim_lookup
+ * Purpose:
+ * Searches for the node labeled name in the splay tree.
+ * Returns three nodes (treep, ltreep, rtreep) and
+ * two pointers to nodes (ltreepp, rtreepp).
+ *
+ * ipc_splay_prim_lookup splits the supplied tree into
+ * three subtrees, left, middle, and right, returned
+ * in ltreep, treep, and rtreep.
+ *
+ * If name is present in the tree, then it is at
+ * the root of the middle tree. Otherwise, the root
+ * of the middle tree is the last node traversed.
+ *
+ * ipc_splay_prim_lookup returns a pointer into
+ * the left subtree, to the rchild field of its
+ * largest node, in ltreepp. It returns a pointer
+ * into the right subtree, to the lchild field of its
+ * smallest node, in rtreepp.
+ */
+
+static void
+ipc_splay_prim_lookup(
+	mach_port_t name,
+	ipc_tree_entry_t tree,
+	ipc_tree_entry_t *treep,
+	ipc_tree_entry_t *ltreep,
+	ipc_tree_entry_t **ltreepp,
+	ipc_tree_entry_t *rtreep,
+	ipc_tree_entry_t **rtreepp)
+{
+	mach_port_t tname;		/* temp name */
+	ipc_tree_entry_t lchild, rchild;	/* temp child pointers */
+
+	assert(tree != ITE_NULL);
+
+/* move the current root into the left (smaller) subtree and descend right */
+#define	link_left				\
+MACRO_BEGIN					\
+	*ltreep = tree;				\
+	ltreep = &tree->ite_rchild;		\
+	tree = *ltreep;				\
+MACRO_END
+
+/* move the current root into the right (larger) subtree and descend left */
+#define	link_right				\
+MACRO_BEGIN					\
+	*rtreep = tree;				\
+	rtreep = &tree->ite_lchild;		\
+	tree = *rtreep;				\
+MACRO_END
+
+/* single rotation: promote the right child of the current root */
+#define rotate_left				\
+MACRO_BEGIN					\
+	ipc_tree_entry_t temp = tree;		\
+						\
+	tree = temp->ite_rchild;		\
+	temp->ite_rchild = tree->ite_lchild;	\
+	tree->ite_lchild = temp;		\
+MACRO_END
+
+/* single rotation: promote the left child of the current root */
+#define rotate_right				\
+MACRO_BEGIN					\
+	ipc_tree_entry_t temp = tree;		\
+						\
+	tree = temp->ite_lchild;		\
+	temp->ite_lchild = tree->ite_rchild;	\
+	tree->ite_rchild = temp;		\
+MACRO_END
+
+	/*
+	 * Top-down splay (Sleator & Tarjan, p. 669): descend two levels
+	 * at a time, rotating on zig-zig steps and linking the bypassed
+	 * nodes onto the growing left/right subtrees.  Stops when name
+	 * is found or the search falls off the tree.
+	 */
+	while (name != (tname = tree->ite_name)) {
+		if (name < tname) {
+			/* descend to left */
+
+			lchild = tree->ite_lchild;
+			if (lchild == ITE_NULL)
+				break;
+			tname = lchild->ite_name;
+
+			if ((name < tname) &&
+			    (lchild->ite_lchild != ITE_NULL))
+				rotate_right;
+			link_right;
+			if ((name > tname) &&
+			    (lchild->ite_rchild != ITE_NULL))
+				link_left;
+		} else {
+			/* descend to right */
+
+			rchild = tree->ite_rchild;
+			if (rchild == ITE_NULL)
+				break;
+			tname = rchild->ite_name;
+
+			if ((name > tname) &&
+			    (rchild->ite_rchild != ITE_NULL))
+				rotate_left;
+			link_left;
+			if ((name < tname) &&
+			    (rchild->ite_lchild != ITE_NULL))
+				link_right;
+		}
+
+		assert(tree != ITE_NULL);
+	}
+
+	/* middle tree root, plus insertion points in the side trees */
+	*treep = tree;
+	*ltreepp = ltreep;
+	*rtreepp = rtreep;
+
+#undef	link_left
+#undef	link_right
+#undef	rotate_left
+#undef	rotate_right
+}
+
+/*
+ * Routine: ipc_splay_prim_assemble
+ * Purpose:
+ * Assembles the results of ipc_splay_prim_lookup
+ * into a splay tree with the found node at the root.
+ *
+ * ltree and rtree are by-reference so storing
+ * through ltreep and rtreep can change them.
+ */
+
+static void
+ipc_splay_prim_assemble(
+	ipc_tree_entry_t tree,
+	ipc_tree_entry_t *ltree,
+	ipc_tree_entry_t *ltreep,
+	ipc_tree_entry_t *rtree,
+	ipc_tree_entry_t *rtreep)
+{
+	assert(tree != ITE_NULL);
+
+	/* hang the root's own subtrees off the extremes of ltree/rtree */
+	*ltreep = tree->ite_lchild;
+	*rtreep = tree->ite_rchild;
+
+	/* then make the assembled left/right trees the root's children */
+	tree->ite_lchild = *ltree;
+	tree->ite_rchild = *rtree;
+}
+
+/*
+ * Routine: ipc_splay_tree_init
+ * Purpose:
+ * Initialize a raw splay tree for use.
+ */
+
+void
+ipc_splay_tree_init(
+	ipc_splay_tree_t splay)
+{
+	/* an empty tree is just a null root; the other ist_* fields
+	   are only meaningful while ist_root != ITE_NULL */
+	splay->ist_root = ITE_NULL;
+}
+
+/*
+ * Routine: ipc_splay_tree_pick
+ * Purpose:
+ * Picks and returns a random entry in a splay tree.
+ * Returns FALSE if the splay tree is empty.
+ */
+
+boolean_t
+ipc_splay_tree_pick(
+	ipc_splay_tree_t splay,
+	mach_port_t *namep,
+	ipc_tree_entry_t *entryp)
+{
+	ipc_tree_entry_t root;
+
+	ist_lock(splay);
+
+	/* "random" here just means whatever entry happens to be
+	   at the root of the middle tree; no splaying is done */
+	root = splay->ist_root;
+	if (root != ITE_NULL) {
+		*namep = root->ite_name;
+		*entryp = root;
+	}
+
+	ist_unlock(splay);
+
+	return root != ITE_NULL;
+}
+
+/*
+ * Routine: ipc_splay_tree_lookup
+ * Purpose:
+ * Finds an entry in a splay tree.
+ * Returns ITE_NULL if not found.
+ */
+
+ipc_tree_entry_t
+ipc_splay_tree_lookup(
+	ipc_splay_tree_t splay,
+	mach_port_t name)
+{
+	ipc_tree_entry_t root;
+
+	ist_lock(splay);
+
+	root = splay->ist_root;
+	if (root != ITE_NULL) {
+		/* if the last lookup was for a different name,
+		   reassemble the tree and splay for this name;
+		   a repeated lookup reuses the cached split */
+		if (splay->ist_name != name) {
+			ipc_splay_prim_assemble(root,
+				&splay->ist_ltree, splay->ist_ltreep,
+				&splay->ist_rtree, splay->ist_rtreep);
+			ipc_splay_prim_lookup(name, root, &root,
+				&splay->ist_ltree, &splay->ist_ltreep,
+				&splay->ist_rtree, &splay->ist_rtreep);
+			splay->ist_name = name;
+			splay->ist_root = root;
+		}
+
+		/* after splaying, name is present iff it is at the root */
+		if (name != root->ite_name)
+			root = ITE_NULL;
+	}
+
+	ist_unlock(splay);
+
+	return root;
+}
+
+/*
+ * Routine: ipc_splay_tree_insert
+ * Purpose:
+ * Inserts a new entry into a splay tree.
+ * The caller supplies a new entry.
+ * The name can't already be present in the tree.
+ */
+
+void
+ipc_splay_tree_insert(
+	ipc_splay_tree_t splay,
+	mach_port_t name,
+	ipc_tree_entry_t entry)
+{
+	ipc_tree_entry_t root;
+
+	assert(entry != ITE_NULL);
+
+	ist_lock(splay);
+
+	root = splay->ist_root;
+	if (root == ITE_NULL) {
+		/* empty tree: the new entry becomes the sole node */
+		entry->ite_lchild = ITE_NULL;
+		entry->ite_rchild = ITE_NULL;
+	} else {
+		/* splay for name so the last-traversed node is at root;
+		   name must not already be present */
+		if (splay->ist_name != name) {
+			ipc_splay_prim_assemble(root,
+				&splay->ist_ltree, splay->ist_ltreep,
+				&splay->ist_rtree, splay->ist_rtreep);
+			ipc_splay_prim_lookup(name, root, &root,
+				&splay->ist_ltree, &splay->ist_ltreep,
+				&splay->ist_rtree, &splay->ist_rtreep);
+		}
+
+		assert(root->ite_name != name);
+
+		/* push the old root onto the appropriate side tree;
+		   the failed lookup guarantees the matching child is null */
+		if (name < root->ite_name) {
+			assert(root->ite_lchild == ITE_NULL);
+
+			*splay->ist_ltreep = ITE_NULL;
+			*splay->ist_rtreep = root;
+		} else {
+			assert(root->ite_rchild == ITE_NULL);
+
+			*splay->ist_ltreep = root;
+			*splay->ist_rtreep = ITE_NULL;
+		}
+
+		/* the side trees become the new entry's children */
+		entry->ite_lchild = splay->ist_ltree;
+		entry->ite_rchild = splay->ist_rtree;
+	}
+
+	/* install the entry as root, in assembled form */
+	entry->ite_name = name;
+	splay->ist_root = entry;
+	splay->ist_name = name;
+	splay->ist_ltreep = &splay->ist_ltree;
+	splay->ist_rtreep = &splay->ist_rtree;
+
+	ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_tree_delete
+ * Purpose:
+ * Deletes an entry from a splay tree.
+ * The name must be present in the tree.
+ * Frees the entry.
+ *
+ * The "entry" argument isn't currently used.
+ * Other implementations might want it, though.
+ */
+
+void
+ipc_splay_tree_delete(
+	ipc_splay_tree_t splay,
+	mach_port_t name,
+	ipc_tree_entry_t entry)
+{
+	ipc_tree_entry_t root, saved;
+
+	ist_lock(splay);
+
+	root = splay->ist_root;
+	assert(root != ITE_NULL);
+
+	/* splay the doomed entry to the root if it isn't there already */
+	if (splay->ist_name != name) {
+		ipc_splay_prim_assemble(root,
+			&splay->ist_ltree, splay->ist_ltreep,
+			&splay->ist_rtree, splay->ist_rtreep);
+		ipc_splay_prim_lookup(name, root, &root,
+			&splay->ist_ltree, &splay->ist_ltreep,
+			&splay->ist_rtree, &splay->ist_rtreep);
+	}
+
+	assert(root->ite_name == name);
+	assert(root == entry);
+
+	/* detach the root's subtrees onto the side trees and free it */
+	*splay->ist_ltreep = root->ite_lchild;
+	*splay->ist_rtreep = root->ite_rchild;
+	ite_free(root);
+
+	root = splay->ist_ltree;
+	saved = splay->ist_rtree;
+
+	if (root == ITE_NULL)
+		root = saved;
+	else if (saved != ITE_NULL) {
+		/*
+		 * Find the largest node in the left subtree, and splay it
+		 * to the root.  Then add the saved right subtree.
+		 */
+
+		ipc_splay_prim_lookup(MACH_PORT_LARGEST, root, &root,
+			&splay->ist_ltree, &splay->ist_ltreep,
+			&splay->ist_rtree, &splay->ist_rtreep);
+		ipc_splay_prim_assemble(root,
+			&splay->ist_ltree, splay->ist_ltreep,
+			&splay->ist_rtree, splay->ist_rtreep);
+
+		assert(root->ite_rchild == ITE_NULL);
+		root->ite_rchild = saved;
+	}
+
+	/* reinstall the joined tree in assembled form */
+	splay->ist_root = root;
+	if (root != ITE_NULL) {
+		splay->ist_name = root->ite_name;
+		splay->ist_ltreep = &splay->ist_ltree;
+		splay->ist_rtreep = &splay->ist_rtree;
+	}
+
+	ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_tree_split
+ * Purpose:
+ * Split a splay tree. Puts all entries smaller than "name"
+ * into a new tree, "small".
+ *
+ * Doesn't do locking on "small", because nobody else
+ * should be fiddling with the uninitialized tree.
+ */
+
+void
+ipc_splay_tree_split(
+	ipc_splay_tree_t splay,
+	mach_port_t name,
+	ipc_splay_tree_t small)
+{
+	ipc_tree_entry_t root;
+
+	ipc_splay_tree_init(small);
+
+	ist_lock(splay);
+
+	root = splay->ist_root;
+	if (root != ITE_NULL) {
+		/* lookup name, to get it (or last traversed) to the top */
+
+		if (splay->ist_name != name) {
+			ipc_splay_prim_assemble(root,
+				&splay->ist_ltree, splay->ist_ltreep,
+				&splay->ist_rtree, splay->ist_rtreep);
+			ipc_splay_prim_lookup(name, root, &root,
+				&splay->ist_ltree, &splay->ist_ltreep,
+				&splay->ist_rtree, &splay->ist_rtreep);
+		}
+
+		if (root->ite_name < name) {
+			/* root goes into small */
+
+			/* root plus the left side tree are all < name */
+			*splay->ist_ltreep = root->ite_lchild;
+			*splay->ist_rtreep = ITE_NULL;
+			root->ite_lchild = splay->ist_ltree;
+			assert(root->ite_rchild == ITE_NULL);
+
+			small->ist_root = root;
+			small->ist_name = root->ite_name;
+			small->ist_ltreep = &small->ist_ltree;
+			small->ist_rtreep = &small->ist_rtree;
+
+			/* rtree goes into splay */
+
+			root = splay->ist_rtree;
+			splay->ist_root = root;
+			if (root != ITE_NULL) {
+				splay->ist_name = root->ite_name;
+				splay->ist_ltreep = &splay->ist_ltree;
+				splay->ist_rtreep = &splay->ist_rtree;
+			}
+		} else {
+			/* root stays in splay */
+
+			/* everything < name lives in the left side tree */
+			*splay->ist_ltreep = root->ite_lchild;
+			root->ite_lchild = ITE_NULL;
+
+			splay->ist_root = root;
+			splay->ist_name = name;
+			splay->ist_ltreep = &splay->ist_ltree;
+
+			/* ltree goes into small */
+
+			root = splay->ist_ltree;
+			small->ist_root = root;
+			if (root != ITE_NULL) {
+				small->ist_name = root->ite_name;
+				small->ist_ltreep = &small->ist_ltree;
+				small->ist_rtreep = &small->ist_rtree;
+			}
+		}
+	}
+
+	ist_unlock(splay);
+}
+
+/*
+ * Routine: ipc_splay_tree_join
+ * Purpose:
+ * Joins two splay trees. Merges the entries in "small",
+ * which must all be smaller than the entries in "splay",
+ * into "splay".
+ */
+
+void
+ipc_splay_tree_join(
+	ipc_splay_tree_t splay,
+	ipc_splay_tree_t small)
+{
+	ipc_tree_entry_t sroot;
+
+	/* pull entries out of small */
+
+	ist_lock(small);
+
+	sroot = small->ist_root;
+	if (sroot != ITE_NULL) {
+		/* assemble small into a single subtree and empty it */
+		ipc_splay_prim_assemble(sroot,
+			&small->ist_ltree, small->ist_ltreep,
+			&small->ist_rtree, small->ist_rtreep);
+		small->ist_root = ITE_NULL;
+	}
+
+	ist_unlock(small);
+
+	/* put entries, if any, into splay */
+
+	if (sroot != ITE_NULL) {
+		ipc_tree_entry_t root;
+
+		ist_lock(splay);
+
+		root = splay->ist_root;
+		if (root == ITE_NULL) {
+			root = sroot;
+		} else {
+			/* get smallest entry in splay tree to top */
+
+			if (splay->ist_name != MACH_PORT_SMALLEST) {
+				ipc_splay_prim_assemble(root,
+					&splay->ist_ltree, splay->ist_ltreep,
+					&splay->ist_rtree, splay->ist_rtreep);
+				ipc_splay_prim_lookup(MACH_PORT_SMALLEST,
+					root, &root,
+					&splay->ist_ltree, &splay->ist_ltreep,
+					&splay->ist_rtree, &splay->ist_rtreep);
+			}
+
+			ipc_splay_prim_assemble(root,
+				&splay->ist_ltree, splay->ist_ltreep,
+				&splay->ist_rtree, splay->ist_rtreep);
+
+			/* smallest node has no left child; hang small there */
+			assert(root->ite_lchild == ITE_NULL);
+			assert(sroot->ite_name < root->ite_name);
+			root->ite_lchild = sroot;
+		}
+
+		splay->ist_root = root;
+		splay->ist_name = root->ite_name;
+		splay->ist_ltreep = &splay->ist_ltree;
+		splay->ist_rtreep = &splay->ist_rtree;
+
+		ist_unlock(splay);
+	}
+}
+
+/*
+ * Routine: ipc_splay_tree_bounds
+ * Purpose:
+ * Given a name, returns the largest value present
+ * in the tree that is smaller than or equal to the name,
+ * or ~0 if no such value exists. Similarly, returns
+ * the smallest value present that is greater than or
+ * equal to the name, or 0 if no such value exists.
+ *
+ * Hence, if
+ * lower = upper, then lower = name = upper
+ * and name is present in the tree
+ * lower = ~0 and upper = 0,
+ * then the tree is empty
+ * lower = ~0 and upper > 0, then name < upper
+ * and upper is smallest value in tree
+ * lower < ~0 and upper = 0, then lower < name
+ * and lower is largest value in tree
+ * lower < ~0 and upper > 0, then lower < name < upper
+ * and they are tight bounds on name
+ *
+ * (Note MACH_PORT_SMALLEST = 0 and MACH_PORT_LARGEST = ~0.)
+ */
+
void
ipc_splay_tree_bounds(
	ipc_splay_tree_t	splay,
	mach_port_t		name,
	mach_port_t		*lowerp,
	mach_port_t		*upperp)
{
	ipc_tree_entry_t root;

	ist_lock(splay);

	root = splay->ist_root;
	if (root == ITE_NULL) {
		/* empty tree: report inverted bounds (lower > upper) */
		*lowerp = MACH_PORT_LARGEST;
		*upperp = MACH_PORT_SMALLEST;
	} else {
		mach_port_t rname;

		/*
		 *	Splay "name" toward the root so that its nearest
		 *	neighbors end up at the tops of the side trees.
		 */
		if (splay->ist_name != name) {
			ipc_splay_prim_assemble(root,
				&splay->ist_ltree, splay->ist_ltreep,
				&splay->ist_rtree, splay->ist_rtreep);
			ipc_splay_prim_lookup(name, root, &root,
				&splay->ist_ltree, &splay->ist_ltreep,
				&splay->ist_rtree, &splay->ist_rtreep);
			splay->ist_name = name;
			splay->ist_root = root;
		}

		rname = root->ite_name;

		/*
		 * OK, it's a hack. We convert the ltreep and rtreep
		 * pointers back into real entry pointers,
		 * so we can pick the names out of the entries.
		 */

		if (rname <= name)
			*lowerp = rname;
		else if (splay->ist_ltreep == &splay->ist_ltree)
			/* left tree is empty: nothing <= name exists */
			*lowerp = MACH_PORT_LARGEST;
		else {
			ipc_tree_entry_t entry;

			/*
			 *	ist_ltreep points at some entry's ite_rchild
			 *	field; back up by the field offset to recover
			 *	the containing entry (largest in left tree).
			 */
			entry = (ipc_tree_entry_t)
				((char *)splay->ist_ltreep -
				 ((char *)&root->ite_rchild -
				  (char *)root));
			*lowerp = entry->ite_name;
		}

		if (rname >= name)
			*upperp = rname;
		else if (splay->ist_rtreep == &splay->ist_rtree)
			/* right tree is empty: nothing >= name exists */
			*upperp = MACH_PORT_SMALLEST;
		else {
			ipc_tree_entry_t entry;

			/*
			 *	Likewise recover the entry containing the
			 *	ite_lchild field (smallest in right tree).
			 */
			entry = (ipc_tree_entry_t)
				((char *)splay->ist_rtreep -
				 ((char *)&root->ite_lchild -
				  (char *)root));
			*upperp = entry->ite_name;
		}
	}

	ist_unlock(splay);
}
+
+/*
+ * Routine: ipc_splay_traverse_start
+ * Routine: ipc_splay_traverse_next
+ * Routine: ipc_splay_traverse_finish
+ * Purpose:
+ * Perform a symmetric order traversal of a splay tree.
+ * Usage:
+ * for (entry = ipc_splay_traverse_start(splay);
+ * entry != ITE_NULL;
+ * entry = ipc_splay_traverse_next(splay, delete)) {
+ * do something with entry
+ * }
+ * ipc_splay_traverse_finish(splay);
+ *
+ * If "delete" is TRUE, then the current entry
+ * is removed from the tree and deallocated.
+ *
+ * During the traversal, the splay tree is locked.
+ */
+
/*
 *	Begin a symmetric-order traversal: assemble the tree, then walk
 *	to the leftmost (smallest) entry, reversing ite_lchild pointers
 *	along the way so each points back at its parent.  The traversal
 *	position is parked in ist_ltree (current entry) and ist_rtree
 *	(chain of reversed parents) for ipc_splay_traverse_next.
 *	Returns the first entry, or ITE_NULL if the tree is empty.
 */
ipc_tree_entry_t
ipc_splay_traverse_start(
	ipc_splay_tree_t	splay)
{
	ipc_tree_entry_t current, parent;

	ist_lock(splay);

	current = splay->ist_root;
	if (current != ITE_NULL) {
		ipc_splay_prim_assemble(current,
			&splay->ist_ltree, splay->ist_ltreep,
			&splay->ist_rtree, splay->ist_rtreep);

		parent = ITE_NULL;

		/* descend to the smallest entry, link-reversing as we go */
		while (current->ite_lchild != ITE_NULL) {
			ipc_tree_entry_t next;

			next = current->ite_lchild;
			current->ite_lchild = parent;
			parent = current;
			current = next;
		}

		/* stash traversal state for ipc_splay_traverse_next */
		splay->ist_ltree = current;
		splay->ist_rtree = parent;
	}

	return current;
}
+
/*
 *	Return the next entry of the traversal begun by
 *	ipc_splay_traverse_start, or ITE_NULL when done.  If "delete"
 *	is TRUE, the current entry is first unlinked and freed.
 *	The position is carried in ist_ltree (current entry) and
 *	ist_rtree (chain of link-reversed parents).
 */
ipc_tree_entry_t
ipc_splay_traverse_next(
	ipc_splay_tree_t	splay,
	boolean_t		delete)
{
	ipc_tree_entry_t current, parent;

	/* pick up where traverse_entry left off */

	current = splay->ist_ltree;
	parent = splay->ist_rtree;
	assert(current != ITE_NULL);

	if (!delete)
		goto traverse_right;

	/* we must delete current and patch the tree */

	if (current->ite_lchild == ITE_NULL) {
		if (current->ite_rchild == ITE_NULL) {
			/* like traverse_back, but with deletion */

			if (parent == ITE_NULL) {
				/* deleted the last entry in the tree */
				ite_free(current);

				splay->ist_root = ITE_NULL;
				return ITE_NULL;
			}

			if (current->ite_name < parent->ite_name) {
				/* current was the (reversed) left child */
				ite_free(current);

				current = parent;
				parent = current->ite_lchild;
				current->ite_lchild = ITE_NULL;
				goto traverse_entry;
			} else {
				/* current was the (reversed) right child */
				ite_free(current);

				current = parent;
				parent = current->ite_rchild;
				current->ite_rchild = ITE_NULL;
				goto traverse_back;
			}
		} else {
			/* splice current's right child into its place */
			ipc_tree_entry_t prev;

			prev = current;
			current = current->ite_rchild;
			ite_free(prev);
			goto traverse_left;
		}
	} else {
		if (current->ite_rchild == ITE_NULL) {
			/* splice current's left child into its place */
			ipc_tree_entry_t prev;

			prev = current;
			current = current->ite_lchild;
			ite_free(prev);
			goto traverse_back;
		} else {
			ipc_tree_entry_t prev;
			ipc_tree_entry_t ltree, rtree;
			ipc_tree_entry_t *ltreep, *rtreep;

			/* replace current with largest of left children */

			prev = current;
			ipc_splay_prim_lookup(MACH_PORT_LARGEST,
				current->ite_lchild, &current,
				&ltree, &ltreep, &rtree, &rtreep);
			ipc_splay_prim_assemble(current,
				&ltree, ltreep, &rtree, rtreep);

			assert(current->ite_rchild == ITE_NULL);
			current->ite_rchild = prev->ite_rchild;
			ite_free(prev);
			goto traverse_right;
		}
	}
	/*NOTREACHED*/

	/*
	 * A state machine: for each entry, we
	 *	1) traverse left subtree
	 *	2) traverse the entry
	 *	3) traverse right subtree
	 *	4) traverse back to parent
	 */

    traverse_left:
	if (current->ite_lchild != ITE_NULL) {
		/* descend left, reversing the link to point at the parent */
		ipc_tree_entry_t next;

		next = current->ite_lchild;
		current->ite_lchild = parent;
		parent = current;
		current = next;
		goto traverse_left;
	}

    traverse_entry:
	/* yield current; remember position for the next call */
	splay->ist_ltree = current;
	splay->ist_rtree = parent;
	return current;

    traverse_right:
	if (current->ite_rchild != ITE_NULL) {
		/* descend right, reversing the link to point at the parent */
		ipc_tree_entry_t next;

		next = current->ite_rchild;
		current->ite_rchild = parent;
		parent = current;
		current = next;
		goto traverse_left;
	}

    traverse_back:
	/* done with this subtree: restore the reversed link and climb */
	if (parent == ITE_NULL) {
		splay->ist_root = current;
		return ITE_NULL;
	}

	if (current->ite_name < parent->ite_name) {
		/* came up from the left subtree; the parent itself is next */
		ipc_tree_entry_t prev;

		prev = current;
		current = parent;
		parent = current->ite_lchild;
		current->ite_lchild = prev;
		goto traverse_entry;
	} else {
		/* came up from the right subtree; keep climbing */
		ipc_tree_entry_t prev;

		prev = current;
		current = parent;
		parent = current->ite_rchild;
		current->ite_rchild = prev;
		goto traverse_back;
	}
}
+
+void
+ipc_splay_traverse_finish(
+ ipc_splay_tree_t splay)
+{
+ ipc_tree_entry_t root;
+
+ root = splay->ist_root;
+ if (root != ITE_NULL) {
+ splay->ist_name = root->ite_name;
+ splay->ist_ltreep = &splay->ist_ltree;
+ splay->ist_rtreep = &splay->ist_rtree;
+ }
+
+ ist_unlock(splay);
+}
+
diff --git a/ipc/ipc_splay.h b/ipc/ipc_splay.h
new file mode 100644
index 0000000..d3316ef
--- /dev/null
+++ b/ipc/ipc_splay.h
@@ -0,0 +1,114 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_splay.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of primitive splay tree operations.
+ */
+
+#ifndef _IPC_IPC_SPLAY_H_
+#define _IPC_IPC_SPLAY_H_
+
+#include <mach/port.h>
+#include <kern/assert.h>
+#include <kern/macro_help.h>
+#include <ipc/ipc_entry.h>
+
/*
 *	A splay tree, kept between operations in the "split" state
 *	left by the last lookup: ist_root holds the middle tree,
 *	ist_ltree/ist_rtree the entries smaller/larger than ist_name,
 *	and ist_ltreep/ist_rtreep point at the attachment slots
 *	within those side trees.
 */
typedef struct ipc_splay_tree {
	mach_port_t ist_name;		/* name used in last lookup */
	ipc_tree_entry_t ist_root;	/* root of middle tree */
	ipc_tree_entry_t ist_ltree;	/* root of left tree */
	ipc_tree_entry_t *ist_ltreep;	/* pointer into left tree */
	ipc_tree_entry_t ist_rtree;	/* root of right tree */
	ipc_tree_entry_t *ist_rtreep;	/* pointer into right tree */
} *ipc_splay_tree_t;

/* the locking hooks are currently no-ops */
#define	ist_lock(splay)		/* no locking */
#define	ist_unlock(splay)	/* no locking */
+
+/* Initialize a raw splay tree */
+extern void ipc_splay_tree_init(
+ ipc_splay_tree_t splay);
+
+/* Pick a random entry in a splay tree */
+extern boolean_t ipc_splay_tree_pick(
+ ipc_splay_tree_t splay,
+ mach_port_t *namep,
+ ipc_tree_entry_t *entryp);
+
+/* Find an entry in a splay tree */
+extern ipc_tree_entry_t ipc_splay_tree_lookup(
+ ipc_splay_tree_t splay,
+ mach_port_t name);
+
+/* Insert a new entry into a splay tree */
+extern void ipc_splay_tree_insert(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_tree_entry_t entry);
+
+/* Delete an entry from a splay tree */
+extern void ipc_splay_tree_delete(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_tree_entry_t entry);
+
+/* Split a splay tree */
+extern void ipc_splay_tree_split(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ ipc_splay_tree_t entry);
+
+/* Join two splay trees */
+extern void ipc_splay_tree_join(
+ ipc_splay_tree_t splay,
+ ipc_splay_tree_t small);
+
+/* Do a bounded splay tree lookup */
+extern void ipc_splay_tree_bounds(
+ ipc_splay_tree_t splay,
+ mach_port_t name,
+ mach_port_t *lowerp,
+ mach_port_t *upperp);
+
+/* Initialize a symmetric order traversal of a splay tree */
+extern ipc_tree_entry_t ipc_splay_traverse_start(
+ ipc_splay_tree_t splay);
+
+/* Return the next entry in a symmetric order traversal of a splay tree */
+extern ipc_tree_entry_t ipc_splay_traverse_next(
+ ipc_splay_tree_t splay,
+ boolean_t delete);
+
+/* Terminate a symmetric order traversal of a splay tree */
+extern void ipc_splay_traverse_finish(
+ ipc_splay_tree_t splay);
+
+#endif /* _IPC_IPC_SPLAY_H_ */
diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
new file mode 100644
index 0000000..e572358
--- /dev/null
+++ b/ipc/ipc_table.c
@@ -0,0 +1,205 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_table.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Functions to manipulate tables of IPC capabilities.
+ */
+
+#include <mach/kern_return.h>
+#include <mach/vm_param.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_entry.h>
+#include <kern/kalloc.h>
+#include <vm/vm_kern.h>
+
+/*
+ * Forward declarations
+ */
+void ipc_table_fill(
+ ipc_table_size_t its,
+ unsigned int num,
+ unsigned int min,
+ vm_size_t elemsize);
+
+/*
+ * We borrow the kalloc map, rather than creating
+ * yet another submap of the kernel map.
+ */
+
+extern vm_map_t kalloc_map;
+
+ipc_table_size_t ipc_table_entries;
+unsigned int ipc_table_entries_size = 512;
+
+ipc_table_size_t ipc_table_dnrequests;
+unsigned int ipc_table_dnrequests_size = 64;
+
+void
+ipc_table_fill(
+ ipc_table_size_t its, /* array to fill */
+ unsigned int num, /* size of array */
+ unsigned int min, /* at least this many elements */
+ vm_size_t elemsize) /* size of elements */
+{
+ unsigned int index;
+ vm_size_t minsize = min * elemsize;
+ vm_size_t size;
+ vm_size_t incrsize;
+
+ /* first use powers of two, up to the page size */
+
+ for (index = 0, size = 1;
+ (index < num) && (size < PAGE_SIZE);
+ size <<= 1) {
+ if (size >= minsize) {
+ its[index].its_size = size / elemsize;
+ index++;
+ }
+ }
+
+ /* then increments of a page, then two pages, etc. */
+
+ for (incrsize = PAGE_SIZE; index < num;) {
+ unsigned int period;
+
+ for (period = 0;
+ (period < 15) && (index < num);
+ period++, size += incrsize) {
+ if (size >= minsize) {
+ its[index].its_size = size / elemsize;
+ index++;
+ }
+ }
+ if (incrsize < (PAGE_SIZE << 3))
+ incrsize <<= 1;
+ }
+}
+
/*
 *	Routine:	ipc_table_init
 *	Purpose:
 *		Allocate and fill the arrays of allowed table sizes for
 *		capability tables and dead-name request tables.
 */
void
ipc_table_init(void)
{
	ipc_table_entries = (ipc_table_size_t)
		kalloc(sizeof(struct ipc_table_size) *
		       ipc_table_entries_size);
	assert(ipc_table_entries != ITS_NULL);

	/* capability tables hold at least 4 entries */
	ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1,
		       4, sizeof(struct ipc_entry));

	/* the last two elements should have the same size */

	ipc_table_entries[ipc_table_entries_size - 1].its_size =
		ipc_table_entries[ipc_table_entries_size - 2].its_size;


	ipc_table_dnrequests = (ipc_table_size_t)
		kalloc(sizeof(struct ipc_table_size) *
		       ipc_table_dnrequests_size);
	assert(ipc_table_dnrequests != ITS_NULL);

	/* dead-name request tables hold at least 2 slots */
	ipc_table_fill(ipc_table_dnrequests, ipc_table_dnrequests_size - 1,
		       2, sizeof(struct ipc_port_request));

	/* the last element should have zero size */

	ipc_table_dnrequests[ipc_table_dnrequests_size - 1].its_size = 0;
}
+
+/*
+ * Routine: ipc_table_alloc
+ * Purpose:
+ * Allocate a table.
+ * Conditions:
+ * May block.
+ */
+
+vm_offset_t
+ipc_table_alloc(
+ vm_size_t size)
+{
+ vm_offset_t table;
+
+ if (size < PAGE_SIZE)
+ table = kalloc(size);
+ else
+ if (kmem_alloc(kalloc_map, &table, size) != KERN_SUCCESS)
+ table = 0;
+
+ return table;
+}
+
+/*
+ * Routine: ipc_table_realloc
+ * Purpose:
+ * Reallocate a big table.
+ *
+ * The new table remaps the old table,
+ * so copying is not necessary.
+ * Conditions:
+ * Only works for page-size or bigger tables.
+ * May block.
+ */
+
+vm_offset_t
+ipc_table_realloc(
+ vm_size_t old_size,
+ vm_offset_t old_table,
+ vm_size_t new_size)
+{
+ vm_offset_t new_table;
+
+ if (kmem_realloc(kalloc_map, old_table, old_size,
+ &new_table, new_size) != KERN_SUCCESS)
+ new_table = 0;
+
+ return new_table;
+}
+
+/*
+ * Routine: ipc_table_free
+ * Purpose:
+ * Free a table allocated with ipc_table_alloc or
+ * ipc_table_realloc.
+ * Conditions:
+ * May block.
+ */
+
+void
+ipc_table_free(
+ vm_size_t size,
+ vm_offset_t table)
+{
+ if (size < PAGE_SIZE)
+ kfree(table, size);
+ else
+ kmem_free(kalloc_map, table, size);
+}
diff --git a/ipc/ipc_table.h b/ipc/ipc_table.h
new file mode 100644
index 0000000..3bfcc46
--- /dev/null
+++ b/ipc/ipc_table.h
@@ -0,0 +1,138 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_table.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for tables, used for IPC capabilities (ipc_entry_t)
+ * and dead-name requests (ipc_port_request_t).
+ */
+
+#ifndef _IPC_IPC_TABLE_H_
+#define _IPC_IPC_TABLE_H_
+
+#include <mach/boolean.h>
+#include <mach/vm_param.h>
+
+/*
+ * The is_table_next field of an ipc_space_t points to
+ * an ipc_table_size structure. These structures must
+ * be elements of an array, ipc_table_entries.
+ *
+ * The array must end with two elements with the same its_size value.
+ * Except for the terminating element, the its_size values must
+ * be strictly increasing. The largest (last) its_size value
+ * must be less than or equal to MACH_PORT_INDEX(MACH_PORT_DEAD).
+ * This ensures that
+ * 1) MACH_PORT_INDEX(MACH_PORT_DEAD) isn't a valid index
+ * in the table, so ipc_entry_get won't allocate it.
+ * 2) MACH_PORT_MAKE(index+1, 0) and MAKE_PORT_MAKE(size, 0)
+ * won't ever overflow.
+ *
+ *
+ * The ipr_size field of the first element in a table of
+ * dead-name requests (ipc_port_request_t) points to the
+ * ipc_table_size structure. The structures must be elements
+ * of ipc_table_dnrequests. ipc_table_dnrequests must end
+ * with an element with zero its_size, and except for this last
+ * element, the its_size values must be strictly increasing.
+ *
+ * The is_table_next field points to the ipc_table_size structure
+ * for the next larger size of table, not the one currently in use.
+ * The ipr_size field points to the currently used ipc_table_size.
+ */
+
typedef unsigned int ipc_table_index_t;	/* index into tables */
typedef unsigned int ipc_table_elems_t;	/* size of tables */

/* one allowed table size; arrays of these describe growth sequences */
typedef struct ipc_table_size {
	ipc_table_elems_t its_size;	/* number of elements in table */
} *ipc_table_size_t;

#define	ITS_NULL	((ipc_table_size_t) 0)

/* growth sequences, allocated and filled by ipc_table_init */
extern ipc_table_size_t ipc_table_entries;
extern ipc_table_size_t ipc_table_dnrequests;
+
/* Initialize the table-size arrays; empty () would leave the
   arguments unchecked, so declare a proper (void) prototype to
   match the definition. */
extern void
ipc_table_init(void);
+
+/*
+ * Note that ipc_table_alloc, ipc_table_realloc, and ipc_table_free
+ * all potentially use the VM system. Hence simple locks can't
+ * be held across them.
+ *
+ * We can't use a copying realloc, because the realloc happens
+ * with the data unlocked. ipc_table_realloc remaps the data,
+ * so it is OK.
+ */
+
+/* Allocate a table */
+extern vm_offset_t ipc_table_alloc(
+ vm_size_t size);
+
+/* Reallocate a big table */
+extern vm_offset_t ipc_table_realloc(
+ vm_size_t old_size,
+ vm_offset_t old_table,
+ vm_size_t new_size);
+
+/* Free a table */
+extern void ipc_table_free(
+ vm_size_t size,
+ vm_offset_t table);
+
/* Allocate a capability table of (its)->its_size entries. */
#define	it_entries_alloc(its)						\
	((ipc_entry_t)							\
	 ipc_table_alloc((its)->its_size * sizeof(struct ipc_entry)))

/* True if the table is big enough (>= a page) for ipc_table_realloc. */
#define	it_entries_reallocable(its)					\
	(((its)->its_size * sizeof(struct ipc_entry)) >= PAGE_SIZE)

/* Remap a capability table from size (its) to size (nits). */
#define	it_entries_realloc(its, table, nits)				\
	((ipc_entry_t)							\
	 ipc_table_realloc((its)->its_size * sizeof(struct ipc_entry),	\
			   (vm_offset_t)(table),			\
			   (nits)->its_size * sizeof(struct ipc_entry)))

/* Free a capability table obtained from the macros above. */
#define	it_entries_free(its, table)					\
	ipc_table_free((its)->its_size * sizeof(struct ipc_entry),	\
		       (vm_offset_t)(table))

/* Allocate a dead-name request table of (its)->its_size slots. */
#define	it_dnrequests_alloc(its)					\
	((ipc_port_request_t)						\
	 ipc_table_alloc((its)->its_size *				\
			 sizeof(struct ipc_port_request)))

/* Free a dead-name request table. */
#define	it_dnrequests_free(its, table)					\
	ipc_table_free((its)->its_size *				\
		       sizeof(struct ipc_port_request),			\
		       (vm_offset_t)(table))
+
+#endif /* _IPC_IPC_TABLE_H_ */
diff --git a/ipc/ipc_target.c b/ipc/ipc_target.c
new file mode 100644
index 0000000..b791db2
--- /dev/null
+++ b/ipc/ipc_target.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+/*
+ * File: ipc_target.c
+ *
+ * Implementation for common part of IPC ports and port sets
+ * representing a target of messages and migrating RPCs.
+ */
+
+#include "sched_prim.h"
+#include "ipc_target.h"
+
/*
 *	Initialize the common target state: record the port name
 *	and set up an empty message queue.
 */
void
ipc_target_init(struct ipc_target *ipt, mach_port_t name)
{
	ipt->ipt_name = name;
	ipc_mqueue_init(&ipt->ipt_messages);

#ifdef MIGRATING_THREADS
	/* start with message-based RPC and no activations in the pool */
	ipt->ipt_type = IPT_TYPE_MESSAGE_RPC;
	ipt->ipt_acts = 0;

	ipc_target_machine_init(ipt);
#endif
}
+
/*
 *	Tear down a target.  Currently a no-op; exists as the
 *	counterpart of ipc_target_init.
 */
void
ipc_target_terminate(struct ipc_target *ipt)
{
}
+
#ifdef MIGRATING_THREADS
/*
 *	Take an activation from the target's pool, sleeping until
 *	one becomes available.
 */
struct Act *
ipc_target_block(struct ipc_target *ipt)
{
	struct Act *act;

	ipt_lock(ipt);
	while ((act = ipt->ipt_acts) == 0) {
		/* XXX mp unsafe */
		ipt->ipt_waiting = 1;
		ipt_unlock(ipt);
		thread_wait((int)&ipt->ipt_acts, FALSE);
		ipt_lock(ipt);
	}
	/* pop the activation off the pool list */
	ipt->ipt_acts = act->ipt_next;
	ipt_unlock(ipt);

	return act;
}

/*
 *	Wake any thread blocked in ipc_target_block waiting for an
 *	activation from this pool.
 */
void
ipc_target_wakeup(struct ipc_target *ipt)
{
	ipt_lock(ipt);
	if (ipt->ipt_waiting) {
		thread_wakeup((int)&ipt->ipt_acts);
		ipt->ipt_waiting = 0;
	}
	ipt_unlock(ipt);
}
#endif /* MIGRATING_THREADS */
+
diff --git a/ipc/ipc_target.h b/ipc/ipc_target.h
new file mode 100644
index 0000000..a66e687
--- /dev/null
+++ b/ipc/ipc_target.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ */
+/*
+ * File: ipc_target.h
+ *
+ * Common part of IPC ports and port sets
+ * representing a target of messages and migrating RPCs.
+ */
+
+#ifndef _IPC_IPC_RECEIVER_H_
+#define _IPC_IPC_RECEIVER_H_
+
+#include "ipc_mqueue.h"
+#include "ipc_object.h"
+#include <mach/rpc.h>
+
/*
 *	State shared by ports and port sets: the name and message
 *	queue that make the object a target of messages (and, under
 *	MIGRATING_THREADS, of migrating RPCs).
 */
typedef struct ipc_target {

	/* common object header; supplies the lock and reference count
	   used by the ipt_* macros below */
	struct ipc_object ipt_object;

	mach_port_t ipt_name;
	struct ipc_mqueue ipt_messages;

#ifdef MIGRATING_THREADS
	/*** Migrating RPC stuff ***/

	int ipt_type;

	/* User entry info for migrating RPC */
	rpc_info_t ipt_rpcinfo;

	/* List of available activations, all active but not in use. */
	struct Act *ipt_acts;

	/* TRUE if someone is waiting for an activation from this pool. */
	int ipt_waiting;
#endif /* MIGRATING_THREADS */

} *ipc_target_t;
+
+#define IPT_TYPE_MESSAGE_RPC 1
+#define IPT_TYPE_MIGRATE_RPC 2
+
+void ipc_target_init(struct ipc_target *ipt, mach_port_t name);
+void ipc_target_terminate(struct ipc_target *ipt);
+
+#define ipt_lock(ipt) io_lock(&(ipt)->ipt_object)
+#define ipt_unlock(ipt) io_unlock(&(ipt)->ipt_object)
+#define ipt_reference(ipt) io_reference(&(ipt)->ipt_object)
+#define ipt_release(ipt) io_release(&(ipt)->ipt_object)
+#define ipt_check_unlock(ipt) io_check_unlock(&(ipt)->ipt_object)
+
+#endif /* _IPC_IPC_RECEIVER_H_ */
diff --git a/ipc/ipc_thread.c b/ipc/ipc_thread.c
new file mode 100644
index 0000000..1e738a5
--- /dev/null
+++ b/ipc/ipc_thread.c
@@ -0,0 +1,107 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_thread.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * IPC operations on threads.
+ */
+
+#include <kern/assert.h>
+#include <ipc/ipc_thread.h>
+
+/*
+ * Routine: ipc_thread_enqueue
+ * Purpose:
+ * Enqueue a thread.
+ */
+
/*
 *	Routine:	ipc_thread_enqueue
 *	Purpose:
 *		Enqueue a thread; out-of-line form of
 *		ipc_thread_enqueue_macro.
 */
void
ipc_thread_enqueue(
	ipc_thread_queue_t	queue,
	ipc_thread_t		thread)
{
	ipc_thread_enqueue_macro(queue, thread);
}
+
+/*
+ * Routine: ipc_thread_dequeue
+ * Purpose:
+ * Dequeue and return a thread.
+ */
+
+ipc_thread_t
+ipc_thread_dequeue(
+ ipc_thread_queue_t queue)
+{
+ ipc_thread_t first;
+
+ first = ipc_thread_queue_first(queue);
+
+ if (first != ITH_NULL)
+ ipc_thread_rmqueue_first_macro(queue, first);
+
+ return first;
+}
+
+/*
+ * Routine: ipc_thread_rmqueue
+ * Purpose:
+ * Pull a thread out of a queue.
+ */
+
/*
 *	Routine:	ipc_thread_rmqueue
 *	Purpose:
 *		Pull a thread out of a queue.  The queue is a circular
 *		doubly-linked list through ith_next/ith_prev with
 *		ithq_base naming the head.
 */
void
ipc_thread_rmqueue(
	ipc_thread_queue_t	queue,
	ipc_thread_t		thread)
{
	ipc_thread_t next, prev;

	assert(queue->ithq_base != ITH_NULL);

	next = thread->ith_next;
	prev = thread->ith_prev;

	if (next == thread) {
		/* thread was the only element in the queue */
		assert(prev == thread);
		assert(queue->ithq_base == thread);

		queue->ithq_base = ITH_NULL;
	} else {
		/* advance the head if we are removing it */
		if (queue->ithq_base == thread)
			queue->ithq_base = next;

		next->ith_prev = prev;
		prev->ith_next = next;
		/* leave the removed thread self-linked */
		ipc_thread_links_init(thread);
	}
}
diff --git a/ipc/ipc_thread.h b/ipc/ipc_thread.h
new file mode 100644
index 0000000..e8bfe4a
--- /dev/null
+++ b/ipc/ipc_thread.h
@@ -0,0 +1,123 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_thread.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Definitions for the IPC component of threads.
+ */
+
+#ifndef _IPC_IPC_THREAD_H_
+#define _IPC_IPC_THREAD_H_
+
+#include <kern/thread.h>
+
/* a thread's IPC identity is the thread itself */
typedef thread_t ipc_thread_t;

#define	ITH_NULL		THREAD_NULL

/* simple lock over the thread's ith_lock_data field */
#define ith_lock_init(thread)	simple_lock_init(&(thread)->ith_lock_data)
#define	ith_lock(thread)	simple_lock(&(thread)->ith_lock_data)
#define	ith_unlock(thread)	simple_unlock(&(thread)->ith_lock_data)

/*
 *	A queue of threads: circular doubly-linked list through
 *	ith_next/ith_prev, with ithq_base naming the head
 *	(ITH_NULL when empty).
 */
typedef struct ipc_thread_queue {
	ipc_thread_t ithq_base;
} *ipc_thread_queue_t;

#define	ITHQ_NULL		((ipc_thread_queue_t) 0)
+
+
/* Make a thread's queue links self-referential (on no queue). */
#define	ipc_thread_links_init(thread)		\
MACRO_BEGIN					\
	(thread)->ith_next = (thread);		\
	(thread)->ith_prev = (thread);		\
MACRO_END

/* Start a queue out empty. */
#define	ipc_thread_queue_init(queue)		\
MACRO_BEGIN					\
	(queue)->ithq_base = ITH_NULL;		\
MACRO_END

#define	ipc_thread_queue_empty(queue)	((queue)->ithq_base == ITH_NULL)

#define	ipc_thread_queue_first(queue)	((queue)->ithq_base)

/* Unlink the head of the queue; (thread) must be that head. */
#define	ipc_thread_rmqueue_first_macro(queue, thread)			\
MACRO_BEGIN								\
	register ipc_thread_t _next;					\
									\
	assert((queue)->ithq_base == (thread));				\
									\
	_next = (thread)->ith_next;					\
	if (_next == (thread)) {					\
		/* (thread) was the only element */			\
		assert((thread)->ith_prev == (thread));			\
		(queue)->ithq_base = ITH_NULL;				\
	} else {							\
		register ipc_thread_t _prev = (thread)->ith_prev;	\
									\
		(queue)->ithq_base = _next;				\
		_next->ith_prev = _prev;				\
		_prev->ith_next = _next;				\
		ipc_thread_links_init(thread);				\
	}								\
MACRO_END

/* Append (thread) at the tail, i.e. just before the head. */
#define	ipc_thread_enqueue_macro(queue, thread)				\
MACRO_BEGIN								\
	register ipc_thread_t _first = (queue)->ithq_base;		\
									\
	if (_first == ITH_NULL) {					\
		(queue)->ithq_base = (thread);				\
		assert((thread)->ith_next == (thread));			\
		assert((thread)->ith_prev == (thread));			\
	} else {							\
		register ipc_thread_t _last = _first->ith_prev;		\
									\
		(thread)->ith_next = _first;				\
		(thread)->ith_prev = _last;				\
		_first->ith_prev = (thread);				\
		_last->ith_next = (thread);				\
	}								\
MACRO_END
+
+/* Enqueue a thread on a message queue */
+extern void ipc_thread_enqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread);
+
+/* Dequeue a thread from a message queue */
+extern ipc_thread_t ipc_thread_dequeue(
+ ipc_thread_queue_t queue);
+
+/* Remove a thread from a message queue */
+extern void ipc_thread_rmqueue(
+ ipc_thread_queue_t queue,
+ ipc_thread_t thread);
+
+#endif /* _IPC_IPC_THREAD_H_ */
diff --git a/ipc/ipc_types.h b/ipc/ipc_types.h
new file mode 100644
index 0000000..c8f0d0b
--- /dev/null
+++ b/ipc/ipc_types.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 1995, 1994, 1993, 1992, 1991, 1990
+ * Open Software Foundation, Inc.
+ *
+ * Permission to use, copy, modify, and distribute this software and
+ * its documentation for any purpose and without fee is hereby granted,
+ * provided that the above copyright notice appears in all copies and
+ * that both the copyright notice and this permission notice appear in
+ * supporting documentation, and that the name of ("OSF") or Open Software
+ * Foundation not be used in advertising or publicity pertaining to
+ * distribution of the software without specific, written prior permission.
+ *
+ * OSF DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL OSF BE LIABLE FOR ANY
+ * SPECIAL, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN
+ * ACTION OF CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING
+ * OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE
+ */
+/*
+ * OSF Research Institute MK6.1 (unencumbered) 1/31/1995
+ */
+
+#ifndef _IPC_TYPES_H_
+#define _IPC_TYPES_H_
+
+/* Opaque pointer typedefs for the two fundamental IPC objects, so
+ that headers which only pass these pointers around need not pull
+ in the full struct definitions. */
+typedef struct ipc_space *ipc_space_t;
+typedef struct ipc_port *ipc_port_t;
+
+#endif /* _IPC_TYPES_H_ */
diff --git a/ipc/mach_debug.c b/ipc/mach_debug.c
new file mode 100644
index 0000000..cd8fad0
--- /dev/null
+++ b/ipc/mach_debug.c
@@ -0,0 +1,618 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_debug.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported kernel calls. See mach_debug/mach_debug.defs.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/machine/vm_types.h>
+#include <mach/vm_param.h>
+#include <mach_debug/ipc_info.h>
+#include <mach_debug/hash_info.h>
+#include <kern/host.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_hash.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_table.h>
+#include <ipc/ipc_right.h>
+
+
+
+/*
+ * Routine: mach_port_get_srights [kernel call]
+ * Purpose:
+ * Retrieve the number of extant send rights
+ * that a receive right has.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved number of send rights.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_get_srights(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_rights_t *srightsp)
+{
+	ipc_port_t port;
+	kern_return_t result;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* Translate the name to its receive right's port; on success
+	   the port comes back locked and active. */
+	result = ipc_port_translate_receive(space, name, &port);
+	if (result != KERN_SUCCESS)
+		return result;
+
+	/* Snapshot the extant send-right count under the port lock. */
+	*srightsp = port->ip_srights;
+	ip_unlock(port);
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: host_ipc_hash_info
+ * Purpose:
+ * Return information about the global reverse hash table.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_HOST The host is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+/*
+ * Returns a snapshot of the global reverse-hash-table bucket counts.
+ * Follows the CountInOut protocol: the caller's in-line buffer
+ * (*infop, *countp) is tried first; if too small, pageable kernel
+ * memory is allocated, the snapshot retried, and the result handed
+ * back as a vm_map_copy_t in *infop.
+ */
+kern_return_t
+host_ipc_hash_info(
+	host_t host,
+	hash_info_bucket_array_t *infop,
+	mach_msg_type_number_t *countp)
+{
+	vm_offset_t addr;
+	vm_size_t size = 0; /* '=0' to shut up lint; matches
+			       host_ipc_marequest_info */
+	hash_info_bucket_t *info;
+	unsigned int potential, actual;
+	kern_return_t kr;
+
+	if (host == HOST_NULL)
+		return KERN_INVALID_HOST;
+
+	/* start with in-line data */
+
+	info = *infop;
+	potential = *countp;
+
+	/* Retry until the buffer is big enough: the table can grow
+	   between the size probe and the allocation. */
+	for (;;) {
+		actual = ipc_hash_info(info, potential);
+		if (actual <= potential)
+			break;
+
+		/* allocate more memory; free any previous attempt */
+
+		if (info != *infop)
+			kmem_free(ipc_kernel_map, addr, size);
+
+		size = round_page(actual * sizeof *info);
+		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+		if (kr != KERN_SUCCESS)
+			return KERN_RESOURCE_SHORTAGE;
+
+		info = (hash_info_bucket_t *) addr;
+		potential = size/sizeof *info;
+	}
+
+	if (info == *infop) {
+		/* data fit in-line; nothing to deallocate */
+
+		*countp = actual;
+	} else if (actual == 0) {
+		kmem_free(ipc_kernel_map, addr, size);
+
+		*countp = 0;
+	} else {
+		vm_map_copy_t copy;
+		vm_size_t used;
+
+		used = round_page(actual * sizeof *info);
+
+		/* trim the allocation to what was actually used, then
+		   hand ownership of the pages to the reply message */
+		if (used != size)
+			kmem_free(ipc_kernel_map, addr + used, size - used);
+
+		kr = vm_map_copyin(ipc_kernel_map, addr, used,
+				   TRUE, &copy);
+		assert(kr == KERN_SUCCESS);
+
+		*infop = (hash_info_bucket_t *) copy;
+		*countp = actual;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: host_ipc_marequest_info
+ * Purpose:
+ * Return information about the marequest hash table.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_HOST The host is null.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+/*
+ * Snapshot of the msg-accepted-request hash table bucket counts,
+ * plus the table's maximum size in *maxp. Same CountInOut dance
+ * as host_ipc_hash_info. (K&R-style definition.)
+ */
+kern_return_t
+host_ipc_marequest_info(host, maxp, infop, countp)
+	host_t host;
+	unsigned int *maxp;
+	hash_info_bucket_array_t *infop;
+	unsigned int *countp;
+{
+	vm_offset_t addr;
+	vm_size_t size = 0; /* '=0' to shut up lint */
+	hash_info_bucket_t *info;
+	unsigned int potential, actual;
+	kern_return_t kr;
+
+	if (host == HOST_NULL)
+		return KERN_INVALID_HOST;
+
+	/* start with in-line data */
+
+	info = *infop;
+	potential = *countp;
+
+	/* Retry until the buffer is big enough; the table size can
+	   change between probe and allocation. */
+	for (;;) {
+		actual = ipc_marequest_info(maxp, info, potential);
+		if (actual <= potential)
+			break;
+
+		/* allocate more memory; free any previous attempt */
+
+		if (info != *infop)
+			kmem_free(ipc_kernel_map, addr, size);
+
+		size = round_page(actual * sizeof *info);
+		kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
+		if (kr != KERN_SUCCESS)
+			return KERN_RESOURCE_SHORTAGE;
+
+		info = (hash_info_bucket_t *) addr;
+		potential = size/sizeof *info;
+	}
+
+	if (info == *infop) {
+		/* data fit in-line; nothing to deallocate */
+
+		*countp = actual;
+	} else if (actual == 0) {
+		kmem_free(ipc_kernel_map, addr, size);
+
+		*countp = 0;
+	} else {
+		vm_map_copy_t copy;
+		vm_size_t used;
+
+		used = round_page(actual * sizeof *info);
+
+		/* trim unused pages, then move the rest out-of-line */
+		if (used != size)
+			kmem_free(ipc_kernel_map, addr + used, size - used);
+
+		kr = vm_map_copyin(ipc_kernel_map, addr, used,
+				   TRUE, &copy);
+		assert(kr == KERN_SUCCESS);
+
+		*infop = (hash_info_bucket_t *) copy;
+		*countp = actual;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_space_info
+ * Purpose:
+ * Returns information about an IPC space.
+ * Conditions:
+ * Nothing locked. Obeys CountInOut protocol.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_space_info(
+	ipc_space_t space,
+	ipc_info_space_t *infop,
+	ipc_info_name_array_t *tablep,
+	mach_msg_type_number_t *tableCntp,
+	ipc_info_tree_name_array_t *treep,
+	mach_msg_type_number_t *treeCntp)
+{
+	ipc_info_name_t *table_info;
+	unsigned int table_potential, table_actual;
+	vm_offset_t table_addr;
+	vm_size_t table_size;
+	ipc_info_tree_name_t *tree_info;
+	unsigned int tree_potential, tree_actual;
+	vm_offset_t tree_addr;
+	vm_size_t tree_size;
+	ipc_tree_entry_t tentry;
+	ipc_entry_t table;
+	ipc_entry_num_t tsize;
+	mach_port_index_t index;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* start with in-line memory */
+
+	table_info = *tablep;
+	table_potential = *tableCntp;
+	tree_info = *treep;
+	tree_potential = *treeCntp;
+
+	/* Size both snapshot buffers under the space's read lock.
+	   Allocation must happen with the lock dropped, so loop:
+	   re-take the lock, re-check the sizes, and only fall out of
+	   the loop holding the lock with big-enough wired buffers. */
+	for (;;) {
+		is_read_lock(space);
+		if (!space->is_active) {
+			is_read_unlock(space);
+			/* free anything allocated on earlier passes */
+			if (table_info != *tablep)
+				kmem_free(ipc_kernel_map,
+					  table_addr, table_size);
+			if (tree_info != *treep)
+				kmem_free(ipc_kernel_map,
+					  tree_addr, tree_size);
+			return KERN_INVALID_TASK;
+		}
+
+		table_actual = space->is_table_size;
+		tree_actual = space->is_tree_total;
+
+		if ((table_actual <= table_potential) &&
+		    (tree_actual <= tree_potential))
+			break;
+
+		is_read_unlock(space);
+
+		if (table_actual > table_potential) {
+			if (table_info != *tablep)
+				kmem_free(ipc_kernel_map,
+					  table_addr, table_size);
+
+			table_size = round_page(table_actual *
+						sizeof *table_info);
+			kr = kmem_alloc(ipc_kernel_map,
+					&table_addr, table_size);
+			if (kr != KERN_SUCCESS) {
+				if (tree_info != *treep)
+					kmem_free(ipc_kernel_map,
+						  tree_addr, tree_size);
+
+				return KERN_RESOURCE_SHORTAGE;
+			}
+
+			table_info = (ipc_info_name_t *) table_addr;
+			table_potential = table_size/sizeof *table_info;
+		}
+
+		if (tree_actual > tree_potential) {
+			if (tree_info != *treep)
+				kmem_free(ipc_kernel_map,
+					  tree_addr, tree_size);
+
+			tree_size = round_page(tree_actual *
+					       sizeof *tree_info);
+			kr = kmem_alloc(ipc_kernel_map,
+					&tree_addr, tree_size);
+			if (kr != KERN_SUCCESS) {
+				if (table_info != *tablep)
+					kmem_free(ipc_kernel_map,
+						  table_addr, table_size);
+
+				return KERN_RESOURCE_SHORTAGE;
+			}
+
+			tree_info = (ipc_info_tree_name_t *) tree_addr;
+			tree_potential = tree_size/sizeof *tree_info;
+		}
+	}
+	/* space is read-locked and active; we have enough wired memory */
+
+	/* fill in the summary information for the space itself */
+	infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
+	infop->iis_table_size = space->is_table_size;
+	infop->iis_table_next = space->is_table_next->its_size;
+	infop->iis_tree_size = space->is_tree_total;
+	infop->iis_tree_small = space->is_tree_small;
+	infop->iis_tree_hash = space->is_tree_hash;
+
+	table = space->is_table;
+	tsize = space->is_table_size;
+
+	/* copy out one record per entry in the flat entry table */
+	for (index = 0; index < tsize; index++) {
+		ipc_info_name_t *iin = &table_info[index];
+		ipc_entry_t entry = &table[index];
+		ipc_entry_bits_t bits = entry->ie_bits;
+
+		iin->iin_name = MACH_PORT_MAKEB(index, bits);
+		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+/* NOTE(review): "#else MACH_IPC_COMPAT" and "#endif MACH_IPC_COMPAT"
+   carry bare tokens after the directive; standard C wants them in a
+   comment, e.g. "#endif / * MACH_IPC_COMPAT * /". Modern compilers
+   warn (-Wendif-labels) -- worth fixing in a code change. */
+#if MACH_IPC_COMPAT
+		iin->iin_compat = (bits & IE_BITS_COMPAT) ? TRUE : FALSE;
+#else MACH_IPC_COMPAT
+		iin->iin_compat = FALSE;
+#endif MACH_IPC_COMPAT
+		iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+		iin->iin_type = IE_BITS_TYPE(bits);
+		iin->iin_urefs = IE_BITS_UREFS(bits);
+		iin->iin_object = (vm_offset_t) entry->ie_object;
+		iin->iin_next = entry->ie_next;
+		iin->iin_hash = entry->ie_index;
+	}
+
+	/* then one record per entry in the splay-tree overflow area */
+	for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
+	     tentry != ITE_NULL;
+	     tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+		ipc_info_tree_name_t *iitn = &tree_info[index++];
+		ipc_info_name_t *iin = &iitn->iitn_name;
+		ipc_entry_t entry = &tentry->ite_entry;
+		ipc_entry_bits_t bits = entry->ie_bits;
+
+		assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+
+		iin->iin_name = tentry->ite_name;
+		iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+#if MACH_IPC_COMPAT
+		iin->iin_compat = (bits & IE_BITS_COMPAT) ? TRUE : FALSE;
+#else MACH_IPC_COMPAT
+		iin->iin_compat = FALSE;
+#endif MACH_IPC_COMPAT
+		iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
+		iin->iin_type = IE_BITS_TYPE(bits);
+		iin->iin_urefs = IE_BITS_UREFS(bits);
+		iin->iin_object = (vm_offset_t) entry->ie_object;
+		iin->iin_next = entry->ie_next;
+		iin->iin_hash = entry->ie_index;
+
+		if (tentry->ite_lchild == ITE_NULL)
+			iitn->iitn_lchild = MACH_PORT_NULL;
+		else
+			iitn->iitn_lchild = tentry->ite_lchild->ite_name;
+
+		if (tentry->ite_rchild == ITE_NULL)
+			iitn->iitn_rchild = MACH_PORT_NULL;
+		else
+			iitn->iitn_rchild = tentry->ite_rchild->ite_name;
+
+	}
+	ipc_splay_traverse_finish(&space->is_tree);
+	is_read_unlock(space);
+
+	/* hand the table snapshot back per the CountInOut protocol:
+	   in-line if it fit, otherwise as an out-of-line vm_map_copy */
+	if (table_info == *tablep) {
+		/* data fit in-line; nothing to deallocate */
+
+		*tableCntp = table_actual;
+	} else if (table_actual == 0) {
+		kmem_free(ipc_kernel_map, table_addr, table_size);
+
+		*tableCntp = 0;
+	} else {
+		vm_size_t size_used, rsize_used;
+		vm_map_copy_t copy;
+
+		/* kmem_alloc doesn't zero memory */
+
+		size_used = table_actual * sizeof *table_info;
+		rsize_used = round_page(size_used);
+
+		if (rsize_used != table_size)
+			kmem_free(ipc_kernel_map,
+				  table_addr + rsize_used,
+				  table_size - rsize_used);
+
+		/* zero the tail of the last page so no stale kernel
+		   memory leaks to the caller */
+		if (size_used != rsize_used)
+			bzero((char *) (table_addr + size_used),
+			      rsize_used - size_used);
+
+		kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used,
+				   TRUE, &copy);
+
+		assert(kr == KERN_SUCCESS);
+
+		*tablep = (ipc_info_name_t *) copy;
+		*tableCntp = table_actual;
+	}
+
+	/* same treatment for the tree snapshot */
+	if (tree_info == *treep) {
+		/* data fit in-line; nothing to deallocate */
+
+		*treeCntp = tree_actual;
+	} else if (tree_actual == 0) {
+		kmem_free(ipc_kernel_map, tree_addr, tree_size);
+
+		*treeCntp = 0;
+	} else {
+		vm_size_t size_used, rsize_used;
+		vm_map_copy_t copy;
+
+		/* kmem_alloc doesn't zero memory */
+
+		size_used = tree_actual * sizeof *tree_info;
+		rsize_used = round_page(size_used);
+
+		if (rsize_used != tree_size)
+			kmem_free(ipc_kernel_map,
+				  tree_addr + rsize_used,
+				  tree_size - rsize_used);
+
+		if (size_used != rsize_used)
+			bzero((char *) (tree_addr + size_used),
+			      rsize_used - size_used);
+
+		kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
+				   TRUE, &copy);
+
+		assert(kr == KERN_SUCCESS);
+
+		*treep = (ipc_info_tree_name_t *) copy;
+		*treeCntp = tree_actual;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_dnrequest_info
+ * Purpose:
+ * Returns information about the dead-name requests
+ * registered with the named receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved information.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_dnrequest_info(
+	ipc_space_t space,
+	mach_port_t name,
+	unsigned int *totalp,
+	unsigned int *usedp)
+{
+	unsigned int total, used;
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	if (port->ip_dnrequests == IPR_NULL) {
+		/* no request table allocated yet */
+		total = 0;
+		used = 0;
+	} else {
+		ipc_port_request_t dnrequests = port->ip_dnrequests;
+		ipc_port_request_index_t index;
+
+		/* table capacity comes from its ipc_table_size record */
+		total = dnrequests->ipr_size->its_size;
+
+		/* count occupied slots; starts at 1 -- slot 0 evidently
+		   holds table bookkeeping, not a request (TODO confirm
+		   against the ipc_port_request layout) */
+		for (index = 1, used = 0;
+		     index < total; index++) {
+			ipc_port_request_t ipr = &dnrequests[index];
+
+			if (ipr->ipr_name != MACH_PORT_NULL)
+				used++;
+		}
+	}
+	ip_unlock(port);
+
+	*totalp = total;
+	*usedp = used;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_kernel_object [kernel call]
+ * Purpose:
+ * Retrieve the type and address of the kernel object
+ * represented by a send or receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved kernel object info.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote
+ * send or receive rights.
+ */
+
+kern_return_t
+mach_port_kernel_object(
+	ipc_space_t space,
+	mach_port_t name,
+	unsigned int *typep,
+	vm_offset_t *addrp)
+{
+	ipc_entry_t entry;
+	ipc_port_t port;
+	kern_return_t kr;
+
+	kr = ipc_right_lookup_read(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is read-locked and active */
+
+	/* only send or receive rights denote a kernel object */
+	if ((entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE) == 0) {
+		is_read_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	/* lock the port before dropping the space lock, so the entry
+	   can't be deallocated out from under us */
+	ip_lock(port);
+	is_read_unlock(space);
+
+	if (!ip_active(port)) {
+		ip_unlock(port);
+		return KERN_INVALID_RIGHT;
+	}
+
+	*typep = (unsigned int) ip_kotype(port);
+	*addrp = (vm_offset_t) port->ip_kobject;
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
new file mode 100644
index 0000000..ffcccf4
--- /dev/null
+++ b/ipc/mach_msg.c
@@ -0,0 +1,2279 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_msg.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported message traps. See mach/message.h.
+ */
+
+#include <mach_ipc_compat.h>
+#include <norma_ipc.h>
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/message.h>
+#include <kern/assert.h>
+#include <kern/counters.h>
+#include <kern/lock.h>
+#include <kern/sched_prim.h>
+#include <kern/ipc_sched.h>
+#include <vm/vm_map.h>
+#include <ipc/ipc_kmsg.h>
+#include <ipc/ipc_marequest.h>
+#include <ipc/ipc_mqueue.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_thread.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/mach_msg.h>
+
+
+
+/* Continuations used by the optimized exception-RPC receive path. */
+extern void exception_raise_continue();
+extern void exception_raise_continue_fast();
+#ifndef CONTINUATIONS
+/* Without kernel continuations, no continuation routine is passed to
+   ipc_mqueue_receive; substitute 0 for the routine pointers. */
+#define mach_msg_receive_continue 0
+#define msg_receive_continue 0
+#endif
+
+/*
+ * Routine: mach_msg_send
+ * Purpose:
+ * Send a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Sent the message.
+ * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
+ * MACH_SEND_NO_BUFFER Couldn't allocate buffer.
+ * MACH_SEND_INVALID_DATA Couldn't copy message data.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_DEST The space is dead.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_TIMED_OUT Timeout expired without delivery.
+ * MACH_SEND_INTERRUPTED Delivery interrupted.
+ * MACH_SEND_NO_NOTIFY Can't allocate a msg-accepted request.
+ * MACH_SEND_WILL_NOTIFY Msg-accepted notif. requested.
+ * MACH_SEND_NOTIFY_IN_PROGRESS
+ * This space has already forced a message to this port.
+ */
+
+mach_msg_return_t
+mach_msg_send(msg, option, send_size, time_out, notify)
+	mach_msg_header_t *msg;
+	mach_msg_option_t option;
+	mach_msg_size_t send_size;
+	mach_msg_timeout_t time_out;
+	mach_port_t notify;
+{
+	ipc_space_t space = current_space();
+	vm_map_t map = current_map();
+	ipc_kmsg_t kmsg;
+	mach_msg_return_t mr;
+
+	/* copy the message from user space into a kernel buffer */
+	mr = ipc_kmsg_get(msg, send_size, &kmsg);
+	if (mr != MACH_MSG_SUCCESS)
+		return mr;
+
+	/* copy in port rights (MACH_SEND_CANCEL requires a notify port) */
+	if (option & MACH_SEND_CANCEL) {
+		if (notify == MACH_PORT_NULL)
+			mr = MACH_SEND_INVALID_NOTIFY;
+		else
+			mr = ipc_kmsg_copyin(kmsg, space, map, notify);
+	} else
+		mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+	if (mr != MACH_MSG_SUCCESS) {
+		ikm_free(kmsg);
+		return mr;
+	}
+
+	if (option & MACH_SEND_NOTIFY) {
+		/* try a (possibly timed) send first; only if that times
+		   out do we register a msg-accepted request and force the
+		   message onto the queue anyway */
+		mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+				     ((option & MACH_SEND_TIMEOUT) ?
+				      time_out : MACH_MSG_TIMEOUT_NONE));
+		if (mr == MACH_SEND_TIMED_OUT) {
+			ipc_port_t dest = (ipc_port_t)
+				kmsg->ikm_header.msgh_remote_port;
+
+			if (notify == MACH_PORT_NULL)
+				mr = MACH_SEND_INVALID_NOTIFY;
+			else
+				mr = ipc_marequest_create(space, dest,
+					notify, &kmsg->ikm_marequest);
+			if (mr == MACH_MSG_SUCCESS) {
+				ipc_mqueue_send_always(kmsg);
+				return MACH_SEND_WILL_NOTIFY;
+			}
+		}
+	} else
+		mr = ipc_mqueue_send(kmsg, option & MACH_SEND_TIMEOUT,
+				     time_out);
+
+	if (mr != MACH_MSG_SUCCESS) {
+		/* undo the copyin so the caller keeps its rights, then
+		   return the message data to the user buffer */
+		mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
+
+		assert(kmsg->ikm_marequest == IMAR_NULL);
+		(void) ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+	}
+
+	return mr;
+}
+
+/*
+ * Routine: mach_msg_receive
+ * Purpose:
+ * Receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Received a message.
+ * MACH_RCV_INVALID_NAME The name doesn't denote a right,
+ * or the denoted right is not receive or port set.
+ * MACH_RCV_IN_SET Receive right is a member of a set.
+ * MACH_RCV_TOO_LARGE Message wouldn't fit into buffer.
+ * MACH_RCV_TIMED_OUT Timeout expired without a message.
+ * MACH_RCV_INTERRUPTED Reception interrupted.
+ * MACH_RCV_PORT_DIED Port/set died while receiving.
+ * MACH_RCV_PORT_CHANGED Port moved into set while receiving.
+ * MACH_RCV_INVALID_DATA Couldn't copy to user buffer.
+ * MACH_RCV_INVALID_NOTIFY Bad notify port.
+ * MACH_RCV_HEADER_ERROR
+ */
+
+mach_msg_return_t
+mach_msg_receive(msg, option, rcv_size, rcv_name, time_out, notify)
+	mach_msg_header_t *msg;
+	mach_msg_option_t option;
+	mach_msg_size_t rcv_size;
+	mach_port_t rcv_name;
+	mach_msg_timeout_t time_out;
+	mach_port_t notify;
+{
+	ipc_thread_t self = current_thread();
+	ipc_space_t space = current_space();
+	vm_map_t map = current_map();
+	ipc_object_t object;
+	ipc_mqueue_t mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/* resolve rcv_name to a receive right's or port set's queue */
+	mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
+	if (mr != MACH_MSG_SUCCESS)
+		return mr;
+	/* hold ref for object; mqueue is locked */
+
+#ifdef CONTINUATIONS
+	/*
+	 * ipc_mqueue_receive may not return, because if we block
+	 * then our kernel stack may be discarded. So we save
+	 * state here for mach_msg_receive_continue to pick up.
+	 */
+
+	self->ith_msg = msg;
+	self->ith_option = option;
+	self->ith_rcv_size = rcv_size;
+	self->ith_timeout = time_out;
+	self->ith_notify = notify;
+	self->ith_object = object;
+	self->ith_mqueue = mqueue;
+#endif
+
+	if (option & MACH_RCV_LARGE) {
+		/* with MACH_RCV_LARGE an oversized message is left
+		   queued and only its real size reported back */
+		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+					rcv_size, time_out,
+					FALSE, mach_msg_receive_continue,
+					&kmsg, &seqno);
+		/* mqueue is unlocked */
+		ipc_object_release(object);
+		if (mr != MACH_MSG_SUCCESS) {
+			if (mr == MACH_RCV_TOO_LARGE) {
+				/* on this path kmsg carries the size,
+				   not a message pointer */
+				mach_msg_size_t real_size =
+					(mach_msg_size_t) (natural_t) kmsg;
+
+				assert(real_size > rcv_size);
+
+				(void) copyout((vm_offset_t) &real_size,
+					       (vm_offset_t) &msg->msgh_size,
+					       sizeof(mach_msg_size_t));
+			}
+
+			return mr;
+		}
+
+		kmsg->ikm_header.msgh_seqno = seqno;
+		assert(kmsg->ikm_header.msgh_size <= rcv_size);
+	} else {
+		/* without MACH_RCV_LARGE, dequeue regardless of size;
+		   an oversized message is destroyed below */
+		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+					MACH_MSG_SIZE_MAX, time_out,
+					FALSE, mach_msg_receive_continue,
+					&kmsg, &seqno);
+		/* mqueue is unlocked */
+		ipc_object_release(object);
+		if (mr != MACH_MSG_SUCCESS)
+			return mr;
+
+		kmsg->ikm_header.msgh_seqno = seqno;
+		if (kmsg->ikm_header.msgh_size > rcv_size) {
+			/* destroy body, give the caller just the header */
+			ipc_kmsg_copyout_dest(kmsg, space);
+			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+			return MACH_RCV_TOO_LARGE;
+		}
+	}
+
+	/* copy out rights and out-of-line memory to the receiver */
+	if (option & MACH_RCV_NOTIFY) {
+		if (notify == MACH_PORT_NULL)
+			mr = MACH_RCV_INVALID_NOTIFY;
+		else
+			mr = ipc_kmsg_copyout(kmsg, space, map, notify);
+	} else
+		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+	if (mr != MACH_MSG_SUCCESS) {
+		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+			/* header copied out; deliver what we can */
+			(void) ipc_kmsg_put(msg, kmsg,
+					    kmsg->ikm_header.msgh_size);
+		} else {
+			ipc_kmsg_copyout_dest(kmsg, space);
+			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+		}
+
+		return mr;
+	}
+
+	return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+}
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: mach_msg_receive_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+mach_msg_receive_continue()
+{
+	ipc_thread_t self = current_thread();
+	ipc_space_t space = current_space();
+	vm_map_t map = current_map();
+	/* restore the receive arguments saved by mach_msg_receive
+	   (or the fast path) before it blocked */
+	mach_msg_header_t *msg = self->ith_msg;
+	mach_msg_option_t option = self->ith_option;
+	mach_msg_size_t rcv_size = self->ith_rcv_size;
+	mach_msg_timeout_t time_out = self->ith_timeout;
+	mach_port_t notify = self->ith_notify;
+	ipc_object_t object = self->ith_object;
+	ipc_mqueue_t mqueue = self->ith_mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/* mirrors the tail of mach_msg_receive, but exits through
+	   thread_syscall_return instead of returning */
+	if (option & MACH_RCV_LARGE) {
+		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+					rcv_size, time_out,
+					TRUE, mach_msg_receive_continue,
+					&kmsg, &seqno);
+		/* mqueue is unlocked */
+		ipc_object_release(object);
+		if (mr != MACH_MSG_SUCCESS) {
+			if (mr == MACH_RCV_TOO_LARGE) {
+				/* kmsg carries the size on this path */
+				mach_msg_size_t real_size =
+					(mach_msg_size_t) (natural_t) kmsg;
+
+				assert(real_size > rcv_size);
+
+				(void) copyout((vm_offset_t) &real_size,
+					       (vm_offset_t) &msg->msgh_size,
+					       sizeof(mach_msg_size_t));
+			}
+
+			thread_syscall_return(mr);
+			/*NOTREACHED*/
+		}
+
+		kmsg->ikm_header.msgh_seqno = seqno;
+		assert(kmsg->ikm_header.msgh_size <= rcv_size);
+	} else {
+		mr = ipc_mqueue_receive(mqueue, option & MACH_RCV_TIMEOUT,
+					MACH_MSG_SIZE_MAX, time_out,
+					TRUE, mach_msg_receive_continue,
+					&kmsg, &seqno);
+		/* mqueue is unlocked */
+		ipc_object_release(object);
+		if (mr != MACH_MSG_SUCCESS) {
+			thread_syscall_return(mr);
+			/*NOTREACHED*/
+		}
+
+		kmsg->ikm_header.msgh_seqno = seqno;
+		if (kmsg->ikm_header.msgh_size > rcv_size) {
+			/* destroy body, deliver header only */
+			ipc_kmsg_copyout_dest(kmsg, space);
+			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+			thread_syscall_return(MACH_RCV_TOO_LARGE);
+			/*NOTREACHED*/
+		}
+	}
+
+	if (option & MACH_RCV_NOTIFY) {
+		if (notify == MACH_PORT_NULL)
+			mr = MACH_RCV_INVALID_NOTIFY;
+		else
+			mr = ipc_kmsg_copyout(kmsg, space, map, notify);
+	} else
+		mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+	if (mr != MACH_MSG_SUCCESS) {
+		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+			(void) ipc_kmsg_put(msg, kmsg,
+					    kmsg->ikm_header.msgh_size);
+		} else {
+			ipc_kmsg_copyout_dest(kmsg, space);
+			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+		}
+
+		thread_syscall_return(mr);
+		/*NOTREACHED*/
+	}
+
+	mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+	thread_syscall_return(mr);
+	/*NOTREACHED*/
+}
+#endif /* CONTINUATIONS */
+
+/*
+ * Routine: mach_msg_trap [mach trap]
+ * Purpose:
+ * Possibly send a message; possibly receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * All of mach_msg_send and mach_msg_receive error codes.
+ */
+
+mach_msg_return_t
+mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
+ mach_msg_header_t *msg;
+ mach_msg_option_t option;
+ mach_msg_size_t send_size;
+ mach_msg_size_t rcv_size;
+ mach_port_t rcv_name;
+ mach_msg_timeout_t time_out;
+ mach_port_t notify;
+{
+ mach_msg_return_t mr;
+
+#ifdef CONTINUATIONS
+ /* first check for common cases */
+
+ if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
+ register ipc_thread_t self = current_thread();
+ ipc_space_t space = self->task->itk_space;
+ register ipc_kmsg_t kmsg;
+ register ipc_port_t dest_port;
+ ipc_object_t rcv_object;
+ register ipc_mqueue_t rcv_mqueue;
+ mach_msg_size_t reply_size;
+
+ /*
+ * This case is divided into ten sections, each
+ * with a label. There are five optimized
+ * sections and six unoptimized sections, which
+ * do the same thing but handle all possible
+ * cases and are slower.
+ *
+ * The five sections for an RPC are
+ * 1) Get request message into a buffer.
+ * (fast_get or slow_get)
+ * 2) Copyin request message and rcv_name.
+ * (fast_copyin or slow_copyin)
+ * 3) Enqueue request and dequeue reply.
+ * (fast_send_receive or
+ * slow_send and slow_receive)
+ * 4) Copyout reply message.
+ * (fast_copyout or slow_copyout)
+ * 5) Put reply message to user's buffer.
+ * (fast_put or slow_put)
+ *
+ * Keep the locking hierarchy firmly in mind.
+ * (First spaces, then ports, then port sets,
+ * then message queues.) Only a non-blocking
+ * attempt can be made to acquire locks out of
+ * order, or acquire two locks on the same level.
+ * Acquiring two locks on the same level will
+ * fail if the objects are really the same,
+ * unless simple locking is disabled. This is OK,
+ * because then the extra unlock does nothing.
+ *
+ * There are two major reasons these RPCs can't use
+ * ipc_thread_switch, and use slow_send/slow_receive:
+ * 1) Kernel RPCs.
+ * 2) Servers fall behind clients, so
+ * client doesn't find a blocked server thread and
+ * server finds waiting messages and can't block.
+ */
+
+ /*
+ fast_get:
+ */
+ /*
+ * optimized ipc_kmsg_get
+ *
+ * No locks, references, or messages held.
+ * We must clear ikm_cache before copyinmsg.
+ */
+
+ if ((send_size > IKM_SAVED_MSG_SIZE) ||
+ (send_size < sizeof(mach_msg_header_t)) ||
+ (send_size & 3) ||
+ ((kmsg = ikm_cache()) == IKM_NULL))
+ goto slow_get;
+
+ ikm_cache() = IKM_NULL;
+ ikm_check_initialized(kmsg, IKM_SAVED_KMSG_SIZE);
+
+ if (copyinmsg((vm_offset_t) msg, (vm_offset_t) &kmsg->ikm_header,
+ send_size)) {
+ ikm_free(kmsg);
+ goto slow_get;
+ }
+
+ kmsg->ikm_header.msgh_size = send_size;
+
+ fast_copyin:
+ /*
+ * optimized ipc_kmsg_copyin/ipc_mqueue_copyin
+ *
+ * We have the request message data in kmsg.
+ * Must still do copyin, send, receive, etc.
+ *
+ * If the message isn't simple, we can't combine
+ * ipc_kmsg_copyin_header and ipc_mqueue_copyin,
+ * because copyin of the message body might
+ * affect rcv_name.
+ */
+
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
+ MACH_MSG_TYPE_MAKE_SEND_ONCE): {
+ register ipc_entry_t table;
+ register ipc_entry_num_t size;
+ register ipc_port_t reply_port;
+
+ /* sending a request message */
+
+ {
+ register mach_port_index_t index;
+ register mach_port_gen_t gen;
+
+ {
+ register mach_port_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+
+ if (reply_name != rcv_name)
+ goto slow_copyin;
+
+ /* optimized ipc_entry_lookup of reply_name */
+
+ index = MACH_PORT_INDEX(reply_name);
+ gen = MACH_PORT_GEN(reply_name);
+ }
+
+ is_read_lock(space);
+ assert(space->is_active);
+
+ size = space->is_table_size;
+ table = space->is_table;
+
+ if (index >= size)
+ goto abort_request_copyin;
+
+ {
+ register ipc_entry_t entry;
+ register ipc_entry_bits_t bits;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|
+ MACH_PORT_TYPE_RECEIVE)) !=
+ (gen | MACH_PORT_TYPE_RECEIVE))
+ goto abort_request_copyin;
+
+ reply_port = (ipc_port_t) entry->ie_object;
+ assert(reply_port != IP_NULL);
+ }
+ }
+
+ /* optimized ipc_entry_lookup of dest_name */
+
+ {
+ register mach_port_index_t index;
+ register mach_port_gen_t gen;
+
+ {
+ register mach_port_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+ index = MACH_PORT_INDEX(dest_name);
+ gen = MACH_PORT_GEN(dest_name);
+ }
+
+ if (index >= size)
+ goto abort_request_copyin;
+
+ {
+ register ipc_entry_t entry;
+ register ipc_entry_bits_t bits;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number and type bit */
+
+ if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
+ (gen | MACH_PORT_TYPE_SEND))
+ goto abort_request_copyin;
+
+ assert(IE_BITS_UREFS(bits) > 0);
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+ }
+ }
+
+ /*
+ * To do an atomic copyin, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port)) {
+ ip_unlock(dest_port);
+ goto abort_request_copyin;
+ }
+ is_read_unlock(space);
+
+ assert(dest_port->ip_srights > 0);
+ dest_port->ip_srights++;
+ ip_reference(dest_port);
+
+ assert(ip_active(reply_port));
+ assert(reply_port->ip_receiver_name ==
+ kmsg->ikm_header.msgh_local_port);
+ assert(reply_port->ip_receiver == space);
+
+ reply_port->ip_sorights++;
+ ip_reference(reply_port);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port =
+ (mach_port_t) dest_port;
+ kmsg->ikm_header.msgh_local_port =
+ (mach_port_t) reply_port;
+
+ /* make sure we can queue to the destination */
+
+ if (dest_port->ip_receiver == ipc_space_kernel) {
+ /*
+ * The kernel server has a reference to
+ * the reply port, which it hands back
+ * to us in the reply message. We do
+ * not need to keep another reference to
+ * it.
+ */
+ ip_unlock(reply_port);
+
+ assert(ip_active(dest_port));
+ ip_unlock(dest_port);
+ goto kernel_send;
+ }
+
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY(dest_port)) {
+ ip_unlock(dest_port);
+ ip_unlock(reply_port);
+ goto norma_send;
+ }
+#endif NORMA_IPC
+
+ if (dest_port->ip_msgcount >= dest_port->ip_qlimit)
+ goto abort_request_send_receive;
+
+ /* optimized ipc_mqueue_copyin */
+
+ if (reply_port->ip_pset != IPS_NULL)
+ goto abort_request_send_receive;
+
+ rcv_object = (ipc_object_t) reply_port;
+ io_reference(rcv_object);
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+
+ abort_request_copyin:
+ is_read_unlock(space);
+ goto slow_copyin;
+
+ abort_request_send_receive:
+ ip_unlock(dest_port);
+ ip_unlock(reply_port);
+ goto slow_send;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
+ register ipc_entry_num_t size;
+ register ipc_entry_t table;
+
+ /* sending a reply message */
+
+ {
+ register mach_port_t reply_name =
+ kmsg->ikm_header.msgh_local_port;
+
+ if (reply_name != MACH_PORT_NULL)
+ goto slow_copyin;
+ }
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /* optimized ipc_entry_lookup */
+
+ size = space->is_table_size;
+ table = space->is_table;
+
+ {
+ register ipc_entry_t entry;
+ register mach_port_gen_t gen;
+ register mach_port_index_t index;
+
+ {
+ register mach_port_t dest_name =
+ kmsg->ikm_header.msgh_remote_port;
+
+ index = MACH_PORT_INDEX(dest_name);
+ gen = MACH_PORT_GEN(dest_name);
+ }
+
+ if (index >= size)
+ goto abort_reply_dest_copyin;
+
+ entry = &table[index];
+
+ /* check generation, collision bit, and type bit */
+
+ if ((entry->ie_bits & (IE_BITS_GEN_MASK|
+ IE_BITS_COLLISION|
+ MACH_PORT_TYPE_SEND_ONCE)) !=
+ (gen | MACH_PORT_TYPE_SEND_ONCE))
+ goto abort_reply_dest_copyin;
+
+ /* optimized ipc_right_copyin */
+
+ assert(IE_BITS_TYPE(entry->ie_bits) ==
+ MACH_PORT_TYPE_SEND_ONCE);
+ assert(IE_BITS_UREFS(entry->ie_bits) == 1);
+ assert((entry->ie_bits & IE_BITS_MAREQUEST) == 0);
+
+ if (entry->ie_request != 0)
+ goto abort_reply_dest_copyin;
+
+ dest_port = (ipc_port_t) entry->ie_object;
+ assert(dest_port != IP_NULL);
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port)) {
+ ip_unlock(dest_port);
+ goto abort_reply_dest_copyin;
+ }
+
+ assert(dest_port->ip_sorights > 0);
+
+ /* optimized ipc_entry_dealloc */
+
+ entry->ie_next = table->ie_next;
+ table->ie_next = index;
+ entry->ie_bits = gen;
+ entry->ie_object = IO_NULL;
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ 0);
+ kmsg->ikm_header.msgh_remote_port =
+ (mach_port_t) dest_port;
+
+ /* make sure we can queue to the destination */
+
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+#if NORMA_IPC
+ if (IP_NORMA_IS_PROXY(dest_port)) {
+ is_write_unlock(space);
+ ip_unlock(dest_port);
+ goto norma_send;
+ }
+#endif NORMA_IPC
+
+ /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
+
+ {
+ register ipc_entry_t entry;
+ register ipc_entry_bits_t bits;
+
+ {
+ register mach_port_index_t index;
+ register mach_port_gen_t gen;
+
+ index = MACH_PORT_INDEX(rcv_name);
+ gen = MACH_PORT_GEN(rcv_name);
+
+ if (index >= size)
+ goto abort_reply_rcv_copyin;
+
+ entry = &table[index];
+ bits = entry->ie_bits;
+
+ /* check generation number */
+
+ if ((bits & IE_BITS_GEN_MASK) != gen)
+ goto abort_reply_rcv_copyin;
+ }
+
+ /* check type bits; looking for receive or set */
+
+ if (bits & MACH_PORT_TYPE_PORT_SET) {
+ register ipc_pset_t rcv_pset;
+
+ rcv_pset = (ipc_pset_t) entry->ie_object;
+ assert(rcv_pset != IPS_NULL);
+
+ ips_lock(rcv_pset);
+ assert(ips_active(rcv_pset));
+
+ rcv_object = (ipc_object_t) rcv_pset;
+ rcv_mqueue = &rcv_pset->ips_messages;
+ } else if (bits & MACH_PORT_TYPE_RECEIVE) {
+ register ipc_port_t rcv_port;
+
+ rcv_port = (ipc_port_t) entry->ie_object;
+ assert(rcv_port != IP_NULL);
+
+ if (!ip_lock_try(rcv_port))
+ goto abort_reply_rcv_copyin;
+ assert(ip_active(rcv_port));
+
+ if (rcv_port->ip_pset != IPS_NULL) {
+ ip_unlock(rcv_port);
+ goto abort_reply_rcv_copyin;
+ }
+
+ rcv_object = (ipc_object_t) rcv_port;
+ rcv_mqueue = &rcv_port->ip_messages;
+ } else
+ goto abort_reply_rcv_copyin;
+ }
+
+ is_write_unlock(space);
+ io_reference(rcv_object);
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+
+ abort_reply_dest_copyin:
+ is_write_unlock(space);
+ goto slow_copyin;
+
+ abort_reply_rcv_copyin:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+ goto slow_send;
+ }
+
+ default:
+ goto slow_copyin;
+ }
+ /*NOTREACHED*/
+
+ fast_send_receive:
+ /*
+ * optimized ipc_mqueue_send/ipc_mqueue_receive
+ *
+ * Finished get/copyin of kmsg and copyin of rcv_name.
+ * space is unlocked, dest_port is locked,
+ * we can queue kmsg to dest_port,
+ * rcv_mqueue is locked, rcv_object holds a ref,
+ * if rcv_object is a port it isn't in a port set
+ *
+ * Note that if simple locking is turned off,
+ * then we could have dest_mqueue == rcv_mqueue
+ * and not abort when we try to lock dest_mqueue.
+ */
+
+ assert(ip_active(dest_port));
+ assert(dest_port->ip_receiver != ipc_space_kernel);
+#if NORMA_IPC
+ assert(! IP_NORMA_IS_PROXY(dest_port));
+#endif NORMA_IPC
+ assert((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ assert((kmsg->ikm_header.msgh_bits &
+ MACH_MSGH_BITS_CIRCULAR) == 0);
+
+ {
+ register ipc_mqueue_t dest_mqueue;
+ register ipc_thread_t receiver;
+
+ {
+ register ipc_pset_t dest_pset;
+
+ dest_pset = dest_port->ip_pset;
+ if (dest_pset == IPS_NULL)
+ dest_mqueue = &dest_port->ip_messages;
+ else
+ dest_mqueue = &dest_pset->ips_messages;
+ }
+
+ if (!imq_lock_try(dest_mqueue)) {
+ abort_send_receive:
+ ip_unlock(dest_port);
+ imq_unlock(rcv_mqueue);
+ ipc_object_release(rcv_object);
+ goto slow_send;
+ }
+
+ receiver = ipc_thread_queue_first(&dest_mqueue->imq_threads);
+ if ((receiver == ITH_NULL) ||
+ (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
+ != IKM_NULL)) {
+ imq_unlock(dest_mqueue);
+ goto abort_send_receive;
+ }
+
+ /*
+ * There is a receiver thread waiting, and
+ * there is no reply message for us to pick up.
+ * We have hope of hand-off, so save state.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = rcv_object;
+ self->ith_mqueue = rcv_mqueue;
+
+ if ((receiver->swap_func == (void (*)()) mach_msg_continue) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ assert(current_thread() == receiver);
+
+ /*
+ * We can use the optimized receive code,
+ * because the receiver is using no options.
+ */
+ } else if ((receiver->swap_func ==
+ (void (*)()) exception_raise_continue) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ counter(c_mach_msg_trap_block_exc++);
+ assert(current_thread() == receiver);
+
+ /*
+ * We are a reply message coming back through
+ * the optimized exception-handling path.
+ * Finish with rcv_mqueue and dest_mqueue,
+ * and then jump to exception code with
+ * dest_port still locked. We don't bother
+ * with a sequence number in this case.
+ */
+
+ ipc_thread_enqueue_macro(
+ &rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ imq_unlock(dest_mqueue);
+
+ exception_raise_continue_fast(dest_port, kmsg);
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS;
+ } else if ((send_size <= receiver->ith_msize) &&
+ thread_handoff(self, mach_msg_continue, receiver)) {
+ assert(current_thread() == receiver);
+
+ if ((receiver->swap_func ==
+ (void (*)()) mach_msg_receive_continue) &&
+ ((receiver->ith_option & MACH_RCV_NOTIFY) == 0)) {
+ /*
+ * We can still use the optimized code.
+ */
+ } else {
+ counter(c_mach_msg_trap_block_slow++);
+ /*
+ * We are running as the receiver,
+ * but we can't use the optimized code.
+ * Finish send/receive processing.
+ */
+
+ dest_port->ip_msgcount++;
+ ip_unlock(dest_port);
+
+ ipc_thread_enqueue_macro(
+ &rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ receiver->ith_state = MACH_MSG_SUCCESS;
+ receiver->ith_kmsg = kmsg;
+ receiver->ith_seqno = dest_port->ip_seqno++;
+ imq_unlock(dest_mqueue);
+
+ /*
+ * Call the receiver's continuation.
+ */
+
+ receiver->wait_result = THREAD_AWAKENED;
+ (*receiver->swap_func)();
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS;
+ }
+ } else {
+ /*
+ * The receiver can't accept the message,
+ * or we can't switch to the receiver.
+ */
+
+ imq_unlock(dest_mqueue);
+ goto abort_send_receive;
+ }
+ counter(c_mach_msg_trap_block_fast++);
+
+ /*
+ * Safe to unlock dest_port now that we are
+ * committed to this path, because we hold
+ * dest_mqueue locked. We never bother changing
+ * dest_port->ip_msgcount.
+ */
+
+ ip_unlock(dest_port);
+
+ /*
+ * We need to finish preparing self for its
+ * time asleep in rcv_mqueue.
+ */
+
+ ipc_thread_enqueue_macro(&rcv_mqueue->imq_threads, self);
+ self->ith_state = MACH_RCV_IN_PROGRESS;
+ self->ith_msize = MACH_MSG_SIZE_MAX;
+ imq_unlock(rcv_mqueue);
+
+ /*
+ * Finish extracting receiver from dest_mqueue.
+ */
+
+ ipc_thread_rmqueue_first_macro(
+ &dest_mqueue->imq_threads, receiver);
+ kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
+ imq_unlock(dest_mqueue);
+
+ /*
+ * We don't have to do any post-dequeue processing of
+ * the message. We never incremented ip_msgcount, we
+ * know it has no msg-accepted request, and blocked
+ * senders aren't a worry because we found the port
+ * with a receiver waiting.
+ */
+
+ self = receiver;
+ space = self->task->itk_space;
+
+ msg = self->ith_msg;
+ rcv_size = self->ith_rcv_size;
+ rcv_object = self->ith_object;
+
+ /* inline ipc_object_release */
+ io_lock(rcv_object);
+ io_release(rcv_object);
+ io_check_unlock(rcv_object);
+ }
+
+ fast_copyout:
+ /*
+ * Nothing locked and no references held, except
+ * we have kmsg with msgh_seqno filled in. Must
+ * still check against rcv_size and do
+ * ipc_kmsg_copyout/ipc_kmsg_put.
+ */
+
+ assert((ipc_port_t) kmsg->ikm_header.msgh_remote_port
+ == dest_port);
+
+ reply_size = kmsg->ikm_header.msgh_size;
+ if (rcv_size < reply_size)
+ goto slow_copyout;
+
+ /* optimized ipc_kmsg_copyout/ipc_kmsg_copyout_header */
+
+ switch (kmsg->ikm_header.msgh_bits) {
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
+ MACH_MSG_TYPE_PORT_SEND_ONCE): {
+ ipc_port_t reply_port =
+ (ipc_port_t) kmsg->ikm_header.msgh_local_port;
+ mach_port_t dest_name, reply_name;
+
+ /* receiving a request message */
+
+ if (!IP_VALID(reply_port))
+ goto slow_copyout;
+
+ is_write_lock(space);
+ assert(space->is_active);
+
+ /*
+ * To do an atomic copyout, need simultaneous
+ * locks on both ports and the space. If
+ * dest_port == reply_port, and simple locking is
+ * enabled, then we will abort. Otherwise it's
+ * OK to unlock twice.
+ */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port) ||
+ !ip_lock_try(reply_port))
+ goto abort_request_copyout;
+
+ if (!ip_active(reply_port)) {
+ ip_unlock(reply_port);
+ goto abort_request_copyout;
+ }
+
+ assert(reply_port->ip_sorights > 0);
+ ip_unlock(reply_port);
+
+ {
+ register ipc_entry_t table;
+ register ipc_entry_t entry;
+ register mach_port_index_t index;
+
+ /* optimized ipc_entry_get */
+
+ table = space->is_table;
+ index = table->ie_next;
+
+ if (index == 0)
+ goto abort_request_copyout;
+
+ entry = &table[index];
+ table->ie_next = entry->ie_next;
+ entry->ie_request = 0;
+
+ {
+ register mach_port_gen_t gen;
+
+ assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = entry->ie_bits + IE_BITS_GEN_ONE;
+
+ reply_name = MACH_PORT_MAKE(index, gen);
+
+ /* optimized ipc_right_copyout */
+
+ entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
+ }
+
+ assert(MACH_PORT_VALID(reply_name));
+ entry->ie_object = (ipc_object_t) reply_port;
+ is_write_unlock(space);
+ }
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_srights > 0);
+ ip_release(dest_port);
+
+ if (dest_port->ip_receiver == space)
+ dest_name = dest_port->ip_receiver_name;
+ else
+ dest_name = MACH_PORT_NULL;
+
+ if ((--dest_port->ip_srights == 0) &&
+ (dest_port->ip_nsrequest != IP_NULL)) {
+ ipc_port_t nsrequest;
+ mach_port_mscount_t mscount;
+
+ /* a rather rare case */
+
+ nsrequest = dest_port->ip_nsrequest;
+ mscount = dest_port->ip_mscount;
+ dest_port->ip_nsrequest = IP_NULL;
+ ip_unlock(dest_port);
+
+ ipc_notify_no_senders(nsrequest, mscount);
+ } else
+ ip_unlock(dest_port);
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND);
+ kmsg->ikm_header.msgh_remote_port = reply_name;
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ goto fast_put;
+
+ abort_request_copyout:
+ ip_unlock(dest_port);
+ is_write_unlock(space);
+ goto slow_copyout;
+ }
+
+ case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ register mach_port_t dest_name;
+
+ /* receiving a reply message */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port))
+ goto slow_copyout;
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_sorights > 0);
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+ dest_port->ip_sorights--;
+ dest_name = dest_port->ip_receiver_name;
+ ip_unlock(dest_port);
+ } else {
+ ip_unlock(dest_port);
+
+ ipc_notify_send_once(dest_port);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ goto fast_put;
+ }
+
+ case MACH_MSGH_BITS_COMPLEX|
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
+ register mach_port_t dest_name;
+
+ /* receiving a complex reply message */
+
+ ip_lock(dest_port);
+ if (!ip_active(dest_port))
+ goto slow_copyout;
+
+ /* optimized ipc_object_copyout_dest */
+
+ assert(dest_port->ip_sorights > 0);
+
+ if (dest_port->ip_receiver == space) {
+ ip_release(dest_port);
+ dest_port->ip_sorights--;
+ dest_name = dest_port->ip_receiver_name;
+ ip_unlock(dest_port);
+ } else {
+ ip_unlock(dest_port);
+
+ ipc_notify_send_once(dest_port);
+ dest_name = MACH_PORT_NULL;
+ }
+
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ kmsg->ikm_header.msgh_local_port = dest_name;
+
+ mr = ipc_kmsg_copyout_body(
+ (vm_offset_t) (&kmsg->ikm_header + 1),
+ (vm_offset_t) &kmsg->ikm_header
+ + kmsg->ikm_header.msgh_size,
+ space,
+ current_map());
+
+ if (mr != MACH_MSG_SUCCESS) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ return mr | MACH_RCV_BODY_ERROR;
+ }
+ goto fast_put;
+ }
+
+ default:
+ goto slow_copyout;
+ }
+ /*NOTREACHED*/
+
+ fast_put:
+ /*
+ * We have the reply message data in kmsg,
+ * and the reply message size in reply_size.
+ * Just need to copy it out to the user and free kmsg.
+ * We must check ikm_cache after copyoutmsg.
+ */
+
+ ikm_check_initialized(kmsg, kmsg->ikm_size);
+
+ if ((kmsg->ikm_size != IKM_SAVED_KMSG_SIZE) ||
+ copyoutmsg((vm_offset_t) &kmsg->ikm_header, (vm_offset_t) msg,
+ reply_size) ||
+ (ikm_cache() != IKM_NULL))
+ goto slow_put;
+
+ ikm_cache() = kmsg;
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+ return MACH_MSG_SUCCESS; /* help for the compiler */
+
+ /*
+ * The slow path has a few non-register temporary
+ * variables used only for call-by-reference.
+ */
+
+ {
+ ipc_kmsg_t temp_kmsg;
+ mach_port_seqno_t temp_seqno;
+ ipc_object_t temp_rcv_object;
+ ipc_mqueue_t temp_rcv_mqueue;
+
+ slow_get:
+ /*
+ * No locks, references, or messages held.
+ * Still have to get the request, send it,
+ * receive reply, etc.
+ */
+
+ mr = ipc_kmsg_get(msg, send_size, &temp_kmsg);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ kmsg = temp_kmsg;
+
+ /* try to get back on optimized path */
+ goto fast_copyin;
+
+ slow_copyin:
+ /*
+ * We have the message data in kmsg, but
+ * we still need to copyin, send it,
+ * receive a reply, and do copyout.
+ */
+
+ mr = ipc_kmsg_copyin(kmsg, space, current_map(),
+ MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ /* try to get back on optimized path */
+
+ if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR)
+ goto slow_send;
+
+ dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ assert(IP_VALID(dest_port));
+
+ ip_lock(dest_port);
+ if (dest_port->ip_receiver == ipc_space_kernel) {
+ assert(ip_active(dest_port));
+ ip_unlock(dest_port);
+ goto kernel_send;
+ }
+
+ if (ip_active(dest_port) &&
+#if NORMA_IPC
+ (! IP_NORMA_IS_PROXY(dest_port)) &&
+#endif NORMA_IPC
+ ((dest_port->ip_msgcount < dest_port->ip_qlimit) ||
+ (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
+ MACH_MSG_TYPE_PORT_SEND_ONCE)))
+ {
+ /*
+ * Try an optimized ipc_mqueue_copyin.
+ * It will work if this is a request message.
+ */
+
+ register ipc_port_t reply_port;
+
+ reply_port = (ipc_port_t)
+ kmsg->ikm_header.msgh_local_port;
+ if (IP_VALID(reply_port)) {
+ if (ip_lock_try(reply_port)) {
+ if (ip_active(reply_port) &&
+ reply_port->ip_receiver == space &&
+ reply_port->ip_receiver_name == rcv_name &&
+ reply_port->ip_pset == IPS_NULL)
+ {
+ /* Grab a reference to the reply port. */
+ rcv_object = (ipc_object_t) reply_port;
+ io_reference(rcv_object);
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ io_unlock(rcv_object);
+ goto fast_send_receive;
+ }
+ ip_unlock(reply_port);
+ }
+ }
+ }
+
+ ip_unlock(dest_port);
+ goto slow_send;
+
+#if NORMA_IPC
+ norma_send:
+ /*
+ * Nothing is locked. We have acquired kmsg, but
+ * we still need to send it and receive a reply.
+ */
+
+ mr = norma_ipc_send(kmsg);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
+ current_map());
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ goto slow_get_rcv_port;
+#endif NORMA_IPC
+
+ kernel_send:
+ /*
+ * Special case: send message to kernel services.
+ * The request message has been copied into the
+ * kmsg. Nothing is locked.
+ */
+
+ {
+ register ipc_port_t reply_port;
+
+ /*
+ * Perform the kernel function.
+ */
+
+ kmsg = ipc_kobject_server(kmsg);
+ if (kmsg == IKM_NULL) {
+ /*
+ * No reply. Take the
+ * slow receive path.
+ */
+ goto slow_get_rcv_port;
+ }
+
+ /*
+ * Check that:
+ * the reply port is alive
+ * we hold the receive right
+ * the name has not changed.
+ * the port is not in a set
+ * If any of these are not true,
+ * we cannot directly receive the reply
+ * message.
+ */
+ reply_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ ip_lock(reply_port);
+
+ if ((!ip_active(reply_port)) ||
+ (reply_port->ip_receiver != space) ||
+ (reply_port->ip_receiver_name != rcv_name) ||
+ (reply_port->ip_pset != IPS_NULL))
+ {
+ ip_unlock(reply_port);
+ ipc_mqueue_send_always(kmsg);
+ goto slow_get_rcv_port;
+ }
+
+ rcv_mqueue = &reply_port->ip_messages;
+ imq_lock(rcv_mqueue);
+ /* keep port locked, and don`t change ref count yet */
+
+ /*
+ * If there are messages on the port
+ * or other threads waiting for a message,
+ * we cannot directly receive the reply.
+ */
+ if ((ipc_thread_queue_first(&rcv_mqueue->imq_threads)
+ != ITH_NULL) ||
+ (ipc_kmsg_queue_first(&rcv_mqueue->imq_messages)
+ != IKM_NULL))
+ {
+ imq_unlock(rcv_mqueue);
+ ip_unlock(reply_port);
+ ipc_mqueue_send_always(kmsg);
+ goto slow_get_rcv_port;
+ }
+
+ /*
+ * We can directly receive this reply.
+ * Since the kernel reply never blocks,
+ * it holds no message_accepted request.
+ * Since there were no messages queued
+ * on the reply port, there should be
+ * no threads blocked waiting to send.
+ */
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ assert(ipc_thread_queue_first(&reply_port->ip_blocked)
+ == ITH_NULL);
+
+ dest_port = reply_port;
+ kmsg->ikm_header.msgh_seqno = dest_port->ip_seqno++;
+ imq_unlock(rcv_mqueue);
+
+ /*
+ * inline ipc_object_release.
+ * Port is still locked.
+ * Reference count was not incremented.
+ */
+ ip_check_unlock(reply_port);
+
+ /* copy out the kernel reply */
+ goto fast_copyout;
+ }
+
+ slow_send:
+ /*
+ * Nothing is locked. We have acquired kmsg, but
+ * we still need to send it and receive a reply.
+ */
+
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space,
+ current_map());
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ slow_get_rcv_port:
+ /*
+ * We have sent the message. Copy in the receive port.
+ */
+ mr = ipc_mqueue_copyin(space, rcv_name,
+ &temp_rcv_mqueue, &temp_rcv_object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ rcv_mqueue = temp_rcv_mqueue;
+ rcv_object = temp_rcv_object;
+ /* hold ref for rcv_object; rcv_mqueue is locked */
+
+ /*
+ slow_receive:
+ */
+ /*
+ * Now we have sent the request and copied in rcv_name,
+ * so rcv_mqueue is locked and hold ref for rcv_object.
+ * Just receive a reply and try to get back to fast path.
+ *
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = rcv_object;
+ self->ith_mqueue = rcv_mqueue;
+
+ mr = ipc_mqueue_receive(rcv_mqueue,
+ MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, mach_msg_continue,
+ &temp_kmsg, &temp_seqno);
+ /* rcv_mqueue is unlocked */
+ ipc_object_release(rcv_object);
+ if (mr != MACH_MSG_SUCCESS) {
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ (kmsg = temp_kmsg)->ikm_header.msgh_seqno = temp_seqno;
+ dest_port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ goto fast_copyout;
+
+ slow_copyout:
+ /*
+ * Nothing locked and no references held, except
+ * we have kmsg with msgh_seqno filled in. Must
+ * still check against rcv_size and do
+ * ipc_kmsg_copyout/ipc_kmsg_put.
+ */
+
+ reply_size = kmsg->ikm_header.msgh_size;
+ if (rcv_size < reply_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ thread_syscall_return(MACH_RCV_TOO_LARGE);
+ /*NOTREACHED*/
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, current_map(),
+ MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+
+ /* try to get back on optimized path */
+
+ goto fast_put;
+
+ slow_put:
+ mr = ipc_kmsg_put(msg, kmsg, reply_size);
+ thread_syscall_return(mr);
+ /*NOTREACHED*/
+ }
+ } else if (option == MACH_SEND_MSG) {
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_kmsg_t kmsg;
+
+ mr = ipc_kmsg_get(msg, send_size, &kmsg);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ mr = ipc_kmsg_copyin(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ ikm_free(kmsg);
+ return mr;
+ }
+
+ mr = ipc_mqueue_send(kmsg, MACH_MSG_OPTION_NONE,
+ MACH_MSG_TIMEOUT_NONE);
+ if (mr != MACH_MSG_SUCCESS) {
+ mr |= ipc_kmsg_copyout_pseudo(kmsg, space, map);
+
+ assert(kmsg->ikm_marequest == IMAR_NULL);
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ }
+
+ return mr;
+ } else if (option == MACH_RCV_MSG) {
+ ipc_thread_t self = current_thread();
+ ipc_space_t space = current_space();
+ vm_map_t map = current_map();
+ ipc_object_t object;
+ ipc_mqueue_t mqueue;
+ ipc_kmsg_t kmsg;
+ mach_port_seqno_t seqno;
+
+ mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ /* hold ref for object; mqueue is locked */
+
+ /*
+ * ipc_mqueue_receive may not return, because if we block
+ * then our kernel stack may be discarded. So we save
+ * state here for mach_msg_continue to pick up.
+ */
+
+ self->ith_msg = msg;
+ self->ith_rcv_size = rcv_size;
+ self->ith_object = object;
+ self->ith_mqueue = mqueue;
+
+ mr = ipc_mqueue_receive(mqueue,
+ MACH_MSG_OPTION_NONE,
+ MACH_MSG_SIZE_MAX,
+ MACH_MSG_TIMEOUT_NONE,
+ FALSE, mach_msg_continue,
+ &kmsg, &seqno);
+ /* mqueue is unlocked */
+ ipc_object_release(object);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ kmsg->ikm_header.msgh_seqno = seqno;
+ if (rcv_size < kmsg->ikm_header.msgh_size) {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ return MACH_RCV_TOO_LARGE;
+ }
+
+ mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+ if (mr != MACH_MSG_SUCCESS) {
+ if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+ (void) ipc_kmsg_put(msg, kmsg,
+ kmsg->ikm_header.msgh_size);
+ } else {
+ ipc_kmsg_copyout_dest(kmsg, space);
+ (void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+ }
+
+ return mr;
+ }
+
+ return ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+ } else if (option == MACH_MSG_OPTION_NONE) {
+ /*
+ * We can measure the "null mach_msg_trap"
+ * (syscall entry and thread_syscall_return exit)
+ * with this path.
+ */
+
+ thread_syscall_return(MACH_MSG_SUCCESS);
+ /*NOTREACHED*/
+ }
+#endif /* CONTINUATIONS */
+
+ if (option & MACH_SEND_MSG) {
+ mr = mach_msg_send(msg, option, send_size,
+ time_out, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ }
+
+ if (option & MACH_RCV_MSG) {
+ mr = mach_msg_receive(msg, option, rcv_size, rcv_name,
+ time_out, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+ }
+
+ return MACH_MSG_SUCCESS;
+}
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: mach_msg_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+mach_msg_continue()
+{
+	ipc_thread_t thread = current_thread();
+	task_t task = thread->task;
+	ipc_space_t space = task->itk_space;
+	vm_map_t map = task->map;
+	/* Receive state that mach_msg_trap saved in the thread before blocking. */
+	mach_msg_header_t *msg = thread->ith_msg;
+	mach_msg_size_t rcv_size = thread->ith_rcv_size;
+	ipc_object_t object = thread->ith_object;
+	ipc_mqueue_t mqueue = thread->ith_mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/*
+	 * Resume the receive.  The TRUE/continuation arguments mean
+	 * this call may block again and discard the stack, re-entering
+	 * through mach_msg_continue rather than returning here.
+	 */
+	mr = ipc_mqueue_receive(mqueue, MACH_MSG_OPTION_NONE,
+				MACH_MSG_SIZE_MAX, MACH_MSG_TIMEOUT_NONE,
+				TRUE, mach_msg_continue, &kmsg, &seqno);
+	/* mqueue is unlocked */
+	ipc_object_release(object);
+	if (mr != MACH_MSG_SUCCESS) {
+		thread_syscall_return(mr);
+		/*NOTREACHED*/
+	}
+
+	kmsg->ikm_header.msgh_seqno = seqno;
+	if (kmsg->ikm_header.msgh_size > rcv_size) {
+		/*
+		 * Message is larger than the caller's buffer.
+		 * Copy out just the destination right and return
+		 * only the (updated) header to the user.
+		 */
+		ipc_kmsg_copyout_dest(kmsg, space);
+		(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+		thread_syscall_return(MACH_RCV_TOO_LARGE);
+		/*NOTREACHED*/
+	}
+
+	mr = ipc_kmsg_copyout(kmsg, space, map, MACH_PORT_NULL);
+	if (mr != MACH_MSG_SUCCESS) {
+		if ((mr &~ MACH_MSG_MASK) == MACH_RCV_BODY_ERROR) {
+			/* Header copied out OK; hand back the whole message. */
+			(void) ipc_kmsg_put(msg, kmsg,
+					    kmsg->ikm_header.msgh_size);
+		} else {
+			/* Header copyout failed; salvage only the dest right. */
+			ipc_kmsg_copyout_dest(kmsg, space);
+			(void) ipc_kmsg_put(msg, kmsg, sizeof *msg);
+		}
+
+		thread_syscall_return(mr);
+		/*NOTREACHED*/
+	}
+
+	/* Normal path: copy the full message out to the user buffer. */
+	mr = ipc_kmsg_put(msg, kmsg, kmsg->ikm_header.msgh_size);
+	thread_syscall_return(mr);
+	/*NOTREACHED*/
+}
+
+/*
+ * Routine: mach_msg_interrupt
+ * Purpose:
+ * Attempts to force a thread waiting at mach_msg_continue or
+ * mach_msg_receive_continue into a clean point. Returns TRUE
+ * if this was possible.
+ * Conditions:
+ * Nothing locked. The thread must NOT be runnable.
+ */
+
+boolean_t
+mach_msg_interrupt(thread)
+	thread_t thread;
+{
+	ipc_mqueue_t mqueue;
+
+	/* Only valid for threads parked in one of the receive continuations. */
+	assert((thread->swap_func == (void (*)()) mach_msg_continue) ||
+	       (thread->swap_func == (void (*)()) mach_msg_receive_continue));
+
+	/*
+	 * ith_state must be examined under the mqueue lock; a sender
+	 * completing a hand-off changes it while holding that lock.
+	 */
+	mqueue = thread->ith_mqueue;
+	imq_lock(mqueue);
+	if (thread->ith_state != MACH_RCV_IN_PROGRESS) {
+		/*
+		 * The thread is no longer waiting for a message.
+		 * It may have a message sitting in ith_kmsg.
+		 * We can't clean this up.
+		 */
+
+		imq_unlock(mqueue);
+		return FALSE;
+	}
+	/* Still waiting: pull the thread off the mqueue's waiter list. */
+	ipc_thread_rmqueue(&mqueue->imq_threads, thread);
+	imq_unlock(mqueue);
+
+	/* Drop the receive-object reference the blocked receive was holding. */
+	ipc_object_release(thread->ith_object);
+
+	/*
+	 * Arrange for the thread to resume in user mode with
+	 * MACH_RCV_INTERRUPTED instead of re-running the receive.
+	 */
+	thread_set_syscall_return(thread, MACH_RCV_INTERRUPTED);
+	thread->swap_func = thread_exception_return;
+	return TRUE;
+}
+#endif /* CONTINUATIONS */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: msg_return_translate
+ * Purpose:
+ * Translate from new error code to old error code.
+ */
+
+msg_return_t
+msg_return_translate(mr)
+	mach_msg_return_t mr;
+{
+	/*
+	 * Convert a new-style mach_msg return value into the old
+	 * msg_* error space used by the compatibility traps.  Only
+	 * the base code matters, so strip the sub-code bits first.
+	 */
+	register mach_msg_return_t code = mr &~ MACH_MSG_MASK;
+
+	switch (code) {
+	case MACH_MSG_SUCCESS:
+		return 0;	/* SEND_SUCCESS/RCV_SUCCESS/RPC_SUCCESS */
+
+	/* send-side codes */
+
+	case MACH_SEND_NO_BUFFER:
+	case MACH_SEND_NO_NOTIFY:
+		printf("msg_return_translate: %x -> interrupted\n", mr);
+		return SEND_INTERRUPTED;
+	case MACH_SEND_INTERRUPTED:
+		return SEND_INTERRUPTED;
+	case MACH_SEND_MSG_TOO_SMALL:
+		return SEND_MSG_TOO_SMALL;
+	case MACH_SEND_TIMED_OUT:
+		return SEND_TIMED_OUT;
+	case MACH_SEND_INVALID_DATA:
+	case MACH_SEND_INVALID_MEMORY:
+		return SEND_INVALID_MEMORY;
+	case MACH_SEND_INVALID_DEST:
+	case MACH_SEND_INVALID_REPLY:
+	case MACH_SEND_INVALID_RIGHT:
+	case MACH_SEND_INVALID_TYPE:
+		return SEND_INVALID_PORT;
+	case MACH_SEND_WILL_NOTIFY:
+		return SEND_WILL_NOTIFY;
+	case MACH_SEND_NOTIFY_IN_PROGRESS:
+		return SEND_NOTIFY_IN_PROGRESS;
+
+	/* receive-side codes */
+
+	case MACH_RCV_INVALID_NAME:
+	case MACH_RCV_IN_SET:
+	case MACH_RCV_PORT_DIED:
+		return RCV_INVALID_PORT;
+	case MACH_RCV_TOO_LARGE:
+		return RCV_TOO_LARGE;
+	case MACH_RCV_TIMED_OUT:
+		return RCV_TIMED_OUT;
+	case MACH_RCV_INTERRUPTED:
+		return RCV_INTERRUPTED;
+	case MACH_RCV_PORT_CHANGED:
+		return RCV_PORT_CHANGE;
+	case MACH_RCV_INVALID_DATA:
+		return RCV_INVALID_MEMORY;
+
+	/*
+	 * Codes the compatibility paths can never produce;
+	 * reaching here indicates a kernel bug.
+	 */
+	case MACH_SEND_IN_PROGRESS:
+	case MACH_SEND_INVALID_NOTIFY:
+	case MACH_SEND_INVALID_HEADER:
+	case MACH_RCV_IN_PROGRESS:
+	case MACH_RCV_INVALID_NOTIFY:
+	case MACH_RCV_HEADER_ERROR:
+	case MACH_RCV_BODY_ERROR:
+	default:
+#if MACH_ASSERT
+		assert(!"msg_return_translate");
+#else
+		panic("msg_return_translate");
+#endif
+	}
+}
+
+/*
+ * Routine: msg_send_trap [mach trap]
+ * Purpose:
+ * Send a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+msg_return_t
+msg_send_trap(msg, option, send_size, time_out)
+	msg_header_t *msg;
+	msg_option_t option;
+	msg_size_t send_size;
+	msg_timeout_t time_out;
+{
+	ipc_space_t space = current_space();
+	vm_map_t map = current_map();
+	ipc_kmsg_t kmsg;
+	mach_msg_return_t mr;
+
+	send_size = (send_size + 3) & ~3; /* round up to a 4-byte multiple */
+
+	if (send_size > MSG_SIZE_MAX)
+		return SEND_MSG_TOO_LARGE;
+
+	/* copy the message in from user space into a kernel kmsg */
+	mr = ipc_kmsg_get((mach_msg_header_t *) msg,
+			  (mach_msg_size_t) send_size,
+			  &kmsg);
+	if (mr != MACH_MSG_SUCCESS)
+		return msg_return_translate(mr);
+
+	/* translate port names/rights (compat variant for the old format) */
+	mr = ipc_kmsg_copyin_compat(kmsg, space, map);
+	if (mr != MACH_MSG_SUCCESS) {
+		/* rights not yet copied in; a plain free suffices */
+		ikm_free(kmsg);
+		return msg_return_translate(mr);
+	}
+
+	if (option & SEND_NOTIFY) {
+		/*
+		 *	SEND_NOTIFY: attempt the (possibly timed) send;
+		 *	if it times out, register a msg-accepted request
+		 *	on the destination and force the message onto
+		 *	the queue, telling the caller that a
+		 *	notification will arrive later.
+		 */
+		mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+				     ((option & SEND_TIMEOUT) ?
+				      (mach_msg_timeout_t) time_out :
+				      MACH_MSG_TIMEOUT_NONE));
+		if (mr == MACH_SEND_TIMED_OUT) {
+			ipc_port_t dest = (ipc_port_t)
+				kmsg->ikm_header.msgh_remote_port;
+
+			mr = ipc_marequest_create(space, dest, MACH_PORT_NULL,
+						  &kmsg->ikm_marequest);
+			if (mr == MACH_MSG_SUCCESS) {
+				ipc_mqueue_send_always(kmsg);
+				return SEND_WILL_NOTIFY;
+			}
+		}
+	} else
+		mr = ipc_mqueue_send(kmsg,
+				     ((option & SEND_TIMEOUT) ?
+				      MACH_SEND_TIMEOUT :
+				      MACH_MSG_OPTION_NONE),
+				     (mach_msg_timeout_t) time_out);
+
+	/* on failure the copied-in rights must be destroyed, not just freed */
+	if (mr != MACH_MSG_SUCCESS)
+		ipc_kmsg_destroy(kmsg);
+
+	return msg_return_translate(mr);
+}
+
+/*
+ * Routine: msg_receive_trap [mach trap]
+ * Purpose:
+ * Receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+msg_return_t
+msg_receive_trap(msg, option, rcv_size, rcv_name, time_out)
+	msg_header_t *msg;
+	msg_option_t option;
+	msg_size_t rcv_size;
+	port_name_t rcv_name;
+	msg_timeout_t time_out;
+{
+	ipc_thread_t self;
+	ipc_space_t space = current_space();
+	vm_map_t map = current_map();
+	ipc_object_t object;
+	ipc_mqueue_t mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/* translate rcv_name into a message queue to receive from */
+	mr = ipc_mqueue_copyin(space, (mach_port_t) rcv_name,
+			       &mqueue, &object);
+	if (mr != MACH_MSG_SUCCESS)
+		return msg_return_translate(mr);
+	/* hold ref for object; mqueue is locked */
+
+#ifdef CONTINUATIONS
+	/*
+	 * ipc_mqueue_receive may not return, because if we block
+	 * then our kernel stack may be discarded.  So we save
+	 * state here for msg_receive_continue to pick up.
+	 */
+
+	self = current_thread();
+	self->ith_msg = (mach_msg_header_t *) msg;
+	self->ith_option = (mach_msg_option_t) option;
+	self->ith_rcv_size = (mach_msg_size_t) rcv_size;
+	self->ith_timeout = (mach_msg_timeout_t) time_out;
+	self->ith_object = object;
+	self->ith_mqueue = mqueue;
+#endif /* CONTINUATIONS */
+
+	mr = ipc_mqueue_receive(mqueue,
+				(option & RCV_TIMEOUT) ?
+				MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
+				(mach_msg_size_t) rcv_size,
+				(mach_msg_timeout_t) time_out,
+				FALSE, msg_receive_continue,
+				&kmsg, &seqno);
+	/* mqueue is unlocked */
+	ipc_object_release(object);
+	if (mr != MACH_MSG_SUCCESS) {
+		if (mr == MACH_RCV_TOO_LARGE) {
+			/*
+			 *	On MACH_RCV_TOO_LARGE the kmsg slot
+			 *	carries the message's real size, not a
+			 *	kmsg pointer (see the assert below);
+			 *	report it back in the user's header.
+			 */
+			msg_size_t real_size =
+				(msg_size_t) (mach_msg_size_t) kmsg;
+
+			assert(real_size > rcv_size);
+
+			(void) copyout((vm_offset_t) &real_size,
+				       (vm_offset_t) &msg->msg_size,
+				       sizeof(msg_size_t));
+		}
+
+		return msg_return_translate(mr);
+	}
+
+	assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);
+
+	/* translate rights back into the receiver's space (old format) */
+	mr = ipc_kmsg_copyout_compat(kmsg, space, map);
+	assert(mr == MACH_MSG_SUCCESS);
+
+	/* copy the message out to user space and free the kmsg */
+	mr = ipc_kmsg_put((mach_msg_header_t *) msg, kmsg,
+			  kmsg->ikm_header.msgh_size);
+	return msg_return_translate(mr);
+}
+
+/*
+ * Routine: msg_rpc_trap [mach trap]
+ * Purpose:
+ * Send and receive a message.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ */
+
+msg_return_t
+msg_rpc_trap(msg, option, send_size, rcv_size, send_timeout, rcv_timeout)
+	msg_header_t *msg;
+	msg_option_t option;
+	msg_size_t send_size;
+	msg_size_t rcv_size;
+	msg_timeout_t send_timeout;
+	msg_timeout_t rcv_timeout;
+{
+	ipc_thread_t self;
+	ipc_space_t space = current_space();
+	vm_map_t map = current_map();
+	ipc_port_t reply;
+	ipc_pset_t pset;
+	ipc_mqueue_t mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/*
+	 * Instead of using msg_send_trap and msg_receive_trap,
+	 * we implement msg_rpc_trap directly.  The difference
+	 * is how the reply port is handled.  Instead of using
+	 * ipc_mqueue_copyin, we save a reference for the reply
+	 * port carried in the sent message.  For example,
+	 * consider a rename kernel call which changes the name
+	 * of the call's own reply port.  This is the behaviour
+	 * of the Mach 2.5 msg_rpc_trap.
+	 */
+
+	send_size = (send_size + 3) & ~3; /* round up to a 4-byte multiple */
+
+	if (send_size > MSG_SIZE_MAX)
+		return SEND_MSG_TOO_LARGE;
+
+	/* copy the message in from user space into a kernel kmsg */
+	mr = ipc_kmsg_get((mach_msg_header_t *) msg,
+			  (mach_msg_size_t) send_size,
+			  &kmsg);
+	if (mr != MACH_MSG_SUCCESS)
+		return msg_return_translate(mr);
+
+	mr = ipc_kmsg_copyin_compat(kmsg, space, map);
+	if (mr != MACH_MSG_SUCCESS) {
+		ikm_free(kmsg);
+		return msg_return_translate(mr);
+	}
+
+	/* pin the reply port across the send, so it survives for the rcv */
+	reply = (ipc_port_t) kmsg->ikm_header.msgh_local_port;
+	if (IP_VALID(reply))
+		ipc_port_reference(reply);
+
+	if (option & SEND_NOTIFY) {
+		/* see msg_send_trap for the SEND_NOTIFY protocol */
+		mr = ipc_mqueue_send(kmsg, MACH_SEND_TIMEOUT,
+				     ((option & SEND_TIMEOUT) ?
+				      (mach_msg_timeout_t) send_timeout :
+				      MACH_MSG_TIMEOUT_NONE));
+		if (mr == MACH_SEND_TIMED_OUT) {
+			ipc_port_t dest = (ipc_port_t)
+				kmsg->ikm_header.msgh_remote_port;
+
+			mr = ipc_marequest_create(space, dest, MACH_PORT_NULL,
+						  &kmsg->ikm_marequest);
+			if (mr == MACH_MSG_SUCCESS) {
+				ipc_mqueue_send_always(kmsg);
+				if (IP_VALID(reply))
+					ipc_port_release(reply);
+				return SEND_WILL_NOTIFY;
+			}
+		}
+	} else
+		mr = ipc_mqueue_send(kmsg,
+				     ((option & SEND_TIMEOUT) ?
+				      MACH_SEND_TIMEOUT :
+				      MACH_MSG_OPTION_NONE),
+				     (mach_msg_timeout_t) send_timeout);
+
+	if (mr != MACH_MSG_SUCCESS) {
+		ipc_kmsg_destroy(kmsg);
+		if (IP_VALID(reply))
+			ipc_port_release(reply);
+		return msg_return_translate(mr);
+	}
+
+	if (!IP_VALID(reply))
+		return RCV_INVALID_PORT;
+
+	/* the caller must still hold receive rights for the reply port */
+	ip_lock(reply);
+	if (reply->ip_receiver != space) {
+		ip_release(reply);
+		ip_check_unlock(reply);
+		return RCV_INVALID_PORT;
+	}
+
+	assert(ip_active(reply));
+	pset = reply->ip_pset;
+
+	if (pset != IPS_NULL) {
+		ips_lock(pset);
+		if (ips_active(pset)) {
+			/* can't receive directly from a port in a live set */
+			ips_unlock(pset);
+			ip_release(reply);
+			ip_unlock(reply);
+			return RCV_INVALID_PORT;
+		}
+
+		/* the set is dead: detach the port from it */
+		ipc_pset_remove(pset, reply);
+		ips_check_unlock(pset);
+		assert(reply->ip_pset == IPS_NULL);
+	}
+
+	mqueue = &reply->ip_messages;
+	imq_lock(mqueue);
+	ip_unlock(reply);
+
+#ifdef CONTINUATIONS
+	/*
+	 * ipc_mqueue_receive may not return, because if we block
+	 * then our kernel stack may be discarded.  So we save
+	 * state here for msg_receive_continue to pick up.
+	 */
+
+	self = current_thread();
+	self->ith_msg = (mach_msg_header_t *) msg;
+	self->ith_option = (mach_msg_option_t) option;
+	self->ith_rcv_size = (mach_msg_size_t) rcv_size;
+	self->ith_timeout = (mach_msg_timeout_t) rcv_timeout;
+	self->ith_object = (ipc_object_t) reply;
+	self->ith_mqueue = mqueue;
+#endif /* CONTINUATIONS */
+
+	mr = ipc_mqueue_receive(mqueue,
+				(option & RCV_TIMEOUT) ?
+				MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
+				(mach_msg_size_t) rcv_size,
+				(mach_msg_timeout_t) rcv_timeout,
+				FALSE, msg_receive_continue,
+				&kmsg, &seqno);
+	/* mqueue is unlocked */
+	ipc_port_release(reply);
+	if (mr != MACH_MSG_SUCCESS) {
+		if (mr == MACH_RCV_TOO_LARGE) {
+			/*
+			 *	kmsg carries the message's real size here,
+			 *	not a pointer (see the assert below).
+			 */
+			msg_size_t real_size =
+				(msg_size_t) (mach_msg_size_t) kmsg;
+
+			assert(real_size > rcv_size);
+
+			(void) copyout((vm_offset_t) &real_size,
+				       (vm_offset_t) &msg->msg_size,
+				       sizeof(msg_size_t));
+		}
+
+		return msg_return_translate(mr);
+	}
+
+	assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);
+
+	mr = ipc_kmsg_copyout_compat(kmsg, space, map);
+	assert(mr == MACH_MSG_SUCCESS);
+
+	mr = ipc_kmsg_put((mach_msg_header_t *) msg,
+			  kmsg, kmsg->ikm_header.msgh_size);
+	return msg_return_translate(mr);
+}
+
+#ifdef CONTINUATIONS
+/*
+ * Routine: msg_receive_continue
+ * Purpose:
+ * Continue after blocking for a message.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack,
+ * with the receive state saved in the thread. From here
+ * control goes back to user space.
+ */
+
+void
+msg_receive_continue()
+{
+	ipc_thread_t self = current_thread();
+	/* recover the receive state saved by msg_receive_trap/msg_rpc_trap */
+	msg_header_t *msg = (msg_header_t *) self->ith_msg;
+	msg_option_t option = (msg_option_t) self->ith_option;
+	msg_size_t rcv_size = (msg_size_t) self->ith_rcv_size;
+	msg_timeout_t time_out = (msg_timeout_t) self->ith_timeout;
+	ipc_object_t object = self->ith_object;
+	ipc_mqueue_t mqueue = self->ith_mqueue;
+	ipc_kmsg_t kmsg;
+	mach_port_seqno_t seqno;
+	mach_msg_return_t mr;
+
+	/* retry the receive; TRUE marks this as a continuation */
+	mr = ipc_mqueue_receive(mqueue,
+				(option & RCV_TIMEOUT) ?
+				MACH_RCV_TIMEOUT : MACH_MSG_OPTION_NONE,
+				(mach_msg_size_t) rcv_size,
+				(mach_msg_timeout_t) time_out,
+				TRUE, msg_receive_continue,
+				&kmsg, &seqno);
+	/* mqueue is unlocked */
+	ipc_object_release(object);
+	if (mr != MACH_MSG_SUCCESS) {
+		if (mr == MACH_RCV_TOO_LARGE) {
+			/* kmsg carries the real size (see assert below) */
+			msg_size_t real_size =
+				(msg_size_t) (mach_msg_size_t) kmsg;
+
+			assert(real_size > rcv_size);
+
+			(void) copyout((vm_offset_t) &real_size,
+				       (vm_offset_t) &msg->msg_size,
+				       sizeof(msg_size_t));
+		}
+
+		/* return straight to user space; no stack to unwind */
+		thread_syscall_return(msg_return_translate(mr));
+		/*NOTREACHED*/
+	}
+
+	assert(kmsg->ikm_header.msgh_size <= (mach_msg_size_t) rcv_size);
+
+	mr = ipc_kmsg_copyout_compat(kmsg, current_space(), current_map());
+	assert(mr == MACH_MSG_SUCCESS);
+
+	mr = ipc_kmsg_put((mach_msg_header_t *) msg, kmsg,
+			  kmsg->ikm_header.msgh_size);
+	thread_syscall_return(msg_return_translate(mr));
+	/*NOTREACHED*/
+}
+#endif /* CONTINUATIONS */
+
+#endif MACH_IPC_COMPAT
diff --git a/ipc/mach_msg.h b/ipc/mach_msg.h
new file mode 100644
index 0000000..55c3526
--- /dev/null
+++ b/ipc/mach_msg.h
@@ -0,0 +1,68 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_msg.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Declarations of internal messaging primitives.
+ */
+
+#ifndef	_IPC_MACH_MSG_H_
+#define	_IPC_MACH_MSG_H_
+
+#include <mach_ipc_compat.h>
+
+#include <mach/boolean.h>
+#include <mach/message.h>
+
+extern mach_msg_return_t
+mach_msg_send(/* mach_msg_header_t *, mach_msg_option_t,
+		 mach_msg_size_t, mach_msg_timeout_t, mach_port_t */);
+
+extern mach_msg_return_t
+mach_msg_receive(/* mach_msg_header_t *, mach_msg_option_t,
+		    mach_msg_size_t, mach_port_t,
+		    mach_msg_timeout_t, mach_port_t */);
+
+extern void
+mach_msg_receive_continue();
+
+extern void
+mach_msg_continue();
+
+extern boolean_t
+mach_msg_interrupt(/* thread_t */);
+
+#if	MACH_IPC_COMPAT
+
+extern void
+msg_receive_continue();
+
+/*
+ * Bare macro names after #endif are not standard C and draw
+ * -Wendif-labels warnings; wrap them in comments.
+ */
+#endif	/* MACH_IPC_COMPAT */
+#endif	/* _IPC_MACH_MSG_H_ */
diff --git a/ipc/mach_port.c b/ipc/mach_port.c
new file mode 100644
index 0000000..b26c96b
--- /dev/null
+++ b/ipc/mach_port.c
@@ -0,0 +1,2505 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/mach_port.c
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Exported kernel calls. See mach/mach_port.defs.
+ */
+
+#include <mach_ipc_compat.h>
+
+#include <mach/port.h>
+#include <mach/kern_return.h>
+#include <mach/notify.h>
+#include <mach/mach_param.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#ifdef MIGRATING_THREADS
+#include <mach/rpc.h>
+#include <kern/task.h>
+#include <kern/act.h>
+#endif /* MIGRATING_THREADS */
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+
+
+
+/*
+ * Routine: mach_port_names_helper
+ * Purpose:
+ * A helper function for mach_port_names.
+ */
+
+void
+mach_port_names_helper(
+	ipc_port_timestamp_t timestamp,	/* logical time of the enclosing scan */
+	ipc_entry_t entry,		/* entry being reported */
+	mach_port_t name,		/* name denoting the entry */
+	mach_port_t *names,		/* out: name appended at *actualp */
+	mach_port_type_t *types,	/* out: type appended at *actualp */
+	ipc_entry_num_t *actualp)	/* in/out: count of filled slots */
+{
+	ipc_entry_bits_t bits = entry->ie_bits;
+	ipc_port_request_index_t request = entry->ie_request;
+	mach_port_type_t type;
+	ipc_entry_num_t actual;
+
+	if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
+		ipc_port_t port;
+		boolean_t died;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		/*
+		 * The timestamp serializes mach_port_names
+		 * with ipc_port_destroy.  If the port died,
+		 * but after mach_port_names started, pretend
+		 * that it isn't dead.
+		 */
+
+		ip_lock(port);
+		died = (!ip_active(port) &&
+			IP_TIMESTAMP_ORDER(port->ip_timestamp, timestamp));
+		ip_unlock(port);
+
+		if (died) {
+#if MACH_IPC_COMPAT
+			/* compat-mode rights vanish instead of going dead */
+			if (bits & IE_BITS_COMPAT)
+				return;
+#endif	/* MACH_IPC_COMPAT */
+
+			/* pretend this is a dead-name entry */
+
+			bits &= ~(IE_BITS_TYPE_MASK|IE_BITS_MAREQUEST);
+			bits |= MACH_PORT_TYPE_DEAD_NAME;
+			/* NOTE(review): bits++ bumps the uref count held in
+			   the low bits when a dnrequest was registered —
+			   presumably the request converts to a uref; confirm
+			   against ipc_right.c. */
+			if (request != 0)
+				bits++;
+			request = 0;
+		}
+	}
+
+	type = IE_BITS_TYPE(bits);
+#if MACH_IPC_COMPAT
+	if (bits & IE_BITS_COMPAT)
+		type |= MACH_PORT_TYPE_COMPAT;
+	else
+#endif	/* MACH_IPC_COMPAT */
+	if (request != 0)
+		type |= MACH_PORT_TYPE_DNREQUEST;
+	if (bits & IE_BITS_MAREQUEST)
+		type |= MACH_PORT_TYPE_MAREQUEST;
+
+	/* append the (name, type) pair to the caller's arrays */
+	actual = *actualp;
+	names[actual] = name;
+	types[actual] = type;
+	*actualp = actual+1;
+}
+
+/*
+ * Routine: mach_port_names [kernel call]
+ * Purpose:
+ * Retrieves a list of the rights present in the space,
+ * along with type information. (Same as returned
+ * by mach_port_type.) The names are returned in
+ * no particular order, but they (and the type info)
+ * are an accurate snapshot of the space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Arrays of names and types returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_names(
+	ipc_space_t space,
+	mach_port_t **namesp,
+	mach_msg_type_number_t *namesCnt,
+	mach_port_t **typesp,
+	mach_msg_type_number_t *typesCnt)
+{
+	ipc_tree_entry_t tentry;
+	ipc_entry_t table;
+	ipc_entry_num_t tsize;
+	mach_port_index_t index;
+	ipc_entry_num_t actual;	/* this many names */
+	ipc_port_timestamp_t timestamp;	/* logical time of this operation */
+	mach_port_t *names;
+	mach_port_type_t *types;
+	kern_return_t kr;
+
+	vm_size_t size;		/* size of allocated memory */
+	vm_offset_t addr1;	/* allocated memory, for names */
+	vm_offset_t addr2;	/* allocated memory, for types */
+	vm_map_copy_t memory1;	/* copied-in memory, for names */
+	vm_map_copy_t memory2;	/* copied-in memory, for types */
+
+	/* safe simplifying assumption */
+	assert_static(sizeof(mach_port_t) == sizeof(mach_port_type_t));
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	size = 0;
+
+	/*
+	 *	Allocation loop: compute an upper bound on the number of
+	 *	names while holding the space lock, then (if the current
+	 *	buffers are too small) drop the lock, grow the buffers,
+	 *	and retry — the space may have grown in the meantime.
+	 */
+	for (;;) {
+		ipc_entry_num_t bound;
+		vm_size_t size_needed;
+
+		is_read_lock(space);
+		if (!space->is_active) {
+			is_read_unlock(space);
+			if (size != 0) {
+				kmem_free(ipc_kernel_map, addr1, size);
+				kmem_free(ipc_kernel_map, addr2, size);
+			}
+			return KERN_INVALID_TASK;
+		}
+
+		/* upper bound on number of names in the space */
+
+		bound = space->is_table_size + space->is_tree_total;
+		size_needed = round_page(bound * sizeof(mach_port_t));
+
+		if (size_needed <= size)
+			break;
+
+		is_read_unlock(space);
+
+		/* free the too-small buffers from the previous iteration */
+		if (size != 0) {
+			kmem_free(ipc_kernel_map, addr1, size);
+			kmem_free(ipc_kernel_map, addr2, size);
+		}
+		size = size_needed;
+
+		kr = vm_allocate(ipc_kernel_map, &addr1, size, TRUE);
+		if (kr != KERN_SUCCESS)
+			return KERN_RESOURCE_SHORTAGE;
+
+		kr = vm_allocate(ipc_kernel_map, &addr2, size, TRUE);
+		if (kr != KERN_SUCCESS) {
+			kmem_free(ipc_kernel_map, addr1, size);
+			return KERN_RESOURCE_SHORTAGE;
+		}
+
+		/* can't fault while we hold locks */
+
+		kr = vm_map_pageable(ipc_kernel_map, addr1, addr1 + size,
+				     VM_PROT_READ|VM_PROT_WRITE);
+		assert(kr == KERN_SUCCESS);
+
+		kr = vm_map_pageable(ipc_kernel_map, addr2, addr2 + size,
+				     VM_PROT_READ|VM_PROT_WRITE);
+		assert(kr == KERN_SUCCESS);
+	}
+	/* space is read-locked and active */
+
+	names = (mach_port_t *) addr1;
+	types = (mach_port_type_t *) addr2;
+	actual = 0;
+
+	/* timestamp lets the helper detect ports that died mid-scan */
+	timestamp = ipc_port_timestamp();
+
+	table = space->is_table;
+	tsize = space->is_table_size;
+
+	/* first pass: the dense entry table */
+	for (index = 0; index < tsize; index++) {
+		ipc_entry_t entry = &table[index];
+		ipc_entry_bits_t bits = entry->ie_bits;
+
+		if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
+			mach_port_t name = MACH_PORT_MAKEB(index, bits);
+
+			mach_port_names_helper(timestamp, entry, name,
+					       names, types, &actual);
+		}
+	}
+
+	/* second pass: the splay-tree overflow entries */
+	for (tentry = ipc_splay_traverse_start(&space->is_tree);
+	     tentry != ITE_NULL;
+	     tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
+		ipc_entry_t entry = &tentry->ite_entry;
+		mach_port_t name = tentry->ite_name;
+
+		assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
+
+		mach_port_names_helper(timestamp, entry, name,
+				       names, types, &actual);
+	}
+	ipc_splay_traverse_finish(&space->is_tree);
+	is_read_unlock(space);
+
+	if (actual == 0) {
+		memory1 = VM_MAP_COPY_NULL;
+		memory2 = VM_MAP_COPY_NULL;
+
+		if (size != 0) {
+			kmem_free(ipc_kernel_map, addr1, size);
+			kmem_free(ipc_kernel_map, addr2, size);
+		}
+	} else {
+		vm_size_t size_used;
+
+		size_used = round_page(actual * sizeof(mach_port_t));
+
+		/*
+		 *	Make used memory pageable and get it into
+		 *	copied-in form.  Free any unused memory.
+		 */
+
+		kr = vm_map_pageable(ipc_kernel_map,
+				     addr1, addr1 + size_used,
+				     VM_PROT_NONE);
+		assert(kr == KERN_SUCCESS);
+
+		kr = vm_map_pageable(ipc_kernel_map,
+				     addr2, addr2 + size_used,
+				     VM_PROT_NONE);
+		assert(kr == KERN_SUCCESS);
+
+		kr = vm_map_copyin(ipc_kernel_map, addr1, size_used,
+				   TRUE, &memory1);
+		assert(kr == KERN_SUCCESS);
+
+		kr = vm_map_copyin(ipc_kernel_map, addr2, size_used,
+				   TRUE, &memory2);
+		assert(kr == KERN_SUCCESS);
+
+		if (size_used != size) {
+			kmem_free(ipc_kernel_map,
+				  addr1 + size_used, size - size_used);
+			kmem_free(ipc_kernel_map,
+				  addr2 + size_used, size - size_used);
+		}
+	}
+
+	/* the out-of-line vm_map_copy_t objects are handed to the caller */
+	*namesp = (mach_port_t *) memory1;
+	*namesCnt = actual;
+	*typesp = (mach_port_type_t *) memory2;
+	*typesCnt = actual;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_type [kernel call]
+ * Purpose:
+ * Retrieves the type of a right in the space.
+ * The type is a bitwise combination of one or more
+ * of the following type bits:
+ * MACH_PORT_TYPE_SEND
+ * MACH_PORT_TYPE_RECEIVE
+ * MACH_PORT_TYPE_SEND_ONCE
+ * MACH_PORT_TYPE_PORT_SET
+ * MACH_PORT_TYPE_DEAD_NAME
+ * In addition, the following pseudo-type bits may be present:
+ * MACH_PORT_TYPE_DNREQUEST
+ * A dead-name notification is requested.
+ * MACH_PORT_TYPE_MAREQUEST
+ * The send/receive right is blocked;
+ * a msg-accepted notification is outstanding.
+ * MACH_PORT_TYPE_COMPAT
+ * This is a compatibility-mode right;
+ * when the port dies, it will disappear
+ * instead of turning into a dead-name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Type is returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_type(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_type_t *typep)
+{
+	kern_return_t result;
+	ipc_entry_t entry;
+	mach_port_urefs_t urefs;	/* required by ipc_right_info; not reported */
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	result = ipc_right_lookup_write(space, name, &entry);
+	if (result != KERN_SUCCESS)
+		return result;
+
+	/*
+	 *	Space is now write-locked and active.  On failure
+	 *	ipc_right_info leaves the space unlocked; on success
+	 *	we must release the lock ourselves.
+	 */
+	result = ipc_right_info(space, name, entry, typep, &urefs);
+	if (result == KERN_SUCCESS)
+		is_write_unlock(space);
+	return result;
+}
+
+/*
+ * Routine: mach_port_rename [kernel call]
+ * Purpose:
+ * Changes the name denoting a right,
+ * from oname to nname.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is renamed.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The oname doesn't denote a right.
+ * KERN_INVALID_VALUE The nname isn't a legal name.
+ * KERN_NAME_EXISTS The nname already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_rename(
+	ipc_space_t space,
+	mach_port_t oname,
+	mach_port_t nname)
+{
+	/* guard clauses: need a non-null space and a legal new name */
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+	if (!MACH_PORT_VALID(nname))
+		return KERN_INVALID_VALUE;
+
+	return ipc_object_rename(space, oname, nname);
+}
+
+/*
+ * Routine: mach_port_allocate_name [kernel call]
+ * Purpose:
+ * Allocates a right in a space, using a specific name
+ * for the new right. Possible rights:
+ * MACH_PORT_RIGHT_RECEIVE
+ * MACH_PORT_RIGHT_PORT_SET
+ * MACH_PORT_RIGHT_DEAD_NAME
+ *
+ * A new port (allocated with MACH_PORT_RIGHT_RECEIVE)
+ * has no extant send or send-once rights and no queued
+ * messages. Its queue limit is MACH_PORT_QLIMIT_DEFAULT
+ * and its make-send count is 0. It is not a member of
+ * a port set. It has no registered no-senders or
+ * port-destroyed notification requests.
+ *
+ * A new port set has no members.
+ *
+ * A new dead name has one user reference.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE The name isn't a legal name.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_allocate_name(
+	ipc_space_t space,
+	mach_port_right_t right,
+	mach_port_t name)
+{
+	/* ANSI-style definition, for consistency with the other
+	   mach_port_* entry points in this file. */
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	if (!MACH_PORT_VALID(name))
+		return KERN_INVALID_VALUE;
+
+	switch (right) {
+	case MACH_PORT_RIGHT_RECEIVE: {
+		ipc_port_t port;
+
+		/* port comes back locked; we only wanted it created */
+		kr = ipc_port_alloc_name(space, name, &port);
+		if (kr == KERN_SUCCESS)
+			ip_unlock(port);
+		break;
+	    }
+
+	case MACH_PORT_RIGHT_PORT_SET: {
+		ipc_pset_t pset;
+
+		kr = ipc_pset_alloc_name(space, name, &pset);
+		if (kr == KERN_SUCCESS)
+			ips_unlock(pset);
+		break;
+	    }
+
+	case MACH_PORT_RIGHT_DEAD_NAME:
+		kr = ipc_object_alloc_dead_name(space, name);
+		break;
+
+	default:
+		kr = KERN_INVALID_VALUE;
+		break;
+	}
+
+	return kr;
+}
+
+/*
+ * Routine: mach_port_allocate [kernel call]
+ * Purpose:
+ * Allocates a right in a space. Like mach_port_allocate_name,
+ * except that the implementation picks a name for the right.
+ * The name may be any legal name in the space that doesn't
+ * currently denote a right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_NO_SPACE No room in space for another right.
+ */
+
+kern_return_t
+mach_port_allocate(
+	ipc_space_t space,
+	mach_port_right_t right,
+	mach_port_t *namep)
+{
+	/* ANSI-style definition, for consistency with the other
+	   mach_port_* entry points in this file. */
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	switch (right) {
+	case MACH_PORT_RIGHT_RECEIVE: {
+		ipc_port_t port;
+
+		/* port comes back locked; we only wanted it created */
+		kr = ipc_port_alloc(space, namep, &port);
+		if (kr == KERN_SUCCESS)
+			ip_unlock(port);
+		break;
+	    }
+
+	case MACH_PORT_RIGHT_PORT_SET: {
+		ipc_pset_t pset;
+
+		kr = ipc_pset_alloc(space, namep, &pset);
+		if (kr == KERN_SUCCESS)
+			ips_unlock(pset);
+		break;
+	    }
+
+	case MACH_PORT_RIGHT_DEAD_NAME:
+		kr = ipc_object_alloc_dead(space, namep);
+		break;
+
+	default:
+		kr = KERN_INVALID_VALUE;
+		break;
+	}
+
+	return kr;
+}
+
+/*
+ * Routine: mach_port_destroy [kernel call]
+ * Purpose:
+ * Cleans up and destroys all rights denoted by a name
+ * in a space. The destruction of a receive right
+ * destroys the port, unless a port-destroyed request
+ * has been made for it; the destruction of a port-set right
+ * destroys the port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The name is destroyed.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_destroy(
+	ipc_space_t space,
+	mach_port_t name)
+{
+	kern_return_t result;
+	ipc_entry_t entry;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	result = ipc_right_lookup_write(space, name, &entry);
+	if (result != KERN_SUCCESS)
+		return result;
+
+	/* space is write-locked and active; ipc_right_destroy unlocks it */
+	return ipc_right_destroy(space, name, entry);
+}
+
+/*
+ * Routine: mach_port_deallocate [kernel call]
+ * Purpose:
+ * Deallocates a user reference from a send right,
+ * send-once right, or a dead-name right. May
+ * deallocate the right, if this is the last uref,
+ * and destroy the name, if it doesn't denote
+ * other rights.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The uref is deallocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT The right isn't correct.
+ */
+
+kern_return_t
+mach_port_deallocate(
+	ipc_space_t space,
+	mach_port_t name)
+{
+	kern_return_t result;
+	ipc_entry_t entry;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	result = ipc_right_lookup_write(space, name, &entry);
+	if (result != KERN_SUCCESS)
+		return result;
+
+	/* space is write-locked; ipc_right_dealloc unlocks it */
+	return ipc_right_dealloc(space, name, entry);
+}
+
+/*
+ * Routine: mach_port_get_refs [kernel call]
+ * Purpose:
+ * Retrieves the number of user references held by a right.
+ * Receive rights, port-set rights, and send-once rights
+ * always have one user reference. Returns zero if the
+ * name denotes a right, but not the queried right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Number of urefs returned.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal value.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ */
+
+kern_return_t
+mach_port_get_refs(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_right_t right,
+	mach_port_urefs_t *urefsp)
+{
+	mach_port_type_t type;
+	mach_port_urefs_t urefs;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	if (right >= MACH_PORT_RIGHT_NUMBER)
+		return KERN_INVALID_VALUE;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	kr = ipc_right_info(space, name, entry, &type, &urefs); /* unlocks on failure */
+	if (kr != KERN_SUCCESS)
+		return kr;	/* space is unlocked */
+	is_write_unlock(space);
+
+	/* report urefs only if the entry actually holds the queried right */
+	if (type & MACH_PORT_TYPE(right))
+		switch (right) {
+		case MACH_PORT_RIGHT_SEND_ONCE:
+			assert(urefs == 1);
+			/* fall-through */
+
+		case MACH_PORT_RIGHT_PORT_SET:
+		case MACH_PORT_RIGHT_RECEIVE:
+			/* these rights always carry exactly one uref */
+			*urefsp = 1;
+			break;
+
+		case MACH_PORT_RIGHT_DEAD_NAME:
+		case MACH_PORT_RIGHT_SEND:
+			assert(urefs > 0);
+			*urefsp = urefs;
+			break;
+
+		default:
+			panic("mach_port_get_refs: strange rights");
+		}
+	else
+		*urefsp = 0;	/* name is valid but lacks this right */
+
+	return kr;
+}
+
+/*
+ * Routine: mach_port_mod_refs
+ * Purpose:
+ * Modifies the number of user references held by a right.
+ * The resulting number of user references must be non-negative.
+ * If it is zero, the right is deallocated. If the name
+ * doesn't denote other rights, it is destroyed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Modified number of urefs.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal value.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote specified right.
+ * KERN_INVALID_VALUE Impossible modification to urefs.
+ * KERN_UREFS_OVERFLOW Urefs would overflow.
+ */
+
+kern_return_t
+mach_port_mod_refs(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_right_t right,
+	mach_port_delta_t delta)
+{
+	kern_return_t result;
+	ipc_entry_t entry;
+
+	/* guard clauses: live space, legal right */
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+	if (right >= MACH_PORT_RIGHT_NUMBER)
+		return KERN_INVALID_VALUE;
+
+	result = ipc_right_lookup_write(space, name, &entry);
+	if (result != KERN_SUCCESS)
+		return result;
+
+	/* space is write-locked and active; ipc_right_delta unlocks it */
+	return ipc_right_delta(space, name, entry, right, delta);
+}
+
+/*
+ * Routine: old_mach_port_get_receive_status [kernel call]
+ * Purpose:
+ * Compatibility for code written before sequence numbers.
+ * Retrieves mucho info about a receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+old_mach_port_get_receive_status(
+	ipc_space_t space,
+	mach_port_t name,
+	old_mach_port_status_t *statusp)
+{
+	/* ANSI-style definition, for consistency with the other
+	   mach_port_* entry points in this file. */
+	mach_port_status_t status;
+	kern_return_t kr;
+
+	/*
+	 *	Fetch the full (new-style) status, then copy out the
+	 *	subset of fields the old structure has — i.e. everything
+	 *	except the sequence number.
+	 */
+	kr = mach_port_get_receive_status(space, name, &status);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	statusp->mps_pset = status.mps_pset;
+	statusp->mps_mscount = status.mps_mscount;
+	statusp->mps_qlimit = status.mps_qlimit;
+	statusp->mps_msgcount = status.mps_msgcount;
+	statusp->mps_sorights = status.mps_sorights;
+	statusp->mps_srights = status.mps_srights;
+	statusp->mps_pdrequest = status.mps_pdrequest;
+	statusp->mps_nsrequest = status.mps_nsrequest;
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_qlimit [kernel call]
+ * Purpose:
+ * Changes a receive right's queue limit.
+ * The new queue limit must be between 0 and
+ * MACH_PORT_QLIMIT_MAX, inclusive.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set queue limit.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ * KERN_INVALID_VALUE Illegal queue limit.
+ */
+
+kern_return_t
+mach_port_set_qlimit(space, name, qlimit)
+	ipc_space_t space;
+	mach_port_t name;
+	mach_port_msgcount_t qlimit;
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* Validate the limit before translating the name. */
+	if (qlimit > MACH_PORT_QLIMIT_MAX)
+		return KERN_INVALID_VALUE;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	ipc_port_set_qlimit(port, qlimit);
+
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_mscount [kernel call]
+ * Purpose:
+ * Changes a receive right's make-send count.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set make-send count.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_mscount(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_mscount_t mscount)
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	/* Overwrite the make-send count under the port lock. */
+	ipc_port_set_mscount(port, mscount);
+
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_set_seqno [kernel call]
+ * Purpose:
+ * Changes a receive right's sequence number.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set sequence number.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_seqno(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_seqno_t seqno)
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	/* Set the sequence number for subsequently received messages. */
+	ipc_port_set_seqno(port, seqno);
+
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_gst_helper
+ * Purpose:
+ * A helper function for mach_port_get_set_status.
+ */
+
+void
+mach_port_gst_helper(
+	ipc_pset_t pset,
+	ipc_port_t port,
+	ipc_entry_num_t maxnames,
+	mach_port_t *names,
+	ipc_entry_num_t *actualp)
+{
+	ipc_pset_t ip_pset;
+	mach_port_t name;
+
+	assert(port != IP_NULL);
+
+	/* Snapshot the port's name and set membership under the port lock,
+	   then drop the lock before touching the output array. */
+	ip_lock(port);
+	assert(ip_active(port));
+
+	name = port->ip_receiver_name;
+	assert(name != MACH_PORT_NULL);
+	ip_pset = port->ip_pset;
+
+	ip_unlock(port);
+
+	if (pset == ip_pset) {
+		ipc_entry_num_t actual = *actualp;
+
+		/* Record the name only if it fits; the count is always
+		   advanced so the caller can detect an undersized buffer. */
+		if (actual < maxnames)
+			names[actual] = name;
+
+		*actualp = actual+1;
+	}
+}
+
+/*
+ * Routine: mach_port_get_set_status [kernel call]
+ * Purpose:
+ * Retrieves a list of members in a port set.
+ * Returns the space's name for each receive right member.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved list of members.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote a port set.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_get_set_status(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_port_t **members,
+	mach_msg_type_number_t *membersCnt)
+{
+	ipc_entry_num_t actual;		/* this many members */
+	ipc_entry_num_t maxnames;	/* space for this many members */
+	kern_return_t kr;
+
+	vm_size_t size;		/* size of allocated memory */
+	vm_offset_t addr;	/* allocated memory */
+	vm_map_copy_t memory;	/* copied-in memory */
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	size = PAGE_SIZE;	/* initial guess */
+
+	/* Allocate-then-scan loop: memory must be allocated and wired
+	   before the space is locked, because faults are not allowed
+	   while holding the lock.  If the buffer turns out too small,
+	   everything is unlocked, freed, and retried with a larger size. */
+	for (;;) {
+		ipc_tree_entry_t tentry;
+		ipc_entry_t entry, table;
+		ipc_entry_num_t tsize;
+		mach_port_index_t index;
+		mach_port_t *names;
+		ipc_pset_t pset;
+
+		kr = vm_allocate(ipc_kernel_map, &addr, size, TRUE);
+		if (kr != KERN_SUCCESS)
+			return KERN_RESOURCE_SHORTAGE;
+
+		/* can't fault while we hold locks */
+
+		kr = vm_map_pageable(ipc_kernel_map, addr, addr + size,
+				     VM_PROT_READ|VM_PROT_WRITE);
+		assert(kr == KERN_SUCCESS);
+
+		kr = ipc_right_lookup_read(space, name, &entry);
+		if (kr != KERN_SUCCESS) {
+			kmem_free(ipc_kernel_map, addr, size);
+			return kr;
+		}
+		/* space is read-locked and active */
+
+		if (IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_PORT_SET) {
+			is_read_unlock(space);
+			kmem_free(ipc_kernel_map, addr, size);
+			return KERN_INVALID_RIGHT;
+		}
+
+		pset = (ipc_pset_t) entry->ie_object;
+		assert(pset != IPS_NULL);
+		/* the port set must be active */
+
+		names = (mach_port_t *) addr;
+		maxnames = size / sizeof(mach_port_t);
+		actual = 0;
+
+		/* Scan the flat entry table for receive rights in pset. */
+		table = space->is_table;
+		tsize = space->is_table_size;
+
+		for (index = 0; index < tsize; index++) {
+			ipc_entry_t ientry = &table[index];
+			ipc_entry_bits_t bits = ientry->ie_bits;
+
+			if (bits & MACH_PORT_TYPE_RECEIVE) {
+				ipc_port_t port =
+					(ipc_port_t) ientry->ie_object;
+
+				mach_port_gst_helper(pset, port, maxnames,
+						     names, &actual);
+			}
+		}
+
+		/* Also scan the splay-tree overflow entries. */
+		for (tentry = ipc_splay_traverse_start(&space->is_tree);
+		     tentry != ITE_NULL;
+		     tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
+			ipc_entry_bits_t bits = tentry->ite_bits;
+
+			assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+
+			if (bits & MACH_PORT_TYPE_RECEIVE) {
+				ipc_port_t port =
+					(ipc_port_t) tentry->ite_object;
+
+				mach_port_gst_helper(pset, port, maxnames,
+						     names, &actual);
+			}
+		}
+		ipc_splay_traverse_finish(&space->is_tree);
+		is_read_unlock(space);
+
+		if (actual <= maxnames)
+			break;
+
+		/* didn't have enough memory; allocate more */
+
+		kmem_free(ipc_kernel_map, addr, size);
+		size = round_page(actual * sizeof(mach_port_t)) + PAGE_SIZE;
+	}
+
+	if (actual == 0) {
+		memory = VM_MAP_COPY_NULL;
+
+		kmem_free(ipc_kernel_map, addr, size);
+	} else {
+		vm_size_t size_used;
+
+		size_used = round_page(actual * sizeof(mach_port_t));
+
+		/*
+		 *	Make used memory pageable and get it into
+		 *	copied-in form.  Free any unused memory.
+		 */
+
+		kr = vm_map_pageable(ipc_kernel_map,
+				     addr, addr + size_used,
+				     VM_PROT_NONE);
+		assert(kr == KERN_SUCCESS);
+
+		kr = vm_map_copyin(ipc_kernel_map, addr, size_used,
+				   TRUE, &memory);
+		assert(kr == KERN_SUCCESS);
+
+		if (size_used != size)
+			kmem_free(ipc_kernel_map,
+				  addr + size_used, size - size_used);
+	}
+
+	/* The out-of-line result is returned as a vm_map_copy_t,
+	   disguised behind the MIG-generated pointer type. */
+	*members = (mach_port_t *) memory;
+	*membersCnt = actual;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_move_member [kernel call]
+ * Purpose:
+ * If after is MACH_PORT_NULL, removes member
+ * from the port set it is in. Otherwise, adds
+ * member to after, removing it from any set
+ * it might already be in.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Moved the port.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME Member didn't denote a right.
+ * KERN_INVALID_RIGHT Member didn't denote a receive right.
+ * KERN_INVALID_NAME After didn't denote a right.
+ * KERN_INVALID_RIGHT After didn't denote a port set right.
+ * KERN_NOT_IN_SET
+ * After is MACH_PORT_NULL and Member isn't in a port set.
+ */
+
+kern_return_t
+mach_port_move_member(
+	ipc_space_t space,
+	mach_port_t member,
+	mach_port_t after)
+{
+	ipc_entry_t entry;
+	ipc_port_t port;
+	ipc_pset_t nset;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_right_lookup_read(space, member, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is read-locked and active */
+
+	if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
+		is_read_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	/* after == MACH_PORT_NULL means "remove from current set";
+	   otherwise look up the destination port set under the same
+	   read lock (no second lookup-and-lock round trip needed). */
+	if (after == MACH_PORT_NULL)
+		nset = IPS_NULL;
+	else {
+		entry = ipc_entry_lookup(space, after);
+		if (entry == IE_NULL) {
+			is_read_unlock(space);
+			return KERN_INVALID_NAME;
+		}
+
+		if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
+			is_read_unlock(space);
+			return KERN_INVALID_RIGHT;
+		}
+
+		nset = (ipc_pset_t) entry->ie_object;
+		assert(nset != IPS_NULL);
+	}
+
+	kr = ipc_pset_move(space, port, nset);
+	/* space is unlocked */
+	return kr;
+}
+
+/*
+ * Routine: mach_port_request_notification [kernel call]
+ * Purpose:
+ * Requests a notification. The caller supplies
+ * a send-once right for the notification to use,
+ * and the call returns the previously registered
+ * send-once right, if any. Possible types:
+ *
+ * MACH_NOTIFY_PORT_DESTROYED
+ * Requests a port-destroyed notification
+ * for a receive right. Sync should be zero.
+ * MACH_NOTIFY_NO_SENDERS
+ * Requests a no-senders notification for a
+ * receive right. If there are currently no
+ * senders, sync is less than or equal to the
+ * current make-send count, and a send-once right
+ * is supplied, then an immediate no-senders
+ * notification is generated.
+ * MACH_NOTIFY_DEAD_NAME
+ * Requests a dead-name notification for a send
+ * or receive right. If the name is already a
+ * dead name, sync is non-zero, and a send-once
+ * right is supplied, then an immediate dead-name
+ * notification is generated.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Requested a notification.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE Bad id value.
+ * KERN_INVALID_NAME Name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote appropriate right.
+ * KERN_INVALID_CAPABILITY The notify port is dead.
+ * MACH_NOTIFY_PORT_DESTROYED:
+ * KERN_INVALID_VALUE Sync isn't zero.
+ * MACH_NOTIFY_DEAD_NAME:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_INVALID_ARGUMENT Name denotes dead name, but
+ * sync is zero or notify is IP_NULL.
+ * KERN_UREFS_OVERFLOW Name denotes dead name, but
+ * generating immediate notif. would overflow urefs.
+ */
+
+kern_return_t
+mach_port_request_notification(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_msg_id_t id,
+	mach_port_mscount_t sync,
+	ipc_port_t notify,
+	ipc_port_t *previousp)
+{
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* A dead notify port can never receive the notification. */
+	if (notify == IP_DEAD)
+		return KERN_INVALID_CAPABILITY;
+
+	/* Dispatch on the requested notification type; each case
+	   returns the previously registered send-once right (if any)
+	   through *previousp. */
+	switch (id) {
+	    case MACH_NOTIFY_PORT_DESTROYED: {
+		ipc_port_t port, previous;
+
+		if (sync != 0)
+			return KERN_INVALID_VALUE;
+
+		kr = ipc_port_translate_receive(space, name, &port);
+		if (kr != KERN_SUCCESS)
+			return kr;
+		/* port is locked and active */
+
+		ipc_port_pdrequest(port, notify, &previous);
+		/* port is unlocked */
+
+#if	MACH_IPC_COMPAT
+		/*
+		 *	If previous was a send right instead of a send-once
+		 *	right, we can't return it in the reply message.
+		 *	So destroy it instead.
+		 */
+
+		if ((previous != IP_NULL) && ip_pdsendp(previous)) {
+			ipc_port_release_send(ip_pdsend(previous));
+			previous = IP_NULL;
+		}
+#endif	/* MACH_IPC_COMPAT */
+
+		*previousp = previous;
+		break;
+	    }
+
+	    case MACH_NOTIFY_NO_SENDERS: {
+		ipc_port_t port;
+
+		kr = ipc_port_translate_receive(space, name, &port);
+		if (kr != KERN_SUCCESS)
+			return kr;
+		/* port is locked and active */
+
+		ipc_port_nsrequest(port, sync, notify, previousp);
+		/* port is unlocked */
+		break;
+	    }
+
+	    case MACH_NOTIFY_DEAD_NAME:
+		kr = ipc_right_dnrequest(space, name, sync != 0,
+					 notify, previousp);
+		if (kr != KERN_SUCCESS)
+			return kr;
+		break;
+
+	    default:
+		return KERN_INVALID_VALUE;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_insert_right [kernel call]
+ * Purpose:
+ * Inserts a right into a space, as if the space
+ * voluntarily received the right in a message,
+ * except that the right gets the specified name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted the right.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE The name isn't a legal name.
+ * KERN_NAME_EXISTS The name already denotes a right.
+ * KERN_INVALID_VALUE Message doesn't carry a port right.
+ * KERN_INVALID_CAPABILITY Port is null or dead.
+ * KERN_UREFS_OVERFLOW Urefs limit would be exceeded.
+ * KERN_RIGHT_EXISTS Space has rights under another name.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+mach_port_insert_right(
+	ipc_space_t space,
+	mach_port_t name,
+	ipc_port_t poly,
+	mach_msg_type_name_t polyPoly)
+{
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* The caller must supply a legal name and a port-right
+	   disposition (send, send-once, or receive). */
+	if (!MACH_PORT_VALID(name) ||
+	    !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly))
+		return KERN_INVALID_VALUE;
+
+	if (!IO_VALID(poly))
+		return KERN_INVALID_CAPABILITY;
+
+	/* FALSE: do not allow the kernel to pick another name. */
+	return ipc_object_copyout_name(space, poly, polyPoly, FALSE, name);
+}
+
+/*
+ * Routine: mach_port_extract_right [kernel call]
+ * Purpose:
+ * Extracts a right from a space, as if the space
+ * voluntarily sent the right to the caller.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted the right.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE Requested type isn't a port right.
+ * KERN_INVALID_NAME Name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote appropriate right.
+ */
+
+kern_return_t
+mach_port_extract_right(
+	ipc_space_t space,
+	mach_port_t name,
+	mach_msg_type_name_t msgt_name,
+	ipc_port_t *poly,
+	mach_msg_type_name_t *polyPoly)
+{
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	if (!MACH_MSG_TYPE_PORT_ANY(msgt_name))
+		return KERN_INVALID_VALUE;
+
+	/* Copy the right out of the space exactly as a message send
+	   with disposition msgt_name would. */
+	kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly);
+
+	if (kr == KERN_SUCCESS)
+		*polyPoly = ipc_object_copyin_type(msgt_name);
+	return kr;
+}
+
+/*
+ * Routine: mach_port_get_receive_status [kernel call]
+ * Purpose:
+ * Retrieves mucho info about a receive right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_get_receive_status(space, name, statusp)
+	ipc_space_t space;
+	mach_port_t name;
+	mach_port_status_t *statusp;
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	kr = ipc_port_translate_receive(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	if (port->ip_pset != IPS_NULL) {
+		ipc_pset_t pset = port->ip_pset;
+
+		ips_lock(pset);
+		if (!ips_active(pset)) {
+			/* The set died; lazily detach the port from it and
+			   report the port as not being in any set.  Note the
+			   backward goto jumps into the else arm below. */
+			ipc_pset_remove(pset, port);
+			ips_check_unlock(pset);
+			goto no_port_set;
+		} else {
+			statusp->mps_pset = pset->ips_local_name;
+			/* Sequence number lives in the set's message queue
+			   while the port is a member. */
+			imq_lock(&pset->ips_messages);
+			statusp->mps_seqno = port->ip_seqno;
+			imq_unlock(&pset->ips_messages);
+			ips_unlock(pset);
+			assert(MACH_PORT_VALID(statusp->mps_pset));
+		}
+	} else {
+	    no_port_set:
+		statusp->mps_pset = MACH_PORT_NULL;
+		imq_lock(&port->ip_messages);
+		statusp->mps_seqno = port->ip_seqno;
+		imq_unlock(&port->ip_messages);
+	}
+
+	/* Remaining fields are read directly under the port lock. */
+	statusp->mps_mscount = port->ip_mscount;
+	statusp->mps_qlimit = port->ip_qlimit;
+	statusp->mps_msgcount = port->ip_msgcount;
+	statusp->mps_sorights = port->ip_sorights;
+	statusp->mps_srights = port->ip_srights > 0;
+	statusp->mps_pdrequest = port->ip_pdrequest != IP_NULL;
+	statusp->mps_nsrequest = port->ip_nsrequest != IP_NULL;
+	ip_unlock(port);
+
+	return KERN_SUCCESS;
+}
+
+#ifdef MIGRATING_THREADS
+/*
+ *	Routine:	mach_port_set_rpcinfo [kernel call, MIGRATING_THREADS]
+ *	Purpose:
+ *		Attach RPC information to the ipc_target of a port
+ *		or port set named in the given space.
+ */
+kern_return_t
+mach_port_set_rpcinfo(space, name, rpc_info, rpc_info_count)
+	ipc_space_t space;
+	mach_port_t name;
+	void *rpc_info;
+	unsigned int rpc_info_count;
+{
+	ipc_target_t target;
+	ipc_object_t object;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_TASK;
+
+	/* Try the name first as a port set, then as a receive right;
+	   whichever succeeds yields the object's ipc_target. */
+	kr = ipc_object_translate(space, name,
+				  MACH_PORT_RIGHT_PORT_SET, &object);
+	if (kr == KERN_SUCCESS)
+		target = &((ipc_pset_t)object)->ips_target;
+	else {
+		kr = ipc_object_translate(space, name,
+					  MACH_PORT_RIGHT_RECEIVE, &object);
+		if (kr != KERN_SUCCESS)
+			return kr;
+		target = &((ipc_port_t)object)->ip_target;
+	}
+
+	/* port/pset is locked and active */
+
+	kr = port_machine_set_rpcinfo(target, rpc_info, rpc_info_count);
+
+	io_unlock(object);
+
+	return kr;
+}
+
+#if 1
+int sacts, maxsacts;
+#endif
+
+/* Debugging aid: print the server-activation usage counters.
+   K&R-style implicit-int function whose return value is never
+   meaningful — NOTE(review): consider declaring it void. */
+sact_count()
+{
+	printf("%d server activations in use, %d max\n", sacts, maxsacts);
+}
+
+/*
+ *	Routine:	mach_port_create_act [kernel call, MIGRATING_THREADS]
+ *	Purpose:
+ *		Create a new server activation and bind it to the
+ *		ipc_target of the port or port set named in the task's
+ *		space.  On success, passes the activation reference
+ *		back to the caller through out_act.
+ */
+kern_return_t
+mach_port_create_act(task, name, user_stack, user_rbuf, user_rbuf_size, out_act)
+	task_t task;
+	mach_port_t name;
+	vm_offset_t user_stack;
+	vm_offset_t user_rbuf;
+	vm_size_t user_rbuf_size;
+	Act **out_act;
+{
+	ipc_target_t target;
+	ipc_space_t space;
+	ipc_object_t object;
+	kern_return_t kr;
+	Act *act;
+
+	if (task == 0)
+		return KERN_INVALID_TASK;
+
+	/* First create the new activation. */
+	kr = act_create(task, user_stack, user_rbuf, user_rbuf_size, &act);
+	if (kr != KERN_SUCCESS)
+		return kr;
+
+	space = task->itk_space;
+
+	/* Resolve the name to a port set or receive right; on failure
+	   the freshly created activation must be torn down again. */
+	kr = ipc_object_translate(space, name,
+				  MACH_PORT_RIGHT_PORT_SET, &object);
+	if (kr == KERN_SUCCESS)
+		target = &((ipc_pset_t)object)->ips_target;
+	else {
+		kr = ipc_object_translate(space, name,
+					  MACH_PORT_RIGHT_RECEIVE, &object);
+		if (kr != KERN_SUCCESS) {
+			act_terminate(act);
+			act_deallocate(act);
+			return kr;
+		}
+		target = &((ipc_port_t)object)->ip_target;
+	}
+
+	/* port/pset is locked and active */
+#if 0
+	printf("act port/pset %08x ipc_target %08x stack %08x act %08x\n",
+	       object, target, user_stack, act);
+#endif
+
+	/* Assign the activation to the port's actpool. */
+	kr = act_set_target(act, target);
+	if (kr != KERN_SUCCESS) {
+		io_unlock(object);
+		act_terminate(act);
+		act_deallocate(act);
+		return kr;
+	}
+#if 0
+	printf("  actpool %08x act %08x\n", target->ip_actpool, act);
+#endif
+
+	io_unlock(object);
+
+	/* Pass our reference to the activation back to the user. */
+	*out_act = act;
+
+#if 1
+	/* Debug counters; 0x69 appears to be a marker value written
+	   into the new activation's pcb — TODO confirm its purpose. */
+	sacts++;
+	if (sacts > maxsacts)
+		maxsacts = sacts;
+	act->mact.pcb->ss.mpsfu_high = 0x69;
+#endif
+	return KERN_SUCCESS;
+}
+
+#ifdef RPCKERNELSIG
+/*
+ *	Routine:	mach_port_set_syscall_right [kernel call, RPCKERNELSIG]
+ *	Purpose:
+ *		Caches the entry for a send right of the task's space
+ *		in task->syscall_ipc_entry.
+ *	Conditions:
+ *		Nothing locked.
+ */
+kern_return_t
+mach_port_set_syscall_right(task, name)
+	task_t task;
+	mach_port_t name;
+{
+	ipc_space_t space;
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	if (task == 0)
+		return KERN_INVALID_TASK;
+
+	/* Look up the name in the task's IPC space.  (The original
+	 * code passed the task itself to the lookup and unlocked an
+	 * undeclared `space' variable; use the task's space for both.)
+	 */
+	space = task->itk_space;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS) {
+		return kr;
+	}
+	/* space is write-locked and active */
+
+	if (!(entry->ie_bits & MACH_PORT_TYPE(MACH_PORT_RIGHT_SEND))) {
+		is_write_unlock(space);
+		return KERN_INVALID_RIGHT;
+	}
+
+	/* Cache a copy of the entry for fast syscall-right checks. */
+	task->syscall_ipc_entry = *entry;
+
+	is_write_unlock(space);
+
+	return KERN_SUCCESS;
+}
+#endif
+#endif /* MIGRATING_THREADS */
+
+#if MACH_IPC_COMPAT
+
+/*
+ * Routine: port_translate_compat
+ * Purpose:
+ * Converts a name to a receive right.
+ * Conditions:
+ * Nothing locked. If successful, the port
+ * is returned locked and active.
+ * Returns:
+ * KERN_SUCCESS Port is returned.
+ * KERN_INVALID_ARGUMENT The space is dead.
+ * KERN_INVALID_ARGUMENT Name doesn't denote port rights.
+ * KERN_NOT_RECEIVER Name denotes send, not receive, rights.
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_translate_compat(space, name, portp)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_port_t *portp;
+{
+	ipc_entry_t entry;
+	mach_port_type_t type;
+	mach_port_urefs_t urefs;
+	ipc_port_t port;
+	kern_return_t kr;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;
+	/* space is write-locked and active */
+
+	kr = ipc_right_info(space, name, entry, &type, &urefs);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+	/* Distinguish "port right but not receive" (KERN_NOT_RECEIVER)
+	   from "not a port right at all" (KERN_INVALID_ARGUMENT). */
+	if ((type & (MACH_PORT_TYPE_RECEIVE)) == 0) {
+		is_write_unlock(space);
+		if (type & MACH_PORT_TYPE_PORT_OR_DEAD)
+			return KERN_NOT_RECEIVER;
+		else
+			return KERN_INVALID_ARGUMENT;
+	}
+
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	/* Hand back the port locked; the space lock is released after
+	   the port lock is taken so the entry can't change beneath us. */
+	ip_lock(port);
+	is_write_unlock(space);
+	assert(ip_active(port));
+
+	*portp = port;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: convert_port_type
+ * Purpose:
+ * Convert a new mach_port_type_t to an old value.
+ * Note send-once rights and dead names get
+ * represented as send rights. The extra info
+ * bits get dropped.
+ */
+
+mach_port_type_t
+convert_port_type(type)
+	mach_port_type_t type;
+{
+	switch (type & MACH_PORT_TYPE_ALL_RIGHTS) {
+	    case MACH_PORT_TYPE_SEND:
+	    case MACH_PORT_TYPE_SEND_ONCE:
+	    case MACH_PORT_TYPE_DEAD_NAME:
+		/* Send-once rights and dead names map to plain send. */
+		return PORT_TYPE_SEND;
+
+	    case MACH_PORT_TYPE_RECEIVE:
+	    case MACH_PORT_TYPE_SEND_RECEIVE:
+		return PORT_TYPE_RECEIVE_OWN;
+
+	    case MACH_PORT_TYPE_PORT_SET:
+		return PORT_TYPE_SET;
+
+	    default:
+#if MACH_ASSERT
+		assert(!"convert_port_type: strange port type");
+#else
+		panic("convert_port_type: strange port type");
+#endif
+		/* Not reached when assertions/panic fire; the explicit
+		   return keeps control from falling off the end of a
+		   value-returning function (undefined behavior). */
+		return PORT_TYPE_NONE;
+	}
+}
+
+/*
+ * Routine: port_names [kernel call]
+ * Purpose:
+ * Retrieve all the names in the task's port name space.
+ * As a (major) convenience, return port type information.
+ * The port name space includes port sets.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved names.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_names(space, namesp, namesCnt, typesp, typesCnt)
+	ipc_space_t space;
+	mach_port_t **namesp;
+	mach_msg_type_number_t *namesCnt;
+	mach_port_type_t **typesp;
+	mach_msg_type_number_t *typesCnt;
+{
+	kern_return_t kr;
+
+	/* Delegate to the new interface, then rewrite the type array
+	   in place so it uses the old PORT_TYPE_* encoding. */
+	kr = mach_port_names(space, namesp, namesCnt, typesp, typesCnt);
+	if (kr == KERN_SUCCESS) {
+		ipc_entry_num_t actual = (ipc_entry_num_t) *typesCnt;
+		mach_port_type_t *types;
+		ipc_entry_num_t i;
+
+		vm_map_copy_t copy = (vm_map_copy_t) *typesp;
+		vm_offset_t addr;
+		vm_size_t size = round_page(actual * sizeof(mach_port_type_t));
+
+		/* convert copy object back to something we can use */
+
+		kr = vm_map_copyout(ipc_kernel_map, &addr, copy);
+		if (kr != KERN_SUCCESS) {
+			/* Both out-of-line results must be discarded to
+			   avoid leaking the copy objects. */
+			vm_map_copy_discard((vm_map_copy_t) *typesp);
+			vm_map_copy_discard((vm_map_copy_t) *namesp);
+			return KERN_RESOURCE_SHORTAGE;
+		}
+
+		types = (mach_port_type_t *) addr;
+
+		for (i = 0; i < actual; i++)
+			types[i] = convert_port_type(types[i]);
+
+		/* convert memory back into a copy object */
+
+		kr = vm_map_copyin(ipc_kernel_map, addr, size,
+				   TRUE, &copy);
+		assert(kr == KERN_SUCCESS);
+
+		*typesp = (mach_port_type_t *) copy;
+	} else if (kr != KERN_RESOURCE_SHORTAGE)
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+/*
+ * Routine: port_type [kernel call]
+ * Purpose:
+ * Return type of the capability named.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved type.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT The name doesn't denote a right.
+ */
+
+kern_return_t
+port_type(space, name, typep)
+	ipc_space_t space;
+	mach_port_t name;
+	mach_port_type_t *typep;
+{
+	mach_port_type_t type;
+	kern_return_t kr;
+
+	kr = mach_port_type(space, name, &type);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;
+
+	/* Old interface uses the compact PORT_TYPE_* encoding. */
+	*typep = convert_port_type(type);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_rename [kernel call]
+ * Purpose:
+ * Change the name of a capability.
+ * The new name can't be in use.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved type.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT The new name is reserved.
+ * KERN_NAME_EXISTS The new name already denotes a right.
+ * KERN_INVALID_ARGUMENT The old name doesn't denote a right.
+ */
+
+kern_return_t
+port_rename(space, old_name, new_name)
+	ipc_space_t space;
+	mach_port_t old_name;
+	mach_port_t new_name;
+{
+	kern_return_t kr;
+
+	kr = mach_port_rename(space, old_name, new_name);
+	/* Collapse all new-interface errors except KERN_NAME_EXISTS
+	   into the single old-interface error code. */
+	if ((kr != KERN_SUCCESS) && (kr != KERN_NAME_EXISTS))
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+/*
+ * Routine: port_allocate [kernel call]
+ * Purpose:
+ * Allocate a new port, giving all rights to "task".
+ *
+ * Returns in "port_name" the task's local name for the port.
+ * Doesn't return a reference to the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Allocated a port.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_allocate(space, namep)
+	ipc_space_t space;
+	mach_port_t *namep;
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* On success the new port comes back locked; we only need
+	   the name, so drop the lock immediately. */
+	kr = ipc_port_alloc_compat(space, namep, &port);
+	if (kr == KERN_SUCCESS)
+		ip_unlock(port);
+	else if (kr != KERN_RESOURCE_SHORTAGE)
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+/*
+ * Routine: port_deallocate [kernel call]
+ * Purpose:
+ * Delete port rights (send and receive) from a task.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Deallocated the port right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * Additions:
+ * KERN_SUCCESS Deallocated a send-once right.
+ * KERN_SUCCESS Destroyed a dead name.
+ */
+
+kern_return_t
+port_deallocate(space, name)
+	ipc_space_t space;
+	mach_port_t name;
+{
+	ipc_entry_t entry;
+	mach_port_type_t type;
+	mach_port_urefs_t urefs;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;
+	/* space is write-locked and active */
+
+	/*
+	 *	We serialize with port destruction with the
+	 *	ipc_right_info call, not ipc_right_destroy.
+	 *	After ipc_right_info, we pretend that the
+	 *	port doesn't get destroyed.
+	 */
+
+	kr = ipc_right_info(space, name, entry, &type, &urefs);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+	/* Only port rights and dead names may be deallocated here;
+	   port sets go through port_set_deallocate. */
+	if ((type & (MACH_PORT_TYPE_PORT_OR_DEAD)) == 0) {
+		is_write_unlock(space);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	(void) ipc_right_destroy(space, name, entry);
+	/* space is unlocked */
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_set_backlog [kernel call]
+ * Purpose:
+ * Change the queueing backlog on "port_name" to "backlog";
+ * the specified "task" must be the current receiver.
+ *
+ * Valid backlog values are 0 < backlog <= PORT_BACKLOG_MAX.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set the backlog.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * KERN_NOT_RECEIVER Name denotes send rights, not receive.
+ * KERN_INVALID_ARGUMENT Backlog value is invalid.
+ * Additions:
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_set_backlog(space, name, backlog)
+	ipc_space_t space;
+	mach_port_t name;
+	int backlog;
+{
+	ipc_port_t port;
+	kern_return_t kr;
+
+	/* Old interface: backlog must lie in (0, PORT_BACKLOG_MAX]. */
+	if ((space == IS_NULL) ||
+	    (backlog <= 0) ||
+	    (backlog > PORT_BACKLOG_MAX))
+		return KERN_INVALID_ARGUMENT;
+
+	kr = port_translate_compat(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	/* The old "backlog" is the new queue limit. */
+	ipc_port_set_qlimit(port, (mach_port_msgcount_t) backlog);
+
+	ip_unlock(port);
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_set_backup [kernel call]
+ * Purpose:
+ *		Changes the backup port for the named port.
+ * The specified "task" must be the current receiver.
+ * Returns the old backup port, if any.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set the backup.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * KERN_NOT_RECEIVER Name denotes send rights, not receive.
+ * Additions:
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_set_backup(space, name, backup, previousp)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_port_t backup;
+	ipc_port_t *previousp;
+{
+	ipc_port_t port, previous;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* Normalize the incoming backup port: dead becomes null,
+	   live ports are tagged (ip_pdsendm) as send rights so the
+	   pdrequest machinery can tell them from send-once rights. */
+	if (backup == IP_DEAD)
+		backup = IP_NULL;
+	else if (backup != IP_NULL)
+		backup = ip_pdsendm(backup);
+
+	kr = port_translate_compat(space, name, &port);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* port is locked and active */
+
+	ipc_port_pdrequest(port, backup, &previous);
+	/* port is unlocked */
+
+	/*
+	 *	If previous was a send-once right instead of a send
+	 *	right, we can't return it in the reply message.
+	 *	So get rid of it in a notification instead.
+	 */
+
+	if (previous != IP_NULL) {
+		if (ip_pdsendp(previous))
+			previous = ip_pdsend(previous);
+		else {
+			ipc_notify_send_once(previous);
+			previous = IP_NULL;
+		}
+	}
+
+	*previousp = previous;
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_status [kernel call]
+ * Purpose:
+ * Returns statistics related to "port_name", as seen by "task".
+ * Only the receiver for a given port will see true message
+ * counts.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved status.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * Additions:
+ * KERN_SUCCESS Send-once right.
+ * KERN_SUCCESS Dead name.
+ */
+
+kern_return_t
+port_status(space, name, enabledp, num_msgs, backlog,
+	    ownership, receive_rights)
+	ipc_space_t space;
+	mach_port_t name;
+	mach_port_t *enabledp;
+	int *num_msgs;
+	int *backlog;
+	boolean_t *ownership;
+	boolean_t *receive_rights;
+{
+	ipc_entry_t entry;
+	mach_port_type_t type;
+	mach_port_urefs_t urefs;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;
+	/* space is write-locked and active */
+
+	kr = ipc_right_info(space, name, entry, &type, &urefs);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT; /* space is unlocked */
+
+	if ((type & MACH_PORT_TYPE_PORT_OR_DEAD) == 0) {
+		is_write_unlock(space);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	if (type & MACH_PORT_TYPE_RECEIVE) {
+		/* Receiver view: report true counts and set membership. */
+		mach_port_t enabled;
+		mach_port_msgcount_t qlimit;
+		mach_port_msgcount_t msgcount;
+		ipc_port_t port;
+
+		port = (ipc_port_t) entry->ie_object;
+		assert(port != IP_NULL);
+
+		ip_lock(port);
+		is_write_unlock(space);
+		assert(ip_active(port));
+
+		if (port->ip_pset != IPS_NULL) {
+			ipc_pset_t pset = port->ip_pset;
+
+			ips_lock(pset);
+			if (!ips_active(pset)) {
+				/* Set died; lazily detach the port. */
+				ipc_pset_remove(pset, port);
+				ips_check_unlock(pset);
+				enabled = MACH_PORT_NULL;
+			} else {
+				enabled = pset->ips_local_name;
+				ips_unlock(pset);
+				assert(MACH_PORT_VALID(enabled));
+			}
+		} else
+			enabled = MACH_PORT_NULL;
+
+		qlimit = port->ip_qlimit;
+		msgcount = port->ip_msgcount;
+		ip_unlock(port);
+
+		*ownership = TRUE;
+		*receive_rights = TRUE;
+		*enabledp = enabled;
+		*num_msgs = (int) msgcount;
+		*backlog = (int) qlimit;
+	} else {
+		/* Sender view: no real counts are visible. */
+		is_write_unlock(space);
+
+		*ownership = FALSE;
+		*receive_rights = FALSE;
+		*enabledp = MACH_PORT_NULL;
+		*num_msgs = -1;
+		*backlog = 0;
+	}
+
+	return KERN_SUCCESS;
+}
+
+/*
+ * Routine: port_set_allocate [kernel call]
+ * Purpose:
+ * Create a new port set, give rights to task, and
+ * return task's local name for the set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Allocated a port set.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_set_allocate(space, namep)
+	ipc_space_t space;
+	mach_port_t *namep;
+{
+	ipc_pset_t pset;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* ipc_pset_alloc returns the new set locked; we need only the name */
+	kr = ipc_pset_alloc(space, namep, &pset);
+	if (kr == KERN_SUCCESS)
+		ips_unlock(pset);
+	else if (kr != KERN_RESOURCE_SHORTAGE)
+		/* compat interface: fold every other failure into one code */
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+/*
+ * Routine: port_set_deallocate [kernel call]
+ * Purpose:
+ * Destroys the task's port set. If there are any
+ * receive rights in the set, they are removed.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Deallocated the port set.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port set.
+ */
+
+kern_return_t
+port_set_deallocate(space, name)
+	ipc_space_t space;
+	mach_port_t name;
+{
+	ipc_entry_t entry;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return kr;
+	/* space is write-locked and active */
+
+	/* only genuine port sets may be destroyed through this call */
+	if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
+		is_write_unlock(space);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	/* destroying a port-set right cannot fail once the type is checked */
+	kr = ipc_right_destroy(space, name, entry);
+	/* space is unlocked */
+	assert(kr == KERN_SUCCESS);
+	return kr;
+}
+
+/*
+ * Routine: port_set_add [kernel call]
+ * Purpose:
+ * Moves receive rights into the port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Moved the receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT port_name doesn't denote port rights.
+ * KERN_NOT_RECEIVER port_name doesn't denote receive right.
+ * KERN_INVALID_ARGUMENT set_name doesn't denote a port set.
+ * Additions:
+ * KERN_NOT_RECEIVER port_name denotes a send-once right.
+ * KERN_NOT_RECEIVER port_name denotes a dead name.
+ */
+
+kern_return_t
+port_set_add(space, set_name, port_name)
+	ipc_space_t space;
+	mach_port_t set_name;
+	mach_port_t port_name;
+{
+	ipc_entry_t entry;
+	mach_port_type_t type;
+	mach_port_urefs_t urefs;
+	ipc_port_t port;
+	ipc_pset_t pset;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_right_lookup_write(space, port_name, &entry);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;
+	/* space is write-locked and active */
+
+	/* use ipc_right_info to check for dead compat entries */
+
+	kr = ipc_right_info(space, port_name, entry, &type, &urefs);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;	/* space is unlocked */
+
+	/* distinguish "a right, but not receive" from a bogus name */
+	if ((type & MACH_PORT_TYPE_RECEIVE) == 0) {
+		is_write_unlock(space);
+		if (type & MACH_PORT_TYPE_PORT_OR_DEAD)
+			return KERN_NOT_RECEIVER;
+		else
+			return KERN_INVALID_ARGUMENT;
+	}
+
+	/* downgrade to a read lock, still covering both entry lookups */
+	is_write_to_read_lock(space);
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	/* second lookup: the destination set, under the same read lock */
+	entry = ipc_entry_lookup(space, set_name);
+	if ((entry == IE_NULL) ||
+	    ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0)) {
+		is_read_unlock(space);
+		return KERN_INVALID_ARGUMENT;
+	}
+
+	pset = (ipc_pset_t) entry->ie_object;
+	assert(pset != IPS_NULL);
+
+	kr = ipc_pset_move(space, port, pset);
+	/* space is unlocked */
+	assert(kr == KERN_SUCCESS);
+	return kr;
+}
+
+/*
+ * Routine: port_set_remove [kernel call]
+ * Purpose:
+ * Removes the receive rights from the set they are in.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Removed the receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port right.
+ * KERN_NOT_RECEIVER Name denotes send rights, not receive.
+ * KERN_NOT_IN_SET Port isn't in a port set.
+ * Additions:
+ * KERN_NOT_RECEIVER Name denotes a send-once right.
+ * KERN_NOT_RECEIVER Name denotes a dead name.
+ */
+
+kern_return_t
+port_set_remove(space, name)
+	ipc_space_t space;
+	mach_port_t name;
+{
+	ipc_entry_t entry;
+	mach_port_type_t type;
+	mach_port_urefs_t urefs;
+	ipc_port_t port;
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_right_lookup_write(space, name, &entry);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;
+	/* space is write-locked and active */
+
+	/* use ipc_right_info to check for dead compat entries */
+
+	kr = ipc_right_info(space, name, entry, &type, &urefs);
+	if (kr != KERN_SUCCESS)
+		return KERN_INVALID_ARGUMENT;	/* space is unlocked */
+
+	/* distinguish "a right, but not receive" from a bogus name */
+	if ((type & (MACH_PORT_TYPE_RECEIVE)) == 0) {
+		is_write_unlock(space);
+		if (type & MACH_PORT_TYPE_PORT_OR_DEAD)
+			return KERN_NOT_RECEIVER;
+		else
+			return KERN_INVALID_ARGUMENT;
+	}
+
+	/* downgrade to a read lock; IPS_NULL means "remove from any set" */
+	is_write_to_read_lock(space);
+	port = (ipc_port_t) entry->ie_object;
+	assert(port != IP_NULL);
+
+	kr = ipc_pset_move(space, port, IPS_NULL);
+	/* space is unlocked; may return KERN_NOT_IN_SET */
+	return kr;
+}
+
+/*
+ * Routine: port_set_status [kernel call]
+ * Purpose:
+ * Retrieve list of members of a port set.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Retrieved port set status.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote a port set.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_set_status(space, name, members, membersCnt)
+	ipc_space_t space;
+	mach_port_t name;
+	mach_port_t **members;
+	mach_msg_type_number_t *membersCnt;
+{
+	kern_return_t kr;
+
+	/* compat wrapper over the new interface; fold all errors except
+	   resource shortage into KERN_INVALID_ARGUMENT */
+	kr = mach_port_get_set_status(space, name, members, membersCnt);
+	if ((kr != KERN_SUCCESS) && (kr != KERN_RESOURCE_SHORTAGE))
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+/*
+ * Routine: port_insert_send [kernel call]
+ * Purpose:
+ * Inserts send rights to a port into a task,
+ * at a given name. The name must not be in use.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted send right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Port is null or dead.
+ * KERN_INVALID_ARGUMENT Name is reserved.
+ * KERN_NAME_EXISTS Name already denotes a right.
+ * KERN_FAILURE Task already has rights for the port.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_insert_send(space, port, name)
+	ipc_space_t space;
+	ipc_port_t port;
+	mach_port_t name;
+{
+	kern_return_t kr;
+
+	if ((space == IS_NULL) ||
+	    !MACH_PORT_VALID(name) ||
+	    !IP_VALID(port))
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_object_copyout_name_compat(space, (ipc_object_t) port,
+					    MACH_MSG_TYPE_PORT_SEND, name);
+	/* map new-interface codes onto the old interface's returns */
+	switch (kr) {
+	    case KERN_SUCCESS:
+	    case KERN_NAME_EXISTS:
+	    case KERN_RESOURCE_SHORTAGE:
+		break;
+
+	    case KERN_RIGHT_EXISTS:
+		/* old interface reported this as plain KERN_FAILURE */
+		kr = KERN_FAILURE;
+		break;
+
+	    default:
+		kr = KERN_INVALID_ARGUMENT;
+		break;
+	}
+
+	return kr;
+}
+
+/*
+ * Routine: port_extract_send [kernel call]
+ * Purpose:
+ * Extracts send rights from "task"'s "his_name" port.
+ * The task is left with no rights for the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted send right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote pure send rights.
+ */
+
+kern_return_t
+port_extract_send(space, name, portp)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_port_t *portp;
+{
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* TRUE: the caller's right is removed from the space (a move) */
+	kr = ipc_object_copyin_compat(space, name,
+				      MSG_TYPE_PORT, TRUE,
+				      (ipc_object_t *) portp);
+	if (kr != KERN_SUCCESS)
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+/*
+ * Routine: port_insert_receive [kernel call]
+ * Purpose:
+ * Inserts receive/ownership rights to a port into a task,
+ * at a given name.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Inserted receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Port is null. (Can't be dead.)
+ * KERN_INVALID_ARGUMENT Name is reserved.
+ * KERN_NAME_EXISTS Name already denotes a right.
+ * KERN_FAILURE Task already has rights for the port.
+ * Additions:
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ */
+
+kern_return_t
+port_insert_receive(space, port, name)
+	ipc_space_t space;
+	ipc_port_t port;
+	mach_port_t name;
+{
+	kern_return_t kr;
+
+	if ((space == IS_NULL) ||
+	    !MACH_PORT_VALID(name) ||
+	    !IP_VALID(port))
+		return KERN_INVALID_ARGUMENT;
+
+	kr = ipc_object_copyout_name_compat(space, (ipc_object_t) port,
+					    MACH_MSG_TYPE_PORT_RECEIVE, name);
+	/* map new-interface codes onto the old interface's returns */
+	switch (kr) {
+	    case KERN_SUCCESS:
+	    case KERN_NAME_EXISTS:
+	    case KERN_RESOURCE_SHORTAGE:
+		break;
+
+	    case KERN_RIGHT_EXISTS:
+		/* old interface reported this as plain KERN_FAILURE */
+		kr = KERN_FAILURE;
+		break;
+
+	    default:
+		kr = KERN_INVALID_ARGUMENT;
+		break;
+	}
+
+	return kr;
+}
+
+/*
+ * Routine: port_extract_receive [kernel call]
+ * Purpose:
+ * Extracts receive/ownership rights
+ * from "task"'s "his_name" port.
+ *
+ * The task is left with no rights for the port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Extracted receive right.
+ * KERN_INVALID_ARGUMENT Task is null.
+ * KERN_INVALID_ARGUMENT Task is not active.
+ * KERN_INVALID_ARGUMENT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+port_extract_receive(space, name, portp)
+	ipc_space_t space;
+	mach_port_t name;
+	ipc_port_t *portp;
+{
+	kern_return_t kr;
+
+	if (space == IS_NULL)
+		return KERN_INVALID_ARGUMENT;
+
+	/* PORT_ALL moves receive/ownership; TRUE strips the caller's rights */
+	kr = ipc_object_copyin_compat(space, name,
+				      MSG_TYPE_PORT_ALL, TRUE,
+				      (ipc_object_t *) portp);
+	if (kr != KERN_SUCCESS)
+		kr = KERN_INVALID_ARGUMENT;
+
+	return kr;
+}
+
+#endif	/* MACH_IPC_COMPAT */
diff --git a/ipc/mach_port.srv b/ipc/mach_port.srv
new file mode 100644
index 0000000..c4f8536
--- /dev/null
+++ b/ipc/mach_port.srv
@@ -0,0 +1,27 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory at the University of Utah (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ * Author: Bryan Ford, University of Utah CSL
+ */
+/* This is a server presentation file. */
+
+#define KERNEL_SERVER 1
+
+#include <mach/mach_port.defs>
diff --git a/ipc/mach_rpc.c b/ipc/mach_rpc.c
new file mode 100644
index 0000000..0ceeeb4
--- /dev/null
+++ b/ipc/mach_rpc.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL). All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software is hereby
+ * granted provided that (1) source code retains these copyright, permission,
+ * and disclaimer notices, and (2) redistributions including binaries
+ * reproduce the notices in supporting documentation, and (3) all advertising
+ * materials mentioning features or use of this software display the following
+ * acknowledgement: ``This product includes software developed by the
+ * Computer Systems Laboratory at the University of Utah.''
+ *
+ * THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
+ * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSL DISCLAIM ANY LIABILITY OF
+ * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * CSL requests users of this software to return to csl-dist@cs.utah.edu any
+ * improvements that they make and grant CSL redistribution rights.
+ *
+ */
+
+#ifdef MIGRATING_THREADS
+
+#include <mach/kern_return.h>
+#include <mach/port.h>
+#include <mach/rpc.h>
+#include <mach/notify.h>
+#include <mach/mach_param.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <kern/task.h>
+#include <kern/act.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
+#include <ipc/ipc_entry.h>
+#include <ipc/ipc_space.h>
+#include <ipc/ipc_object.h>
+#include <ipc/ipc_notify.h>
+#include <ipc/ipc_port.h>
+#include <ipc/ipc_pset.h>
+#include <ipc/ipc_right.h>
+
+#undef DEBUG_MPRC
+
+/*
+ * XXX need to identify if one endpoint of an RPC is the kernel to
+ * ensure proper port name translation (or lack of). This is bogus.
+ */
+#define ISKERNELACT(act) ((act)->task == kernel_task)
+
+/*
+ * Copy the indicated port from the task associated with the source
+ * activation into the task associated with the destination activation.
+ *
+ * XXX on errors we should probably clear the portp to avoid leaking
+ * info to the other side.
+ */
+kern_return_t
+mach_port_rpc_copy(portp, sact, dact)
+	struct rpc_port_desc *portp;
+	struct Act *sact, *dact;
+{
+	ipc_space_t sspace, dspace;
+	mach_msg_type_name_t tname;
+	ipc_object_t iname;
+	kern_return_t kr;
+
+#ifdef DEBUG_MPRC
+	printf("m_p_rpc_copy(portp=%x/%x, sact=%x, dact=%x): ",
+	       portp->name, portp->msgt_name, sact, dact);
+#endif
+	sspace = sact->task->itk_space;
+	dspace = dact->task->itk_space;
+	if (sspace == IS_NULL || dspace == IS_NULL) {
+#ifdef DEBUG_MPRC
+		printf("bogus src (%x) or dst (%x) space\n", sspace, dspace);
+#endif
+		return KERN_INVALID_TASK;
+	}
+
+	if (!MACH_MSG_TYPE_PORT_ANY(portp->msgt_name)) {
+#ifdef DEBUG_MPRC
+		printf("invalid port type\n");
+#endif
+		return KERN_INVALID_VALUE;
+	}
+
+	/* kernel "names" are raw object pointers; no space translation */
+	if (ISKERNELACT(sact)) {
+		iname = (ipc_object_t) portp->name;
+		ipc_object_copyin_from_kernel(iname, portp->msgt_name);
+		kr = KERN_SUCCESS;
+	} else {
+		kr = ipc_object_copyin(sspace, portp->name, portp->msgt_name,
+				       &iname);
+	}
+	if (kr != KERN_SUCCESS) {
+#ifdef DEBUG_MPRC
+		printf("copyin returned %x\n", kr);
+#endif
+		return kr;
+	}
+
+	tname = ipc_object_copyin_type(portp->msgt_name);
+	/* a null/dead object needs no copyout; hand it across as-is */
+	if (!IO_VALID(iname)) {
+		portp->name = (mach_port_t) iname;
+		portp->msgt_name = tname;
+#ifdef DEBUG_MPRC
+		printf("iport %x invalid\n", iname);
+#endif
+		return KERN_SUCCESS;
+	}
+
+	if (ISKERNELACT(dact)) {
+		portp->name = (mach_port_t) iname;
+		kr = KERN_SUCCESS;
+	} else {
+		kr = ipc_object_copyout(dspace, iname, tname, TRUE,
+					&portp->name);
+	}
+	if (kr != KERN_SUCCESS) {
+		ipc_object_destroy(iname, tname);
+
+		if (kr == KERN_INVALID_CAPABILITY)
+			portp->name = MACH_PORT_DEAD;
+		else {
+			portp->name = MACH_PORT_NULL;
+#ifdef DEBUG_MPRC
+			/* fix: format has two %x conversions, so it needs
+			   both iname and kr (kr was missing) */
+			printf("copyout iport %x returned %x\n", iname, kr);
+#endif
+			return kr;
+		}
+	}
+
+	portp->msgt_name = tname;
+#ifdef DEBUG_MPRC
+	printf("portp=%x/%x, iname=%x\n", portp->name, portp->msgt_name, iname);
+#endif
+	return KERN_SUCCESS;
+}
+
+/*
+ * Unimplemented stub: port-signature RPC support; always fails.
+ * NOTE(review): K&R definition with no parameter declarations, so
+ * space/name/buffer/buflen all default to int — confirm the intended
+ * types before implementing this.
+ */
+kern_return_t
+mach_port_rpc_sig(space, name, buffer, buflen)
+{
+	return KERN_FAILURE;
+}
+
+#endif /* MIGRATING_THREADS */
diff --git a/ipc/port.h b/ipc/port.h
new file mode 100644
index 0000000..6e9f77b
--- /dev/null
+++ b/ipc/port.h
@@ -0,0 +1,90 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University.
+ * Copyright (c) 1993,1994 The University of Utah and
+ * the Computer Systems Laboratory (CSL).
+ * All rights reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON, THE UNIVERSITY OF UTAH AND CSL ALLOW FREE USE OF
+ * THIS SOFTWARE IN ITS "AS IS" CONDITION, AND DISCLAIM ANY LIABILITY
+ * OF ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF
+ * THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ */
+/*
+ * File: ipc/ipc_port.h
+ * Author: Rich Draves
+ * Date: 1989
+ *
+ * Implementation specific complement to mach/port.h.
+ */
+
+#ifndef _IPC_PORT_H_
+#define _IPC_PORT_H_
+
+#include <mach/port.h>
+
+/*
+ * mach_port_t must be an unsigned type. Port values
+ * have two parts, a generation number and an index.
+ * These macros encapsulate all knowledge of how
+ * a mach_port_t is layed out. However, ipc/ipc_entry.c
+ * implicitly assumes when it uses the splay tree functions
+ * that the generation number is in the low bits, so that
+ * names are ordered first by index and then by generation.
+ *
+ * If the size of generation numbers changes,
+ * be sure to update IE_BITS_GEN_MASK and friends
+ * in ipc/ipc_entry.h.
+ */
+
+#if PORT_GENERATIONS
+/*
+ * A name carries its table index in the upper bits and an 8-bit
+ * generation in the low byte.  MACH_PORT_GEN yields the generation
+ * shifted into the top byte, the form MACH_PORT_MAKE consumes.
+ */
+#define MACH_PORT_INDEX(name) ((name) >> 8)
+#define MACH_PORT_GEN(name) (((name) & 0xff) << 24)
+#define MACH_PORT_MAKE(index, gen) (((index) << 8) | ((gen) >> 24))
+#else
+/* Generations disabled: a name is just the bare index. */
+#define MACH_PORT_INDEX(name) (name)
+#define MACH_PORT_GEN(name) 0
+#define MACH_PORT_MAKE(index, gen) (index)
+#endif
+
+/* Name with the index zeroed, keeping only the generation part. */
+#define MACH_PORT_NGEN(name) MACH_PORT_MAKE(0, MACH_PORT_GEN(name))
+/* Build a name from an index and the generation held in entry bits. */
+#define MACH_PORT_MAKEB(index, bits) MACH_PORT_MAKE(index, IE_BITS_GEN(bits))
+
+/*
+ * Typedefs for code cleanliness. These must all have
+ * the same (unsigned) type as mach_port_t.
+ */
+
+typedef mach_port_t mach_port_index_t; /* index values */
+typedef mach_port_t mach_port_gen_t; /* generation numbers */
+
+
+/* User references fit in 16 bits; see IE_BITS_UREFS in ipc_entry.h. */
+#define MACH_PORT_UREFS_MAX ((mach_port_urefs_t) ((1 << 16) - 1))
+
+/* TRUE if adding delta (> 0) would wrap around or exceed the limit. */
+#define MACH_PORT_UREFS_OVERFLOW(urefs, delta) \
+ (((delta) > 0) && \
+ ((((urefs) + (delta)) <= (urefs)) || \
+ (((urefs) + (delta)) > MACH_PORT_UREFS_MAX)))
+
+/* TRUE if subtracting -delta (delta < 0) would drop below zero. */
+#define MACH_PORT_UREFS_UNDERFLOW(urefs, delta) \
+ (((delta) < 0) && (-(delta) > (urefs)))
+
+#endif /* _IPC_PORT_H_ */