/*
* Mach Operating System
* Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* Author: David B. Golub, Carnegie Mellon University
* Date: 10/88
*/
#ifndef _IO_REQ_
#define _IO_REQ_
#include <mach.h>
#include <cthreads.h>
#include "dev_hdr.h"
/*
* IO request element, queued on device for delayed replies.
*/
typedef struct io_req *io_req_t;
struct io_req {
struct io_req * io_next; /* next, ... */
struct io_req * io_prev; /* prev pointers: link in done,
deferred, or in-progress list */
mach_device_t io_device; /* pointer to open-device structure */
char * io_dev_ptr; /* pointer to driver structure -
filled in by driver if necessary */
int io_unit; /* unit number ('minor') of device */
int io_op; /* IO operation */
dev_mode_t io_mode; /* operation mode (wait, truncate) */
recnum_t io_recnum; /* starting record number for
random-access devices */
union io_un {
io_buf_ptr_t data; /* data, for IO requests */
} io_un;
#define io_data io_un.data
long io_count; /* amount requested */
long io_alloc_size; /* amount allocated */
long io_residual; /* amount NOT done */
io_return_t io_error; /* error code */
/* call when done - returns TRUE if IO really finished */
boolean_t (*io_done)(io_req_t);
mach_port_t io_reply_port; /* reply port, for asynchronous
messages */
mach_msg_type_name_t io_reply_port_type;
/* send or send-once right? */
struct io_req * io_link; /* forward link (for driver header) */
struct io_req * io_rlink; /* reverse link (for driver header) */
// vm_map_copy_t io_copy; /* vm_map_copy obj. for this op. */
long io_total; /* total op size, for write */
struct mutex io_req_lock;
// decl_simple_lock_data(,io_req_lock)
/* Lock for this structure */
long io_physrec; /* mapping to the physical block
number */
long io_rectotal; /* total number of blocks to move */
};
/*
* LOCKING NOTE: Operations on io_req's are in general single-threaded by
* the invoking code, obviating the need for a lock. The usual IO_CALL
* path through the code is: Initiating thread hands io_req to device driver,
* driver passes it to io_done thread, io_done thread sends reply message. No
* locking is needed in this sequence. Unfortunately, a synchronous wait
* for a buffer requires a lock to avoid problems if the wait and interrupt
* happen simultaneously on different processors.
*/
#define ior_lock(ior) mutex_lock(&(ior)->io_req_lock)
#define ior_unlock(ior) mutex_unlock(&(ior)->io_req_lock)
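/*
 * Illustrative sketch (not part of this interface): the synchronous
 * wait described in the locking note above, expressed with cthreads
 * primitives.  The condition variable "io_done_cond" is hypothetical -
 * the real wait/wakeup mechanism lives in the driver code - but the
 * locking pattern is the one the note requires:
 *
 *	ior_lock(ior);
 *	while (!(ior->io_op & IO_DONE)) {
 *		ior->io_op |= IO_WANTED;
 *		condition_wait(&io_done_cond, &ior->io_req_lock);
 *	}
 *	ior_unlock(ior);
 */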
/*
* Flags and operations
*/
#define IO_WRITE 0x00000000 /* operation is write */
#define IO_READ 0x00000001 /* operation is read */
#define IO_OPEN 0x00000002 /* operation is open */
#define IO_DONE 0x00000100 /* operation complete */
#define IO_ERROR 0x00000200 /* error on operation */
#define IO_BUSY 0x00000400 /* operation in progress */
#define IO_WANTED 0x00000800 /* wakeup when no longer BUSY */
#define IO_BAD 0x00001000 /* bad disk block */
#define IO_CALL 0x00002000 /* call io_done_thread when done */
#define IO_INBAND 0x00004000 /* mig call was inband */
#define IO_INTERNAL 0x00008000 /* internal, device-driver specific */
#define IO_LOANED 0x00010000 /* ior loaned by another module */
#define IO_SPARE_START 0x00020000 /* start of spare flags */
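/*
 * Illustrative sketch: how the flags above combine in io_op.  An
 * asynchronous read queued through the io_done thread might be set
 * up and checked as follows (hypothetical fragment):
 *
 *	ior->io_op = IO_READ | IO_CALL;
 *	...(hand ior to the driver)...
 *	if (ior->io_op & IO_ERROR)
 *		handle_error(ior->io_error);
 */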
/*
* Standard completion routine for io_requests.
*/
void iodone(io_req_t);
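/*
 * Illustrative sketch (hypothetical): an io_done callback.  Per the
 * field comment in struct io_req, it returns TRUE once the IO really
 * finished; returning FALSE keeps the ior alive, e.g. to restart a
 * transfer that completed only partially:
 *
 *	boolean_t my_io_done(io_req_t ior)
 *	{
 *		if (ior->io_residual != 0 && !(ior->io_op & IO_ERROR)) {
 *			...(requeue the remaining io_residual bytes)...
 *			return FALSE;
 *		}
 *		return TRUE;
 *	}
 */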
/*
 * Macros to allocate and free IORs - will convert to zones later.
 * Note: the 'size' argument to io_req_alloc is currently unused.
 */
#define io_req_alloc(ior,size) \
MACRO_BEGIN \
(ior) = (io_req_t)malloc(sizeof(struct io_req)); \
mutex_init(&(ior)->io_req_lock); \
MACRO_END
#define io_req_free(ior) \
(free(ior))
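/*
 * Illustrative sketch: typical lifetime of an ior using the macros
 * above.  "dev", "rec", and "nbytes" are hypothetical; the 'size'
 * argument to io_req_alloc is currently ignored:
 *
 *	io_req_t ior;
 *
 *	io_req_alloc(ior, 0);
 *	ior->io_device = dev;
 *	ior->io_op     = IO_READ | IO_CALL;
 *	ior->io_recnum = rec;
 *	ior->io_count  = nbytes;
 *	...(queue to driver; completion is reported via iodone())...
 *	io_req_free(ior);
 */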
//zone_t io_inband_zone; /* for inband reads */
#endif /* _IO_REQ_ */