/*
* Mach Operating System
* Copyright (c) 1991,1990,1989 Carnegie Mellon University
* All Rights Reserved.
*
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
*
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
*
* Carnegie Mellon requests users of this software to return to
*
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
*
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* Author: David B. Golub, Carnegie Mellon University
* Date: 7/89
*
* Block IO driven from generic kernel IO interface.
*/
#include <mach/kern_return.h>
#include <device/param.h>
#include <device/device_types.h>
#include <device/io_req.h>
#include <device/ds_routines.h>

io_return_t block_io(
	void		(*strat)(io_req_t),
	void		(*max_count)(io_req_t),
	io_req_t	ior)
{
	kern_return_t	rc;
	boolean_t	wait = FALSE;

	/*
	 * Make sure the size is not too large by letting max_count
	 * change io_count.  If we are doing a write, then io_alloc_size
	 * preserves the original io_count.
	 */
	(*max_count)(ior);

	/*
	 * If reading, allocate memory.  If writing, wire
	 * down the incoming memory.
	 */
	if (ior->io_op & IO_READ)
		rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
	else
		rc = device_write_get(ior, &wait);
	if (rc != KERN_SUCCESS)
		return (rc);

	/*
	 * Queue the operation for the device.
	 */
	(*strat)(ior);

	/*
	 * The I/O is now queued.  Wait for it if needed.
	 */
	if (wait) {
		iowait(ior);
		return (D_SUCCESS);
	}

	return (D_IO_QUEUED);
}
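
/*
 * Illustrative sketch, not part of the original file: a block device
 * driver would typically route its read and write entry points through
 * block_io(), passing its own strategy routine together with a
 * max_count routine such as minphys() below.  The driver names
 * (xx_read, xx_write, xxstrategy) are hypothetical.
 *
 *	io_return_t xx_read(dev_t dev, io_req_t ior)
 *	{
 *		return (block_io(xxstrategy, minphys, ior));
 *	}
 *
 *	io_return_t xx_write(dev_t dev, io_req_t ior)
 *	{
 *		return (block_io(xxstrategy, minphys, ior));
 *	}
 */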

/*
 * 'standard' max_count routine.  VM continuations mean that this
 * code can cope with arbitrarily-sized write operations (they won't be
 * atomic, but any caller that cares will do the op synchronously).
 */
#define MAX_PHYS	(256 * 1024)

void minphys(io_req_t ior)
{
	if ((ior->io_op & (IO_WRITE | IO_READ | IO_OPEN)) == IO_WRITE)
		return;
	if (ior->io_count > MAX_PHYS)
		ior->io_count = MAX_PHYS;
}

/*
 * Dummy routine placed in device switch entries to indicate that
 * block device may be mapped.
 */
vm_offset_t block_io_mmap(void)
{
	return (0);
}
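
/*
 * Illustrative sketch, not part of the original file: the address of
 * block_io_mmap() is what a mappable block driver places in the
 * memory-mapping slot of its device switch entry (assumed here to be
 * the d_mmap field of struct dev_ops).  The entry name is hypothetical
 * and all other fields are omitted.
 *
 *	struct dev_ops xx_dev_ops = {
 *		.d_name = "xx",
 *		.d_mmap = block_io_mmap,
 *	};
 */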