File: | obj-scan-build/../xen/block.c |
Location: | line 385, column 3 |
Description: | Value stored to 'err' is never read |
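Note: | In device_open() (line 385), 'err' is set to KERN_RESOURCE_SHORTAGE and control jumps to 'out', but the reply issued there always passes D_SUCCESS, so the stored value is never read. Below is a minimal sketch of one possible remediation, assuming the intent is to propagate the recorded error into the reply; this is illustrative only, not the upstream fix. |

    /* Sketch only: reply with the recorded error instead of unconditionally D_SUCCESS. */
    out:
      if (IP_VALID (reply_port))
        ds_device_open_reply (reply_port, reply_port_type,
                              err ? err : D_SUCCESS, port);
      else
        device_close (bd);
      return MIG_NO_REPLY;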
1 | /* |
2 | * Copyright (C) 2006-2009, 2011 Free Software Foundation |
3 | * |
4 | * This program is free software ; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation ; either version 2 of the License, or |
7 | * (at your option) any later version. |
8 | * |
9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY ; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with the program ; if not, write to the Free Software |
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | */ |
18 | |
19 | #include <sys/types.h> |
20 | #include <mach/mig_errors.h> |
21 | #include <kern/kalloc.h> |
22 | #include <ipc/ipc_port.h> |
23 | #include <ipc/ipc_space.h> |
24 | #include <vm/vm_kern.h> |
25 | #include <vm/vm_user.h> |
26 | #include <device/device_types.h> |
27 | #include <device/device_port.h> |
28 | #include <device/disk_status.h> |
29 | #include <device/device_reply.user.h> |
30 | #include <device/device_emul.h> |
31 | #include <device/ds_routines.h> |
32 | #include <xen/public/io/blkif.h> |
33 | #include <xen/evt.h> |
34 | #include <string.h> |
35 | #include <util/atoi.h> |
36 | #include "store.h" |
37 | #include "block.h" |
38 | #include "grant.h" |
39 | #include "ring.h" |
40 | #include "xen.h" |
41 | |
42 | /* Hypervisor part */ |
43 | |
44 | struct block_data { |
45 | struct device device; |
46 | char *name; |
47 | int open_count; |
48 | char *backend; |
49 | domid_t domid; |
50 | char *vbd; |
51 | int handle; |
52 | unsigned info; |
53 | dev_mode_t mode; |
54 | unsigned sector_size; |
55 | unsigned long nr_sectors; |
56 | ipc_port_t port; |
57 | blkif_front_ring_t ring; |
58 | evtchn_port_t evt; |
59 | simple_lock_data_t lock; |
60 | simple_lock_data_t pushlock; |
61 | }; |
62 | |
63 | static int n_vbds; |
64 | static struct block_data *vbd_data; |
65 | |
66 | struct device_emulation_ops hyp_block_emulation_ops; |
67 | |
68 | static void hyp_block_intr(int unit) { |
69 | struct block_data *bd = &vbd_data[unit]; |
70 | blkif_response_t *rsp; |
71 | int more; |
72 | io_return_t *err; |
73 | |
74 | simple_lock(&bd->lock); |
75 | more = RING_HAS_UNCONSUMED_RESPONSES(&bd->ring); |
76 | while (more) { |
77 | rmb(); /* make sure we see responses */ |
78 | rsp = RING_GET_RESPONSE(&bd->ring, bd->ring.rsp_cons++); |
79 | err = (void *) (unsigned long) rsp->id; |
80 | switch (rsp->status) { |
81 | case BLKIF_RSP_ERROR: |
82 | *err = D_IO_ERROR; |
83 | break; |
84 | case BLKIF_RSP_OKAY: |
85 | break; |
86 | default: |
87 | printf("Unrecognized blkif status %d\n", rsp->status); |
88 | goto drop; |
89 | } |
90 | thread_wakeup(err); |
91 | drop: |
92 | thread_wakeup_one(bd); |
93 | RING_FINAL_CHECK_FOR_RESPONSES(&bd->ring, more); |
94 | } |
95 | simple_unlock(&bd->lock); |
96 | } |
97 | |
98 | #define VBD_PATH "device/vbd" |
99 | void hyp_block_init(void) { |
100 | char **vbds, **vbd; |
101 | char *c; |
102 | int i, disk, partition; |
103 | int n; |
104 | int grant; |
105 | char port_name[10]; |
106 | char *prefix; |
107 | char device_name[32]; |
108 | domid_t domid; |
109 | evtchn_port_t evt; |
110 | hyp_store_transaction_t t; |
111 | vm_offset_t addr; |
112 | struct block_data *bd; |
113 | blkif_sring_t *ring; |
114 | |
115 | vbds = hyp_store_ls(0, 1, VBD_PATH); |
116 | if (!vbds) { |
117 | printf("hd: No block device (%s). Hoping you don't need any\n", hyp_store_error); |
118 | n_vbds = 0; |
119 | return; |
120 | } |
121 | |
122 | n = 0; |
123 | for (vbd = vbds; *vbd; vbd++) |
124 | n++; |
125 | |
126 | vbd_data = (void*) kalloc(n * sizeof(*vbd_data)); |
127 | if (!vbd_data) { |
128 | printf("hd: No memory room for VBD\n"); |
129 | n_vbds = 0; |
130 | return; |
131 | } |
132 | n_vbds = n; |
133 | |
134 | for (n = 0; n < n_vbds; n++) { |
135 | bd = &vbd_data[n]; |
136 | mach_atoi((u_char *) vbds[n], &bd->handle); |
137 | if (bd->handle == MACH_ATOI_DEFAULT) |
138 | continue; |
139 | |
140 | bd->open_count = -2; |
141 | bd->vbd = vbds[n]; |
142 | |
143 | /* Get virtual number. */ |
144 | i = hyp_store_read_int(0, 5, VBD_PATH, "/", vbds[n], "/", "virtual-device"); |
145 | if (i == -1) |
146 | panic("hd: couldn't read virtual device of VBD %s\n", vbds[n]); |
147 | if ((i >> 28) == 1) { |
148 | /* xvd, new format */ |
149 | prefix = "xvd"; |
150 | disk = (i >> 8) & ((1 << 20) - 1); |
151 | partition = i & ((1 << 8) - 1); |
152 | } else if ((i >> 8) == 202) { |
153 | /* xvd, old format */ |
154 | prefix = "xvd"; |
155 | disk = (i >> 4) & ((1 << 4) - 1); |
156 | partition = i & ((1 << 4) - 1); |
157 | } else if ((i >> 8) == 8) { |
158 | /* SCSI */ |
159 | prefix = "sd"; |
160 | disk = (i >> 4) & ((1 << 4) - 1); |
161 | partition = i & ((1 << 4) - 1); |
162 | } else if ((i >> 8) == 3) { |
163 | /* IDE primary */ |
164 | prefix = "hd"; |
165 | disk = (i >> 6) & ((1 << 2) - 1); |
166 | partition = i & ((1 << 6) - 1); |
167 | } else if ((i >> 8) == 22) { |
168 | /* IDE secondary */ |
169 | prefix = "hd"; |
170 | disk = ((i >> 6) & ((1 << 2) - 1)) + 2; |
171 | partition = i & ((1 << 6) - 1); |
172 | } else if ((i >> 8) == 33) { |
173 | /* IDE 3 */ |
174 | prefix = "hd"; |
175 | disk = ((i >> 6) & ((1 << 2) - 1)) + 4; |
176 | partition = i & ((1 << 6) - 1); |
177 | } else if ((i >> 8) == 34) { |
178 | /* IDE 4 */ |
179 | prefix = "hd"; |
180 | disk = ((i >> 6) & ((1 << 2) - 1)) + 6; |
181 | partition = i & ((1 << 6) - 1); |
182 | } else if ((i >> 8) == 56) { |
183 | /* IDE 5 */ |
184 | prefix = "hd"; |
185 | disk = ((i >> 6) & ((1 << 2) - 1)) + 8; |
186 | partition = i & ((1 << 6) - 1); |
187 | } else if ((i >> 8) == 57) { |
188 | /* IDE 6 */ |
189 | prefix = "hd"; |
190 | disk = ((i >> 6) & ((1 << 2) - 1)) + 10; |
191 | partition = i & ((1 << 6) - 1); |
192 | } else if ((i >> 8) == 88) { |
193 | /* IDE 7 */ |
194 | prefix = "hd"; |
195 | disk = ((i >> 6) & ((1 << 2) - 1)) + 12; |
196 | partition = i & ((1 << 6) - 1); |
197 | } else if ((i >> 8) == 89) { |
198 | /* IDE 8 */ |
199 | prefix = "hd"; |
200 | disk = ((i >> 6) & ((1 << 2) - 1)) + 14; |
201 | partition = i & ((1 << 6) - 1); |
202 | } else if ((i >> 8) == 90) { |
203 | /* IDE 9 */ |
204 | prefix = "hd"; |
205 | disk = ((i >> 6) & ((1 << 2) - 1)) + 16; |
206 | partition = i & ((1 << 6) - 1); |
207 | } else if ((i >> 8) == 91) { |
208 | /* IDE 10 */ |
209 | prefix = "hd"; |
210 | disk = ((i >> 6) & ((1 << 2) - 1)) + 18; |
211 | partition = i & ((1 << 6) - 1); |
212 | } else { |
213 | printf("unsupported VBD number %d\n", i); |
214 | continue; |
215 | } |
216 | if (partition) |
217 | sprintf(device_name, "%s%ds%d", prefix, disk, partition); |
218 | else |
219 | sprintf(device_name, "%s%d", prefix, disk); |
220 | bd->name = (char*) kalloc(strlen(device_name) + 1); |
221 | strcpy(bd->name, device_name); |
222 | |
223 | /* Get domain id of backend driver. */ |
224 | i = hyp_store_read_int(0, 5, VBD_PATH, "/", vbds[n], "/", "backend-id"); |
225 | if (i == -1) |
226 | panic("%s: couldn't read backend domid (%s)", device_name, hyp_store_error); |
227 | bd->domid = domid = i; |
228 | |
229 | do { |
230 | t = hyp_store_transaction_start(); |
231 | |
232 | /* Get a page for ring */ |
233 | if ((addr = vm_page_grab_phys_addr()) == -1) |
234 | panic("%s: couldn't allocate space for store ring\n", device_name); |
235 | ring = (void*) phystokv(addr); |
236 | SHARED_RING_INIT(ring); |
237 | FRONT_RING_INIT(&bd->ring, ring, PAGE_SIZE); |
238 | grant = hyp_grant_give(domid, atop(addr), 0); |
239 | |
240 | /* and give it to backend. */ |
241 | i = sprintf(port_name, "%d", grant); |
242 | c = hyp_store_write(t, port_name, 5, VBD_PATH, "/", vbds[n], "/", "ring-ref"); |
243 | if (!c) |
244 | panic("%s: couldn't store ring reference (%s)", device_name, hyp_store_error); |
245 | kfree((vm_offset_t) c, strlen(c)+1); |
246 | |
247 | /* Allocate an event channel and give it to backend. */ |
248 | bd->evt = evt = hyp_event_channel_alloc(domid); |
249 | hyp_evt_handler(evt, hyp_block_intr, n, SPL7); |
250 | i = sprintf(port_name, "%lu", evt); |
251 | c = hyp_store_write(t, port_name, 5, VBD_PATH, "/", vbds[n], "/", "event-channel"); |
252 | if (!c) |
253 | panic("%s: couldn't store event channel (%s)", device_name, hyp_store_error); |
254 | kfree((vm_offset_t) c, strlen(c)+1); |
255 | c = hyp_store_write(t, hyp_store_state_initialized, 5, VBD_PATH, "/", vbds[n], "/", "state"); |
256 | if (!c) |
257 | panic("%s: couldn't store state (%s)", device_name, hyp_store_error); |
258 | kfree((vm_offset_t) c, strlen(c)+1); |
259 | } while (!hyp_store_transaction_stop(t)); |
260 | /* TODO randomly wait? */ |
261 | |
262 | c = hyp_store_read(0, 5, VBD_PATH, "/", vbds[n], "/", "backend"); |
263 | if (!c) |
264 | panic("%s: couldn't get path to backend (%s)", device_name, hyp_store_error); |
265 | bd->backend = c; |
266 | |
267 | while(1) { |
268 | i = hyp_store_read_int(0, 3, bd->backend, "/", "state"); |
269 | if (i == MACH_ATOI_DEFAULT) |
270 | panic("can't read state from %s", bd->backend); |
271 | if (i == XenbusStateConnected) |
272 | break; |
273 | hyp_yield(); |
274 | } |
275 | |
276 | i = hyp_store_read_int(0, 3, bd->backend, "/", "sectors"); |
277 | if (i == -1) |
278 | panic("%s: couldn't get number of sectors (%s)", device_name, hyp_store_error); |
279 | bd->nr_sectors = i; |
280 | |
281 | i = hyp_store_read_int(0, 3, bd->backend, "/", "sector-size"); |
282 | if (i == -1) |
283 | panic("%s: couldn't get sector size (%s)", device_name, hyp_store_error); |
284 | if (i & ~(2*(i-1)+1)) |
285 | panic("sector size %d is not a power of 2\n", i); |
286 | if (i > PAGE_SIZE || PAGE_SIZE % i != 0) |
287 | panic("%s: couldn't handle sector size %d with pages of size %d\n", device_name, i, PAGE_SIZE); |
288 | bd->sector_size = i; |
289 | |
290 | i = hyp_store_read_int(0, 3, bd->backend, "/", "info"); |
291 | if (i == -1) |
292 | panic("%s: couldn't get info (%s)", device_name, hyp_store_error); |
293 | bd->info = i; |
294 | |
295 | c = hyp_store_read(0, 3, bd->backend, "/", "mode"); |
296 | if (!c) |
297 | panic("%s: couldn't get backend's mode (%s)", device_name, hyp_store_error); |
298 | if ((c[0] == 'w') && !(bd->info & VDISK_READONLY)) |
299 | bd->mode = D_READ|D_WRITE; |
300 | else |
301 | bd->mode = D_READ; |
302 | |
303 | c = hyp_store_read(0, 3, bd->backend, "/", "params"); |
304 | if (!c) |
305 | panic("%s: couldn't get backend's real device (%s)", device_name, hyp_store_error); |
306 | |
307 | /* TODO: change suffix */ |
308 | printf("%s: dom%d's VBD %s (%s,%c%s) %ldMB\n", device_name, domid, |
309 | vbds[n], c, bd->mode & D_WRITE ? 'w' : 'r', |
310 | bd->info & VDISK_CDROM ? ", cdrom" : "", |
311 | bd->nr_sectors / ((1<<20) / 512)); |
312 | kfree((vm_offset_t) c, strlen(c)+1); |
313 | |
314 | c = hyp_store_write(0, hyp_store_state_connected, 5, VBD_PATH, "/", bd->vbd, "/", "state"); |
315 | if (!c) |
316 | panic("couldn't store state for %s (%s)", device_name, hyp_store_error); |
317 | kfree((vm_offset_t) c, strlen(c)+1); |
318 | |
319 | bd->open_count = -1; |
320 | bd->device.emul_ops = &hyp_block_emulation_ops; |
321 | bd->device.emul_data = bd; |
322 | simple_lock_init(&bd->lock); |
323 | simple_lock_init(&bd->pushlock); |
324 | } |
325 | } |
326 | |
327 | static ipc_port_t |
328 | dev_to_port(void *d) |
329 | { |
330 | struct block_data *b = d; |
331 | if (!d) |
332 | return IP_NULL; |
333 | return ipc_port_make_send(b->port); |
334 | } |
335 | |
336 | static int |
337 | device_close(void *devp) |
338 | { |
339 | struct block_data *bd = devp; |
340 | if (--bd->open_count < 0) |
341 | panic("too many closes on %s", bd->name); |
342 | printf("close, %s count %d\n", bd->name, bd->open_count); |
343 | if (bd->open_count) |
344 | return 0; |
345 | ipc_kobject_set(bd->port, IKO_NULL, IKOT_NONE); |
346 | ipc_port_dealloc_kernel(bd->port); |
347 | return 0; |
348 | } |
349 | |
350 | static io_return_t |
351 | device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type, |
352 | dev_mode_t mode, char *name, device_t *devp /* out */) |
353 | { |
354 | int i, err = 0; |
355 | ipc_port_t port, notify; |
356 | struct block_data *bd; |
357 | |
358 | for (i = 0; i < n_vbds; i++) |
359 | if (!strcmp(name, vbd_data[i].name)) |
360 | break; |
361 | |
362 | if (i == n_vbds) |
363 | return D_NO_SUCH_DEVICE; |
364 | |
365 | bd = &vbd_data[i]; |
366 | if (bd->open_count == -2) |
367 | /* couldn't be initialized */ |
368 | return D_NO_SUCH_DEVICE; |
369 | |
370 | if ((mode & D_WRITE) && !(bd->mode & D_WRITE)) |
371 | return D_READ_ONLY; |
372 | |
373 | if (bd->open_count >= 0) { |
374 | *devp = &bd->device ; |
375 | bd->open_count++ ; |
376 | printf("re-open, %s count %d\n", bd->name, bd->open_count); |
377 | return D_SUCCESS; |
378 | } |
379 | |
380 | bd->open_count = 1; |
381 | printf("%s count %d\n", bd->name, bd->open_count); |
382 | |
383 | port = ipc_port_alloc_kernel(); |
384 | if (port == IP_NULL) { |
385 | err = KERN_RESOURCE_SHORTAGE; |
Value stored to 'err' is never read | |
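(The 'out:' label below replies with D_SUCCESS unconditionally, so this assignment has no effect.) |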
386 | goto out; |
387 | } |
388 | bd->port = port; |
389 | |
390 | *devp = &bd->device; |
391 | |
392 | ipc_kobject_set (port, (ipc_kobject_t) &bd->device, IKOT_DEVICE); |
393 | |
394 | notify = ipc_port_make_sonce (bd->port); |
395 | ip_lock (bd->port); |
396 | ipc_port_nsrequest (bd->port, 1, notify, ¬ify); |
397 | assert (notify == IP_NULL); |
398 | |
399 | out: |
400 | if (IP_VALID (reply_port)) |
401 | ds_device_open_reply (reply_port, reply_port_type, D_SUCCESS, port); |
402 | else |
403 | device_close(bd); |
404 | return MIG_NO_REPLY; |
405 | } |
406 | |
407 | static io_return_t |
408 | device_read (void *d, ipc_port_t reply_port, |
409 | mach_msg_type_name_t reply_port_type, dev_mode_t mode, |
410 | recnum_t bn, int count, io_buf_ptr_t *data, |
411 | unsigned *bytes_read) |
412 | { |
413 | int resid, amt; |
414 | io_return_t err = 0; |
415 | vm_page_t pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
416 | grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
417 | int nbpages; |
418 | vm_map_copy_t copy; |
419 | vm_offset_t offset, alloc_offset, o; |
420 | vm_object_t object; |
421 | vm_page_t m; |
422 | vm_size_t len, size; |
423 | struct block_data *bd = d; |
424 | struct blkif_request *req; |
425 | |
426 | *data = 0; |
427 | *bytes_read = 0; |
428 | |
429 | if (count < 0) |
430 | return D_INVALID_SIZE; |
431 | if (count == 0) |
432 | return 0; |
433 | |
434 | /* Allocate an object to hold the data. */ |
435 | size = round_page (count); |
436 | object = vm_object_allocate (size); |
437 | if (! object) |
438 | { |
439 | err = D_NO_MEMORY; |
440 | goto out; |
441 | } |
442 | alloc_offset = offset = 0; |
443 | resid = count; |
444 | |
445 | while (resid && !err) |
446 | { |
447 | unsigned reqn; |
448 | int i; |
449 | int last_sect; |
450 | |
451 | nbpages = 0; |
452 | |
453 | /* Determine size of I/O this time around. */ |
454 | len = round_page(offset + resid) - offset; |
455 | if (len > PAGE_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST) |
456 | len = PAGE_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST; |
457 | |
458 | /* Allocate pages. */ |
459 | while (alloc_offset < offset + len) |
460 | { |
461 | while ((m = vm_page_grab (FALSE)) == 0) |
462 | VM_PAGE_WAIT (0); |
463 | assert (! m->active && ! m->inactive); |
464 | m->busy = TRUE; |
465 | assert(nbpages < BLKIF_MAX_SEGMENTS_PER_REQUEST); |
466 | pages[nbpages++] = m; |
467 | alloc_offset += PAGE_SIZE; |
468 | } |
469 | |
470 | /* Do the read. */ |
471 | amt = len; |
472 | if (amt > resid) |
473 | amt = resid; |
474 | |
475 | /* allocate a request */ |
476 | spl_t spl = splsched(); |
477 | while(1) { |
478 | simple_lock(&bd->lock); |
479 | if (!RING_FULL(&bd->ring)) |
480 | break; |
481 | thread_sleep(bd, &bd->lock, FALSE); |
482 | } |
483 | mb(); |
484 | reqn = bd->ring.req_prod_pvt++; |
485 | simple_lock(&bd->pushlock); |
486 | simple_unlock(&bd->lock); |
487 | (void) splx(spl); |
488 | |
489 | req = RING_GET_REQUEST(&bd->ring, reqn); |
490 | req->operation = BLKIF_OP_READ; |
491 | req->nr_segments = nbpages; |
492 | req->handle = bd->handle; |
493 | req->id = (unsigned64_t) (unsigned long) &err; /* pointer on the stack */ |
494 | req->sector_number = bn + offset / 512; |
495 | for (i = 0; i < nbpages; i++) { |
496 | req->seg[i].gref = gref[i] = hyp_grant_give(bd->domid, atop(pages[i]->phys_addr), 0); |
497 | req->seg[i].first_sect = 0; |
498 | req->seg[i].last_sect = PAGE_SIZE/512 - 1; |
499 | } |
500 | last_sect = ((amt - 1) & PAGE_MASK) / 512; |
501 | req->seg[nbpages-1].last_sect = last_sect; |
502 | |
503 | memset((void*) phystokv(pages[nbpages-1]->phys_addr |
504 | + (last_sect + 1) * 512), |
505 | 0, PAGE_SIZE - (last_sect + 1) * 512); |
506 | |
507 | /* no need for a lock: as long as the request is not pushed, the event won't be triggered */ |
508 | assert_wait((event_t) &err, FALSE); |
509 | |
510 | int notify; |
511 | wmb(); /* make sure it sees requests */ |
512 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bd->ring, notify); |
513 | if (notify) |
514 | hyp_event_channel_send(bd->evt); |
515 | simple_unlock(&bd->pushlock); |
516 | |
517 | thread_block(NULL); |
518 | |
519 | if (err) |
520 | printf("error reading %d bytes at sector %ld\n", amt, |
521 | bn + offset / 512); |
522 | |
523 | for (i = 0; i < nbpages; i++) |
524 | hyp_grant_takeback(gref[i]); |
525 | |
526 | /* Compute number of pages to insert in object. */ |
527 | o = offset; |
528 | |
529 | resid -= amt; |
530 | if (resid == 0) |
531 | offset = o + len; |
532 | else |
533 | offset += amt; |
534 | |
535 | /* Add pages to the object. */ |
536 | vm_object_lock (object); |
537 | for (i = 0; i < nbpages; i++) |
538 | { |
539 | m = pages[i]; |
540 | assert (m->busy); |
541 | vm_page_lock_queues (); |
542 | PAGE_WAKEUP_DONE (m); |
543 | m->dirty = TRUE; |
544 | vm_page_insert (m, object, o); |
545 | vm_page_unlock_queues (); |
546 | o += PAGE_SIZE; |
547 | } |
548 | vm_object_unlock (object); |
549 | } |
550 | |
551 | out: |
552 | if (! err) |
553 | err = vm_map_copyin_object (object, 0, round_page (count), &copy); |
554 | if (! err) |
555 | { |
556 | *data = (io_buf_ptr_t) copy; |
557 | *bytes_read = count - resid; |
558 | } |
559 | else |
560 | vm_object_deallocate (object); |
561 | return err; |
562 | } |
563 | |
564 | static io_return_t |
565 | device_write(void *d, ipc_port_t reply_port, |
566 | mach_msg_type_name_t reply_port_type, dev_mode_t mode, |
567 | recnum_t bn, io_buf_ptr_t data, unsigned int count, |
568 | int *bytes_written) |
569 | { |
570 | io_return_t err = 0; |
571 | vm_map_copy_t copy = (vm_map_copy_t) data; |
572 | vm_offset_t aligned_buffer = 0; |
573 | int copy_npages = atop(round_page(count)); |
574 | vm_offset_t phys_addrs[copy_npages]; |
575 | struct block_data *bd = d; |
576 | blkif_request_t *req; |
577 | grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
578 | unsigned reqn, size; |
579 | int i, nbpages, j; |
580 | |
581 | if (!(bd->mode & D_WRITE)) |
582 | return D_READ_ONLY; |
583 | |
584 | if (count == 0) { |
585 | vm_map_copy_discard(copy); |
586 | return 0; |
587 | } |
588 | |
589 | if (count % bd->sector_size) |
590 | return D_INVALID_SIZE; |
591 | |
592 | if (count > copy->size) |
593 | return D_INVALID_SIZE; |
594 | |
595 | if (copy->type != VM_MAP_COPY_PAGE_LIST || copy->offset & PAGE_MASK) { |
596 | /* Unaligned write. Has to copy data before passing it to the backend. */ |
597 | kern_return_t kr; |
598 | vm_offset_t buffer; |
599 | |
600 | kr = kmem_alloc(device_io_map, &aligned_buffer, count); |
601 | if (kr != KERN_SUCCESS) |
602 | return kr; |
603 | |
604 | kr = vm_map_copyout(device_io_map, &buffer, vm_map_copy_copy(copy)); |
605 | if (kr != KERN_SUCCESS) { |
606 | kmem_free(device_io_map, aligned_buffer, count); |
607 | return kr; |
608 | } |
609 | |
610 | memcpy((void*) aligned_buffer, (void*) buffer, count); |
611 | |
612 | vm_deallocate (device_io_map, buffer, count); |
613 | |
614 | for (i = 0; i < copy_npages; i++) |
615 | phys_addrs[i] = kvtophys(aligned_buffer + ptoa(i)); |
616 | } else { |
617 | for (i = 0; i < copy_npages; i++) |
618 | phys_addrs[i] = copy->cpy_page_list[i]->phys_addr; |
619 | } |
620 | |
621 | for (i=0; i<copy_npages; i+=nbpages) { |
622 | |
623 | nbpages = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
624 | if (nbpages > copy_npages-i) |
625 | nbpages = copy_npages-i; |
626 | |
627 | /* allocate a request */ |
628 | spl_t spl = splsched(); |
629 | while(1) { |
630 | simple_lock(&bd->lock); |
631 | if (!RING_FULL(&bd->ring)) |
632 | break; |
633 | thread_sleep(bd, &bd->lock, FALSE); |
634 | } |
635 | mb(); |
636 | reqn = bd->ring.req_prod_pvt++; |
637 | simple_lock(&bd->pushlock); |
638 | simple_unlock(&bd->lock); |
639 | (void) splx(spl); |
640 | |
641 | req = RING_GET_REQUEST(&bd->ring, reqn); |
642 | req->operation = BLKIF_OP_WRITE; |
643 | req->nr_segments = nbpages; |
644 | req->handle = bd->handle; |
645 | req->id = (unsigned64_t) (unsigned long) &err; /* pointer on the stack */ |
646 | req->sector_number = bn + i*PAGE_SIZE / 512; |
647 | |
648 | for (j = 0; j < nbpages; j++) { |
649 | req->seg[j].gref = gref[j] = hyp_grant_give(bd->domid, atop(phys_addrs[i + j]), 1); |
650 | req->seg[j].first_sect = 0; |
651 | size = PAGE_SIZE; |
652 | if ((i + j + 1) * PAGE_SIZE > count) |
653 | size = count - (i + j) * PAGE_SIZE; |
654 | req->seg[j].last_sect = size/512 - 1; |
655 | } |
656 | |
657 | /* no need for a lock: as long as the request is not pushed, the event won't be triggered */ |
658 | assert_wait((event_t) &err, FALSE); |
659 | |
660 | int notify; |
661 | wmb(); /* make sure it sees requests */ |
662 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bd->ring, notify); |
663 | if (notify) |
664 | hyp_event_channel_send(bd->evt); |
665 | simple_unlock(&bd->pushlock); |
666 | |
667 | thread_block(NULL); |
668 | |
669 | for (j = 0; j < nbpages; j++) |
670 | hyp_grant_takeback(gref[j]); |
671 | |
672 | if (err) { |
673 | printf("error writing %u bytes at sector %d\n", count, bn); |
674 | break; |
675 | } |
676 | } |
677 | |
678 | if (aligned_buffer) |
679 | kmem_free(device_io_map, aligned_buffer, count); |
680 | |
681 | vm_map_copy_discard (copy); |
682 | |
683 | if (!err) |
684 | *bytes_written = count; |
685 | |
686 | if (IP_VALID(reply_port)) |
687 | ds_device_write_reply (reply_port, reply_port_type, err, count); |
688 | |
689 | return MIG_NO_REPLY; |
690 | } |
691 | |
692 | static io_return_t |
693 | device_get_status(void *d, dev_flavor_t flavor, dev_status_t status, |
694 | mach_msg_type_number_t *status_count) |
695 | { |
696 | struct block_data *bd = d; |
697 | |
698 | switch (flavor) |
699 | { |
700 | case DEV_GET_SIZE: |
701 | status[DEV_GET_SIZE_DEVICE_SIZE] = (unsigned long long) bd->nr_sectors * 512; |
702 | status[DEV_GET_SIZE_RECORD_SIZE] = bd->sector_size; |
703 | *status_count = DEV_GET_SIZE_COUNT; |
704 | break; |
705 | case DEV_GET_RECORDS: |
706 | status[DEV_GET_RECORDS_DEVICE_RECORDS] = ((unsigned long long) bd->nr_sectors * 512) / bd->sector_size; |
707 | status[DEV_GET_RECORDS_RECORD_SIZE] = bd->sector_size; |
708 | *status_count = DEV_GET_RECORDS_COUNT; |
709 | break; |
710 | default: |
711 | printf("TODO: block_%s(%d)\n", __func__, flavor); |
712 | return D_INVALID_OPERATION; |
713 | } |
714 | return D_SUCCESS; |
715 | } |
716 | |
717 | struct device_emulation_ops hyp_block_emulation_ops = { |
718 | NULL, /* dereference */ |
719 | NULL, /* deallocate */ |
720 | dev_to_port, |
721 | device_open, |
722 | device_close, |
723 | device_write, |
724 | NULL, /* write_inband */ |
725 | device_read, |
726 | NULL, /* read_inband */ |
727 | NULL, /* set_status */ |
728 | device_get_status, |
729 | NULL, /* set_filter */ |
730 | NULL, /* map */ |
731 | NULL, /* no_senders */ |
732 | NULL, /* write_trap */ |
733 | NULL, /* writev_trap */ |
734 | }; |