File: | obj-scan-build/../xen/block.c |
Location: | line 250, column 4 |
Description: | Value stored to 'i' is never read |
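Suggested fix (editor's sketch, not part of the analyzer output): the value returned by sprintf() at line 250 is stored into 'i' but never read afterwards, so, assuming the formatted length really is unneeded there, the dead store can simply be dropped:

  /* port_name is all that is used afterwards; discard the length
     returned by sprintf() instead of storing it into 'i'. */
  sprintf(port_name, "%lu", evt);

The similar assignment at line 241 (i = sprintf(port_name, "%d", grant);) appears to be overwritten at line 250 without being read and could be treated the same way.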
1 | /* |
2 | * Copyright (C) 2006-2009, 2011 Free Software Foundation |
3 | * |
4 | * This program is free software ; you can redistribute it and/or modify |
5 | * it under the terms of the GNU General Public License as published by |
6 | * the Free Software Foundation ; either version 2 of the License, or |
7 | * (at your option) any later version. |
8 | * |
9 | * This program is distributed in the hope that it will be useful, |
10 | * but WITHOUT ANY WARRANTY ; without even the implied warranty of |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
12 | * GNU General Public License for more details. |
13 | * |
14 | * You should have received a copy of the GNU General Public License |
15 | * along with the program ; if not, write to the Free Software |
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
17 | */ |
18 | |
19 | #include <sys/types.h> |
20 | #include <mach/mig_errors.h> |
21 | #include <kern/kalloc.h> |
22 | #include <ipc/ipc_port.h> |
23 | #include <ipc/ipc_space.h> |
24 | #include <vm/vm_kern.h> |
25 | #include <vm/vm_user.h> |
26 | #include <device/device_types.h> |
27 | #include <device/device_port.h> |
28 | #include <device/disk_status.h> |
29 | #include <device/device_reply.user.h> |
30 | #include <device/device_emul.h> |
31 | #include <device/ds_routines.h> |
32 | #include <xen/public/io/blkif.h> |
33 | #include <xen/evt.h> |
34 | #include <string.h> |
35 | #include <util/atoi.h> |
36 | #include "store.h" |
37 | #include "block.h" |
38 | #include "grant.h" |
39 | #include "ring.h" |
40 | #include "xen.h" |
41 | |
42 | /* Hypervisor part */ |
43 | |
44 | struct block_data { |
45 | struct device device; |
46 | char *name; |
47 | int open_count; |
48 | char *backend; |
49 | domid_t domid; |
50 | char *vbd; |
51 | int handle; |
52 | unsigned info; |
53 | dev_mode_t mode; |
54 | unsigned sector_size; |
55 | unsigned long nr_sectors; |
56 | ipc_port_t port; |
57 | blkif_front_ring_t ring; |
58 | evtchn_port_t evt; |
59 | simple_lock_data_t lock; |
60 | simple_lock_data_t pushlock; |
61 | }; |
62 | |
63 | static int n_vbds; |
64 | static struct block_data *vbd_data; |
65 | |
66 | struct device_emulation_ops hyp_block_emulation_ops; |
67 | |
68 | static void hyp_block_intr(int unit) { |
69 | struct block_data *bd = &vbd_data[unit]; |
70 | blkif_response_t *rsp; |
71 | int more; |
72 | io_return_t *err; |
73 | |
74 | simple_lock(&bd->lock); |
75 | more = RING_HAS_UNCONSUMED_RESPONSES(&bd->ring); |
76 | while (more) { |
77 | rmb(); /* make sure we see responses */ |
78 | rsp = RING_GET_RESPONSE(&bd->ring, bd->ring.rsp_cons++); |
79 | err = (void *) (unsigned long) rsp->id; |
80 | switch (rsp->status) { |
81 | case BLKIF_RSP_ERROR: |
82 | *err = D_IO_ERROR; |
83 | break; |
84 | case BLKIF_RSP_OKAY: |
85 | break; |
86 | default: |
87 | printf("Unrecognized blkif status %d\n", rsp->status); |
88 | goto drop; |
89 | } |
90 | thread_wakeup(err); |
91 | drop: |
92 | thread_wakeup_one(bd); |
93 | RING_FINAL_CHECK_FOR_RESPONSES(&bd->ring, more); |
94 | } |
95 | simple_unlock(&bd->lock); |
96 | } |
97 | |
98 | #define VBD_PATH "device/vbd" |
99 | void hyp_block_init(void) { |
100 | char **vbds, **vbd; |
101 | char *c; |
102 | int i, disk, partition; |
103 | int n; |
104 | int grant; |
105 | char port_name[10]; |
106 | char *prefix; |
107 | char device_name[32]; |
108 | domid_t domid; |
109 | evtchn_port_t evt; |
110 | hyp_store_transaction_t t; |
111 | vm_offset_t addr; |
112 | struct block_data *bd; |
113 | blkif_sring_t *ring; |
114 | |
115 | vbds = hyp_store_ls(0, 1, VBD_PATH); |
116 | if (!vbds) { |
117 | printf("hd: No block device (%s). Hoping you don't need any\n", hyp_store_error); |
118 | n_vbds = 0; |
119 | return; |
120 | } |
121 | |
122 | n = 0; |
123 | for (vbd = vbds; *vbd; vbd++) |
124 | n++; |
125 | |
126 | vbd_data = (void*) kalloc(n * sizeof(*vbd_data)); |
127 | if (!vbd_data) { |
128 | printf("hd: No memory room for VBD\n"); |
129 | n_vbds = 0; |
130 | return; |
131 | } |
132 | n_vbds = n; |
133 | |
134 | for (n = 0; n < n_vbds; n++) { |
135 | bd = &vbd_data[n]; |
136 | mach_atoi((u_char *) vbds[n], &bd->handle); |
137 | if (bd->handle == MACH_ATOI_DEFAULT) |
138 | continue; |
139 | |
140 | bd->open_count = -2; |
141 | bd->vbd = vbds[n]; |
142 | |
143 | /* Get virtual number. */ |
144 | i = hyp_store_read_int(0, 5, VBD_PATH, "/", vbds[n], "/", "virtual-device"); |
145 | if (i == -1) |
146 | panic("hd: couldn't read virtual device of VBD %s\n",vbds[n]); |
147 | if ((i >> 28) == 1) { |
148 | /* xvd, new format */ |
149 | prefix = "xvd"; |
150 | disk = (i >> 8) & ((1 << 20) - 1); |
151 | partition = i & ((1 << 8) - 1); |
152 | } else if ((i >> 8) == 202) { |
153 | /* xvd, old format */ |
154 | prefix = "xvd"; |
155 | disk = (i >> 4) & ((1 << 4) - 1); |
156 | partition = i & ((1 << 4) - 1); |
157 | } else if ((i >> 8) == 8) { |
158 | /* SCSI */ |
159 | prefix = "sd"; |
160 | disk = (i >> 4) & ((1 << 4) - 1); |
161 | partition = i & ((1 << 4) - 1); |
162 | } else if ((i >> 8) == 3) { |
163 | /* IDE primary */ |
164 | prefix = "hd"; |
165 | disk = (i >> 6) & ((1 << 2) - 1); |
166 | partition = i & ((1 << 6) - 1); |
167 | } else if ((i >> 8) == 22) { |
168 | /* IDE secondary */ |
169 | prefix = "hd"; |
170 | disk = ((i >> 6) & ((1 << 2) - 1)) + 2; |
171 | partition = i & ((1 << 6) - 1); |
172 | } else if ((i >> 8) == 33) { |
173 | /* IDE 3 */ |
174 | prefix = "hd"; |
175 | disk = ((i >> 6) & ((1 << 2) - 1)) + 4; |
176 | partition = i & ((1 << 6) - 1); |
177 | } else if ((i >> 8) == 34) { |
178 | /* IDE 4 */ |
179 | prefix = "hd"; |
180 | disk = ((i >> 6) & ((1 << 2) - 1)) + 6; |
181 | partition = i & ((1 << 6) - 1); |
182 | } else if ((i >> 8) == 56) { |
183 | /* IDE 5 */ |
184 | prefix = "hd"; |
185 | disk = ((i >> 6) & ((1 << 2) - 1)) + 8; |
186 | partition = i & ((1 << 6) - 1); |
187 | } else if ((i >> 8) == 57) { |
188 | /* IDE 6 */ |
189 | prefix = "hd"; |
190 | disk = ((i >> 6) & ((1 << 2) - 1)) + 10; |
191 | partition = i & ((1 << 6) - 1); |
192 | } else if ((i >> 8) == 88) { |
193 | /* IDE 7 */ |
194 | prefix = "hd"; |
195 | disk = ((i >> 6) & ((1 << 2) - 1)) + 12; |
196 | partition = i & ((1 << 6) - 1); |
197 | } else if ((i >> 8) == 89) { |
198 | /* IDE 8 */ |
199 | prefix = "hd"; |
200 | disk = ((i >> 6) & ((1 << 2) - 1)) + 14; |
201 | partition = i & ((1 << 6) - 1); |
202 | } else if ((i >> 8) == 90) { |
203 | /* IDE 9 */ |
204 | prefix = "hd"; |
205 | disk = ((i >> 6) & ((1 << 2) - 1)) + 16; |
206 | partition = i & ((1 << 6) - 1); |
207 | } else if ((i >> 8) == 91) { |
208 | /* IDE 10 */ |
209 | prefix = "hd"; |
210 | disk = ((i >> 6) & ((1 << 2) - 1)) + 18; |
211 | partition = i & ((1 << 6) - 1); |
212 | } else { |
213 | printf("unsupported VBD number %d\n", i); |
214 | continue; |
215 | } |
216 | if (partition) |
217 | sprintf(device_name, "%s%ds%d", prefix, disk, partition); |
218 | else |
219 | sprintf(device_name, "%s%d", prefix, disk); |
220 | bd->name = (char*) kalloc(strlen(device_name) + 1); |
221 | strcpy(bd->name, device_name); |
222 | |
223 | /* Get domain id of backend driver. */ |
224 | i = hyp_store_read_int(0, 5, VBD_PATH, "/", vbds[n], "/", "backend-id"); |
225 | if (i == -1) |
226 | panic("%s: couldn't read backend domid (%s)", device_name, hyp_store_error); |
227 | bd->domid = domid = i; |
228 | |
229 | do { |
230 | t = hyp_store_transaction_start(); |
231 | |
232 | /* Get a page for ring */ |
233 | if ((addr = vm_page_grab_phys_addr()) == -1) |
234 | panic("%s: couldn't allocate space for store ring\n", device_name); |
235 | ring = (void*) phystokv(addr); |
236 | SHARED_RING_INIT(ring); |
237 | FRONT_RING_INIT(&bd->ring, ring, PAGE_SIZE); |
238 | grant = hyp_grant_give(domid, atop(addr), 0); |
239 | |
240 | /* and give it to backend. */ |
241 | i = sprintf(port_name, "%d", grant); |
242 | c = hyp_store_write(t, port_name, 5, VBD_PATH, "/", vbds[n], "/", "ring-ref"); |
243 | if (!c) |
244 | panic("%s: couldn't store ring reference (%s)", device_name, hyp_store_error); |
245 | kfree((vm_offset_t) c, strlen(c)+1); |
246 | |
247 | /* Allocate an event channel and give it to backend. */ |
248 | bd->evt = evt = hyp_event_channel_alloc(domid); |
249 | hyp_evt_handler(evt, hyp_block_intr, n, SPL7); |
250 | i = sprintf(port_name, "%lu", evt); |
Value stored to 'i' is never read | |
251 | c = hyp_store_write(t, port_name, 5, VBD_PATH, "/", vbds[n], "/", "event-channel"); |
252 | if (!c) |
253 | panic("%s: couldn't store event channel (%s)", device_name, hyp_store_error); |
254 | kfree((vm_offset_t) c, strlen(c)+1); |
255 | c = hyp_store_write(t, hyp_store_state_initialized, 5, VBD_PATH, "/", vbds[n], "/", "state"); |
256 | if (!c) |
257 | panic("%s: couldn't store state (%s)", device_name, hyp_store_error); |
258 | kfree((vm_offset_t) c, strlen(c)+1); |
259 | } while (!hyp_store_transaction_stop(t)); |
260 | /* TODO randomly wait? */ |
261 | |
262 | c = hyp_store_read(0, 5, VBD_PATH, "/", vbds[n], "/", "backend"); |
263 | if (!c) |
264 | panic("%s: couldn't get path to backend (%s)", device_name, hyp_store_error); |
265 | bd->backend = c; |
266 | |
267 | while(1) { |
268 | i = hyp_store_read_int(0, 3, bd->backend, "/", "state"); |
269 | if (i == MACH_ATOI_DEFAULT) |
270 | panic("can't read state from %s", bd->backend); |
271 | if (i == XenbusStateConnected) |
272 | break; |
273 | hyp_yield(); |
274 | } |
275 | |
276 | i = hyp_store_read_int(0, 3, bd->backend, "/", "sectors"); |
277 | if (i == -1) |
278 | panic("%s: couldn't get number of sectors (%s)", device_name, hyp_store_error); |
279 | bd->nr_sectors = i; |
280 | |
281 | i = hyp_store_read_int(0, 3, bd->backend, "/", "sector-size"); |
282 | if (i == -1) |
283 | panic("%s: couldn't get sector size (%s)", device_name, hyp_store_error); |
284 | if (i & ~(2*(i-1)+1)) |
285 | panic("sector size %d is not a power of 2\n", i); |
286 | if (i > PAGE_SIZE || PAGE_SIZE % i != 0) |
287 | panic("%s: couldn't handle sector size %d with pages of size %d\n", device_name, i, PAGE_SIZE); |
288 | bd->sector_size = i; |
289 | |
290 | i = hyp_store_read_int(0, 3, bd->backend, "/", "info"); |
291 | if (i == -1) |
292 | panic("%s: couldn't get info (%s)", device_name, hyp_store_error); |
293 | bd->info = i; |
294 | |
295 | c = hyp_store_read(0, 3, bd->backend, "/", "mode"); |
296 | if (!c) |
297 | panic("%s: couldn't get backend's mode (%s)", device_name, hyp_store_error); |
298 | if ((c[0] == 'w') && !(bd->info & VDISK_READONLY)) |
299 | bd->mode = D_READ|D_WRITE; |
300 | else |
301 | bd->mode = D_READ; |
302 | |
303 | c = hyp_store_read(0, 3, bd->backend, "/", "params"); |
304 | if (!c) |
305 | panic("%s: couldn't get backend's real device (%s)", device_name, hyp_store_error); |
306 | |
307 | /* TODO: change suffix */ |
308 | printf("%s: dom%d's VBD %s (%s,%c%s) %ldMB\n", device_name, domid, |
309 | vbds[n], c, bd->mode & D_WRITE ? 'w' : 'r', |
310 | bd->info & VDISK_CDROM ? ", cdrom" : "", |
311 | bd->nr_sectors / ((1<<20) / 512)); |
312 | kfree((vm_offset_t) c, strlen(c)+1); |
313 | |
314 | c = hyp_store_write(0, hyp_store_state_connected, 5, VBD_PATH, "/", bd->vbd, "/", "state"); |
315 | if (!c) |
316 | panic("couldn't store state for %s (%s)", device_name, hyp_store_error); |
317 | kfree((vm_offset_t) c, strlen(c)+1); |
318 | |
319 | bd->open_count = -1; |
320 | bd->device.emul_ops = &hyp_block_emulation_ops; |
321 | bd->device.emul_data = bd; |
322 | simple_lock_init(&bd->lock); |
323 | simple_lock_init(&bd->pushlock); |
324 | } |
325 | } |
326 | |
327 | static ipc_port_t |
328 | dev_to_port(void *d) |
329 | { |
330 | struct block_data *b = d; |
331 | if (!d) |
332 | return IP_NULL; |
333 | return ipc_port_make_send(b->port); |
334 | } |
335 | |
336 | static int |
337 | device_close(void *devp) |
338 | { |
339 | struct block_data *bd = devp; |
340 | if (--bd->open_count < 0) |
341 | panic("too many closes on %s", bd->name); |
342 | printf("close, %s count %d\n", bd->name, bd->open_count); |
343 | if (bd->open_count) |
344 | return 0; |
345 | ipc_kobject_set(bd->port, IKO_NULL, IKOT_NONE); |
346 | ipc_port_dealloc_kernel(bd->port); |
347 | return 0; |
348 | } |
349 | |
350 | static io_return_t |
351 | device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type, |
352 | dev_mode_t mode, char *name, device_t *devp /* out */) |
353 | { |
354 | int i, err = 0; |
355 | ipc_port_t port, notify; |
356 | struct block_data *bd; |
357 | |
358 | for (i = 0; i < n_vbds; i++) |
359 | if (!strcmp(name, vbd_data[i].name)) |
360 | break; |
361 | |
362 | if (i == n_vbds) |
363 | return D_NO_SUCH_DEVICE; |
364 | |
365 | bd = &vbd_data[i]; |
366 | if (bd->open_count == -2) |
367 | /* couldn't be initialized */ |
368 | return D_NO_SUCH_DEVICE; |
369 | |
370 | if ((mode & D_WRITE) && !(bd->mode & D_WRITE)) |
371 | return D_READ_ONLY; |
372 | |
373 | if (bd->open_count >= 0) { |
374 | *devp = &bd->device ; |
375 | bd->open_count++ ; |
376 | printf("re-open, %s count %d\n", bd->name, bd->open_count); |
377 | return D_SUCCESS; |
378 | } |
379 | |
380 | bd->open_count = 1; |
381 | printf("%s count %d\n", bd->name, bd->open_count); |
382 | |
383 | port = ipc_port_alloc_kernel(); |
384 | if (port == IP_NULL) { |
385 | device_close(bd); |
386 | return KERN_RESOURCE_SHORTAGE; |
387 | } |
388 | bd->port = port; |
389 | |
390 | *devp = &bd->device; |
391 | |
392 | ipc_kobject_set (port, (ipc_kobject_t) &bd->device, IKOT_DEVICE); |
393 | |
394 | notify = ipc_port_make_sonce (bd->port); |
395 | ip_lock (bd->port); |
396 | ipc_port_nsrequest (bd->port, 1, notify, &notify); |
397 | assert (notify == IP_NULL); |
398 | |
399 | if (IP_VALID (reply_port)) |
400 | ds_device_open_reply (reply_port, reply_port_type, D_SUCCESS, port); |
401 | else |
402 | device_close(bd); |
403 | return MIG_NO_REPLY; |
404 | } |
405 | |
406 | static io_return_t |
407 | device_read (void *d, ipc_port_t reply_port, |
408 | mach_msg_type_name_t reply_port_type, dev_mode_t mode, |
409 | recnum_t bn, int count, io_buf_ptr_t *data, |
410 | unsigned *bytes_read) |
411 | { |
412 | int resid, amt; |
413 | io_return_t err = 0; |
414 | vm_page_t pages[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
415 | grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
416 | int nbpages; |
417 | vm_map_copy_t copy; |
418 | vm_offset_t offset, alloc_offset, o; |
419 | vm_object_t object; |
420 | vm_page_t m; |
421 | vm_size_t len, size; |
422 | struct block_data *bd = d; |
423 | struct blkif_request *req; |
424 | |
425 | *data = 0; |
426 | *bytes_read = 0; |
427 | |
428 | if (count < 0) |
429 | return D_INVALID_SIZE; |
430 | if (count == 0) |
431 | return 0; |
432 | |
433 | /* Allocate an object to hold the data. */ |
434 | size = round_page (count); |
435 | object = vm_object_allocate (size); |
436 | if (! object) |
437 | { |
438 | err = D_NO_MEMORY; |
439 | goto out; |
440 | } |
441 | alloc_offset = offset = 0; |
442 | resid = count; |
443 | |
444 | while (resid && !err) |
445 | { |
446 | unsigned reqn; |
447 | int i; |
448 | int last_sect; |
449 | |
450 | nbpages = 0; |
451 | |
452 | /* Determine size of I/O this time around. */ |
453 | len = round_page(offset + resid) - offset; |
454 | if (len > PAGE_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST) |
455 | len = PAGE_SIZE * BLKIF_MAX_SEGMENTS_PER_REQUEST; |
456 | |
457 | /* Allocate pages. */ |
458 | while (alloc_offset < offset + len) |
459 | { |
460 | while ((m = vm_page_grab (FALSE)) == 0) |
461 | VM_PAGE_WAIT (0); |
462 | assert (! m->active && ! m->inactive); |
463 | m->busy = TRUE; |
464 | assert(nbpages < BLKIF_MAX_SEGMENTS_PER_REQUEST); |
465 | pages[nbpages++] = m; |
466 | alloc_offset += PAGE_SIZE; |
467 | } |
468 | |
469 | /* Do the read. */ |
470 | amt = len; |
471 | if (amt > resid) |
472 | amt = resid; |
473 | |
474 | /* allocate a request */ |
475 | spl_t spl = splsched(); |
476 | while(1) { |
477 | simple_lock(&bd->lock); |
478 | if (!RING_FULL(&bd->ring)) |
479 | break; |
480 | thread_sleep(bd, &bd->lock, FALSE); |
481 | } |
482 | mb(); |
483 | reqn = bd->ring.req_prod_pvt++;; |
484 | simple_lock(&bd->pushlock); |
485 | simple_unlock(&bd->lock); |
486 | (void) splx(spl); |
487 | |
488 | req = RING_GET_REQUEST(&bd->ring, reqn); |
489 | req->operation = BLKIF_OP_READ; |
490 | req->nr_segments = nbpages; |
491 | req->handle = bd->handle; |
492 | req->id = (unsigned64_t) (unsigned long) &err; /* pointer on the stack */ |
493 | req->sector_number = bn + offset / 512; |
494 | for (i = 0; i < nbpages; i++) { |
495 | req->seg[i].gref = gref[i] = hyp_grant_give(bd->domid, atop(pages[i]->phys_addr), 0); |
496 | req->seg[i].first_sect = 0; |
497 | req->seg[i].last_sect = PAGE_SIZE/512 - 1; |
498 | } |
499 | last_sect = ((amt - 1) & PAGE_MASK) / 512; |
500 | req->seg[nbpages-1].last_sect = last_sect; |
501 | |
502 | memset((void*) phystokv(pages[nbpages-1]->phys_addr |
503 | + (last_sect + 1) * 512), |
504 | 0, PAGE_SIZE - (last_sect + 1) * 512); |
505 | |
506 | /* no need for a lock: as long as the request is not pushed, the event won't be triggered */ |
507 | assert_wait((event_t) &err, FALSE); |
508 | |
509 | int notify; |
510 | wmb(); /* make sure it sees requests */ |
511 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bd->ring, notify); |
512 | if (notify) |
513 | hyp_event_channel_send(bd->evt); |
514 | simple_unlock(&bd->pushlock); |
515 | |
516 | thread_block(NULL); |
517 | |
518 | if (err) |
519 | printf("error reading %d bytes at sector %ld\n", amt, |
520 | bn + offset / 512); |
521 | |
522 | for (i = 0; i < nbpages; i++) |
523 | hyp_grant_takeback(gref[i]); |
524 | |
525 | /* Compute number of pages to insert in object. */ |
526 | o = offset; |
527 | |
528 | resid -= amt; |
529 | if (resid == 0) |
530 | offset = o + len; |
531 | else |
532 | offset += amt; |
533 | |
534 | /* Add pages to the object. */ |
535 | vm_object_lock (object); |
536 | for (i = 0; i < nbpages; i++) |
537 | { |
538 | m = pages[i]; |
539 | assert (m->busy); |
540 | vm_page_lock_queues (); |
541 | PAGE_WAKEUP_DONE (m); |
542 | m->dirty = TRUE; |
543 | vm_page_insert (m, object, o); |
544 | vm_page_unlock_queues (); |
545 | o += PAGE_SIZE; |
546 | } |
547 | vm_object_unlock (object); |
548 | } |
549 | |
550 | out: |
551 | if (! err) |
552 | err = vm_map_copyin_object (object, 0, round_page (count), &copy); |
553 | if (! err) |
554 | { |
555 | *data = (io_buf_ptr_t) copy; |
556 | *bytes_read = count - resid; |
557 | } |
558 | else |
559 | vm_object_deallocate (object); |
560 | return err; |
561 | } |
562 | |
563 | static io_return_t |
564 | device_write(void *d, ipc_port_t reply_port, |
565 | mach_msg_type_name_t reply_port_type, dev_mode_t mode, |
566 | recnum_t bn, io_buf_ptr_t data, unsigned int count, |
567 | int *bytes_written) |
568 | { |
569 | io_return_t err = 0; |
570 | vm_map_copy_t copy = (vm_map_copy_t) data; |
571 | vm_offset_t aligned_buffer = 0; |
572 | int copy_npages = atop(round_page(count)); |
573 | vm_offset_t phys_addrs[copy_npages]; |
574 | struct block_data *bd = d; |
575 | blkif_request_t *req; |
576 | grant_ref_t gref[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
577 | unsigned reqn, size; |
578 | int i, nbpages, j; |
579 | |
580 | if (!(bd->mode & D_WRITE)) |
581 | return D_READ_ONLY; |
582 | |
583 | if (count == 0) { |
584 | vm_map_copy_discard(copy); |
585 | return 0; |
586 | } |
587 | |
588 | if (count % bd->sector_size) |
589 | return D_INVALID_SIZE; |
590 | |
591 | if (count > copy->size) |
592 | return D_INVALID_SIZE; |
593 | |
594 | if (copy->type != VM_MAP_COPY_PAGE_LIST || copy->offset & PAGE_MASK) { |
595 | /* Unaligned write. Has to copy data before passing it to the backend. */ |
596 | kern_return_t kr; |
597 | vm_offset_t buffer; |
598 | |
599 | kr = kmem_alloc(device_io_map, &aligned_buffer, count); |
600 | if (kr != KERN_SUCCESS) |
601 | return kr; |
602 | |
603 | kr = vm_map_copyout(device_io_map, &buffer, vm_map_copy_copy(copy)); |
604 | if (kr != KERN_SUCCESS) { |
605 | kmem_free(device_io_map, aligned_buffer, count); |
606 | return kr; |
607 | } |
608 | |
609 | memcpy((void*) aligned_buffer, (void*) buffer, count); |
610 | |
611 | vm_deallocate (device_io_map, buffer, count); |
612 | |
613 | for (i = 0; i < copy_npages; i++) |
614 | phys_addrs[i] = kvtophys(aligned_buffer + ptoa(i)); |
615 | } else { |
616 | for (i = 0; i < copy_npages; i++) |
617 | phys_addrs[i] = copy->cpy_page_list[i]->phys_addr; |
618 | } |
619 | |
620 | for (i=0; i<copy_npages; i+=nbpages) { |
621 | |
622 | nbpages = BLKIF_MAX_SEGMENTS_PER_REQUEST; |
623 | if (nbpages > copy_npages-i) |
624 | nbpages = copy_npages-i; |
625 | |
626 | /* allocate a request */ |
627 | spl_t spl = splsched(); |
628 | while(1) { |
629 | simple_lock(&bd->lock); |
630 | if (!RING_FULL(&bd->ring)) |
631 | break; |
632 | thread_sleep(bd, &bd->lock, FALSE); |
633 | } |
634 | mb(); |
635 | reqn = bd->ring.req_prod_pvt++;; |
636 | simple_lock(&bd->pushlock); |
637 | simple_unlock(&bd->lock); |
638 | (void) splx(spl); |
639 | |
640 | req = RING_GET_REQUEST(&bd->ring, reqn); |
641 | req->operation = BLKIF_OP_WRITE; |
642 | req->nr_segments = nbpages; |
643 | req->handle = bd->handle; |
644 | req->id = (unsigned64_t) (unsigned long) &err; /* pointer on the stack */ |
645 | req->sector_number = bn + i*PAGE_SIZE / 512; |
646 | |
647 | for (j = 0; j < nbpages; j++) { |
648 | req->seg[j].gref = gref[j] = hyp_grant_give(bd->domid, atop(phys_addrs[i + j]), 1); |
649 | req->seg[j].first_sect = 0; |
650 | size = PAGE_SIZE; |
651 | if ((i + j + 1) * PAGE_SIZE > count) |
652 | size = count - (i + j) * PAGE_SIZE; |
653 | req->seg[j].last_sect = size/512 - 1; |
654 | } |
655 | |
656 | /* no need for a lock: as long as the request is not pushed, the event won't be triggered */ |
657 | assert_wait((event_t) &err, FALSE); |
658 | |
659 | int notify; |
660 | wmb(); /* make sure it sees requests */ |
661 | RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bd->ring, notify); |
662 | if (notify) |
663 | hyp_event_channel_send(bd->evt); |
664 | simple_unlock(&bd->pushlock); |
665 | |
666 | thread_block(NULL); |
667 | |
668 | for (j = 0; j < nbpages; j++) |
669 | hyp_grant_takeback(gref[j]); |
670 | |
671 | if (err) { |
672 | printf("error writing %u bytes at sector %d\n", count, bn); |
673 | break; |
674 | } |
675 | } |
676 | |
677 | if (aligned_buffer) |
678 | kmem_free(device_io_map, aligned_buffer, count); |
679 | |
680 | vm_map_copy_discard (copy); |
681 | |
682 | if (!err) |
683 | *bytes_written = count; |
684 | |
685 | if (IP_VALID(reply_port)) |
686 | ds_device_write_reply (reply_port, reply_port_type, err, count); |
687 | |
688 | return MIG_NO_REPLY; |
689 | } |
690 | |
691 | static io_return_t |
692 | device_get_status(void *d, dev_flavor_t flavor, dev_status_t status, |
693 | mach_msg_type_number_t *status_count) |
694 | { |
695 | struct block_data *bd = d; |
696 | |
697 | switch (flavor) |
698 | { |
699 | case DEV_GET_SIZE: |
700 | status[DEV_GET_SIZE_DEVICE_SIZE] = (unsigned long long) bd->nr_sectors * 512; |
701 | status[DEV_GET_SIZE_RECORD_SIZE] = bd->sector_size; |
702 | *status_count = DEV_GET_SIZE_COUNT; |
703 | break; |
704 | case DEV_GET_RECORDS: |
705 | status[DEV_GET_RECORDS_DEVICE_RECORDS] = ((unsigned long long) bd->nr_sectors * 512) / bd->sector_size; |
706 | status[DEV_GET_RECORDS_RECORD_SIZE] = bd->sector_size; |
707 | *status_count = DEV_GET_RECORDS_COUNT; |
708 | break; |
709 | default: |
710 | printf("TODO: block_%s(%d)\n", __func__, flavor); |
711 | return D_INVALID_OPERATION; |
712 | } |
713 | return D_SUCCESS; |
714 | } |
715 | |
716 | struct device_emulation_ops hyp_block_emulation_ops = { |
717 | NULL, /* dereference */ |
718 | NULL, /* deallocate */ |
719 | dev_to_port, |
720 | device_open, |
721 | device_close, |
722 | device_write, |
723 | NULL, /* write_inband */ |
724 | device_read, |
725 | NULL, /* read_inband */ |
726 | NULL, /* set_status */ |
727 | device_get_status, |
728 | NULL, /* set_filter */ |
729 | NULL, /* map */ |
730 | NULL, /* no_senders */ |
731 | NULL, /* write_trap */ |
732 | NULL, /* writev_trap */ |
733 | }; |