1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
|
/*
* Copyright (C) 2006-2009 Free Software Foundation
*
* This program is free software ; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation ; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY ; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with the program ; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <mach/machine/asm.h>
#include <i386/i386asm.h>
#include <i386/cpu_number.h>
#include <i386/xen.h>
.data 2
/*
 * Reentrancy flag: non-zero while hyp_c_callback is being run, so that
 * hyp_sti (below) does not recursively re-enter the pending-event poll
 * while a callback is already in progress.
 * NOTE(review): a single shared word, not a per-CPU variable — presumably
 * this port assumes a uniprocessor guest; confirm before enabling SMP.
 */
int_active:
.long 0
.text
/* Entry points handed to the Xen hypervisor at callback registration:
 * the normal event upcall and the segment-reload failsafe. */
.globl hyp_callback, hyp_failsafe_callback
P2ALIGN(TEXT_ALIGN)
/*
 * Xen event upcall: the hypervisor enters here when an event (virtual
 * interrupt) is delivered.  Push %eax to match the frame layout that
 * the common interrupt path expects, then join it; that path is what
 * eventually reaches ENTRY(interrupt) below.
 */
hyp_callback:
pushl %eax
jmp EXT(all_intrs)
/*
 * Common interrupt body, called from the shared interrupt path.
 * Bumps int_active around the C-level dispatcher so hyp_sti can tell
 * that a callback is already running, then re-enables interrupts.
 * NOTE(review): this is the literal sti instruction, not the hyp_sti
 * wrapper below — confirm that is intended under Xen PV, where event
 * masking normally goes through the shared-info page instead.
 */
ENTRY(interrupt)
incl int_active /* currently handling interrupts */
call EXT(hyp_c_callback) /* call generic interrupt routine */
decl int_active /* stopped handling interrupts */
sti
ret
/* FIXME: if we're _very_ unlucky, we may be re-interrupted, filling stack
*
* Far from trivial, see mini-os. That said, maybe we could just, before popping
* everything (which is _not_ destructive), save sp into a known place and use
* it+jmp back?
*
* Mmm, there seems to be an iret hypcall that does exactly what we want:
* perform iret, and if IF is set, clear the interrupt mask.
*/
/* Pfff, we have to check pending interrupts ourselves. Some other DomUs just make a hypercall for retriggering the irq. Not sure it's really easier/faster */
/*
 * hyp_sti — re-enable Xen event delivery ("sti" for a PV guest).
 *
 * Xen PV guests mask events through a byte in the shared-info page
 * (hyp_shared_info+CPU_CLI) rather than EFLAGS.IF, and the hypervisor
 * does not spontaneously re-deliver events that became pending while
 * masked — so after clearing the mask we must check the pending byte
 * (hyp_shared_info+CPU_PENDING) ourselves and invoke the C callback by
 * hand, looping until no event is pending.  int_active guards against
 * doing this recursively from within a callback already in progress.
 */
ENTRY(hyp_sti)
pushl %ebp
movl %esp, %ebp
_hyp_sti:
movb $0,hyp_shared_info+CPU_CLI /* Enable interrupts */
cmpl $0,int_active /* Check whether we were already checking pending interrupts */
jz 0f
popl %ebp
ret /* Already active, just return */
0:
/* Not active, check pending interrupts by hand */
/* no memory barrier needed on x86 */
cmpb $0,hyp_shared_info+CPU_PENDING
jne 0f
popl %ebp
ret
0:
/* Event pending: re-mask delivery while we run the handler ourselves. */
movb $0xff,hyp_shared_info+CPU_CLI
1:
/* Save caller-saved registers around the C callback. */
pushl %eax
pushl %ecx
pushl %edx
incl int_active /* currently handling interrupts */
/* Two zero argument slots for hyp_c_callback — presumably matching the
 * frame the regular callback path builds; TODO confirm against its
 * C prototype. */
pushl $0
pushl $0
call EXT(hyp_c_callback)
popl %edx /* drop the two argument slots... */
popl %edx
popl %edx /* ...then restore the saved registers */
popl %ecx
popl %eax
decl int_active /* stopped handling interrupts */
/* More events may have arrived while we ran the handler; repeat. */
cmpb $0,hyp_shared_info+CPU_PENDING
jne 1b
/* Nothing pending: go back and unmask again (re-checking for the race
 * where an event lands between the check above and the unmask). */
jmp _hyp_sti
/* Hypervisor failed to reload segments. Dump them. */
/*
 * Failsafe callback: Xen enters here when reloading a guest segment
 * register faults (e.g. a stale selector after a GDT/LDT change).
 * The active branch reloads all data segment registers from the
 * known-good %ss selector, then hands the stack pointer to the C
 * handler — which presumably reports the state and does not return
 * (TODO confirm; nothing follows the call).
 */
hyp_failsafe_callback:
#if 1
/* load sane segments */
mov %ss, %ax
mov %ax, %ds
mov %ax, %es
mov %ax, %fs
mov %ax, %gs
push %esp
call EXT(hyp_failsafe_c_callback)
#else
/* Alternative: pop the selectors the hypervisor saved on the stack
 * back into the segment registers and resume the interrupted code. */
popl %ds
popl %es
popl %fs
popl %gs
iret
#endif
|