Bug Summary

File: obj-scan-build/../i386/i386/fpu.c
Location: line 676, column 14
Description: Dereference of null pointer

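The trace below ends in fpexterrflt(): after fphandleerr() returns 0, the exception status is read through thread->pcb->ims.ifps, which the analyzer assumes may still be a null pointer, because fp_save() only stores into a save area that already exists. A minimal sketch of a defensive guard is given here for orientation; the guard placement and the fallback status of 0 are assumptions for illustration, not part of the reported source or of any upstream fix.

    /* Hypothetical guard before raising the exception in fpexterrflt():
     * read fp_status only when a save area is actually allocated.
     * The fallback value of 0 is an assumption, not taken from the source. */
    struct i386_fpsave_state *ifps = thread->pcb->ims.ifps;
    unsigned short status = 0;
    if (ifps != 0)
        status = (fp_kind == FP_387X) ? ifps->xfp_save_state.fp_status
                                      : ifps->fp_save_state.fp_status;
    i386_exception(EXC_ARITHMETIC, EXC_I386_EXTERR, status);
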
Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1992-1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27/*
28 * Copyright (C) 1994 Linus Torvalds
29 *
30 * Pentium III FXSR, SSE support
31 * General FPU state handling cleanups
32 * Gareth Hughes <gareth@valinux.com>, May 2000
33 */
34
35/*
36 * Support for 80387 floating point or FP emulator.
37 */
38
39#include <string.h>
40
41#include <mach/exception.h>
42#include <mach/machine/thread_status.h>
43#include <mach/machine/fp_reg.h>
44
45#include <kern/debug.h>
46#include <machine/machspl.h> /* spls */
47#include <kern/printf.h>
48#include <kern/thread.h>
49#include <kern/slab.h>
50
51#include <i386/thread.h>
52#include <i386/fpu.h>
53#include <i386/pio.h>
54#include <i386/pic.h>
55#include <i386/locore.h>
56#include <i386/trap.h>
57#include "cpu_number.h"
58
59#if 0
60#include <i386/ipl.h>
61#define ASSERT_IPL(L) \
62{ \
63 if (curr_ipl != L) { \
64 printf("IPL is %d, expected %d\n", curr_ipl, L); \
65 panic("fpu: wrong ipl"); \
66 } \
67}
68#else
69#define ASSERT_IPL(L)
70#endif
71
72int fp_kind = FP_387; /* 80387 present */
73struct kmem_cache ifps_cache; /* cache for FPU save area */
74static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
75
76#if NCPUS == 1
77volatile thread_t fp_thread = THREAD_NULL;
78 /* thread whose state is in FPU */
79 /* always THREAD_NULL if emulating
80 FPU */
81volatile thread_t fp_intr_thread = THREAD_NULL;
82
83
84#define clear_fpu() \
85 { \
86 set_ts(); \
87 fp_thread = THREAD_NULL; \
88 }
89
90#else /* NCPUS > 1 */
91#define clear_fpu() \
92 { \
93 set_ts(); \
94 }
95
96#endif
97
98
99/*
100 * Look for FPU and initialize it.
101 * Called on each CPU.
102 */
103void
104init_fpu(void)
105{
106 unsigned short status, control;
107
108#ifdef MACH_RING1
109 clear_ts();
110#else /* MACH_RING1 */
111 unsigned int native = 0;
112
113 if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
114 native = CR0_NE;
115
116 /*
117 * Check for FPU by initializing it,
118 * then trying to read the correct bit patterns from
119 * the control and status registers.
120 */
121 set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native); /* allow use of FPU */
122#endif /* MACH_RING1 */
123
124 fninit();
125 status = fnstsw();
126 fnstcw(&control);
127
128 if ((status & 0xff) == 0 &&
129 (control & 0x103f) == 0x3f)
130 {
131 /*
132 * We have a FPU of some sort.
133 * Compare -infinity against +infinity
134 * to check whether we have a 287 or a 387.
135 */
136 volatile double fp_infinity, fp_one, fp_zero;
137 fp_one = 1.0;
138 fp_zero = 0.0;
139 fp_infinity = fp_one / fp_zero;
140 if (fp_infinity == -fp_infinity) {
141 /*
142 * We have an 80287.
143 */
144 fp_kind = FP_287;
145 asm volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */
146 }
147 else {
148 /*
149 * We have a 387.
150 */
151 if (CPU_HAS_FEATURE(CPU_FEATURE_FXSR)) {
152 static /* because we _need_ alignment */
153 struct i386_xfp_save save;
154 unsigned long mask;
155 fp_kind = FP_387X;
156#ifndef MACH_RING1
157 set_cr4(get_cr4() | CR4_OSFXSR);
158#endif /* MACH_RING1 */
159 fxsave(&save);
160 mask = save.fp_mxcsr_mask;
161 if (!mask)
162 mask = 0x0000ffbf;
163 mxcsr_feature_mask &= mask;
164 } else
165 fp_kind = FP_387;
166 }
167#ifdef MACH_RING1
168 set_ts();
169#else /* MACH_RING1 */
170 /*
171 * Trap wait instructions. Turn off FPU for now.
172 */
173 set_cr0(get_cr0() | CR0_TS | CR0_MP);
174#endif /* MACH_RING1 */
175 }
176 else {
177 /*
178 * NO FPU.
179 */
180 panic("No FPU!");
181 }
182}
183
184/*
185 * Initialize FP handling.
186 */
187void
188fpu_module_init(void)
189{
190 kmem_cache_init(&ifps_cache, "i386_fpsave_state",
191 sizeof(struct i386_fpsave_state), 16,
192 NULL, NULL, NULL, 0);
193}
194
195/*
196 * Free a FPU save area.
197 * Called only when thread terminating - no locking necessary.
198 */
199void
200fp_free(struct i386_fpsave_state *fps)
201{
202ASSERT_IPL(SPL0);
203#if NCPUS == 1
204 if ((fp_thread != THREAD_NULL) && (fp_thread->pcb->ims.ifps == fps)) {
205 /*
206 * Make sure we don't get FPU interrupts later for
207 * this thread
208 */
209 clear_ts();
210 fwait();
211
212 /* Mark it free and disable access */
213 clear_fpu();
214 }
215#endif /* NCPUS == 1 */
216 kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
217}
218
219/* The two following functions were stolen from Linux's i387.c */
220static inline unsigned short
221twd_i387_to_fxsr (unsigned short twd)
222{
223 unsigned int tmp; /* to avoid 16 bit prefixes in the code */
224
225 /* Transform each pair of bits into 01 (valid) or 00 (empty) */
226 tmp = ~twd;
227 tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
228 /* and move the valid bits to the lower byte. */
229 tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
230 tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
231 tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
232 return tmp;
233}
234
235static inline unsigned long
236twd_fxsr_to_i387 (struct i386_xfp_save *fxsave)
237{
238 struct {
239 unsigned short significand[4];
240 unsigned short exponent;
241 unsigned short padding[3];
242 } *st = NULL;
243 unsigned long tos = (fxsave->fp_status >> 11) & 7;
244 unsigned long twd = (unsigned long) fxsave->fp_tag;
245 unsigned long tag;
246 unsigned long ret = 0xffff0000u;
247 int i;
248
249#define FPREG_ADDR(f, n) ((void *)&(f)->fp_reg_word + (n) * 16);
250
251 for (i = 0 ; i < 8 ; i++) {
252 if (twd & 0x1) {
253 st = FPREG_ADDR (fxsave, (i - tos) & 7);
254
255 switch (st->exponent & 0x7fff) {
256 case 0x7fff:
257 tag = 2; /* Special */
258 break;
259 case 0x0000:
260 if (!st->significand[0] &&
261 !st->significand[1] &&
262 !st->significand[2] &&
263 !st->significand[3] ) {
264 tag = 1; /* Zero */
265 } else {
266 tag = 2; /* Special */
267 }
268 break;
269 default:
270 if (st->significand[3] & 0x8000) {
271 tag = 0; /* Valid */
272 } else {
273 tag = 2; /* Special */
274 }
275 break;
276 }
277 } else {
278 tag = 3; /* Empty */
279 }
280 ret |= (tag << (2 * i));
281 twd = twd >> 1;
282 }
283 return ret;
284}
285
286/*
287 * Set the floating-point state for a thread.
288 * If the thread is not the current thread, it is
289 * not running (held). Locking needed against
290 * concurrent fpu_set_state or fpu_get_state.
291 */
292kern_return_t
293fpu_set_state(thread, state)
294 const thread_t thread;
295 struct i386_float_state *state;
296{
297 pcb_t pcb = thread->pcb;
298 struct i386_fpsave_state *ifps;
299 struct i386_fpsave_state *new_ifps;
300
301ASSERT_IPL(SPL0);
302 if (fp_kind == FP_NO)
303 return KERN_FAILURE;
304
305#if NCPUS == 1
306
307 /*
308 * If this thread`s state is in the FPU,
309 * discard it; we are replacing the entire
310 * FPU state.
311 */
312 if (fp_thread == thread) {
313 clear_ts();
314 fwait(); /* wait for possible interrupt */
315 clear_fpu(); /* no state in FPU */
316 }
317#endif
318
319 if (state->initialized == 0) {
320 /*
321 * new FPU state is 'invalid'.
322 * Deallocate the fp state if it exists.
323 */
324 simple_lock(&pcb->lock);
325 ifps = pcb->ims.ifps;
326 pcb->ims.ifps = 0;
327 simple_unlock(&pcb->lock);
328
329 if (ifps != 0) {
330 kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
331 }
332 }
333 else {
334 /*
335 * Valid state. Allocate the fp state if there is none.
336 */
337 struct i386_fp_save *user_fp_state;
338 struct i386_fp_regs *user_fp_regs;
339
340 user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
341 user_fp_regs = (struct i386_fp_regs *)
342 &state->hw_state[sizeof(struct i386_fp_save)];
343
344 new_ifps = 0;
345 Retry:
346 simple_lock(&pcb->lock);
347 ifps = pcb->ims.ifps;
348 if (ifps == 0) {
349 if (new_ifps == 0) {
350 simple_unlock(&pcb->lock);
351 new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
352 goto Retry;
353 }
354 ifps = new_ifps;
355 new_ifps = 0;
356 pcb->ims.ifps = ifps;
357 }
358
359 /*
360 * Ensure that reserved parts of the environment are 0.
361 */
362 memset(&ifps->fp_save_state, 0, sizeof(struct i386_fp_save));
363
364 if (fp_kind == FP_387X) {
365 int i;
366
367 ifps->xfp_save_state.fp_control = user_fp_state->fp_control;
368 ifps->xfp_save_state.fp_status = user_fp_state->fp_status;
369 ifps->xfp_save_state.fp_tag = twd_i387_to_fxsr(user_fp_state->fp_tag);
370 ifps->xfp_save_state.fp_eip = user_fp_state->fp_eip;
371 ifps->xfp_save_state.fp_cs = user_fp_state->fp_cs;
372 ifps->xfp_save_state.fp_opcode = user_fp_state->fp_opcode;
373 ifps->xfp_save_state.fp_dp = user_fp_state->fp_dp;
374 ifps->xfp_save_state.fp_ds = user_fp_state->fp_ds;
375 for (i=0; i<8; i++)
376 memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs->fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
377 } else {
378 ifps->fp_save_state.fp_control = user_fp_state->fp_control;
379 ifps->fp_save_state.fp_status = user_fp_state->fp_status;
380 ifps->fp_save_state.fp_tag = user_fp_state->fp_tag;
381 ifps->fp_save_state.fp_eip = user_fp_state->fp_eip;
382 ifps->fp_save_state.fp_cs = user_fp_state->fp_cs;
383 ifps->fp_save_state.fp_opcode = user_fp_state->fp_opcode;
384 ifps->fp_save_state.fp_dp = user_fp_state->fp_dp;
385 ifps->fp_save_state.fp_ds = user_fp_state->fp_ds;
386 ifps->fp_regs = *user_fp_regs;
387 }
388
389 simple_unlock(&pcb->lock);
390 if (new_ifps != 0)
391 kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps);
392 }
393
394 return KERN_SUCCESS;
395}
396
397/*
398 * Get the floating-point state for a thread.
399 * If the thread is not the current thread, it is
400 * not running (held). Locking needed against
401 * concurrent fpu_set_state or fpu_get_state.
402 */
403kern_return_t
404fpu_get_state(thread, state)
405 const thread_t thread;
406 struct i386_float_state *state;
407{
408 pcb_t pcb = thread->pcb;
409 struct i386_fpsave_state *ifps;
410
411ASSERT_IPL(SPL0);
412 if (fp_kind == FP_NO)
413 return KERN_FAILURE;
414
415 simple_lock(&pcb->lock);
416 ifps = pcb->ims.ifps;
417 if (ifps == 0) {
418 /*
419 * No valid floating-point state.
420 */
421 simple_unlock(&pcb->lock);
422 memset(state, 0, sizeof(struct i386_float_state));
423 return KERN_SUCCESS;
424 }
425
426 /* Make sure we`ve got the latest fp state info */
427 /* If the live fpu state belongs to our target */
428#if NCPUS == 1
429 if (thread == fp_thread)
430#else
431 if (thread == current_thread())
432#endif
433 {
434 clear_ts();
435 fp_save(thread);
436 clear_fpu();
437 }
438
439 state->fpkind = fp_kind;
440 state->exc_status = 0;
441
442 {
443 struct i386_fp_save *user_fp_state;
444 struct i386_fp_regs *user_fp_regs;
445
446 state->initialized = ifps->fp_valid;
447
448 user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
449 user_fp_regs = (struct i386_fp_regs *)
450 &state->hw_state[sizeof(struct i386_fp_save)];
451
452 /*
453 * Ensure that reserved parts of the environment are 0.
454 */
455 memset(user_fp_state, 0, sizeof(struct i386_fp_save));
456
457 if (fp_kind == FP_387X) {
458 int i;
459
460 user_fp_state->fp_control = ifps->xfp_save_state.fp_control;
461 user_fp_state->fp_status = ifps->xfp_save_state.fp_status;
462 user_fp_state->fp_tag = twd_fxsr_to_i387(&ifps->xfp_save_state);
463 user_fp_state->fp_eip = ifps->xfp_save_state.fp_eip;
464 user_fp_state->fp_cs = ifps->xfp_save_state.fp_cs;
465 user_fp_state->fp_opcode = ifps->xfp_save_state.fp_opcode;
466 user_fp_state->fp_dp = ifps->xfp_save_state.fp_dp;
467 user_fp_state->fp_ds = ifps->xfp_save_state.fp_ds;
468 for (i=0; i<8; i++)
469 memcpy(&user_fp_regs->fp_reg_word[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
470 } else {
471 user_fp_state->fp_control = ifps->fp_save_state.fp_control;
472 user_fp_state->fp_status = ifps->fp_save_state.fp_status;
473 user_fp_state->fp_tag = ifps->fp_save_state.fp_tag;
474 user_fp_state->fp_eip = ifps->fp_save_state.fp_eip;
475 user_fp_state->fp_cs = ifps->fp_save_state.fp_cs;
476 user_fp_state->fp_opcode = ifps->fp_save_state.fp_opcode;
477 user_fp_state->fp_dp = ifps->fp_save_state.fp_dp;
478 user_fp_state->fp_ds = ifps->fp_save_state.fp_ds;
479 *user_fp_regs = ifps->fp_regs;
480 }
481 }
482 simple_unlock(&pcb->lock);
483
484 return KERN_SUCCESS;
485}
486
487/*
488 * Initialize FPU.
489 *
490 * Raise exceptions for:
491 * invalid operation
492 * divide by zero
493 * overflow
494 *
495 * Use 53-bit precision.
496 */
497void fpinit(void)
498{
499 unsigned short control;
500
501ASSERT_IPL(SPL0);
502 clear_ts();
503 fninit();
504 fnstcw(&control);
505 control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
506 control |= (FPC_PC_53 | /* Set precision */
507 FPC_RC_RN | /* round-to-nearest */
508 FPC_ZE | /* Suppress zero-divide */
509 FPC_OE | /* and overflow */
510 FPC_UE | /* underflow */
511 FPC_IE | /* Allow NaNQs and +-INF */
512 FPC_DE | /* Allow denorms as operands */
513 FPC_PE); /* No trap for precision loss */
514 fldcw(control);
515}
516
517/*
518 * Coprocessor not present.
519 */
520void
521fpnoextflt(void)
522{
523 /*
524 * Enable FPU use.
525 */
526ASSERT_IPL(SPL0);
527 clear_ts();
528#if NCPUS == 1
529
530 /*
531 * If this thread`s state is in the FPU, we are done.
532 */
533 if (fp_thread == current_thread())
534 return;
535
536 /* Make sure we don't do fpsave() in fp_intr while doing fpsave()
537 * here if the current fpu instruction generates an error.
538 */
539 fwait();
540 /*
541 * If another thread`s state is in the FPU, save it.
542 */
543 if (fp_thread != THREAD_NULL) {
544 fp_save(fp_thread);
545 }
546
547 /*
548 * Give this thread the FPU.
549 */
550 fp_thread = current_thread();
551
552#endif /* NCPUS == 1 */
553
554 /*
555 * Load this thread`s state into the FPU.
556 */
557 fp_load(current_thread());
558}
559
560/*
561 * FPU overran end of segment.
562 * Re-initialize FPU. Floating point state is not valid.
563 */
564void
565fpextovrflt(void)
566{
567 thread_t thread = current_thread();
568 pcb_t pcb;
569 struct i386_fpsave_state *ifps;
570
571#if NCPUS == 1
572
573 /*
574 * Is exception for the currently running thread?
575 */
576 if (fp_thread != thread) {
577 /* Uh oh... */
578 panic("fpextovrflt");
579 }
580#endif
581
582 /*
583 * This is a non-recoverable error.
584 * Invalidate the thread`s FPU state.
585 */
586 pcb = thread->pcb;
587 simple_lock(&pcb->lock);
588 ifps = pcb->ims.ifps;
589 pcb->ims.ifps = 0;
590 simple_unlock(&pcb->lock);
591
592 /*
593 * Re-initialize the FPU.
594 */
595 clear_ts();
596 fninit();
597
598 /*
599 * And disable access.
600 */
601 clear_fpu();
602
603 if (ifps)
604 kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
605
606 /*
607 * Raise exception.
608 */
609 i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
610 /*NOTREACHED*/
611}
612
613static int
614fphandleerr(void)
615{
616 thread_t thread = current_thread();
617
618 /*
619 * Save the FPU context to the thread using it.
620 */
621#if NCPUS == 1
622 if (fp_thread == THREAD_NULL) {
623 printf("fphandleerr: FPU not belonging to anyone!\n");
624 clear_ts();
625 fninit();
626 clear_fpu();
627 return 1;
628 }
629
630 if (fp_thread != thread) {
631 /*
632 * FPU exception is for a different thread.
633 * When that thread again uses the FPU an exception will be
634 * raised in fp_load. Remember the condition in fp_valid (== 2).
635 */
636 clear_ts();
637 fp_save(fp_thread);
638 fp_thread->pcb->ims.ifps->fp_valid = 2;
639 fninit();
640 clear_fpu();
641 /* leave fp_intr_thread THREAD_NULL */
642 return 1;
643 }
644#endif /* NCPUS == 1 */
645
646 /*
647 * Save the FPU state and turn off the FPU.
648 */
649 clear_ts();
650 fp_save(thread);
651 fninit();
652 clear_fpu();
653
654 return 0;
655}
656
657/*
658 * FPU error. Called by exception handler.
659 */
660void
661fpexterrflt(void)
662{
663 thread_t thread = current_thread();
664
665 if (fphandleerr())
1. Taking false branch
666 return;
667
668 /*
669 * Raise FPU exception.
670 * Locking not needed on pcb->ims.ifps,
671 * since thread is running.
672 */
673 i386_exception(EXC_ARITHMETIC,
674 EXC_I386_EXTERR,
675 fp_kind == FP_387X ?
2. Assuming 'fp_kind' is equal to 4
3. '?' condition is true
676 thread->pcb->ims.ifps->xfp_save_state.fp_status :
4. Dereference of null pointer
677 thread->pcb->ims.ifps->fp_save_state.fp_status);
678 /*NOTREACHED*/
679}
680
681#ifndef MACH_RING1
682/*
683 * FPU error. Called by AST.
684 */
685void
686fpastintr(void)
687{
688 thread_t thread = current_thread();
689
690ASSERT_IPL(SPL0);
691#if NCPUS == 1
692 /*
693 * Since FPU errors only occur on ESC or WAIT instructions,
694 * the current thread should own the FPU. If it didn`t,
695 * we should have gotten the task-switched interrupt first.
696 */
697 if (fp_thread != THREAD_NULL) {
698 panic("fpexterrflt");
699 return;
700 }
701
702 /*
703 * Check if we got a context switch between the interrupt and the AST
704 * This can happen if the interrupt arrived after the FPU AST was
705 * checked. In this case, raise the exception in fp_load when this
706 * thread next time uses the FPU. Remember exception condition in
707 * fp_valid (extended boolean 2).
708 */
709 if (fp_intr_thread != thread) {
710 if (fp_intr_thread == THREAD_NULL) {
711 panic("fpexterrflt: fp_intr_thread == THREAD_NULL");
712 return;
713 }
714 fp_intr_thread->pcb->ims.ifps->fp_valid = 2;
715 fp_intr_thread = THREAD_NULL;
716 return;
717 }
718 fp_intr_thread = THREAD_NULL;
719#else /* NCPUS == 1 */
720 /*
721 * Save the FPU state and turn off the FPU.
722 */
723 fp_save(thread);
724#endif /* NCPUS == 1 */
725
726 /*
727 * Raise FPU exception.
728 * Locking not needed on pcb->ims.ifps,
729 * since thread is running.
730 */
731 i386_exception(EXC_ARITHMETIC,
732 EXC_I386_EXTERR,
733 fp_kind == FP_387X ?
734 thread->pcb->ims.ifps->xfp_save_state.fp_status :
735 thread->pcb->ims.ifps->fp_save_state.fp_status);
736 /*NOTREACHED*/
737}
738#endif /* MACH_RING1 */
739
740/*
741 * Save FPU state.
742 *
743 * Locking not needed:
744 * . if called from fpu_get_state, pcb already locked.
745 * . if called from fpnoextflt or fp_intr, we are single-cpu
746 * . otherwise, thread is running.
747 */
748void
749fp_save(thread_t thread)
750{
751 pcb_t pcb = thread->pcb;
752 struct i386_fpsave_state *ifps = pcb->ims.ifps;
753
754 if (ifps != 0 && !ifps->fp_valid) {
755 /* registers are in FPU */
756 ifps->fp_valid = TRUE;
757 if (fp_kind == FP_387X)
758 fxsave(&ifps->xfp_save_state);
759 else
760 fnsave(&ifps->fp_save_state);
761 }
762}
763
764/*
765 * Restore FPU state from PCB.
766 *
767 * Locking not needed; always called on the current thread.
768 */
769void
770fp_load(thread_t thread)
771{
772 pcb_t pcb = thread->pcb;
773 struct i386_fpsave_state *ifps;
774
775ASSERT_IPL(SPL0);
776 ifps = pcb->ims.ifps;
777 if (ifps == 0) {
778 ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
779 memset(ifps, 0, sizeof *ifps);
780 pcb->ims.ifps = ifps;
781 fpinit();
782#if 1
783/*
784 * I'm not sure this is needed. Does the fpu regenerate the interrupt in
785 * frstor or not? Without this code we may miss some exceptions, with it
786 * we might send too many exceptions.
787 */
788 } else if (ifps->fp_valid == 2) {
789 /* delayed exception pending */
790
791 ifps->fp_valid = TRUE;
792 clear_fpu();
793 /*
794 * Raise FPU exception.
795 * Locking not needed on pcb->ims.ifps,
796 * since thread is running.
797 */
798 i386_exception(EXC_ARITHMETIC,
799 EXC_I386_EXTERR,
800 fp_kind == FP_387X ?
801 thread->pcb->ims.ifps->xfp_save_state.fp_status :
802 thread->pcb->ims.ifps->fp_save_state.fp_status);
803 /*NOTREACHED*/
804#endif
805 } else if (! ifps->fp_valid) {
806 printf("fp_load: invalid FPU state!\n");
807 fninit ();
808 } else {
809 if (fp_kind == FP_387X)
810 fxrstor(ifps->xfp_save_state);
811 else
812 frstor(ifps->fp_save_state);
813 }
814 ifps->fp_valid = FALSE; /* in FPU */
815}
816
817/*
818 * Allocate and initialize FP state for current thread.
819 * Don't load state.
820 *
821 * Locking not needed; always called on the current thread.
822 */
823void
824fp_state_alloc(void)
825{
826 pcb_t pcb = current_thread()->pcb;
827 struct i386_fpsave_state *ifps;
828
829 ifps = (struct i386_fpsave_state *)kmem_cache_alloc(&ifps_cache);
830 memset(ifps, 0, sizeof *ifps);
831 pcb->ims.ifps = ifps;
832
833 ifps->fp_valid = TRUE;
834
835 if (fp_kind == FP_387X) {
836 ifps->xfp_save_state.fp_control = (0x037f
837 & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
838 | (FPC_PC_53|FPC_IC_AFF);
839 ifps->xfp_save_state.fp_status = 0;
840 ifps->xfp_save_state.fp_tag = 0xffff; /* all empty */
841 if (CPU_HAS_FEATURE(CPU_FEATURE_SSE))
842 ifps->xfp_save_state.fp_mxcsr = 0x1f80;
843 } else {
844 ifps->fp_save_state.fp_control = (0x037f
845 & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
846 | (FPC_PC_53|FPC_IC_AFF);
847 ifps->fp_save_state.fp_status = 0;
848 ifps->fp_save_state.fp_tag = 0xffff; /* all empty */
849 }
850}
851
852#if AT386 && !defined(MACH_XEN)
853/*
854 * Handle a coprocessor error interrupt on the AT386.
855 * This comes in on line 5 of the slave PIC at SPL1.
856 */
857void
858fpintr(int unit)
859{
860 spl_t s;
861 thread_t thread = current_thread();
862
863ASSERT_IPL(SPL1);
864 /*
865 * Turn off the extended 'busy' line.
866 */
867 outb(0xf0, 0);
868
869 if (fphandleerr())
870 return;
871
872#if NCPUS == 1
873 if (fp_intr_thread != THREAD_NULL && fp_intr_thread != thread)
874 panic("fp_intr: already caught intr");
875 fp_intr_thread = thread;
876#endif /* NCPUS == 1 */
877
878 /*
879 * Since we are running on the interrupt stack, we must
880 * signal the thread to take the exception when we return
881 * to user mode. Use an AST to do this.
882 *
883 * Don`t set the thread`s AST field. If the thread is
884 * descheduled before it takes the AST, it will notice
885 * the FPU error when it reloads its FPU state.
886 */
887 s = splsched();
888 ast_on(cpu_number(), AST_I386_FP);
889 splx(s);
890}
891#endif /* AT386 */