Bug Summary

File: obj-scan-build/../i386/i386/fpu.c
Location: line 678, column 14
Description: Dereference of null pointer
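
The flagged dereference sits at the end of fpexterrflt() (analyzer steps 1-4 in the listing below): fphandleerr() returns 0, 'fp_kind' is assumed not to be FP_387X, and the handler then reads thread->pcb->ims.ifps->fp_save_state.fp_status. The analyzer assumes pcb->ims.ifps can still be null at that point, since fp_save() only stores into an already-allocated save area (it checks 'ifps != 0') and this path never allocates one. A minimal defensive sketch is shown below; it only illustrates the path the analyzer complains about, it is not the maintainers' fix, and the fallback exception code of 0 is an assumption.

    void
    fpexterrflt(void)
    {
        thread_t thread = current_thread();
        struct i386_fpsave_state *ifps;

        if (fphandleerr())
            return;

        ifps = thread->pcb->ims.ifps;
        if (ifps == 0) {
            /* No saved FPU state to report from; raise a generic FP error. */
            i386_exception(EXC_ARITHMETIC, EXC_I386_EXTERR, 0);
            /*NOTREACHED*/
        }

        /*
         * Raise FPU exception with the saved status word.
         * Locking not needed on pcb->ims.ifps, since thread is running.
         */
        i386_exception(EXC_ARITHMETIC,
                       EXC_I386_EXTERR,
                       fp_kind == FP_387X ?
                       ifps->xfp_save_state.fp_status :
                       ifps->fp_save_state.fp_status);
        /*NOTREACHED*/
    }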

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1992-1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26
27/*
28 * Copyright (C) 1994 Linus Torvalds
29 *
30 * Pentium III FXSR, SSE support
31 * General FPU state handling cleanups
32 * Gareth Hughes <gareth@valinux.com>, May 2000
33 */
34
35/*
36 * Support for 80387 floating point or FP emulator.
37 */
38
39#include <string.h>
40
41#include <mach/exception.h>
42#include <mach/machine/thread_status.h>
43#include <mach/machine/fp_reg.h>
44
45#include <kern/debug.h>
46#include <machine/machspl.h> /* spls */
47#include <kern/printf.h>
48#include <kern/thread.h>
49#include <kern/slab.h>
50
51 #include <i386/thread.h>
52 #include <i386/fpu.h>
53 #include <i386/pio.h>
54 #include <i386/pic.h>
55 #include <i386/locore.h>
56 #include <i386/trap.h>
57#include "cpu_number.h"
58
59#if 0
60 #include <i386/ipl.h>
61#define ASSERT_IPL(L) \
62{ \
63 if (curr_ipl != L) { \
64 printf("IPL is %d, expected %d\n", curr_ipl, L); \
65 panic("fpu: wrong ipl"); \
66 } \
67}
68#else
69#define ASSERT_IPL(L)
70#endif
71
72 int fp_kind = FP_387;          /* 80387 present */
73 struct kmem_cache ifps_cache;  /* cache for FPU save area */
74 static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
75 
76 #if NCPUS == 1
77 volatile thread_t fp_thread = THREAD_NULL;
78                     /* thread whose state is in FPU */
79                     /* always THREAD_NULL if emulating
80                        FPU */
81 volatile thread_t fp_intr_thread = THREAD_NULL;
82 
83 
84 #define clear_fpu() \
85     { \
86         set_ts(); \
87         fp_thread = THREAD_NULL; \
88     }
89 
90 #else /* NCPUS > 1 */
91 #define clear_fpu() \
92     { \
93         set_ts(); \
94     }
95 
96 #endif
97
98
99/*
100 * Look for FPU and initialize it.
101 * Called on each CPU.
102 */
103void
104init_fpu(void)
105{
106 unsigned short status, control;
107
108 #ifdef MACH_RING1
109     clear_ts();
110 #else /* MACH_RING1 */
111     unsigned int native = 0;
112 
113     if (machine_slot[cpu_number()].cpu_type >= CPU_TYPE_I486)
114         native = CR0_NE;
115 
116     /*
117      * Check for FPU by initializing it,
118      * then trying to read the correct bit patterns from
119      * the control and status registers.
120      */
121     set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | native); /* allow use of FPU */
122 #endif /* MACH_RING1 */
123 
124     fninit();
125     status = fnstsw();
126     fnstcw(&control);
127
128 if ((status & 0xff) == 0 &&
129 (control & 0x103f) == 0x3f)
130 {
131 /*
132 * We have a FPU of some sort.
133 * Compare -infinity against +infinity
134 * to check whether we have a 287 or a 387.
135 */
136 volatile double fp_infinity, fp_one, fp_zero;
137 fp_one = 1.0;
138 fp_zero = 0.0;
139 fp_infinity = fp_one / fp_zero;
140 if (fp_infinity == -fp_infinity) {
141 /*
142 * We have an 80287.
143 */
144             fp_kind = FP_287;
145 asm volatile(".byte 0xdb; .byte 0xe4"); /* fnsetpm */
146 }
147 else {
148 /*
149 * We have a 387.
150 */
151             if (CPU_HAS_FEATURE(CPU_FEATURE_FXSR)) {
152                 static /* because we _need_ alignment */
153                 struct i386_xfp_save save;
154                 unsigned long mask;
155                 fp_kind = FP_387X;
156 #ifndef MACH_RING1
157                 set_cr4(get_cr4() | CR4_OSFXSR);
158 #endif /* MACH_RING1 */
159                 fxsave(&save);
160                 mask = save.fp_mxcsr_mask;
161                 if (!mask)
162                     mask = 0x0000ffbf;
163                 mxcsr_feature_mask &= mask;
164             } else
165                 fp_kind = FP_387;
166 }
167 #ifdef MACH_RING1
168         set_ts();
169 #else /* MACH_RING1 */
170         /*
171          * Trap wait instructions.  Turn off FPU for now.
172          */
173         set_cr0(get_cr0() | CR0_TS | CR0_MP);
174 #endif /* MACH_RING1 */
175 }
176 else {
177 /*
178 * NO FPU.
179 */
180 panic("No FPU!");
181 }
182}
183
184/*
185 * Initialize FP handling.
186 */
187void
188fpu_module_init(void)
189{
190 kmem_cache_init(&ifps_cache, "i386_fpsave_state",
191 sizeof(struct i386_fpsave_state), 16,
192             NULL, NULL, NULL, 0);
193}
194
195/*
196 * Free a FPU save area.
197 * Called only when thread terminating - no locking necessary.
198 */
199void
200fp_free(fps)
201 struct i386_fpsave_state *fps;
202{
203ASSERT_IPL(SPL0);
204 #if NCPUS == 1
205     if ((fp_thread != THREAD_NULL) && (fp_thread->pcb->ims.ifps == fps)) {
206         /*
207          * Make sure we don't get FPU interrupts later for
208          * this thread
209          */
210         clear_ts();
211         fwait();
212 
213         /* Mark it free and disable access */
214         clear_fpu();
215     }
216 #endif /* NCPUS == 1 */
217 kmem_cache_free(&ifps_cache, (vm_offset_t) fps);
218}
219
220/* The two following functions were stolen from Linux's i387.c */
221static inline unsigned short
222twd_i387_to_fxsr (unsigned short twd)
223{
224 unsigned int tmp; /* to avoid 16 bit prefixes in the code */
225
226 /* Transform each pair of bits into 01 (valid) or 00 (empty) */
227 tmp = ~twd;
228 tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
229 /* and move the valid bits to the lower byte. */
230 tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
231 tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
232 tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
233 return tmp;
234}
235
236static inline unsigned long
237twd_fxsr_to_i387 (struct i386_xfp_save *fxsave)
238{
239 struct {
240 unsigned short significand[4];
241 unsigned short exponent;
242 unsigned short padding[3];
243     } *st = NULL;
244 unsigned long tos = (fxsave->fp_status >> 11) & 7;
245 unsigned long twd = (unsigned long) fxsave->fp_tag;
246 unsigned long tag;
247 unsigned long ret = 0xffff0000u;
248 int i;
249
250 #define FPREG_ADDR(f, n)    ((void *)&(f)->fp_reg_word + (n) * 16);
251 
252     for (i = 0 ; i < 8 ; i++) {
253         if (twd & 0x1) {
254             st = FPREG_ADDR (fxsave, (i - tos) & 7);
255
256 switch (st->exponent & 0x7fff) {
257 case 0x7fff:
258 tag = 2; /* Special */
259 break;
260 case 0x0000:
261 if (!st->significand[0] &&
262 !st->significand[1] &&
263 !st->significand[2] &&
264 !st->significand[3] ) {
265 tag = 1; /* Zero */
266 } else {
267 tag = 2; /* Special */
268 }
269 break;
270 default:
271 if (st->significand[3] & 0x8000) {
272 tag = 0; /* Valid */
273 } else {
274 tag = 2; /* Special */
275 }
276 break;
277 }
278 } else {
279 tag = 3; /* Empty */
280 }
281 ret |= (tag << (2 * i));
282 twd = twd >> 1;
283 }
284 return ret;
285}
286
287/*
288 * Set the floating-point state for a thread.
289 * If the thread is not the current thread, it is
290 * not running (held). Locking needed against
291 * concurrent fpu_set_state or fpu_get_state.
292 */
293kern_return_t
294fpu_set_state(thread, state)
295 const thread_t thread;
296 struct i386_float_state *state;
297{
298 pcb_t pcb = thread->pcb;
299 struct i386_fpsave_state *ifps;
300 struct i386_fpsave_state *new_ifps;
301
302ASSERT_IPL(SPL0);
303     if (fp_kind == FP_NO)
304         return KERN_FAILURE;
305
306 #if NCPUS == 1
307
308 /*
309 * If this thread`s state is in the FPU,
310 * discard it; we are replacing the entire
311 * FPU state.
312 */
313     if (fp_thread == thread) {
314         clear_ts();
315         fwait();        /* wait for possible interrupt */
316         clear_fpu();    /* no state in FPU */
317     }
318#endif
319
320 if (state->initialized == 0) {
321 /*
322 * new FPU state is 'invalid'.
323 * Deallocate the fp state if it exists.
324 */
325 simple_lock(&pcb->lock);
326 ifps = pcb->ims.ifps;
327 pcb->ims.ifps = 0;
328         simple_unlock(&pcb->lock);
329
330 if (ifps != 0) {
331 kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
332 }
333 }
334 else {
335 /*
336 * Valid state. Allocate the fp state if there is none.
337 */
338 struct i386_fp_save *user_fp_state;
339 struct i386_fp_regs *user_fp_regs;
340
341 user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
342 user_fp_regs = (struct i386_fp_regs *)
343 &state->hw_state[sizeof(struct i386_fp_save)];
344
345 new_ifps = 0;
346 Retry:
347 simple_lock(&pcb->lock);
348 ifps = pcb->ims.ifps;
349 if (ifps == 0) {
350 if (new_ifps == 0) {
351                 simple_unlock(&pcb->lock);
352 new_ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
353 goto Retry;
354 }
355 ifps = new_ifps;
356 new_ifps = 0;
357 pcb->ims.ifps = ifps;
358 }
359
360 /*
361 * Ensure that reserved parts of the environment are 0.
362 */
363 memset(&ifps->fp_save_state, 0, sizeof(struct i386_fp_save));
364
365         if (fp_kind == FP_387X) {
366 int i;
367
368 ifps->xfp_save_state.fp_control = user_fp_state->fp_control;
369 ifps->xfp_save_state.fp_status = user_fp_state->fp_status;
370 ifps->xfp_save_state.fp_tag = twd_i387_to_fxsr(user_fp_state->fp_tag);
371 ifps->xfp_save_state.fp_eip = user_fp_state->fp_eip;
372 ifps->xfp_save_state.fp_cs = user_fp_state->fp_cs;
373 ifps->xfp_save_state.fp_opcode = user_fp_state->fp_opcode;
374 ifps->xfp_save_state.fp_dp = user_fp_state->fp_dp;
375 ifps->xfp_save_state.fp_ds = user_fp_state->fp_ds;
376 for (i=0; i<8; i++)
377 memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs->fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
378 } else {
379 ifps->fp_save_state.fp_control = user_fp_state->fp_control;
380 ifps->fp_save_state.fp_status = user_fp_state->fp_status;
381 ifps->fp_save_state.fp_tag = user_fp_state->fp_tag;
382 ifps->fp_save_state.fp_eip = user_fp_state->fp_eip;
383 ifps->fp_save_state.fp_cs = user_fp_state->fp_cs;
384 ifps->fp_save_state.fp_opcode = user_fp_state->fp_opcode;
385 ifps->fp_save_state.fp_dp = user_fp_state->fp_dp;
386 ifps->fp_save_state.fp_ds = user_fp_state->fp_ds;
387 ifps->fp_regs = *user_fp_regs;
388 }
389
390         simple_unlock(&pcb->lock);
391 if (new_ifps != 0)
392 kmem_cache_free(&ifps_cache, (vm_offset_t) new_ifps);
393 }
394
395     return KERN_SUCCESS;
396}
397
398/*
399 * Get the floating-point state for a thread.
400 * If the thread is not the current thread, it is
401 * not running (held). Locking needed against
402 * concurrent fpu_set_state or fpu_get_state.
403 */
404kern_return_t
405fpu_get_state(thread, state)
406 const thread_t thread;
407 struct i386_float_state *state;
408{
409 pcb_t pcb = thread->pcb;
410 struct i386_fpsave_state *ifps;
411
412ASSERT_IPL(SPL0);
413     if (fp_kind == FP_NO)
414         return KERN_FAILURE;
415
416 simple_lock(&pcb->lock);
417 ifps = pcb->ims.ifps;
418 if (ifps == 0) {
419 /*
420 * No valid floating-point state.
421 */
422         simple_unlock(&pcb->lock);
423 memset(state, 0, sizeof(struct i386_float_state));
424         return KERN_SUCCESS;
425 }
426
427 /* Make sure we`ve got the latest fp state info */
428 /* If the live fpu state belongs to our target */
429 #if NCPUS == 1
430     if (thread == fp_thread)
431 #else
432     if (thread == current_thread())
433 #endif
434     {
435         clear_ts();
436         fp_save(thread);
437         clear_fpu();
438 }
439
440 state->fpkind = fp_kind;
441 state->exc_status = 0;
442
443 {
444 struct i386_fp_save *user_fp_state;
445 struct i386_fp_regs *user_fp_regs;
446
447 state->initialized = ifps->fp_valid;
448
449 user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
450 user_fp_regs = (struct i386_fp_regs *)
451 &state->hw_state[sizeof(struct i386_fp_save)];
452
453 /*
454 * Ensure that reserved parts of the environment are 0.
455 */
456 memset(user_fp_state, 0, sizeof(struct i386_fp_save));
457
458         if (fp_kind == FP_387X) {
459 int i;
460
461 user_fp_state->fp_control = ifps->xfp_save_state.fp_control;
462 user_fp_state->fp_status = ifps->xfp_save_state.fp_status;
463 user_fp_state->fp_tag = twd_fxsr_to_i387(&ifps->xfp_save_state);
464 user_fp_state->fp_eip = ifps->xfp_save_state.fp_eip;
465 user_fp_state->fp_cs = ifps->xfp_save_state.fp_cs;
466 user_fp_state->fp_opcode = ifps->xfp_save_state.fp_opcode;
467 user_fp_state->fp_dp = ifps->xfp_save_state.fp_dp;
468 user_fp_state->fp_ds = ifps->xfp_save_state.fp_ds;
469 for (i=0; i<8; i++)
470 memcpy(&user_fp_regs->fp_reg_word[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
471 } else {
472 user_fp_state->fp_control = ifps->fp_save_state.fp_control;
473 user_fp_state->fp_status = ifps->fp_save_state.fp_status;
474 user_fp_state->fp_tag = ifps->fp_save_state.fp_tag;
475 user_fp_state->fp_eip = ifps->fp_save_state.fp_eip;
476 user_fp_state->fp_cs = ifps->fp_save_state.fp_cs;
477 user_fp_state->fp_opcode = ifps->fp_save_state.fp_opcode;
478 user_fp_state->fp_dp = ifps->fp_save_state.fp_dp;
479 user_fp_state->fp_ds = ifps->fp_save_state.fp_ds;
480 *user_fp_regs = ifps->fp_regs;
481 }
482 }
483     simple_unlock(&pcb->lock);
484
485     return KERN_SUCCESS;
486}
487
488/*
489 * Initialize FPU.
490 *
491 * Raise exceptions for:
492 * invalid operation
493 * divide by zero
494 * overflow
495 *
496 * Use 53-bit precision.
497 */
498void fpinit(void)
499{
500 unsigned short control;
501
502ASSERT_IPL(SPL0);
503     clear_ts();
504     fninit();
505     fnstcw(&control);
506     control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
507     control |= (FPC_PC_53 |     /* Set precision */
508                 FPC_RC_RN |     /* round-to-nearest */
509                 FPC_ZE |        /* Suppress zero-divide */
510                 FPC_OE |        /*  and overflow */
511                 FPC_UE |        /*  underflow */
512                 FPC_IE |        /* Allow NaNQs and +-INF */
513                 FPC_DE |        /* Allow denorms as operands  */
514                 FPC_PE);        /* No trap for precision loss */
515     fldcw(control);
516}
517
518/*
519 * Coprocessor not present.
520 */
521void
522fpnoextflt(void)
523{
524 /*
525 * Enable FPU use.
526 */
527ASSERT_IPL(SPL0);
528     clear_ts();
529 #if NCPUS == 1
530
531 /*
532 * If this thread`s state is in the FPU, we are done.
533 */
534     if (fp_thread == current_thread())
535 return;
536
537 /* Make sure we don't do fpsave() in fp_intr while doing fpsave()
538 * here if the current fpu instruction generates an error.
539 */
540     fwait();
541 /*
542 * If another thread`s state is in the FPU, save it.
543 */
544     if (fp_thread != THREAD_NULL) {
545 fp_save(fp_thread);
546 }
547
548 /*
549 * Give this thread the FPU.
550 */
551     fp_thread = current_thread();
552
553#endif /* NCPUS == 1 */
554
555 /*
556 * Load this thread`s state into the FPU.
557 */
558     fp_load(current_thread());
559}
560
561/*
562 * FPU overran end of segment.
563 * Re-initialize FPU. Floating point state is not valid.
564 */
565void
566fpextovrflt(void)
567{
568     thread_t thread = current_thread();
569 pcb_t pcb;
570 struct i386_fpsave_state *ifps;
571
572 #if NCPUS == 1
573
574 /*
575 * Is exception for the currently running thread?
576 */
577 if (fp_thread != thread) {
578 /* Uh oh... */
579 panic("fpextovrflt");
580 }
581#endif
582
583 /*
584 * This is a non-recoverable error.
585 * Invalidate the thread`s FPU state.
586 */
587 pcb = thread->pcb;
588 simple_lock(&pcb->lock);
589 ifps = pcb->ims.ifps;
590 pcb->ims.ifps = 0;
591     simple_unlock(&pcb->lock);
592
593 /*
594 * Re-initialize the FPU.
595 */
596     clear_ts();
597     fninit();
598
599 /*
600 * And disable access.
601 */
602     clear_fpu();
603
604 if (ifps)
605 kmem_cache_free(&ifps_cache, (vm_offset_t) ifps);
606
607 /*
608 * Raise exception.
609 */
610     i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
611 /*NOTREACHED*/
612}
613
614static int
615fphandleerr(void)
616{
617     thread_t thread = current_thread();
618
619 /*
620 * Save the FPU context to the thread using it.
621 */
622 #if NCPUS == 1
623     if (fp_thread == THREAD_NULL) {
624         printf("fphandleerr: FPU not belonging to anyone!\n");
625         clear_ts();
626         fninit();
627         clear_fpu();
628 return 1;
629 }
630
631 if (fp_thread != thread) {
632 /*
633 * FPU exception is for a different thread.
634 * When that thread again uses the FPU an exception will be
635 * raised in fp_load. Remember the condition in fp_valid (== 2).
636 */
637         clear_ts();
638         fp_save(fp_thread);
639         fp_thread->pcb->ims.ifps->fp_valid = 2;
640         fninit();
641         clear_fpu();
642 /* leave fp_intr_thread THREAD_NULL */
643 return 1;
644 }
645#endif /* NCPUS == 1 */
646
647 /*
648 * Save the FPU state and turn off the FPU.
649 */
650     clear_ts();
651     fp_save(thread);
652     fninit();
653     clear_fpu();
654
655 return 0;
656}
657
658/*
659 * FPU error. Called by exception handler.
660 */
661void
662fpexterrflt(void)
663{
664     thread_t thread = current_thread();
665
666     if (fphandleerr())
            [1] Taking false branch
667         return;
668 
669     /*
670      * Raise FPU exception.
671      * Locking not needed on pcb->ims.ifps,
672      * since thread is running.
673      */
674     i386_exception(EXC_ARITHMETIC,
675                    EXC_I386_EXTERR,
676                    fp_kind == FP_387X ?
            [2] Assuming 'fp_kind' is not equal to 4 (FP_387X)
            [3] '?' condition is false
677                    thread->pcb->ims.ifps->xfp_save_state.fp_status :
678                    thread->pcb->ims.ifps->fp_save_state.fp_status);
            [4] Dereference of null pointer
679     /*NOTREACHED*/
680}
681
682#ifndef MACH_RING1
683/*
684 * FPU error. Called by AST.
685 */
686void
687fpastintr(void)
688{
689     thread_t thread = current_thread();
690
691ASSERT_IPL(SPL0);
692 #if NCPUS == 1
693 /*
694 * Since FPU errors only occur on ESC or WAIT instructions,
695 * the current thread should own the FPU. If it didn`t,
696 * we should have gotten the task-switched interrupt first.
697 */
698     if (fp_thread != THREAD_NULL) {
699 panic("fpexterrflt");
700 return;
701 }
702
703 /*
704 * Check if we got a context switch between the interrupt and the AST
705 * This can happen if the interrupt arrived after the FPU AST was
706 * checked. In this case, raise the exception in fp_load when this
707 * thread next time uses the FPU. Remember exception condition in
708 * fp_valid (extended boolean 2).
709 */
710 if (fp_intr_thread != thread) {
711         if (fp_intr_thread == THREAD_NULL) {
712 panic("fpexterrflt: fp_intr_thread == THREAD_NULL");
713 return;
714 }
715 fp_intr_thread->pcb->ims.ifps->fp_valid = 2;
716         fp_intr_thread = THREAD_NULL;
717 return;
718 }
719     fp_intr_thread = THREAD_NULL;
720#else /* NCPUS == 1 */
721 /*
722 * Save the FPU state and turn off the FPU.
723 */
724 fp_save(thread);
725#endif /* NCPUS == 1 */
726
727 /*
728 * Raise FPU exception.
729 * Locking not needed on pcb->ims.ifps,
730 * since thread is running.
731 */
732     i386_exception(EXC_ARITHMETIC,
733                    EXC_I386_EXTERR,
734                    fp_kind == FP_387X ?
735                    thread->pcb->ims.ifps->xfp_save_state.fp_status :
736                    thread->pcb->ims.ifps->fp_save_state.fp_status);
737 /*NOTREACHED*/
738}
739#endif /* MACH_RING1 */
740
741/*
742 * Save FPU state.
743 *
744 * Locking not needed:
745 * . if called from fpu_get_state, pcb already locked.
746 * . if called from fpnoextflt or fp_intr, we are single-cpu
747 * . otherwise, thread is running.
748 */
749void
750fp_save(thread)
751 thread_t thread;
752{
753 pcb_t pcb = thread->pcb;
754 struct i386_fpsave_state *ifps = pcb->ims.ifps;
755
756 if (ifps != 0 && !ifps->fp_valid) {
757 /* registers are in FPU */
758         ifps->fp_valid = TRUE;
759         if (fp_kind == FP_387X)
760             fxsave(&ifps->xfp_save_state);
761         else
762             fnsave(&ifps->fp_save_state);
763 }
764}
765
766/*
767 * Restore FPU state from PCB.
768 *
769 * Locking not needed; always called on the current thread.
770 */
771void
772fp_load(thread)
773 thread_t thread;
774{
775 pcb_t pcb = thread->pcb;
776 struct i386_fpsave_state *ifps;
777
778ASSERT_IPL(SPL0);
779 ifps = pcb->ims.ifps;
780 if (ifps == 0) {
781 ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
782 memset(ifps, 0, sizeof *ifps);
783 pcb->ims.ifps = ifps;
784 fpinit();
785#if 1
786/*
787 * I'm not sure this is needed. Does the fpu regenerate the interrupt in
788 * frstor or not? Without this code we may miss some exceptions, with it
789 * we might send too many exceptions.
790 */
791 } else if (ifps->fp_valid == 2) {
792 /* delayed exception pending */
793
794         ifps->fp_valid = TRUE;
795         clear_fpu();
796 /*
797 * Raise FPU exception.
798 * Locking not needed on pcb->ims.ifps,
799 * since thread is running.
800 */
801         i386_exception(EXC_ARITHMETIC,
802                        EXC_I386_EXTERR,
803                        fp_kind == FP_387X ?
804                        thread->pcb->ims.ifps->xfp_save_state.fp_status :
805                        thread->pcb->ims.ifps->fp_save_state.fp_status);
806 /*NOTREACHED*/
807#endif
808 } else if (! ifps->fp_valid) {
809 printf("fp_load: invalid FPU state!\n");
810         fninit ();
811 } else {
812         if (fp_kind == FP_387X)
813             fxrstor(ifps->xfp_save_state);
814         else
815             frstor(ifps->fp_save_state);
816 }
817     ifps->fp_valid = FALSE;     /* in FPU */
818}
819
820/*
821 * Allocate and initialize FP state for current thread.
822 * Don't load state.
823 *
824 * Locking not needed; always called on the current thread.
825 */
826void
827fp_state_alloc(void)
828{
829     pcb_t pcb = current_thread()->pcb;
830 struct i386_fpsave_state *ifps;
831
832 ifps = (struct i386_fpsave_state *)kmem_cache_alloc(&ifps_cache);
833 memset(ifps, 0, sizeof *ifps);
834 pcb->ims.ifps = ifps;
835
836     ifps->fp_valid = TRUE;
837
838     if (fp_kind == FP_387X) {
839         ifps->xfp_save_state.fp_control = (0x037f
840                 & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
841                 | (FPC_PC_53|FPC_IC_AFF);
842         ifps->xfp_save_state.fp_status = 0;
843         ifps->xfp_save_state.fp_tag = 0xffff;   /* all empty */
844         if (CPU_HAS_FEATURE(CPU_FEATURE_SSE))
845             ifps->xfp_save_state.fp_mxcsr = 0x1f80;
846     } else {
847         ifps->fp_save_state.fp_control = (0x037f
848                 & ~(FPC_IM|FPC_ZM|FPC_OM|FPC_PC))
849                 | (FPC_PC_53|FPC_IC_AFF);
850 ifps->fp_save_state.fp_status = 0;
851 ifps->fp_save_state.fp_tag = 0xffff; /* all empty */
852 }
853}
854
855 #if AT386 && !defined(MACH_XEN)
856/*
857 * Handle a coprocessor error interrupt on the AT386.
858 * This comes in on line 5 of the slave PIC at SPL1.
859 */
860void
861fpintr(int unit)
862{
863 spl_t s;
864     thread_t thread = current_thread();
865
866ASSERT_IPL(SPL1);
867 /*
868 * Turn off the extended 'busy' line.
869 */
870     outb(0xf0, 0);
871
872 if (fphandleerr())
873 return;
874
875 #if NCPUS == 1
876     if (fp_intr_thread != THREAD_NULL && fp_intr_thread != thread)
877 panic("fp_intr: already caught intr");
878 fp_intr_thread = thread;
879#endif /* NCPUS == 1 */
880
881 /*
882 * Since we are running on the interrupt stack, we must
883 * signal the thread to take the exception when we return
884 * to user mode. Use an AST to do this.
885 *
886 * Don`t set the thread`s AST field. If the thread is
887 * descheduled before it takes the AST, it will notice
888 * the FPU error when it reloads its FPU state.
889 */
890 s = splsched();
891     ast_on(cpu_number(), AST_I386_FP);
892 splx(s);
893}
894#endif /* AT386 */