Bug Summary

File: obj-scan-build/../i386/i386/db_interface.c
Location: line 331, column 1
Description: Address of stack memory associated with local variable 'type' is still referred to by the global variable 'i386_last_kdb_sp' upon returning to the caller. This will be a dangling reference.
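In isolation, the pattern the checker objects to reduces to a global being assigned the address of a function parameter, an address that stops referring to live storage the moment the function returns. A minimal standalone sketch of that pattern (illustrative names only, not code from this file):

    #include <stdint.h>

    /* Stands in for i386_last_kdb_sp: records a stack location at trap time. */
    static uintptr_t last_sp_marker;

    static int handler(int type)
    {
        /* &type points into this call's stack frame... */
        last_sp_marker = (uintptr_t) &type;
        return 1;
    }   /* ...so after returning, last_sp_marker holds a dangling address. */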

Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * Interface to new debugger.
28 */
29
30#include <string.h>
31#include <sys/reboot.h>
32#include <vm/pmap.h>
33
34#include <i386/thread.h>
35#include <i386/db_machdep.h>
36#include <i386/seg.h>
37#include <i386/trap.h>
38#include <i386/setjmp.h>
39#include <i386/pmap.h>
40#include <i386/proc_reg.h>
41#include <i386/locore.h>
42#include "gdt.h"
43#include "trap.h"
44
45#include "vm_param.h"
46#include <vm/vm_map.h>
47#include <vm/vm_fault.h>
48#include <kern/cpu_number.h>
49#include <kern/printf.h>
50#include <kern/thread.h>
51#include <kern/task.h>
52#include <ddb/db_access.h>
53#include <ddb/db_command.h>
54#include <ddb/db_output.h>
55#include <ddb/db_run.h>
56#include <ddb/db_task_thread.h>
57#include <ddb/db_trap.h>
58#include <ddb/db_watch.h>
59#include <machine/db_interface.h>
60#include <machine/machspl.h>
61
62#if MACH_KDB1
63/* Whether the kernel uses any debugging register. */
64static boolean_t kernel_dr;
65#endif
66
67void db_load_context(pcb_t pcb)
68{
69#if MACH_KDB1
70 int s = splhigh();
71
72 if (kernel_dr) {
73 splx(s);
74 return;
75 }
76#endif
77
78 /* Else set user debug registers */
79 set_dr0(pcb->ims.ids.dr[0]);
80 set_dr1(pcb->ims.ids.dr[1]);
81 set_dr2(pcb->ims.ids.dr[2]);
82 set_dr3(pcb->ims.ids.dr[3]);
83 set_dr7(pcb->ims.ids.dr[7]);
84#if MACH_KDB1
85 splx(s);
86#endif
87}
88
89void db_get_debug_state(
90 pcb_t pcb,
91 struct i386_debug_state *state)
92{
93 *state = pcb->ims.ids;
94}
95
96kern_return_t db_set_debug_state(
97 pcb_t pcb,
98 const struct i386_debug_state *state)
99{
100 int i;
101
102 for (i = 0; i <= 3; i++)
103 if (state->dr[i] < VM_MIN_ADDRESS(0)
104 || state->dr[i] >= VM_MAX_ADDRESS(0xc0000000UL))
105 return KERN_INVALID_ARGUMENT4;
106
107 pcb->ims.ids = *state;
108
109 if (pcb == current_thread()(active_threads[(0)])->pcb)
110 db_load_context(pcb);
111
112 return KERN_SUCCESS0;
113}
114
115#if MACH_KDB1
116
117struct i386_saved_state *i386_last_saved_statep;
118struct i386_saved_state i386_nested_saved_state;
119unsigned i386_last_kdb_sp;
120
121extern thread_t db_default_thread;
122
123static struct i386_debug_state ids;
124
125void db_dr (
126 int num,
127 vm_offset_t linear_addr,
128 int type,
129 int len,
130 int persistence)
131{
132 int s = splhigh();
133 unsigned long dr7;
134
135 if (!kernel_dr) {
136 if (!linear_addr) {
137 splx(s);
138 return;
139 }
140 kernel_dr = TRUE((boolean_t) 1);
141 /* Clear user debugging registers */
142 set_dr7(0);
143 set_dr0(0);
144 set_dr1(0);
145 set_dr2(0);
146 set_dr3(0);
147 }
148
149 ids.dr[num] = linear_addr;
150 switch (num) {
151 case 0: set_dr0(linear_addr); break;
152 case 1: set_dr1(linear_addr); break;
153 case 2: set_dr2(linear_addr); break;
154 case 3: set_dr3(linear_addr); break;
155 }
156
157 /* Replace type/len/persistence for DRnum in dr7 */
158 dr7 = get_dr7 ();
159 dr7 &= ~(0xfUL << (4*num+16)) & ~(0x3UL << (2*num));
160 dr7 |= (((len << 2) | type) << (4*num+16)) | (persistence << (2*num));
161 set_dr7 (dr7);
162
163 if (kernel_dr) {
164 if (!ids.dr[0] && !ids.dr[1] && !ids.dr[2] && !ids.dr[3]) {
165 /* Not used any more, switch back to user debugging registers */
166 kernel_dr = FALSE((boolean_t) 0);
167 db_load_context(current_thread()(active_threads[(0)])->pcb);
168 }
169 }
170 splx(s);
171}
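For reference, the DR7 update at lines 158-161 packs a per-register type/length nibble into bits 16+4*num and a two-bit local/global enable into bits 2*num. A small standalone illustration of the same packing, using hypothetical values for a 4-byte write watchpoint in slot 1 (not taken from the analyzed run):

    #include <stdio.h>

    int main(void)
    {
        unsigned long dr7 = 0;
        int num = 1, type = 1 /* write */, len = 3 /* 4-byte */, persistence = 3 /* local|global */;

        /* Same masking and packing as db_dr() above. */
        dr7 &= ~(0xfUL << (4*num+16)) & ~(0x3UL << (2*num));
        dr7 |= ((unsigned long)((len << 2) | type) << (4*num+16))
             | ((unsigned long)persistence << (2*num));

        printf("dr7 = %#lx\n", dr7);   /* prints 0xd0000c */
        return 0;
    }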
172
173boolean_t
174db_set_hw_watchpoint(
175 const db_watchpoint_t watch,
176 unsigned num)
177{
178 vm_size_t size = watch->hiaddr - watch->loaddr;
179 db_addr_t addr = watch->loaddr;
180 vm_offset_t kern_addr;
181
182 if (num >= 4)
183 return FALSE((boolean_t) 0);
184 if (size != 1 && size != 2 && size != 4)
185 return FALSE((boolean_t) 0);
186
187 if (addr & (size-1))
188 /* Unaligned */
189 return FALSE((boolean_t) 0);
190
191 if (watch->task) {
192 if (db_user_to_kernel_address(watch->task, addr, &kern_addr, 1) < 0)
193 return FALSE((boolean_t) 0);
194 addr = kern_addr;
195 }
196 addr = kvtolin(addr)((vm_offset_t)(addr) - 0xC0000000UL + ((0xc0000000UL)));
197
198 db_dr (num, addr, I386_DB_TYPE_W1, size-1, I386_DB_LOCAL1|I386_DB_GLOBAL2);
199
200 db_printf("Hardware watchpoint %d set for %x\n", num, addr);
201 return TRUE((boolean_t) 1);
202}
203
204boolean_t
205db_clear_hw_watchpoint(
206 unsigned num)
207{
208 if (num >= 4)
209 return FALSE((boolean_t) 0);
210
211 db_dr (num, 0, 0, 0, 0);
212 return TRUE((boolean_t) 1);
213}
214
215/*
216 * Print trap reason.
217 */
218void
219kdbprinttrap(
220 int type,
221 int code)
222{
223 printf("kernel: %s (%d), code=%x\n",
224 trap_name(type), type, code);
225}
226
227/*
228 * kdb_trap - field a TRACE or BPT trap
229 */
230
231extern jmp_buf_t *db_recover;
232spl_t saved_ipl[NCPUS1]; /* just to know what was IPL before trap */
233
234boolean_t
235kdb_trap(
236 int type,
237 int code,
238 struct i386_saved_state *regs)
239{
240 spl_t s;
241
242 s = splhigh();
243 saved_ipl[cpu_number()(0)] = s;
244
245 switch (type) {
1. Control jumps to 'case -1:' at line 262
246 case T_DEBUG1: /* single_step */
247 {
248 int addr;
249 int status = get_dr6();
250
251 if (status & 0xf) { /* hmm hdw break */
252 addr = status & 0x8 ? get_dr3() :
253 status & 0x4 ? get_dr2() :
254 status & 0x2 ? get_dr1() :
255 get_dr0();
256 regs->efl |= EFL_RF0x00010000;
257 db_single_step_cmd(addr, 0, 1, "p");
258 }
259 }
260 case T_INT33: /* breakpoint */
261 case T_WATCHPOINT17: /* watchpoint */
262 case -1: /* keyboard interrupt */
263 break;
2. Execution continues on line 280
264
265 default:
266 if (db_recover) {
267 i386_nested_saved_state = *regs;
268 db_printf("Caught %s (%d), code = %x, pc = %x\n",
269 trap_name(type), type, code, regs->eip);
270 db_error("");
271 /*NOTREACHED*/
272 }
273 kdbprinttrap(type, code);
274 }
275
276#if NCPUS1 > 1
277 if (db_enter())
278#endif /* NCPUS > 1 */
279 {
280 i386_last_saved_statep = regs;
281 i386_last_kdb_sp = (unsigned) &type;
282
283 /* XXX Should switch to ddb`s own stack here. */
284
285 ddb_regs = *regs;
286 if ((regs->cs & 0x3) == KERNEL_RING0) {
3. Taking false branch
287 /*
288 * Kernel mode - esp and ss not saved
289 */
290 ddb_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
291 ddb_regs.ss = KERNEL_DS(0x10 | 0);
292 }
293
294 cnpollc(TRUE((boolean_t) 1));
295 db_task_trap(type, code, (regs->cs & 0x3) != 0);
296 cnpollc(FALSE((boolean_t) 0));
297
298 regs->eip = ddb_regs.eip;
299 regs->efl = ddb_regs.efl;
300 regs->eax = ddb_regs.eax;
301 regs->ecx = ddb_regs.ecx;
302 regs->edx = ddb_regs.edx;
303 regs->ebx = ddb_regs.ebx;
304 if ((regs->cs & 0x3) != KERNEL_RING0) {
4. Taking true branch
305 /*
306 * user mode - saved esp and ss valid
307 */
308 regs->uesp = ddb_regs.uesp; /* user stack pointer */
309 regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */
310 }
311 regs->ebp = ddb_regs.ebp;
312 regs->esi = ddb_regs.esi;
313 regs->edi = ddb_regs.edi;
314 regs->es = ddb_regs.es & 0xffff;
315 regs->cs = ddb_regs.cs & 0xffff;
316 regs->ds = ddb_regs.ds & 0xffff;
317 regs->fs = ddb_regs.fs & 0xffff;
318 regs->gs = ddb_regs.gs & 0xffff;
319
320 if ((type == T_INT33) &&
321 (db_get_task_value(regs->eip, BKPT_SIZE(1), FALSE((boolean_t) 0), TASK_NULL((task_t) 0))
322 == BKPT_INST0xcc))
323 regs->eip += BKPT_SIZE(1);
324 }
325#if NCPUS1 > 1
326 db_leave();
327#endif /* NCPUS > 1 */
328
329 splx(s);
330 return 1;
331}
5. Address of stack memory associated with local variable 'type' is still referred to by the global variable 'i386_last_kdb_sp' upon returning to the caller. This will be a dangling reference.
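The store in question is at line 281, i386_last_kdb_sp = (unsigned) &type;, which appears to use the address of the first parameter as a cheap marker for the kernel stack pointer at debugger entry, so the recorded value is only meaningful while kdb_trap() is still on the stack. Assuming nothing consults the marker after the debugger returns, one way to satisfy the checker is to invalidate the global before leaving the function, so that no stack address outlives the call. A minimal sketch of that shape (illustrative names, not a patch against this file):

    #include <stdint.h>

    static uintptr_t last_kdb_sp;            /* stands in for i386_last_kdb_sp */

    static int enter_debugger(int type)
    {
        last_kdb_sp = (uintptr_t) &type;     /* valid only while this frame is live */

        /* ... debugger work that may consult last_kdb_sp ... */

        last_kdb_sp = 0;                     /* drop the stack address before returning */
        return 1;
    }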
332
333/*
334 * Enter KDB through a keyboard trap.
335 * We show the registers as of the keyboard interrupt
336 * instead of those at its call to KDB.
337 */
338struct int_regs {
339 long edi;
340 long esi;
341 long ebp;
342 long ebx;
343 struct i386_interrupt_state *is;
344};
345
346void
347kdb_kentry(
348 struct int_regs *int_regs)
349{
350 struct i386_interrupt_state *is = int_regs->is;
351 spl_t s = splhigh();
352
353#if NCPUS1 > 1
354 if (db_enter())
355#endif /* NCPUS > 1 */
356 {
357 if ((is->cs & 0x3) != KERNEL_RING0) {
358 ddb_regs.uesp = ((int *)(is+1))[0];
359 ddb_regs.ss = ((int *)(is+1))[1];
360 }
361 else {
362 ddb_regs.ss = KERNEL_DS(0x10 | 0);
363 ddb_regs.uesp= (int)(is+1);
364 }
365 ddb_regs.efl = is->efl;
366 ddb_regs.cs = is->cs;
367 ddb_regs.eip = is->eip;
368 ddb_regs.eax = is->eax;
369 ddb_regs.ecx = is->ecx;
370 ddb_regs.edx = is->edx;
371 ddb_regs.ebx = int_regs->ebx;
372 ddb_regs.ebp = int_regs->ebp;
373 ddb_regs.esi = int_regs->esi;
374 ddb_regs.edi = int_regs->edi;
375 ddb_regs.ds = is->ds;
376 ddb_regs.es = is->es;
377 ddb_regs.fs = is->fs;
378 ddb_regs.gs = is->gs;
379
380 cnpollc(TRUE((boolean_t) 1));
381 db_task_trap(-1, 0, (ddb_regs.cs & 0x3) != 0);
382 cnpollc(FALSE((boolean_t) 0));
383
384 if ((ddb_regs.cs & 0x3) != KERNEL_RING0) {
385 ((int *)(is+1))[0] = ddb_regs.uesp;
386 ((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
387 }
388 is->efl = ddb_regs.efl;
389 is->cs = ddb_regs.cs & 0xffff;
390 is->eip = ddb_regs.eip;
391 is->eax = ddb_regs.eax;
392 is->ecx = ddb_regs.ecx;
393 is->edx = ddb_regs.edx;
394 int_regs->ebx = ddb_regs.ebx;
395 int_regs->ebp = ddb_regs.ebp;
396 int_regs->esi = ddb_regs.esi;
397 int_regs->edi = ddb_regs.edi;
398 is->ds = ddb_regs.ds & 0xffff;
399 is->es = ddb_regs.es & 0xffff;
400 is->fs = ddb_regs.fs & 0xffff;
401 is->gs = ddb_regs.gs & 0xffff;
402 }
403#if NCPUS1 > 1
404 db_leave();
405#endif /* NCPUS > 1 */
406
407 (void) splx(s);
408}
409
410boolean_t db_no_vm_fault = TRUE((boolean_t) 1);
411
412int
413db_user_to_kernel_address(
414 const task_t task,
415 vm_offset_t addr,
416 vm_offset_t *kaddr,
417 int flag)
418{
419 pt_entry_t *ptp;
420 boolean_t faulted = FALSE((boolean_t) 0);
421
422 retry:
423 ptp = pmap_pte(task->map->pmap, addr);
424 if (ptp == PT_ENTRY_NULL((pt_entry_t *) 0) || (*ptp & INTEL_PTE_VALID0x00000001) == 0) {
425 if (!faulted && !db_no_vm_fault) {
426 kern_return_t err;
427
428 faulted = TRUE((boolean_t) 1);
429 err = vm_fault( task->map,
430 trunc_page(addr),
431 VM_PROT_READ((vm_prot_t) 0x01),
432 FALSE((boolean_t) 0), FALSE((boolean_t) 0), 0);
433 if (err == KERN_SUCCESS0)
434 goto retry;
435 }
436 if (flag) {
437 db_printf("\nno memory is assigned to address %08x\n", addr);
438 db_error(0);
439 /* NOTREACHED */
440 }
441 return(-1);
442 }
443 *kaddr = ptetokv(*ptp)(((vm_offset_t)(((*ptp) & 0xfffff000)) + 0xC0000000UL)) + (addr & (INTEL_PGBYTES4096-1));
444 return(0);
445}
446
447/*
448 * Read bytes from kernel address space for debugger.
449 */
450
451void
452db_read_bytes(
453 vm_offset_t addr,
454 int size,
455 char *data,
456 task_t task)
457{
458 char *src;
459 int n;
460 vm_offset_t kern_addr;
461
462 src = (char *)addr;
463 if ((addr >= VM_MIN_KERNEL_ADDRESS0xC0000000UL && addr < VM_MAX_KERNEL_ADDRESS((0xffffffffUL) - ((0xc0000000UL)) + 0xC0000000UL)) || task == TASK_NULL((task_t) 0)) {
464 if (task == TASK_NULL((task_t) 0))
465 task = db_current_task();
466 while (--size >= 0) {
467 if (addr < VM_MIN_KERNEL_ADDRESS0xC0000000UL && task == TASK_NULL((task_t) 0)) {
468 db_printf("\nbad address %x\n", addr);
469 db_error(0);
470 /* NOTREACHED */
471 }
472 addr++;
473 *data++ = *src++;
474 }
475 return;
476 }
477 while (size > 0) {
478 if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
479 return;
480 src = (char *)kern_addr;
481 n = intel_trunc_page(addr+INTEL_PGBYTES)(((unsigned long)(addr+4096)) & ~(4096 -1)) - addr;
482 if (n > size)
483 n = size;
484 size -= n;
485 addr += n;
486 while (--n >= 0)
487 *data++ = *src++;
488 }
489}
490
491/*
492 * Write bytes to kernel address space for debugger.
493 */
494void
495db_write_bytes(
496 vm_offset_t addr,
497 int size,
498 char *data,
499 task_t task)
500{
501 char *dst;
502
503 pt_entry_t *ptep0 = 0;
504 pt_entry_t oldmap0 = 0;
505 vm_offset_t addr1;
506 pt_entry_t *ptep1 = 0;
507 pt_entry_t oldmap1 = 0;
508 extern char etext;
509
510 if ((addr < VM_MIN_KERNEL_ADDRESS0xC0000000UL) ^
511 ((addr + size) <= VM_MIN_KERNEL_ADDRESS0xC0000000UL)) {
512 db_error("\ncannot write data into mixed space\n");
513 /* NOTREACHED */
514 }
515 if (addr < VM_MIN_KERNEL_ADDRESS0xC0000000UL) {
516 if (task) {
517 db_write_bytes_user_space(addr, size, data, task);
518 return;
519 } else if (db_current_task() == TASK_NULL((task_t) 0)) {
520 db_printf("\nbad address %x\n", addr);
521 db_error(0);
522 /* NOTREACHED */
523 }
524 }
525
526 if (addr >= VM_MIN_KERNEL_ADDRESS0xC0000000UL &&
527 addr <= (vm_offset_t)&etext)
528 {
529 ptep0 = pmap_pte(kernel_pmap, addr);
530 oldmap0 = *ptep0;
531 *ptep0 |= INTEL_PTE_WRITE0x00000002;
532
533 addr1 = i386_trunc_page(addr + size - 1)(((unsigned long)(addr + size - 1)) & ~(4096 -1));
534 if (i386_trunc_page(addr)(((unsigned long)(addr)) & ~(4096 -1)) != addr1) {
535 /* data crosses a page boundary */
536
537 ptep1 = pmap_pte(kernel_pmap, addr1);
538 oldmap1 = *ptep1;
539 *ptep1 |= INTEL_PTE_WRITE0x00000002;
540 }
541 if (CPU_HAS_FEATURE(CPU_FEATURE_PGE)(cpu_features[(13) / 32] & (1 << ((13) % 32))))
542 set_cr4(get_cr4() & ~CR4_PGE);
543 flush_tlb();
544 }
545
546 dst = (char *)addr;
547
548 while (--size >= 0)
549 *dst++ = *data++;
550
551 if (ptep0) {
552 *ptep0 = oldmap0;
553 if (ptep1) {
554 *ptep1 = oldmap1;
555 }
556 flush_tlb();
557 if (CPU_HAS_FEATURE(CPU_FEATURE_PGE)(cpu_features[(13) / 32] & (1 << ((13) % 32))))
558 set_cr4(get_cr4() | CR4_PGE);
559 }
560}
561
562void
563db_write_bytes_user_space(
564 vm_offset_t addr,
565 int size,
566 char *data,
567 task_t task)
568{
569 char *dst;
570 int n;
571 vm_offset_t kern_addr;
572
573 while (size > 0) {
574 if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
575 return;
576 dst = (char *)kern_addr;
577 n = intel_trunc_page(addr+INTEL_PGBYTES)(((unsigned long)(addr+4096)) & ~(4096 -1)) - addr;
578 if (n > size)
579 n = size;
580 size -= n;
581 addr += n;
582 while (--n >= 0)
583 *dst++ = *data++;
584 }
585}
586
587boolean_t
588db_check_access(
589 vm_offset_t addr,
590 int size,
591 task_t task)
592{
593 int n;
594 vm_offset_t kern_addr;
595
596 if (addr >= VM_MIN_KERNEL_ADDRESS0xC0000000UL) {
597 if (kernel_task == TASK_NULL((task_t) 0))
598 return TRUE((boolean_t) 1);
599 task = kernel_task;
600 } else if (task == TASK_NULL((task_t) 0)) {
601 if (current_thread()(active_threads[(0)]) == THREAD_NULL((thread_t) 0))
602 return FALSE((boolean_t) 0);
603 task = current_thread()(active_threads[(0)])->task;
604 }
605 while (size > 0) {
606 if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0)
607 return FALSE((boolean_t) 0);
608 n = intel_trunc_page(addr+INTEL_PGBYTES)(((unsigned long)(addr+4096)) & ~(4096 -1)) - addr;
609 if (n > size)
610 n = size;
611 size -= n;
612 addr += n;
613 }
614 return TRUE((boolean_t) 1);
615}
616
617boolean_t
618db_phys_eq(
619 task_t task1,
620 vm_offset_t addr1,
621 const task_t task2,
622 vm_offset_t addr2)
623{
624 vm_offset_t kern_addr1, kern_addr2;
625
626 if (addr1 >= VM_MIN_KERNEL_ADDRESS0xC0000000UL || addr2 >= VM_MIN_KERNEL_ADDRESS0xC0000000UL)
627 return FALSE((boolean_t) 0);
628 if ((addr1 & (INTEL_PGBYTES4096-1)) != (addr2 & (INTEL_PGBYTES4096-1)))
629 return FALSE((boolean_t) 0);
630 if (task1 == TASK_NULL((task_t) 0)) {
631 if (current_thread()(active_threads[(0)]) == THREAD_NULL((thread_t) 0))
632 return FALSE((boolean_t) 0);
633 task1 = current_thread()(active_threads[(0)])->task;
634 }
635 if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0
636 || db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0)
637 return FALSE((boolean_t) 0);
638 return(kern_addr1 == kern_addr2);
639}
640
641#define DB_USER_STACK_ADDR(0xC0000000UL) (VM_MIN_KERNEL_ADDRESS0xC0000000UL)
642#define DB_NAME_SEARCH_LIMIT((0xC0000000UL)-(4096*3)) (DB_USER_STACK_ADDR(0xC0000000UL)-(INTEL_PGBYTES4096*3))
643
644#define GNU
645
646#ifndef GNU
647static boolean_t
648db_search_null(
649 const task_t task,
650 vm_offset_t *svaddr,
651 vm_offset_t evaddr,
652 vm_offset_t *skaddr,
653 int flag)
654{
655 unsigned vaddr;
656 unsigned *kaddr;
657
658 kaddr = (unsigned *)*skaddr;
659 for (vaddr = *svaddr; vaddr > evaddr; ) {
660 if (vaddr % INTEL_PGBYTES4096 == 0) {
661 vaddr -= sizeof(unsigned);
662 if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
663 return FALSE((boolean_t) 0);
664 kaddr = (vm_offset_t *)*skaddr;
665 } else {
666 vaddr -= sizeof(unsigned);
667 kaddr--;
668 }
669 if ((*kaddr == 0) ^ (flag == 0)) {
670 *svaddr = vaddr;
671 *skaddr = (unsigned)kaddr;
672 return TRUE((boolean_t) 1);
673 }
674 }
675 return FALSE((boolean_t) 0);
676}
677#endif /* GNU */
678
679#ifdef GNU
680static boolean_t
681looks_like_command(
682 const task_t task,
683 char* kaddr)
684{
685 char *c;
686
687 assert(!((vm_offset_t) kaddr & (INTEL_PGBYTES-1)));
688
689 /*
690 * Must be the environment.
691 */
692 if (!memcmp(kaddr, "PATH=", 5) || !memcmp(kaddr, "TERM=", 5) || !memcmp(kaddr, "SHELL=", 6) || !memcmp(kaddr, "LOCAL_PART=", 11) || !memcmp(kaddr, "LC_ALL=", 7))
693 return FALSE((boolean_t) 0);
694
695 /*
696 * This is purely heuristical but works quite nicely.
697 * We know that it should look like words separated by \0, and
698 * eventually only \0s.
699 */
700 c = kaddr;
701 while (c < kaddr + INTEL_PGBYTES4096) {
702 if (!*c) {
703 if (c == kaddr)
704 /* Starts by \0. */
705 return FALSE((boolean_t) 0);
706 break;
707 }
708 while (c < kaddr + INTEL_PGBYTES4096 && *c)
709 c++;
710 if (c < kaddr + INTEL_PGBYTES4096)
711 c++; /* Skip \0 */
712 }
713 /*
714 * Check that the remainder is just \0s.
715 */
716 while (c < kaddr + INTEL_PGBYTES4096)
717 if (*c++)
718 return FALSE((boolean_t) 0);
719
720 return TRUE((boolean_t) 1);
721}
722#endif /* GNU */
723
724void
725db_task_name(
726 const task_t task)
727{
728 char *p;
729 int n;
730 vm_offset_t vaddr, kaddr;
731 unsigned sp;
732
733 if (task->name[0]) {
734 db_printf("%s", task->name);
735 return;
736 }
737
738#ifdef GNU
739 /*
740 * GNU Hurd-specific heuristics.
741 */
742
743 /* Heuristical address first. */
744 vaddr = 0x1026000;
745 if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) >= 0 &&
746 looks_like_command(task, (char*) kaddr))
747 goto ok;
748
749 /* Try to catch SP of the main thread. */
750 thread_t thread;
751
752 task_lock(task);
753 thread = (thread_t) queue_first(&task->thread_list)((&task->thread_list)->next);
754 if (!thread) {
755 task_unlock(task)((void)(&(task)->lock));
756 db_printf(DB_NULL_TASK_NAME"? ");
757 return;
758 }
759 sp = thread->pcb->iss.uesp;
760 task_unlock(task)((void)(&(task)->lock));
761
762 vaddr = (sp & ~(INTEL_PGBYTES4096 - 1)) + INTEL_PGBYTES4096;
763 while (1) {
764 if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) < 0)
765 return;
766 if (looks_like_command(task, (char*) kaddr))
767 break;
768 vaddr += INTEL_PGBYTES4096;
769 }
770#else /* GNU */
771 vaddr = DB_USER_STACK_ADDR(0xC0000000UL);
772 kaddr = 0;
773
774 /*
775 * skip nulls at the end
776 */
777 if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT((0xC0000000UL)-(4096*3)), &kaddr, 0)) {
778 db_printf(DB_NULL_TASK_NAME"? ");
779 return;
780 }
781 /*
782 * search start of args
783 */
784 if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT((0xC0000000UL)-(4096*3)), &kaddr, 1)) {
785 db_printf(DB_NULL_TASK_NAME"? ");
786 return;
787 }
788#endif /* GNU */
789
790ok:
791 n = DB_TASK_NAME_LEN23-1;
792#ifdef GNU
793 p = (char *)kaddr;
794 for (; n > 0; vaddr++, p++, n--) {
795#else /* GNU */
796 p = (char *)kaddr + sizeof(unsigned);
797 for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR(0xC0000000UL) && n > 0;
798 vaddr++, p++, n--) {
799#endif /* GNU */
800 if (vaddr % INTEL_PGBYTES4096 == 0) {
801 (void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
802 p = (char*)kaddr;
803 }
804 db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p);
805 }
806 while (n-- >= 0) /* compare with >= 0 for one more space */
807 db_printf(" ");
808}
809
810#endif /* MACH_KDB */