Bug Summary

File: obj-scan-build/../i386/i386/db_interface.c
Location: line 330, column 1
Description: Address of stack memory associated with local variable 'type' is still referred to by the global variable 'i386_last_kdb_sp' upon returning to the caller. This will be a dangling reference.

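Editorial note: the flagged statement is at source line 280 inside kdb_trap(), which stores the address of its own parameter 'type' into the file-scope global i386_last_kdb_sp and then returns at line 330, leaving the global holding an address inside a stack frame that no longer exists. The global is declared as a plain unsigned and nothing in this file dereferences it (it appears to be an integer snapshot of the stack position at KDB entry), but the analyzer cannot see that and reports the escaped stack address. Below is a minimal sketch of the pattern being reported, using hypothetical names that only mirror the code in the listing; a possible mitigation is sketched after the listing.

    /* Sketch of the flagged pattern: 'saved_sp' plays the role of
     * i386_last_kdb_sp, 'enter' plays the role of kdb_trap(). */
    static unsigned saved_sp;

    int enter(int type)
    {
            /* The parameter 'type' lives in enter()'s stack frame; storing
             * its address in a global lets it outlive that frame (the cast
             * to unsigned mirrors the 32-bit source). */
            saved_sp = (unsigned) &type;
            return 1;
    }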
Annotated Source Code

1/*
2 * Mach Operating System
3 * Copyright (c) 1993,1992,1991,1990 Carnegie Mellon University
4 * All Rights Reserved.
5 *
6 * Permission to use, copy, modify and distribute this software and its
7 * documentation is hereby granted, provided that both the copyright
8 * notice and this permission notice appear in all copies of the
9 * software, derivative works or modified versions, and any portions
10 * thereof, and that both notices appear in supporting documentation.
11 *
12 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
13 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
14 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
15 *
16 * Carnegie Mellon requests users of this software to return to
17 *
18 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
19 * School of Computer Science
20 * Carnegie Mellon University
21 * Pittsburgh PA 15213-3890
22 *
23 * any improvements or extensions that they make and grant Carnegie Mellon
24 * the rights to redistribute these changes.
25 */
26/*
27 * Interface to new debugger.
28 */
29
30#include <string.h>
31#include <sys/reboot.h>
32#include <vm/pmap.h>
33
34#include <i386/thread.h>
35#include <i386/db_machdep.h>
36#include <i386/seg.h>
37#include <i386/trap.h>
38#include <i386/setjmp.h>
39#include <i386/pmap.h>
40#include <i386/proc_reg.h>
41#include <i386/locore.h>
42#include "gdt.h"
43#include "trap.h"
44
45#include "vm_param.h"
46#include <vm/vm_map.h>
47#include <vm/vm_fault.h>
48#include <kern/cpu_number.h>
49#include <kern/printf.h>
50#include <kern/thread.h>
51#include <kern/task.h>
52#include <ddb/db_access.h>
53#include <ddb/db_command.h>
54#include <ddb/db_output.h>
55#include <ddb/db_run.h>
56#include <ddb/db_task_thread.h>
57#include <ddb/db_trap.h>
58#include <ddb/db_watch.h>
59#include <machine/db_interface.h>
60#include <machine/machspl.h>
61
62#if MACH_KDB
63/* Whether the kernel uses any debugging register. */
64static boolean_t kernel_dr;
65#endif
66
67void db_load_context(pcb_t pcb)
68{
69#if MACH_KDB
70 int s = splhigh();
71
72 if (kernel_dr) {
73 splx(s);
74 return;
75 }
76#endif
77
78 /* Else set user debug registers */
79 set_dr0(pcb->ims.ids.dr[0]);
80 set_dr1(pcb->ims.ids.dr[1]);
81 set_dr2(pcb->ims.ids.dr[2]);
82 set_dr3(pcb->ims.ids.dr[3]);
83 set_dr7(pcb->ims.ids.dr[7]);
84#if MACH_KDB
85 splx(s);
86#endif
87}
88
89void db_get_debug_state(
90 pcb_t pcb,
91 struct i386_debug_state *state)
92{
93 *state = pcb->ims.ids;
94}
95
96kern_return_t db_set_debug_state(
97 pcb_t pcb,
98 const struct i386_debug_state *state)
99{
100 int i;
101
102 for (i = 0; i <= 3; i++)
103 if (state->dr[i] < VM_MIN_ADDRESS
104 || state->dr[i] >= VM_MAX_ADDRESS)
105 return KERN_INVALID_ARGUMENT;
106
107 pcb->ims.ids = *state;
108
109 if (pcb == current_thread()->pcb)
110 db_load_context(pcb);
111
112 return KERN_SUCCESS;
113}
114
115#if MACH_KDB
116
117struct i386_saved_state *i386_last_saved_statep;
118struct i386_saved_state i386_nested_saved_state;
119unsigned i386_last_kdb_sp;
120
121extern thread_t db_default_thread;
122
123static struct i386_debug_state ids;
124
125void db_dr (
126 int num,
127 vm_offset_t linear_addr,
128 int type,
129 int len,
130 int persistence)
131{
132 int s = splhigh();
133 unsigned long dr7;
134
135 if (!kernel_dr) {
136 if (!linear_addr) {
137 splx(s);
138 return;
139 }
140 kernel_dr = TRUE;
141 /* Clear user debugging registers */
142 set_dr7(0);
143 set_dr0(0);
144 set_dr1(0);
145 set_dr2(0);
146 set_dr3(0);
147 }
148
149 ids.dr[num] = linear_addr;
150 switch (num) {
151 case 0: set_dr0(linear_addr); break;
152 case 1: set_dr1(linear_addr); break;
153 case 2: set_dr2(linear_addr); break;
154 case 3: set_dr3(linear_addr); break;
155 }
156
157 /* Replace type/len/persistence for DRnum in dr7 */
158 dr7 = get_dr7 ();
159 dr7 &= ~(0xfUL << (4*num+16)) & ~(0x3UL << (2*num));
160 dr7 |= (((len << 2) | type) << (4*num+16)) | (persistence << (2*num));
161 set_dr7 (dr7);
162
163 if (kernel_dr) {
164 if (!ids.dr[0] && !ids.dr[1] && !ids.dr[2] && !ids.dr[3]) {
165 /* Not used any more, switch back to user debugging registers */
166 kernel_dr = FALSE;
167 db_load_context(current_thread()->pcb);
168 }
169 }
170 splx(s);
171}
172
173boolean_t
174db_set_hw_watchpoint(
175 const db_watchpoint_t watch,
176 unsigned num)
177{
178 vm_size_t size = watch->hiaddr - watch->loaddr;
179 db_addr_t addr = watch->loaddr;
180 vm_offset_t kern_addr;
181
182 if (num >= 4)
183 return FALSE;
184 if (size != 1 && size != 2 && size != 4)
185 return FALSE;
186
187 if (addr & (size-1))
188 /* Unaligned */
189 return FALSE;
190
191 if (watch->task) {
192 if (db_user_to_kernel_address(watch->task, addr, &kern_addr, 1) < 0)
193 return FALSE;
194 addr = kern_addr;
195 }
196 addr = kvtolin(addr);
197
198 db_dr (num, addr, I386_DB_TYPE_W, size-1, I386_DB_LOCAL|I386_DB_GLOBAL);
199
200 db_printf("Hardware watchpoint %d set for %x\n", num, addr);
201 return TRUE;
202}
203
204boolean_t
205db_clear_hw_watchpoint(
206 unsigned num)
207{
208 if (num >= 4)
209 return FALSE;
210
211 db_dr (num, 0, 0, 0, 0);
212 return TRUE;
213}
214
215/*
216 * Print trap reason.
217 */
218void
219kdbprinttrap(type, code)
220 int type, code;
221{
222 printf("kernel: %s (%d), code=%x\n",
223 trap_name(type), type, code);
224}
225
226/*
227 * kdb_trap - field a TRACE or BPT trap
228 */
229
230extern jmp_buf_t *db_recover;
231spl_t saved_ipl[NCPUS]; /* just to know what was IPL before trap */
232
233boolean_t
234kdb_trap(
235 int type,
236 int code,
237 struct i386_saved_state *regs)
238{
239 spl_t s;
240
241 s = splhigh();
242 saved_ipl[cpu_number()] = s;
243
244 switch (type) {
1. Control jumps to 'case -1:' at line 261
245 case T_DEBUG: /* single_step */
246 {
247 int addr;
248 int status = get_dr6();
249
250 if (status & 0xf) { /* hmm hdw break */
251 addr = status & 0x8 ? get_dr3() :
252 status & 0x4 ? get_dr2() :
253 status & 0x2 ? get_dr1() :
254 get_dr0();
255 regs->efl |= EFL_RF;
256 db_single_step_cmd(addr, 0, 1, "p");
257 }
258 }
259 case T_INT3: /* breakpoint */
260 case T_WATCHPOINT: /* watchpoint */
261 case -1: /* keyboard interrupt */
262 break;
2. Execution continues on line 279
263
264 default:
265 if (db_recover) {
266 i386_nested_saved_state = *regs;
267 db_printf("Caught %s (%d), code = %x, pc = %x\n",
268 trap_name(type), type, code, regs->eip);
269 db_error("");
270 /*NOTREACHED*/
271 }
272 kdbprinttrap(type, code);
273 }
274
275#if NCPUS > 1
276 if (db_enter())
277#endif /* NCPUS > 1 */
278 {
279 i386_last_saved_statep = regs;
280 i386_last_kdb_sp = (unsigned) &type;
281
282 /* XXX Should switch to ddb`s own stack here. */
283
284 ddb_regs = *regs;
285 if ((regs->cs & 0x3) == KERNEL_RING) {
3. Taking false branch
286 /*
287 * Kernel mode - esp and ss not saved
288 */
289 ddb_regs.uesp = (int)&regs->uesp; /* kernel stack pointer */
290 ddb_regs.ss = KERNEL_DS;
291 }
292
293 cnpollc(TRUE);
294 db_task_trap(type, code, (regs->cs & 0x3) != 0);
295 cnpollc(FALSE);
296
297 regs->eip = ddb_regs.eip;
298 regs->efl = ddb_regs.efl;
299 regs->eax = ddb_regs.eax;
300 regs->ecx = ddb_regs.ecx;
301 regs->edx = ddb_regs.edx;
302 regs->ebx = ddb_regs.ebx;
303 if ((regs->cs & 0x3) != KERNEL_RING) {
4. Taking true branch
304 /*
305 * user mode - saved esp and ss valid
306 */
307 regs->uesp = ddb_regs.uesp; /* user stack pointer */
308 regs->ss = ddb_regs.ss & 0xffff; /* user stack segment */
309 }
310 regs->ebp = ddb_regs.ebp;
311 regs->esi = ddb_regs.esi;
312 regs->edi = ddb_regs.edi;
313 regs->es = ddb_regs.es & 0xffff;
314 regs->cs = ddb_regs.cs & 0xffff;
315 regs->ds = ddb_regs.ds & 0xffff;
316 regs->fs = ddb_regs.fs & 0xffff;
317 regs->gs = ddb_regs.gs & 0xffff;
318
319 if ((type == T_INT3) &&
320 (db_get_task_value(regs->eip, BKPT_SIZE, FALSE, TASK_NULL)
321 == BKPT_INST))
322 regs->eip += BKPT_SIZE;
323 }
324#if NCPUS > 1
325 db_leave();
326#endif /* NCPUS > 1 */
327
328 splx(s);
329 return 1;
330}
5. Address of stack memory associated with local variable 'type' is still referred to by the global variable 'i386_last_kdb_sp' upon returning to the caller. This will be a dangling reference.
331
332/*
333 * Enter KDB through a keyboard trap.
334 * We show the registers as of the keyboard interrupt
335 * instead of those at its call to KDB.
336 */
337struct int_regs {
338 long edi;
339 long esi;
340 long ebp;
341 long ebx;
342 struct i386_interrupt_state *is;
343};
344
345void
346kdb_kentry(
347 struct int_regs *int_regs)
348{
349 struct i386_interrupt_state *is = int_regs->is;
350 spl_t s = splhigh();
351
352#if NCPUS > 1
353 if (db_enter())
354#endif /* NCPUS > 1 */
355 {
356 if ((is->cs & 0x3) != KERNEL_RING) {
357 ddb_regs.uesp = ((int *)(is+1))[0];
358 ddb_regs.ss = ((int *)(is+1))[1];
359 }
360 else {
361 ddb_regs.ss = KERNEL_DS;
362 ddb_regs.uesp= (int)(is+1);
363 }
364 ddb_regs.efl = is->efl;
365 ddb_regs.cs = is->cs;
366 ddb_regs.eip = is->eip;
367 ddb_regs.eax = is->eax;
368 ddb_regs.ecx = is->ecx;
369 ddb_regs.edx = is->edx;
370 ddb_regs.ebx = int_regs->ebx;
371 ddb_regs.ebp = int_regs->ebp;
372 ddb_regs.esi = int_regs->esi;
373 ddb_regs.edi = int_regs->edi;
374 ddb_regs.ds = is->ds;
375 ddb_regs.es = is->es;
376 ddb_regs.fs = is->fs;
377 ddb_regs.gs = is->gs;
378
379 cnpollc(TRUE);
380 db_task_trap(-1, 0, (ddb_regs.cs & 0x3) != 0);
381 cnpollc(FALSE);
382
383 if ((ddb_regs.cs & 0x3) != KERNEL_RING) {
384 ((int *)(is+1))[0] = ddb_regs.uesp;
385 ((int *)(is+1))[1] = ddb_regs.ss & 0xffff;
386 }
387 is->efl = ddb_regs.efl;
388 is->cs = ddb_regs.cs & 0xffff;
389 is->eip = ddb_regs.eip;
390 is->eax = ddb_regs.eax;
391 is->ecx = ddb_regs.ecx;
392 is->edx = ddb_regs.edx;
393 int_regs->ebx = ddb_regs.ebx;
394 int_regs->ebp = ddb_regs.ebp;
395 int_regs->esi = ddb_regs.esi;
396 int_regs->edi = ddb_regs.edi;
397 is->ds = ddb_regs.ds & 0xffff;
398 is->es = ddb_regs.es & 0xffff;
399 is->fs = ddb_regs.fs & 0xffff;
400 is->gs = ddb_regs.gs & 0xffff;
401 }
402#if NCPUS > 1
403 db_leave();
404#endif /* NCPUS > 1 */
405
406 (void) splx(s);
407}
408
409boolean_t db_no_vm_fault = TRUE;
410
411int
412db_user_to_kernel_address(
413 const task_t task,
414 vm_offset_t addr,
415 vm_offset_t *kaddr,
416 int flag)
417{
418 pt_entry_t *ptp;
419 boolean_t faulted = FALSE((boolean_t) 0);
420
421 retry:
422 ptp = pmap_pte(task->map->pmap, addr);
423 if (ptp == PT_ENTRY_NULL || (*ptp & INTEL_PTE_VALID) == 0) {
424 if (!faulted && !db_no_vm_fault) {
425 kern_return_t err;
426
427 faulted = TRUE;
428 err = vm_fault( task->map,
429 trunc_page(addr),
430 VM_PROT_READ,
431 FALSE, FALSE, 0);
432 if (err == KERN_SUCCESS)
433 goto retry;
434 }
435 if (flag) {
436 db_printf("\nno memory is assigned to address %08x\n", addr);
437 db_error(0);
438 /* NOTREACHED */
439 }
440 return(-1);
441 }
442 *kaddr = ptetokv(*ptp) + (addr & (INTEL_PGBYTES-1));
443 return(0);
444}
445
446/*
447 * Read bytes from kernel address space for debugger.
448 */
449
450void
451db_read_bytes(
452 vm_offset_t addr,
453 int size,
454 char *data,
455 task_t task)
456{
457 char *src;
458 int n;
459 vm_offset_t kern_addr;
460
461 src = (char *)addr;
462 if ((addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) || task == TASK_NULL) {
463 if (task == TASK_NULL)
464 task = db_current_task();
465 while (--size >= 0) {
466 if (addr < VM_MIN_KERNEL_ADDRESS && task == TASK_NULL) {
467 db_printf("\nbad address %x\n", addr);
468 db_error(0);
469 /* NOTREACHED */
470 }
471 addr++;
472 *data++ = *src++;
473 }
474 return;
475 }
476 while (size > 0) {
477 if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
478 return;
479 src = (char *)kern_addr;
480 n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
481 if (n > size)
482 n = size;
483 size -= n;
484 addr += n;
485 while (--n >= 0)
486 *data++ = *src++;
487 }
488}
489
490/*
491 * Write bytes to kernel address space for debugger.
492 */
493void
494db_write_bytes(
495 vm_offset_t addr,
496 int size,
497 char *data,
498 task_t task)
499{
500 char *dst;
501
502 pt_entry_t *ptep0 = 0;
503 pt_entry_t oldmap0 = 0;
504 vm_offset_t addr1;
505 pt_entry_t *ptep1 = 0;
506 pt_entry_t oldmap1 = 0;
507 extern char etext;
508
509 if ((addr < VM_MIN_KERNEL_ADDRESS) ^
510 ((addr + size) <= VM_MIN_KERNEL_ADDRESS)) {
511 db_error("\ncannot write data into mixed space\n");
512 /* NOTREACHED */
513 }
514 if (addr < VM_MIN_KERNEL_ADDRESS) {
515 if (task) {
516 db_write_bytes_user_space(addr, size, data, task);
517 return;
518 } else if (db_current_task() == TASK_NULL) {
519 db_printf("\nbad address %x\n", addr);
520 db_error(0);
521 /* NOTREACHED */
522 }
523 }
524
525 if (addr >= VM_MIN_KERNEL_ADDRESS &&
526 addr <= (vm_offset_t)&etext)
527 {
528 ptep0 = pmap_pte(kernel_pmap, addr);
529 oldmap0 = *ptep0;
530 *ptep0 |= INTEL_PTE_WRITE;
531
532 addr1 = i386_trunc_page(addr + size - 1);
533 if (i386_trunc_page(addr) != addr1) {
534 /* data crosses a page boundary */
535
536 ptep1 = pmap_pte(kernel_pmap, addr1);
537 oldmap1 = *ptep1;
538 *ptep1 |= INTEL_PTE_WRITE;
539 }
540 if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
541 set_cr4(get_cr4() & ~CR4_PGE);
542 flush_tlb();
543 }
544
545 dst = (char *)addr;
546
547 while (--size >= 0)
548 *dst++ = *data++;
549
550 if (ptep0) {
551 *ptep0 = oldmap0;
552 if (ptep1) {
553 *ptep1 = oldmap1;
554 }
555 flush_tlb();
556 if (CPU_HAS_FEATURE(CPU_FEATURE_PGE))
557 set_cr4(get_cr4() | CR4_PGE);
558 }
559}
560
561void
562db_write_bytes_user_space(
563 vm_offset_t addr,
564 int size,
565 char *data,
566 task_t task)
567{
568 char *dst;
569 int n;
570 vm_offset_t kern_addr;
571
572 while (size > 0) {
573 if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
574 return;
575 dst = (char *)kern_addr;
576 n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
577 if (n > size)
578 n = size;
579 size -= n;
580 addr += n;
581 while (--n >= 0)
582 *dst++ = *data++;
583 }
584}
585
586boolean_t
587db_check_access(
588 vm_offset_t addr,
589 int size,
590 task_t task)
591{
592 int n;
593 vm_offset_t kern_addr;
594
595 if (addr >= VM_MIN_KERNEL_ADDRESS) {
596 if (kernel_task == TASK_NULL)
597 return TRUE;
598 task = kernel_task;
599 } else if (task == TASK_NULL) {
600 if (current_thread() == THREAD_NULL)
601 return FALSE;
602 task = current_thread()->task;
603 }
604 while (size > 0) {
605 if (db_user_to_kernel_address(task, addr, &kern_addr, 0) < 0)
606 return FALSE;
607 n = intel_trunc_page(addr+INTEL_PGBYTES) - addr;
608 if (n > size)
609 n = size;
610 size -= n;
611 addr += n;
612 }
613 return TRUE;
614}
615
616boolean_t
617db_phys_eq(
618 task_t task1,
619 vm_offset_t addr1,
620 const task_t task2,
621 vm_offset_t addr2)
622{
623 vm_offset_t kern_addr1, kern_addr2;
624
625 if (addr1 >= VM_MIN_KERNEL_ADDRESS || addr2 >= VM_MIN_KERNEL_ADDRESS)
626 return FALSE;
627 if ((addr1 & (INTEL_PGBYTES-1)) != (addr2 & (INTEL_PGBYTES-1)))
628 return FALSE;
629 if (task1 == TASK_NULL) {
630 if (current_thread() == THREAD_NULL)
631 return FALSE;
632 task1 = current_thread()->task;
633 }
634 if (db_user_to_kernel_address(task1, addr1, &kern_addr1, 0) < 0
635 || db_user_to_kernel_address(task2, addr2, &kern_addr2, 0) < 0)
636 return FALSE;
637 return(kern_addr1 == kern_addr2);
638}
639
640#define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS)
641#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
642
643#define GNU
644
645#ifndef GNU
646static boolean_t
647db_search_null(
648 const task_t task,
649 vm_offset_t *svaddr,
650 vm_offset_t evaddr,
651 vm_offset_t *skaddr,
652 int flag)
653{
654 unsigned vaddr;
655 unsigned *kaddr;
656
657 kaddr = (unsigned *)*skaddr;
658 for (vaddr = *svaddr; vaddr > evaddr; ) {
659 if (vaddr % INTEL_PGBYTES == 0) {
660 vaddr -= sizeof(unsigned);
661 if (db_user_to_kernel_address(task, vaddr, skaddr, 0) < 0)
662 return FALSE;
663 kaddr = (vm_offset_t *)*skaddr;
664 } else {
665 vaddr -= sizeof(unsigned);
666 kaddr--;
667 }
668 if ((*kaddr == 0) ^ (flag == 0)) {
669 *svaddr = vaddr;
670 *skaddr = (unsigned)kaddr;
671 return TRUE;
672 }
673 }
674 return FALSE;
675}
676#endif /* GNU */
677
678#ifdef GNU
679static boolean_t
680looks_like_command(
681 const task_t task,
682 char* kaddr)
683{
684 char *c;
685
686 assert(!((vm_offset_t) kaddr & (INTEL_PGBYTES-1)));
687
688 /*
689 * Must be the environment.
690 */
691 if (!memcmp(kaddr, "PATH=", 5) || !memcmp(kaddr, "TERM=", 5) || !memcmp(kaddr, "SHELL=", 6) || !memcmp(kaddr, "LOCAL_PART=", 11) || !memcmp(kaddr, "LC_ALL=", 7))
692 return FALSE((boolean_t) 0);
693
694 /*
695 * This is purely heuristical but works quite nicely.
696 * We know that it should look like words separated by \0, and
697 * eventually only \0s.
698 */
699 c = kaddr;
700 while (c < kaddr + INTEL_PGBYTES) {
701 if (!*c) {
702 if (c == kaddr)
703 /* Starts by \0. */
704 return FALSE((boolean_t) 0);
705 break;
706 }
707 while (c < kaddr + INTEL_PGBYTES && *c)
708 c++;
709 if (c < kaddr + INTEL_PGBYTES)
710 c++; /* Skip \0 */
711 }
712 /*
713 * Check that the remainder is just \0s.
714 */
715 while (c < kaddr + INTEL_PGBYTES)
716 if (*c++)
717 return FALSE((boolean_t) 0);
718
719 return TRUE;
720}
721#endif /* GNU */
722
723void
724db_task_name(
725 const task_t task)
726{
727 char *p;
728 int n;
729 vm_offset_t vaddr, kaddr;
730 unsigned sp;
731
732 if (task->map->pmap == kernel_pmap) {
733 db_printf(DB_GNUMACH_TASK_NAME);
734 return;
735 }
736
737#ifdef GNU
738 /*
739 * GNU Hurd-specific heuristics.
740 */
741
742 /* Heuristical address first. */
743 vaddr = 0x1026000;
744 if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) >= 0 &&
745 looks_like_command(task, (char*) kaddr))
746 goto ok;
747
748 /* Try to catch SP of the main thread. */
749 thread_t thread;
750
751 task_lock(task);
752 thread = (thread_t) queue_first(&task->thread_list);
753 if (!thread) {
754 task_unlock(task);
755 db_printf(DB_NULL_TASK_NAME);
756 return;
757 }
758 sp = thread->pcb->iss.uesp;
759 task_unlock(task);
760
761 vaddr = (sp & ~(INTEL_PGBYTES - 1)) + INTEL_PGBYTES;
762 while (1) {
763 if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) < 0)
764 return;
765 if (looks_like_command(task, (char*) kaddr))
766 break;
767 vaddr += INTEL_PGBYTES;
768 }
769#else /* GNU */
770 vaddr = DB_USER_STACK_ADDR;
771 kaddr = 0;
772
773 /*
774 * skip nulls at the end
775 */
776 if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 0)) {
777 db_printf(DB_NULL_TASK_NAME);
778 return;
779 }
780 /*
781 * search start of args
782 */
783 if (!db_search_null(task, &vaddr, DB_NAME_SEARCH_LIMIT, &kaddr, 1)) {
784 db_printf(DB_NULL_TASK_NAME);
785 return;
786 }
787#endif /* GNU */
788
789ok:
790 n = DB_TASK_NAME_LEN-1;
791#ifdef GNU
792 p = (char *)kaddr;
793 for (; n > 0; vaddr++, p++, n--) {
794#else /* GNU */
795 p = (char *)kaddr + sizeof(unsigned);
796 for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
797 vaddr++, p++, n--) {
798#endif /* GNU */
799 if (vaddr % INTEL_PGBYTES == 0) {
800 (void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
801 p = (char*)kaddr;
802 }
803 db_printf("%c", (*p < ' ' || *p > '~')? ' ': *p);
804 }
805 while (n-- >= 0) /* compare with >= 0 for one more space */
806 db_printf(" ");
807}
808
809#endif /* MACH_KDB */
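
Possible mitigation (editorial note, not part of the analyzer output): if the only purpose of i386_last_kdb_sp is to remember roughly where the kernel stack was when KDB was entered, the snapshot can be taken without letting the address of a local variable escape, which is what the checker tracks. The sketch below is written under that assumption; current_sp() and the _alt names are illustrative, not the maintainers' fix, and the recorded value differs slightly from &type while still identifying the kdb_trap() frame.

    /* Sketch: record the stack position by reading %esp directly instead of
     * taking the address of a parameter (GCC extended asm, i386 only). */
    static inline unsigned current_sp(void)
    {
            unsigned sp;
            asm volatile("movl %%esp, %0" : "=r" (sp));
            return sp;
    }

    unsigned i386_last_kdb_sp_alt;      /* stand-in for i386_last_kdb_sp */

    void record_kdb_entry(void)
    {
            /* The global now holds only an integer read from the register;
             * no pointer to a local variable is stored, so nothing dangles. */
            i386_last_kdb_sp_alt = current_sp();
    }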