-rw-r--r--  i386/i386/db_interface.c                  2
-rw-r--r--  i386/i386at/model_dep.c                   8
-rw-r--r--  i386/intel/pmap.c                         4
-rw-r--r--  i386/linux/dev/include/linux/autoconf.h   4
-rw-r--r--  kern/lock.c                               4
-rw-r--r--  kern/zalloc.c                            14
-rw-r--r--  vm/vm_map.c                               6
7 files changed, 30 insertions, 12 deletions
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
index bd43b1f..29f03c9 100644
--- a/i386/i386/db_interface.c
+++ b/i386/i386/db_interface.c
@@ -311,7 +311,7 @@ db_read_bytes(
 	unsigned kern_addr;
 
 	src = (char *)addr;
-	if (addr >= VM_MIN_KERNEL_ADDRESS || task == TASK_NULL) {
+	if ((addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) || task == TASK_NULL) {
 		if (task == TASK_NULL)
 			task = db_current_task();
 		while (--size >= 0) {
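This change tightens the test that lets db_read_bytes() dereference an address directly: the address now also has to lie below VM_MAX_KERNEL_ADDRESS, so anything outside the kernel window goes through the per-task translation path (note the kern_addr variable in the context) instead of being read blindly. A minimal standalone sketch of the new predicate, with made-up placeholder bounds standing in for VM_MIN_KERNEL_ADDRESS and VM_MAX_KERNEL_ADDRESS:

    #include <stdio.h>

    /* Placeholder bounds; the real code uses VM_MIN_KERNEL_ADDRESS and
     * VM_MAX_KERNEL_ADDRESS from the i386 headers. */
    #define KERNEL_MIN 0xc0000000ul
    #define KERNEL_MAX 0xfffff000ul

    /* Hypothetical helper mirroring the new inline test. */
    static int addr_is_mapped_kernel(unsigned long addr)
    {
        return addr >= KERNEL_MIN && addr < KERNEL_MAX;
    }

    int main(void)
    {
        printf("kernel-range address: %d\n", addr_is_mapped_kernel(0xc0100000ul));
        printf("user address:         %d\n", addr_is_mapped_kernel(0x08048000ul));
        return 0;
    }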
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 2426cde..b9fb7c0 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -220,8 +220,10 @@ mem_size_init(void)
 	phys_last_kb = 0x400 + boot_info.mem_upper;
 	/* Avoid 4GiB overflow. */
-	if (phys_last_kb < 0x400 || phys_last_kb >= 0x400000)
+	if (phys_last_kb < 0x400 || phys_last_kb >= 0x400000) {
+		printf("Truncating memory size to 4GiB\n");
 		phys_last_kb = 0x400000 - 1;
+	}
 	phys_last_addr = phys_last_kb * 0x400;
 	avail_remaining
@@ -233,8 +235,10 @@ mem_size_init(void)
 	/* Reserve 1/16 of the memory address space for virtual mappings.
 	 * Yes, this loses memory. Blame i386. */
-	if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15)
+	if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15) {
 		phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 16) * 15;
+		printf("Truncating memory size to %dMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024));
+	}
 	phys_first_addr = round_page(phys_first_addr);
 	phys_last_addr = trunc_page(phys_last_addr);
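Both mem_size_init() hunks leave the existing clamping logic alone and only add a printf so the truncation is no longer silent. The first clamp matters because the boot loader reports memory in KiB and i386 physical addresses are 32 bits wide: 0x400000 KiB is exactly 4 GiB, which wraps to 0 once converted to bytes. A rough standalone illustration of that arithmetic (assuming a 32-bit address type, as on i386):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t mem_upper_kb   = 0x400000;                           /* 4 GiB reported in KiB */
        uint32_t phys_last_addr = (uint32_t)(mem_upper_kb * 0x400);   /* wraps to 0 */
        uint32_t capped         = (0x400000u - 1) * 0x400;            /* what the kernel keeps */

        printf("unclamped: 0x%08x  clamped: 0x%08x\n", phys_last_addr, capped);
        return 0;
    }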
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index d57df92..f6cbead 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -335,8 +335,8 @@ lock_data_t pmap_system_lock;
 /* It is hard to know when a TLB flush becomes less expensive than a bunch of
  * invlpgs. But it surely is more expensive than just one invlpg. */
 #define INVALIDATE_TLB(s, e) { \
-	if (__builtin_constant_p((e) - (s))
-	    && (e) - (s) == PAGE_SIZE)
+	if (__builtin_constant_p((e) - (s)) \
+	    && (e) - (s) == PAGE_SIZE) \
 		invlpg_linear(s); \
 	else \
 		flush_tlb(); \
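The pmap.c hunk only adds the two missing line continuations; without them the INVALIDATE_TLB definition stops at the first unterminated line and the rest of the if/else is left dangling outside the macro. The underlying idea, shown here as a self-contained sketch with invented helper names in place of invlpg_linear() and flush_tlb(): __builtin_constant_p() lets the compiler take the cheap single-page invalidation whenever it can prove the range is exactly one page.

    #include <stdio.h>

    #define PAGE_SIZE 4096

    static void one_page(unsigned long s) { printf("invalidate page 0x%lx\n", s); }
    static void whole_tlb(void)           { printf("flush whole TLB\n"); }

    /* Every body line needs the trailing backslash; a missing one is the
     * bug being fixed above. */
    #define INVALIDATE_RANGE(s, e) do { \
        if (__builtin_constant_p((e) - (s)) \
            && (e) - (s) == PAGE_SIZE) \
            one_page(s); \
        else \
            whole_tlb(); \
    } while (0)

    int main(void)
    {
        INVALIDATE_RANGE(0x1000UL, 0x1000UL + PAGE_SIZE);      /* constant one-page range */
        INVALIDATE_RANGE(0x1000UL, 0x1000UL + 8 * PAGE_SIZE);  /* anything else */
        return 0;
    }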
diff --git a/i386/linux/dev/include/linux/autoconf.h b/i386/linux/dev/include/linux/autoconf.h
index 037829b..75ff2aa 100644
--- a/i386/linux/dev/include/linux/autoconf.h
+++ b/i386/linux/dev/include/linux/autoconf.h
@@ -55,8 +55,8 @@
#undef CONFIG_BLK_DEV_IDEFLOPPY
#undef CONFIG_BLK_DEV_IDESCSI
#undef CONFIG_BLK_DEV_IDE_PCMCIA
-#define CONFIG_BLK_DEV_CMD640 1
-#define CONFIG_BLK_DEV_CMD640_ENHANCED 1
+#undef CONFIG_BLK_DEV_CMD640
+#undef CONFIG_BLK_DEV_CMD640_ENHANCED
#define CONFIG_BLK_DEV_RZ1000 1
#define CONFIG_BLK_DEV_TRITON 1
#undef CONFIG_IDE_CHIPSETS
diff --git a/kern/lock.c b/kern/lock.c
index 909aa46..0c61227 100644
--- a/kern/lock.c
+++ b/kern/lock.c
@@ -104,7 +104,7 @@ boolean_t simple_lock_try(simple_lock_t l)
#endif /* NCPUS > 1 */
#if NCPUS > 1
-int lock_wait_time = 100;
+static int lock_wait_time = 100;
#else /* NCPUS > 1 */
/*
@@ -112,7 +112,7 @@ int lock_wait_time = 100;
* thought something magical would happen to the
* want_write bit while we are executing.
*/
-int lock_wait_time = 0;
+static int lock_wait_time = 0;
#endif /* NCPUS > 1 */
#if MACH_SLOCKS && NCPUS == 1
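The lock.c change only removes external linkage from lock_wait_time, but the surrounding comment explains the value itself: the write-lock slow path spins up to lock_wait_time iterations hoping another CPU drops the lock before it commits to blocking, and on a uniprocessor no other CPU can do that, so the count is 0. A toy approximation of that spin-then-block pattern (invented helpers, not the kern/lock.c code):

    #include <stdio.h>

    #ifndef NCPUS
    #define NCPUS 1
    #endif

    #if NCPUS > 1
    static int lock_wait_time = 100;
    #else
    static int lock_wait_time = 0;   /* nobody else can free the lock while we spin */
    #endif

    static int lock_is_free(void) { return 0; }   /* pretend the holder keeps it */

    static void lock_write_slow(void)
    {
        int i = lock_wait_time;

        while (--i > 0 && !lock_is_free())
            continue;                /* optimistic busy-wait */
        if (!lock_is_free())
            printf("spun %d iterations, blocking now\n", lock_wait_time);
    }

    int main(void)
    {
        lock_write_slow();
        return 0;
    }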
diff --git a/kern/zalloc.c b/kern/zalloc.c
index 839e40f..f2904e4 100644
--- a/kern/zalloc.c
+++ b/kern/zalloc.c
@@ -214,7 +214,7 @@ zone_t zinit(size, align, max, alloc, memtype, name)
 		max = alloc;
 
 	if (align > 0) {
-		if (align >= PAGE_SIZE)
+		if (PAGE_SIZE % align || align % sizeof(z->free_elements))
 			panic("zinit");
 		ALIGN_SIZE_UP(size, align);
 	}
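The zinit() change replaces a loose upper bound (panic once align reached PAGE_SIZE) with two divisibility requirements: align must divide PAGE_SIZE evenly and must itself be a multiple of the free-list link stored in each free element, presumably so that aligned elements tile a page exactly and keep that embedded link aligned. A standalone sketch of which alignments the new test accepts, with LINK_SIZE standing in for sizeof(z->free_elements):

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define LINK_SIZE sizeof(unsigned long)   /* stand-in for sizeof(z->free_elements) */

    static int align_ok(unsigned long align)
    {
        /* mirrors: if (PAGE_SIZE % align || align % sizeof(z->free_elements)) panic("zinit"); */
        return PAGE_SIZE % align == 0 && align % LINK_SIZE == 0;
    }

    int main(void)
    {
        unsigned long candidates[] = { 2, 4, 8, 16, 64, 1024, 4096, 8192, 3000 };
        unsigned int i;

        for (i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
            printf("align %-5lu -> %s\n", candidates[i],
                   align_ok(candidates[i]) ? "accepted" : "panic(\"zinit\")");
        return 0;
    }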
@@ -828,6 +828,18 @@ static void zone_gc(void)
 		free_addr = zone_map_min_address +
 			PAGE_SIZE * (freep - zone_page_table);
+
+		/* Hack Hack */
+		/* Needed to make vm_map_delete's vm_map_clip_end always be
+		 * able to get an element without having to call zget_space and
+		 * hang because zone_map is already locked by vm_map_delete */
+
+		extern zone_t vm_map_kentry_zone; /* zone for kernel entry structures */
+		vm_offset_t entry1 = zalloc(vm_map_kentry_zone),
+			entry2 = zalloc(vm_map_kentry_zone);
+		zfree(vm_map_kentry_zone, entry1);
+		zfree(vm_map_kentry_zone, entry2);
+
 		kmem_free(zone_map, free_addr, PAGE_SIZE);
 	}
 }
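The zone_gc() addition is the workaround its comment describes: kmem_free() on zone_map goes through vm_map_delete(), whose vm_map_clip_end() may need a fresh kernel map entry, and growing vm_map_kentry_zone via zget_space() at that point would hang on the zone_map lock that is already held. Allocating and immediately freeing two entries up front parks them on the zone's free list, so the clip path never has to grow the zone. A toy model of that "prime the free list before the locked path" idea (plain C, not the kernel allocator):

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { struct entry *next; };
    static struct entry *free_list;

    static struct entry *get_entry(void)   /* like zalloc(): free list first */
    {
        if (free_list) {
            struct entry *e = free_list;
            free_list = e->next;
            return e;
        }
        return malloc(sizeof(struct entry));   /* the growth path to avoid later */
    }

    static void put_entry(struct entry *e)  /* like zfree(): push back on the free list */
    {
        e->next = free_list;
        free_list = e;
    }

    int main(void)
    {
        /* Prime the cache with two entries, mirroring entry1/entry2 in the patch. */
        struct entry *a = get_entry(), *b = get_entry();
        put_entry(a);
        put_entry(b);

        /* A later caller that must not allocate can now take both safely. */
        printf("free entries ready: %d\n", free_list && free_list->next ? 2 : 0);
        return 0;
    }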
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 260eb5a..2c8ad60 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -798,7 +798,7 @@ kern_return_t vm_map_enter(
 		 */
 		if (((start + mask) & ~mask) < start)
-			return(KERN_NO_SPACE);
+			RETURN(KERN_NO_SPACE);
 		start = ((start + mask) & ~mask);
 		end = start + size;
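vm_map_enter() routes its error exits through a local RETURN() macro that jumps to a common cleanup label where the map lock is dropped, so a bare return(KERN_NO_SPACE) here would leave the map locked; the hunk below fixes the same kind of leak in the copy-overwrite path by adding the missing vm_map_unlock(dst_map) before an early return. A simplified sketch of that unlock-on-every-exit convention (toy code, not the verbatim vm_map.c definitions):

    #include <stdio.h>

    #define KERN_SUCCESS   0
    #define KERN_NO_SPACE  3

    static int map_locked;
    static void map_lock(void)   { map_locked = 1; }
    static void map_unlock(void) { map_locked = 0; }

    /* Every early exit goes through BailOut, which releases the lock. */
    #define RETURN(value) { result = (value); goto BailOut; }

    static int toy_map_enter(unsigned long start, unsigned long mask)
    {
        int result = KERN_SUCCESS;

        map_lock();
        if (((start + mask) & ~mask) < start)   /* aligning start overflowed */
            RETURN(KERN_NO_SPACE);
        /* ... the rest of the allocation would run here ... */

    BailOut:
        map_unlock();
        return result;
    }

    int main(void)
    {
        int kr = toy_map_enter(~0UL - 2, 0xfff);

        printf("result %d, lock still held: %d\n", kr, map_locked);
        return 0;
    }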
@@ -2146,8 +2146,10 @@ start_pass_1:
 	 * the copy cannot be interrupted.
 	 */
 
-	if (interruptible && contains_permanent_objects)
+	if (interruptible && contains_permanent_objects) {
+		vm_map_unlock(dst_map);
 		return(KERN_FAILURE); /* XXX */
+	}
 
 	/*
 	 * XXXO If there are no permanent objects in the destination,