Diffstat (limited to 'i386')
-rw-r--r--  i386/i386/db_interface.c                 | 2
-rw-r--r--  i386/i386at/model_dep.c                  | 8
-rw-r--r--  i386/intel/pmap.c                        | 4
-rw-r--r--  i386/linux/dev/include/linux/autoconf.h  | 4
4 files changed, 11 insertions, 7 deletions
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
index bd43b1f..29f03c9 100644
--- a/i386/i386/db_interface.c
+++ b/i386/i386/db_interface.c
@@ -311,7 +311,7 @@ db_read_bytes(
         unsigned kern_addr;
 
         src = (char *)addr;
-        if (addr >= VM_MIN_KERNEL_ADDRESS || task == TASK_NULL) {
+        if ((addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) || task == TASK_NULL) {
             if (task == TASK_NULL)
                 task = db_current_task();
             while (--size >= 0) {
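
Note on the db_interface.c hunk: the old test treated every address at or above VM_MIN_KERNEL_ADDRESS as a kernel address, so addresses beyond the kernel map were read as if they were mapped; the new test also requires the address to lie below VM_MAX_KERNEL_ADDRESS, and anything outside that range falls through to the task's own map. A minimal user-space sketch of the range check, with placeholder bounds (the real constants come from the kernel's vm_param headers, not from this patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder bounds for illustration only. */
    #define VM_MIN_KERNEL_ADDRESS 0xC0000000u
    #define VM_MAX_KERNEL_ADDRESS 0xF5800000u

    /* An address is directly readable through the kernel map only when it
     * lies inside [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS). */
    static bool addr_in_kernel_range(uint32_t addr)
    {
        return addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS;
    }

    int main(void)
    {
        printf("%d\n", addr_in_kernel_range(0xC0100000u)); /* 1: inside the kernel range */
        printf("%d\n", addr_in_kernel_range(0xF8000000u)); /* 0: beyond the kernel range */
        return 0;
    }
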
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index 2426cde..b9fb7c0 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -220,8 +220,10 @@ mem_size_init(void)
 
         phys_last_kb = 0x400 + boot_info.mem_upper;
         /* Avoid 4GiB overflow. */
-        if (phys_last_kb < 0x400 || phys_last_kb >= 0x400000)
+        if (phys_last_kb < 0x400 || phys_last_kb >= 0x400000) {
+                printf("Truncating memory size to 4GiB\n");
                 phys_last_kb = 0x400000 - 1;
+        }
         phys_last_addr = phys_last_kb * 0x400;
 
         avail_remaining
@@ -233,8 +235,10 @@ mem_size_init(void)
 
         /* Reserve 1/16 of the memory address space for virtual mappings.
          * Yes, this loses memory. Blame i386. */
-        if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15)
+        if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15) {
                 phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 16) * 15;
+                printf("Truncating memory size to %dMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024));
+        }
 
         phys_first_addr = round_page(phys_first_addr);
         phys_last_addr = trunc_page(phys_last_addr);
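
Note on the model_dep.c hunks: both truncations now print what they did. The units are worth spelling out: phys_last_kb counts KiB, so the 0x400000 limit is exactly 4GiB, and the second clamp leaves 1/16 of the kernel virtual range free for mappings. A small user-space sketch of the same arithmetic, with an assumed bootloader report and an assumed VM_MAX_KERNEL_ADDRESS used purely for illustration:

    #include <stdio.h>

    int main(void)
    {
        /* Assumed input: the bootloader reports memory above 1MiB in KiB
         * (boot_info.mem_upper); pretend it says 4GiB. */
        unsigned long mem_upper_kb = 4UL * 1024 * 1024;
        unsigned long phys_last_kb = 0x400 + mem_upper_kb;   /* total RAM in KiB */

        /* First clamp: 0x400000 KiB == 4GiB, which would overflow a 32-bit
         * byte count, so stay just below it ("Truncating memory size to 4GiB"). */
        if (phys_last_kb < 0x400 || phys_last_kb >= 0x400000)
            phys_last_kb = 0x400000 - 1;

        unsigned long long phys_last_addr = (unsigned long long)phys_last_kb * 0x400;

        /* Second clamp: keep 1/16 of the kernel virtual range for mappings.
         * VM_MAX_KERNEL_ADDRESS is a placeholder value here. */
        unsigned long long vm_max_kernel_address = 0xF5800000ULL;
        if (phys_last_addr > (vm_max_kernel_address / 16) * 15)
            phys_last_addr = (vm_max_kernel_address / 16) * 15;

        printf("usable physical memory ends at %#llx (%lluMiB)\n",
               phys_last_addr, phys_last_addr / (1024 * 1024));
        return 0;
    }

With the assumed 4GiB input both clamps fire, mirroring the two new printf messages in the patch.
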
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index d57df92..f6cbead 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -335,8 +335,8 @@ lock_data_t pmap_system_lock;
 /* It is hard to know when a TLB flush becomes less expensive than a bunch of
  * invlpgs. But it surely is more expensive than just one invlpg. */
 #define INVALIDATE_TLB(s, e) { \
-        if (__builtin_constant_p((e) - (s))
-            && (e) - (s) == PAGE_SIZE)
+        if (__builtin_constant_p((e) - (s)) \
+            && (e) - (s) == PAGE_SIZE) \
                 invlpg_linear(s); \
         else \
                 flush_tlb(); \
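
Note on the pmap.c hunk: the first two lines of the macro body lacked trailing backslashes, so the #define ended after its first line and the rest of the body was left outside the macro. The fixed macro issues a single invlpg when the compiler can prove the flushed range is exactly one page, and falls back to a full TLB flush otherwise. A stand-alone sketch of the same pattern, with the kernel's TLB primitives stubbed out for illustration:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    /* Stubs standing in for the kernel's TLB primitives. */
    static void invlpg_linear(unsigned long s) { printf("invlpg %#lx\n", s); }
    static void flush_tlb(void)                { printf("full TLB flush\n"); }

    /* Every line of a multi-line macro body needs a trailing backslash;
     * without it the definition stops at the first unescaped newline. */
    #define INVALIDATE_TLB(s, e) {                  \
            if (__builtin_constant_p((e) - (s))     \
                && (e) - (s) == PAGE_SIZE)          \
                    invlpg_linear(s);               \
            else                                    \
                    flush_tlb();                    \
    }

    int main(void)
    {
        /* Literal one-page range: the size is a constant expression equal
         * to PAGE_SIZE, so a single invlpg is issued. */
        INVALIDATE_TLB(0x1000UL, 0x1000UL + PAGE_SIZE);

        /* Range the compiler cannot prove to be one page: full flush. */
        unsigned long start = 0x1000UL, end = 0x4000UL;
        INVALIDATE_TLB(start, end);
        return 0;
    }
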
diff --git a/i386/linux/dev/include/linux/autoconf.h b/i386/linux/dev/include/linux/autoconf.h
index 037829b..75ff2aa 100644
--- a/i386/linux/dev/include/linux/autoconf.h
+++ b/i386/linux/dev/include/linux/autoconf.h
@@ -55,8 +55,8 @@
 #undef CONFIG_BLK_DEV_IDEFLOPPY
 #undef CONFIG_BLK_DEV_IDESCSI
 #undef CONFIG_BLK_DEV_IDE_PCMCIA
-#define CONFIG_BLK_DEV_CMD640 1
-#define CONFIG_BLK_DEV_CMD640_ENHANCED 1
+#undef CONFIG_BLK_DEV_CMD640
+#undef CONFIG_BLK_DEV_CMD640_ENHANCED
 #define CONFIG_BLK_DEV_RZ1000 1
 #define CONFIG_BLK_DEV_TRITON 1
 #undef CONFIG_IDE_CHIPSETS
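
Note on the autoconf.h hunk: turning these two options from #define into #undef drops the CMD640 support from the build, because the Linux driver sources only compile the guarded blocks when the corresponding CONFIG_ macro is defined. A hypothetical, self-contained illustration of that guard mechanism (not the actual driver code):

    #include <stdio.h>

    /* Hypothetical config state mimicking the header after this change. */
    #undef  CONFIG_BLK_DEV_CMD640
    #define CONFIG_BLK_DEV_RZ1000 1

    int main(void)
    {
    #ifdef CONFIG_BLK_DEV_CMD640
        printf("CMD640 workaround compiled in\n");   /* dropped from the build */
    #endif
    #ifdef CONFIG_BLK_DEV_RZ1000
        printf("RZ1000 workaround compiled in\n");   /* still built */
    #endif
        return 0;
    }
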