-rw-r--r--  debian/changelog                  |  4
-rw-r--r--  debian/patches/50_mem_limit.patch | 56
2 files changed, 60 insertions, 0 deletions
diff --git a/debian/changelog b/debian/changelog
index 13c30af..ebf4d70 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -15,6 +15,10 @@ gnumach (1:20050801-3) UNRELEASED; urgency=low
     - debian/patches/45_io_per_task.patch: Likewise.
     - debian/patches/46_io_device.patch: Likewise.
     Thanks to Samuel Thibault <samuel.thibault@ens-lyon.org>.
+  * Fix memory limit, that was causing panics when having roughly more than
+    768 MiB of physical memory. (Closes: #226609)
+    - debian/patches/50_mem_limit.patch: New file.
+    Thanks to Samuel Thibault <samuel.thibault@ens-lyon.org>.
 
  -- Guillem Jover <guillem@debian.org>  Sat,  7 Jan 2006 01:06:40 +0200
 
diff --git a/debian/patches/50_mem_limit.patch b/debian/patches/50_mem_limit.patch
new file mode 100644
index 0000000..58dd088
--- /dev/null
+++ b/debian/patches/50_mem_limit.patch
@@ -0,0 +1,56 @@
+#DPATCHLEVEL=1
+
+2006-01-09  Samuel Thibault  <samuel.thibault@ens-lyon.org>
+
+	* i386/i386at/model_dep.c (mem_size_init): Limit memory to what
+	gnumach is able to use (minus a little extra for virtual mappings).
+
+	* i386/intel/pmap.c (pmap_bootstrap): Extend the virtual mapping area
+	according to memory size for at least being able to manage it. But
+	look out for wrap and limit it to kernel adresses. Remove duplicate
+	computing.
+
+
+diff -upr gnumach-20050801/i386/i386at/model_dep.c gnumach-mine/i386/i386at/model_dep.c
+--- gnumach-20050801/i386/i386at/model_dep.c	2004-11-29 06:34:51.000000000 +0100
++++ gnumach-mine/i386/i386at/model_dep.c	2006-01-09 00:07:46.000000000 +0100
+@@ -226,6 +226,11 @@ mem_size_init()
+ 		/* This is actually enforced below, in init_alloc_aligned.  */
+ 	}
+ 
++	/* Reserve 1/16 of memory address space for virtual mappings.
++	 * Yes, this looses memory.  Blame i386.  */
++	if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15)
++		phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 16) * 15;
++
+ 	phys_first_addr = round_page(phys_first_addr);
+ 	phys_last_addr = trunc_page(phys_last_addr);
+ }
+diff -upr gnumach-20050801/i386/intel/pmap.c gnumach-mine/i386/intel/pmap.c
+--- gnumach-20050801/i386/intel/pmap.c	2001-04-05 08:39:21.000000000 +0200
++++ gnumach-mine/i386/intel/pmap.c	2006-01-09 00:17:26.000000000 +0100
+@@ -627,7 +627,12 @@ void pmap_bootstrap()
+ 	 * and extends to a stupid arbitrary limit beyond that.
+ 	 */
+ 	kernel_virtual_start = phys_last_addr;
+-	kernel_virtual_end = phys_last_addr + morevm;
++	kernel_virtual_end = phys_last_addr + morevm
++		+ (phys_last_addr - phys_first_addr) / 15;
++
++	if (kernel_virtual_end < phys_last_addr
++	    || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
++		kernel_virtual_end = VM_MAX_KERNEL_ADDRESS;
+ 
+ 	/*
+ 	 * Allocate and clear a kernel page directory.
+@@ -656,7 +661,7 @@
+ 	 * to allocate new kernel page tables later.
+ 	 * XX fix this
+ 	 */
+-	for (va = phys_first_addr; va < phys_last_addr + morevm; )
++	for (va = phys_first_addr; va < kernel_virtual_end; )
+ 	{
+ 		pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
+ 		pt_entry_t *ptable = (pt_entry_t*)pmap_grab_page();
+ 
+
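To make the arithmetic in 50_mem_limit.patch easier to follow, the standalone C sketch below replays its two computations: capping usable physical memory at 15/16 of the kernel address space in mem_size_init(), then extending kernel_virtual_end with a wrap/overshoot guard in pmap_bootstrap(). It is an illustration only, not part of the commit: the values chosen for VM_MAX_KERNEL_ADDRESS, the reported memory size and the morevm slack are made-up placeholders, and the real kernel operates on its own globals rather than local variables.

/*
 * Standalone sketch (not part of the commit) of the arithmetic that
 * 50_mem_limit.patch introduces.  VM_MAX_KERNEL_ADDRESS and the inputs
 * below are assumed placeholders, not the real gnumach constants.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define VM_MAX_KERNEL_ADDRESS 0xF0000000UL  /* assumption, for illustration only */

int main(void)
{
	uint32_t phys_first_addr = 0;
	uint32_t phys_last_addr  = UINT32_C(0xF0000000); /* pretend ~3.75 GiB of RAM reported */
	uint32_t morevm          = UINT32_C(0x02000000); /* stand-in extra VM slack */

	/* mem_size_init(): cap usable physical memory at 15/16 of the kernel
	 * address space, keeping 1/16 free for virtual mappings. */
	if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15)
		phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 16) * 15;

	/* pmap_bootstrap(): grow the kernel virtual window in proportion to the
	 * physical memory being managed (1/15 of it, i.e. the reserved 1/16)... */
	uint32_t kernel_virtual_start = phys_last_addr;
	uint32_t kernel_virtual_end = phys_last_addr + morevm
		+ (phys_last_addr - phys_first_addr) / 15;

	/* ...but clamp it if the 32-bit sum wrapped around or overshot the
	 * top of the kernel address space. */
	if (kernel_virtual_end < phys_last_addr
	    || kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
		kernel_virtual_end = VM_MAX_KERNEL_ADDRESS;

	printf("usable physical memory: %" PRIu32 " MiB\n", phys_last_addr >> 20);
	printf("kernel virtual window:  0x%08" PRIx32 " - 0x%08" PRIx32 "\n",
	       kernel_virtual_start, kernel_virtual_end);
	return 0;
}

With these placeholder inputs both guards trigger: the reported memory is capped to 3600 MiB (15/16 of the assumed address space), and the extended virtual window is clamped back to VM_MAX_KERNEL_ADDRESS instead of running past it.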
