#DPATCHLEVEL=1

2006-01-09  Samuel Thibault  <samuel.thibault@ens-lyon.org>

	* i386/i386at/model_dep.c (mem_size_init): Limit memory to what
	gnumach is able to use (minus a little extra for virtual mappings).
	
	* i386/intel/pmap.c (pmap_bootstrap): Extend the virtual mapping area
	according to memory size, so that it can at least be managed.  But
	watch out for wrap-around and limit it to kernel addresses.  Remove
	duplicate computation.

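As a rough, standalone sketch of the arithmetic the two hunks below
introduce (the value used for VM_MAX_KERNEL_ADDRESS and the sample
addresses are invented for illustration; the real definitions live in
gnumach's i386 headers), modelling only the cap, the proportional
extension, and the wrap/limit check:

#include <stdio.h>

/* Illustrative values only; not the real gnumach constants.  */
#define VM_MAX_KERNEL_ADDRESS	0xffffffffUL

int main(void)
{
	unsigned long phys_first_addr = 0x1000UL;	/* hypothetical */
	unsigned long phys_last_addr = 0xf8000000UL;	/* hypothetical */
	unsigned long morevm = 4UL * 1024 * 1024;	/* hypothetical extra VM */
	unsigned long kernel_virtual_end;

	/* model_dep.c hunk: cap usable physical memory at 15/16 of the
	 * kernel address space, keeping 1/16 free for virtual mappings.  */
	if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15)
		phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 16) * 15;

	/* pmap.c hunk: grow the kernel virtual area in proportion to the
	 * managed memory, then guard against wrap-around and against
	 * running past the kernel address space.  */
	kernel_virtual_end = phys_last_addr + morevm
		+ (phys_last_addr - phys_first_addr) / 15;
	if (kernel_virtual_end < phys_last_addr
			|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
		kernel_virtual_end = VM_MAX_KERNEL_ADDRESS;

	printf("phys_last_addr     = 0x%lx\n", phys_last_addr);
	printf("kernel_virtual_end = 0x%lx\n", kernel_virtual_end);
	return 0;
}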

diff -upr gnumach-20050801/i386/i386at/model_dep.c gnumach-mine/i386/i386at/model_dep.c
--- gnumach-20050801/i386/i386at/model_dep.c	2004-11-29 06:34:51.000000000 +0100
+++ gnumach-mine/i386/i386at/model_dep.c	2006-01-09 00:07:46.000000000 +0100
@@ -211,6 +211,11 @@ mem_size_init()
 	printf("AT386 boot: physical memory from 0x%x to 0x%x\n",
 	       phys_first_addr, phys_last_addr);
 
+	/* Reserve 1/16 of memory address space for virtual mappings.
+	 * Yes, this loses memory. Blame i386.  */
+	if (phys_last_addr > (VM_MAX_KERNEL_ADDRESS / 16) * 15)
+		phys_last_addr = (VM_MAX_KERNEL_ADDRESS / 16) * 15;
+
 	phys_first_addr = round_page(phys_first_addr);
 	phys_last_addr = trunc_page(phys_last_addr);
 }
diff -upr gnumach-20050801/i386/intel/pmap.c gnumach-mine/i386/intel/pmap.c
--- gnumach-20050801/i386/intel/pmap.c	2001-04-05 08:39:21.000000000 +0200
+++ gnumach-mine/i386/intel/pmap.c	2006-01-09 00:17:26.000000000 +0100
@@ -627,7 +627,12 @@ void pmap_bootstrap()
 	 * and extends to a stupid arbitrary limit beyond that.
 	 */
 	kernel_virtual_start = phys_last_addr;
-	kernel_virtual_end = phys_last_addr + morevm;
+	kernel_virtual_end = phys_last_addr + morevm
+		+ (phys_last_addr - phys_first_addr) / 15;
+
+	if (kernel_virtual_end < phys_last_addr
+			|| kernel_virtual_end > VM_MAX_KERNEL_ADDRESS)
+		kernel_virtual_end = VM_MAX_KERNEL_ADDRESS;
 
 	/*
 	 * Allocate and clear a kernel page directory.
@@ -656,7 +661,7 @@ void pmap_bootstrap()
 		 * to allocate new kernel page tables later.
 		 * XX fix this
 		 */
-		for (va = phys_first_addr; va < phys_last_addr + morevm; )
+		for (va = phys_first_addr; va < kernel_virtual_end; )
 		{
 			pt_entry_t *pde = kernel_page_dir + lin2pdenum(kvtolin(va));
 			pt_entry_t *ptable = (pt_entry_t*)pmap_grab_page();