@@ -51,44 +51,171 @@
  */
 static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
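+// Note: the coremap below reuses stealmem_lock to serialize all frame allocation and freeing.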

-void
-vm_bootstrap(void)
+typedef struct coremap
 {
-	/* Do nothing. */
-}
+	paddr_t start;		// physical address of the first frame the map manages
+	int framecount;		// number of frames the map tracks
+	int next;		// frame index at which the next search begins
+	int *num;		// per-frame allocation state (see tagging note below)
+} coremap;
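+
+// Tagging convention for num[]: a free frame holds -1, and an n-frame block that
+// starts at frame f is tagged num[f] = 0, num[f+1] = 1, ..., num[f+n-1] = n-1.
+// free_kpages() depends on this to walk a block from its first frame and release it.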

-static
-paddr_t
-getppages(unsigned long npages)
+static coremap map;
+static bool init = false;
+
+void vm_bootstrap(void)
 {
-	paddr_t addr;
+	paddr_t min;
+	paddr_t max;
+	// range of physical memory still available
+	ram_getsize(&min, &max);
+	// number of frames if the managed region started right at min
+	map.framecount = (max - min) / PAGE_SIZE;
+	// the num[] array lives at the start of that region, accessed through KSEG0
+	map.num = (int *) PADDR_TO_KVADDR(min);
+	// advance min past the array, rounded up to a whole number of pages
+	min += ROUNDUP(map.framecount * sizeof(int), PAGE_SIZE);
+	// recompute framecount so it excludes the frames taken by the array itself
+	map.framecount = (max - min) / PAGE_SIZE;
+	// first physical address the allocator will hand out
+	map.start = min;
+	// begin searching at the first frame
+	map.next = 0;
+
+	// mark all frames free
+	for (int i = 0; i < map.framecount; ++i)
+	{
+		map.num[i] = -1;
+	}
+
+	init = true;
+}
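+
+// Rough sizing example (illustrative; assumes PAGE_SIZE is 4 KB and sizeof(int) is 4):
+// with 4 MB of free RAM there are about 1024 candidate frames, so num[] needs about
+// 4 KB, i.e. one page, leaving 1023 frames for the allocator to hand out.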

+// allocate n contiguous physical frames; returns the address of the first one, or 0 on failure
+static paddr_t getpages(int n)
+{
+	if (!init) goto skip2;	// coremap not set up yet; fall back to ram_stealmem()
+	bool first = true;	// tells the first visit to map.next apart from wrapping back to it
 	spinlock_acquire(&stealmem_lock);

-	addr = ram_stealmem(npages);
+	for (int i = map.next; i < map.framecount; ++i)
+	{
+		// a free block may start here
+		if (map.num[i] == -1)
+		{
+			// a block of n frames starting here would run past the end of the map
+			if (i + n > map.framecount) goto skip1;
+			int temp = n - 1;
+
+			// see if we have a full block
+			while (temp)
+			{
+				// frame is available
+				if (map.num[i + temp] == -1)
+				{
+					--temp;
+					continue;
+				}
+				// not enough contiguous space here
+				goto skip1;
+			}
+
+			// we have enough frames; tag them 0..n-1 so the block can be freed as a unit
+			for (int j = 0; j < n; ++j)
+			{
+				map.num[i + j] = j;
+			}
+
+			// advance the rotating search start, wrapping to 0 at the end of the map
+			if ((map.next + n) < map.framecount) map.next += n;
+			else map.next = 0;
+			spinlock_release(&stealmem_lock);
+			return map.start + PAGE_SIZE * i;
+		}
+
+		skip1:
+		// the block cannot fit before the end of the map and the search did not
+		// start in this tail region, so wrap around and rescan from frame 0
+		if (i + n >= map.framecount && map.next < i)
+		{
+			// reset i (the loop's ++i makes it 0)
+			i = -1;
+			first = false;
+			continue;
+		}
+
+		// skip ahead over the run of allocated frames
+		int temp = i;
+		while (temp < map.framecount && map.num[temp] != -1)
+		{
+			// reached the last frame; wrap the scan to frame 0
+			if (temp == map.framecount - 1)
+			{
+				temp = 0;
+				break;
+			}
+
+			// we came back to the start
+			if (temp == map.next && !first)
+			{
+				spinlock_release(&stealmem_lock);
+				return 0;
+			}
+			++temp;
+		}
+
+		// if we came back to the start, every frame has been examined
+		if ((i == map.next || temp == map.next) && !first)
+		{
+			spinlock_release(&stealmem_lock);
+			return 0;
+		}
+
+		// jump i forward to just before temp (the loop's ++i lands on temp)
+		if (i != temp) i = temp - 1;
+		first = false;
+	}
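+	// fell out of the loop: no run of n contiguous free frames exists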

+	spinlock_release(&stealmem_lock);
+	return 0;
+
+	// fallback used before vm_bootstrap() has run: steal memory directly
+	skip2:
+	// a label must be followed by a statement, not a declaration, hence the empty statement
+	;
+	paddr_t addr;
+	spinlock_acquire(&stealmem_lock);
+	addr = ram_stealmem(n);
 	spinlock_release(&stealmem_lock);
 	return addr;
 }

 /* Allocate/free some kernel-space virtual pages */
-vaddr_t
-alloc_kpages(int npages)
+vaddr_t alloc_kpages(int npages)
 {
 	paddr_t pa;
-	pa = getppages(npages);
-	if (pa==0) {
-		return 0;
-	}
+	pa = getpages(npages);
+	if (pa == 0) return 0;
 	return PADDR_TO_KVADDR(pa);
 }
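+
+// Usage sketch (illustrative only, not part of this change): contiguous kernel pages
+// come back as a KSEG0 virtual address and are later returned with that same address:
+//
+//	vaddr_t buf = alloc_kpages(3);
+//	if (buf != 0) {
+//		/* ... use the three pages ... */
+//		free_kpages(buf);
+//	}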

-void
-free_kpages(vaddr_t addr)
+void free_kpages(vaddr_t addr)
 {
-	/* nothing - leak the memory. */
-
-	(void)addr;
+	// memory handed out before bootstrap came from ram_stealmem() and cannot be freed
+	if (!init)
+	{
+		(void)addr;
+		return;
+	}
+
+	spinlock_acquire(&stealmem_lock);
+	// convert the KSEG0 virtual address back to a physical address
+	paddr_t a = addr - MIPS_KSEG0;
+	int start = (a - map.start) / PAGE_SIZE;
+	// addr must point at the first frame of a block, which is always tagged 0
+	KASSERT(map.num[start] == 0);
+	int lockstep = 0;
+
+	// walk the block while the tags keep counting up, freeing each frame
+	while (start < map.framecount && map.num[start] == lockstep)
+	{
+		map.num[start] = -1;
+		++start;
+		++lockstep;
+	}
+	spinlock_release(&stealmem_lock);
 }

 void
@@ -238,6 +365,9 @@ struct addrspace * as_create(void)
 void
 as_destroy(struct addrspace *as)
 {
+	// free_kpages() expects the KSEG0 virtual address of each block
+	free_kpages(PADDR_TO_KVADDR(as->as_pbase1));
+	free_kpages(PADDR_TO_KVADDR(as->as_pbase2));
+	free_kpages(PADDR_TO_KVADDR(as->as_stackpbase));
 	kfree(as);
 }

@@ -324,17 +454,17 @@ as_prepare_load(struct addrspace *as)
 	KASSERT(as->as_pbase2 == 0);
 	KASSERT(as->as_stackpbase == 0);

-	as->as_pbase1 = getppages(as->as_npages1);
+	as->as_pbase1 = getpages(as->as_npages1);
 	if (as->as_pbase1 == 0) {
 		return ENOMEM;
 	}

-	as->as_pbase2 = getppages(as->as_npages2);
+	as->as_pbase2 = getpages(as->as_npages2);
 	if (as->as_pbase2 == 0) {
 		return ENOMEM;
 	}

-	as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
+	as->as_stackpbase = getpages(DUMBVM_STACKPAGES);
 	if (as->as_stackpbase == 0) {
 		return ENOMEM;
 	}