/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>

/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground.
 */

/* under dumbvm, always have 48k of user stack */
#define DUMBVM_STACKPAGES    12
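
/*
 * The coremap referenced below ("map") is defined and initialized
 * elsewhere (in ram.c, per the note at vm_bootstrap). From its use in
 * this file it is assumed to look roughly like the following sketch;
 * the real definition may differ in detail:
 *
 *	struct coremap {
 *		int *num;	// per-frame tag: -1 = free, k = k-th frame of its block
 *		paddr_t start;	// physical address of the first managed frame
 *		int framecount;	// number of frames tracked by num[]
 *		int next;	// index at which the next search begins
 *	};
 */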
/*
 * Wrap ram_stealmem in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;

typedef struct coremap coremap;

//static coremap map;
//static bool init = false;

/*void vm_bootstrap(void) {
	paddr_t min;
	paddr_t max;

	// size of ram available
	ram_getsize(&min, &max);

	// # of frames if we start here
	map.framecount = (max - min) / PAGE_SIZE;

	// start of the coremap array
	map.num = (int *) PADDR_TO_KVADDR(min);

	// min should start at a page start address (roundup), and be after the space for the array
	min += ROUNDUP(map.framecount * sizeof(int), PAGE_SIZE);

	// framecount needs to reflect the frames we took for the array of frames
	map.framecount = (max - min) / PAGE_SIZE;

	// set the start of actual virtual memory
	map.start = min;

	// set next frame to the start
	map.next = 0;

	// set all frames to empty
	for (int i = 0; i < map.framecount; ++i) {
		map.num[i] = -1;
	}

	init = true;
}*/

// init in ram.c now
void
vm_bootstrap(void)
{
	return;
}

static
paddr_t
getvpages(int n)
{
	bool first = true;

	for (int i = map.next; i < map.framecount; ++i) {
		// may have a block starting here
		if (map.num[i] == -1) {
			int temp = n - 1;
			// see if we have a full block; frames past the end
			// of the coremap count as unavailable
			while (temp) {
				// frame is available
				if (i + temp < map.framecount &&
				    map.num[i + temp] == -1) {
					--temp;
					continue;
				}
				// don't have enough contiguous space
				goto skip1;
			}
			kprintf("found space\n");
			// we have enough frames; tag frame j of the block with j
			for (int j = 0; j < n; ++j) {
				map.num[i + j] = j;
			}
			if ((map.next + n) < map.framecount) {
				map.next += n;
			}
			else {
				map.next = 0;
			}
			kprintf("alloced %d frames\n", n);
			return map.start + PAGE_SIZE * i;
		}
skip1:
		// start searching at 0 if we are at max:
		// block won't fit before end, and we didn't start between here and the end
		if (i + n >= map.framecount && map.next < i) {
			// reset i
			i = -1;
			first = false;
			continue;
		}
		// increment i as much as possible
		int temp = i;
		while (temp < map.framecount && map.num[temp] != -1) {
			// looping around
			if (temp == map.framecount - 1) {
				temp = 0;
				break;
			}
			// we came back to the start
			if (temp == map.next && !first) {
				return 0;
			}
			++temp;
		}
		// if we came back to the start
		if ((i == map.next || temp == map.next) && !first) {
			return 0;
		}
		// increment i as needed from above
		if (i != temp) {
			i = temp - 1;
		}
		first = false;
	}
	return 0;
}

/* Allocate/free some kernel-space virtual pages */
vaddr_t
alloc_kpages(int npages)
{
	spinlock_acquire(&stealmem_lock);
	kprintf("call made\n");
	paddr_t pa = getvpages(npages);
	spinlock_release(&stealmem_lock);
	if (pa == 0) {
		return 0;
	}
	return PADDR_TO_KVADDR(pa);
}

void
free_kpages(vaddr_t addr)
{
	spinlock_acquire(&stealmem_lock);
	paddr_t a = addr - MIPS_KSEG0;
	int start = (a - map.start) / PAGE_SIZE;
	KASSERT(map.num[start] == 0);
	int lockstep = 0;
	// free frames for as long as the tags stay in lockstep (0, 1, 2, ...),
	// checking the bound before reading past the end of the coremap
	while (start < map.framecount && map.num[start] == lockstep) {
		map.num[start] = -1;
		++start;
		++lockstep;
	}
	kprintf("freed %d frames\n", lockstep);
	spinlock_release(&stealmem_lock);
}
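
/*
 * Worked example of the frame-tag scheme above (illustration only, with
 * a hypothetical 6-frame coremap): after alloc_kpages(3) succeeds at
 * frame 1, map.num holds
 *
 *	{ -1, 0, 1, 2, -1, -1 }
 *
 * A later free_kpages() of that block asserts that the first tag is 0,
 * then frees frames while the tags stay in lockstep (0, 1, 2, ...); this
 * is how the block length is recovered without a separate size table.
 */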
void
vm_tlbshootdown_all(void)
{
	panic("dumbvm tried to do tlb shootdown?!\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
	(void)ts;
	panic("dumbvm tried to do tlb shootdown?!\n");
}

int
vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;
	bool readonly = false;

	faultaddress &= PAGE_FRAME;

	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype) {
	    case VM_FAULT_READONLY:
		return EINVAL;
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		return EINVAL;
	}

	if (curproc == NULL) {
		/*
		 * No process. This is probably a kernel fault early
		 * in boot. Return EFAULT so as to panic instead of
		 * getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	as = curproc_getas();
	if (as == NULL) {
		/*
		 * No address space set up. This is probably also a
		 * kernel fault early in boot.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->as_vbase1 != 0);
	KASSERT(as->as_pbase1 != 0);
	KASSERT(as->as_npages1 != 0);
	KASSERT(as->as_vbase2 != 0);
	KASSERT(as->as_pbase2 != 0);
	KASSERT(as->as_npages2 != 0);
	KASSERT(as->as_stackpbase != 0);
	KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;

	// text
	if (faultaddress >= vbase1 && faultaddress < vtop1) {
		paddr = (faultaddress - vbase1) + as->as_pbase1;
		readonly = true;
	}
	// heap
	else if (faultaddress >= vbase2 && faultaddress < vtop2) {
		paddr = (faultaddress - vbase2) + as->as_pbase2;
	}
	// stack
	else if (faultaddress >= stackbase && faultaddress < stacktop) {
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else {
		return EFAULT;
	}

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i = 0; i < NUM_TLB; ++i) {
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID) {
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		// text pages become read-only once loading has finished
		if (readonly && !(as->loading)) {
			elo &= ~TLBLO_DIRTY;
		}
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	// no free TLB slot; evict a random entry
	ehi = faultaddress;
	elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	if (readonly && !(as->loading)) {
		elo &= ~TLBLO_DIRTY;
	}
	tlb_random(ehi, elo);
	splx(spl);
	return 0;
}
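
/*
 * Note on the text-segment handling above: while the executable is still
 * being loaded (as->loading is true; see as_create/as_complete_load
 * below), text pages are mapped with TLBLO_DIRTY set so the loader can
 * write them. After as_complete_load() clears the flag, vm_fault maps
 * text pages without TLBLO_DIRTY, so a user store to such a page faults
 * with VM_FAULT_READONLY, which the switch at the top of vm_fault
 * rejects with EINVAL instead of installing a writeable mapping.
 */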
struct addrspace *
as_create(void)
{
	struct addrspace *as = kmalloc(sizeof(struct addrspace));
	if (as == NULL) {
		return NULL;
	}

	as->as_vbase1 = 0;
	as->as_pbase1 = 0;
	as->as_npages1 = 0;
	as->as_vbase2 = 0;
	as->as_pbase2 = 0;
	as->as_npages2 = 0;
	as->as_stackpbase = 0;
	as->loading = true;

	return as;
}

void
as_destroy(struct addrspace *as)
{
	// free_kpages expects the KSEG0 virtual address of the block, so
	// convert each physical base back with PADDR_TO_KVADDR; skip
	// regions that were never allocated (e.g. after a failed as_copy)
	if (as->as_pbase1 != 0) {
		free_kpages(PADDR_TO_KVADDR(as->as_pbase1));
	}
	if (as->as_pbase2 != 0) {
		free_kpages(PADDR_TO_KVADDR(as->as_pbase2));
	}
	if (as->as_stackpbase != 0) {
		free_kpages(PADDR_TO_KVADDR(as->as_stackpbase));
	}
	kfree(as);
}

void
as_activate(void)
{
	int i, spl;
	struct addrspace *as;

	as = curproc_getas();
#ifdef UW
	/* Kernel threads don't have an address space to activate */
#endif
	if (as == NULL) {
		return;
	}

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i = 0; i < NUM_TLB; ++i) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
}

int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
		 int readable, int writeable, int executable)
{
	size_t npages;

	/* Align the region. First, the base... */
	sz += vaddr & ~(vaddr_t)PAGE_FRAME;
	vaddr &= PAGE_FRAME;

	/* ...and now the length. */
	sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;

	npages = sz / PAGE_SIZE;

	/* We don't use these - all pages are read-write */
	(void)readable;
	(void)writeable;
	(void)executable;

	if (as->as_vbase1 == 0) {
		as->as_vbase1 = vaddr;
		as->as_npages1 = npages;
		return 0;
	}

	if (as->as_vbase2 == 0) {
		as->as_vbase2 = vaddr;
		as->as_npages2 = npages;
		return 0;
	}

	/*
	 * Support for more than two regions is not available.
	 */
	kprintf("dumbvm: Warning: too many regions\n");
	return EUNIMP;
}

static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
	bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}

int
as_prepare_load(struct addrspace *as)
{
	vaddr_t va;

	KASSERT(as->as_pbase1 == 0);
	KASSERT(as->as_pbase2 == 0);
	KASSERT(as->as_stackpbase == 0);

	// check each allocation before converting to a physical address;
	// alloc_kpages returns 0 on failure, and 0 - MIPS_KSEG0 is not 0,
	// so subtracting first would hide the failure
	va = alloc_kpages(as->as_npages1);
	if (va == 0) {
		return ENOMEM;
	}
	as->as_pbase1 = va - MIPS_KSEG0;

	va = alloc_kpages(as->as_npages2);
	if (va == 0) {
		return ENOMEM;
	}
	as->as_pbase2 = va - MIPS_KSEG0;

	va = alloc_kpages(DUMBVM_STACKPAGES);
	if (va == 0) {
		return ENOMEM;
	}
	as->as_stackpbase = va - MIPS_KSEG0;

	as_zero_region(as->as_pbase1, as->as_npages1);
	as_zero_region(as->as_pbase2, as->as_npages2);
	as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

	return 0;
}

int
as_complete_load(struct addrspace *as)
{
	// loading is done: clear the flag so vm_fault stops mapping the
	// text segment as writeable
	as->loading = false;
	return 0;
}

int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
	KASSERT(as->as_stackpbase != 0);
	*stackptr = USERSTACK;
	return 0;
}

int
as_copy(struct addrspace *old, struct addrspace **ret)
{
	struct addrspace *new;

	new = as_create();
	if (new == NULL) {
		return ENOMEM;
	}

	new->as_vbase1 = old->as_vbase1;
	new->as_npages1 = old->as_npages1;
	new->as_vbase2 = old->as_vbase2;
	new->as_npages2 = old->as_npages2;

	/* (Mis)use as_prepare_load to allocate some physical memory. */
	if (as_prepare_load(new)) {
		as_destroy(new);
		return ENOMEM;
	}

	KASSERT(new->as_pbase1 != 0);
	KASSERT(new->as_pbase2 != 0);
	KASSERT(new->as_stackpbase != 0);

	memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
		(const void *)PADDR_TO_KVADDR(old->as_pbase1),
		old->as_npages1*PAGE_SIZE);

	memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
		(const void *)PADDR_TO_KVADDR(old->as_pbase2),
		old->as_npages2*PAGE_SIZE);

	memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
		(const void *)PADDR_TO_KVADDR(old->as_stackpbase),
		DUMBVM_STACKPAGES*PAGE_SIZE);

	*ret = new;
	return 0;
}
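
/*
 * For reference, a sketch of how these functions are typically driven at
 * program startup (cf. runprogram()/load_elf(), which live outside this
 * file; the ordering below follows the usual OS/161 flow and error
 * handling is omitted):
 */
#if 0
	struct addrspace *as;
	vaddr_t entrypoint, stackptr;

	as = as_create();
	curproc_setas(as);		/* install it so as_activate() can find it */
	as_activate();			/* invalidate stale TLB entries */
	load_elf(v, &entrypoint);	/* calls as_define_region() per segment,
					   as_prepare_load() before copying the
					   segments in, then as_complete_load(),
					   which clears as->loading */
	as_define_stack(as, &stackptr);	/* stackptr = USERSTACK */
#endif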