- /*
- * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
- * The President and Fellows of Harvard College.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of the University nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
- #include <types.h>
- #include <kern/errno.h>
- #include <lib.h>
- #include <spl.h>
- #include <spinlock.h>
- #include <proc.h>
- #include <current.h>
- #include <mips/tlb.h>
- #include <addrspace.h>
- #include <vm.h>
- /*
- * Dumb MIPS-only "VM system" that is intended to only be just barely
- * enough to struggle off the ground.
- */
- /* under dumbvm, always have 48k of user stack */
- #define DUMBVM_STACKPAGES 12
- /*
- * Wrap ram_stealmem in a spinlock.
- */
- static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
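- /*
- * Minimal coremap: one int per physical frame after the kernel. num[i] is
- * -1 when frame i is free; an allocated block of n frames is tagged
- * 0,1,...,n-1 so its length can be recovered when it is freed.
- */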
- typedef struct coremap
- {
- paddr_t start;
- int framecount;
- int next;
- int * num;
- } coremap;
- static coremap map;
- static bool init = false;
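- /*
- * Build the coremap out of the physical memory left over after the kernel:
- * the frame-state array itself lives at the start of that region, and the
- * pages after it (rounded up to a page boundary) are the managed frames.
- */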
- void vm_bootstrap(void)
- {
- paddr_t min;
- paddr_t max;
- // get the bounds of the physical memory that remains after the kernel
- ram_getsize(&min, &max);
- // # of frames if we start here
- map.framecount = (max - min) / PAGE_SIZE;
- // start of the coremap array
- map.num = (int *) PADDR_TO_KVADDR(min);
- // min should start at a page start address (roundup), and be after the space for the array
- min += ROUNDUP(map.framecount * sizeof(int), PAGE_SIZE);
- // framecount needs to reflect the frames we took for the array of frames
- map.framecount = (max - min) / PAGE_SIZE;
- // set the start of the physical memory managed by the coremap
- map.start = min;
- // set next frame to the start
- map.next = 0;
-
- // set all frames to empty
- for (int i = 0; i < map.framecount; ++i)
- {
- map.num[i] = -1;
- }
-
- init = true;
- }
- // get physical pages straight from ram_stealmem (used before the coremap exists)
- static paddr_t getppages(int n)
- {
- paddr_t addr;
- spinlock_acquire(&stealmem_lock);
- addr = ram_stealmem(n);
- spinlock_release(&stealmem_lock);
- return addr;
- }
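- /*
- * Allocate n contiguous frames from the coremap, tagging them 0..n-1 so
- * free_kpages can recover the block length later. Returns the physical
- * address of the first frame, or 0 if no contiguous run is available.
- */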
- static paddr_t getvpages(int n)
- {
- bool first = true;
- spinlock_acquire(&stealmem_lock);
- for (int i = map.next; i < map.framecount; ++i)
- {
- // may have a block starting here
- if ((map.num[i]) == -1)
- {
- int temp = n - 1;
-
- // see if the next n - 1 frames are also free
- while (temp)
- {
- // don't read past the end of the coremap
- if (i + temp >= map.framecount) goto skip1;
- // frame is available
- if (map.num[i + temp] == -1)
- {
- --temp;
- continue;
- }
- // don't have enough contiguous space
- goto skip1;
- }
-
- // we have enough frames: tag each one with its offset within the block
- for (int j = 0; j < n; ++j)
- {
- map.num[i + j] = j;
- }
-
- // resume the next search just past the block we allocated
- if ((i + n) < map.framecount) map.next = i + n;
- else map.next = 0;
- spinlock_release(&stealmem_lock);
- return map.start + PAGE_SIZE * i;
- }
-
- skip1:
- // the block cannot fit before the end of the map and the search did not
- // start in this tail region, so wrap around and continue from frame 0
- if (i + n >= map.framecount && map.next < i)
- {
- // reset i
- i = -1;
- first = false;
- continue;
- }
-
- // skip ahead over the run of allocated frames starting at i
- int temp = i;
- while (temp < map.framecount && map.num[temp] != -1)
- {
- // looping around
- if (temp == map.framecount - 1)
- {
- temp = 0;
- break;
- }
-
- // we came back to the start
- if (temp == map.next && !first)
- {
- spinlock_release(&stealmem_lock);
- return 0;
- }
- ++temp;
- }
-
- // if we came back to the start
- if ((i == map.next || temp == map.next) && !first)
- {
- spinlock_release(&stealmem_lock);
- return 0;
- }
-
- // jump the outer loop to the free frame found above (the ++i lands on temp)
- if (i != temp) i = temp - 1;
- first = false;
- }
-
- spinlock_release(&stealmem_lock);
- return 0;
- }
- /* Allocate/free some kernel-space virtual pages */
- vaddr_t alloc_kpages(int npages)
- {
- paddr_t pa;
- if (init) pa = getvpages(npages);
- else pa = getppages(npages);
- if (!(pa)) return 0;
- return PADDR_TO_KVADDR(pa);
- }
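- /*
- * Return a block of pages to the coremap. Before vm_bootstrap there is no
- * coremap, so memory handed out by ram_stealmem is simply leaked.
- */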
- void free_kpages(vaddr_t addr)
- {
- if (!(init))
- {
- (void)addr;
- return;
- }
- spinlock_acquire(&stealmem_lock);
- paddr_t a = addr - MIPS_KSEG0;
- int start = (a - map.start) / PAGE_SIZE;
- KASSERT(map.num[start] == 0);
- int lockstep = 0;
-
- // clear frames until the 0,1,2,... tag sequence ends
- while (start < map.framecount && map.num[start] == lockstep)
- {
- map.num[start] = -1;
- ++start;
- ++lockstep;
- }
- spinlock_release(&stealmem_lock);
- }
- void
- vm_tlbshootdown_all(void)
- {
- panic("dumbvm tried to do tlb shootdown?!\n");
- }
- void
- vm_tlbshootdown(const struct tlbshootdown *ts)
- {
- (void)ts;
- panic("dumbvm tried to do tlb shootdown?!\n");
- }
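- /*
- * Handle a TLB miss: find which segment (text, data, or stack) the fault
- * address falls in, compute the physical address from that segment's base,
- * and install the mapping in the TLB.
- */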
- int vm_fault(int faulttype, vaddr_t faultaddress)
- {
- vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
- paddr_t paddr;
- int i;
- uint32_t ehi, elo;
- struct addrspace *as;
- int spl;
- bool readonly = false;
- faultaddress &= PAGE_FRAME;
- DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);
- switch (faulttype)
- {
- // attempted write to a read-only (text) page: report an error rather
- // than retrying the faulting instruction forever
- case VM_FAULT_READONLY:
- return EINVAL;
- case VM_FAULT_READ:
- case VM_FAULT_WRITE:
- break;
- default:
- return EINVAL;
- }
- if (curproc == NULL) {
- /*
- * No process. This is probably a kernel fault early
- * in boot. Return EFAULT so as to panic instead of
- * getting into an infinite faulting loop.
- */
- return EFAULT;
- }
- as = curproc_getas();
- if (as == NULL) {
- /*
- * No address space set up. This is probably also a
- * kernel fault early in boot.
- */
- return EFAULT;
- }
- /* Assert that the address space has been set up properly. */
- KASSERT(as->as_vbase1 != 0);
- KASSERT(as->as_pbase1 != 0);
- KASSERT(as->as_npages1 != 0);
- KASSERT(as->as_vbase2 != 0);
- KASSERT(as->as_pbase2 != 0);
- KASSERT(as->as_npages2 != 0);
- KASSERT(as->as_stackpbase != 0);
- KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
- KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
- KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
- KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
- KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);
- vbase1 = as->as_vbase1;
- vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
- vbase2 = as->as_vbase2;
- vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
- stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
- stacktop = USERSTACK;
- // text segment: read-only once the executable has finished loading
- if (faultaddress >= vbase1 && faultaddress < vtop1)
- {
- paddr = (faultaddress - vbase1) + as->as_pbase1;
- readonly = true;
- }
- // data segment
- else if (faultaddress >= vbase2 && faultaddress < vtop2)
- {
- paddr = (faultaddress - vbase2) + as->as_pbase2;
- }
- // stack
- else if (faultaddress >= stackbase && faultaddress < stacktop)
- {
- paddr = (faultaddress - stackbase) + as->as_stackpbase;
- }
- else return EFAULT;
- /* make sure it's page-aligned */
- KASSERT((paddr & PAGE_FRAME) == paddr);
- /* Disable interrupts on this CPU while frobbing the TLB. */
- spl = splhigh();
- for (i = 0; i < NUM_TLB; ++i)
- {
- tlb_read(&ehi, &elo, i);
- if (elo & TLBLO_VALID)
- {
- continue;
- }
- ehi = faultaddress;
- elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
- if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
- DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
- tlb_write(ehi, elo, i);
- splx(spl);
- return 0;
- }
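- // no free TLB slot was found above: evict a random entry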
- ehi = faultaddress;
- elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
- if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
- tlb_random(ehi, elo);
- splx(spl);
- return 0;
- }
- struct addrspace * as_create(void)
- {
- struct addrspace *as = kmalloc(sizeof(struct addrspace));
- if (as==NULL) {
- return NULL;
- }
- as->as_vbase1 = 0;
- as->as_pbase1 = 0;
- as->as_npages1 = 0;
- as->as_vbase2 = 0;
- as->as_pbase2 = 0;
- as->as_npages2 = 0;
- as->as_stackpbase = 0;
- as->loading = true;
- return as;
- }
- void
- as_destroy(struct addrspace *as)
- {
- // the pbase fields hold physical addresses; convert them back to the
- // kernel virtual addresses that alloc_kpages returned before freeing
- free_kpages(PADDR_TO_KVADDR(as->as_pbase1));
- free_kpages(PADDR_TO_KVADDR(as->as_pbase2));
- free_kpages(PADDR_TO_KVADDR(as->as_stackpbase));
- kfree(as);
- }
- void
- as_activate(void)
- {
- int i, spl;
- struct addrspace *as;
- as = curproc_getas();
- #ifdef UW
- /* Kernel threads don't have an address space to activate */
- #endif
- if (as == NULL) {
- return;
- }
- /* Disable interrupts on this CPU while frobbing the TLB. */
- spl = splhigh();
- for (i=0; i<NUM_TLB; i++) {
- tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
- }
- splx(spl);
- }
- void
- as_deactivate(void)
- {
- /* nothing */
- }
- int
- as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
- int readable, int writeable, int executable)
- {
- size_t npages;
- /* Align the region. First, the base... */
- sz += vaddr & ~(vaddr_t)PAGE_FRAME;
- vaddr &= PAGE_FRAME;
- /* ...and now the length. */
- sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;
- npages = sz / PAGE_SIZE;
- /* We don't use these - all pages are read-write */
- (void)readable;
- (void)writeable;
- (void)executable;
- if (as->as_vbase1 == 0) {
- as->as_vbase1 = vaddr;
- as->as_npages1 = npages;
- return 0;
- }
- if (as->as_vbase2 == 0) {
- as->as_vbase2 = vaddr;
- as->as_npages2 = npages;
- return 0;
- }
- /*
- * Support for more than two regions is not available.
- */
- kprintf("dumbvm: Warning: too many regions\n");
- return EUNIMP;
- }
- static
- void
- as_zero_region(paddr_t paddr, unsigned npages)
- {
- bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
- }
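- /*
- * Allocate and zero the physical frames backing both regions and the user
- * stack. alloc_kpages returns a KSEG0 virtual address, so subtract
- * MIPS_KSEG0 to store the physical base of each segment.
- */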
- int
- as_prepare_load(struct addrspace *as)
- {
- KASSERT(as->as_pbase1 == 0);
- KASSERT(as->as_pbase2 == 0);
- KASSERT(as->as_stackpbase == 0);
- // check each allocation before converting to a physical address:
- // subtracting MIPS_KSEG0 from a failed (0) return would hide the error
- vaddr_t va;
- va = alloc_kpages(as->as_npages1);
- if (va == 0) {
- return ENOMEM;
- }
- as->as_pbase1 = va - MIPS_KSEG0;
- va = alloc_kpages(as->as_npages2);
- if (va == 0) {
- return ENOMEM;
- }
- as->as_pbase2 = va - MIPS_KSEG0;
- va = alloc_kpages(DUMBVM_STACKPAGES);
- if (va == 0) {
- return ENOMEM;
- }
- as->as_stackpbase = va - MIPS_KSEG0;
-
- as_zero_region(as->as_pbase1, as->as_npages1);
- as_zero_region(as->as_pbase2, as->as_npages2);
- as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);
- return 0;
- }
- int
- as_complete_load(struct addrspace *as)
- {
- (void)as;
- return 0;
- }
- int
- as_define_stack(struct addrspace *as, vaddr_t *stackptr)
- {
- KASSERT(as->as_stackpbase != 0);
- *stackptr = USERSTACK;
- return 0;
- }
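- /*
- * Duplicate an address space: copy the region layout, let as_prepare_load
- * allocate fresh frames, then copy the old contents page for page.
- */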
- int
- as_copy(struct addrspace *old, struct addrspace **ret)
- {
- struct addrspace *new;
- new = as_create();
- if (new==NULL) {
- return ENOMEM;
- }
- new->as_vbase1 = old->as_vbase1;
- new->as_npages1 = old->as_npages1;
- new->as_vbase2 = old->as_vbase2;
- new->as_npages2 = old->as_npages2;
- /* (Mis)use as_prepare_load to allocate some physical memory. */
- if (as_prepare_load(new)) {
- as_destroy(new);
- return ENOMEM;
- }
- KASSERT(new->as_pbase1 != 0);
- KASSERT(new->as_pbase2 != 0);
- KASSERT(new->as_stackpbase != 0);
- memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
- (const void *)PADDR_TO_KVADDR(old->as_pbase1),
- old->as_npages1*PAGE_SIZE);
- memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
- (const void *)PADDR_TO_KVADDR(old->as_pbase2),
- old->as_npages2*PAGE_SIZE);
- memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
- (const void *)PADDR_TO_KVADDR(old->as_stackpbase),
- DUMBVM_STACKPAGES*PAGE_SIZE);
-
- *ret = new;
- return 0;
- }