dumbvm.c

/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>

/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground.
 */

/* under dumbvm, always have 48k of user stack */
#define DUMBVM_STACKPAGES    12
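/* 12 pages * 4 KB per page (PAGE_SIZE) = the 48k promised above. */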

/*
 * Wrap ram_stealmem in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;

void
vm_bootstrap(void)
{
        /* Do nothing. */
}
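
/*
 * Grab physical pages straight from ram_stealmem, serialized by
 * stealmem_lock. Stolen pages are never tracked, so there is no way
 * to give them back.
 */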
static
paddr_t
getppages(unsigned long npages)
{
        paddr_t addr;

        spinlock_acquire(&stealmem_lock);

        addr = ram_stealmem(npages);

        spinlock_release(&stealmem_lock);
        return addr;
}

/* Allocate/free some kernel-space virtual pages */
vaddr_t
alloc_kpages(int npages)
{
        paddr_t pa;

        pa = getppages(npages);
        if (pa == 0) {
                return 0;
        }
        return PADDR_TO_KVADDR(pa);
}
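
/*
 * PADDR_TO_KVADDR above returns the KSEG0 alias of the physical
 * address; KSEG0 is direct-mapped by the MIPS hardware, so kernel
 * pages never go through the TLB. Since getppages cannot return
 * memory to ram_stealmem, freeing is a no-op and the pages leak.
 */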
void
free_kpages(vaddr_t addr)
{
        /* nothing - leak the memory. */

        (void)addr;
}

void
vm_tlbshootdown_all(void)
{
        panic("dumbvm tried to do tlb shootdown?!\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
        (void)ts;
        panic("dumbvm tried to do tlb shootdown?!\n");
}
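
/*
 * Handle a TLB miss: check the faulting address against the two
 * program segments and the stack, compute the physical address, and
 * load the translation into a free TLB slot. Read-only faults cannot
 * happen because every entry is created writable (TLBLO_DIRTY).
 */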
int
vm_fault(int faulttype, vaddr_t faultaddress)
{
        vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
        paddr_t paddr;
        int i;
        uint32_t ehi, elo;
        struct addrspace *as;
        int spl;

        faultaddress &= PAGE_FRAME;

        DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

        switch (faulttype) {
            case VM_FAULT_READONLY:
                /* We always create pages read-write, so we can't get this */
                panic("dumbvm: got VM_FAULT_READONLY\n");
            case VM_FAULT_READ:
            case VM_FAULT_WRITE:
                break;
            default:
                return EINVAL;
        }

        if (curproc == NULL) {
                /*
                 * No process. This is probably a kernel fault early
                 * in boot. Return EFAULT so as to panic instead of
                 * getting into an infinite faulting loop.
                 */
                return EFAULT;
        }

        as = curproc_getas();
        if (as == NULL) {
                /*
                 * No address space set up. This is probably also a
                 * kernel fault early in boot.
                 */
                return EFAULT;
        }

        /* Assert that the address space has been set up properly. */
        KASSERT(as->as_vbase1 != 0);
        KASSERT(as->as_pbase1 != 0);
        KASSERT(as->as_npages1 != 0);
        KASSERT(as->as_vbase2 != 0);
        KASSERT(as->as_pbase2 != 0);
        KASSERT(as->as_npages2 != 0);
        KASSERT(as->as_stackpbase != 0);
        KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
        KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
        KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
        KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
        KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

        vbase1 = as->as_vbase1;
        vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
        vbase2 = as->as_vbase2;
        vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
        stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
        stacktop = USERSTACK;
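
        /*
         * Each segment is one physically contiguous chunk, so the
         * translation is just base + offset: no page table lookup.
         */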
        if (faultaddress >= vbase1 && faultaddress < vtop1) {
                paddr = (faultaddress - vbase1) + as->as_pbase1;
        }
        else if (faultaddress >= vbase2 && faultaddress < vtop2) {
                paddr = (faultaddress - vbase2) + as->as_pbase2;
        }
        else if (faultaddress >= stackbase && faultaddress < stacktop) {
                paddr = (faultaddress - stackbase) + as->as_stackpbase;
        }
        else {
                return EFAULT;
        }

        /* make sure it's page-aligned */
        KASSERT((paddr & PAGE_FRAME) == paddr);

        /* Disable interrupts on this CPU while frobbing the TLB. */
        spl = splhigh();
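
        /*
         * Scan for an invalid (free) TLB slot. ehi holds the virtual
         * page number, elo the physical frame plus flags; TLBLO_DIRTY
         * makes the mapping writable and TLBLO_VALID makes it live.
         */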
        for (i=0; i<NUM_TLB; i++) {
                tlb_read(&ehi, &elo, i);
                if (elo & TLBLO_VALID) {
                        continue;
                }
                ehi = faultaddress;
                elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
                DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
                tlb_write(ehi, elo, i);
                splx(spl);
                return 0;
        }

        kprintf("dumbvm: Ran out of TLB entries - cannot handle page fault\n");
        splx(spl);
        return EFAULT;
}

struct addrspace *
as_create(void)
{
        struct addrspace *as = kmalloc(sizeof(struct addrspace));
        if (as==NULL) {
                return NULL;
        }

        as->as_vbase1 = 0;
        as->as_pbase1 = 0;
        as->as_npages1 = 0;
        as->as_vbase2 = 0;
        as->as_pbase2 = 0;
        as->as_npages2 = 0;
        as->as_stackpbase = 0;

        return as;
}

void
as_destroy(struct addrspace *as)
{
        kfree(as);
}
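
/*
 * No address-space IDs are used here, so switching to a new address
 * space means invalidating every TLB entry, one slot at a time.
 */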
void
as_activate(void)
{
        int i, spl;
        struct addrspace *as;

        as = curproc_getas();
#ifdef UW
        /* Kernel threads don't have an address space to activate */
#endif
        if (as == NULL) {
                return;
        }

        /* Disable interrupts on this CPU while frobbing the TLB. */
        spl = splhigh();

        for (i=0; i<NUM_TLB; i++) {
                tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
        }

        splx(spl);
}

void
as_deactivate(void)
{
        /* nothing */
}

int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
                 int readable, int writeable, int executable)
{
        size_t npages;
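
        /*
         * Example: vaddr 0x400010, sz 0x20 becomes base 0x400000,
         * sz 0x30, which then rounds up to one full 4 KB page.
         */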
        /* Align the region. First, the base... */
        sz += vaddr & ~(vaddr_t)PAGE_FRAME;
        vaddr &= PAGE_FRAME;

        /* ...and now the length. */
        sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;

        npages = sz / PAGE_SIZE;

        /* We don't use these - all pages are read-write */
        (void)readable;
        (void)writeable;
        (void)executable;

        if (as->as_vbase1 == 0) {
                as->as_vbase1 = vaddr;
                as->as_npages1 = npages;
                return 0;
        }

        if (as->as_vbase2 == 0) {
                as->as_vbase2 = vaddr;
                as->as_npages2 = npages;
                return 0;
        }

        /*
         * Support for more than two regions is not available.
         */
        kprintf("dumbvm: Warning: too many regions\n");
        return EUNIMP;
}

static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
        bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}
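
/*
 * Allocate one physically contiguous chunk per segment (plus the
 * stack) and zero them through their KSEG0 aliases. That contiguity
 * is what lets vm_fault translate with simple base + offset math.
 */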
int
as_prepare_load(struct addrspace *as)
{
        KASSERT(as->as_pbase1 == 0);
        KASSERT(as->as_pbase2 == 0);
        KASSERT(as->as_stackpbase == 0);

        as->as_pbase1 = getppages(as->as_npages1);
        if (as->as_pbase1 == 0) {
                return ENOMEM;
        }

        as->as_pbase2 = getppages(as->as_npages2);
        if (as->as_pbase2 == 0) {
                return ENOMEM;
        }

        as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
        if (as->as_stackpbase == 0) {
                return ENOMEM;
        }

        as_zero_region(as->as_pbase1, as->as_npages1);
        as_zero_region(as->as_pbase2, as->as_npages2);
        as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

        return 0;
}

int
as_complete_load(struct addrspace *as)
{
        (void)as;
        return 0;
}

int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
        KASSERT(as->as_stackpbase != 0);

        *stackptr = USERSTACK;
        return 0;
}
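
/*
 * Deep-copy an address space: mirror the region layout, let
 * as_prepare_load allocate fresh physical pages, then copy the
 * contents via the KSEG0 aliases of the old and new frames.
 */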
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
        struct addrspace *new;

        new = as_create();
        if (new==NULL) {
                return ENOMEM;
        }

        new->as_vbase1 = old->as_vbase1;
        new->as_npages1 = old->as_npages1;
        new->as_vbase2 = old->as_vbase2;
        new->as_npages2 = old->as_npages2;

        /* (Mis)use as_prepare_load to allocate some physical memory. */
        if (as_prepare_load(new)) {
                as_destroy(new);
                return ENOMEM;
        }

        KASSERT(new->as_pbase1 != 0);
        KASSERT(new->as_pbase2 != 0);
        KASSERT(new->as_stackpbase != 0);

        memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
                (const void *)PADDR_TO_KVADDR(old->as_pbase1),
                old->as_npages1*PAGE_SIZE);

        memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
                (const void *)PADDR_TO_KVADDR(old->as_pbase2),
                old->as_npages2*PAGE_SIZE);

        memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
                (const void *)PADDR_TO_KVADDR(old->as_stackpbase),
                DUMBVM_STACKPAGES*PAGE_SIZE);

        *ret = new;
        return 0;
}