/* dumbvm.c */
  1. /*
  2. * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
  3. * The President and Fellows of Harvard College.
  4. *
  5. * Redistribution and use in source and binary forms, with or without
  6. * modification, are permitted provided that the following conditions
  7. * are met:
  8. * 1. Redistributions of source code must retain the above copyright
  9. * notice, this list of conditions and the following disclaimer.
  10. * 2. Redistributions in binary form must reproduce the above copyright
  11. * notice, this list of conditions and the following disclaimer in the
  12. * documentation and/or other materials provided with the distribution.
  13. * 3. Neither the name of the University nor the names of its contributors
  14. * may be used to endorse or promote products derived from this software
  15. * without specific prior written permission.
  16. *
  17. * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
  18. * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  19. * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  20. * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
  21. * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  22. * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  23. * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  24. * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  25. * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  26. * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  27. * SUCH DAMAGE.
  28. */
  29. #include <types.h>
  30. #include <kern/errno.h>
  31. #include <lib.h>
  32. #include <spl.h>
  33. #include <spinlock.h>
  34. #include <proc.h>
  35. #include <current.h>
  36. #include <mips/tlb.h>
  37. #include <addrspace.h>
  38. #include <vm.h>
/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground.
 */
/* under dumbvm, always have 48k of user stack (12 pages * 4 KB) */
#define DUMBVM_STACKPAGES 12
/*
 * Wrap ram_stealmem in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
  49. void
  50. vm_bootstrap(void)
  51. {
  52. /* Do nothing. */
  53. }
  54. static
  55. paddr_t
  56. getppages(unsigned long npages)
  57. {
  58. paddr_t addr;
  59. spinlock_acquire(&stealmem_lock);
  60. addr = ram_stealmem(npages);
  61. spinlock_release(&stealmem_lock);
  62. return addr;
  63. }
  64. /* Allocate/free some kernel-space virtual pages */
  65. vaddr_t
  66. alloc_kpages(int npages)
  67. {
  68. paddr_t pa;
  69. pa = getppages(npages);
  70. if (pa==0) {
  71. return 0;
  72. }
  73. return PADDR_TO_KVADDR(pa);
  74. }
  75. void
  76. free_kpages(vaddr_t addr)
  77. {
  78. /* nothing - leak the memory. */
  79. (void)addr;
  80. }
  81. void
  82. vm_tlbshootdown_all(void)
  83. {
  84. panic("dumbvm tried to do tlb shootdown?!\n");
  85. }
  86. void
  87. vm_tlbshootdown(const struct tlbshootdown *ts)
  88. {
  89. (void)ts;
  90. panic("dumbvm tried to do tlb shootdown?!\n");
  91. }
/*
 * Handle a TLB fault at faultaddress.
 *
 * Translates the faulting address against the two fixed regions
 * (text, data) and the stack, then installs the translation into a
 * free TLB slot (or a random one if the TLB is full).
 *
 * Returns 0 on success, EINVAL for write-to-readonly faults or
 * unknown fault types, EFAULT for addresses outside every region or
 * when no process/address space is available.
 */
int vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;
	/* Set when the fault lies in region 1 (text); used below to
	 * strip the TLB dirty (write-enable) bit once loading is done. */
	bool readonly = false;
	/* Work with the page-aligned address only. */
	faultaddress &= PAGE_FRAME;
	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);
	switch (faulttype)
	{
	case VM_FAULT_READONLY:
		/* A write hit a TLB entry with the dirty bit clear,
		 * i.e. a store to the read-only text segment. EINVAL
		 * propagates to the trap handler. */
		return EINVAL;
	case VM_FAULT_READ:
	case VM_FAULT_WRITE:
		break;
	default:
		return EINVAL;
	}
	if (curproc == NULL) {
		/*
		 * No process. This is probably a kernel fault early
		 * in boot. Return EFAULT so as to panic instead of
		 * getting into an infinite faulting loop.
		 */
		return EFAULT;
	}
	as = curproc_getas();
	if (as == NULL) {
		/*
		 * No address space set up. This is probably also a
		 * kernel fault early in boot.
		 */
		return EFAULT;
	}
	/* Assert that the address space has been set up properly. */
	KASSERT(as->as_vbase1 != 0);
	KASSERT(as->as_pbase1 != 0);
	KASSERT(as->as_npages1 != 0);
	KASSERT(as->as_vbase2 != 0);
	KASSERT(as->as_pbase2 != 0);
	KASSERT(as->as_npages2 != 0);
	KASSERT(as->as_stackpbase != 0);
	KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);
	/* Compute the virtual bounds of each region. Regions are
	 * physically contiguous, so translation is base + offset. */
	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;
	// text (read-only once loading completes)
	if (faultaddress >= vbase1 && faultaddress < vtop1)
	{
		paddr = (faultaddress - vbase1) + as->as_pbase1;
		readonly = true;
	}
	// heap
	else if (faultaddress >= vbase2 && faultaddress < vtop2)
	{
		paddr = (faultaddress - vbase2) + as->as_pbase2;
	}
	// stack
	else if (faultaddress >= stackbase && faultaddress < stacktop)
	{
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else return EFAULT;
	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);
	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();
	/* Look for an invalid (free) TLB slot to fill. */
	for (i = 0; i < NUM_TLB; ++i)
	{
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID)
		{
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		/* While the executable is still being loaded, the text
		 * segment stays writable so load_elf can copy into it;
		 * afterwards the dirty (write) bit is cleared. */
		if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}
	/* TLB full: evict a random entry instead of failing. */
	ehi = faultaddress;
	elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
	tlb_random(ehi, elo);
	splx(spl);
	return 0;
}
  191. struct addrspace * as_create(void)
  192. {
  193. struct addrspace *as = kmalloc(sizeof(struct addrspace));
  194. if (as==NULL) {
  195. return NULL;
  196. }
  197. as->as_vbase1 = 0;
  198. as->as_pbase1 = 0;
  199. as->as_npages1 = 0;
  200. as->as_vbase2 = 0;
  201. as->as_pbase2 = 0;
  202. as->as_npages2 = 0;
  203. as->as_stackpbase = 0;
  204. as->loading = true;
  205. return as;
  206. }
  207. void
  208. as_destroy(struct addrspace *as)
  209. {
  210. kfree(as);
  211. }
  212. void
  213. as_activate(void)
  214. {
  215. int i, spl;
  216. struct addrspace *as;
  217. as = curproc_getas();
  218. #ifdef UW
  219. /* Kernel threads don't have an address spaces to activate */
  220. #endif
  221. if (as == NULL) {
  222. return;
  223. }
  224. /* Disable interrupts on this CPU while frobbing the TLB. */
  225. spl = splhigh();
  226. for (i=0; i<NUM_TLB; i++) {
  227. tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
  228. }
  229. splx(spl);
  230. }
  231. void
  232. as_deactivate(void)
  233. {
  234. /* nothing */
  235. }
  236. int
  237. as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
  238. int readable, int writeable, int executable)
  239. {
  240. size_t npages;
  241. /* Align the region. First, the base... */
  242. sz += vaddr & ~(vaddr_t)PAGE_FRAME;
  243. vaddr &= PAGE_FRAME;
  244. /* ...and now the length. */
  245. sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;
  246. npages = sz / PAGE_SIZE;
  247. /* We don't use these - all pages are read-write */
  248. (void)readable;
  249. (void)writeable;
  250. (void)executable;
  251. if (as->as_vbase1 == 0) {
  252. as->as_vbase1 = vaddr;
  253. as->as_npages1 = npages;
  254. return 0;
  255. }
  256. if (as->as_vbase2 == 0) {
  257. as->as_vbase2 = vaddr;
  258. as->as_npages2 = npages;
  259. return 0;
  260. }
  261. /*
  262. * Support for more than two regions is not available.
  263. */
  264. kprintf("dumbvm: Warning: too many regions\n");
  265. return EUNIMP;
  266. }
  267. static
  268. void
  269. as_zero_region(paddr_t paddr, unsigned npages)
  270. {
  271. bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
  272. }
  273. int
  274. as_prepare_load(struct addrspace *as)
  275. {
  276. KASSERT(as->as_pbase1 == 0);
  277. KASSERT(as->as_pbase2 == 0);
  278. KASSERT(as->as_stackpbase == 0);
  279. as->as_pbase1 = getppages(as->as_npages1);
  280. if (as->as_pbase1 == 0) {
  281. return ENOMEM;
  282. }
  283. as->as_pbase2 = getppages(as->as_npages2);
  284. if (as->as_pbase2 == 0) {
  285. return ENOMEM;
  286. }
  287. as->as_stackpbase = getppages(DUMBVM_STACKPAGES);
  288. if (as->as_stackpbase == 0) {
  289. return ENOMEM;
  290. }
  291. as_zero_region(as->as_pbase1, as->as_npages1);
  292. as_zero_region(as->as_pbase2, as->as_npages2);
  293. as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);
  294. return 0;
  295. }
  296. int
  297. as_complete_load(struct addrspace *as)
  298. {
  299. (void)as;
  300. return 0;
  301. }
  302. int
  303. as_define_stack(struct addrspace *as, vaddr_t *stackptr)
  304. {
  305. KASSERT(as->as_stackpbase != 0);
  306. *stackptr = USERSTACK;
  307. return 0;
  308. }
  309. int
  310. as_copy(struct addrspace *old, struct addrspace **ret)
  311. {
  312. struct addrspace *new;
  313. new = as_create();
  314. if (new==NULL) {
  315. return ENOMEM;
  316. }
  317. new->as_vbase1 = old->as_vbase1;
  318. new->as_npages1 = old->as_npages1;
  319. new->as_vbase2 = old->as_vbase2;
  320. new->as_npages2 = old->as_npages2;
  321. /* (Mis)use as_prepare_load to allocate some physical memory. */
  322. if (as_prepare_load(new)) {
  323. as_destroy(new);
  324. return ENOMEM;
  325. }
  326. KASSERT(new->as_pbase1 != 0);
  327. KASSERT(new->as_pbase2 != 0);
  328. KASSERT(new->as_stackpbase != 0);
  329. memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
  330. (const void *)PADDR_TO_KVADDR(old->as_pbase1),
  331. old->as_npages1*PAGE_SIZE);
  332. memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
  333. (const void *)PADDR_TO_KVADDR(old->as_pbase2),
  334. old->as_npages2*PAGE_SIZE);
  335. memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
  336. (const void *)PADDR_TO_KVADDR(old->as_stackpbase),
  337. DUMBVM_STACKPAGES*PAGE_SIZE);
  338. *ret = new;
  339. return 0;
  340. }