/* dumbvm.c */

/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>

/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground.
 */

/* under dumbvm, always have 48k of user stack */
#define DUMBVM_STACKPAGES    12

/*
 * Wrap coremap access in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;

typedef struct coremap coremap;
//static coremap map;
//static bool init = false;
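
/*
 * The coremap is defined and initialized in ram.c (see the commented-out
 * bootstrap code below) and is assumed to be exposed to this file as
 * `map` through one of the headers included above. A sketch of the
 * layout this file relies on, inferred from usage here (field names
 * match the code, but this is an assumption, not the authoritative
 * definition):
 *
 *	struct coremap {
 *		int *num;        // per-frame state: -1 = free, otherwise
 *		                 // the frame's offset within its block
 *		int framecount;  // number of frames the map manages
 *		int next;        // frame index to begin the next search at
 *		paddr_t start;   // physical address of the first frame
 *	};
 */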

/*void vm_bootstrap(void)
{
	paddr_t min;
	paddr_t max;
	// size of ram available
	ram_getsize(&min, &max);
	// # of frames if we start here
	map.framecount = (max - min) / PAGE_SIZE;
	// start of the coremap array
	map.num = (int *) PADDR_TO_KVADDR(min);
	// min should start at a page start address (roundup), and be after the space for the array
	min += ROUNDUP(map.framecount * sizeof(int), PAGE_SIZE);
	// framecount needs to reflect the frames we took for the array of frames
	map.framecount = (max - min) / PAGE_SIZE;
	// set the start of actual virtual memory
	map.start = min;
	// set next frame to the start
	map.next = 0;
	// set all frames to empty
	for (int i = 0; i < map.framecount; ++i)
	{
		map.num[i] = -1;
	}
	init = true;
}*/

// init is in ram.c now
void vm_bootstrap(void)
{
	return;
}
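
/*
 * First-fit search for n contiguous free frames, starting at map.next
 * and wrapping around at most once. On success, frame i+j of the block
 * is stamped with j: for example, a 3-frame block found at frame 5
 * leaves map.num[5..7] == {0, 1, 2}, and free_kpages recovers the block
 * length by walking that run. Returns the physical address of the first
 * frame, or 0 if no such block exists. Caller holds stealmem_lock.
 */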
static paddr_t getvpages(int n)
{
	bool first = true;
	for (int i = map.next; i < map.framecount; ++i)
	{
		// may have a block starting here
		if (map.num[i] == -1)
		{
			// block would run off the end of the coremap
			if (i + n > map.framecount) goto skip1;
			int temp = n - 1;
			// see if we have a full block
			while (temp)
			{
				// frame is available
				if (map.num[i + temp] == -1)
				{
					--temp;
					continue;
				}
				// don't have enough contiguous space
				goto skip1;
			}
			// we have enough frames; stamp each with its offset in the block
			for (int j = 0; j < n; ++j)
			{
				map.num[i + j] = j;
			}
			if ((map.next + n) < map.framecount) map.next += n;
			else map.next = 0;
			DEBUG(DB_VM, "dumbvm: allocated %d frames\n", n);
			return map.start + PAGE_SIZE * i;
		}
skip1:
		// block won't fit before the end, and we didn't start between
		// here and the end, so wrap the search around to frame 0
		if (i + n >= map.framecount && map.next < i)
		{
			// reset i (the ++i above brings it back to 0)
			i = -1;
			first = false;
			continue;
		}
		// skip ahead over allocated frames
		int temp = i;
		while (temp < map.framecount && map.num[temp] != -1)
		{
			// looping around
			if (temp == map.framecount - 1)
			{
				temp = 0;
				break;
			}
			// we came back to the start
			if (temp == map.next && !first)
			{
				return 0;
			}
			++temp;
		}
		// if we came back to the start
		if ((i == map.next || temp == map.next) && !first)
		{
			return 0;
		}
		// jump i forward as computed above
		if (i != temp) i = temp - 1;
		first = false;
	}
	return 0;
}

/* Allocate/free some kernel-space virtual pages */
vaddr_t alloc_kpages(int npages)
{
	spinlock_acquire(&stealmem_lock);
	paddr_t pa = getvpages(npages);
	spinlock_release(&stealmem_lock);
	if (pa == 0) return 0;
	return PADDR_TO_KVADDR(pa);
}

void free_kpages(vaddr_t addr)
{
	spinlock_acquire(&stealmem_lock);
	paddr_t a = addr - MIPS_KSEG0;
	int start = (a - map.start) / PAGE_SIZE;
	// addr must be the start of an allocated block
	KASSERT(map.num[start] == 0);
	int lockstep = 0;
	while (start < map.framecount && map.num[start] == lockstep)
	{
		map.num[start] = -1;
		++start;
		++lockstep;
	}
	DEBUG(DB_VM, "dumbvm: freed %d frames\n", lockstep);
	spinlock_release(&stealmem_lock);
}
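
/*
 * Illustrative use of the pair above (hypothetical; nothing in this
 * file calls them this way): allocations are whole pages, returned as
 * KSEG0 virtual addresses, and freed as one block.
 *
 *	vaddr_t buf = alloc_kpages(2);	// two contiguous frames
 *	if (buf == 0) {
 *		return ENOMEM;
 *	}
 *	...
 *	free_kpages(buf);		// releases both frames
 */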

void
vm_tlbshootdown_all(void)
{
	panic("dumbvm tried to do tlb shootdown?!\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
	(void)ts;
	panic("dumbvm tried to do tlb shootdown?!\n");
}

int vm_fault(int faulttype, vaddr_t faultaddress)
{
	vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
	paddr_t paddr;
	int i;
	uint32_t ehi, elo;
	struct addrspace *as;
	int spl;
	bool readonly = false;

	faultaddress &= PAGE_FRAME;
	DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

	switch (faulttype)
	{
	    case VM_FAULT_READONLY:
		return EINVAL;
	    case VM_FAULT_READ:
	    case VM_FAULT_WRITE:
		break;
	    default:
		return EINVAL;
	}

	if (curproc == NULL) {
		/*
		 * No process. This is probably a kernel fault early
		 * in boot. Return EFAULT so as to panic instead of
		 * getting into an infinite faulting loop.
		 */
		return EFAULT;
	}

	as = curproc_getas();
	if (as == NULL) {
		/*
		 * No address space set up. This is probably also a
		 * kernel fault early in boot.
		 */
		return EFAULT;
	}

	/* Assert that the address space has been set up properly. */
	KASSERT(as->as_vbase1 != 0);
	KASSERT(as->as_pbase1 != 0);
	KASSERT(as->as_npages1 != 0);
	KASSERT(as->as_vbase2 != 0);
	KASSERT(as->as_pbase2 != 0);
	KASSERT(as->as_npages2 != 0);
	KASSERT(as->as_stackpbase != 0);
	KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
	KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
	KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
	KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
	KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

	vbase1 = as->as_vbase1;
	vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
	vbase2 = as->as_vbase2;
	vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
	stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
	stacktop = USERSTACK;
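
	/*
	 * A dumbvm address space is three physically contiguous segments:
	 * text (vbase1..vtop1), heap (vbase2..vtop2), and a fixed-size
	 * stack ending at USERSTACK. Translation is a constant offset per
	 * segment; for example (addresses illustrative only), with
	 * vbase1 = 0x400000 and as_pbase1 = 0x200000, a fault at 0x401234
	 * masks to faultaddress 0x401000 and maps to paddr 0x201000.
	 */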
	// text
	if (faultaddress >= vbase1 && faultaddress < vtop1)
	{
		paddr = (faultaddress - vbase1) + as->as_pbase1;
		readonly = true;
	}
	// heap
	else if (faultaddress >= vbase2 && faultaddress < vtop2)
	{
		paddr = (faultaddress - vbase2) + as->as_pbase2;
	}
	// stack
	else if (faultaddress >= stackbase && faultaddress < stacktop)
	{
		paddr = (faultaddress - stackbase) + as->as_stackpbase;
	}
	else return EFAULT;

	/* make sure it's page-aligned */
	KASSERT((paddr & PAGE_FRAME) == paddr);

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i = 0; i < NUM_TLB; ++i)
	{
		tlb_read(&ehi, &elo, i);
		if (elo & TLBLO_VALID)
		{
			continue;
		}
		ehi = faultaddress;
		elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
		if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
		DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
		tlb_write(ehi, elo, i);
		splx(spl);
		return 0;
	}

	ehi = faultaddress;
	elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
	if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
	tlb_random(ehi, elo);
	splx(spl);
	return 0;
}
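
/*
 * Note on the `loading` flag used above: it is assumed to be a bool
 * added to struct addrspace in addrspace.h (stock dumbvm does not have
 * it). While it is true, vm_fault installs text-segment TLB entries
 * with TLBLO_DIRTY set so the ELF loader can write the pages; after
 * as_complete_load clears it, text pages map read-only, and a write
 * raises VM_FAULT_READONLY, which vm_fault fails with EINVAL.
 */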

struct addrspace * as_create(void)
{
	struct addrspace *as = kmalloc(sizeof(struct addrspace));
	if (as == NULL) {
		return NULL;
	}
	as->as_vbase1 = 0;
	as->as_pbase1 = 0;
	as->as_npages1 = 0;
	as->as_vbase2 = 0;
	as->as_pbase2 = 0;
	as->as_npages2 = 0;
	as->as_stackpbase = 0;
	as->loading = true;
	return as;
}

void
as_destroy(struct addrspace *as)
{
	/*
	 * free_kpages expects the KSEG0 virtual address that alloc_kpages
	 * returned. Skip segments that were never allocated (e.g. if
	 * as_prepare_load failed partway through).
	 */
	if (as->as_pbase1 != 0) free_kpages(PADDR_TO_KVADDR(as->as_pbase1));
	if (as->as_pbase2 != 0) free_kpages(PADDR_TO_KVADDR(as->as_pbase2));
	if (as->as_stackpbase != 0) free_kpages(PADDR_TO_KVADDR(as->as_stackpbase));
	kfree(as);
}

void
as_activate(void)
{
	int i, spl;
	struct addrspace *as;

	as = curproc_getas();
#ifdef UW
	/* Kernel threads don't have an address space to activate */
#endif
	if (as == NULL) {
		return;
	}

	/* Disable interrupts on this CPU while frobbing the TLB. */
	spl = splhigh();

	for (i=0; i<NUM_TLB; i++) {
		tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
	}

	splx(spl);
}

void
as_deactivate(void)
{
	/* nothing */
}

int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
		 int readable, int writeable, int executable)
{
	size_t npages;

	/* Align the region. First, the base... */
	sz += vaddr & ~(vaddr_t)PAGE_FRAME;
	vaddr &= PAGE_FRAME;

	/* ...and now the length. */
	sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;

	npages = sz / PAGE_SIZE;
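
	/*
	 * For example, with 4 KB pages, a 5000-byte region based at
	 * 0x400010 absorbs the 0x10 base offset to become sz = 5016,
	 * rounds up to 8192, and so spans npages = 2.
	 */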

	/* We don't use these - all pages are read-write */
	(void)readable;
	(void)writeable;
	(void)executable;

	if (as->as_vbase1 == 0) {
		as->as_vbase1 = vaddr;
		as->as_npages1 = npages;
		return 0;
	}

	if (as->as_vbase2 == 0) {
		as->as_vbase2 = vaddr;
		as->as_npages2 = npages;
		return 0;
	}

	/*
	 * Support for more than two regions is not available.
	 */
	kprintf("dumbvm: Warning: too many regions\n");
	return EUNIMP;
}

static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
	bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}

int
as_prepare_load(struct addrspace *as)
{
	vaddr_t va;

	KASSERT(as->as_pbase1 == 0);
	KASSERT(as->as_pbase2 == 0);
	KASSERT(as->as_stackpbase == 0);

	/*
	 * Check the result of alloc_kpages before converting to a
	 * physical address: 0 - MIPS_KSEG0 wraps to a nonzero value,
	 * so testing the pbase fields afterward would never detect an
	 * allocation failure.
	 */
	va = alloc_kpages(as->as_npages1);
	if (va == 0) {
		return ENOMEM;
	}
	as->as_pbase1 = va - MIPS_KSEG0;

	va = alloc_kpages(as->as_npages2);
	if (va == 0) {
		return ENOMEM;
	}
	as->as_pbase2 = va - MIPS_KSEG0;

	va = alloc_kpages(DUMBVM_STACKPAGES);
	if (va == 0) {
		return ENOMEM;
	}
	as->as_stackpbase = va - MIPS_KSEG0;

	as_zero_region(as->as_pbase1, as->as_npages1);
	as_zero_region(as->as_pbase2, as->as_npages2);
	as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

	return 0;
}

int
as_complete_load(struct addrspace *as)
{
	/*
	 * Loading is finished: clear the flag so vm_fault maps text
	 * pages read-only from here on, and flush the TLB (via
	 * as_activate) so writable entries created during loading
	 * disappear.
	 */
	as->loading = false;
	as_activate();
	return 0;
}

int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
	KASSERT(as->as_stackpbase != 0);
	*stackptr = USERSTACK;
	return 0;
}

int
as_copy(struct addrspace *old, struct addrspace **ret)
{
	struct addrspace *new;

	new = as_create();
	if (new==NULL) {
		return ENOMEM;
	}

	new->as_vbase1 = old->as_vbase1;
	new->as_npages1 = old->as_npages1;
	new->as_vbase2 = old->as_vbase2;
	new->as_npages2 = old->as_npages2;
	/* the parent has already finished loading; don't leave the
	   copy's text writable */
	new->loading = old->loading;

	/* (Mis)use as_prepare_load to allocate some physical memory. */
	if (as_prepare_load(new)) {
		as_destroy(new);
		return ENOMEM;
	}

	KASSERT(new->as_pbase1 != 0);
	KASSERT(new->as_pbase2 != 0);
	KASSERT(new->as_stackpbase != 0);

	memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
		(const void *)PADDR_TO_KVADDR(old->as_pbase1),
		old->as_npages1*PAGE_SIZE);

	memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
		(const void *)PADDR_TO_KVADDR(old->as_pbase2),
		old->as_npages2*PAGE_SIZE);

	memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
		(const void *)PADDR_TO_KVADDR(old->as_stackpbase),
		DUMBVM_STACKPAGES*PAGE_SIZE);

	*ret = new;
	return 0;
}