/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>

/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground.
 */

/* under dumbvm, always have 48k of user stack */
#define DUMBVM_STACKPAGES    12

/*
 * Wrap ram_stealmem in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
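
/*
 * Minimal physical-memory coremap. Each frame gets one entry in num:
 * -1 marks a free frame, and an allocated block of n frames stores
 * 0, 1, ..., n-1 in its entries, so free_kpages can recover the block
 * length by walking that sequence. next is a hint for where the next
 * search should begin.
 */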
typedef struct coremap {
        paddr_t start;
        int framecount;
        int next;
        int *num;
} coremap;

static coremap map;
static bool init = false;

void vm_bootstrap(void)
{
        paddr_t min;
        paddr_t max;

        // size of ram available
        ram_getsize(&min, &max);
        // # of frames if we start here
        map.framecount = (max - min) / PAGE_SIZE;
        // start of the coremap array
        map.num = (int *) PADDR_TO_KVADDR(min);
        // min should start at a page start address (round up), after the space for the array
        min += ROUNDUP(map.framecount * sizeof(int), PAGE_SIZE);
        // framecount needs to reflect the frames we took for the array itself
        map.framecount = (max - min) / PAGE_SIZE;
        // set the start of actual managed memory
        map.start = min;
        // set next frame to the start
        map.next = 0;
        // set all frames to empty
        for (int i = 0; i < map.framecount; ++i) {
                map.num[i] = -1;
        }
        init = true;
}

// get physical pages
static paddr_t getppages(int n)
{
        paddr_t addr;

        spinlock_acquire(&stealmem_lock);
        addr = ram_stealmem(n);
        spinlock_release(&stealmem_lock);
        return addr;
}
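
/*
 * First-fit search of the coremap for n contiguous free frames,
 * starting at the next hint and wrapping around to the front at most
 * once. Returns the physical address of the block, or 0 if no
 * contiguous run of n frames exists.
 */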
static paddr_t getvpages(int n)
{
        bool first = true;

        spinlock_acquire(&stealmem_lock);
        for (int i = map.next; i < map.framecount; ++i) {
                // may have a block starting here
                if (map.num[i] == -1) {
                        // the block would run past the end of the map
                        if (i + n > map.framecount) goto skip1;
                        int temp = n - 1;
                        // see if we have a full block
                        while (temp) {
                                // frame is available
                                if (map.num[i + temp] == -1) {
                                        --temp;
                                        continue;
                                }
                                // don't have enough contiguous space
                                goto skip1;
                        }
                        // we have enough frames: tag them 0..n-1
                        for (int j = 0; j < n; ++j) {
                                map.num[i + j] = j;
                        }
                        // advance the search hint
                        if ((map.next + n) < map.framecount) map.next += n;
                        else map.next = 0;
                        spinlock_release(&stealmem_lock);
                        return map.start + PAGE_SIZE * i;
                }
skip1:
                // start searching at 0 if we are at the end:
                // the block won't fit before the end, and we didn't start between here and the end
                if (i + n >= map.framecount && map.next < i) {
                        // reset i (the loop increment brings it back to 0)
                        i = -1;
                        first = false;
                        continue;
                }
                // advance i past the allocated frames
                int temp = i;
                while (temp < map.framecount && map.num[temp] != -1) {
                        // looping around
                        if (temp == map.framecount - 1) {
                                temp = 0;
                                break;
                        }
                        // we came back to the start
                        if (temp == map.next && !first) {
                                spinlock_release(&stealmem_lock);
                                return 0;
                        }
                        ++temp;
                }
                // if we came back to the start
                if ((i == map.next || temp == map.next) && !first) {
                        spinlock_release(&stealmem_lock);
                        return 0;
                }
                // move i ahead as computed above
                if (i != temp) i = temp - 1;
                first = false;
        }
        spinlock_release(&stealmem_lock);
        return 0;
}

/* Allocate/free some kernel-space virtual pages */
vaddr_t alloc_kpages(int npages)
{
        paddr_t pa;

        if (init) pa = getvpages(npages);
        else pa = getppages(npages);
        if (pa == 0) return 0;
        return PADDR_TO_KVADDR(pa);
}
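
/*
 * Free a block handed out by getvpages. A block's entries hold
 * 0, 1, 2, ... in order, so starting from the first frame we clear
 * entries while the stored values keep incrementing in lockstep; the
 * first mismatch marks the end of the block.
 */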
void free_kpages(vaddr_t addr)
{
        // nothing to free for memory handed out by ram_stealmem
        if (!init) {
                (void)addr;
                return;
        }
        spinlock_acquire(&stealmem_lock);
        paddr_t a = addr - MIPS_KSEG0;
        int start = (a - map.start) / PAGE_SIZE;
        KASSERT(map.num[start] == 0);
        int lockstep = 0;
        // check the bound before reading map.num[start]
        while (start < map.framecount && map.num[start] == lockstep) {
                map.num[start] = -1;
                ++start;
                ++lockstep;
        }
        spinlock_release(&stealmem_lock);
}

void
vm_tlbshootdown_all(void)
{
        panic("dumbvm tried to do tlb shootdown?!\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
        (void)ts;
        panic("dumbvm tried to do tlb shootdown?!\n");
}
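
/*
 * Handle a TLB miss: translate the faulting address against the two
 * segments and the stack, then load the mapping into a free TLB slot
 * (or a random one if the TLB is full). Text pages are entered
 * without TLBLO_DIRTY once loading has finished, so later writes to
 * them fault with VM_FAULT_READONLY.
 */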
int vm_fault(int faulttype, vaddr_t faultaddress)
{
        vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
        paddr_t paddr;
        int i;
        uint32_t ehi, elo;
        struct addrspace *as;
        int spl;
        bool readonly = false;

        faultaddress &= PAGE_FRAME;
        DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

        switch (faulttype) {
            case VM_FAULT_READONLY:
                return EINVAL;
            case VM_FAULT_READ:
            case VM_FAULT_WRITE:
                break;
            default:
                return EINVAL;
        }

        if (curproc == NULL) {
                /*
                 * No process. This is probably a kernel fault early
                 * in boot. Return EFAULT so as to panic instead of
                 * getting into an infinite faulting loop.
                 */
                return EFAULT;
        }

        as = curproc_getas();
        if (as == NULL) {
                /*
                 * No address space set up. This is probably also a
                 * kernel fault early in boot.
                 */
                return EFAULT;
        }

        /* Assert that the address space has been set up properly. */
        KASSERT(as->as_vbase1 != 0);
        KASSERT(as->as_pbase1 != 0);
        KASSERT(as->as_npages1 != 0);
        KASSERT(as->as_vbase2 != 0);
        KASSERT(as->as_pbase2 != 0);
        KASSERT(as->as_npages2 != 0);
        KASSERT(as->as_stackpbase != 0);
        KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
        KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
        KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
        KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
        KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

        vbase1 = as->as_vbase1;
        vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
        vbase2 = as->as_vbase2;
        vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
        stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
        stacktop = USERSTACK;

        // text segment
        if (faultaddress >= vbase1 && faultaddress < vtop1) {
                paddr = (faultaddress - vbase1) + as->as_pbase1;
                readonly = true;
        }
        // data segment
        else if (faultaddress >= vbase2 && faultaddress < vtop2) {
                paddr = (faultaddress - vbase2) + as->as_pbase2;
        }
        // stack
        else if (faultaddress >= stackbase && faultaddress < stacktop) {
                paddr = (faultaddress - stackbase) + as->as_stackpbase;
        }
        else {
                return EFAULT;
        }

        /* make sure it's page-aligned */
        KASSERT((paddr & PAGE_FRAME) == paddr);

        /* Disable interrupts on this CPU while frobbing the TLB. */
        spl = splhigh();
        for (i = 0; i < NUM_TLB; ++i) {
                tlb_read(&ehi, &elo, i);
                if (elo & TLBLO_VALID) {
                        continue;
                }
                ehi = faultaddress;
                elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
                // text pages become read-only once the executable has loaded
                if (readonly && !as->loading) elo &= ~TLBLO_DIRTY;
                DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
                tlb_write(ehi, elo, i);
                splx(spl);
                return 0;
        }

        // no free TLB slot; evict a random entry
        ehi = faultaddress;
        elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
        if (readonly && !as->loading) elo &= ~TLBLO_DIRTY;
        tlb_random(ehi, elo);
        splx(spl);
        return 0;
}

struct addrspace * as_create(void)
{
        struct addrspace *as = kmalloc(sizeof(struct addrspace));
        if (as == NULL) {
                return NULL;
        }
        // zero-init the segment bookkeeping so stale kmalloc contents
        // can't masquerade as a configured address space
        as->as_vbase1 = 0;
        as->as_pbase1 = 0;
        as->as_npages1 = 0;
        as->as_vbase2 = 0;
        as->as_pbase2 = 0;
        as->as_npages2 = 0;
        as->as_stackpbase = 0;
        as->pt = NULL;
        as->regions = 0;
        as->pages = 0;
        return as;
}

void
as_destroy(struct addrspace *as)
{
        // free_kpages expects KSEG0 virtual addresses; subtracting
        // MIPS_KSEG0 from a physical base only worked by 32-bit wraparound
        free_kpages(PADDR_TO_KVADDR(as->as_pbase1));
        free_kpages(PADDR_TO_KVADDR(as->as_pbase2));
        free_kpages(PADDR_TO_KVADDR(as->as_stackpbase));
        // release the page table allocated in as_define_region/as_prepare_load
        kfree(as->pt);
        kfree(as);
}

void
as_activate(void)
{
        int i, spl;
        struct addrspace *as;

        as = curproc_getas();
#ifdef UW
        /* Kernel threads don't have an address space to activate */
#endif
        if (as == NULL) {
                return;
        }

        /* Disable interrupts on this CPU while frobbing the TLB. */
        spl = splhigh();
        for (i = 0; i < NUM_TLB; i++) {
                tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
        }
        splx(spl);
}

void
as_deactivate(void)
{
        /* nothing */
}
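
/*
 * dumbvm supports exactly two regions: the text segment (defined
 * first) and the data segment (defined second). Each call grows the
 * flat page table by the region's page count; pte.section records
 * which region an entry belongs to (0 = text, 1 = data, 2 = stack).
 */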
int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
                 int readable, int writeable, int executable)
{
        size_t npages;

        /* Align the region. First, the base... */
        sz += vaddr & ~(vaddr_t)PAGE_FRAME;
        vaddr &= PAGE_FRAME;

        /* ...and now the length. */
        sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;

        npages = sz / PAGE_SIZE;

        /* We don't use these - all pages are read-write */
        (void)readable;
        (void)writeable;
        (void)executable;

        if (as->regions > 2) goto skip3;

        // code segment not inited yet
        if (as->regions == 0) {
                // record the segment's base and length; vm_fault and
                // as_prepare_load depend on these
                as->as_vbase1 = vaddr;
                as->as_npages1 = npages;
                as->pt = kmalloc(npages * sizeof(struct pte));
                if (as->pt == NULL) return ENOMEM;
                for (size_t i = 0; i < npages; ++i) {
                        as->pt[i].section = 0;
                        as->pt[i].pageStart = NULL;
                        as->pt[i].frame = -1;
                }
                ++as->regions;
                as->pages += npages;
                return 0;
        }

        // this is the data segment
        if (as->regions == 1) {
                as->as_vbase2 = vaddr;
                as->as_npages2 = npages;
                // alloc new pt, copy over existing entries
                struct pte *temp = kmalloc((npages + as->pages) * sizeof(struct pte));
                if (temp == NULL) return ENOMEM;
                for (size_t i = 0; i < as->pages; ++i) {
                        temp[i] = as->pt[i];
                }
                // fill in the new section's entries
                for (size_t i = as->pages; i < (npages + as->pages); ++i) {
                        temp[i].section = 1;
                        temp[i].pageStart = NULL;
                        temp[i].frame = -1;
                }
                kfree(as->pt);
                as->pt = temp;
                ++as->regions;
                as->pages += npages;
                return 0;
        }

        /*
         * Support for more than two regions is not available.
         */
skip3:
        kprintf("dumbvm: Warning: too many regions\n");
        return EUNIMP;
}

static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
        bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}
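
/*
 * Extend the page table with entries for the stack, then allocate
 * and zero physical memory for both segments and the stack. The
 * closing pageStart loop is a hedged reconstruction of a loop left
 * unfinished in the original; see the comment there.
 */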
int
as_prepare_load(struct addrspace *as)
{
        // alloc new pt with room for the stack, copy over existing entries
        struct pte *temp = kmalloc((DUMBVM_STACKPAGES + as->pages) * sizeof(struct pte));
        if (temp == NULL) return ENOMEM;
        for (int i = 0; i < as->pages; ++i) {
                temp[i] = as->pt[i];
        }
        // fill in the stack section's entries
        for (int i = as->pages; i < (DUMBVM_STACKPAGES + as->pages); ++i) {
                temp[i].section = 2;
                temp[i].pageStart = NULL;
                temp[i].frame = -1;
        }
        kfree(as->pt);
        as->pt = temp;
        ++as->regions;
        as->pages += DUMBVM_STACKPAGES;

        // check alloc_kpages' result before converting it to a physical
        // address: it returns 0 on failure, which the subtraction would hide
        vaddr_t va;

        va = alloc_kpages(as->as_npages1);
        if (va == 0) return ENOMEM;
        as->as_pbase1 = va - MIPS_KSEG0;

        va = alloc_kpages(as->as_npages2);
        if (va == 0) return ENOMEM;
        as->as_pbase2 = va - MIPS_KSEG0;

        va = alloc_kpages(DUMBVM_STACKPAGES);
        if (va == 0) return ENOMEM;
        as->as_stackpbase = va - MIPS_KSEG0;

        as_zero_region(as->as_pbase1, as->as_npages1);
        as_zero_region(as->as_pbase2, as->as_npages2);
        as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

        // The original loop here was left unfinished. As a sketch (an
        // assumption, not confirmed by the source), record each page's
        // KSEG0 address now that the sections are allocated, assuming
        // entries are ordered within their sections.
        int t = 0, d = 0, s = 0;
        for (int i = 0; i < as->pages; ++i) {
                switch (as->pt[i].section) {
                    case 0: // text
                        as->pt[i].pageStart =
                                (void *)PADDR_TO_KVADDR(as->as_pbase1 + t++ * PAGE_SIZE);
                        break;
                    case 1: // data
                        as->pt[i].pageStart =
                                (void *)PADDR_TO_KVADDR(as->as_pbase2 + d++ * PAGE_SIZE);
                        break;
                    default: // stack
                        as->pt[i].pageStart =
                                (void *)PADDR_TO_KVADDR(as->as_stackpbase + s++ * PAGE_SIZE);
                        break;
                }
        }
        return 0;
}

int
as_complete_load(struct addrspace *as)
{
        (void)as;
        return 0;
}

int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
        KASSERT(as->as_stackpbase != 0);
        *stackptr = USERSTACK;
        return 0;
}
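
/*
 * Duplicate an address space for fork: copy the segment layout,
 * reuse as_prepare_load to allocate fresh physical memory, then copy
 * the parent's pages byte-for-byte into the child's frames.
 */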
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
        struct addrspace *new;

        new = as_create();
        if (new == NULL) {
                return ENOMEM;
        }

        new->as_vbase1 = old->as_vbase1;
        new->as_npages1 = old->as_npages1;
        new->as_vbase2 = old->as_vbase2;
        new->as_npages2 = old->as_npages2;

        /* (Mis)use as_prepare_load to allocate some physical memory. */
        if (as_prepare_load(new)) {
                as_destroy(new);
                return ENOMEM;
        }

        KASSERT(new->as_pbase1 != 0);
        KASSERT(new->as_pbase2 != 0);
        KASSERT(new->as_stackpbase != 0);

        memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
                (const void *)PADDR_TO_KVADDR(old->as_pbase1),
                old->as_npages1 * PAGE_SIZE);
        memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
                (const void *)PADDR_TO_KVADDR(old->as_pbase2),
                old->as_npages2 * PAGE_SIZE);
        memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
                (const void *)PADDR_TO_KVADDR(old->as_stackpbase),
                DUMBVM_STACKPAGES * PAGE_SIZE);

        *ret = new;
        return 0;
}