/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <kern/errno.h>
#include <lib.h>
#include <spl.h>
#include <spinlock.h>
#include <proc.h>
#include <current.h>
#include <mips/tlb.h>
#include <addrspace.h>
#include <vm.h>

/*
 * Dumb MIPS-only "VM system" that is intended to only be just barely
 * enough to struggle off the ground.
 */

/* under dumbvm, always have 48k of user stack */
#define DUMBVM_STACKPAGES    12

/*
 * Wrap ram_stealmem in a spinlock.
 */
static struct spinlock stealmem_lock = SPINLOCK_INITIALIZER;
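
/*
 * Minimal coremap: one int of bookkeeping per physical frame under our
 * control.  A value of -1 marks a free frame; an allocated block of n
 * frames stores 0, 1, ..., n-1 in order, so a free starting at the frame
 * holding 0 can walk forward and release exactly the frames whose values
 * count up in lockstep.  For example, after a 3-page allocation at
 * frame 4: num[] = { ..., -1, 0, 1, 2, -1, ... }.
 */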
typedef struct coremap
{
        paddr_t start;          /* first physical address we manage */
        int framecount;         /* number of frames we manage */
        int next;               /* search hint: frame index to try first */
        int *num;               /* per-frame bookkeeping array */
} coremap;

static coremap map;
static bool init = false;
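
/*
 * Carve the coremap out of the RAM still unclaimed after kernel startup:
 * the bookkeeping array itself occupies the bottom of that region
 * (rounded up to a page), and the frames it tracks follow.
 */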
void vm_bootstrap(void)
{
        paddr_t min;
        paddr_t max;

        // size of ram available
        ram_getsize(&min, &max);
        // # of frames if we start here
        map.framecount = (max - min) / PAGE_SIZE;
        // start of the coremap array
        map.num = (int *) PADDR_TO_KVADDR(min);
        // min should start at a page boundary (roundup), after the space for the array
        min += ROUNDUP(map.framecount * sizeof(int), PAGE_SIZE);
        // framecount needs to reflect the frames we gave up for the array itself
        map.framecount = (max - min) / PAGE_SIZE;
        // set the start of the physical memory we actually hand out
        map.start = min;
        // set next frame to the start
        map.next = 0;
        // set all frames to empty
        for (int i = 0; i < map.framecount; ++i)
        {
                map.num[i] = -1;
        }
        init = true;
}

// get physical pages straight from ram_stealmem (before vm_bootstrap only)
static paddr_t getppages(int n)
{
        paddr_t addr;

        spinlock_acquire(&stealmem_lock);
        addr = ram_stealmem(n);
        spinlock_release(&stealmem_lock);
        return addr;
}
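
/*
 * Search the coremap for n contiguous free frames, starting from the
 * map.next hint and wrapping around to frame 0 at most once.  Returns
 * the physical address of the block, or 0 if no such run exists.
 */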
static paddr_t getvpages(int n)
{
        bool first = true;
        bool wrapped = false;

        spinlock_acquire(&stealmem_lock);
        for (int i = map.next; i < map.framecount; ++i)
        {
                // may have a block starting here
                if (map.num[i] == -1)
                {
                        // the block must fit below the top of the map
                        if (i + n > map.framecount) goto skip1;
                        int temp = n - 1;
                        // see if we have a full block
                        while (temp)
                        {
                                // frame is available
                                if (map.num[i + temp] == -1)
                                {
                                        --temp;
                                        continue;
                                }
                                // don't have enough contiguous space
                                goto skip1;
                        }
                        // we have enough frames: mark each with its offset in the block
                        for (int j = 0; j < n; ++j)
                        {
                                map.num[i + j] = j;
                        }
                        // advance the search hint
                        if ((map.next + n) < map.framecount) map.next += n;
                        else map.next = 0;
                        spinlock_release(&stealmem_lock);
                        return map.start + PAGE_SIZE * i;
                }
skip1:
                // block won't fit before the end, and we didn't start between here
                // and the end: wrap the search around to frame 0, at most once
                if (i + n >= map.framecount && map.next < i)
                {
                        if (wrapped)
                        {
                                spinlock_release(&stealmem_lock);
                                return 0;
                        }
                        wrapped = true;
                        // reset i (the loop's ++i brings us to 0)
                        i = -1;
                        first = false;
                        continue;
                }
                // skip ahead over allocated frames as far as possible
                int temp = i;
                while (temp < map.framecount && map.num[temp] != -1)
                {
                        // looping around
                        if (temp == map.framecount - 1)
                        {
                                temp = 0;
                                break;
                        }
                        // we came back to the start: the map is full
                        if (temp == map.next && !first)
                        {
                                spinlock_release(&stealmem_lock);
                                return 0;
                        }
                        ++temp;
                }
                // if we came back to the start
                if ((i == map.next || temp == map.next) && !first)
                {
                        spinlock_release(&stealmem_lock);
                        return 0;
                }
                // move i forward as computed (the ++i corrects the off-by-one)
                if (i != temp) i = temp - 1;
                first = false;
        }
        spinlock_release(&stealmem_lock);
        return 0;
}

/* Allocate/free some kernel-space virtual pages */
vaddr_t alloc_kpages(int npages)
{
        paddr_t pa;

        // use the coremap once it is initialized; steal memory during boot
        if (init) pa = getvpages(npages);
        else pa = getppages(npages);
        if (pa == 0) return 0;
        return PADDR_TO_KVADDR(pa);
}
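
/*
 * Free a block handed out by alloc_kpages.  The first frame of a block
 * stores 0, the second 1, and so on, so the frames to release are
 * exactly those whose stored values keep counting up in lockstep.
 */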
void free_kpages(vaddr_t addr)
{
        // memory handed out by ram_stealmem is never freed
        if (!init)
        {
                (void)addr;
                return;
        }
        spinlock_acquire(&stealmem_lock);
        paddr_t a = addr - MIPS_KSEG0;
        int start = (a - map.start) / PAGE_SIZE;
        KASSERT(map.num[start] == 0);
        int lockstep = 0;
        // check the bound first so we never read past the end of the map
        while (start < map.framecount && map.num[start] == lockstep)
        {
                map.num[start] = -1;
                ++start;
                ++lockstep;
        }
        spinlock_release(&stealmem_lock);
}
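
/*
 * A minimal usage sketch (hypothetical caller); the pair operates on the
 * kernel virtual address returned by alloc_kpages, never on the
 * underlying physical address:
 *
 *	vaddr_t buf = alloc_kpages(2);	// two contiguous frames
 *	if (buf != 0) {
 *		// ... use the pages ...
 *		free_kpages(buf);	// returns both frames to the coremap
 *	}
 */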

void
vm_tlbshootdown_all(void)
{
        panic("dumbvm tried to do tlb shootdown?!\n");
}

void
vm_tlbshootdown(const struct tlbshootdown *ts)
{
        (void)ts;
        panic("dumbvm tried to do tlb shootdown?!\n");
}
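
/*
 * Translate a faulting address by linear offset into whichever region
 * contains it (the two ELF segments or the stack), then load the mapping
 * into the TLB.  Pages of the first (text) segment are mapped read-only
 * once loading is finished; a write to one then arrives here as
 * VM_FAULT_READONLY and is rejected with EINVAL.
 */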
int vm_fault(int faulttype, vaddr_t faultaddress)
{
        vaddr_t vbase1, vtop1, vbase2, vtop2, stackbase, stacktop;
        paddr_t paddr;
        int i;
        uint32_t ehi, elo;
        struct addrspace *as;
        int spl;
        bool readonly = false;

        faultaddress &= PAGE_FRAME;

        DEBUG(DB_VM, "dumbvm: fault: 0x%x\n", faultaddress);

        switch (faulttype)
        {
            case VM_FAULT_READONLY:
                // attempted write to a read-only (text) page
                return EINVAL;
            case VM_FAULT_READ:
            case VM_FAULT_WRITE:
                break;
            default:
                return EINVAL;
        }

        if (curproc == NULL) {
                /*
                 * No process. This is probably a kernel fault early
                 * in boot. Return EFAULT so as to panic instead of
                 * getting into an infinite faulting loop.
                 */
                return EFAULT;
        }

        as = curproc_getas();
        if (as == NULL) {
                /*
                 * No address space set up. This is probably also a
                 * kernel fault early in boot.
                 */
                return EFAULT;
        }

        /* Assert that the address space has been set up properly. */
        KASSERT(as->as_vbase1 != 0);
        KASSERT(as->as_pbase1 != 0);
        KASSERT(as->as_npages1 != 0);
        KASSERT(as->as_vbase2 != 0);
        KASSERT(as->as_pbase2 != 0);
        KASSERT(as->as_npages2 != 0);
        KASSERT(as->as_stackpbase != 0);
        KASSERT((as->as_vbase1 & PAGE_FRAME) == as->as_vbase1);
        KASSERT((as->as_pbase1 & PAGE_FRAME) == as->as_pbase1);
        KASSERT((as->as_vbase2 & PAGE_FRAME) == as->as_vbase2);
        KASSERT((as->as_pbase2 & PAGE_FRAME) == as->as_pbase2);
        KASSERT((as->as_stackpbase & PAGE_FRAME) == as->as_stackpbase);

        vbase1 = as->as_vbase1;
        vtop1 = vbase1 + as->as_npages1 * PAGE_SIZE;
        vbase2 = as->as_vbase2;
        vtop2 = vbase2 + as->as_npages2 * PAGE_SIZE;
        stackbase = USERSTACK - DUMBVM_STACKPAGES * PAGE_SIZE;
        stacktop = USERSTACK;

        // text
        if (faultaddress >= vbase1 && faultaddress < vtop1)
        {
                paddr = (faultaddress - vbase1) + as->as_pbase1;
                readonly = true;
        }
        // heap
        else if (faultaddress >= vbase2 && faultaddress < vtop2)
        {
                paddr = (faultaddress - vbase2) + as->as_pbase2;
        }
        // stack
        else if (faultaddress >= stackbase && faultaddress < stacktop)
        {
                paddr = (faultaddress - stackbase) + as->as_stackpbase;
        }
        else return EFAULT;

        /* make sure it's page-aligned */
        KASSERT((paddr & PAGE_FRAME) == paddr);

        /* Disable interrupts on this CPU while frobbing the TLB. */
        spl = splhigh();

        // look for an empty TLB slot
        for (i = 0; i < NUM_TLB; ++i)
        {
                tlb_read(&ehi, &elo, i);
                if (elo & TLBLO_VALID)
                {
                        continue;
                }
                ehi = faultaddress;
                elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
                // text pages become read-only once loading is done
                if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
                DEBUG(DB_VM, "dumbvm: 0x%x -> 0x%x\n", faultaddress, paddr);
                tlb_write(ehi, elo, i);
                splx(spl);
                return 0;
        }

        // TLB full: evict a random entry
        ehi = faultaddress;
        elo = paddr | TLBLO_DIRTY | TLBLO_VALID;
        if (readonly && !(as->loading)) elo &= ~TLBLO_DIRTY;
        tlb_random(ehi, elo);
        splx(spl);
        return 0;
}
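
/*
 * Address spaces under dumbvm: two contiguous segments defined by the
 * ELF file plus a fixed 48k stack, each backed by physically contiguous
 * frames allocated in as_prepare_load.  The loading flag set in
 * as_create keeps the first (text) segment writable until
 * as_complete_load clears it.
 */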
struct addrspace * as_create(void)
{
        struct addrspace *as = kmalloc(sizeof(struct addrspace));
        if (as == NULL) {
                return NULL;
        }
        as->as_vbase1 = 0;
        as->as_pbase1 = 0;
        as->as_npages1 = 0;
        as->as_vbase2 = 0;
        as->as_pbase2 = 0;
        as->as_npages2 = 0;
        as->as_stackpbase = 0;
        // text stays writable until as_complete_load flips this flag
        as->loading = true;
        return as;
}

void
as_destroy(struct addrspace *as)
{
        // free_kpages expects the KSEG0 addresses alloc_kpages returned,
        // so convert the stored physical addresses back before freeing;
        // a region may be unallocated if as_prepare_load failed partway
        if (as->as_pbase1 != 0) free_kpages(PADDR_TO_KVADDR(as->as_pbase1));
        if (as->as_pbase2 != 0) free_kpages(PADDR_TO_KVADDR(as->as_pbase2));
        if (as->as_stackpbase != 0) free_kpages(PADDR_TO_KVADDR(as->as_stackpbase));
        kfree(as);
}

void
as_activate(void)
{
        int i, spl;
        struct addrspace *as;

        as = curproc_getas();
#ifdef UW
        /* Kernel threads don't have an address space to activate */
#endif
        if (as == NULL) {
                return;
        }

        /* Disable interrupts on this CPU while frobbing the TLB. */
        spl = splhigh();

        // invalidate every TLB entry on context switch
        for (i = 0; i < NUM_TLB; i++) {
                tlb_write(TLBHI_INVALID(i), TLBLO_INVALID(), i);
        }

        splx(spl);
}

void
as_deactivate(void)
{
        /* nothing */
}

int
as_define_region(struct addrspace *as, vaddr_t vaddr, size_t sz,
                 int readable, int writeable, int executable)
{
        size_t npages;

        /* Align the region. First, the base... */
        sz += vaddr & ~(vaddr_t)PAGE_FRAME;
        vaddr &= PAGE_FRAME;

        /* ...and now the length. */
        sz = (sz + PAGE_SIZE - 1) & PAGE_FRAME;

        npages = sz / PAGE_SIZE;

        /* We don't use these - all pages are read-write */
        (void)readable;
        (void)writeable;
        (void)executable;

        if (as->as_vbase1 == 0) {
                as->as_vbase1 = vaddr;
                as->as_npages1 = npages;
                return 0;
        }

        if (as->as_vbase2 == 0) {
                as->as_vbase2 = vaddr;
                as->as_npages2 = npages;
                return 0;
        }

        /*
         * Support for more than two regions is not available.
         */
        kprintf("dumbvm: Warning: too many regions\n");
        return EUNIMP;
}

static
void
as_zero_region(paddr_t paddr, unsigned npages)
{
        bzero((void *)PADDR_TO_KVADDR(paddr), npages * PAGE_SIZE);
}
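
/*
 * Allocate the physical memory behind each segment.  alloc_kpages
 * returns a KSEG0 virtual address (or 0 on failure), so each result is
 * checked before MIPS_KSEG0 is subtracted to recover the physical
 * address we store.
 */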
int
as_prepare_load(struct addrspace *as)
{
        vaddr_t va;

        KASSERT(as->as_pbase1 == 0);
        KASSERT(as->as_pbase2 == 0);
        KASSERT(as->as_stackpbase == 0);

        va = alloc_kpages(as->as_npages1);
        if (va == 0) {
                return ENOMEM;
        }
        as->as_pbase1 = va - MIPS_KSEG0;

        va = alloc_kpages(as->as_npages2);
        if (va == 0) {
                return ENOMEM;
        }
        as->as_pbase2 = va - MIPS_KSEG0;

        va = alloc_kpages(DUMBVM_STACKPAGES);
        if (va == 0) {
                return ENOMEM;
        }
        as->as_stackpbase = va - MIPS_KSEG0;

        as_zero_region(as->as_pbase1, as->as_npages1);
        as_zero_region(as->as_pbase2, as->as_npages2);
        as_zero_region(as->as_stackpbase, DUMBVM_STACKPAGES);

        return 0;
}

int
as_complete_load(struct addrspace *as)
{
        // loading is finished: clear the flag so vm_fault maps text
        // pages read-only from now on
        as->loading = false;
        return 0;
}

int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
        KASSERT(as->as_stackpbase != 0);
        *stackptr = USERSTACK;
        return 0;
}
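
/*
 * Duplicate an address space for fork: allocate fresh frames via
 * as_prepare_load, then copy the parent's segments and stack byte for
 * byte.
 */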
int
as_copy(struct addrspace *old, struct addrspace **ret)
{
        struct addrspace *new;

        new = as_create();
        if (new == NULL) {
                return ENOMEM;
        }

        new->as_vbase1 = old->as_vbase1;
        new->as_npages1 = old->as_npages1;
        new->as_vbase2 = old->as_vbase2;
        new->as_npages2 = old->as_npages2;
        // the parent is already loaded (or still loading); match its flag
        new->loading = old->loading;

        /* (Mis)use as_prepare_load to allocate some physical memory. */
        if (as_prepare_load(new)) {
                as_destroy(new);
                return ENOMEM;
        }

        KASSERT(new->as_pbase1 != 0);
        KASSERT(new->as_pbase2 != 0);
        KASSERT(new->as_stackpbase != 0);

        memmove((void *)PADDR_TO_KVADDR(new->as_pbase1),
                (const void *)PADDR_TO_KVADDR(old->as_pbase1),
                old->as_npages1 * PAGE_SIZE);

        memmove((void *)PADDR_TO_KVADDR(new->as_pbase2),
                (const void *)PADDR_TO_KVADDR(old->as_pbase2),
                old->as_npages2 * PAGE_SIZE);

        memmove((void *)PADDR_TO_KVADDR(new->as_stackpbase),
                (const void *)PADDR_TO_KVADDR(old->as_stackpbase),
                DUMBVM_STACKPAGES * PAGE_SIZE);

        *ret = new;
        return 0;
}