/*
 * Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009
 *	The President and Fellows of Harvard College.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE UNIVERSITY AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <types.h>
#include <lib.h>
#include <spinlock.h>
#include <vm.h>

/*
 * Kernel malloc.
 */

/*
 * Fill a block with 0xdeadbeef, to make it easier to notice uses of
 * freed memory.
 */
static
void
fill_deadbeef(void *vptr, size_t len)
{
	uint32_t *ptr = vptr;
	size_t i;

	for (i=0; i<len/sizeof(uint32_t); i++) {
		ptr[i] = 0xdeadbeef;
	}
}

////////////////////////////////////////////////////////////
//
// Pool-based subpage allocator.
//
// It works like this: we allocate one page at a time and fill it with
// objects of size k, for various k. Each page has its own freelist,
// threaded through the first word of each free object, and its own
// free count, so we know when the page is entirely free and can be
// released.
//
// No assumptions are made about the sizes k; they need not be powers
// of two. Note, however, that malloc must always return pointers
// aligned to the maximum alignment requirements of the platform; thus
// block sizes must at least be multiples of 4, preferably 8. They
// must also all divide the page size evenly.
//

#undef SLOW		/* consistency checks */
#undef SLOWER		/* lots of consistency checks */

////////////////////////////////////////

#if PAGE_SIZE == 4096

#define NSIZES 8
static const size_t sizes[NSIZES] = { 16, 32, 64, 128, 256, 512, 1024, 2048 };

#define SMALLEST_SUBPAGE_SIZE 16
#define LARGEST_SUBPAGE_SIZE 2048

#elif PAGE_SIZE == 8192
#error "No support for 8k pages (yet?)"
#else
#error "Odd page size"
#endif

////////////////////////////////////////

/*
 * Each free block holds a pointer to the next free block of the same
 * size in its first word.
 */
struct freelist {
	struct freelist *next;
};

/*
 * One pageref exists for each page the subpage allocator owns. Since
 * block sizes are all at least 4 bytes, the low bits of the page
 * address are free to hold the block type (an index into sizes[]).
 */
struct pageref {
	struct pageref *next_samesize;
	struct pageref *next_all;
	vaddr_t pageaddr_and_blocktype;
	uint16_t freelist_offset;
	uint16_t nfree;
};

#define INVALID_OFFSET	(0xffff)

#define PR_PAGEADDR(pr)  ((pr)->pageaddr_and_blocktype & PAGE_FRAME)
#define PR_BLOCKTYPE(pr) ((pr)->pageaddr_and_blocktype & ~PAGE_FRAME)
#define MKPAB(pa, blk)	 (((pa)&PAGE_FRAME) | ((blk) & ~PAGE_FRAME))

////////////////////////////////////////

/*
 * This is cheesy.
 *
 * The problem is not that it's wasteful - we can only allocate whole
 * pages of pageref structures at a time anyway. The problem is that
 * we really ought to be able to have more than one of these pages.
 *
 * However, for the time being, one page worth of pagerefs gives us
 * 256 pagerefs; this lets us manage 256 * 4k = 1M of kernel heap.
 * That would be twice as much memory as we get for *everything*.
 * Thus, we will cheat and not allow any mechanism for having a second
 * page of pageref structs.
 *
 * Then, since the size is fixed at one page, we'll simplify a bit
 * further by allocating the page in the kernel BSS instead of calling
 * alloc_kpages to get it.
 */
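/*
 * For concreteness (a sketch of the arithmetic, assuming the usual 4k
 * PAGE_SIZE and the 16-byte struct pageref above): NPAGEREFS works out
 * to 4096/16 = 256, the in-use bitmap below needs 256/32 = 8 words,
 * and the manageable heap tops out at 256 * 4k = 1M, matching the
 * figure in the comment above.
 */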
#define NPAGEREFS (PAGE_SIZE / sizeof(struct pageref))
static struct pageref pagerefs[NPAGEREFS];

#define INUSE_WORDS (NPAGEREFS/32)
static uint32_t pagerefs_inuse[INUSE_WORDS];

/*
 * Grab a pageref by scanning the in-use bitmap for a clear bit.
 * Returns NULL if all pagerefs are taken.
 */
static
struct pageref *
allocpageref(void)
{
	unsigned i,j;
	uint32_t k;

	for (i=0; i<INUSE_WORDS; i++) {
		if (pagerefs_inuse[i]==0xffffffff) {
			/* full */
			continue;
		}
		for (k=1,j=0; k!=0; k<<=1,j++) {
			if ((pagerefs_inuse[i] & k)==0) {
				pagerefs_inuse[i] |= k;
				return &pagerefs[i*32 + j];
			}
		}
		/* the word wasn't full, so some bit must have been clear */
		KASSERT(0);
	}

	/* ran out */
	return NULL;
}

/*
 * Release a pageref by clearing its bit in the in-use bitmap.
 */
static
void
freepageref(struct pageref *pr)
{
	size_t i, j;
	uint32_t k;

	j = pr-pagerefs;
	KASSERT(j < NPAGEREFS);

	i = j/32;
	k = ((uint32_t)1) << (j%32);

	KASSERT((pagerefs_inuse[i] & k) != 0);
	pagerefs_inuse[i] &= ~k;
}

////////////////////////////////////////

/*
 * List heads: one list of pages per block size, plus one list of all
 * pages owned by the subpage allocator.
 */
static struct pageref *sizebases[NSIZES];
static struct pageref *allbase;

/*
 * One spinlock protects the whole allocator.
 */
static struct spinlock kmalloc_spinlock = SPINLOCK_INITIALIZER;

////////////////////////////////////////

#ifdef SLOW
/*
 * Check that a page's freelist is consistent: every entry lies within
 * the page, is properly aligned, and the number of entries matches
 * nfree.
 */
static
void
checksubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	int nfree=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	if (pr->freelist_offset == INVALID_OFFSET) {
		KASSERT(pr->nfree==0);
		return;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	KASSERT(pr->freelist_offset < PAGE_SIZE);
	KASSERT(pr->freelist_offset % sizes[blktype] == 0);

	fla = prpage + pr->freelist_offset;
	fl = (struct freelist *)fla;

	for (; fl != NULL; fl = fl->next) {
		fla = (vaddr_t)fl;
		KASSERT(fla >= prpage && fla < prpage + PAGE_SIZE);
		KASSERT((fla-prpage) % sizes[blktype] == 0);
		KASSERT(fla >= MIPS_KSEG0);
		KASSERT(fla < MIPS_KSEG1);
		nfree++;
	}
	KASSERT(nfree==pr->nfree);
}
#else
#define checksubpage(pr) ((void)(pr))
#endif

#ifdef SLOWER
/*
 * Check all the pagerefs, and check that the same-size lists and the
 * all-pages list contain the same set of pages.
 */
static
void
checksubpages(void)
{
	struct pageref *pr;
	int i;
	unsigned sc=0, ac=0;

	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	for (i=0; i<NSIZES; i++) {
		for (pr = sizebases[i]; pr != NULL; pr = pr->next_samesize) {
			checksubpage(pr);
			KASSERT(sc < NPAGEREFS);
			sc++;
		}
	}

	for (pr = allbase; pr != NULL; pr = pr->next_all) {
		checksubpage(pr);
		KASSERT(ac < NPAGEREFS);
		ac++;
	}

	KASSERT(sc==ac);
}
#else
#define checksubpages()
#endif

////////////////////////////////////////

/*
 * Print a summary of one page: its address, block size, and a map of
 * which blocks are free ('.') and which are in use ('*').
 */
static
void
dumpsubpage(struct pageref *pr)
{
	vaddr_t prpage, fla;
	struct freelist *fl;
	int blktype;
	unsigned i, n, index;
	uint32_t freemap[PAGE_SIZE / (SMALLEST_SUBPAGE_SIZE*32)];

	checksubpage(pr);
	KASSERT(spinlock_do_i_hold(&kmalloc_spinlock));

	/* clear freemap[] */
	for (i=0; i<ARRAYCOUNT(freemap); i++) {
		freemap[i] = 0;
	}

	prpage = PR_PAGEADDR(pr);
	blktype = PR_BLOCKTYPE(pr);

	/* compute how many bits in freemap[] are actually used */
	n = PAGE_SIZE / sizes[blktype];

	/* mark the blocks on the freelist */
	if (pr->freelist_offset != INVALID_OFFSET) {
		fla = prpage + pr->freelist_offset;
		fl = (struct freelist *)fla;

		for (; fl != NULL; fl = fl->next) {
			fla = (vaddr_t)fl;
			index = (fla-prpage) / sizes[blktype];
			KASSERT(index<n);
			freemap[index/32] |= (1<<(index%32));
		}
	}

	/* print the summary */
	kprintf("at 0x%08lx: size %-4lu  %u/%u free\n",
		(unsigned long)prpage, (unsigned long)sizes[blktype],
		(unsigned)pr->nfree, n);
	kprintf("   ");
	for (i=0; i<n; i++) {
		int val = (freemap[i/32] & (1<<(i%32)))!=0;
		kprintf("%c", val ? '.' : '*');
		if (i%64==63) {
			kprintf("\n   ");
		}
	}
	kprintf("\n");
}

/*
 * Print a summary of all pages owned by the subpage allocator.
 */
void
kheap_printstats(void)
{
	struct pageref *pr;

	/* print the whole thing with the spinlock held */
	spinlock_acquire(&kmalloc_spinlock);

	kprintf("Subpage allocator status:\n");

	for (pr = allbase; pr != NULL; pr = pr->next_all) {
		dumpsubpage(pr);
	}

	spinlock_release(&kmalloc_spinlock);
}

////////////////////////////////////////

/*
 * Remove a pageref from both lists that it's on.
 */
static
void
remove_lists(struct pageref *pr, int blktype)
{
	struct pageref **guy;

	KASSERT(blktype>=0 && blktype<NSIZES);

	for (guy = &sizebases[blktype]; *guy; guy = &(*guy)->next_samesize) {
		checksubpage(*guy);
		if (*guy == pr) {
			*guy = pr->next_samesize;
			break;
		}
	}

	for (guy = &allbase; *guy; guy = &(*guy)->next_all) {
		checksubpage(*guy);
		if (*guy == pr) {
			*guy = pr->next_all;
			break;
		}
	}
}

/*
 * Given a requested size, return the block type: the index into
 * sizes[] of the smallest block size that fits.
 */
static
inline
int
blocktype(size_t sz)
{
	unsigned i;
	for (i=0; i<NSIZES; i++) {
		if (sz <= sizes[i]) {
			return i;
		}
	}

	panic("Subpage allocator cannot handle allocation of size %lu\n",
	      (unsigned long)sz);

	// keep compiler happy
	return 0;
}

/*
 * Allocate a block of size SZ, where SZ is not large enough to
 * warrant a whole-page allocation.
 */
static
void *
subpage_kmalloc(size_t sz)
{
	unsigned blktype;	// index into sizes[] that we're using
	struct pageref *pr;	// pageref for page we're allocating in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *volatile fl;	// free list entry
	void *retptr;		// our result
	volatile int i;

	blktype = blocktype(sz);
	sz = sizes[blktype];

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = sizebases[blktype]; pr != NULL; pr = pr->next_samesize) {

		/* check for corruption */
		KASSERT(PR_BLOCKTYPE(pr) == blktype);
		checksubpage(pr);

		if (pr->nfree > 0) {

		doalloc: /* comes here after getting a whole fresh page */

			KASSERT(pr->freelist_offset < PAGE_SIZE);
			prpage = PR_PAGEADDR(pr);
			fla = prpage + pr->freelist_offset;
			fl = (struct freelist *)fla;

			retptr = fl;
			fl = fl->next;
			pr->nfree--;

			if (fl != NULL) {
				KASSERT(pr->nfree > 0);
				fla = (vaddr_t)fl;
				KASSERT(fla - prpage < PAGE_SIZE);
				pr->freelist_offset = fla - prpage;
			}
			else {
				KASSERT(pr->nfree == 0);
				pr->freelist_offset = INVALID_OFFSET;
			}

			checksubpages();

			spinlock_release(&kmalloc_spinlock);
			return retptr;
		}
	}

	/*
	 * No page of the right size available.
	 * Make a new one.
	 *
	 * We release the spinlock while calling alloc_kpages. This
	 * avoids deadlock if alloc_kpages needs to come back here.
	 * Note that this means things can change behind our back...
	 */
	spinlock_release(&kmalloc_spinlock);
	prpage = alloc_kpages(1);
	if (prpage==0) {
		/* Out of memory. */
		kprintf("kmalloc: Subpage allocator couldn't get a page\n");
		return NULL;
	}
	spinlock_acquire(&kmalloc_spinlock);

	pr = allocpageref();
	if (pr==NULL) {
		/* Couldn't allocate accounting space for the new page. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
		kprintf("kmalloc: Subpage allocator couldn't get pageref\n");
		return NULL;
	}

	pr->pageaddr_and_blocktype = MKPAB(prpage, blktype);
	pr->nfree = PAGE_SIZE / sizes[blktype];

	/*
	 * Note: fl is volatile because the MIPS toolchain we were
	 * using in spring 2001 attempted to optimize this loop and
	 * blew it. Making fl volatile inhibits the optimization.
	 */
	fla = prpage;
	fl = (struct freelist *)fla;
	fl->next = NULL;
	for (i=1; i<pr->nfree; i++) {
		fl = (struct freelist *)(fla + i*sizes[blktype]);
		fl->next = (struct freelist *)(fla + (i-1)*sizes[blktype]);
		KASSERT(fl != fl->next);
	}
	fla = (vaddr_t)fl;
	pr->freelist_offset = fla - prpage;
	KASSERT(pr->freelist_offset == (pr->nfree-1)*sizes[blktype]);

	pr->next_samesize = sizebases[blktype];
	sizebases[blktype] = pr;

	pr->next_all = allbase;
	allbase = pr;

	/* This is kind of cheesy, but avoids duplicating the alloc code. */
	goto doalloc;
}
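/*
 * Worked example for the freelist-construction loop in
 * subpage_kmalloc above (a sketch, assuming 4k pages): for a fresh
 * page of 1024-byte blocks, nfree starts at 4096/1024 = 4. The loop
 * links the blocks back to front (block 3 -> block 2 -> block 1 ->
 * block 0), so when it finishes, freelist_offset is (4-1)*1024 = 3072,
 * which is exactly what the KASSERT after the loop verifies.
 */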
/*
 * Free a pointer previously returned from subpage_kmalloc. If the
 * pointer is not on any page we own, return -1; the caller should
 * then assume it was a whole-page allocation.
 */
static
int
subpage_kfree(void *ptr)
{
	int blktype;		// index into sizes[] that we're using
	vaddr_t ptraddr;	// same as ptr
	struct pageref *pr;	// pageref for page we're freeing in
	vaddr_t prpage;		// PR_PAGEADDR(pr)
	vaddr_t fla;		// free list entry address
	struct freelist *fl;	// free list entry
	vaddr_t offset;		// offset into page

	ptraddr = (vaddr_t)ptr;

	spinlock_acquire(&kmalloc_spinlock);

	checksubpages();

	for (pr = allbase; pr; pr = pr->next_all) {
		prpage = PR_PAGEADDR(pr);
		blktype = PR_BLOCKTYPE(pr);

		/* check for corruption */
		KASSERT(blktype>=0 && blktype<NSIZES);
		checksubpage(pr);

		if (ptraddr >= prpage && ptraddr < prpage + PAGE_SIZE) {
			break;
		}
	}

	if (pr==NULL) {
		/* Not on any of our pages - not a subpage allocation */
		spinlock_release(&kmalloc_spinlock);
		return -1;
	}

	offset = ptraddr - prpage;

	/* Check for proper positioning and alignment */
	if (offset >= PAGE_SIZE || offset % sizes[blktype] != 0) {
		panic("kfree: subpage free of invalid addr %p\n", ptr);
	}

	/*
	 * Clear the block to 0xdeadbeef to make it easier to detect
	 * uses of dangling pointers.
	 */
	fill_deadbeef(ptr, sizes[blktype]);

	/*
	 * We probably ought to check for double-free by seeing if the
	 * block is already on the free list. But that's expensive, so
	 * we don't.
	 */
	fla = prpage + offset;
	fl = (struct freelist *)fla;
	if (pr->freelist_offset == INVALID_OFFSET) {
		fl->next = NULL;
	}
	else {
		fl->next = (struct freelist *)(prpage + pr->freelist_offset);
	}
	pr->freelist_offset = offset;
	pr->nfree++;

	KASSERT(pr->nfree <= PAGE_SIZE / sizes[blktype]);
	if (pr->nfree == PAGE_SIZE / sizes[blktype]) {
		/* Whole page is free. */
		remove_lists(pr, blktype);
		freepageref(pr);
		/* Call free_kpages without kmalloc_spinlock. */
		spinlock_release(&kmalloc_spinlock);
		free_kpages(prpage);
	}
	else {
		spinlock_release(&kmalloc_spinlock);
	}

#ifdef SLOWER /* Don't get the lock unless checksubpages does something. */
	spinlock_acquire(&kmalloc_spinlock);
	checksubpages();
	spinlock_release(&kmalloc_spinlock);
#endif

	return 0;
}

////////////////////////////////////////////////////////////

/*
 * Allocate a block of size SZ. Redirect either to subpage_kmalloc or
 * to alloc_kpages depending on how big SZ is.
 */
void *
kmalloc(size_t sz)
{
	if (sz>=LARGEST_SUBPAGE_SIZE) {
		unsigned long npages;
		vaddr_t address;

		/* Round up to a whole number of pages. */
		npages = (sz + PAGE_SIZE - 1)/PAGE_SIZE;
		address = alloc_kpages(npages);
		if (address==0) {
			return NULL;
		}

		return (void *)address;
	}

	return subpage_kmalloc(sz);
}

/*
 * Free a block previously returned from kmalloc.
 */
void
kfree(void *ptr)
{
	/*
	 * Try subpage first; if that fails, assume it's a big allocation.
	 */
	if (ptr == NULL) {
		return;
	}
	else if (subpage_kfree(ptr)) {
		KASSERT((vaddr_t)ptr%PAGE_SIZE==0);
		free_kpages((vaddr_t)ptr);
	}
}
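////////////////////////////////////////////////////////////

/*
 * Usage sketch (illustrative only; kmalloc_example is a hypothetical
 * helper, not part of the allocator). It shows the two paths above:
 * requests below LARGEST_SUBPAGE_SIZE are rounded up to the next size
 * class by subpage_kmalloc (100 bytes consumes a 128-byte block),
 * while larger requests go straight to alloc_kpages (8000 bytes is
 * rounded up to two whole pages).
 */
#if 0
static
void
kmalloc_example(void)
{
	char *small = kmalloc(100);	/* served from a 128-byte block */
	char *big = kmalloc(8000);	/* two whole pages via alloc_kpages */

	if (small != NULL) {
		kfree(small);	/* goes back onto its page's freelist */
	}
	if (big != NULL) {
		kfree(big);	/* subpage_kfree returns -1; pages freed */
	}
}
#endif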