  char *p;
  p = (char*)PGROUNDUP((uint64)pa_start);
  for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE)
    kfree(p);
  pop_off();
}
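The allocator routines below index kmem.lock[] and kmem.freelist[] by cpuid(). The declaration of kmem is not repeated in this excerpt; a minimal sketch consistent with that usage (the NCPU sizing and field layout are assumed here, not taken from the document) would be:

struct run {
  struct run *next;
};

struct {
  struct spinlock lock[NCPU];     // one lock per CPU's free list (assumed layout)
  struct run *freelist[NCPU];     // per-CPU free list of 4096-byte pages
} kmem;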
// Free the page of physical memory pointed at by pa,
// which normally should have been returned by a
// call to kalloc().  (The exception is when
// initializing the allocator; see kinit above.)
void
kfree(void *pa)
{
  push_off();
  int id = cpuid();
  struct run *r;
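The rest of kfree is not shown above. A sketch of how it typically continues under this per-CPU scheme, following the standard xv6 checks and pushing the freed page onto the current CPU's list (assumed, not taken from this document):

  // Sketch (assumed): standard xv6 sanity checks, then push onto this CPU's list.
  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  memset(pa, 1, PGSIZE);          // fill with junk to catch dangling references

  r = (struct run*)pa;

  acquire(&kmem.lock[id]);
  r->next = kmem.freelist[id];
  kmem.freelist[id] = r;
  release(&kmem.lock[id]);
  pop_off();
}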
// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
void *
kalloc(void)
{
  push_off();
  int id = cpuid();
  struct run *r;

  acquire(&kmem.lock[id]);
  r = kmem.freelist[id];
  if(r)
    kmem.freelist[id] = r->next;
  release(&kmem.lock[id]);
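The remaining lines of kalloc are not shown here. The usual continuation, sketched under the assumption that an empty local list is refilled by stealing a page from another CPU's list (illustrative, not the document's exact code):

  // Sketch (assumed): if this CPU's list was empty, steal a page from another CPU.
  if(!r) {
    for(int i = 0; i < NCPU; i++) {
      if(i == id)
        continue;
      acquire(&kmem.lock[i]);
      r = kmem.freelist[i];
      if(r)
        kmem.freelist[i] = r->next;
      release(&kmem.lock[i]);
      if(r)
        break;
    }
  }

  if(r)
    memset((char*)r, 5, PGSIZE);  // fill with junk
  pop_off();
  return (void*)r;
}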
// Buffer cache.
//
// The buffer cache is a linked list of buf structures holding
// cached copies of disk block contents.  Caching disk blocks
// in memory reduces the number of disk reads and also provides
// a synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
//   so do not keep them longer than necessary.
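As a quick illustration of that interface, a caller in the kernel uses the three calls in this order (a sketch; dev, blockno, and src are placeholders, and this snippet is not part of the lab code):

  struct buf *b = bread(dev, blockno);  // locked buffer with up-to-date block contents
  memmove(b->data, src, BSIZE);         // modify the cached copy of the block
  bwrite(b);                            // write the buffer back to disk
  brelse(b);                            // release it; b must not be used afterwards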
  // Each bucket is a linked list of buffers, through prev/next.
  // bucket_head[i].next is the most recently used buffer in that bucket.
  struct spinlock bucket_lock[NBUCKET];
  struct buf bucket_head[NBUCKET];
} bcache;
  // Initialize buckets
  for(int i = 0; i < NBUCKET; i++) {
    bcache.bucket_head[i].prev = &bcache.bucket_head[i];
    bcache.bucket_head[i].next = &bcache.bucket_head[i];
  }
}
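The opening lines of binit are not shown in this excerpt. A sketch of the initialization they presumably perform inside binit, before the bucket-head loop above, namely setting up the per-bucket spinlocks and each buffer's sleep-lock (the lock names are placeholders, assumed rather than taken from the document):

  // Sketch (assumed): initialize per-bucket spinlocks and buffer sleep-locks.
  for(int i = 0; i < NBUCKET; i++)
    initlock(&bcache.bucket_lock[i], "bcache.bucket");
  for(struct buf *b = bcache.buf; b < bcache.buf + NBUF; b++)
    initsleeplock(&b->lock, "buffer");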
// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
// In either case, return locked buffer.
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b;

  uint id = ((((uint64)dev) << 32) | blockno) % NBUCKET;
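For example, assuming NBUCKET is 13 (the value is not shown in this excerpt), block 33 on device 1 hashes to ((1UL << 32) | 33) % 13 = (2^32 + 33) % 13 = (9 + 33) % 13 = 3, so bget searches bucket 3 for that block.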
  // Is the block already cached?
  acquire(&bcache.bucket_lock[id]);
  for(b = bcache.bucket_head[id].next; b != &bcache.bucket_head[id]; b = b->next){
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache.bucket_lock[id]);
      acquiresleep(&b->lock);
      return b;
    }
  }
  // Not cached; recycle an unused buffer.
  // acquire(&bcache.unused_lock);
  for(int i = 0; i < NBUF; i++) {
    if(!bcache.buf[i].used && __sync_bool_compare_and_swap(&bcache.buf[i].used, 0, 1)) {
      b = &bcache.buf[i];
      if(b->refcnt == 0) {
        // release(&bcache.unused_lock);