MIT 6.828 - 7. Lab 07: Locks

Tags: MIT 6.828

Lab Summary

  1. This lab took about 15 hours.
  2. The main takeaway is a deeper understanding of multi-core programming and lock-free primitives.
  3. My implementation of the last exercise is buggy, and I don't know how to fix it yet.

All the code from the finished lab is at: https://github.com/monkey2000/xv6-riscv/tree/lock/

0. Lab Setup

Lab guide link

First, check out the lab branch:

$ cd xv6-riscv-fall19
$ git checkout lock

1. Memory allocator

This part adds a per-core freelist and stealing to the page allocator. (A possible batch-stealing refinement is sketched after the code.)

// Physical memory allocator, for user processes,
// kernel stacks, page-table pages,
// and pipe buffers. Allocates whole 4096-byte pages.

#include "types.h"
#include "param.h"
#include "memlayout.h"
#include "spinlock.h"
#include "riscv.h"
#include "defs.h"

void freerange(void *pa_start, void *pa_end);

extern char end[]; // first address after kernel.
                   // defined by kernel.ld.

struct run {
  struct run *next;
};

struct {
  struct spinlock lock[NCPU];
  struct run *freelist[NCPU];
} kmem;

void
kinit()
{
  for (int i = 0; i < NCPU; i++)
    initlock(&kmem.lock[i], "kmem");
  freerange(end, (void*)PHYSTOP);
}

void
freerange(void *pa_start, void *pa_end)
{
  // push_off() disables interrupts so cpuid() stays stable in kfree().
  push_off();

  char *p;
  p = (char*)PGROUNDUP((uint64)pa_start);
  for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE)
    kfree(p);

  pop_off();
}

// Free the page of physical memory pointed at by pa,
// which normally should have been returned by a
// call to kalloc(). (The exception is when
// initializing the allocator; see kinit above.)
void
kfree(void *pa)
{
  push_off();
  int id = cpuid();
  struct run *r;

  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  // Fill with junk to catch dangling refs.
  memset(pa, 1, PGSIZE);

  r = (struct run*)pa;

  // Freed pages always go onto the current CPU's freelist.
  acquire(&kmem.lock[id]);
  r->next = kmem.freelist[id];
  kmem.freelist[id] = r;
  release(&kmem.lock[id]);

  pop_off();
}

// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
void *
kalloc(void)
{
  push_off();
  int id = cpuid();

  struct run *r;

  // Fast path: take a page from the local freelist.
  acquire(&kmem.lock[id]);
  r = kmem.freelist[id];
  if(r)
    kmem.freelist[id] = r->next;
  release(&kmem.lock[id]);

  // Slow path: the local list is empty, so steal one page
  // from the first CPU that has a free page.
  if(!r) {
    for(int i = 0; i < NCPU; i++) {
      acquire(&kmem.lock[i]);
      r = kmem.freelist[i];
      if(r)
        kmem.freelist[i] = r->next;
      release(&kmem.lock[i]);

      if(r)
        break;
    }
  }

  if(r)
    memset((char*)r, 5, PGSIZE); // fill with junk

  pop_off();

  return (void*)r;
}
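
One refinement I did not implement: steal a small batch of pages instead of a single page, so a CPU whose freelist runs dry doesn't pay a cross-CPU lock round-trip on every allocation. A sketch under that assumption; `steal()` and `STEAL_N` are hypothetical names, not part of the code above:

#define STEAL_N 32 // hypothetical batch size

// Detach up to STEAL_N pages from the first other CPU that has any.
// Returns the batch as a chain of struct run, or 0 if every list is empty.
static struct run *
steal(int self)
{
  struct run *batch = 0;

  for(int i = 0; i < NCPU; i++){
    if(i == self)
      continue;
    acquire(&kmem.lock[i]);
    for(int n = 0; n < STEAL_N && kmem.freelist[i]; n++){
      struct run *r = kmem.freelist[i];
      kmem.freelist[i] = r->next;
      r->next = batch;
      batch = r;
    }
    release(&kmem.lock[i]);
    if(batch)
      break;
  }
  return batch;
}

kalloc() would then return the first page of the batch and push the rest onto its own freelist under kmem.lock[id].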

2. Buffer cache

This part adds multi-core support to the file system's buffered I/O. My problem seems to be that I didn't make the buffer freelist per-core (I'll revisit this later, since the lab guide doesn't actually ask for that, TAT), so instead I hacked something together with lock-free primitives, which didn't help much. A sketch of the approach the hints point toward follows the code.

// Buffer cache.
//
// The buffer cache is a linked list of buf structures holding
// cached copies of disk block contents. Caching disk blocks
// in memory reduces the number of disk reads and also provides
// a synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
//   so do not keep them longer than necessary.

#include "types.h"
#include "param.h"
#include "spinlock.h"
#include "sleeplock.h"
#include "riscv.h"
#include "defs.h"
#include "fs.h"
#include "buf.h"

struct {
  struct buf buf[NBUF];
  uint unused[NBUF]; // vestigial; the `used` flag lives in struct buf instead

  // Abandoned design: a global list of unused buffers.
  // struct spinlock unused_lock;
  // struct buf unused_head;

  // Hash buckets of cached buffers, chained through prev/next.
  // bucket_head[i].next is the most recently inserted buffer.
  struct spinlock bucket_lock[NBUCKET];
  struct buf bucket_head[NBUCKET];
} bcache;

void
binit(void)
{
  struct buf *b;

  for(int i = 0; i < NBUCKET; i++)
    initlock(&bcache.bucket_lock[i], "bcache.bucket");

  // Mark every buffer unused; a buffer is only linked into a
  // bucket once bget() recycles it.
  for(b = bcache.buf; b < bcache.buf+NBUF; b++){
    initsleeplock(&b->lock, "buffer");
    b->used = 0;
  }

  // Each bucket starts as an empty circular list.
  for(int i = 0; i < NBUCKET; i++) {
    bcache.bucket_head[i].prev = &bcache.bucket_head[i];
    bcache.bucket_head[i].next = &bcache.bucket_head[i];
  }
}

// Look through the buffer cache for block on device dev.
// If not found, recycle an unused buffer.
// In either case, return a locked buffer.
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b;

  uint id = ((((uint64)dev) << 32) | blockno) % NBUCKET;

  // Is the block already cached?
  acquire(&bcache.bucket_lock[id]);
  for(b = bcache.bucket_head[id].next; b != &bcache.bucket_head[id]; b = b->next){
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache.bucket_lock[id]);
      acquiresleep(&b->lock);
      return b;
    }
  }

  // Not cached; claim an unused buffer by flipping its `used` flag
  // (a field added to struct buf) with a lock-free CAS.
  for (int i = 0; i < NBUF; i++) {
    if(!bcache.buf[i].used && __sync_bool_compare_and_swap(&bcache.buf[i].used, 0, 1)) {
      b = &bcache.buf[i];
      if(b->refcnt == 0) {
        b->dev = dev;
        b->blockno = blockno;
        b->valid = 0;
        b->refcnt = 1;

        // Link the buffer into this bucket's list.
        b->next = bcache.bucket_head[id].next;
        b->prev = &bcache.bucket_head[id];
        bcache.bucket_head[id].next->prev = b;
        bcache.bucket_head[id].next = b;
        release(&bcache.bucket_lock[id]);
        acquiresleep(&b->lock);
        return b;
      }
      // If refcnt != 0 here, `used` stays set and this buffer leaks
      // forever -- likely one of the bugs in this version.
    }
  }

  panic("bget: no buffers");
}

// Return a locked buf with the contents of the indicated block.
struct buf*
bread(uint dev, uint blockno)
{
  struct buf *b;

  b = bget(dev, blockno);
  if(!b->valid) {
    virtio_disk_rw(b->dev, b, 0);
    b->valid = 1;
  }
  return b;
}

// Write b's contents to disk. Must be locked.
void
bwrite(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("bwrite");
  virtio_disk_rw(b->dev, b, 1);
}

// Release a locked buffer. If this was the last reference,
// unlink it from its bucket and mark it unused again.
void
brelse(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("brelse");

  releasesleep(&b->lock);

  uint id = ((((uint64)b->dev) << 32) | b->blockno) % NBUCKET;

  acquire(&bcache.bucket_lock[id]);
  b->refcnt--;
  if (b->refcnt == 0) {
    // No one is waiting for it.
    b->next->prev = b->prev;
    b->prev->next = b->next;
    if(!__sync_bool_compare_and_swap(&b->used, 1, 0))
      panic("brelse_cas");
  }

  release(&bcache.bucket_lock[id]);
}

void
bpin(struct buf *b) {
  uint id = ((((uint64)b->dev) << 32) | b->blockno) % NBUCKET;

  acquire(&bcache.bucket_lock[id]);
  b->refcnt++;
  release(&bcache.bucket_lock[id]);
}

void
bunpin(struct buf *b) {
  uint id = ((((uint64)b->dev) << 32) | b->blockno) % NBUCKET;

  acquire(&bcache.bucket_lock[id]);
  b->refcnt--;
  release(&bcache.bucket_lock[id]);
}
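
For reference, the approach the lab hints point toward avoids a freelist entirely: hash each block into a bucket with its own lock, and on a miss evict the least-recently-used buffer with refcnt == 0, tracked by a per-buffer timestamp instead of list order. A minimal sketch, assuming struct buf gains a `lastuse` field that brelse() sets from the kernel's `ticks` counter; `bevict()` is a hypothetical helper, not code from my solution:

// Sketch only: pick the least-recently-used buffer with no references.
// A real version must guard against a concurrent bget() grabbing the
// same victim, e.g. by re-checking refcnt under the victim's bucket lock.
static struct buf*
bevict(void)
{
  struct buf *lru = 0;

  for(struct buf *b = bcache.buf; b < bcache.buf + NBUF; b++){
    if(b->refcnt == 0 && (lru == 0 || b->lastuse < lru->lastuse))
      lru = b;
  }
  return lru;
}

With this scheme brelse() only decrements refcnt and records `b->lastuse = ticks` under the bucket lock, without unlinking anything; and when bget() moves a victim from its old bucket to the new one, it must take the two bucket locks in a fixed order (say, lower index first) to avoid deadlock.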

