MIT 6.1810: Locks

Warning
This post was last updated on 2023-05-05; its content may be out of date.

My solution to the eighth lab (Locks) of MIT 6.1810.

Locks

My code currently does not pass usertests -q; it fails at textwrite. Another frustrating thing: while working on the Buffer cache part of the lab the kernel kept panicking, and it only started working after I ran make clean and rebuilt from scratch (I wasted a lot of time trying to track down the cause, but unfortunately never found it).

Your job is to implement per-CPU freelists, and stealing when a CPU’s free list is empty. You must give all of your locks names that start with “kmem”. That is, you should call initlock for each of your locks, and pass a name that starts with “kmem”. Run kalloctest to see if your implementation has reduced lock contention. To check that it can still allocate all of memory, run usertests sbrkmuch. Your output will look similar to that shown below, with much-reduced contention in total on kmem locks, although the specific numbers will differ. Make sure all tests in usertests -q pass. make grade should say that the kalloctests pass.
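
All of the changes below live in kernel/kalloc.c: the single kmem freelist becomes one freelist and one spinlock per CPU, and kalloc falls back to stealing a page from another CPU when the local list is empty.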

struct {
  struct spinlock lock;
  struct run *freelist;
} kmemlist[NCPU];

void
kinit()
{
  for(int i = 0; i < NCPU; i++)
    initlock(&kmemlist[i].lock, "kmem");
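  // freerange calls kfree, so all free pages initially land on the
  // freelist of the CPU that runs kinit.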
  freerange(end, (void*)PHYSTOP);
}

void
kfree(void *pa)
{
  struct run *r;
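  // cpuid() is only stable while interrupts are disabled, hence push_off/pop_off.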
  push_off();
  int index = cpuid();
  pop_off();

  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  // Fill with junk to catch dangling refs.
  memset(pa, 1, PGSIZE);

  r = (struct run*)pa;

  acquire(&kmemlist[index].lock);
  r->next = kmemlist[index].freelist;
  kmemlist[index].freelist = r;
  release(&kmemlist[index].lock);
}

void *
kalloc(void)
{
  struct run *r;
  push_off();
  int index = cpuid();
  pop_off();

  acquire(&kmemlist[index].lock);
  r = kmemlist[index].freelist;
  if(r)
    kmemlist[index].freelist = r->next;
  else{
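    // Local freelist is empty: steal a page from another CPU's freelist.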
    for(int tmp = 0; tmp < NCPU; tmp++){
      if(tmp == index) continue;
      acquire(&kmemlist[tmp].lock);
      if(kmemlist[tmp].freelist){
        r = kmemlist[tmp].freelist;
        kmemlist[tmp].freelist = r->next;
        release(&kmemlist[tmp].lock);
        break;
      }
      release(&kmemlist[tmp].lock);
    }
  }
  release(&kmemlist[index].lock);

  if(r)
    memset((char*)r, 5, PGSIZE); // fill with junk
  return (void*)r;
}
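
One caveat about the stealing loop above: kalloc acquires another CPU's kmem lock while still holding its own, so two CPUs that both have empty freelists and try to steal from each other at the same time can deadlock. Below is a minimal, untested sketch of a variant that releases the local lock before scanning the other CPUs; it assumes the same kmemlist layout as above and is not something I ran against the grader.

void *
kalloc(void)
{
  struct run *r;
  push_off();
  int index = cpuid();
  pop_off();

  // Fast path: pop from this CPU's freelist, holding only its own lock.
  acquire(&kmemlist[index].lock);
  r = kmemlist[index].freelist;
  if(r)
    kmemlist[index].freelist = r->next;
  release(&kmemlist[index].lock);

  // Slow path: local freelist was empty, steal one page from another CPU.
  // At most one kmem lock is held at any time, so no circular wait.
  if(!r){
    for(int tmp = 0; tmp < NCPU; tmp++){
      if(tmp == index) continue;
      acquire(&kmemlist[tmp].lock);
      r = kmemlist[tmp].freelist;
      if(r)
        kmemlist[tmp].freelist = r->next;
      release(&kmemlist[tmp].lock);
      if(r) break;
    }
  }

  if(r)
    memset((char*)r, 5, PGSIZE); // fill with junk
  return (void*)r;
}

The cost of holding at most one lock at a time is a small window in which a page freed back to the local list can be missed; kalloc may then steal from another CPU or return 0, both of which callers already handle.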

Modify the block cache so that the number of acquire loop iterations for all locks in the bcache is close to zero when running bcachetest. Ideally the sum of the counts for all locks involved in the block cache should be zero, but it’s OK if the sum is less than 500. Modify bget and brelse so that concurrent lookups and releases for different blocks that are in the bcache are unlikely to conflict on locks (e.g., don’t all have to wait for bcache.lock). You must maintain the invariant that at most one copy of each block is cached. When you are done, your output should be similar to that shown below (though not identical). Make sure ‘usertests -q’ still passes. make grade should pass all tests when you are done.
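
My changes to kernel/bio.c replace the single LRU list with 13 hash buckets keyed by block number, each guarded by its own spinlock; on a miss, bget steals a buffer with refcnt == 0 from another bucket.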

#define BNUM 13
#define HASH(x) ((x) % BNUM)

struct {
  struct spinlock block_lock[BNUM];
  struct buf buf[NBUF];
  struct buf head[BNUM];
} bcache;

void
binit(void)
{
  struct buf *b;

  for(int i = 0; i < BNUM; i++){
    initlock(&bcache.block_lock[i], "bcache");
    bcache.head[i].prev = &bcache.head[i];
    bcache.head[i].next = &bcache.head[i];
  }

  // Create linked list of buffers
  for(b = bcache.buf; b < bcache.buf+NBUF; b++){
    b->next = bcache.head[0].next;
    b->prev = &bcache.head[0];
    initsleeplock(&b->lock, "buffer");
    bcache.head[0].next->prev = b;
    bcache.head[0].next = b;
  }
}

static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b;
  int index = HASH(blockno);

  acquire(&bcache.block_lock[index]);

  // Is the block already cached?
  for(b = bcache.head[index].next; b != &bcache.head[index]; b = b->next){
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache.block_lock[index]);
      acquiresleep(&b->lock);
      return b;
    }
  }

  release(&bcache.block_lock[index]);
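  // NOTE: after this release, another process may bring the same block
  // into the cache concurrently; see the note after this code listing.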

  // Not cached.
  // Recycle an unused buffer (refcnt == 0) from another bucket.

  for (int i = 0; i < BNUM; i++) {
    if (i == index)  continue;
    acquire(&bcache.block_lock[i]);
    for (b = bcache.head[i].prev; b != &bcache.head[i]; b = b->prev) {
      if (b->refcnt == 0) {
        b->dev = dev;
        b->blockno = blockno;
        b->valid = 0;
        b->refcnt = 1;
        b->prev->next = b->next;
        b->next->prev = b->prev;
        release(&bcache.block_lock[i]);
        acquire(&bcache.block_lock[index]);
        b->next = bcache.head[index].next;
        b->prev = &bcache.head[index];
        bcache.head[index].next->prev = b;
        bcache.head[index].next = b;
        release(&bcache.block_lock[index]);
        acquiresleep(&b->lock);
        return b;
      }
    }
    release(&bcache.block_lock[i]);
  }
  panic("bget: no buffers");
}

void
brelse(struct buf *b)
{
  if(!holdingsleep(&b->lock))
    panic("brelse");

  releasesleep(&b->lock);

  int index = HASH(b->blockno);

  acquire(&bcache.block_lock[index]);
  b->refcnt--;
  release(&bcache.block_lock[index]);
}

void
bpin(struct buf *b) {
  int index = HASH(b->blockno);
  acquire(&bcache.block_lock[index]);
  b->refcnt++;
  release(&bcache.block_lock[index]);
}

void
bunpin(struct buf *b) {
  int index = HASH(b->blockno);
  acquire(&bcache.block_lock[index]);
  b->refcnt--;
  release(&bcache.block_lock[index]);
}
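
Two caveats about this bget, which may be related to the usertests failure mentioned at the top. First, the stealing loop skips bucket index itself, so a free buffer that happens to sit in the home bucket is never reused. Second, between releasing the bucket lock after a miss and finishing the steal, another process can bget the same block and install its own copy, breaking the at-most-one-copy invariant. Below is a rough, untested sketch of the "not cached" path that addresses both; it assumes the same bcache structure and HASH macro as above and is not code I have verified against the grader.

  // Not cached: find any buffer with refcnt == 0, in any bucket
  // (including bucket `index` itself), unlink it, then re-check the
  // home bucket under its lock before installing the block, so two
  // concurrent misses cannot both cache a copy.
  for (int i = 0; i < BNUM; i++) {
    acquire(&bcache.block_lock[i]);
    for (b = bcache.head[i].prev; b != &bcache.head[i]; b = b->prev) {
      if (b->refcnt == 0) {
        // Unlink the free buffer from bucket i while holding its lock.
        b->prev->next = b->next;
        b->next->prev = b->prev;
        release(&bcache.block_lock[i]);

        acquire(&bcache.block_lock[index]);
        // Re-check: another process may have cached the block while we
        // were scanning. If so, reuse its copy and park b in this
        // bucket as a free buffer so it is not lost.
        struct buf *other;
        for (other = bcache.head[index].next; other != &bcache.head[index]; other = other->next) {
          if (other->dev == dev && other->blockno == blockno) {
            other->refcnt++;
            b->refcnt = 0;
            b->next = bcache.head[index].next;
            b->prev = &bcache.head[index];
            bcache.head[index].next->prev = b;
            bcache.head[index].next = b;
            release(&bcache.block_lock[index]);
            acquiresleep(&other->lock);
            return other;
          }
        }
        // Still not cached: claim the stolen buffer.
        b->dev = dev;
        b->blockno = blockno;
        b->valid = 0;
        b->refcnt = 1;
        b->next = bcache.head[index].next;
        b->prev = &bcache.head[index];
        bcache.head[index].next->prev = b;
        bcache.head[index].next = b;
        release(&bcache.block_lock[index]);
        acquiresleep(&b->lock);
        return b;
      }
    }
    release(&bcache.block_lock[i]);
  }
  panic("bget: no buffers");

The key point is that the stolen buffer is linked into the home bucket only after re-checking, under block_lock[index], that no other copy of the block appeared in the meantime.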