b 42 bio.c struct buf *b;
b 50 bio.c for(b = bcache.buf; b < bcache.buf+NBUF; b++){
b 51 bio.c b->next = bcache.head.next;
b 52 bio.c b->prev = &bcache.head;
b 53 bio.c b->dev = -1;
b 54 bio.c bcache.head.next->prev = b;
b 55 bio.c bcache.head.next = b;
b 65 bio.c struct buf *b;
b 71 bio.c for(b = bcache.head.next; b != &bcache.head; b = b->next){
b 72 bio.c if(b->dev == dev && b->blockno == blockno){
b 73 bio.c if(!(b->flags & B_BUSY)){
b 74 bio.c b->flags |= B_BUSY;
b 76 bio.c return b;
b 78 bio.c sleep(b, &bcache.lock);
b 86 bio.c for(b = bcache.head.prev; b != &bcache.head; b = b->prev){
b 87 bio.c if((b->flags & B_BUSY) == 0 && (b->flags & B_DIRTY) == 0){
b 88 bio.c b->dev = dev;
b 89 bio.c b->blockno = blockno;
b 90 bio.c b->flags = B_BUSY;
b 92 bio.c return b;
b 102 bio.c struct buf *b;
b 104 bio.c b = bget(dev, blockno);
b 105 bio.c if(!(b->flags & B_VALID)) {
b 106 bio.c iderw(b);
b 108 bio.c return b;
b 113 bio.c bwrite(struct buf *b)
b 115 bio.c if((b->flags & B_BUSY) == 0)
b 117 bio.c b->flags |= B_DIRTY;
b 118 bio.c iderw(b);
b 124 bio.c brelse(struct buf *b)
b 126 bio.c if((b->flags & B_BUSY) == 0)
b 131 bio.c b->next->prev = b->prev;
b 132 bio.c b->prev->next = b->next;
b 133 bio.c b->next = bcache.head.next;
b 134 bio.c b->prev = &bcache.head;
b 135 bio.c bcache.head.next->prev = b;
b 136 bio.c bcache.head.next = b;
b 138 bio.c b->flags &= ~B_BUSY;
b 139 bio.c wakeup(b);
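
The bio.c entries above trace the buffer cache: binit threads every buffer onto a doubly linked list behind a dummy head, bget scans that list forward for a cached block and backward for a victim, and brelse moves a released buffer back to the front before clearing B_BUSY. A minimal standalone sketch of that move-to-front pattern, using hypothetical names (node, head, move_to_front) rather than xv6's struct buf, and omitting locks and flags:

```c
#include <stdio.h>

struct node {
    int id;
    struct node *prev, *next;
};

static struct node head;   /* dummy head: head.next is most recently used */

static void list_init(void) {
    head.prev = &head;
    head.next = &head;
}

/* Unlink n and reinsert it right after the head, as brelse does,
 * so a forward scan (as in bget) finds recently used nodes first. */
static void move_to_front(struct node *n) {
    n->next->prev = n->prev;
    n->prev->next = n->next;
    n->next = head.next;
    n->prev = &head;
    head.next->prev = n;
    head.next = n;
}

int main(void) {
    struct node a = {1}, b = {2};
    list_init();
    /* Insert both at the front, mirroring binit's loop. */
    a.next = head.next; a.prev = &head; head.next->prev = &a; head.next = &a;
    b.next = head.next; b.prev = &head; head.next->prev = &b; head.next = &b;
    move_to_front(&a);                   /* a becomes most recently used */
    for (struct node *p = head.next; p != &head; p = p->next)
        printf("%d ", p->id);            /* prints: 1 2 */
    printf("\n");
    return 0;
}
```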
b 23 fs.c #define min(a, b) ((a) < (b) ? (a) : (b))
b 56 fs.c int b, bi, m;
b 60 fs.c for(b = 0; b < sb.size; b += BPB){
b 61 fs.c bp = bread(dev, BBLOCK(b, sb));
b 62 fs.c for(bi = 0; bi < BPB && b + bi < sb.size; bi++){
b 68 fs.c bzero(dev, b + bi);
b 69 fs.c return b + bi;
b 79 fs.c bfree(int dev, uint b)
b 85 fs.c bp = bread(dev, BBLOCK(b, sb));
b 86 fs.c bi = b % BPB;
b 47 fs.h #define BBLOCK(b, sb) (b/BPB + sb.bmapstart)
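
The fs.c and fs.h entries above implement the free-block bitmap: balloc walks the disk BPB blocks at a time, reads the bitmap block BBLOCK(b, sb), and tests bit bi within it, while bfree locates bit b % BPB the same way. A small sketch of that index arithmetic; BSIZE and bmapstart here are assumed values for illustration (in xv6, BPB is BSIZE*8, the number of bits per bitmap block):

```c
#include <stdio.h>

#define BSIZE 512            /* assumed block size for this sketch */
#define BPB   (BSIZE * 8)    /* data blocks covered per bitmap block */

/* Bitmap block that holds the bit for data block b,
 * mirroring: #define BBLOCK(b, sb) (b/BPB + sb.bmapstart) */
static unsigned bblock(unsigned b, unsigned bmapstart) {
    return b / BPB + bmapstart;
}

int main(void) {
    unsigned bmapstart = 58;        /* hypothetical start of the bitmap area */
    unsigned b = 10000;             /* a data block number */
    unsigned bi = b % BPB;          /* bit index within its bitmap block */
    unsigned m = 1 << (bi % 8);     /* mask for that bit within one byte */
    printf("block %u -> bitmap block %u, bit %u, byte %u, mask 0x%x\n",
           b, bblock(b, bmapstart), bi, bi / 8, m);
    return 0;
}
```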
b 72 ide.c idestart(struct buf *b)
b 74 ide.c if(b == 0)
b 76 ide.c if(b->blockno >= FSSIZE)
b 79 ide.c int sector = b->blockno * sector_per_block;
b 89 ide.c outb(0x1f6, 0xe0 | ((b->dev&1)<<4) | ((sector>>24)&0x0f));
b 90 ide.c if(b->flags & B_DIRTY){
b 92 ide.c outsl(0x1f0, b->data, BSIZE/4);
b 102 ide.c struct buf *b;
b 106 ide.c if((b = idequeue) == 0){
b 111 ide.c idequeue = b->qnext;
b 114 ide.c if(!(b->flags & B_DIRTY) && idewait(1) >= 0)
b 115 ide.c insl(0x1f0, b->data, BSIZE/4);
b 118 ide.c b->flags |= B_VALID;
b 119 ide.c b->flags &= ~B_DIRTY;
b 120 ide.c wakeup(b);
b 134 ide.c iderw(struct buf *b)
b 138 ide.c if(!(b->flags & B_BUSY))
b 140 ide.c if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
b 142 ide.c if(b->dev != 0 && !havedisk1)
b 148 ide.c b->qnext = 0;
b 151 ide.c *pp = b;
b 154 ide.c if(idequeue == b)
b 155 ide.c idestart(b);
b 158 ide.c while((b->flags & (B_VALID|B_DIRTY)) != B_VALID){
b 159 ide.c sleep(b, &idelock);
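
The ide.c entries above queue a buffer on idequeue and sleep until the interrupt handler (ideintr, whose lines set B_VALID, clear B_DIRTY, and wakeup) has finished it. The append uses a pointer-to-pointer walk ("*pp = b;"), which handles empty and non-empty queues uniformly; a minimal sketch of that pattern with hypothetical names and no locking:

```c
#include <stdio.h>

struct req {
    int blockno;
    struct req *qnext;
};

static struct req *queue;   /* head of a singly linked FIFO, like idequeue */

static void enqueue(struct req *r) {
    struct req **pp;
    r->qnext = 0;
    /* Walk to the tail's next pointer, then hang r there; no special
     * case is needed for an empty queue. */
    for (pp = &queue; *pp; pp = &(*pp)->qnext)
        ;
    *pp = r;
}

int main(void) {
    struct req a = {7, 0}, b = {9, 0};
    enqueue(&a);
    enqueue(&b);
    for (struct req *r = queue; r; r = r->qnext)
        printf("%d ", r->blockno);   /* prints: 7 9 */
    printf("\n");
    return 0;
}
```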
b 211 log.c log_write(struct buf *b)
b 222 log.c if (log.lh.block[i] == b->blockno) // log absorption
b 225 log.c log.lh.block[i] = b->blockno;
b 228 log.c b->flags |= B_DIRTY; // prevent eviction
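
The log.c entries above record a written block in the in-memory log header, reusing an existing slot when the block is already logged (absorption) and pinning the buffer with B_DIRTY so it is not evicted. A simplified sketch of the absorption check, with an assumed LOGSIZE and no locking or buffer-flag handling:

```c
#include <stdio.h>

#define LOGSIZE 30   /* assumed capacity of the log header */

struct logheader {
    int n;
    int block[LOGSIZE];
};

static struct logheader lh;

/* Record blockno in the log, absorbing repeat writes to the same block. */
static void log_record(int blockno) {
    int i;
    for (i = 0; i < lh.n; i++) {
        if (lh.block[i] == blockno)   /* absorption: already logged */
            break;
    }
    lh.block[i] = blockno;
    if (i == lh.n)
        lh.n++;
}

int main(void) {
    log_record(3);
    log_record(7);
    log_record(3);                              /* absorbed: log does not grow */
    printf("log holds %d blocks\n", lh.n);      /* prints: log holds 2 */
    return 0;
}
```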
b 38 memide.c iderw(struct buf *b)
b 42 memide.c if(!(b->flags & B_BUSY))
b 44 memide.c if((b->flags & (B_VALID|B_DIRTY)) == B_VALID)
b 46 memide.c if(b->dev != 1)
b 48 memide.c if(b->blockno >= disksize)
b 51 memide.c p = memdisk + b->blockno*BSIZE;
b 53 memide.c if(b->flags & B_DIRTY){
b 54 memide.c b->flags &= ~B_DIRTY;
b 55 memide.c memmove(p, b->data, BSIZE);
b 57 memide.c memmove(b->data, p, BSIZE);
b 58 memide.c b->flags |= B_VALID;
b 15 mkfs.c #define static_assert(a, b) do { switch (0) case 0: case (a): ; } while (0)
b 253 mkfs.c #define min(a, b) ((a) < (b) ? (a) : (b))
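
The mkfs.c static_assert macro above relies on duplicate case labels: when the condition evaluates to zero, "case 0:" and "case (a):" collide and the compiler rejects the file; a nonzero condition yields two distinct labels and the switch compiles to nothing of consequence. The message argument is ignored. A short sketch of how it might be used in host-side C, as in mkfs:

```c
#include <stdio.h>

#define static_assert(a, b) do { switch (0) case 0: case (a): ; } while (0)

int main(void) {
    /* Compiles: the condition is a nonzero constant, so the labels differ. */
    static_assert(sizeof(int) >= 2, "int is too small");
    /* Would not compile: duplicate "case 0" labels.
     * static_assert(sizeof(int) == 1, "never true"); */
    printf("assertions held at compile time\n");
    return 0;
}
```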
b 1418 usertests.c char *a, *b, *c, *lastaddr, *oldbrk, *p, scratch;
b 1428 usertests.c b = sbrk(1);
b 1429 usertests.c if(b != a){
b 1430 usertests.c printf(stdout, "sbrk test failed %d %x %x\n", i, a, b);
b 1433 usertests.c *b = 1;
b 1434 usertests.c a = b + 1;