Plan 9 from Bell Labs’s /usr/web/sources/extra/9hist/pc/mmu.c

Copyright © 2021 Plan 9 Foundation.
Distributed under the MIT License.


## diffname pc/mmu.c 1991/0612
## diff -e /dev/null /n/bootesdump/1991/0612/sys/src/9/safari/mmu.c
0a
#include	"u.h"
#include	"lib.h"
#include	"mem.h"
#include	"dat.h"
#include	"fns.h"
#include	"io.h"



void
mapstack(Proc *p)
{
}
.
## diffname pc/mmu.c 1991/0613
## diff -e /n/bootesdump/1991/0612/sys/src/9/safari/mmu.c /n/bootesdump/1991/0613/sys/src/9/safari/mmu.c
13c
[NULLSEG]	{ 0, 0},		/* null descriptor */
[KESEG]		EXECSEG(0),		/* kernel code */
[KDSEG]		DATASEG(0),		/* kernel data/stack */
[UESEG]		EXECSEG(3),		/* user code */
[UDSEG]		DATASEG(3),		/* user data/stack */
[SYSGATE]	CALLGATE(KESEG, syscall, 3),	/* call gate for system calls */
		{ 0, 0},		/* the rest */
};
.
8,11c
/*
 *  global descriptor table describing all segments
 */
Segdesc gdt[1024] =
.
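
A note on how these descriptors are addressed: a segment register holds a selector, which is the descriptor's byte offset into gdt with the requested privilege level in the low two bits. A minimal standalone sketch of that relationship (the SELECTOR macro and the enum values are illustrative assumptions, not copied from mem.h):

#include <stdio.h>

/* descriptor indices, mirroring the gdt order above */
enum { NULLSEG, KESEG, KDSEG, UESEG, UDSEG, SYSGATE };

/* selector = index*8; low 2 bits = requested privilege level */
#define SELECTOR(i, rpl)	(((i)<<3) | (rpl))

int
main(void)
{
	printf("KESEL %#x KDSEL %#x\n", SELECTOR(KESEG, 0), SELECTOR(KDSEG, 0));
	printf("UESEL %#x UDSEL %#x\n", SELECTOR(UESEG, 3), SELECTOR(UDSEG, 3));
	return 0;
}
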
## diffname pc/mmu.c 1991/0625
## diff -e /n/bootesdump/1991/0613/sys/src/9/safari/mmu.c /n/bootesdump/1991/0625/sys/src/9/safari/mmu.c
19d
8a
 *  segment descriptor/gate
 */
typedef struct Segdesc	Segdesc;
struct Segdesc
{
	ulong	d0;
	ulong	d1;
};
#define SEGDATA	(0x10<<8)	/* data/stack segment */
#define SEGEXEC	(0x18<<8)	/* executable segment */
#define SEGCG	(0x0C<<8)	/* call gate */
#define	SEGIG	(0x0E<<8)	/* interrupt gate */
#define SEGTG	(0x0F<<8)	/* task gate */

#define SEGP	(1<<15)		/* segment present */
#define SEGPL(x) ((x)<<13)	/* privilege level */
#define SEGB	(1<<22)		/* granularity 1==4k (for expand-down) */
#define SEGG	(1<<23)		/* granularity 1==4k (for other) */
#define SEGE	(1<<10)		/* expand down */
#define SEGW	(1<<9)		/* writable (for data/stack) */
#define	SEGR	(1<<9)		/* readable (for code) */
#define SEGD	(1<<22)		/* default 1==32bit (for code) */

/*
 *  gate initializers
 */
#define TRAPGATE(s,o,p)	{ (o)&0xFFFF0000|SEGP|SEGPL(p)|SEGTG, (o)&0xFFFF|((s)<<16) }
#define INTRGATE(s,o,p)	{ (o)&0xFFFF0000|SEGP|SEGPL(p)|SEGIG, (o)&0xFFFF|((s)<<16) }
#define CALLGATE(s,o,p)	{ (o)&0xFFFF0000|SEGP|SEGPL(p)|SEGCG, (o)&0xFFFF|((s)<<16) }

/*
 *  segment descriptor initializers
 */
#define	DATASEG(p) 	{ SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW, 0xFFFF }
#define	EXECSEG(p) 	{ SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR, 0xFFFF }

/*
 *  task state segment.  Plan 9 ignores all the task switching goo and just
 *  uses the tss for esp0 and ss0 on gates into the kernel, interrupts,
 *  and exceptions.  The rest is completely ignored.
 *
 *  This means that we only need one tss in the whole system.
 */
typedef struct Tss	Tss;
struct Tss
{
	ulong	backlink;	/* unused */
	ulong	esp0;		/* pl0 stack pointer */
	ulong	ss0;		/* pl0 stack selector */
	ulong	esp1;		/* pl1 stack pointer */
	ulong	ss1;		/* pl1 stack selector */
	ulong	esp2;		/* pl2 stack pointer */
	ulong	ss2;		/* pl2 stack selector */
	ulong	cr3;		/* page table descriptor */
	ulong	eip;		/* instruction pointer */
	ulong	eflags;		/* processor flags */
	ulong	eax;		/* general (hah?) registers */
	ulong 	ecx;
	ulong	edx;
	ulong	ebx;
	ulong	esp;
	ulong	ebp;
	ulong	esi;
	ulong	edi;
	ulong	es;		/* segment selectors */
	ulong	cs;
	ulong	ss;
	ulong	ds;
	ulong	fs;
	ulong	gs;
	ulong	ldt;		/* local descriptor table */
	ulong	iomap;		/* io map base */
};

/*
.
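
This revision stores the attribute word in d0 and the low limit bits in d1; the 1991/0703 revision below swaps them into the order the hardware reads an 8-byte descriptor. As a standalone sketch (attribute bits copied from the diff above), here is how DATASEG(0) decodes into a flat 4GB data segment:

#include <stdio.h>

typedef unsigned long ulong;

/* attribute bits copied from the revision above */
#define SEGDATA	(0x10<<8)
#define SEGP	(1<<15)
#define SEGPL(x)	((x)<<13)
#define SEGB	(1<<22)
#define SEGG	(1<<23)
#define SEGW	(1<<9)

int
main(void)
{
	ulong d0 = SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(0)|SEGDATA|SEGW;	/* DATASEG(0) */
	ulong d1 = 0xFFFF;
	ulong limit = ((d0>>16)&0xF)<<16 | (d1&0xFFFF);

	/* SEGG set: the 20-bit limit counts 4K pages, (0xFFFFF+1)*4096 = 4GB */
	printf("base 0, limit %#lx %s\n", limit, (d0&SEGG)? "pages" : "bytes");
	return 0;
}
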
## diffname pc/mmu.c 1991/0627
## diff -e /n/bootesdump/1991/0625/sys/src/9/safari/mmu.c /n/bootesdump/1991/0627/sys/src/9/safari/mmu.c
17,30d
## diffname pc/mmu.c 1991/0703
## diff -e /n/bootesdump/1991/0627/sys/src/9/safari/mmu.c /n/bootesdump/1991/0703/sys/src/9/safari/mmu.c
80a

void
mmuinit(void)
{
	gdt[SYSGATE].d0 = ((ulong)systrap)&0xFFFF|(KESEL<<16);
	gdt[SYSGATE].d1 = ((ulong)systrap)&0xFFFF0000|SEGP|SEGPL(3)|SEGCG;
	lgdt(gdt, sizeof gdt);
}

void
systrap(void)
{
	panic("system trap from user");
}
.
79c
[UESEG]		EXECSEG(3),		/* user code */
[SYSGATE]	CALLGATE(KESEL,0,3),	/* call gate for system calls */
.
77c
[KESEG]		EXECSEG(0),		/* kernel code */
.
75d
72c
Segdesc gdt[6] =
.
69a
 *  segment descriptor initializers
 */
#define	DATASEG(p) 	{ 0xFFFF, SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW }
#define	EXECSEG(p) 	{ 0xFFFF, SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR }
#define CALLGATE(s,o,p)	{ (o)&0xFFFF|((s)<<16), (o)&0xFFFF0000|SEGP|SEGPL(p)|SEGCG }

/*
.
9,31d
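
The gate for SYSGATE now has to be finished at run time: a call gate splits its handler offset 16/16 across the two descriptor words, and that arithmetic on systrap's address is beyond the loader. A standalone sketch of the split and reassembly (the handler address and selector are made-up values):

#include <stdio.h>

typedef unsigned long ulong;

#define SEGCG	(0x0C<<8)	/* call gate */
#define SEGP	(1<<15)		/* present */
#define SEGPL(x)	((x)<<13)	/* privilege level */

int
main(void)
{
	ulong handler = 0x80012345UL;	/* pretend address of systrap */
	ulong sel = 0x08;		/* pretend KESEL */
	ulong d0 = (handler & 0xFFFF) | (sel<<16);
	ulong d1 = (handler & 0xFFFF0000) | SEGP | SEGPL(3) | SEGCG;

	printf("offset %#lx selector %#lx\n",
		(d1 & 0xFFFF0000) | (d0 & 0xFFFF), (d0>>16) & 0xFFFF);
	return 0;
}
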
## diffname pc/mmu.c 1991/0706
## diff -e /n/bootesdump/1991/0703/sys/src/9/safari/mmu.c /n/bootesdump/1991/0706/sys/src/9/safari/mmu.c
63a
[RDSEG]		D16SEG(0),		/* reboot data/stack */
[RESEG]		E16SEG(0),		/* reboot code */
.
56c
Segdesc gdt[] =
.
51a
#define	D16SEG(p) 	{ 0xFFFF, (0x0<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW }
#define	E16SEG(p) 	{ 0xFFFF, (0x0<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR }
.
## diffname pc/mmu.c 1991/0711
## diff -e /n/bootesdump/1991/0706/sys/src/9/safari/mmu.c /n/bootesdump/1991/0711/sys/src/9/safari/mmu.c
75a
}

void
mapstack(Proc *p)
{
}

void
flushmmu(void)
{
}

void
mmurelease(Proc *p)
{
}

void
putmmu(ulong x, ulong y, Page*z)
{
}

void
invalidateu(void)
{
.
72a

.
69a
extern ulong tpt[];

.
## diffname pc/mmu.c 1991/0716
## diff -e /n/bootesdump/1991/0711/sys/src/9/safari/mmu.c /n/bootesdump/1991/0716/sys/src/9/safari/mmu.c
78a

	/*
	 *  set up system page tables
	 */

	/*
	 *  set up the task segment
	 */
.
75c
	/*
	 *  set up the global descriptor table
	 */
.
## diffname pc/mmu.c 1991/0717
## diff -e /n/bootesdump/1991/0716/sys/src/9/safari/mmu.c /n/bootesdump/1991/0717/sys/src/9/safari/mmu.c
119a
}

void
exit(void)
{
	int i;

	u = 0;
	print("exiting\n");
	for(i = 0; i < WD2PG; i++)
		toppt[i] = 0;
	lcr3(((ulong)toppt)&~KZERO);
.
107c
putmmu(ulong x, ulong y, Page *z)
.
93a
	
.
88a
	tss.sp0 = USERADDR;
	tss.ss0 = KDSEL;
	tss.cr3 = (ulong)toppt;
	ltr(TSSSEL);
.
85a
	/*  allocate and fill low level page tables for physical mem */
	nkpt = ROUNDUP(conf.npage0+conf.npage1, 4*1024*1024);
	nkpt = nkpt/(4*1024*1024);
	kpt = ialloc(nkpt*BY2PG, 1);
	n = ROUNDUP(conf.npage0+conf.npage1, 1*1024*1024);
	n = n/(4*1024);
	for(i = 0; i < n; i++){
		kpt[i] = (i<<PGSHIFT)|PTEVALID|PTEKERNEL|PTEWRITE;
	}

	/*  allocate page table for u-> */
	upt = ialloc(BY2PG, 1);

	/*  allocate top level table and put pointers to lower tables in it */
	toppt = ialloc(BY2PG, 1);
	x = KZERO>>(2*PGSHIFT-2);
	y = ((ulong)kpt)&~KZERO;
	for(i = 0; i < nkpt; i++){
/*		toppt[i] = (y+i*BY2PG)|PTEVALID|PTEKERNEL|PTEWRITE;/**/
		toppt[x+i] = (y+i*BY2PG)|PTEVALID|PTEKERNEL|PTEWRITE;
	}
	x = USERADDR>>(2*PGSHIFT-2);
	y = ((ulong)upt)&~KZERO;
	toppt[x] = y|PTEVALID|PTEKERNEL|PTEWRITE;
	lcr3(((ulong)toppt)&~KZERO);

.
83c
	 *  set up system page tables.
	 *  map all of physical memory to start at KZERO.
	 *  leave a map for a user area.
.
78,79c
	x = (ulong)systrap;
	gdt[SYSGATE].d0 = (x&0xFFFF)|(KESEL<<16);
	gdt[SYSGATE].d1 = (x&0xFFFF0000)|SEGP|SEGPL(3)|SEGCG;
	x = (long)&tss;
	gdt[TSSSEG].d0 = (x<<16)|sizeof(Tss);
	gdt[TSSSEG].d1 = (x&0xFF000000)|((x>>16)&0xFF)|SEGTSS|SEGPL(0)|SEGP;
.
74a
	int i, n, nkpt;
	ulong x;
	ulong y;

.
71a
#define ROUNDUP(s,v)	(((s)+(v-1))&~(v-1))

.
70c
static ulong	*toppt;		/* top level page table */	
static ulong	*kpt;		/* kernel level page tables */
static ulong	*upt;		/* page table for struct User */
.
66,67c
[RDSEG]		D16SEGM(0),		/* reboot data/stack */
[RESEG]		E16SEGM(0),		/* reboot code */
[TSSSEG]	TSSSEGM(0,0),		/* tss segment */
.
61,64c
[KDSEG]		DATASEGM(0),		/* kernel data/stack */
[KESEG]		EXECSEGM(0),		/* kernel code */
[UDSEG]		DATASEGM(3),		/* user data/stack */
[UESEG]		EXECSEGM(3),		/* user code */
.
49,53c
#define	DATASEGM(p) 	{ 0xFFFF, SEGG|SEGB|(0xF<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW }
#define	EXECSEGM(p) 	{ 0xFFFF, SEGG|SEGD|(0xF<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR }
#define CALLGATE(s,o,p)	{ ((o)&0xFFFF)|((s)<<16), (o)&0xFFFF0000|SEGP|SEGPL(p)|SEGCG }
#define	D16SEGM(p) 	{ 0xFFFF, (0x0<<16)|SEGP|SEGPL(p)|SEGDATA|SEGW }
#define	E16SEGM(p) 	{ 0xFFFF, (0x0<<16)|SEGP|SEGPL(p)|SEGEXEC|SEGR }
#define	TSSSEGM(b,p)	{ ((b)<<16)|sizeof(Tss),\
			  ((b)&0xFF000000)|(((b)<<16)&0xFF)|SEGTSS|SEGPL(p)|SEGP }
.
44a
Tss tss;
.
23c
	ulong	sp2;		/* pl2 stack pointer */
.
21c
	ulong	sp1;		/* pl1 stack pointer */
.
19c
	ulong	sp0;		/* pl0 stack pointer */
.
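
The sizing arithmetic in this revision follows from one page of 4-byte ptes covering 1024 pages, i.e. 4MB of physical memory, so nkpt is the memory size rounded up to and divided by 4MB. A standalone sketch of the arithmetic (the 16MB memory size is an assumed example):

#include <stdio.h>

#define BY2PG	4096
#define ROUNDUP(s,v)	(((s)+(v)-1)&~((v)-1))

int
main(void)
{
	unsigned long membytes = 16*1024*1024;	/* assume a 16MB machine */
	unsigned long nkpt, npte;

	/* one page of 4-byte ptes maps 1024*4096 bytes = 4MB */
	nkpt = ROUNDUP(membytes, 4*1024*1024)/(4*1024*1024);
	npte = membytes/BY2PG;
	printf("%lu ptes in %lu page-table pages\n", npte, nkpt);
	return 0;
}
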
## diffname pc/mmu.c 1991/0718
## diff -e /n/bootesdump/1991/0717/sys/src/9/safari/mmu.c /n/bootesdump/1991/0718/sys/src/9/safari/mmu.c
180c
	putcr3(((ulong)toppt)&~KZERO);
.
162a
	/* unmap u area */
	upt[0] = 0;

	/* flush cached mmu entries */
	putcr3(((ulong)toppt)&~KZERO);
.
157a
	int topoff;
	ulong *pt;
	Proc *p;
	int i;

print("putmmu %lux %lux USTKTOP %lux\n", va, pa, USTKTOP); /**/
	if(u==0)
		panic("putmmu");
	p = u->p;

	/*
	 *  check for exec/data vs stack vs illegal
	 */
	topoff = TOPOFF(va);
	if(topoff < TOPOFF(TSTKTOP) && topoff >= TOPOFF(USTKBTM))
		i = MAXMMU + topoff - TOPOFF(USTKBTM);
	else if(topoff < MAXMMU)
		i = topoff;
	else
		panic("putmmu bad addr %lux", va);

	/*
	 *  if bottom level page table missing, allocate one
	 */
	pg = p->mmu[i];
	if(pg == 0){
		pg = p->mmu[i] = newpage(1, 0, 0);
		p->mmue[i] = PPN(pg->pa) | PTEVALID | PTEKERNEL | PTEWRITE;
		toppt[topoff] = p->mmue[i];
	}

	/*
	 *  fill in the bottom level page table
	 */
	pt = (ulong*)(p->mmu[i]->pa|KZERO);
	pt[BTMOFF(va)] = pa | PTEUSER;

	/* flush cached mmu entries */
	putcr3(((ulong)toppt)&~KZERO);
.
156c
putmmu(ulong va, ulong pa, Page *pg)
.
152a
	p->mmuvalid = 0;
.
147a
	int s;

	if(u == 0)
		return;

	u->p->mmuvalid = 0;
	s = splhi();
	mapstack(u->p);
	splx(s);
.
142c
	ulong tlbphys;
	int i;

print("mapstack\n");

	if(p->upage->va != (USERADDR|(p->pid&0xFFFF)))
		panic("mapstack %d 0x%lux 0x%lux", p->pid, p->upage->pa, p->upage->va);

	/*
 	 *  dump any invalid mappings
	 */
	if(p->mmuvalid == 0){
		for(i = 0; i < MAXMMU+MAXSMMU; i++){
			if(p->mmu[i]==0)
				continue;
			memset(kmap(p->mmu[i]), 0, BY2PG);
		}
		p->mmuvalid = 1;
	}

	/*
	 *  point top level page table to bottom level ones
	 */
	memmove(toppt, p->mmu, MAXMMU*sizeof(ulong));
	memmove(&toppt[TOPOFF(USTKBTM)], &p->mmu[MAXMMU], MAXSMMU*sizeof(ulong));

	/* map in u area */
	upt[0] = PPN(p->upage->pa) | PTEVALID | PTEKERNEL | PTEWRITE;

	/* flush cached mmu entries */
	putcr3(((ulong)toppt)&~KZERO);

	u = (User*)USERADDR;
.
136c
	puttr(TSSSEL);
.
133c
	tss.sp0 = USERADDR+BY2PG;
.
127,128c
	toppt[x] = y | PTEVALID | PTEKERNEL | PTEWRITE;
	putcr3(((ulong)toppt)&~KZERO);
.
121,125c
	for(i = 0; i < nkpt; i++)
		toppt[x+i] = (y+i*BY2PG) | PTEVALID | PTEKERNEL | PTEWRITE;
	x = TOPOFF(USERADDR);
.
119c
	x = TOPOFF(KZERO);
.
110,112c
	for(i = 0; i < n; i++)
		kpt[i] = (i<<PGSHIFT) | PTEVALID | PTEKERNEL | PTEWRITE;
.
96c
	putgdt(gdt, sizeof gdt);
.
93c
	x = (ulong)&tss;
.
79a
/*
 *  offset of virtual address into
 *  bottom level page table
 */
#define BTMOFF(v)	(((v)>>(PGSHIFT))&(BY2PG-1))

.
78a
/*
 *  offset of virtual address into
 *  top level page table
 */
#define TOPOFF(v)	((v)>>(2*PGSHIFT-2))
.
## diffname pc/mmu.c 1991/0719
## diff -e /n/bootesdump/1991/0718/sys/src/9/safari/mmu.c /n/bootesdump/1991/0719/sys/src/9/safari/mmu.c
263,274d
243a
print("%lux[%d] = %lux\n", pt, BTMOFF(va), pa | PTEUSER);
.
236a
print("toppt[%d] = %lux\n", topoff, p->mmue[i]);
.
235c
		p->mmue[i] = PPN(pg->pa) | PTEVALID | PTEUSER | PTEWRITE;
.
213c
print("putmmu %lux %lux\n", va, pa); /**/
.
153,154d
119,120c
	n = ROUNDUP(conf.npage, 1024);
.
115,117c
	/*  allocate and fill low level page tables for kernel mem */
	nkpt = ROUNDUP(conf.npage, 4*1024);
	nkpt = nkpt/(4*1024);
.
91a
mmudump(void)
{
	int i;
	ulong *z;
	z = (ulong*)gdt;
	for(i = 0; i < sizeof(gdt)/4; i+=2)
		print("%8.8lux %8.8lux\n", *z++, *z++);
	print("UESEL %lux UDSEL %lux\n", UESEL, UDSEL);
	print("KESEL %lux KDSEL %lux\n", KESEL, KDSEL);
	panic("done");
}

void
.
89c
#define BTMOFF(v)	(((v)>>(PGSHIFT))&(WD2PG-1))
.
69,70d
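
With the BTMOFF mask corrected to WD2PG-1 in this revision, the two macros now extract the two 10-bit indices of the 386's two-level lookup; the remaining 12 bits are the byte offset within the page. A standalone worked example, assuming the usual PGSHIFT of 12:

#include <stdio.h>

#define PGSHIFT	12
#define BY2PG	(1<<PGSHIFT)
#define WD2PG	(BY2PG/4)	/* 1024 4-byte words per page */

#define TOPOFF(v)	(((unsigned long)(v))>>(2*PGSHIFT-2))		/* bits 31-22 */
#define BTMOFF(v)	((((unsigned long)(v))>>PGSHIFT)&(WD2PG-1))	/* bits 21-12 */

int
main(void)
{
	unsigned long va = 0x80123456UL;	/* an arbitrary example address */

	printf("va %#lx -> top %lu, btm %lu, offset %#lx\n",
		va, TOPOFF(va), BTMOFF(va), va&(BY2PG-1));
	return 0;
}
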
## diffname pc/mmu.c 1991/0720
## diff -e /n/bootesdump/1991/0719/sys/src/9/safari/mmu.c /n/bootesdump/1991/0720/sys/src/9/safari/mmu.c
253c
print("%lux[%d] now %lux\n", pt, BTMOFF(va), pt[BTMOFF(va)]);
.
250a
print("%lux[%d] was %lux\n", pt, BTMOFF(va), pt[BTMOFF(va)]);
.
245c
print("toppt[%d] = %lux\n", topoff, toppt[topoff]);
.
## diffname pc/mmu.c 1991/0723
## diff -e /n/bootesdump/1991/0720/sys/src/9/safari/mmu.c /n/bootesdump/1991/0723/sys/src/9/safari/mmu.c
252a
print("%lux[%d] was %lux\n", pt, BTMOFF(va), pt[BTMOFF(va)]);
.
251d
245c
print("toppt[%d] now %lux\n", topoff, toppt[topoff]);
.
240a
print("toppt[%d] was %lux\n", topoff, toppt[topoff]);
.
219c
	int i = 0;
.
181,182c
	memmove(toppt, p->mmue, MAXMMU*sizeof(ulong));
	memmove(&toppt[TOPOFF(USTKBTM)], &p->mmue[MAXMMU], MAXSMMU*sizeof(ulong));
.
## diffname pc/mmu.c 1991/0801
## diff -e /n/bootesdump/1991/0723/sys/src/9/safari/mmu.c /n/bootesdump/1991/0801/sys/src/9/safari/mmu.c
255c
/*print("%lux[%d] now %lux\n", pt, BTMOFF(va), pt[BTMOFF(va)]);/**/
.
253c
/*print("%lux[%d] was %lux\n", pt, BTMOFF(va), pt[BTMOFF(va)]);/**/
.
246c
/*print("toppt[%d] now %lux\n", topoff, toppt[topoff]);/**/
.
241c
/*print("toppt[%d] was %lux\n", topoff, toppt[topoff]);/**/
.
221c
/*print("putmmu %lux %lux\n", va, pa); /**/
.
## diffname pc/mmu.c 1991/0821
## diff -e /n/bootesdump/1991/0801/sys/src/9/safari/mmu.c /n/bootesdump/1991/0821/sys/src/9/safari/mmu.c
132a

print("%d low level pte's, %d high level pte's\n", npage, nkpt);
.
127,131c
	npage = (1024*1024)/BY2PG + conf.npage1;
	nbytes = PGROUND(npage*BY2WD);		/* words of page map */
	nkpt = nbytes/BY2PG;			/* pages of page map */
	kpt = ialloc(nbytes, 1);
	for(i = 0; i < npage; i++)
.
105c
	int i, nkpt, npage, nbytes;
.
## diffname pc/mmu.c 1991/0827
## diff -e /n/bootesdump/1991/0821/sys/src/9/safari/mmu.c /n/bootesdump/1991/0827/sys/src/9/safari/mmu.c
127c
	npage = conf.base1/BY2PG + conf.npage1;
.
## diffname pc/mmu.c 1991/0922
## diff -e /n/bootesdump/1991/0827/sys/src/9/safari/mmu.c /n/bootesdump/1991/0922/sys/src/9/safari/mmu.c
71a
static ulong	*ktoppt;	/* prototype top level page table
				 * containing kernel mappings
				 */
.
## diffname pc/mmu.c 1991/0928
## diff -e /n/bootesdump/1991/0922/sys/src/9/safari/mmu.c /n/bootesdump/1991/0928/sys/src/9/safari/mmu.c
168c
	if(p->upage->va != (USERADDR|(p->pid&0xFFFF)) && p->pid != 0)
.
## diffname pc/mmu.c 1991/1003
## diff -e /n/bootesdump/1991/0928/sys/src/9/safari/mmu.c /n/bootesdump/1991/1003/sys/src/9/safari/mmu.c
161a

.
## diffname pc/mmu.c 1991/1004
## diff -e /n/bootesdump/1991/1003/sys/src/9/safari/mmu.c /n/bootesdump/1991/1004/sys/src/9/safari/mmu.c
274c
	putcr3(ktoppg.pa);
.
264c
	putcr3(p->mmutop->pa);
.
261d
258,259c
	pt = (ulong*)(PPN(top[topoff])|KZERO);
.
256c
	 *  put in new mmu entry
.
246,252c
	topoff = TOPOFF(va);
	if(top[topoff] == 0){
		pg = mmugetpage(1);
		top[topoff] = PPN(pg->pa) | PTEVALID | PTEUSER | PTEWRITE;
		pg->next = p->mmuused;
		p->mmuused = pg;
.
244c
	 *  if bottom level page table missing, allocate one and point
	 *  the top level page at it.
.
235,241c
	if(p->mmutop == 0){
		p->mmutop = mmugetpage(0);
		memmove((void*)p->mmutop->va, (void*)ktoppg.va, BY2PG);
	}
	top = (ulong*)p->mmutop->va;
.
233c
	 *  if no top level page, allocate one and copy the prototype
	 *  into it.
.
231a
	if(va >= USERADDR && va < USERADDR + FOURMEG)
		print("putmmu in USERADDR page table 0x%lux\n", va);
	if((va & 0xF0000000) == KZERO)
		print("putmmu in kernel page table 0x%lux\n", va);

.
229a

.
227d
225c
	char err[64];
.
222a
	ulong *top;
.
214,219d
212a
/*
 *  Add an entry into the mmu.
 */
#define FOURMEG (4*1024*1024)
.
207,210c
	/* give away page table pages */
	for(pg = p->mmufree; pg; pg = next){
		next = pg->next;
		simpleputpage(pg);
	}
	p->mmufree = 0;
	for(pg = p->mmuused; pg; pg = next){
		next = pg->next;
		simpleputpage(pg);
	}
	p->mmuused = 0;
	if(p->mmutop)
		simpleputpage(p->mmutop);
	p->mmutop = 0;
.
204,205c
	/* point 386 to prototype page map */
	putcr3(ktoppg.pa);
.
202c
	Page *pg;
	Page *next;
.
200c
mmurelease(Proc *p)
.
198a
/*
 *  give all page table pages back to the free pool.  This is called in sched()
 *  with palloc locked.
 */
.
193,194c
	/* tell processor about new page table (flushes cached entries) */
	putcr3(pg->pa);
.
184,189d
172,182c
	if(p->mmutop)
		pg = p->mmutop;
	else
		pg = &ktoppg;
.
167a
	Page *pg;
.
163a
flushmmu(void)
{
	int s;
	Proc *p;
	Page *pg;

	if(u == 0)
		return;

	p = u->p;
	s = splhi();
	if(p->mmutop){
		p->mmutop->next = p->mmufree;
		p->mmufree = p->mmutop;
		for(pg = p->mmufree; pg->next; pg = pg->next)
			;
		pg->next = p->mmuused;
		p->mmutop = 0;
		p->mmuused = 0;
	}
	mapstack(u->p);
	splx(s);
}

/*
 *  Switch to a process's memory map.  If the process doesn't
 *  have a map yet, just use the prototype one that contains
 *  mappings for only the kernel and the User struct.
 */
void
.
162a
	if(p->mmufree){
		pg = p->mmufree;
		p->mmufree = pg->next;
		if(clear)
			memset((void*)pg->va, 0, BY2PG);
	} else {
		pg = newpage(clear, 0, 0);
		pg->va = VA(kmap(pg));
	}
	return pg;
}

/*
 *  Put all page map pages on the process's free list and
 *  call mapstack to set up the prototype page map.  This
 *  effectively forgets all of the process's mappings.
 */
.
161a
/*
 *  Get a page for a process's page map.
 *
 *  Each process maintains its own free list of page
 *  table pages.  All page table pages are put on
 *  this list in flushmmu().  flushmmu() doesn't
 *  putpage() the pages since the process will soon need
 *  them back.  Also, this avoids worrying about deadlocks
 *  twixt flushmmu() and putpage().
 *
 *  mmurelease() will give back the pages when the process
 *  exits.
 */
static Page*
mmugetpage(int clear)
{
	Proc *p = u->p;
	Page *pg;
.
158c
	tss.cr3 = ktoppg.pa;
.
150,151c
	top[x] = y | PTEVALID | PTEKERNEL | PTEWRITE;
	putcr3(ktoppg.pa);
.
147c
		top[x+i] = (y+i*BY2PG) | PTEVALID | PTEKERNEL | PTEWRITE;
.
143c
	top = ialloc(BY2PG, 1);
	ktoppg.va = (ulong)top;
	ktoppg.pa = ktoppg.va & ~KZERO;
.
126c
	 *  leave a map entry for a user area.
.
110a
	ulong *top;
.
104a
/*
 *  Create a prototype page map that maps all of memory into
 *  kernel (KZERO) space.  This is the default map.  It is used
 *  whenever the processor not running a process or whenever running
 *  a process which does not yet have its own map.
 */
.
72,77c
static Page	ktoppg;		/* prototype top level page table
				 * containing kernel mappings  */
static ulong	*kpt;		/* 2nd level page tables for kernel mem */
static ulong	*upt;		/* 2nd level page table for struct User */
.
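
flushmmu() in this revision recycles page-table pages instead of freeing them: the top-level page is pushed onto mmufree and the whole mmuused chain is appended at the tail. A standalone sketch of just that pointer surgery, with Page reduced to a number and a next pointer:

#include <stdio.h>

typedef struct Page Page;
struct Page
{
	int	n;
	Page	*next;
};

int
main(void)
{
	static Page top = {0}, used[2] = {{1}, {2}}, old = {9};
	Page *mmufree = &old, *mmuused = used, *pg;

	used[0].next = &used[1];

	/* the flushmmu splice: push mmutop, append mmuused at the tail */
	top.next = mmufree;
	mmufree = &top;
	for(pg = mmufree; pg->next; pg = pg->next)
		;
	pg->next = mmuused;

	for(pg = mmufree; pg; pg = pg->next)
		printf("page %d\n", pg->n);	/* 0 9 1 2 */
	return 0;
}
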
## diffname pc/mmu.c 1991/1005
## diff -e /n/bootesdump/1991/1004/sys/src/9/safari/mmu.c /n/bootesdump/1991/1005/sys/src/9/safari/mmu.c
316,317c
		/*
		 *  N.B. The assignment to pg is necessary.
		 *  We can't assign to p->mmutop until after
		 *  copying ktoppg into the new page since we might
		 *  get scheded in this code and p->mmutop will be
		 *  pointing to a bad map.
		 */
		pg = mmugetpage(0);
		memmove((void*)pg->va, (void*)ktoppg.va, BY2PG);
		p->mmutop = pg;
.
299a
	int x;
.
## diffname pc/mmu.c 1991/1210
## diff -e /n/bootesdump/1991/1005/sys/src/9/safari/mmu.c /n/bootesdump/1991/1210/sys/src/9/safari/mmu.c
157a

.
154a

	/*  page table for u-> */
	upt = ialloc(BY2PG, 1);
.
140,150c
		kpt[i] = (0+i*BY2PG) | PTEVALID | PTEKERNEL | PTEWRITE;
.
134c
	/*  allocate top level table */
	top = ialloc(BY2PG, 1);
	ktoppg.va = (ulong)top;
	ktoppg.pa = ktoppg.va & ~KZERO;

	/*  map all memory to KZERO */
.
130a
	 *  map ROM BIOS at the usual place (F0000000).
.
## diffname pc/mmu.c 1991/1211
## diff -e /n/bootesdump/1991/1210/sys/src/9/safari/mmu.c /n/bootesdump/1991/1211/sys/src/9/safari/mmu.c
162a
	memset(&tss, 0, sizeof(tss));
.
## diffname pc/mmu.c 1991/1214
## diff -e /n/bootesdump/1991/1211/sys/src/9/safari/mmu.c /n/bootesdump/1991/1214/sys/src/9/safari/mmu.c
361,366d
131d
120,122d
118c
	 *  set up the global descriptor table. we make the tss entry here
	 *  since it requires arithmetic on an address and hence cannot
	 *  be a compile or link time constant.
.
90,102d
68d
## diffname pc/mmu.c 1992/0116
## diff -e /n/bootesdump/1991/1214/sys/src/9/safari/mmu.c /n/bootesdump/1992/0116/sys/src/9/safari/mmu.c
56c
			  ((b)&0xFF000000)|(((b)>>16)&0xFF)|SEGTSS|SEGPL(p)|SEGP }
.
## diffname pc/mmu.c 1992/0131
## diff -e /n/bootesdump/1992/0116/sys/src/9/safari/mmu.c /n/bootesdump/1992/0131/sys/src/9/safari/mmu.c
324a
		pg->daddr = topoff;
.
238a
	if(p->mmutop)
		pg = p->mmutop;
	else
		pg = &ktoppg;
.
230,234d
202,208c
	if(p->mmutop && p->mmuused){
		top = (ulong*)p->mmutop->va;
		for(pg = p->mmuused; pg->next; pg = pg->next)
			top[pg->daddr] = 0;
		top[pg->daddr] = 0;
		pg->next = p->mmufree;
		p->mmufree = p->mmuused;
.
195a
	ulong *top;
.
188a
 *
 *  Don't free the top level page.  Just zero the used entries.  This
 *  avoids a 4k copy each flushmmu.
.
186c
 *  Put all bottom level page map pages on the process's free list and
.
## diffname pc/mmu.c 1992/0321
## diff -e /n/bootesdump/1992/0131/sys/src/9/safari/mmu.c /n/bootesdump/1992/0321/sys/src/9/safari/mmu.c
87c
#define BTMOFF(v)	((((ulong)(v))>>(PGSHIFT))&(WD2PG-1))
.
81c
#define TOPOFF(v)	(((ulong)(v))>>(2*PGSHIFT-2))
.
2c
#include	"../port/lib.h"
.
## diffname pc/mmu.c 1992/0617
## diff -e /n/bootesdump/1992/0321/sys/src/9/safari/mmu.c /n/bootesdump/1992/0617/sys/src/9/safari/mmu.c
232a

	if(p->newtlb){
		/*
		 *  bin the current second level page tables.  newtlb
		 *  set means that they are inconsistent with the segment.c
		 *  data structures.
		 */
		if(p->mmutop && p->mmuused){
			top = (ulong*)p->mmutop->va;
			for(pg = p->mmuused; pg->next; pg = pg->next)
				top[pg->daddr] = 0;
			top[pg->daddr] = 0;
			pg->next = p->mmufree;
			p->mmufree = p->mmuused;
			p->mmuused = 0;
		}
		p->newtlb = 0;
	}
.
229a
	ulong *top;
.
206,215c
	mapstack(p);
.
204a
	p->newtlb = 1;
.
203d
198,199d
## diffname pc/mmu.c 1992/0625
## diff -e /n/bootesdump/1992/0617/sys/src/9/safari/mmu.c /n/bootesdump/1992/0625/sys/src/9/safari/mmu.c
137c
	upt = xspanalloc(BY2PG, BY2PG, 0);
.
128c
	kpt = xspanalloc(nbytes, BY2PG, 0);
.
125c
	npage = conf.topofmem/BY2PG;
.
120c
	top = xspanalloc(BY2PG, BY2PG, 0);
.
## diffname pc/mmu.c 1992/0711
## diff -e /n/bootesdump/1992/0625/sys/src/9/safari/mmu.c /n/bootesdump/1992/0711/sys/src/9/safari/mmu.c
295,296d
216,217d
## diffname pc/mmu.c 1992/0804
## diff -e /n/bootesdump/1992/0711/sys/src/9/safari/mmu.c /n/bootesdump/1992/0804/sys/src/9/safari/mmu.c
342a
	splx(s);
.
332d
326,328c
	s = splhi();
	if(PPN(top[topoff]) == 0){
		if(p->mmufree == 0){
			spllo();
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
			splhi();
		} else {
			pg = p->mmufree;
			p->mmufree = pg->next;
			memset((void*)pg->va, 0, BY2PG);
		}
.
323,324c
	 *  if bottom level page table missing, allocate one 
	 *  and point the top level page at it.
.
320a
	topoff = TOPOFF(va);
.
309,316c
		pg = newpage(0, 0, 0);
		pg->va = VA(kmap(pg));
.
305,306c
	 *  create a top level page if we don't already have one.
	 *  copy the kernel top level page into it for kernel mappings.
.
299,303d
296d
292a
	int s;
.
285d
228,236c
		if(p->mmutop)
			memmove((void*)p->mmutop->va, (void*)ktoppg.va, BY2PG);
		l = &p->mmufree;
		for(pg = p->mmufree; pg; pg = pg->next)
			l = &pg->next;
		*l = p->mmuused;
		p->mmuused = 0;
.
224,226c
		 *  bin the current second level page tables.
		 *  newtlb set means that they are inconsistent
		 *  with the segment.c data structures.
.
216,217c
	Page *pg, **l;
.
204c
	if(u){
		u->p->newtlb = 1;
		mapstack(u->p);
	} else
		putcr3(ktoppg.pa);
.
199,202d
197d
167,192d
155,165c
 *  Mark the mmu and tlb as inconsistent and call mapstack to fix it up.
.
88a
#define MAXUMEG 64	/* maximum memory per user process in megabytes */
#define ONEMEG (1024*1024)


.
## diffname pc/mmu.c 1992/0805
## diff -e /n/bootesdump/1992/0804/sys/src/9/safari/mmu.c /n/bootesdump/1992/0805/sys/src/9/safari/mmu.c
291a
		pg->daddr = topoff;
.
194,200c
		if(p->mmutop && p->mmuused){
			top = (ulong*)p->mmutop->va;
			for(pg = p->mmuused; pg->next; pg = pg->next)
				top[pg->daddr] = 0;
			top[pg->daddr] = 0;
			pg->next = p->mmufree;
			p->mmufree = p->mmuused;
			p->mmuused = 0;
		}
.
192a
		 *
		 *  bin the current second level page tables and
		 *  the pointers to them in the top level page.
		 *  pg->daddr is used by putmmu to save the offset into
		 *  the top level page.
.
190d
183c
	Page *pg;
	ulong *top;
.
## diffname pc/mmu.c 1992/0929
## diff -e /n/bootesdump/1992/0808/sys/src/9/safari/mmu.c /n/bootesdump/1992/0929/sys/src/9/pc/mmu.c
322a
}

/*
 *  allocate some address space (already mapped into the kernel)
 *  for ISA bus memory.
 */
ulong
isamem(int len)
{
	ulong a, x;

	lock(&isamemalloc);
	len = PGROUND(len);
	x = isamemalloc.addr + len;
	if(x > isamemalloc.end)
		panic("isamem");
	a = isamemalloc.addr;
	isamemalloc.addr = x;
	unlock(&isamemalloc);
	return a;
.
129a
	if(conf.topofmem < 64*MB){
		/* for ISA bus memory */
		isamemalloc.addr = conf.topofmem;
		isamemalloc.end = conf.topofmem + 2*MB;
		if(isamemalloc.end > 64*MB)
			isamemalloc.end = 64*MB;
		npage += (isamemalloc.end - isamemalloc.addr)/BY2PG;
	}
.
128c
	/*  map all memory to KZERO (plus 1 meg for PCMCIA window) */
.
91a
struct
{
	ulong addr; 	/* next available address for isa bus memory */
	ulong end;	/* one past available isa bus memory */
} isamemalloc;
.
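
isamem() is a page-granularity bump allocator over a fixed window: round the request up to a whole page, advance addr, and give up (with a panic, in the kernel) when the window is exhausted. A single-threaded standalone sketch of the same pattern, with assumed window addresses and the kernel's Lock omitted:

#include <stdio.h>

#define BY2PG	4096
#define PGROUND(s)	(((s)+BY2PG-1)&~(BY2PG-1))

static struct {
	unsigned long addr;	/* next available address */
	unsigned long end;	/* one past the window */
} isamemalloc = { 0x4000000, 0x4200000 };	/* assumed 2MB window */

unsigned long
isamem(int len)
{
	unsigned long a, x;

	len = PGROUND(len);
	x = isamemalloc.addr + len;
	if(x > isamemalloc.end)
		return 0;	/* the kernel panics here instead */
	a = isamemalloc.addr;
	isamemalloc.addr = x;
	return a;
}

int
main(void)
{
	printf("%#lx\n", isamem(100));	/* rounded up to one page */
	printf("%#lx\n", isamem(8192));
	return 0;
}
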
## diffname pc/mmu.c 1992/0930
## diff -e /n/bootesdump/1992/0929/sys/src/9/pc/mmu.c /n/bootesdump/1992/0930/sys/src/9/pc/mmu.c
133,142c
	/*  map all memory to KZERO (add some address space for ISA memory) */
	isamemalloc.addr = conf.topofmem;
	isamemalloc.end = conf.topofmem + ISAMEMSIZE;
	if(isamemalloc.end > 64*MB)
		isamemalloc.end = 64*MB;	/* ISA can only access 64 meg */
	npage = isamemalloc.end/BY2PG;
.
## diffname pc/mmu.c 1992/1004
## diff -e /n/bootesdump/1992/0930/sys/src/9/pc/mmu.c /n/bootesdump/1992/1004/sys/src/9/pc/mmu.c
93a
	Lock;
.
## diffname pc/mmu.c 1993/0120
## diff -e /n/bootesdump/1992/1004/sys/src/9/pc/mmu.c /n/bootesdump/1993/0120/sys/src/9/pc/mmu.c
353a

/*
 *  mapping for user access to d segment (access to TARGA)
 */
Page*
dsegalloc(Segment *s, ulong va)
{
	Page *pg;

	pg = smalloc(sizeof(Page));
	memset(pg, 0, sizeof(Page));
	pg->va = va;
	pg->pa = 0xd0000 + (va - s->base);
	pg->ref = 1;
	return pg;
}

void
dsegfree(Page *pg)
{
	int x;

	lock(pg);
	x = --pg->ref;
	unlock(pg);
	if(x <= 0)
		free(pg);
}
.
## diffname pc/mmu.c 1993/0210
## diff -e /n/bootesdump/1993/0120/sys/src/9/pc/mmu.c /n/bootesdump/1993/0210/sys/src/9/pc/mmu.c
354,381d
## diffname pc/mmu.c 1993/0915
## diff -e /n/bootesdump/1993/0210/sys/src/9/pc/mmu.c /n/fornaxdump/1993/0915/sys/src/brazil/pc/mmu.c
352a
}

/*
 *  used to map a page into 16 meg - BY2PG for confinit(). tpt is the temporary
 *  page table set up by l.s.
 */
long*
mapaddr(ulong addr)
{
	ulong base;
	ulong off;
	static ulong *pte, top;
	extern ulong tpt[];

	if(pte == 0){
		top = (((ulong)tpt)+(BY2PG-1))&~(BY2PG-1);
		pte = (ulong*)top;
		top &= ~KZERO;
		top += BY2PG;
		pte += (4*1024*1024-BY2PG)>>PGSHIFT;
	}

	base = off = addr;
	base &= ~(KZERO|(BY2PG-1));
	off &= BY2PG-1;

	*pte = base|PTEVALID|PTEKERNEL|PTEWRITE; /**/
	putcr3((ulong)top);

	return (long*)(KZERO | 4*1024*1024-BY2PG | off);
.
325,334d
321c
	taskswitch(up->mmutoup->pa, up->kstack);
.
310,311c
		pg->next = up->mmuused;
		up->mmuused = pg;
.
304,305c
			pg = up->mmufree;
			up->mmufree = pg->next;
.
298c
		if(up->mmufree == 0){
.
289c
	top = (ulong*)up->mmutoup->va;
.
287c
		up->mmutop = pg;
.
283c
	if(up->mmutop == 0){
.
275,278d
272d
244,245c
	/* point 386 to prototype page map and m->stack */
	taskswitch(ktoppg.pa, BY2PG + (ulong)m);
.
228,231c
		taskswitch(ktoppg.pa, p->kstack+KSTACK);
.
226c
		taskswitch(p->mmutop->pa, p->kstack+KSTACK);
.
221,223d
196,198d
191c
mmuswitch(Proc *p)
.
188c
 *  mappings for only the kernel.
.
177,181c
	up->newtlb = 1;
	mmuswitch(up);
.
162,164c
	taskswitch(ktoppg.pa, BY2BG + (ulong)m);
.
150,157d
99a
 *  Change current page table and the stack to use for exceptions
 *  (traps & interrupts).  The exception stack comes from the tss.
 *  Since we use only one tss, (we hope) there's no need for a
 *  puttr().
 */
static void
taskswitch(ulong pagetbl, ulong stack)
{
	putcr3(pagetbl);
	tss.ss0 = KDSEL;
	tss.sp0 = stack;
	tss.cr3 = pagetbl;
}

/*
.
74d
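
mapaddr() points the last pte of the boot-time 4MB map at the page containing addr, so any physical datum becomes visible through the same one-page window just below 4MB in kernel space. A standalone sketch of the address arithmetic only (the example physical address is arbitrary):

#include <stdio.h>

#define KZERO	0x80000000UL
#define BY2PG	4096UL
#define FOURMEG	(4UL*1024*1024)

int
main(void)
{
	unsigned long addr = 0x00fff123UL;	/* datum to peek at */
	unsigned long base = addr & ~(KZERO|(BY2PG-1));	/* page to map */
	unsigned long off = addr & (BY2PG-1);

	/* *pte = base|...; the caller then reads through the fixed window */
	printf("pte -> %#lx, window va %#lx\n",
		base, KZERO | (FOURMEG-BY2PG) | off);
	return 0;
}
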
## diffname pc/mmu.c 1993/1013
## diff -e /n/fornaxdump/1993/0915/sys/src/brazil/pc/mmu.c /n/fornaxdump/1993/1013/sys/src/brazil/pc/mmu.c
308c
	taskswitch(up->mmutop->pa, (ulong)up->kstack);
.
276c
	top = (ulong*)up->mmutop->va;
.
223c
		taskswitch(ktoppg.pa, (ulong)(p->kstack+KSTACK));
.
221c
		taskswitch(p->mmutop->pa, (ulong)(p->kstack+KSTACK));
.
168c
	taskswitch(ktoppg.pa, BY2PG + (ulong)m);
.
## diffname pc/mmu.c 1993/1113
## diff -e /n/fornaxdump/1993/1013/sys/src/brazil/pc/mmu.c /n/fornaxdump/1993/1113/sys/src/brazil/pc/mmu.c
308c
	/*taskswitch(up->mmutop->pa, (ulong)(up->kstack+KSTACK));/**/
	putcr3(up->mmutop->pa);/**/
.
225a
void
simpleputpage(Page *pg)
{
	Rendez *r;

	if(pg->ref != 1)
		panic("simpleputpage");

	pg->ref = 0;
	if(palloc.head){
		pg->next = palloc.head;
		palloc.head->prev = pg;
	}
	else {
		palloc.tail = pg;
		pg->next = 0;
	}
	palloc.head = pg;
	pg->prev = 0;

	palloc.freecol[pg->color]++;
	r = &palloc.r[pg->color];
	if(r->p != 0)
		wakeup(r);
}

.
173c
 *  Mark the mmu and tlb as inconsistent and call mmuswitch to fix it up.
.
169c
	puttr(TSSSEL);/**/
.
110a
	putcr3(pagetbl);
.
107d
## diffname pc/mmu.c 1993/1115
## diff -e /n/fornaxdump/1993/1113/sys/src/brazil/pc/mmu.c /n/fornaxdump/1993/1115/sys/src/brazil/pc/mmu.c
108a
tss.ss1 = KDSEL;
tss.sp1 = stack;
tss.ss2 = KDSEL;
tss.sp2 = stack;
.
## diffname pc/mmu.c 1993/1124
## diff -e /n/fornaxdump/1993/1115/sys/src/brazil/pc/mmu.c /n/fornaxdump/1993/1124/sys/src/brazil/pc/mmu.c
250,253c
	palloc.freecount++;
	if(palloc.r.p != 0)
		wakeup(&palloc.r);
.
233,234d
230c
static void
.
## diffname pc/mmu.c 1994/0707
## diff -e /n/fornaxdump/1993/1124/sys/src/brazil/pc/mmu.c /n/fornaxdump/1994/0707/sys/src/brazil/pc/mmu.c
120c
 *  whenever the processor is not running a process or whenever running
.
## diffname pc/mmu.c 1994/0819
## diff -e /n/fornaxdump/1994/0707/sys/src/brazil/pc/mmu.c /n/fornaxdump/1994/0819/sys/src/brazil/pc/mmu.c
154,156c
	isamemalloc.end = 64*MB;
.
## diffname pc/mmu.c 1994/1209
## diff -e /n/fornaxdump/1994/0819/sys/src/brazil/pc/mmu.c /n/fornaxdump/1994/1209/sys/src/brazil/pc/mmu.c
153,155c
	isamemalloc.addr = 0xd8000;
	isamemalloc.end = 0xe0000;
	npage = 64*MB/BY2PG;
.
## diffname pc/mmu.c 1994/1210
## diff -e /n/fornaxdump/1994/1209/sys/src/brazil/pc/mmu.c /n/fornaxdump/1994/1210/sys/src/brazil/pc/mmu.c
347,355c
	lock(&isaalloc);
	os = s = e = 0;
	for(i = 0; i < Nisa; i++){
		s = os = isaalloc.s[i];
		if(s == 0)
			continue;
		e = isaalloc.e[i];
		if(addr && addr >= s && addr < isaalloc.e[i])
			break;
		if(align > 0)
			s = ((s + align - 1)/align)*align;
		if(e - s >= len)
			break;
	}
	if(i >= Nisa){
		unlock(&isaalloc);
		return 0;
	}

	/* remove */
	isaalloc.s[i] = 0;
	unlock(&isaalloc);

	/* give back edges */
	if(s != os)
		putisa(os, s - os);
	os = s + len;
	if(os != e)
		putisa(os, e - os);

	return KZERO|s;
.
345c
	int i;
	ulong os, s, e;
.
343c
getisa(ulong addr, int len, int align)
.
338a
 *  make isa address space available
 */
void
putisa(ulong addr, int len)
{
	ulong e;
	int i, hole;

	addr &= ~KZERO;

	e = addr + len;
	lock(&isaalloc);
	hole = -1;
	for(i = 0; i < Nisa; i++){
		if(isaalloc.s[i] == e){
			isaalloc.s[i] = addr;
			break;
		}
		if(isaalloc.e[i] == addr){
			isaalloc.e[i] = e;
			break;
		}
		if(isaalloc.s[i] == 0)
			hole = i;
	}
	if(i >= Nisa && hole >= 0){
		isaalloc.s[hole] = addr;
		isaalloc.e[hole] = e;
	}
	unlock(&isaalloc);
}

/*
.
152,155c
	/*  map all memory to KZERO */
	npage = 128*MB/BY2PG;
.
94,96c
	ulong s[Nisa];
	ulong e[Nisa];
} isaalloc;
.
90a
enum {
	Nisa=	256,
};
.
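
getisa() is first-fit over the Nisa ranges: it removes a matching range, takes an aligned chunk of len bytes from it, and hands the unused front and back edges back to the pool through putisa(). A standalone sketch of the edge arithmetic for one range (the range, length and alignment are assumed example values):

#include <stdio.h>

int
main(void)
{
	unsigned long s = 0xd8000, e = 0xe0000;	/* one pool range */
	unsigned long len = 16*1024, align = 0x4000;
	unsigned long a = ((s + align - 1)/align)*align;	/* aligned start */

	if(a + len <= e){
		printf("allocated %#lx-%#lx\n", a, a+len);
		if(a != s)
			printf("front edge %#lx-%#lx back to pool\n", s, a);
		if(a+len != e)
			printf("back edge %#lx-%#lx back to pool\n", a+len, e);
	}
	return 0;
}
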
## diffname pc/mmu.c 1995/0209
## diff -e /n/fornaxdump/1994/1210/sys/src/brazil/pc/mmu.c /n/fornaxdump/1995/0209/sys/src/brazil/pc/mmu.c
389c
		if(addr && addr >= s && addr < e)
.
380c
	long os, s, e;
.
## diffname pc/mmu.c 1995/0406
## diff -e /n/fornaxdump/1995/0209/sys/src/brazil/pc/mmu.c /n/fornaxdump/1995/0406/sys/src/brazil/pc/mmu.c
442a
}

/*
 *  get non-ISA memory space
 */
ulong
getspace(int len, int span)
{
	ulong x;

	lock(&msalloc);
	x = msalloc.s;
	if(span)
		x = ROUND(x, span);
	if(len > msalloc.e - x){
		unlock(&msalloc);
		return 0;
	}
	msalloc.s = x + len;
	unlock(&msalloc);

	return x | KZERO;
.
172a

	/*
	 *  allocatable, non ISA memory
	 */
	if(conf.topofmem > 16*1024*1024)
		msalloc.s = conf.topofmem;
	else
		msalloc.s = 16*1024*1024;
	msalloc.e = 128*1024*1024;
.
100a
/* unallocated space */
struct
{
	Lock;
	ulong s;
	ulong e;
} msalloc;

.
90a
/* unallocated ISA space */
.
## diffname pc/mmu.c 1995/0418
## diff -e /n/fornaxdump/1995/0406/sys/src/brazil/pc/mmu.c /n/fornaxdump/1995/0418/sys/src/brazil/pc/mmu.c
170a
	for(i = 0xa0000/BY2PG; i < 0xC0000/BY2PG; i++)
		kpt[i] = (0+i*BY2PG) | PTEVALID | PTEKERNEL | PTEWRITE | PTEWT;
	for(i = 0xC0000/BY2PG; i < MB/BY2PG; i++)
		kpt[i] = (0+i*BY2PG) | PTEVALID | PTEKERNEL | PTEWRITE | PTEUNCACHED;
	for(i = conf.topofmem/BY2PG; i < 128*MB/BY2PG; i++)
		kpt[i] = (0+i*BY2PG) | PTEVALID | PTEKERNEL | PTEWRITE | PTEUNCACHED;
.
## diffname pc/mmu.c 1997/0327
## diff -e /n/fornaxdump/1995/0418/sys/src/brazil/pc/mmu.c /n/emeliedump/1997/0327/sys/src/brazil/pc/mmu.c
361,488d
357,359c
	s = splhi();
	pdb[PDX(MACHADDR)] = ((ulong*)m->pdb)[PDX(MACHADDR)];
	putcr3(up->mmupdb->pa);
.
351,355c
	pt = (ulong*)(PPN(pdb[pdbx])|KZERO);
	pt[PTX(va)] = pa|ptebits|PTEUSER;
.
345,346c
		pdb[pdbx] = PPN(pg->pa)|ptebits|PTEUSER|PTEWRITE|PTEVALID;
		pg->daddr = pdbx;
.
339,340c
		}
		else {
.
336d
329,334c
	if(PPN(pdb[pdbx]) == 0){
.
316,327c
	if(up->mmupdb == 0)
		up->mmupdb = mmupdballoc();
	pdb = (ulong*)up->mmupdb->va;
	pdbx = PDX(va);
.
311,313c
	int pdbx;
	ulong *pdb, *pt;
.
305,307d
299,302c
	splx(s);
	return pg;
.
294,297c
	else{
		pg = m->pdbpool;
		m->pdbpool = pg->next;
		m->pdbcnt--;
.
286,292c
	s = splhi();
	if(m->pdbpool == 0){
		spllo();
		pg = newpage(0, 0, 0);
		pg->va = VA(kmap(pg));
		memmove((void*)pg->va, m->pdb, BY2PG);
.
284d
282a
	int s;
.
276,281c
static Page*
mmupdballoc(void)
.
273a
	p->mmufree = 0;
.
271,272c
	for(pg = p->mmufree; pg; pg = next){
		next = pg->next;
		if(--pg->ref)
			panic("mmurelease: pg->ref %d\n", pg->ref);
		pg->ref = 0;
		if(palloc.head){
			pg->next = palloc.head;
			palloc.head->prev = pg;
		}
		else{
			palloc.tail = pg;
			pg->next = 0;
		}
		palloc.head = pg;
		pg->prev = 0;

		palloc.freecount++;
	}
	if(p->mmufree && palloc.r.p)
.
264,269d
259,262c
	/*
	 * Release any pages allocated for a page directory base or page-tables
	 * for this process:
	 *   switch to the prototype pdb for this processor (m->pdb);
	 *   call mmuptefree() to place all pages used for page-tables (p->mmuused)
	 *   onto the process' free list (p->mmufree). This has the side-effect of
	 *   cleaning any user entries in the pdb (p->mmupdb);
	 *   if there's a pdb put it in the cache of pre-initialised pdb's
	 *   for this processor (m->pdbpool) or on the process' free list;
	 *   finally, place any pages freed back into the free pool (palloc).
	 * This routine is only called from sched() with palloc locked.
	 */
	taskswitch(PADDR(m->pdb), (ulong)m + BY2PG);
	mmuptefree(p);

	if(p->mmupdb){
		if(m->pdbcnt > 10){
			p->mmupdb->next = p->mmufree;
			p->mmufree = p->mmupdb;
		}
		else{
			p->mmupdb->next = m->pdbpool;
			m->pdbpool = p->mmupdb;
			m->pdbcnt++;
		}
		p->mmupdb = 0;
.
256,257c
	Page *pg, *next;
.
253,254c
void
mmurelease(Proc *p)
.
250c
		taskswitch(PADDR(m->pdb), (ulong)(p->kstack+KSTACK));
.
246,248c
	if(p->mmupdb){
		top = (ulong*)p->mmupdb->va;
		top[PDX(MACHADDR)] = ((ulong*)m->pdb)[PDX(MACHADDR)];
		taskswitch(p->mmupdb->pa, (ulong)(p->kstack+KSTACK));
	}
.
225,242c
		mmuptefree(p);
.
221d
213,217c
static void
mmuptefree(Proc *p)
{
	ulong *pdb;
	Page **lpg, *pg;

	if(p->mmupdb && p->mmuused){
		pdb = (ulong*)p->mmupdb->va;
		lpg = &p->mmuused;
		for(pg = *lpg; pg; pg = pg->next){
			pdb[pg->daddr] = 0;
			lpg = &pg->next;
		}
		*lpg = p->mmufree;
		p->mmufree = p->mmuused;
		p->mmuused = 0;
	}
}

.
199,201d
189,196c
ulong*
mmuwalk(ulong *pdb, ulong va, int create)
{
	ulong *table, x;

	table = &pdb[PDX(va)];
	if(*table == 0){
		if(create == 0)
			return 0;
		x = PADDR((ulong)xspanalloc(BY2PG, BY2PG, 0));
		*table = x|ptebits|PTEWRITE|PTEVALID;
	}
	table = (ulong*)(KZERO|PPN(*table));
	va = PTX(va);
	return &table[va];
.
182,187c
	taskswitch(PADDR(m->pdb),  (ulong)m + BY2PG);
	ltr(TSSSEL);
}
.
164,180c
	ptr[0] = sizeof(Segdesc)*256;
	x = IDTADDR;
	ptr[1] = x & 0xFFFF;
	ptr[2] = (x>>16) & 0xFFFF;
	lidt(ptr);
.
159,162c
	ptr[0] = sizeof(m->gdt);
	x = (ulong)m->gdt;
	ptr[1] = x & 0xFFFF;
	ptr[2] = (x>>16) & 0xFFFF;
	lgdt(ptr);
.
153,157c
	memmove(m->gdt, gdt, sizeof(m->gdt));
	x = (ulong)m->tss;
	m->gdt[TSSSEG].d0 = (x<<16)|sizeof(Tss);
	m->gdt[TSSSEG].d1 = (x&0xFF000000)|((x>>16)&0xFF)|SEGTSS|SEGPL(0)|SEGP;
.
143,151c
	m->tss = malloc(sizeof(Tss));
	memset(m->tss, 0, sizeof(Tss));
.
140,141c
	ushort ptr[3];
.
138d
129,134d
119,125c
	Tss *tss;

	tss = m->tss;
	tss->ss0 = KDSEL;
	tss->esp0 = stack;
	tss->ss1 = KDSEL;
	tss->esp1 = stack;
	tss->ss2 = KDSEL;
	tss->esp2 = stack;
	tss->cr3 = pagetbl;
.
82,115d
75,80c
static int ptebits = 0;
.
71,73c
#define PDX(va)		((((ulong)(va))>>22) & 0x03FF)
#define PTX(va)		((((ulong)(va))>>12) & 0x03FF)
.
58,61c
Segdesc gdt[6] =
.
52,54d
8,49d
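
mmuwalk() walks the two-level tree: level 1 returns the page-directory entry for va, level 2 descends into the page table, allocating it when create is set. A standalone sketch of the same walk over heap arrays (PDX/PTX copied from the revision above; real directory entries hold physical page addresses, for which the side array pts stands in here):

#include <stdio.h>
#include <stdlib.h>

typedef unsigned long ulong;

#define PDX(va)		((((ulong)(va))>>22) & 0x03FF)
#define PTX(va)		((((ulong)(va))>>12) & 0x03FF)
#define PTEVALID	(1<<0)

ulong*
walk(ulong *pdb, ulong **pts, ulong va, int level, int create)
{
	ulong *table = &pdb[PDX(va)];

	if(!(*table & PTEVALID) && !create)
		return 0;
	if(level == 1)
		return table;
	if(!(*table & PTEVALID)){	/* allocate the 2nd level table */
		pts[PDX(va)] = calloc(1024, sizeof(ulong));
		*table |= PTEVALID;
	}
	return &pts[PDX(va)][PTX(va)];
}

int
main(void)
{
	static ulong pdb[1024];
	static ulong *pts[1024];
	ulong *pte = walk(pdb, pts, 0x80123456UL, 2, 1);

	*pte = 0x123000 | PTEVALID;	/* install a mapping */
	printf("pte %#lx\n", *walk(pdb, pts, 0x80123456UL, 2, 0));
	return 0;
}
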
## diffname pc/mmu.c 1997/1011
## diff -e /n/emeliedump/1997/0327/sys/src/brazil/pc/mmu.c /n/emeliedump/1997/1011/sys/src/brazil/pc/mmu.c
252a
}

ulong*
mmuwalk(ulong* pdb, ulong va, int create)
{
	ulong pa, *table;

	table = &pdb[PDX(va)];
	if(*table == 0){
		if(create == 0)
			return 0;
		pa = PADDR((ulong)xspanalloc(BY2PG, BY2PG, 0));
		*table = pa|PTEWRITE|PTEVALID;
	}
	if(*table & PTESIZE)
		return table;

	table = (ulong*)(KZERO|PPN(*table));
	va = PTX(va);

	return &table[va];
}

static Lock mmukmaplock;

int
mmukmapsync(ulong va)
{
	Mach *mach0;
	ulong entry;

	mach0 = MACHP(0);

	lock(&mmukmaplock);

	if(mmuwalk(mach0->pdb, va, 0) == nil){
		unlock(&mmukmaplock);
		return 0;
	}
	entry = ((ulong*)mach0->pdb)[PDX(va)];

	if(!(((ulong*)m->pdb)[PDX(va)] & PTEVALID))
		((ulong*)m->pdb)[PDX(va)] = entry;

	if(up && up->mmupdb){
		((ulong*)up->mmupdb->va)[PDX(va)] = entry;
		mmuflushtlb(up->mmupdb->pa);
	}
	else
		mmuflushtlb(PADDR(m->pdb));

	unlock(&mmukmaplock);

	return 1;
}

ulong
mmukmap(ulong pa, ulong va, int size)
{
	Mach *mach0;
	ulong ova, pae, *table, pgsz, *pte;

	pa &= ~(BY2PG-1);
	if(va == 0)
		va = (ulong)KADDR(pa);
	va &= ~(BY2PG-1);
	ova = va;

	pae = pa + size;
	mach0 = MACHP(0);
	lock(&mmukmaplock);
	while(pa < pae){
		table = &((ulong*)mach0->pdb)[PDX(va)];
		if((pa % (4*MB)) == 0 && (mach0->cpuiddx & 0x08)){
			*table = pa|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = 4*MB;
		}
		else{
			pte = mmuwalk(mach0->pdb, va, 1);
			*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = BY2PG;
		}
		pa += pgsz;
		va += pgsz;
	}
	unlock(&mmukmaplock);

	mmukmapsync(ova);

	return pa;
.
251c
	mmuflushtlb(up->mmupdb->pa);
.
247c
	pt[PTX(va)] = pa|PTEUSER;
.
240c
		pdb[pdbx] = PPN(pg->pa)|PTEUSER|PTEWRITE|PTEVALID;
.
219c
putmmu(ulong va, ulong pa, Page* pg)
.
141c
mmurelease(Proc* p)
.
122c
mmuswitch(Proc* p)
.
103c
mmuptefree(Proc* p)
.
74,90d
26,27d
## diffname pc/mmu.c 1997/1101
## diff -e /n/emeliedump/1997/1011/sys/src/brazil/pc/mmu.c /n/emeliedump/1997/1101/sys/src/brazil/pc/mmu.c
321c
	/*
	 * If something was added
	 * then need to sync up.
	 */
	if(sync)
		mmukmapsync(ova);
.
317a
		sync++;
.
312c
			pte = mmuwalk(mach0->pdb, va, 2, 1);
.
306,307c
		table = &mach0->pdb[PDX(va)];
		/*
		 * Possibly already mapped.
		 */
		if(*table & PTEVALID){
			if(*table & PTESIZE){
				/*
				 * Big page. Does it fit within?
				 * If it does, adjust pgsz so the correct end can be
				 * returned and get out.
				 * If not, adjust pgsz up to the next 4MB boundary
				 * and continue.
				 */
				x = PPN(*table);
				if(x != pa)
					panic("mmukmap1: pa %ux  entry %uX\n",
						pa, *table);
				x += 4*MB;
				if(pae <= x){
					pa = pae;
					break;
				}
				pgsz = x - pa;
				pa += pgsz;
				va += pgsz;

				continue;
			}
			else{
				/*
				 * Little page. Walk to the entry.
				 * If the entry is valid, set pgsz and continue.
				 * If not, make it so, set pgsz, sync and continue.
				 */
				pte = mmuwalk(mach0->pdb, va, 2, 0);
				if(pte && *pte & PTEVALID){
					x = PPN(*pte);
					if(x != pa)
						panic("mmukmap2: pa %ux entry %uX\n",
							pa, *pte);
					pgsz = BY2PG;
					pa += pgsz;
					va += pgsz;
					sync++;

					continue;
				}
			}
		}

		/*
		 * Not mapped. Check if it can be mapped using a big page -
		 * starts on a 4MB boundary, size >= 4MB and processor can do it.
		 * If not a big page, walk the walk, talk the talk.
		 * Sync is set.
		 */
		if(pse && (pa % (4*MB)) == 0 && (pae >= pa+4*MB)){
.
303d
299c
	else
		va = PPN(va);
.
296c
	mach0 = MACHP(0);
	if((mach0->cpuiddx & 0x08) && (getcr4() & 0x10))
		pse = 1;
	else
		pse = 0;
	sync = 0;

	pa = PPN(pa);
.
294c
	ulong ova, pae, *table, pgsz, *pte, x;
	int pse, sync;
.
275,276c
	if(!(m->pdb[PDX(va)] & PTEVALID))
		m->pdb[PDX(va)] = entry;
.
273c
	if(!(*pte & PTESIZE) && mmuwalk(mach0->pdb, va, 2, 0) == nil){
		unlock(&mmukmaplock);
		return 0;
	}
	entry = *pte;
.
269c
	if((pte = mmuwalk(mach0->pdb, va, 1, 0)) == nil){
.
263c
	ulong entry, *pte;
.
254c
		return &table[PTX(va)];
	}
.
251,252c
	case 2:
		if(*table & PTESIZE)
			panic("mmuwalk2: va %uX entry %uX\n", va, *table);
		if(!(*table & PTEVALID)){
			pa = PADDR(xspanalloc(BY2PG, BY2PG, 0));
			*table = pa|PTEWRITE|PTEVALID;
		}
		table = KADDR(PPN(*table));
.
242,248c
	if(!(*table & PTEVALID) && create == 0)
		return 0;

	switch(level){

	default:
		return 0;

	case 1:
.
240a
	/*
	 * Walk the page-table pointed to by pdb and return a pointer
	 * to the entry for virtual address va at the requested level.
	 * If the entry is invalid and create isn't requested then bail
	 * out early. Otherwise, for the 2nd level walk, allocate a new
	 * page-table page and register it in the 1st level.
	 */
.
237c
mmuwalk(ulong* pdb, ulong va, int level, int create)
.
231c
	pdb[PDX(MACHADDR)] = m->pdb[PDX(MACHADDR)];
.
227c
	pt = KADDR(PPN(pdb[pdbx]));
.
114c
		top[PDX(MACHADDR)] = m->pdb[PDX(MACHADDR)];
.
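
The rewritten loop covers [pa, pa+size) with a 4MB page whenever pa is 4MB-aligned and at least 4MB remains, and 4K pages otherwise. A standalone sketch of the coverage arithmetic alone (the 8MB start and 9MB size are assumed example values, and PSE support is taken as given):

#include <stdio.h>

#define MB	(1024*1024)
#define BY2PG	4096

int
main(void)
{
	unsigned long pa = 8*MB, pae = pa + 9*MB;
	int big = 0, little = 0;

	while(pa < pae){
		if(pa % (4*MB) == 0 && pae >= pa + 4*MB){
			pa += 4*MB;	/* big page */
			big++;
		}else{
			pa += BY2PG;	/* little page */
			little++;
		}
	}
	printf("%d 4MB pages, %d 4KB pages\n", big, little);
	return 0;
}
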
## diffname pc/mmu.c 1998/0605
## diff -e /n/emeliedump/1997/1101/sys/src/brazil/pc/mmu.c /n/emeliedump/1998/0605/sys/src/brazil/pc/mmu.c
227,228c
	pte = KADDR(PPN(pdb[pdbx]));
	pte[PTX(va)] = pa|PTEUSER;
.
221,224c
		pdb[pdbx] = PPN(page->pa)|PTEUSER|PTEWRITE|PTEVALID;
		page->daddr = pdbx;
		page->next = up->mmuused;
		up->mmuused = page;
.
217,219c
			page = up->mmufree;
			up->mmufree = page->next;
			memset((void*)page->va, 0, BY2PG);
.
213,214c
			page = newpage(1, 0, 0);
			page->va = VA(kmap(page));
.
203c
	Page *page;
	ulong *pdb, *pte;
.
200c
putmmu(ulong va, ulong pa, Page*)
.
196c
	return page;
.
191,192c
		page = m->pdbpool;
		m->pdbpool = page->next;
.
186,188c
		page = newpage(0, 0, 0);
		page->va = VA(kmap(page));
		memmove((void*)page->va, m->pdb, BY2PG);
.
181c
	Page *page;
.
174c
	proc->mmufree = 0;
.
172c
	if(proc->mmufree && palloc.r.p)
.
154,170c
	for(page = proc->mmufree; page; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %d\n", page->ref);
		pagechainhead(page);
.
151c
		proc->mmupdb = 0;
.
147,148c
			proc->mmupdb->next = m->pdbpool;
			m->pdbpool = proc->mmupdb;
.
143,144c
			proc->mmupdb->next = proc->mmufree;
			proc->mmufree = proc->mmupdb;
.
141c
	if(proc->mmupdb){
.
139c
	mmuptefree(proc);
.
130,132c
	 *   call mmuptefree() to place all pages used for page-tables (proc->mmuused)
	 *   onto the process' free list (proc->mmufree). This has the side-effect of
	 *   cleaning any user entries in the pdb (proc->mmupdb);
.
124c
	Page *page, *next;
.
122c
mmurelease(Proc* proc)
.
118c
		taskswitch(PADDR(m->pdb), (ulong)(proc->kstack+KSTACK));
.
112,115c
	if(proc->mmupdb){
		pdb = (ulong*)proc->mmupdb->va;
		pdb[PDX(MACHADDR)] = m->pdb[PDX(MACHADDR)];
		taskswitch(proc->mmupdb->pa, (ulong)(proc->kstack+KSTACK));
.
107,109c
	if(proc->newtlb){
		mmuptefree(proc);
		proc->newtlb = 0;
.
105c
	ulong *pdb;
.
103c
mmuswitch(Proc* proc)
.
96,98c
		*last = proc->mmufree;
		proc->mmufree = proc->mmuused;
		proc->mmuused = 0;
.
89,94c
	if(proc->mmupdb && proc->mmuused){
		pdb = (ulong*)proc->mmupdb->va;
		last = &proc->mmuused;
		for(page = *last; page; page = page->next){
			pdb[page->daddr] = 0;
			last = &page->next;
.
87c
	Page **last, *page;
.
84c
mmuptefree(Proc* proc)
.
38,39c
	tss->cr3 = pdb;
	putcr3(pdb);
.
27c
taskswitch(ulong pdb, ulong stack)
.
## diffname pc/mmu.c 1998/0906
## diff -e /n/emeliedump/1998/0605/sys/src/brazil/pc/mmu.c /n/emeliedump/1998/0906/sys/src/brazil/pc/mmu.c
361c
						panic("mmukmap2: pa %uX entry %uX\n",
.
338c
					panic("mmukmap1: pa %uX  entry %uX\n",
.
## diffname pc/mmu.c 1999/0216
## diff -e /n/emeliedump/1998/0906/sys/src/brazil/pc/mmu.c /n/emeliedump/1999/0216/sys/src/brazil/pc/mmu.c
66a

	/* make kernel text unwritable */
	for(x = KTZERO; x < (ulong)etext; x += BY2PG){
		p = mmuwalk(m->pdb, x, 2, 0);
		if(p == nil)
			panic("mmuinit");
		*p &= ~PTEWRITE;
	}
.
45c
	ulong x, *p;
.
## diffname pc/mmu.c 2001/0223
## diff -e /n/emeliedump/1999/0216/sys/src/brazil/pc/mmu.c /n/emeliedump/2001/0223/sys/src/9/pc/mmu.c
228a
	coherence();
.
38a
	coherence();
.
## diffname pc/mmu.c 2001/0224
## diff -e /n/emeliedump/2001/0223/sys/src/9/pc/mmu.c /n/emeliedump/2001/0224/sys/src/9/pc/mmu.c
230c
	coherence();	// *** See note at beginning of file ***
.
39c
	coherence();	// *** See note at beginning of file ***
.
7a
/*
 *  There are 2 coherence calls in this file, before each putcr3() (mmuflushtlb
 *  is really a putcr3() call).  They are there because of my IBM 570 and ehg's
 *  IBM 600E.  We found that when the coherence() instructions were removed from
 *  unlock and iunlock, the processors would hang and/or get spurious interrupts.
 *  I posited that we were getting hit by some interaction between the tlb,
 *  cache flushing, and tlb flushing, so I moved the calls here and it seems
 *  to work.
 *
 *  I don't really understand why they'd change anything since the putcr3 is
 *  supposed to be a serializing instruction.  Also, reads and writes are supposed
 *  to be ordered in the view of the processing core.  This is just desperation.
 *  I can only believe that the coherence() is fixing something else.
 *
 *  The 570 is a Celeron and the 600E is a PentiumII/Xeon.  Both screw up when
 *  pounding on PCMCIA devices.
 *
 *  -- presotto
 */

.
## diffname pc/mmu.c 2001/0527
## diff -e /n/emeliedump/2001/0224/sys/src/9/pc/mmu.c /n/emeliedump/2001/0527/sys/src/9/pc/mmu.c
250d
59d
8,27d
## diffname pc/mmu.c 2002/0109
## diff -e /n/emeliedump/2001/0527/sys/src/9/pc/mmu.c /n/emeliedump/2002/0109/sys/src/9/pc/mmu.c
62c
	ptr[0] = sizeof(Segdesc)*256-1;
.
56c
	ptr[0] = sizeof(m->gdt)-1;
.
42a
gdtinit(void)
{
	ulong x;
	ushort ptr[3];

	memmove(m->gdt, gdt, sizeof(m->gdt));

	ptr[0] = sizeof(m->gdt)-1;
	x = (ulong)m->gdt;
	ptr[1] = x & 0xFFFF;
	ptr[2] = (x>>16) & 0xFFFF;
	lgdt(ptr);
}

void
.
23,25d
13c
Segdesc gdt[NGDT] =
.
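
lgdt and lidt take a 6-byte pseudo-descriptor, a 16-bit limit (the table size minus one, the off-by-one this revision fixes) followed by a 32-bit base, built here in three ushorts. A standalone packing sketch with made-up values:

#include <stdio.h>

typedef unsigned short ushort;
typedef unsigned long ulong;

int
main(void)
{
	ushort ptr[3];
	ulong x = 0x80001234UL;	/* pretend gdt address */
	ulong size = 8*6;	/* six 8-byte descriptors */

	ptr[0] = size - 1;		/* limit: offset of the last valid byte */
	ptr[1] = x & 0xFFFF;		/* base 15:0 */
	ptr[2] = (x>>16) & 0xFFFF;	/* base 31:16 */
	printf("limit %#x base %#lx\n", ptr[0],
		(ulong)ptr[1] | (ulong)ptr[2]<<16);
	return 0;
}
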
## diffname pc/mmu.c 2002/0114
## diff -e /n/emeliedump/2002/0109/sys/src/9/pc/mmu.c /n/emeliedump/2002/0114/sys/src/9/pc/mmu.c
381c
						panic("mmukmap2: pa %luX entry %luX\n",
.
358c
					panic("mmukmap1: pa %luX  entry %luX\n",
.
271c
			panic("mmuwalk2: va %luX entry %luX\n", va, *table);
.
## diffname pc/mmu.c 2002/0412
## diff -e /n/emeliedump/2002/0114/sys/src/9/pc/mmu.c /n/emeliedump/2002/0412/sys/src/9/pc/mmu.c
405a
			if((va&KZERO) && m->havepge)
				*pte |= PTEGLOBAL;
.
400a
			if((va&KZERO) && m->havepge)
				*table |= PTEGLOBAL;
.
397a
		 *
		 * If we're creating a kernel mapping, we know that it will never
		 * expire and thus we can set the PTEGLOBAL bit to make the entry
		 * persist in the TLB across flushes.  If we do add support later for
		 * unmapping kernel addresses, see devarch.c for instructions on
		 * how to do a full TLB flush.
.
68c
	ptr[0] = sizeof(gdt)-1;
.
63c
	/*
	 * We used to keep the GDT in the Mach structure, but it
	 * turns out that that slows down access to the rest of the
	 * page.  Since the Mach structure is accessed quite often,
	 * it pays off anywhere from a factor of 1.25 to 2 on real
	 * hardware to separate them (the AMDs are more sensitive
	 * than Intels in this regard).  Under VMware it pays off
	 * a factor of about 10 to 100.
	 */

	memmove(m->gdt, gdt, sizeof gdt);
.
59a
	memglobal();

.
47,51c
	if(!m->havepge)
		return;

	pde = m->pdb;
	for(i=512; i<1024; i++){	/* 512: start at entry for virtual 0x80000000 */
		if(pde[i] & PTEVALID){
			pde[i] |= PTEGLOBAL;
			if(!(pde[i] & PTESIZE)){
				pte = KADDR(pde[i]&~(BY2PG-1));
				for(j=0; j<1024; j++)
					if(pte[j] & PTEVALID)
						pte[j] |= PTEGLOBAL;
			}
		}
	}			
.
45c
	/* only need to do this once, on bootstrap processor */
	if(m->machno != 0)
		return;
.
42,43c
	int i, j;
	ulong *pde, *pte;
.
39,40c
/* 
 * On processors that support it, we set the PTEGLOBAL bit in
 * page table and page directory entries that map kernel memory.
 * Doing this tells the processor not to bother flushing them
 * from the TLB when doing the TLB flush associated with a 
 * context switch (write to CR3).  Since kernel memory mappings
 * are never removed, this is safe.  (If we ever remove kernel memory
 * mappings, we can do a full flush by turning off the PGE bit in CR4,
 * writing to CR3, and then turning the PGE bit back on.) 
 *
 * See also mmukmap below.
 * 
 * Processor support for the PTEGLOBAL bit is enabled in devarch.c.
 */
static void
memglobal(void)
.
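
The memglobal() comment spells out the escape hatch should PTEGLOBAL kernel mappings ever need to be shot down: clear the PGE bit in CR4, reload CR3, then restore PGE. A standalone sketch of that recipe with stand-in control registers (the stubs only mimic the kernel's l.s accessors):

#include <stdio.h>

typedef unsigned long ulong;

enum { PGE = 0x80 };	/* CR4 bit 7: global-page enable */

/* stand-ins for the kernel's register accessors */
static ulong cr3, cr4 = PGE;
static ulong getcr3(void) { return cr3; }
static ulong getcr4(void) { return cr4; }
static void putcr4(ulong v) { cr4 = v; }
static void
putcr3(ulong v)
{
	cr3 = v;
	printf("cr3 reload: %s entries flushed\n", (cr4&PGE)? "non-global" : "all");
}

int
main(void)
{
	ulong x = getcr4();

	putcr4(x & ~PGE);	/* global entries lose their exemption */
	putcr3(getcr3());	/* this write now flushes PTEGLOBAL entries too */
	putcr4(x);		/* turn global pages back on */
	return 0;
}
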
## diffname pc/mmu.c 2002/0430
## diff -e /n/emeliedump/2002/0412/sys/src/9/pc/mmu.c /n/emeliedump/2002/0430/sys/src/9/pc/mmu.c
58a
return;
.
## diffname pc/mmu.c 2002/0501
## diff -e /n/emeliedump/2002/0430/sys/src/9/pc/mmu.c /n/emeliedump/2002/0501/sys/src/9/pc/mmu.c
59d
## diffname pc/mmu.c 2003/0405
## diff -e /n/emeliedump/2002/0501/sys/src/9/pc/mmu.c /n/emeliedump/2003/0405/sys/src/9/pc/mmu.c
460c
	iunlock(&mmukmaplock);
.
379c
	ilock(&mmukmaplock);
.
352c
	iunlock(&mmukmaplock);
.
337c
		iunlock(&mmukmaplock);
.
333c
		iunlock(&mmukmaplock);
.
330c
	ilock(&mmukmaplock);
.
