diff -N -c -r /usr/src/sys/9fs/9auth.c ./9fs/9auth.c
*** /usr/src/sys/9fs/9auth.c Wed Dec 31 19:00:00 1969
--- ./9fs/9auth.c Mon May 22 17:11:29 2000
***************
*** 0 ****
--- 1,238 ----
+ #include <sys/param.h>
+ #include <sys/systm.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/protosw.h>
+ #include <sys/malloc.h>
+ #include <sys/mbuf.h>
+ #include <sys/uio.h>
+
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+
+ #define N2HCHAR(x) x = *p++
+ #define N2HSHORT(x) x = (p[0] | (p[1]<<8)); p += 2
+ #define N2HLONG(x) x = (p[0] | (p[1]<<8) |\
+ (p[2]<<16) | (p[3]<<24)); p += 4
+ #define N2HQUAD(x) x = (u_int64_t)(p[0] | (p[1]<<8) |\
+ (p[2]<<16) | (p[3]<<24)) |\
+ ((u_int64_t)(p[4] | (p[5]<<8) |\
+ (p[6]<<16) | (p[7]<<24)) << 32); p += 8
+ #define N2HSTRING(x,n) bcopy(p, x, n); p += n
+
+ #define H2NCHAR(x) *p++ = x
+ #define H2NSHORT(x) p[0]=x; p[1]=x>>8; p += 2
+ #define H2NLONG(x) p[0]=x; p[1]=x>>8; p[2]=x>>16; p[3]=x>>24; p += 4
+ #define H2NQUAD(x) p[0]=x; p[1]=x>>8;\
+ p[2]=x>>16; p[3]=x>>24;\
+ p[4]=x>>32; p[5]=x>>40;\
+ p[6]=x>>48; p[7]=x>>56;\
+ p += 8
+ #define H2NSTRING(x,n) bcopy(x, p, n); p += n
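+ /*
+ * The macros above marshal scalar fields to and from the 9P wire
+ * representation, which is little-endian regardless of host byte
+ * order: N2H* unpack received bytes, H2N* pack outgoing ones.
+ */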
+
+ static int u9auth_send __P((struct socket *so, struct mbuf *top, struct proc *p));
+ static int u9auth_recv __P((struct socket *so, struct mbuf **mp, struct proc *p));
+
+ static int u9auth_count = 0;
+
+ static int u9auth_tr2m(struct u9auth_ticketreq *f, char *ap)
+ {
+ int n;
+ u_char *p;
+
+ p = (u_char*)ap;
+ H2NCHAR(f->type);
+ H2NSTRING(f->authid, U9FS_NAMELEN);
+ H2NSTRING(f->authdom, U9FS_DOMLEN);
+ H2NSTRING(f->chal, U9FS_CHALLEN);
+ H2NSTRING(f->hostid, U9FS_NAMELEN);
+ H2NSTRING(f->uid, U9FS_NAMELEN);
+ n = p - (u_char*)ap;
+ return n;
+ }
+
+ static struct mbuf * u9auth_m_tr2m(struct u9auth_ticketreq * tktq)
+ {
+ register struct mbuf *m;
+ char * ap;
+ int sz = U9AUTH_TICKREQLEN; /* marshalled size of a ticket request, 141 bytes */
+
+ MGETHDR(m, M_WAIT, MT_DATA);
+ if( sz > MHLEN )
+ MCLGET(m, M_WAIT);
+ m->m_len = 0;
+
+ if ( M_TRAILINGSPACE(m) < sz )
+ panic("u9auth_m_tr2m");
+
+ ap = mtod(m, char *);
+ m->m_len = u9auth_tr2m(tktq, ap);
+ m->m_pkthdr.len = m->m_len;
+
+ return (m);
+ }
+
+ static int
+ u9auth_send(so, top, p)
+ register struct socket *so;
+ register struct mbuf *top;
+ register struct proc *p;
+
+ {
+ int error, soflags, flags;
+
+ soflags = so->so_proto->pr_flags;
+ if (so->so_type == SOCK_SEQPACKET)
+ flags = MSG_EOR;
+ else
+ flags = 0;
+
+ error = so->so_proto->pr_usrreqs->pru_sosend(so, 0, 0, top, 0, flags, p);
+
+ return (error);
+ }
+
+ static int
+ u9auth_recv(so, mp, p)
+ register struct socket * so;
+ register struct mbuf **mp;
+ struct proc *p;
+ {
+ struct uio auio;
+ u_int32_t len;
+ int error = 0, sotype, rcvflg;
+
+ *mp = 0;
+ sotype = so->so_type;
+
+ /*
+ * Only SOCK_SEQPACKET sockets are handled here: each reply arrives
+ * as a single record, so one soreceive() call (retried on
+ * EWOULDBLOCK) returns the whole message.  There is no record-mark
+ * handling for SOCK_STREAM and no locking against other
+ * senders/receivers across a reconnect.
+ */
+ if (sotype == SOCK_SEQPACKET ) {
+ if( (so->so_state & SS_ISCONNECTED) == 0 )
+ return (EACCES);
+ auio.uio_resid = len = 1000000;
+ auio.uio_procp = p;
+ do {
+ rcvflg = 0;
+ error = so->so_proto->pr_usrreqs->pru_soreceive
+ (so, 0, &auio, mp,
+ (struct mbuf **)0, &rcvflg);
+ } while (error == EWOULDBLOCK);
+ len -= auio.uio_resid;
+ }
+ if (error) {
+ m_freem(*mp);
+ *mp = 0;
+ }
+ return (error);
+ }
+
+ static void
+ u9auth_m2t(char *ap, struct u9auth_ticket *f, char *key)
+ {
+ u_char *p;
+
+ if(key)
+ decrypt9(key, ap, U9AUTH_TICKETLEN);
+ p = (u_char*)ap;
+ N2HCHAR(f->num);
+ N2HSTRING(f->chal, U9FS_CHALLEN);
+ N2HSTRING(f->cuid, U9FS_NAMELEN);
+ f->cuid[U9FS_NAMELEN-1] = 0;
+ N2HSTRING(f->suid, U9FS_NAMELEN);
+ f->suid[U9FS_NAMELEN-1] = 0;
+ N2HSTRING(f->key, U9AUTH_DESKEYLEN);
+ }
+
+ static int
+ u9auth_a2m(struct u9auth_authenticator *f, char *ap, char *key)
+ {
+ int n;
+ u_char *p;
+
+ p = (u_char*)ap;
+ H2NCHAR(f->num);
+ H2NSTRING(f->chal, U9FS_CHALLEN);
+ H2NLONG(f->id);
+ n = p - (u_char*)ap;
+ if(key)
+ encrypt9(key, ap, n);
+ return n;
+ }
+
+ void u9auth_genchal (char * chal)
+ {
+ u_int32_t * lp = (u_int32_t *)chal; /* fill the 8-byte challenge with two 32-bit random words */
+
+ *lp++ = random();
+ *lp = random();
+ }
+
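+ /*
+ * Fetch a ticket pair from the authentication server: send an
+ * AuthTreq carrying the server's challenge, authid and authdom from
+ * rep together with the local user name, then expect an AuthOK reply
+ * followed by two tickets.  The client's copy is decrypted with ckey;
+ * the server's copy is returned opaquely in ts.  Finally an AuthAc
+ * authenticator, encrypted with the nonce key from the client ticket,
+ * is marshalled into authc.
+ */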
+ int u9auth_gettickets (struct socket * so, struct u9fsreq * rep,
+ char * user, char * ckey, char * ts, char * authc,
+ struct proc *p)
+ {
+ char * cp;
+ struct u9auth_ticketreq tktq;
+ struct u9auth_ticket tc;
+ struct u9auth_authenticator auth;
+ struct mbuf * m;
+ int error, len;
+
+ bzero(&tktq, sizeof(tktq));
+ tktq.type = AuthTreq;
+ bcopy(rep->r_authid, tktq.authid, U9FS_NAMELEN);
+ bcopy(rep->r_authdom, tktq.authdom, U9FS_DOMLEN);
+ bcopy(rep->r_chal, tktq.chal, U9FS_CHALLEN);
+ strncpy(tktq.hostid, user, U9FS_NAMELEN);
+ strncpy(tktq.uid, user, U9FS_NAMELEN);
+
+ m = u9auth_m_tr2m(&tktq);
+ error = u9auth_send(so, m, p);
+ if( error )
+ goto bad;
+ error = u9auth_recv(so, &m, p);
+ if( error )
+ goto bad;
+
+ len = U9AUTH_TICKETLEN+1;
+ if( m->m_len < len && (m = m_pullup(m, len)) == 0 ) {
+ error = EBADRPC;
+ goto bad;
+ }
+
+ cp = mtod(m, char *);
+ switch( cp[0] ) {
+ case AuthOK:
+ u9auth_m2t(&cp[1], & tc, ckey);
+ bzero(&auth, sizeof(auth));
+ auth.num = AuthAc;
+ bcopy(tc.chal, auth.chal, sizeof(auth.chal));
+ auth.id = u9auth_count++;
+
+ m->m_len -= len;
+ m->m_data += len;
+
+ len = U9AUTH_TICKETLEN;
+ if( m->m_len < len && (m = m_pullup(m, len)) == 0 ) {
+ error = EBADRPC;
+ goto bad;
+ }
+ cp = mtod(m, char *);
+ bcopy(cp, ts, len);
+ m_freem(m);
+ break;
+ case AuthErr:
+ case AuthOKvar:
+ default:
+ m_freem(m);
+ error = EAUTH;
+ goto bad;
+ }
+
+ u9auth_a2m(&auth, authc, tc.key);
+ return 0;
+ bad:
+ return error;
+ }
+
diff -N -c -r /usr/src/sys/9fs/9auth.h ./9fs/9auth.h
*** /usr/src/sys/9fs/9auth.h Wed Dec 31 19:00:00 1969
--- ./9fs/9auth.h Thu Nov 11 15:00:29 1999
***************
*** 0 ****
--- 1,129 ----
+ #ifndef P9AUTH_H
+ #define P9AUTH_H
+
+ #define U9AUTH_DOMLEN 48 /* length of an authentication domain name */
+ #define U9AUTH_DESKEYLEN 7 /* length of a des key for encrypt/decrypt */
+ #define U9AUTH_CHALLEN 8 /* length of a challenge */
+ #define U9AUTH_NETCHLEN 16 /* max network challenge length */
+ #define U9AUTH_CONFIGLEN 14
+ #define U9AUTH_SECRETLEN 32 /* max length of a secret */
+ #define U9AUTH_APOPCHLEN 256
+ #define U9AUTH_MD5LEN 16
+ #define U9AUTH_KEYDBOFF 8 /* length of random data at the start of key file */
+ #define U9AUTH_OKEYDBLEN (U9FS_NAMELEN+U9AUTH_DESKEYLEN+4+2) /* length of an entry in old key file */
+ #define U9AUTH_KEYDBLEN (U9AUTH_OKEYDBLEN+U9AUTH_SECRETLEN) /* length of an entry in key file */
+
+ /* encryption numberings (anti-replay) */
+ enum
+ {
+ AuthTreq=1, /* ticket request */
+ AuthChal=2, /* challenge box request */
+ AuthPass=3, /* change password */
+ AuthOK=4, /* fixed length reply follows */
+ AuthErr=5, /* error follows */
+ AuthMod=6, /* modify user */
+ AuthApop=7, /* apop authentication for pop3 */
+ AuthOKvar=9, /* variable length reply follows */
+ AuthChap=10, /* chap authentication for ppp */
+ AuthMSchap=11, /* MS chap authentication for ppp */
+
+
+ AuthTs=64, /* ticket encrypted with server's key */
+ AuthTc, /* ticket encrypted with client's key */
+ AuthAs, /* server generated authenticator */
+ AuthAc, /* client generated authenticator */
+ AuthTp, /* ticket encrypted with client's key for password change */
+ };
+
+ struct u9auth_ticketreq
+ {
+ char type;
+ char authid[U9FS_NAMELEN]; /* server's encryption id */
+ char authdom[U9AUTH_DOMLEN]; /* server's authentication domain */
+ char chal[U9AUTH_CHALLEN]; /* challenge from server */
+ char hostid[U9FS_NAMELEN]; /* host's encryption id */
+ char uid[U9FS_NAMELEN]; /* uid of requesting user on host */
+ };
+ #define U9AUTH_TICKREQLEN (3*U9FS_NAMELEN+U9AUTH_CHALLEN+U9AUTH_DOMLEN+1)
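+ /* with U9FS_NAMELEN of 28 this is 3*28+8+48+1 = 141 bytes, the size marshalled in 9auth.c */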
+
+ struct u9auth_ticket
+ {
+ char num; /* replay protection */
+ char chal[U9AUTH_CHALLEN]; /* server challenge */
+ char cuid[U9FS_NAMELEN]; /* uid on client */
+ char suid[U9FS_NAMELEN]; /* uid on server */
+ char key[U9AUTH_DESKEYLEN]; /* nonce DES key */
+ };
+ #define U9AUTH_TICKETLEN (U9AUTH_CHALLEN+2*U9FS_NAMELEN+U9AUTH_DESKEYLEN+1)
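+ /* with U9FS_NAMELEN of 28 this is 8+2*28+7+1 = 72 bytes */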
+
+ struct u9auth_authenticator
+ {
+ char num; /* replay protection */
+ char chal[U9AUTH_CHALLEN];
+ u_long id; /* authenticator id, ++'d with each auth */
+ };
+ #define U9AUTH_AUTHENTLEN (U9AUTH_CHALLEN+4+1)
+
+ struct u9auth_passwordreq
+ {
+ char num;
+ char old[U9FS_NAMELEN];
+ char new[U9FS_NAMELEN];
+ char changesecret;
+ char secret[U9AUTH_SECRETLEN]; /* new secret */
+ };
+ #define U9AUTH_PASSREQLEN (2*U9FS_NAMELEN+1+1+U9AUTH_SECRETLEN)
+
+ struct u9auth_nvrsafe
+ {
+ char machkey[U9AUTH_DESKEYLEN];
+ u_char machsum;
+ char authkey[U9AUTH_DESKEYLEN];
+ u_char authsum;
+ char config[U9AUTH_CONFIGLEN];
+ u_char configsum;
+ char authid[U9FS_NAMELEN];
+ u_char authidsum;
+ char authdom[U9AUTH_DOMLEN];
+ u_char authdomsum;
+ };
+
+ struct u9auth_chalstate
+ {
+ int afd; /* /dev/authenticate */
+ int asfd; /* authdial() */
+ char chal[U9AUTH_NETCHLEN]; /* challenge/response */
+ };
+
+ struct u9auth_apopchalstate
+ {
+ int afd; /* /dev/authenticate */
+ int asfd; /* authdial() */
+ char chal[U9AUTH_APOPCHLEN]; /* challenge/response */
+ };
+
+ struct u9auth_chapreply
+ {
+ u_char id;
+ char uid[U9FS_NAMELEN];
+ char resp[U9AUTH_MD5LEN];
+ };
+
+ struct u9auth_mSchapreply
+ {
+ char uid[U9FS_NAMELEN];
+ char LMresp[24]; /* Lan Manager response */
+ char NTresp[24]; /* NT response */
+ };
+
+ #ifdef KERNEL
+ void u9auth_genchal __P((char *));
+ int u9auth_gettickets __P((struct socket * so, struct u9fsreq * rep,
+ char * user, char * ckey, char * ts, char * authc,
+ struct proc * p));
+ int encrypt9 __P((void *key, void * vbuf, int n));
+ int decrypt9 __P((void *key, void * vbuf, int n));
+
+ #endif
+
+ #endif
diff -N -c -r /usr/src/sys/9fs/9crypt.c ./9fs/9crypt.c
*** /usr/src/sys/9fs/9crypt.c Wed Dec 31 19:00:00 1969
--- ./9fs/9crypt.c Thu Nov 11 12:23:02 1999
***************
*** 0 ****
--- 1,416 ----
+ /*
+ * Data Encryption Standard
+ * D.P.Mitchell 83/06/08.
+ *
+ * block_cipher(key, block, decrypting)
+ */
+ #include <sys/param.h>
+ #include <sys/systm.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+
+ typedef unsigned char uchar;
+ typedef unsigned long ulong;
+ #define NAMELEN 28 /* length of path element, including '\0' */
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+
+ static long ip_low(char [8]);
+ static long ip_high(char [8]);
+ static void fp(long, long, char[8]);
+ static void key_setup(char[U9AUTH_DESKEYLEN], char[128]);
+ static void block_cipher(char[128], char[8], int);
+
+ /*
+ * destructively encrypt the buffer, which
+ * must be at least 8 characters long.
+ */
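+ /*
+ * The buffer is covered by overlapping 8-byte DES blocks that advance
+ * 7 bytes at a time, so adjacent blocks share one byte and are
+ * effectively chained; if n-1 is not a multiple of 7, one extra block
+ * covering the last 8 bytes of the buffer is ciphered as well.
+ */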
+ int
+ encrypt9(void *key, void *vbuf, int n)
+ {
+ char ekey[128], *buf;
+ int i, r;
+
+ if(n < 8)
+ return 0;
+ key_setup(key, ekey);
+ buf = vbuf;
+ n--;
+ r = n % 7;
+ n /= 7;
+ for(i = 0; i < n; i++){
+ block_cipher(ekey, buf, 0);
+ buf += 7;
+ }
+ if(r)
+ block_cipher(ekey, buf - 7 + r, 0);
+ return 1;
+ }
+
+ /*
+ * destructively decrypt the buffer, which
+ * must be at least 8 characters long.
+ */
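+ /*
+ * The blocks are undone in the reverse order of encrypt9: the
+ * trailing partial-stride block (if any) first, then the full blocks
+ * from the end of the buffer back to the start.
+ */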
+ int
+ decrypt9(void *key, void *vbuf, int n)
+ {
+ char ekey[128], *buf;
+ int i, r;
+
+ if(n < 8)
+ return 0;
+ key_setup(key, ekey);
+ buf = vbuf;
+ n--;
+ r = n % 7;
+ n /= 7;
+ buf += n * 7;
+ if(r)
+ block_cipher(ekey, buf - 7 + r, 1);
+ for(i = 0; i < n; i++){
+ buf -= 7;
+ block_cipher(ekey, buf, 1);
+ }
+ return 1;
+ }
+
+ /*
+ * Tables for Combined S and P Boxes
+ */
+
+ static long s0p[] = {
+ 0x00410100,0x00010000,0x40400000,0x40410100,0x00400000,0x40010100,0x40010000,0x40400000,
+ 0x40010100,0x00410100,0x00410000,0x40000100,0x40400100,0x00400000,0x00000000,0x40010000,
+ 0x00010000,0x40000000,0x00400100,0x00010100,0x40410100,0x00410000,0x40000100,0x00400100,
+ 0x40000000,0x00000100,0x00010100,0x40410000,0x00000100,0x40400100,0x40410000,0x00000000,
+ 0x00000000,0x40410100,0x00400100,0x40010000,0x00410100,0x00010000,0x40000100,0x00400100,
+ 0x40410000,0x00000100,0x00010100,0x40400000,0x40010100,0x40000000,0x40400000,0x00410000,
+ 0x40410100,0x00010100,0x00410000,0x40400100,0x00400000,0x40000100,0x40010000,0x00000000,
+ 0x00010000,0x00400000,0x40400100,0x00410100,0x40000000,0x40410000,0x00000100,0x40010100,
+ };
+
+ static long s1p[] = {
+ 0x08021002,0x00000000,0x00021000,0x08020000,0x08000002,0x00001002,0x08001000,0x00021000,
+ 0x00001000,0x08020002,0x00000002,0x08001000,0x00020002,0x08021000,0x08020000,0x00000002,
+ 0x00020000,0x08001002,0x08020002,0x00001000,0x00021002,0x08000000,0x00000000,0x00020002,
+ 0x08001002,0x00021002,0x08021000,0x08000002,0x08000000,0x00020000,0x00001002,0x08021002,
+ 0x00020002,0x08021000,0x08001000,0x00021002,0x08021002,0x00020002,0x08000002,0x00000000,
+ 0x08000000,0x00001002,0x00020000,0x08020002,0x00001000,0x08000000,0x00021002,0x08001002,
+ 0x08021000,0x00001000,0x00000000,0x08000002,0x00000002,0x08021002,0x00021000,0x08020000,
+ 0x08020002,0x00020000,0x00001002,0x08001000,0x08001002,0x00000002,0x08020000,0x00021000,
+ };
+
+ static long s2p[] = {
+ 0x20800000,0x00808020,0x00000020,0x20800020,0x20008000,0x00800000,0x20800020,0x00008020,
+ 0x00800020,0x00008000,0x00808000,0x20000000,0x20808020,0x20000020,0x20000000,0x20808000,
+ 0x00000000,0x20008000,0x00808020,0x00000020,0x20000020,0x20808020,0x00008000,0x20800000,
+ 0x20808000,0x00800020,0x20008020,0x00808000,0x00008020,0x00000000,0x00800000,0x20008020,
+ 0x00808020,0x00000020,0x20000000,0x00008000,0x20000020,0x20008000,0x00808000,0x20800020,
+ 0x00000000,0x00808020,0x00008020,0x20808000,0x20008000,0x00800000,0x20808020,0x20000000,
+ 0x20008020,0x20800000,0x00800000,0x20808020,0x00008000,0x00800020,0x20800020,0x00008020,
+ 0x00800020,0x00000000,0x20808000,0x20000020,0x20800000,0x20008020,0x00000020,0x00808000,
+ };
+
+ static long s3p[] = {
+ 0x00080201,0x02000200,0x00000001,0x02080201,0x00000000,0x02080000,0x02000201,0x00080001,
+ 0x02080200,0x02000001,0x02000000,0x00000201,0x02000001,0x00080201,0x00080000,0x02000000,
+ 0x02080001,0x00080200,0x00000200,0x00000001,0x00080200,0x02000201,0x02080000,0x00000200,
+ 0x00000201,0x00000000,0x00080001,0x02080200,0x02000200,0x02080001,0x02080201,0x00080000,
+ 0x02080001,0x00000201,0x00080000,0x02000001,0x00080200,0x02000200,0x00000001,0x02080000,
+ 0x02000201,0x00000000,0x00000200,0x00080001,0x00000000,0x02080001,0x02080200,0x00000200,
+ 0x02000000,0x02080201,0x00080201,0x00080000,0x02080201,0x00000001,0x02000200,0x00080201,
+ 0x00080001,0x00080200,0x02080000,0x02000201,0x00000201,0x02000000,0x02000001,0x02080200,
+ };
+
+ static long s4p[] = {
+ 0x01000000,0x00002000,0x00000080,0x01002084,0x01002004,0x01000080,0x00002084,0x01002000,
+ 0x00002000,0x00000004,0x01000004,0x00002080,0x01000084,0x01002004,0x01002080,0x00000000,
+ 0x00002080,0x01000000,0x00002004,0x00000084,0x01000080,0x00002084,0x00000000,0x01000004,
+ 0x00000004,0x01000084,0x01002084,0x00002004,0x01002000,0x00000080,0x00000084,0x01002080,
+ 0x01002080,0x01000084,0x00002004,0x01002000,0x00002000,0x00000004,0x01000004,0x01000080,
+ 0x01000000,0x00002080,0x01002084,0x00000000,0x00002084,0x01000000,0x00000080,0x00002004,
+ 0x01000084,0x00000080,0x00000000,0x01002084,0x01002004,0x01002080,0x00000084,0x00002000,
+ 0x00002080,0x01002004,0x01000080,0x00000084,0x00000004,0x00002084,0x01002000,0x01000004,
+ };
+
+ static long s5p[] = {
+ 0x10000008,0x00040008,0x00000000,0x10040400,0x00040008,0x00000400,0x10000408,0x00040000,
+ 0x00000408,0x10040408,0x00040400,0x10000000,0x10000400,0x10000008,0x10040000,0x00040408,
+ 0x00040000,0x10000408,0x10040008,0x00000000,0x00000400,0x00000008,0x10040400,0x10040008,
+ 0x10040408,0x10040000,0x10000000,0x00000408,0x00000008,0x00040400,0x00040408,0x10000400,
+ 0x00000408,0x10000000,0x10000400,0x00040408,0x10040400,0x00040008,0x00000000,0x10000400,
+ 0x10000000,0x00000400,0x10040008,0x00040000,0x00040008,0x10040408,0x00040400,0x00000008,
+ 0x10040408,0x00040400,0x00040000,0x10000408,0x10000008,0x10040000,0x00040408,0x00000000,
+ 0x00000400,0x10000008,0x10000408,0x10040400,0x10040000,0x00000408,0x00000008,0x10040008,
+ };
+
+ static long s6p[] = {
+ 0x00000800,0x00000040,0x00200040,0x80200000,0x80200840,0x80000800,0x00000840,0x00000000,
+ 0x00200000,0x80200040,0x80000040,0x00200800,0x80000000,0x00200840,0x00200800,0x80000040,
+ 0x80200040,0x00000800,0x80000800,0x80200840,0x00000000,0x00200040,0x80200000,0x00000840,
+ 0x80200800,0x80000840,0x00200840,0x80000000,0x80000840,0x80200800,0x00000040,0x00200000,
+ 0x80000840,0x00200800,0x80200800,0x80000040,0x00000800,0x00000040,0x00200000,0x80200800,
+ 0x80200040,0x80000840,0x00000840,0x00000000,0x00000040,0x80200000,0x80000000,0x00200040,
+ 0x00000000,0x80200040,0x00200040,0x00000840,0x80000040,0x00000800,0x80200840,0x00200000,
+ 0x00200840,0x80000000,0x80000800,0x80200840,0x80200000,0x00200840,0x00200800,0x80000800,
+ };
+
+ static long s7p[] = {
+ 0x04100010,0x04104000,0x00004010,0x00000000,0x04004000,0x00100010,0x04100000,0x04104010,
+ 0x00000010,0x04000000,0x00104000,0x00004010,0x00104010,0x04004010,0x04000010,0x04100000,
+ 0x00004000,0x00104010,0x00100010,0x04004000,0x04104010,0x04000010,0x00000000,0x00104000,
+ 0x04000000,0x00100000,0x04004010,0x04100010,0x00100000,0x00004000,0x04104000,0x00000010,
+ 0x00100000,0x00004000,0x04000010,0x04104010,0x00004010,0x04000000,0x00000000,0x00104000,
+ 0x04100010,0x04004010,0x04004000,0x00100010,0x04104000,0x00000010,0x00100010,0x04004000,
+ 0x04104010,0x00100000,0x04100000,0x04000010,0x00104000,0x00004010,0x04004010,0x04100000,
+ 0x00000010,0x04104000,0x00104010,0x00000000,0x04000000,0x04100010,0x00004000,0x00104010,
+ };
+
+ /*
+ * DES electronic codebook encryption of one block
+ */
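+ /*
+ * The s0p..s7p tables above combine the DES S-boxes with the
+ * permutation P, so each round reduces to eight table lookups OR'd
+ * together.  The expanded key holds 16 eight-byte subkeys; when
+ * decrypting, key starts at the last subkey and the key -= key_offset
+ * step below walks the subkeys in reverse order.
+ */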
+ static void
+ block_cipher(char expanded_key[128], char text[8], int decrypting)
+ {
+ char *key;
+ long crypto, temp, right, left;
+ int i, key_offset;
+
+ key = expanded_key;
+ left = ip_low(text);
+ right = ip_high(text);
+ if (decrypting) {
+ key_offset = 16;
+ key = key + 128 - 8;
+ } else
+ key_offset = 0;
+ for (i = 0; i < 16; i++) {
+ temp = (right << 1) | ((right >> 31) & 1);
+ crypto = s0p[(temp & 0x3f) ^ *key++];
+ crypto |= s1p[((temp >> 4) & 0x3f) ^ *key++];
+ crypto |= s2p[((temp >> 8) & 0x3f) ^ *key++];
+ crypto |= s3p[((temp >> 12) & 0x3f) ^ *key++];
+ crypto |= s4p[((temp >> 16) & 0x3f) ^ *key++];
+ crypto |= s5p[((temp >> 20) & 0x3f) ^ *key++];
+ crypto |= s6p[((temp >> 24) & 0x3f) ^ *key++];
+ temp = ((right & 1) << 5) | ((right >> 27) & 0x1f);
+ crypto |= s7p[temp ^ *key++];
+ temp = left;
+ left = right;
+ right = temp ^ crypto;
+ key -= key_offset;
+ }
+ /*
+ * standard final permutation (IPI)
+ * left and right are reversed here
+ */
+ fp(right, left, text);
+ }
+
+ /*
+ * Initial Permutation
+ */
+ static long iptab[] = {
+ 0x00000000, 0x00008000, 0x00000000, 0x00008000,
+ 0x00000080, 0x00008080, 0x00000080, 0x00008080
+ };
+
+ static long
+ ip_low(char block[8])
+ {
+ int i;
+ long l;
+
+ l = 0;
+ for(i = 0; i < 8; i++){
+ l |= iptab[(block[i] >> 4) & 7] >> i;
+ l |= iptab[block[i] & 7] << (16 - i);
+ }
+ return l;
+ }
+
+ static long
+ ip_high(char block[8])
+ {
+ int i;
+ long l;
+
+ l = 0;
+ for(i = 0; i < 8; i++){
+ l |= iptab[(block[i] >> 5) & 7] >> i;
+ l |= iptab[(block[i] >> 1) & 7] << (16 - i);
+ }
+ return l;
+ }
+
+ /*
+ * Final Permutation
+ */
+ static unsigned long fptab[] = {
+ 0x00000000,0x80000000,0x00800000,0x80800000,0x00008000,0x80008000,0x00808000,0x80808000,
+ 0x00000080,0x80000080,0x00800080,0x80800080,0x00008080,0x80008080,0x00808080,0x80808080,
+ };
+
+ static void
+ fp(long left, long right, char text[8])
+ {
+ unsigned long ta[2], t, v[2];
+ int i, j, sh;
+
+ ta[0] = right;
+ ta[1] = left;
+ v[0] = v[1] = 0;
+ for(i = 0; i < 2; i++){
+ t = ta[i];
+ sh = i;
+ for(j = 0; j < 4; j++){
+ v[1] |= fptab[t & 0xf] >> sh;
+ t >>= 4;
+ v[0] |= fptab[t & 0xf] >> sh;
+ t >>= 4;
+ sh += 2;
+ }
+ }
+ for(i = 0; i < 2; i++)
+ for(j = 0; j < 4; j++){
+ *text++ = v[i];
+ v[i] >>= 8;
+ }
+ }
+
+ /*
+ * Key set-up
+ */
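+ /*
+ * Each of the 56 rows below corresponds to one bit of the 7-byte key,
+ * most significant bit of key[0] first.  A row lists up to fifteen
+ * (byte index, bit mask) pairs of the 128-byte expanded key that get
+ * set when that key bit is set; trailing 0,0 pairs are padding.
+ */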
+ static uchar keyexpand[][15][2] = {
+ { 3, 2, 9, 8, 18, 8, 27, 32, 33, 2, 42, 16, 48, 8, 65, 16,
+ 74, 2, 80, 2, 89, 4, 99, 16, 104, 4, 122, 32, 0, 0, },
+ { 1, 4, 8, 1, 18, 4, 25, 32, 34, 32, 41, 8, 50, 8, 59, 32,
+ 64, 16, 75, 4, 90, 1, 97, 16, 106, 2, 112, 2, 123, 1, },
+ { 2, 1, 19, 8, 35, 1, 40, 1, 50, 4, 57, 32, 75, 2, 80, 32,
+ 89, 1, 96, 16, 107, 4, 120, 8, 0, 0, 0, 0, 0, 0, },
+ { 4, 32, 20, 2, 31, 4, 37, 32, 47, 1, 54, 1, 63, 2, 68, 1,
+ 78, 4, 84, 8, 101, 16, 108, 4, 119, 16, 126, 8, 0, 0, },
+ { 5, 4, 15, 4, 21, 32, 31, 1, 38, 1, 47, 2, 53, 2, 68, 8,
+ 85, 16, 92, 4, 103, 16, 108, 32, 118, 32, 124, 2, 0, 0, },
+ { 15, 2, 21, 2, 39, 8, 46, 16, 55, 32, 61, 1, 71, 16, 76, 32,
+ 86, 32, 93, 4, 102, 2, 108, 16, 117, 8, 126, 1, 0, 0, },
+ { 14, 16, 23, 32, 29, 1, 38, 8, 52, 2, 63, 4, 70, 2, 76, 16,
+ 85, 8, 100, 1, 110, 4, 116, 8, 127, 8, 0, 0, 0, 0, },
+ { 1, 8, 8, 32, 17, 1, 24, 16, 35, 4, 50, 1, 57, 16, 67, 8,
+ 83, 1, 88, 1, 98, 4, 105, 32, 114, 32, 123, 2, 0, 0, },
+ { 0, 1, 11, 16, 16, 4, 35, 2, 40, 32, 49, 1, 56, 16, 65, 2,
+ 74, 16, 80, 8, 99, 8, 115, 1, 121, 4, 0, 0, 0, 0, },
+ { 9, 16, 18, 2, 24, 2, 33, 4, 43, 16, 48, 4, 66, 32, 73, 8,
+ 82, 8, 91, 32, 97, 2, 106, 16, 112, 8, 122, 1, 0, 0, },
+ { 14, 32, 21, 4, 30, 2, 36, 16, 45, 8, 60, 1, 69, 2, 87, 8,
+ 94, 16, 103, 32, 109, 1, 118, 8, 124, 32, 0, 0, 0, 0, },
+ { 7, 4, 14, 2, 20, 16, 29, 8, 44, 1, 54, 4, 60, 8, 71, 8,
+ 78, 16, 87, 32, 93, 1, 102, 8, 116, 2, 125, 4, 0, 0, },
+ { 7, 2, 12, 1, 22, 4, 28, 8, 45, 16, 52, 4, 63, 16, 70, 8,
+ 84, 2, 95, 4, 101, 32, 111, 1, 118, 1, 0, 0, 0, 0, },
+ { 6, 16, 13, 16, 20, 4, 31, 16, 36, 32, 46, 32, 53, 4, 62, 2,
+ 69, 32, 79, 1, 86, 1, 95, 2, 101, 2, 119, 8, 0, 0, },
+ { 0, 32, 10, 8, 19, 32, 25, 2, 34, 16, 40, 8, 59, 8, 66, 2,
+ 72, 2, 81, 4, 91, 16, 96, 4, 115, 2, 121, 8, 0, 0, },
+ { 3, 16, 10, 4, 17, 32, 26, 32, 33, 8, 42, 8, 51, 32, 57, 2,
+ 67, 4, 82, 1, 89, 16, 98, 2, 104, 2, 113, 4, 120, 1, },
+ { 1, 16, 11, 8, 27, 1, 32, 1, 42, 4, 49, 32, 58, 32, 67, 2,
+ 72, 32, 81, 1, 88, 16, 99, 4, 114, 1, 0, 0, 0, 0, },
+ { 6, 32, 12, 2, 23, 4, 29, 32, 39, 1, 46, 1, 55, 2, 61, 2,
+ 70, 4, 76, 8, 93, 16, 100, 4, 111, 16, 116, 32, 0, 0, },
+ { 6, 2, 13, 32, 23, 1, 30, 1, 39, 2, 45, 2, 63, 8, 77, 16,
+ 84, 4, 95, 16, 100, 32, 110, 32, 117, 4, 127, 4, 0, 0, },
+ { 4, 1, 13, 2, 31, 8, 38, 16, 47, 32, 53, 1, 62, 8, 68, 32,
+ 78, 32, 85, 4, 94, 2, 100, 16, 109, 8, 127, 2, 0, 0, },
+ { 5, 16, 15, 32, 21, 1, 30, 8, 44, 2, 55, 4, 61, 32, 68, 16,
+ 77, 8, 92, 1, 102, 4, 108, 8, 126, 16, 0, 0, 0, 0, },
+ { 2, 8, 9, 1, 16, 16, 27, 4, 42, 1, 49, 16, 58, 2, 75, 1,
+ 80, 1, 90, 4, 97, 32, 106, 32, 113, 8, 120, 32, 0, 0, },
+ { 2, 4, 8, 4, 27, 2, 32, 32, 41, 1, 48, 16, 59, 4, 66, 16,
+ 72, 8, 91, 8, 107, 1, 112, 1, 123, 16, 0, 0, 0, 0, },
+ { 3, 8, 10, 2, 16, 2, 25, 4, 35, 16, 40, 4, 59, 2, 65, 8,
+ 74, 8, 83, 32, 89, 2, 98, 16, 104, 8, 121, 16, 0, 0, },
+ { 4, 2, 13, 4, 22, 2, 28, 16, 37, 8, 52, 1, 62, 4, 79, 8,
+ 86, 16, 95, 32, 101, 1, 110, 8, 126, 32, 0, 0, 0, 0, },
+ { 5, 32, 12, 16, 21, 8, 36, 1, 46, 4, 52, 8, 70, 16, 79, 32,
+ 85, 1, 94, 8, 108, 2, 119, 4, 126, 2, 0, 0, 0, 0, },
+ { 5, 2, 14, 4, 20, 8, 37, 16, 44, 4, 55, 16, 60, 32, 76, 2,
+ 87, 4, 93, 32, 103, 1, 110, 1, 119, 2, 124, 1, 0, 0, },
+ { 7, 32, 12, 4, 23, 16, 28, 32, 38, 32, 45, 4, 54, 2, 60, 16,
+ 71, 1, 78, 1, 87, 2, 93, 2, 111, 8, 118, 16, 125, 16, },
+ { 1, 1, 11, 32, 17, 2, 26, 16, 32, 8, 51, 8, 64, 2, 73, 4,
+ 83, 16, 88, 4, 107, 2, 112, 32, 122, 8, 0, 0, 0, 0, },
+ { 0, 4, 9, 32, 18, 32, 25, 8, 34, 8, 43, 32, 49, 2, 58, 16,
+ 74, 1, 81, 16, 90, 2, 96, 2, 105, 4, 115, 16, 122, 4, },
+ { 2, 2, 19, 1, 24, 1, 34, 4, 41, 32, 50, 32, 57, 8, 64, 32,
+ 73, 1, 80, 16, 91, 4, 106, 1, 113, 16, 123, 8, 0, 0, },
+ { 3, 4, 10, 16, 16, 8, 35, 8, 51, 1, 56, 1, 67, 16, 72, 4,
+ 91, 2, 96, 32, 105, 1, 112, 16, 121, 2, 0, 0, 0, 0, },
+ { 4, 16, 15, 1, 22, 1, 31, 2, 37, 2, 55, 8, 62, 16, 69, 16,
+ 76, 4, 87, 16, 92, 32, 102, 32, 109, 4, 118, 2, 125, 32, },
+ { 6, 4, 23, 8, 30, 16, 39, 32, 45, 1, 54, 8, 70, 32, 77, 4,
+ 86, 2, 92, 16, 101, 8, 116, 1, 125, 2, 0, 0, 0, 0, },
+ { 4, 4, 13, 1, 22, 8, 36, 2, 47, 4, 53, 32, 63, 1, 69, 8,
+ 84, 1, 94, 4, 100, 8, 117, 16, 127, 32, 0, 0, 0, 0, },
+ { 3, 32, 8, 16, 19, 4, 34, 1, 41, 16, 50, 2, 56, 2, 67, 1,
+ 72, 1, 82, 4, 89, 32, 98, 32, 105, 8, 114, 8, 121, 1, },
+ { 1, 32, 19, 2, 24, 32, 33, 1, 40, 16, 51, 4, 64, 8, 83, 8,
+ 99, 1, 104, 1, 114, 4, 120, 4, 0, 0, 0, 0, 0, 0, },
+ { 8, 2, 17, 4, 27, 16, 32, 4, 51, 2, 56, 32, 66, 8, 75, 32,
+ 81, 2, 90, 16, 96, 8, 115, 8, 122, 2, 0, 0, 0, 0, },
+ { 2, 16, 18, 1, 25, 16, 34, 2, 40, 2, 49, 4, 59, 16, 66, 4,
+ 73, 32, 82, 32, 89, 8, 98, 8, 107, 32, 113, 2, 123, 4, },
+ { 7, 1, 13, 8, 28, 1, 38, 4, 44, 8, 61, 16, 71, 32, 77, 1,
+ 86, 8, 100, 2, 111, 4, 117, 32, 124, 16, 0, 0, 0, 0, },
+ { 12, 8, 29, 16, 36, 4, 47, 16, 52, 32, 62, 32, 68, 2, 79, 4,
+ 85, 32, 95, 1, 102, 1, 111, 2, 117, 2, 126, 4, 0, 0, },
+ { 5, 1, 15, 16, 20, 32, 30, 32, 37, 4, 46, 2, 52, 16, 61, 8,
+ 70, 1, 79, 2, 85, 2, 103, 8, 110, 16, 119, 32, 124, 4, },
+ { 0, 16, 9, 2, 18, 16, 24, 8, 43, 8, 59, 1, 65, 4, 75, 16,
+ 80, 4, 99, 2, 104, 32, 113, 1, 123, 32, 0, 0, 0, 0, },
+ { 10, 32, 17, 8, 26, 8, 35, 32, 41, 2, 50, 16, 56, 8, 66, 1,
+ 73, 16, 82, 2, 88, 2, 97, 4, 107, 16, 112, 4, 121, 32, },
+ { 0, 2, 11, 1, 16, 1, 26, 4, 33, 32, 42, 32, 49, 8, 58, 8,
+ 65, 1, 72, 16, 83, 4, 98, 1, 105, 16, 114, 2, 0, 0, },
+ { 8, 8, 27, 8, 43, 1, 48, 1, 58, 4, 64, 4, 83, 2, 88, 32,
+ 97, 1, 104, 16, 115, 4, 122, 16, 0, 0, 0, 0, 0, 0, },
+ { 5, 8, 14, 1, 23, 2, 29, 2, 47, 8, 54, 16, 63, 32, 68, 4,
+ 79, 16, 84, 32, 94, 32, 101, 4, 110, 2, 116, 16, 127, 1, },
+ { 4, 8, 15, 8, 22, 16, 31, 32, 37, 1, 46, 8, 60, 2, 69, 4,
+ 78, 2, 84, 16, 93, 8, 108, 1, 118, 4, 0, 0, 0, 0, },
+ { 7, 16, 14, 8, 28, 2, 39, 4, 45, 32, 55, 1, 62, 1, 76, 1,
+ 86, 4, 92, 8, 109, 16, 116, 4, 125, 1, 0, 0, 0, 0, },
+ { 1, 2, 11, 4, 26, 1, 33, 16, 42, 2, 48, 2, 57, 4, 64, 1,
+ 74, 4, 81, 32, 90, 32, 97, 8, 106, 8, 115, 32, 120, 16, },
+ { 2, 32, 11, 2, 16, 32, 25, 1, 32, 16, 43, 4, 58, 1, 75, 8,
+ 91, 1, 96, 1, 106, 4, 113, 32, 0, 0, 0, 0, 0, 0, },
+ { 3, 1, 9, 4, 19, 16, 24, 4, 43, 2, 48, 32, 57, 1, 67, 32,
+ 73, 2, 82, 16, 88, 8, 107, 8, 120, 2, 0, 0, 0, 0, },
+ { 0, 8, 10, 1, 17, 16, 26, 2, 32, 2, 41, 4, 51, 16, 56, 4,
+ 65, 32, 74, 32, 81, 8, 90, 8, 99, 32, 105, 2, 114, 16, },
+ { 6, 1, 20, 1, 30, 4, 36, 8, 53, 16, 60, 4, 69, 1, 78, 8,
+ 92, 2, 103, 4, 109, 32, 119, 1, 125, 8, 0, 0, 0, 0, },
+ { 7, 8, 21, 16, 28, 4, 39, 16, 44, 32, 54, 32, 61, 4, 71, 4,
+ 77, 32, 87, 1, 94, 1, 103, 2, 109, 2, 124, 8, 0, 0, },
+ { 6, 8, 12, 32, 22, 32, 29, 4, 38, 2, 44, 16, 53, 8, 71, 2,
+ 77, 2, 95, 8, 102, 16, 111, 32, 117, 1, 127, 16, 0, 0, }
+ };
+
+ static void
+ key_setup(char key[U9AUTH_DESKEYLEN], char *ek)
+ {
+ int i, j, k, mask;
+ uchar (*x)[2];
+
+ bzero(ek, 128);
+ x = keyexpand[0];
+ for(i = 0; i < 7; i++){
+ k = key[i];
+ for(mask = 0x80; mask; mask >>= 1){
+ if(k & mask)
+ for(j = 0; j < 15; j++)
+ ek[x[j][0]] |= x[j][1];
+ x += 15;
+ }
+ }
+ }
diff -N -c -r /usr/src/sys/9fs/9fs.h ./9fs/9fs.h
*** /usr/src/sys/9fs/9fs.h Wed Dec 31 19:00:00 1969
--- ./9fs/9fs.h Mon May 22 11:31:29 2000
***************
*** 0 ****
--- 1,294 ----
+ /*
+ * Copyright (c) 1989, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)nfs.h 8.4 (Berkeley) 5/1/95
+ * $Id: nfs.h,v 1.44 1998/09/07 05:42:15 bde Exp $
+ */
+
+ #ifndef _9FS_H_
+ #define _9FS_H_
+
+ #ifdef KERNEL
+ #include "opt_u9fs.h"
+ #endif
+
+ #define U9FS_FABLKSIZE 512
+ #define U9FS_PORT 17008
+
+ /*
+ * The set of signals that interrupt an I/O in progress for U9FSMNT_INT mounts.
+ * What should be in this set is open to debate, but I believe that since
+ * I/O system calls on ufs are never interrupted by signals the set should
+ * be minimal. My reasoning is that many current programs that use signals
+ * such as SIGALRM will not expect file I/O system calls to be interrupted
+ * by them and break.
+ */
+ #define U9FSINT_SIGMASK (sigmask(SIGINT)|sigmask(SIGTERM)|sigmask(SIGKILL)| \
+ sigmask(SIGHUP)|sigmask(SIGQUIT))
+
+ /*
+ * U9FS mount option flags
+ */
+ #define U9FSMNT_SOFT 0x00000001 /* soft mount (hard is default) */
+ #define U9FSMNT_MAXGRPS 0x00000020 /* set maximum grouplist size */
+ #define U9FSMNT_INT 0x00000040 /* allow interrupts on hard mount */
+ #define U9FSMNT_KERB 0x00000400 /* Use Kerberos authentication */
+ #define U9FSMNT_READAHEAD 0x00002000 /* set read ahead */
+
+ #define U9FSSTA_HASWRITEVERF 0x00040000 /* Has write verifier for V3 */
+ #define U9FSSTA_GOTPATHCONF 0x00080000 /* Got the V3 pathconf info */
+ #define U9FSSTA_GOTFSINFO 0x00100000 /* Got the V3 fsinfo */
+ #define U9FSSTA_MNTD 0x00200000 /* Mnt server for mnt point */
+ #define U9FSSTA_DISMINPROG 0x00400000 /* Dismount in progress */
+ #define U9FSSTA_DISMNT 0x00800000 /* Dismounted */
+ #define U9FSSTA_SNDLOCK 0x01000000 /* Send socket lock */
+ #define U9FSSTA_WANTSND 0x02000000 /* Want above */
+ #define U9FSSTA_RCVLOCK 0x04000000 /* Rcv socket lock */
+ #define U9FSSTA_WANTRCV 0x08000000 /* Want above */
+ #define U9FSSTA_WAITAUTH 0x10000000 /* Wait for authentication */
+ #define U9FSSTA_HASAUTH 0x20000000 /* Has authenticator */
+ #define U9FSSTA_WANTAUTH 0x40000000 /* Wants an authenticator */
+ #define U9FSSTA_AUTHERR 0x80000000 /* Authentication error */
+
+ #define U9FSNOHASH(fhsum) (&u9fsnodehashtbl[(fhsum) % u9fsnodehash])
+
+ /*
+ * Arguments to mount 9FS
+ */
+ #define U9FS_ARGSVERSION 1 /* change when nfs_args changes */
+ struct u9fs_args {
+ int version; /* args structure version number */
+ struct sockaddr *addr; /* file server address */
+ int addrlen; /* length of address */
+ int sotype; /* Socket type */
+ int proto; /* and Protocol */
+ int fhsize; /* Size, in bytes, of fh */
+ int flags; /* flags */
+ int wsize; /* write size in bytes */
+ int rsize; /* read size in bytes */
+ int readdirsize; /* readdir size in bytes */
+ char *hostname; /* server's name */
+
+ struct sockaddr * authaddr;
+ int authaddrlen;
+ int authsotype;
+ int authsoproto;
+
+ int nusers;
+ char user[U9FS_NAMELEN];
+ char key[U9AUTH_DESKEYLEN];
+ struct p9user {
+ uid_t p9_uid;
+ char p9_name[U9FS_NAMELEN];
+ } * users;
+ };
+
+ #define U9FS_USER_HASHSIZE 512
+
+ struct u9fsuser {
+ LIST_ENTRY(u9fsuser) u_hash;
+ uid_t u_uid;
+ char u_name[U9FS_NAMELEN];
+ char u_ckey[U9AUTH_DESKEYLEN]; /* user key */
+ char u_skey[U9AUTH_DESKEYLEN]; /* session key */
+ };
+
+ /*
+ * The u9fsnode is the u9fs equivalent to ufs's inode. Any similarity
+ * is purely coincidental.
+ * There is a unique u9fsnode allocated for each active file,
+ * each current directory, each mounted-on file, text file, and the root.
+ * An u9fsnode is 'named' by its file handle. (nget/u9fs_node.c)
+ * If this structure exceeds 256 bytes (it is currently 256 using 4.4BSD-Lite
+ * type definitions), file handles of > 32 bytes should probably be split out
+ * into a separate MALLOC()'d data structure. (Reduce the size of u9fsfh_t by
+ * changing the definition in u9fsproto.h of U9FS_SMALLFH.)
+ * NB: Hopefully the current order of the fields is such that everything will
+ * be well aligned and, therefore, tightly packed.
+ */
+ struct u9fsnode {
+ LIST_ENTRY(u9fsnode) n_hash; /* Hash chain */
+ u_quad_t n_size; /* Current size of file */
+ struct vattr n_vattr; /* Vnode attribute cache */
+ time_t n_attrstamp; /* Attr. cache timestamp */
+ u_int32_t n_mode; /* ACCESS mode cache */
+ uid_t n_modeuid; /* credentials having mode */
+ time_t n_modestamp; /* mode cache timestamp */
+ time_t n_mtime; /* Prev modify time. */
+ time_t n_ctime; /* Prev create time. */
+ struct u9fs_qid n_qid;
+ u_short n_fid; /* U9FS FID */
+ u_short n_rdfid;
+ u_short n_wrfid;
+ struct vnode *n_vnode; /* associated vnode */
+ struct lockf *n_lockf; /* Locking record of file */
+ int n_error; /* Save write error value */
+ struct u9fsdir n_dir;
+ short n_flag; /* Flag for locking.. */
+ int n_opens; /* number of opens */
+ };
+
+ #define n_atim n_un1.nf_atim
+ #define n_mtim n_un2.nf_mtim
+ #define n_sillyrename n_un3.nf_silly
+ #define n_cookieverf n_un1.nd_cookieverf
+ #define n_direofoffset n_un2.nd_direof
+ #define n_cookies n_un3.nd_cook
+
+ /*
+ * Flags for n_flag
+ */
+ #define NFLUSHWANT 0x0001 /* Want wakeup from a flush in prog. */
+ #define NFLUSHINPROG 0x0002 /* Avoid multiple calls to vinvalbuf() */
+ #define NMODIFIED 0x0004 /* Might have a modified buffer in bio */
+ #define NWRITEERR 0x0008 /* Flag write errors so close will know */
+ #define NQU9FSNONCACHE 0x0020 /* Non-cachable lease */
+ #define NQU9FSWRITE 0x0040 /* Write lease */
+ #define NQU9FSEVICTED 0x0080 /* Has been evicted */
+ #define NACC 0x0100 /* Special file accessed */
+ #define NUPD 0x0200 /* Special file updated */
+ #define NCHG 0x0400 /* Special file times changed */
+ #define NLOCKED 0x0800 /* node is locked */
+ #define NWANTED 0x1000 /* someone wants to lock */
+
+ /*
+ * Convert between u9fsnode pointers and vnode pointers
+ */
+ #define VTOU9FS(vp) ((struct u9fsnode *)(vp)->v_data)
+ #define U9FSTOV(np) ((struct vnode *)(np)->n_vnode)
+
+ /*
+ * Mount structure.
+ * One allocated on every U9FS mount.
+ * Holds U9FS specific information for mount.
+ */
+ struct u9fsmount {
+ int nm_flag; /* Flags for soft/hard... */
+ int nm_state; /* Internal state flags */
+ struct mount *nm_mountp; /* Vfs structure for this filesystem */
+ int nm_numgrps; /* Max. size of groupslist */
+ u9fsfh_t nm_fh; /* qid.path */
+ u_short nm_fid; /* fid of root dir */
+ struct socket *nm_so; /* Rpc socket */
+ int nm_sotype; /* Type of socket */
+ int nm_soproto; /* and protocol */
+ int nm_soflags; /* pr_flags for socket protocol */
+ struct sockaddr *nm_nam; /* Addr of server */
+ int nm_sent; /* Request send count */
+ int nm_cwnd; /* Request send window */
+ int nm_rsize; /* Max size of read rpc */
+ int nm_wsize; /* Max size of write rpc */
+ int nm_readdirsize; /* Size of a readdir rpc */
+
+ struct lock nm_lock; /* lock for tag/fid freelist */
+ bitstr_t * nm_tags;
+ bitstr_t * nm_fids;
+ TAILQ_HEAD(u9fs_reqq, u9fsreq) nm_reqq;
+
+ uid_t nm_authuid; /* Uid for authenticator */
+ #if 0
+ struct vnode *nm_inprog; /* Vnode in prog by nqu9fs_clientd() */
+ uid_t nm_authuid; /* Uid for authenticator */
+ int nm_authtype; /* Authenticator type */
+ int nm_authlen; /* and length */
+ char *nm_authstr; /* Authenticator string */
+ char *nm_verfstr; /* and the verifier */
+ int nm_verflen;
+ u_char nm_verf[U9FSX_V3WRITEVERF]; /* V3 write verifier */
+ U9FSKERBKEY_T nm_key; /* and the session key */
+ int nm_numuids; /* Number of u9fsuid mappings */
+ TAILQ_HEAD(, u9fsuid) nm_uidlruhead; /* Lists of u9fsuid mappings */
+ LIST_HEAD(, u9fsuid) nm_uidhashtbl[U9FS_MUIDHASHSIZ];
+ TAILQ_HEAD(, buf) nm_bufq; /* async io buffer queue */
+ short nm_bufqlen; /* number of buffers in queue */
+ short nm_bufqwant; /* process wants to add to the queue */
+ int nm_bufqiods; /* number of iods processing queue */
+ #endif
+ u_int64_t nm_maxfilesize; /* maximum file size */
+ };
+
+ #ifdef KERNEL
+
+ #ifdef MALLOC_DECLARE
+ MALLOC_DECLARE(M_U9FSHASH);
+ MALLOC_DECLARE(M_U9FSBITS);
+
+ extern vop_t **u9fs_vnodeop_p;
+
+ /* u9fs_node.c */
+ void u9fs_nhinit __P((void));
+ int u9fs_nget __P((struct mount *mntp, u9fsfh_t fh, struct u9fsnode **npp, struct proc * p));
+
+ /* u9fs_subr.c */
+ void u9fs_id_init __P((bitstr_t ** bits));
+ u_short u9fs_id_new __P((bitstr_t * bits));
+ void u9fs_id_free __P((bitstr_t * bits, u_short v));
+ void u9fs_uhinit __P((void));
+ uid_t u9fs_name2uid __P((char * name));
+ struct u9fsuser * u9fs_finduser __P((uid_t uid));
+ void u9fs_hashuser __P((uid_t uid, char *name));
+ int u9fs_mbuftouio __P((struct mbuf *m, struct uio *uiop, int siz));
+ int u9fs_uiotombuf __P((struct uio *uiop, struct mbuf **mq, int siz));
+
+ /* u9fs_vnopes.c */
+ int u9fs_readdirrpc __P((struct vnode *, struct uio *, struct ucred *));
+ int u9fs_readrpc __P((struct vnode *vp, struct uio *uiop, struct ucred *cred));
+ int u9fs_writerpc __P((struct vnode *vp, struct uio *uiop, struct ucred *cred));
+
+ /* u9fs_bio.c */
+ int u9fs_bioread __P((struct vnode *, struct uio *, int, struct ucred *,int));
+ int u9fs_biowrite __P((struct vnode *, struct uio *, int ioflag, struct ucred *));
+ int u9fs_doio __P((struct buf *, struct ucred *, struct proc *));
+ int u9fs_vinvalbuf __P((struct vnode *, int, struct ucred *, struct proc *, int));
+
+
+ /* u9fs_socket.c */
+ int u9fs_sigintr __P((struct u9fsmount *nmp, struct proc *p));
+ void u9fs_disconnect __P((struct socket *));
+ int u9fs_connect __P((struct socket ** sop, struct sockaddr * saddr, int sotype, int soproto, struct proc * p));
+ int u9fs_connect_9fs __P((struct u9fsmount *));
+ int u9fs_connect_9auth __P((struct u9fsmount *, struct u9fs_args *, struct socket **));
+ int u9fs_request __P((struct u9fsreq * req, struct u9fsreq * rep, int relm));
+
+ #endif
+
+ /*
+ * Convert mount ptr to u9fsmount ptr.
+ */
+ #define VFSTOU9FS(mp) ((struct u9fsmount *)((mp)->mnt_data))
+
+ #endif /* KERNEL */
+
+ #endif
diff -N -c -r /usr/src/sys/9fs/9fs_bio.c ./9fs/9fs_bio.c
*** /usr/src/sys/9fs/9fs_bio.c Wed Dec 31 19:00:00 1969
--- ./9fs/9fs_bio.c Fri Nov 26 12:28:50 1999
***************
*** 0 ****
--- 1,550 ----
+ #include <sys/param.h>
+ #include <sys/sockio.h>
+ #include <sys/proc.h>
+ #include <sys/vnode.h>
+ #include <sys/kernel.h>
+ #include <sys/sysctl.h>
+ #include <sys/malloc.h>
+ #include <sys/mount.h>
+ #include <sys/mbuf.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/systm.h>
+ #include <sys/protosw.h>
+ #include <sys/syslog.h>
+
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_zone.h>
+ #include <vm/vm_prot.h>
+ #include <vm/vm_page.h>
+ #include <vm/vm_object.h>
+ #include <vm/vm_pager.h>
+ #include <vm/vnode_pager.h>
+
+ #include <net/if.h>
+ #include <net/route.h>
+ #include <netinet/in.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ static struct buf *u9fs_getcacheblk __P((struct vnode *vp, daddr_t bn, int size, struct proc *p));
+ static void u9fs_prot_buf __P((struct buf *bp, int off, int n));
+
+ /*
+ * Vnode op for read using bio
+ */
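+ /*
+ * Regular files are read through the buffer cache in blocks of the
+ * mount point's f_iosize: the offset is mapped to a logical block,
+ * the block is filled by u9fs_doio() on a cache miss, and the
+ * requested bytes are copied out with uiomove().  Directories are
+ * read in blocks of nm_readdirsize via u9fs_readdirrpc().
+ */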
+ int
+ u9fs_bioread(vp, uio, ioflag, cred, getpages)
+ register struct vnode *vp;
+ register struct uio *uio;
+ int ioflag;
+ struct ucred *cred;
+ int getpages;
+ {
+ register struct u9fsnode *np = VTOU9FS(vp);
+ register int biosize;
+ off_t diff;
+ struct buf *bp = 0;
+ struct proc *p;
+ struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
+ daddr_t lbn;
+ int error = 0, n = 0, on = 0, bufsize, not_readin;
+
+ if (uio->uio_resid == 0)
+ return (0);
+ if (uio->uio_offset < 0)
+ return (EINVAL);
+ p = uio->uio_procp;
+ if (vp->v_type != VDIR &&
+ (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
+ return (EFBIG);
+ biosize = vp->v_mount->mnt_stat.f_iosize;
+ #if 0
+ if( np->n_qid.vers ) { /* in cache, check revision */
+ error = VOP_GETATTR(vp, &vattr, cred, p);
+ if( error )
+ return error;
+ if( np->n_qid.vers != np->n_dir.dir_qid.vers ) {
+ /* content changed */
+ u9fs_vinvalbuf(vp, V_SAVE, cred, p, 1);
+ }
+ }
+ #endif
+ do {
+ switch (vp->v_type) {
+ case VREG:
+ lbn = uio->uio_offset / biosize;
+ on = uio->uio_offset & (biosize - 1);
+ not_readin = 1;
+
+ #if 0
+ /*
+ * Start the read ahead(s), as required.
+ */
+ if (u9fs_numasync > 0 && nmp->nm_readahead > 0) {
+ for (nra = 0; nra < nmp->nm_readahead &&
+ (off_t)(lbn + 1 + nra) * biosize < np->n_size; nra++) {
+ rabn = lbn + 1 + nra;
+ if (!incore(vp, rabn)) {
+ rabp = u9fs_getcacheblk(vp, rabn, biosize, p);
+ if (!rabp)
+ return (EINTR);
+ if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
+ rabp->b_flags |= (B_READ | B_ASYNC);
+ vfs_busy_pages(rabp, 0);
+ if (u9fs_asyncio(rabp, cred)) {
+ rabp->b_flags |= B_INVAL|B_ERROR;
+ vfs_unbusy_pages(rabp);
+ brelse(rabp);
+ }
+ } else
+ brelse(rabp);
+ }
+ }
+ }
+ #endif
+
+ /*
+ * If the block is in the cache and has the required data
+ * in a valid region, just copy it out.
+ * Otherwise, get the block and write back/read in,
+ * as required.
+ */
+ again:
+ bufsize = biosize;
+ if ((off_t)(lbn + 1) * biosize > np->n_size &&
+ (off_t)(lbn + 1) * biosize - np->n_size < biosize) {
+ bufsize = np->n_size - (off_t)lbn * biosize;
+ bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ }
+ bp = u9fs_getcacheblk(vp, lbn, bufsize, p);
+ if (!bp)
+ return (EINTR);
+ /*
+ * If we are being called from u9fs_getpages, we must
+ * make sure the buffer is a vmio buffer. The vp will
+ * already be setup for vmio but there may be some old
+ * non-vmio buffers attached to it.
+ */
+ if (getpages && !(bp->b_flags & B_VMIO)) {
+ #ifdef DIAGNOSTIC
+ printf("u9fs_bioread: non vmio buf found, discarding\n");
+ #endif
+ bp->b_flags |= B_NOCACHE;
+ if (bp->b_dirtyend > 0) {
+ if ((bp->b_flags & B_DELWRI) == 0)
+ panic("u9fsbioread");
+ if (VOP_BWRITE(bp) == EINTR)
+ return (EINTR);
+ } else
+ brelse(bp);
+ goto again;
+ }
+ if ((bp->b_flags & B_CACHE) == 0) {
+ bp->b_flags |= B_READ;
+ bp->b_flags &= ~(B_DONE | B_ERROR | B_INVAL);
+ not_readin = 0;
+ vfs_busy_pages(bp, 0);
+ error = u9fs_doio(bp, cred, p);
+ if (error) {
+ brelse(bp);
+ return (error);
+ }
+ np->n_qid.vers = np->n_dir.dir_qid.vers;
+ }
+ if (bufsize > on) {
+ n = min((unsigned)(bufsize - on), uio->uio_resid);
+ } else {
+ n = 0;
+ }
+ diff = np->n_size - uio->uio_offset;
+ if (diff < n)
+ n = diff;
+ if (not_readin && n > 0) {
+ if (on < bp->b_validoff || (on + n) > bp->b_validend) {
+ bp->b_flags |= B_NOCACHE;
+ if (bp->b_dirtyend > 0) {
+ if ((bp->b_flags & B_DELWRI) == 0)
+ panic("u9fsbioread");
+ if (VOP_BWRITE(bp) == EINTR)
+ return (EINTR);
+ } else
+ brelse(bp);
+ goto again;
+ }
+ }
+ vp->v_lastr = lbn;
+ diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
+ if (diff < n)
+ n = diff;
+ break;
+ case VDIR:
+ biosize = nmp->nm_readdirsize;
+ lbn = (uoff_t)uio->uio_offset / biosize;
+ on = uio->uio_offset % biosize;
+ bp = u9fs_getcacheblk(vp, lbn, biosize, p);
+ if (!bp)
+ return (EINTR);
+ if ((bp->b_flags & B_CACHE) == 0) {
+ bp->b_flags |= B_READ;
+ vfs_busy_pages(bp, 0);
+ error = u9fs_doio(bp, cred, p);
+ if (error) {
+ brelse(bp);
+ return (error);
+ }
+ np->n_qid.vers = np->n_dir.dir_qid.vers;
+ }
+
+ /*
+ * Make sure we use a signed variant of min() since
+ * the second term may be negative.
+ */
+ n = lmin(uio->uio_resid, biosize - bp->b_resid - on);
+ break;
+ default:
+ printf(" u9fs_bioread: type %x unexpected\n",vp->v_type);
+ break;
+ };
+
+ if (n > 0) {
+ error = uiomove(bp->b_data + on, (int)n, uio);
+ }
+ brelse(bp);
+ } while (error == 0 && uio->uio_resid > 0 && n > 0);
+ return (error);
+ }
+
+ /*
+ * Vnode op for write using bio
+ */
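+ /*
+ * Each write is copied into the corresponding cache block and then
+ * pushed out immediately with bwrite(), so writes here are
+ * synchronous; the block's valid and dirty ranges are updated as the
+ * data is copied in.
+ */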
+ int
+ u9fs_biowrite(vp, uio, ioflag, cred)
+ register struct vnode *vp;
+ register struct uio *uio;
+ register int ioflag;
+ register struct ucred *cred;
+ {
+ register int biosize;
+ struct proc *p = uio->uio_procp;
+ struct u9fsnode *np = VTOU9FS(vp);
+ struct buf *bp;
+ struct vattr vattr;
+ struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
+ daddr_t lbn;
+ int bufsize;
+ int n, on, error = 0;
+
+ if (ioflag & (IO_APPEND | IO_SYNC)) {
+ if (ioflag & IO_APPEND) {
+ error = VOP_GETATTR(vp, &vattr, cred, p);
+ if (error)
+ return (error);
+ uio->uio_offset = np->n_size;
+ }
+ }
+ if (uio->uio_offset < 0)
+ return (EINVAL);
+ if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
+ return (EFBIG);
+ if (uio->uio_resid == 0)
+ return (0);
+
+ /*
+ * Use the mount point's f_iosize so that all buffer cache blocks
+ * will be the same size within a filesystem. u9fs_writerpc will
+ * still use nm_wsize when sizing the rpc's.
+ */
+ biosize = vp->v_mount->mnt_stat.f_iosize;
+ do {
+ lbn = uio->uio_offset / biosize;
+ on = uio->uio_offset & (biosize-1);
+ n = min((unsigned)(biosize - on), uio->uio_resid);
+ if (uio->uio_offset + n > np->n_size) {
+ np->n_size = uio->uio_offset + n;
+ vnode_pager_setsize(vp, np->n_size);
+ }
+ bufsize = biosize;
+ if ((off_t)(lbn + 1) * biosize > np->n_size) {
+ bufsize = np->n_size - (off_t)lbn * biosize;
+ bufsize = (bufsize + DEV_BSIZE - 1) & ~(DEV_BSIZE - 1);
+ }
+ bp = u9fs_getcacheblk(vp, lbn, bufsize, p);
+ if (!bp)
+ return (EINTR);
+ if (bp->b_wcred == NOCRED) {
+ crhold(cred);
+ bp->b_wcred = cred;
+ }
+
+ error = uiomove((char *)bp->b_data + on, n, uio);
+ if (error) {
+ bp->b_flags |= B_ERROR;
+ brelse(bp);
+ return (error);
+ }
+
+ /*
+ * This will keep the buffer and mmapped regions more coherent.
+ */
+ u9fs_prot_buf(bp, on, n);
+ bp->b_dirtyoff = on;
+ bp->b_dirtyend = on + n;
+
+ if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
+ bp->b_validoff > bp->b_dirtyend) {
+ /* XXX: destroys our read cache if not overlapping */
+ /* two choices, neither implemented:
+ 1> keep the bigger (or smaller) piece
+ 2> read in the missing segment
+ */
+ bp->b_validoff = bp->b_dirtyoff;
+ bp->b_validend = bp->b_dirtyend;
+ } else {
+ bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
+ bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
+ }
+
+ error = bwrite(bp);
+ if( error ) {
+ bp->b_flags |= B_ERROR;
+ /* brelse(bp); */
+ return error;
+ }
+ } while (uio->uio_resid > 0 && n > 0);
+ return 0;
+ }
+
+ /*
+ * Do an I/O operation to/from a cache block. This may be called
+ * synchronously or from an u9fsiod.
+ */
+ int
+ u9fs_doio(bp, cr, p)
+ register struct buf *bp;
+ struct ucred *cr;
+ struct proc *p;
+ {
+ register struct uio *uiop;
+ register struct vnode *vp;
+ struct u9fsnode *np;
+ struct u9fsmount *nmp;
+ int error = 0, diff, len;
+ struct uio uio;
+ struct iovec io;
+
+ vp = bp->b_vp;
+ np = VTOU9FS(vp);
+ nmp = VFSTOU9FS(vp->v_mount);
+ uiop = &uio;
+ uiop->uio_iov = &io;
+ uiop->uio_iovcnt = 1;
+ uiop->uio_segflg = UIO_SYSSPACE;
+ uiop->uio_procp = p;
+
+ if (bp->b_flags & B_READ ) {
+ io.iov_len = uiop->uio_resid = bp->b_bcount;
+ io.iov_base = bp->b_data;
+ uiop->uio_rw = UIO_READ;
+ switch (vp->v_type) {
+ case VREG:
+ uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
+ error = u9fs_readrpc(vp, uiop, cr);
+ if (!error) {
+ bp->b_validoff = 0;
+ if (uiop->uio_resid) {
+ /*
+ * If len > 0, there is a hole in the file and
+ * no writes after the hole have been pushed to
+ * the server yet.
+ * Just zero fill the rest of the valid area.
+ */
+ diff = bp->b_bcount - uiop->uio_resid;
+ len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
+ + diff);
+ if (len > 0) {
+ len = min(len, uiop->uio_resid);
+ bzero((char *)bp->b_data + diff, len);
+ bp->b_validend = diff + len;
+ } else
+ bp->b_validend = diff;
+ } else
+ bp->b_validend = bp->b_bcount;
+ }
+ break;
+ case VDIR:
+ uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * nmp->nm_readdirsize;
+ error = u9fs_readdirrpc(vp, uiop, cr);
+ if (error == 0 && uiop->uio_resid == bp->b_bcount)
+ bp->b_flags |= B_INVAL;
+ break;
+ default:
+ printf("u9fs_doio: type %x unexpected\n",vp->v_type);
+ break;
+ };
+ if (error) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = error;
+ }
+ } else {
+ if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
+ bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
+
+ if (bp->b_dirtyend > bp->b_dirtyoff) {
+ io.iov_len = uiop->uio_resid = bp->b_dirtyend
+ - bp->b_dirtyoff;
+ uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
+ + bp->b_dirtyoff;
+ io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
+ uiop->uio_rw = UIO_WRITE;
+ bp->b_flags |= B_WRITEINPROG;
+ error = u9fs_writerpc(vp, uiop, cr);
+ bp->b_flags &= ~B_WRITEINPROG;
+
+ if (error) {
+ bp->b_flags |= B_ERROR;
+ bp->b_error = np->n_error = error;
+ np->n_flag |= NWRITEERR;
+ }
+ bp->b_dirtyoff = bp->b_dirtyend = 0;
+ } else {
+ bp->b_resid = 0;
+ biodone(bp);
+ return (0);
+ }
+ }
+ bp->b_resid = uiop->uio_resid;
+ biodone(bp);
+ return error;
+ }
+
+ /*
+ * Get an u9fs cache block.
+ * Allocate a new one if the block isn't currently in the cache
+ * and return the block marked busy. If the calling process is
+ * interrupted by a signal for an interruptible mount point, return
+ * NULL.
+ */
+ static struct buf *
+ u9fs_getcacheblk(vp, bn, size, p)
+ struct vnode *vp;
+ daddr_t bn;
+ int size;
+ struct proc *p;
+ {
+ register struct buf *bp;
+ struct mount *mp;
+ struct u9fsmount *nmp;
+
+ mp = vp->v_mount;
+ nmp = VFSTOU9FS(mp);
+
+ if (nmp->nm_flag & U9FSMNT_INT) {
+ bp = getblk(vp, bn, size, PCATCH, 0);
+ while (bp == (struct buf *)0) {
+ if (u9fs_sigintr(nmp, p))
+ return ((struct buf *)0);
+ bp = getblk(vp, bn, size, 0, 2 * hz);
+ }
+ } else
+ bp = getblk(vp, bn, size, 0, 0);
+
+ if (vp->v_type == VREG) {
+ int biosize;
+ biosize = mp->mnt_stat.f_iosize;
+ bp->b_blkno = bn * (biosize / DEV_BSIZE);
+ }
+
+ return (bp);
+ }
+
+ static void
+ u9fs_prot_buf(bp, off, n)
+ struct buf *bp;
+ int off;
+ int n;
+ {
+ int pindex, boff, end;
+
+ if ((bp->b_flags & B_VMIO) == 0)
+ return;
+
+ end = round_page(off + n);
+ for (boff = trunc_page(off); boff < end; boff += PAGE_SIZE) {
+ pindex = boff >> PAGE_SHIFT;
+ vm_page_protect(bp->b_pages[pindex], VM_PROT_NONE);
+ }
+ }
+
+ /*
+ * Flush and invalidate all dirty buffers. If another process is already
+ * doing the flush, just wait for completion.
+ */
+ int
+ u9fs_vinvalbuf(vp, flags, cred, p, intrflg)
+ struct vnode *vp;
+ int flags;
+ struct ucred *cred;
+ struct proc *p;
+ int intrflg;
+ {
+ register struct u9fsnode *np = VTOU9FS(vp);
+ struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
+ int error = 0, slpflag, slptimeo;
+
+ if (vp->v_flag & VXLOCK) {
+ return (0);
+ }
+
+ if ((nmp->nm_flag & U9FSMNT_INT) == 0)
+ intrflg = 0;
+ if (intrflg) {
+ slpflag = PCATCH;
+ slptimeo = 2 * hz;
+ } else {
+ slpflag = 0;
+ slptimeo = 0;
+ }
+ /*
+ * First wait for any other process doing a flush to complete.
+ */
+ while (np->n_flag & NFLUSHINPROG) {
+ np->n_flag |= NFLUSHWANT;
+ error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "u9fsvinval",
+ slptimeo);
+ if (error && intrflg && u9fs_sigintr(nmp, p))
+ return (EINTR);
+ }
+
+ /*
+ * Now, flush as required.
+ */
+ np->n_flag |= NFLUSHINPROG;
+ error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
+ while (error) {
+ if (intrflg && u9fs_sigintr(nmp, p)) {
+ np->n_flag &= ~NFLUSHINPROG;
+ if (np->n_flag & NFLUSHWANT) {
+ np->n_flag &= ~NFLUSHWANT;
+ wakeup((caddr_t)&np->n_flag);
+ }
+ return (EINTR);
+ }
+ error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
+ }
+ np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
+ if (np->n_flag & NFLUSHWANT) {
+ np->n_flag &= ~NFLUSHWANT;
+ wakeup((caddr_t)&np->n_flag);
+ }
+ return (0);
+ }
+
diff -N -c -r /usr/src/sys/9fs/9fs_node.c ./9fs/9fs_node.c
*** /usr/src/sys/9fs/9fs_node.c Wed Dec 31 19:00:00 1969
--- ./9fs/9fs_node.c Thu Nov 25 15:36:49 1999
***************
*** 0 ****
--- 1,132 ----
+ #include <sys/param.h>
+ #include <sys/sockio.h>
+ #include <sys/proc.h>
+ #include <sys/vnode.h>
+ #include <sys/kernel.h>
+ #include <sys/sysctl.h>
+ #include <sys/malloc.h>
+ #include <sys/mount.h>
+ #include <sys/mbuf.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/systm.h>
+ #include <sys/protosw.h>
+ #include <sys/syslog.h>
+
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_zone.h>
+
+ #include <net/if.h>
+ #include <net/route.h>
+ #include <netinet/in.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ vm_zone_t u9fsnode_zone;
+ static LIST_HEAD(u9fsnodehashhead, u9fsnode) *u9fsnodehashtbl;
+ static u_long u9fsnodehash;
+ MALLOC_DEFINE(M_U9FSHASH, "U9FS hash", "U9FS hash tables");
+
+ /*
+ * Initialize the u9fsnode zone and
+ * the u9fsnode hash table.
+ */
+ void
+ u9fs_nhinit()
+ {
+ u9fsnode_zone = zinit("U9FSNODE", sizeof(struct u9fsnode), 0, 0, 1);
+ u9fsnodehashtbl = phashinit(desiredvnodes, M_U9FSHASH, &u9fsnodehash);
+ }
+
+ /*
+ * Look up a vnode/u9fsnode by file handle.
+ * Callers must check for mount points!!
+ * On success, a pointer to a locked
+ * u9fsnode structure is returned.
+ */
+ static int u9fs_node_hash_lock;
+
+ int
+ u9fs_nget(mntp, fh, npp, p)
+ struct mount *mntp;
+ register u9fsfh_t fh;
+ struct u9fsnode **npp;
+ struct proc * p;
+ {
+ struct u9fsnode *np;
+ struct u9fsnodehashhead *nhpp;
+ register struct vnode *vp;
+ struct vnode *nvp;
+ int error;
+
+ nhpp = U9FSNOHASH(fh);
+ loop:
+ for (np = nhpp->lh_first; np != 0; np = np->n_hash.le_next) {
+ if (mntp != U9FSTOV(np)->v_mount || fh != np->n_qid.path )
+ continue;
+ vp = U9FSTOV(np);
+ if (vget(vp, LK_EXCLUSIVE, p))
+ goto loop;
+ *npp = np;
+ return(0);
+ }
+ /*
+ * Obtain a lock to prevent a race condition if the getnewvnode()
+ * or MALLOC() below happens to block.
+ */
+ if (u9fs_node_hash_lock) {
+ while (u9fs_node_hash_lock) {
+ u9fs_node_hash_lock = -1;
+ tsleep(&u9fs_node_hash_lock, PVM, "u9fsngt", 0);
+ }
+ goto loop;
+ }
+ u9fs_node_hash_lock = 1;
+
+ /*
+ * allocate before getnewvnode since doing so afterward
+ * might cause a bogus v_data pointer to get dereferenced
+ * elsewhere if zalloc should block.
+ */
+ np = zalloc(u9fsnode_zone);
+
+ error = getnewvnode(VT_U9FS, mntp, u9fs_vnodeop_p, &nvp);
+ if (error) {
+ if (u9fs_node_hash_lock < 0)
+ wakeup(&u9fs_node_hash_lock);
+ u9fs_node_hash_lock = 0;
+ *npp = 0;
+ zfree(u9fsnode_zone, np);
+ return (error);
+ }
+ vp = nvp;
+ bzero((caddr_t)np, sizeof *np);
+ vp->v_data = np;
+ np->n_vnode = vp;
+ /*
+ * Insert the u9fsnode in the hash queue for its new file handle
+ */
+ LIST_INSERT_HEAD(nhpp, np, n_hash);
+ np->n_qid.path = fh;
+ np->n_qid.vers = 0; /* not in cache yet */
+ np->n_fid = 0; /* should be set by the caller */
+ *npp = np;
+
+ if (u9fs_node_hash_lock < 0)
+ wakeup(&u9fs_node_hash_lock);
+ u9fs_node_hash_lock = 0;
+
+ /*
+ * Lock the new u9fsnode.
+ */
+ vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
+
+ return (0);
+ }
diff -N -c -r /usr/src/sys/9fs/9fs_socket.c ./9fs/9fs_socket.c
*** /usr/src/sys/9fs/9fs_socket.c Wed Dec 31 19:00:00 1969
--- ./9fs/9fs_socket.c Thu Nov 25 15:48:46 1999
***************
*** 0 ****
--- 1,503 ----
+ #include <sys/param.h>
+ #include <sys/sockio.h>
+ #include <sys/proc.h>
+ #include <sys/vnode.h>
+ #include <sys/kernel.h>
+ #include <sys/sysctl.h>
+ #include <sys/malloc.h>
+ #include <sys/mount.h>
+ #include <sys/mbuf.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/systm.h>
+ #include <sys/protosw.h>
+ #include <sys/syslog.h>
+
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_zone.h>
+
+ #include <net/if.h>
+ #include <net/route.h>
+ #include <netinet/in.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ static int u9fs_reply __P((struct u9fsreq * req));
+ static int u9fs_send __P((struct socket * so, struct mbuf * mreq, struct u9fsreq * req));
+ static int u9fs_receive __P((struct socket * so, struct mbuf **mrep, struct u9fsreq * req));
+
+ static int u9fs_sndlock __P((int *flagp, int *statep, struct u9fsreq *rep));
+ static void u9fs_sndunlock __P((int *flagp, int *statep));
+ static int u9fs_rcvlock __P((struct u9fsreq *req));
+ static void u9fs_rcvunlock __P((int *flagp, int *statep));
+
+ int
+ u9fs_connect(struct socket ** sop, struct sockaddr * saddr, int sotype, int soproto, struct proc * p)
+ {
+ register struct socket * so;
+ int error, s;
+
+ *sop = 0;
+ error = socreate(saddr->sa_family, sop, sotype, soproto, p);
+ if( error )
+ return error;
+ so = *sop;
+ error = soconnect(so, saddr, p);
+ if( error )
+ return error;
+
+ /*
+ * Wait for the connection to complete. Cribbed from the
+ * connect system call but with the wait timing out so
+ * that interruptible mounts don't hang here for a long time.
+ */
+ s = splnet();
+ while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0)
+ (void) tsleep((caddr_t)&so->so_timeo, PSOCK,
+ "u9fscon", 2 * hz);
+
+ if (so->so_error) {
+ error = so->so_error;
+ so->so_error = 0;
+ splx(s);
+ return error;
+ }
+ splx(s);
+
+ return (0);
+ }
+
+ int u9fs_connect_9auth(struct u9fsmount * nmp, struct u9fs_args * argp, struct socket ** sop)
+ {
+ int error;
+ struct proc * p = & proc0;
+ struct sockaddr *nam;
+
+ error = getsockaddr(&nam, (caddr_t)argp->authaddr, argp->authaddrlen);
+ if( error )
+ return error;
+ error = u9fs_connect(sop, nam, argp->authsotype,
+ argp->authsoproto, p);
+ if( error == 0 )
+ return 0;
+
+ u9fs_disconnect(*sop);
+ *sop = 0;
+ return error;
+ }
+
+ /*
+ * Initialize sockets and congestion for a new U9FS connection.
+ * We do not free the sockaddr on error.
+ */
+ int
+ u9fs_connect_9fs(nmp)
+ register struct u9fsmount *nmp;
+ {
+ register struct socket *so;
+ int error, rcvreserve, sndreserve;
+ struct proc *p = &proc0; /* only used for socreate and sobind */
+
+ error = u9fs_connect(&nmp->nm_so, nmp->nm_nam, nmp->nm_sotype,
+ nmp->nm_soproto, p);
+ if (error)
+ goto bad;
+ so = nmp->nm_so;
+ nmp->nm_soflags = so->so_proto->pr_flags;
+
+ if (nmp->nm_flag & (U9FSMNT_SOFT | U9FSMNT_INT)) {
+ so->so_rcv.sb_timeo = (5 * hz);
+ so->so_snd.sb_timeo = (5 * hz);
+ } else {
+ so->so_rcv.sb_timeo = 0;
+ so->so_snd.sb_timeo = 0;
+ }
+
+ /* XXX: I don't understand this; only one outstanding request? */
+ if (nmp->nm_sotype == SOCK_SEQPACKET) {
+ sndreserve = (nmp->nm_wsize) * 2;
+ rcvreserve = (max(nmp->nm_rsize, nmp->nm_readdirsize)) * 2;
+ } else {
+ if (nmp->nm_sotype != SOCK_STREAM)
+ panic("u9fscon sotype");
+ if (so->so_proto->pr_flags & PR_CONNREQUIRED) {
+ struct sockopt sopt;
+ int val;
+
+ bzero(&sopt, sizeof sopt);
+ sopt.sopt_level = SOL_SOCKET;
+ sopt.sopt_name = SO_KEEPALIVE;
+ sopt.sopt_val = &val;
+ sopt.sopt_valsize = sizeof val;
+ val = 1;
+ sosetopt(so, &sopt);
+ }
+ if (so->so_proto->pr_protocol == IPPROTO_TCP) {
+ struct sockopt sopt;
+ int val;
+
+ bzero(&sopt, sizeof sopt);
+ sopt.sopt_level = IPPROTO_TCP;
+ sopt.sopt_name = TCP_NODELAY;
+ sopt.sopt_val = &val;
+ sopt.sopt_valsize = sizeof val;
+ val = 1;
+ sosetopt(so, &sopt);
+ }
+ sndreserve = (nmp->nm_wsize) * 2;
+ rcvreserve = (nmp->nm_rsize) * 2;
+ }
+ error = soreserve(so, sndreserve, rcvreserve);
+ if (error)
+ goto bad;
+ so->so_rcv.sb_flags |= SB_NOINTR;
+ so->so_snd.sb_flags |= SB_NOINTR;
+
+ /* Initialize other non-zero congestion variables */
+ nmp->nm_sent = 0;
+ return (0);
+
+ bad:
+ u9fs_disconnect(nmp->nm_so);
+ nmp->nm_so = 0;
+ return (error);
+ }
+
+ /*
+ * U9FS disconnect. Clean up and unlink.
+ */
+ void
+ u9fs_disconnect(struct socket * so)
+ {
+ soshutdown(so, 2);
+ soclose(so);
+ }
+
+ /*
+ * Lock a socket against others.
+ * Necessary for STREAM sockets to ensure you get an entire rpc request/reply
+ * and also to avoid race conditions between the processes with u9fs requests
+ * in progress when a reconnect is necessary.
+ */
+ static int
+ u9fs_sndlock(flagp, statep, rep)
+ register int *flagp;
+ register int *statep;
+ struct u9fsreq *rep;
+ {
+ struct proc *p;
+ int slpflag = 0, slptimeo = 0;
+
+ if (rep) {
+ p = rep->r_procp;
+ if (rep->r_nmp->nm_flag & U9FSMNT_INT)
+ slpflag = PCATCH;
+ } else
+ p = (struct proc *)0;
+ while (*statep & U9FSSTA_SNDLOCK) {
+ if (u9fs_sigintr(rep->r_nmp, p))
+ return (EINTR);
+ *statep |= U9FSSTA_WANTSND;
+ (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1),
+ "u9fsndlck", slptimeo);
+ if (slpflag == PCATCH) {
+ slpflag = 0;
+ slptimeo = 2 * hz;
+ }
+ }
+ *statep |= U9FSSTA_SNDLOCK;
+ return (0);
+ }
+
+
+ /*
+ * Unlock the stream socket for others.
+ */
+ static void
+ u9fs_sndunlock(flagp, statep)
+ register int *flagp;
+ register int *statep;
+ {
+
+ if ((*statep & U9FSSTA_SNDLOCK) == 0)
+ panic("u9fs sndunlock");
+ *statep &= ~U9FSSTA_SNDLOCK;
+ if (*statep & U9FSSTA_WANTSND) {
+ *statep &= ~U9FSSTA_WANTSND;
+ wakeup((caddr_t)flagp);
+ }
+ }
+
+ /*
+ * Test for a termination condition pending on the process.
+ * This is used for U9FSMNT_INT mounts.
+ */
+ int
+ u9fs_sigintr(nmp, p)
+ struct u9fsmount *nmp;
+ struct proc * p;
+ {
+ if (!(nmp->nm_flag & U9FSMNT_INT))
+ return (0);
+ if (p && p->p_siglist &&
+ (((p->p_siglist & ~p->p_sigmask) & ~p->p_sigignore) &
+ U9FSINT_SIGMASK))
+ return (EINTR);
+ return (0);
+ }
+
+ /*
+ * This is the u9fs send routine. For connection based socket types, it
+ * must be called with an u9fs_sndlock() on the socket.
+ * "rep == NULL" indicates that it has been called from a server.
+ * For the client side:
+ * - return EINTR if the RPC is terminated, 0 otherwise
+ * - set R_MUSTRESEND if the send fails for any reason
+ * - do any cleanup required by recoverable socket errors (?)
+ * For the server side:
+ * - return EINTR or ERESTART if interrupted by a signal
+ * - return EPIPE if a connection is lost for connection based sockets (TCP...)
+ * - do any cleanup required by recoverable socket errors (?)
+ */
+ static int
+ u9fs_send(so, top, req)
+ register struct socket *so;
+ register struct mbuf *top;
+ struct u9fsreq *req;
+ {
+ int error, soflags, flags;
+
+ soflags = so->so_proto->pr_flags;
+ if (so->so_type == SOCK_SEQPACKET)
+ flags = MSG_EOR;
+ else
+ flags = 0;
+
+ error = so->so_proto->pr_usrreqs->pru_sosend(so, 0, 0, top, 0,
+ flags, req->r_procp);
+ if (error)
+ log(LOG_INFO, "u9fs send error %d for server %s\n",error,
+ req->r_nmp->nm_mountp->mnt_stat.f_mntfromname);
+
+ return (error);
+ }
+
+ static int
+ u9fs_receive(so, mrep, req)
+ register struct socket * so;
+ struct mbuf **mrep;
+ struct u9fsreq * req;
+ {
+ struct uio auio;
+ u_int32_t len;
+ int error = 0, sotype, rcvflg;
+
+ /*
+ * Set up arguments for soreceive()
+ */
+ *mrep = (struct mbuf *)0;
+ sotype = req->r_nmp->nm_sotype;
+
+ /*
+ * For reliable protocols, lock against other senders/receivers
+ * in case a reconnect is necessary.
+ * For SOCK_STREAM, first get the Record Mark to find out how much
+ * more there is to get.
+ * We must lock the socket against other receivers
+ * until we have an entire rpc request/reply.
+ */
+ if (sotype == SOCK_SEQPACKET ) {
+ if( (so->so_state & SS_ISCONNECTED) == 0 )
+ return (EACCES);
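+ /* SOCK_SEQPACKET delivers one 9P message per record; ask for an
+ arbitrarily large amount so soreceive() returns the whole record */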
+ auio.uio_resid = len = 1000000;
+ auio.uio_procp = req->r_procp;
+ do {
+ rcvflg = 0;
+ error = so->so_proto->pr_usrreqs->pru_soreceive
+ (so, 0, &auio, mrep,
+ (struct mbuf **)0, &rcvflg);
+ } while (error == EWOULDBLOCK);
+ len -= auio.uio_resid;
+ }
+ if (error) {
+ m_freem(*mrep);
+ *mrep = (struct mbuf *)0;
+ }
+ return (error);
+ }
+
+ static int
+ u9fs_rcvlock(req)
+ register struct u9fsreq *req;
+ {
+ register int *flagp = &req->r_nmp->nm_flag;
+ register int *statep = &req->r_nmp->nm_state;
+ int slpflag, slptimeo = 0;
+
+ if (*flagp & U9FSMNT_INT)
+ slpflag = PCATCH;
+ else
+ slpflag = 0;
+ while (*statep & U9FSSTA_RCVLOCK) {
+ if (u9fs_sigintr(req->r_nmp, req->r_procp))
+ return (EINTR);
+ *statep |= U9FSSTA_WANTRCV;
+ (void) tsleep((caddr_t)flagp, slpflag | (PZERO - 1), "u9fsrcvlk",
+ slptimeo);
+ /*
+ * If our reply was received while we were sleeping,
+ * then just return without taking the lock to avoid a
+ * situation where a single iod could 'capture' the
+ * receive lock.
+ */
+ if (req->r_mrep != NULL)
+ return (EALREADY);
+ if (slpflag == PCATCH) {
+ slpflag = 0;
+ slptimeo = 2 * hz;
+ }
+ }
+ *statep |= U9FSSTA_RCVLOCK;
+ return (0);
+ }
+
+ /*
+ * Unlock the stream socket for others.
+ */
+ static void
+ u9fs_rcvunlock(flagp, statep)
+ register int *flagp;
+ register int *statep;
+ {
+
+ if ((*statep & U9FSSTA_RCVLOCK) == 0)
+ panic("u9fs rcvunlock");
+ *statep &= ~U9FSSTA_RCVLOCK;
+ if (*statep & U9FSSTA_WANTRCV) {
+ *statep &= ~U9FSSTA_WANTRCV;
+ wakeup((caddr_t)flagp);
+ }
+ }
+
+ /*
+ * Implement receipt of reply on a socket.
+ * We must search through the list of received datagrams matching them
+ * with outstanding requests using the xid, until ours is found.
+ */
+ /* ARGSUSED */
+ static
+ int u9fs_reply(struct u9fsreq * req)
+ {
+ int error;
+ struct mbuf * mrep;
+ register struct u9fsmount *nmp = req->r_nmp;
+ u_short tag;
+ struct u9fsreq * qp;
+
+ /*
+ * Loop around until we get our own reply
+ */
+ for (;;) {
+ /*
+ * Lock against other receivers so that I don't get stuck in
+ * sbwait() after someone else has received my reply for me.
+ * Also necessary for connection based protocols to avoid
+ * race conditions during a reconnect.
+ * If u9fs_rcvlock() returns EALREADY, that means that
+ * the reply has already been received by another
+ * process and we can return immediately. In this
+ * case, the lock is not taken to avoid races with
+ * other processes.
+ */
+ error = u9fs_rcvlock(req);
+ if (error == EALREADY)
+ return (0);
+ if (error)
+ return (error);
+ /*
+ * Get the next Rpc reply off the socket
+ */
+ error = u9fs_receive(nmp->nm_so, &mrep, req);
+ u9fs_rcvunlock(&nmp->nm_flag, &nmp->nm_state);
+ if (error)
+ return (error);
+
+ /* extract the tag */
+ tag = u9p_m_tag(&mrep);
+
+ /*
+ * Loop through the request list to match up the reply
+ * Iff no match, just drop the datagram
+ */
+ for (qp = nmp->nm_reqq.tqh_first; qp != 0; qp = qp->r_chain.tqe_next) {
+ if ( qp->r_mrep == 0 && qp->r_tag == tag )
+ break;
+ }
+ if( qp == 0 ) {
+ m_freem(mrep);
+ continue;
+ }
+
+ if( u9p_m_m2s(&mrep, qp->r_rep) ) { /* freed by m2s */
+ continue;
+ }
+
+ qp->r_mrep = mrep; /* should not be freed until the reply is read */
+
+ if( qp == req )
+ return 0;
+ }
+ }
+
+ int u9fs_request(struct u9fsreq * req, struct u9fsreq * rep, int relm)
+ {
+ struct mbuf * mreq;
+ int error,s;
+ struct u9fsmount * nmp;
+
+ req->r_rep = rep;
+ req->r_mrep = 0;
+ nmp = req->r_nmp;
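+ /* allocate a unique 16-bit tag so the reply can be matched back to this request */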
+ req->r_tag = u9fs_id_new(nmp->nm_tags);
+
+ mreq = u9p_m_s2m(req);
+
+ /*
+ * Chain request into list of outstanding requests. Be sure
+ * to put it LAST so timer finds oldest requests first.
+ */
+ s = splsoftclock();
+ TAILQ_INSERT_TAIL(&nmp->nm_reqq, req, r_chain);
+ splx(s);
+
+ error = u9fs_send(nmp->nm_so, mreq, req);
+
+ if( !error )
+ error = u9fs_reply(req);
+
+ /*
+ * RPC done, unlink the request.
+ */
+ s = splsoftclock();
+ TAILQ_REMOVE(&nmp->nm_reqq, req, r_chain);
+ splx(s);
+
+ u9fs_id_free(nmp->nm_tags, req->r_tag);
+
+ if( !error && relm ) {
+ m_freem(req->r_mrep);
+ req->r_mrep = 0;
+ }
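+ /* 9P reports failures as Rerror replies carrying an error string;
+ collapse them all to EACCES for now */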
+ if( rep->r_type == Rerror )
+ error = EACCES;
+
+ return error;
+ }
+
diff -N -c -r /usr/src/sys/9fs/9fs_subr.c ./9fs/9fs_subr.c
*** /usr/src/sys/9fs/9fs_subr.c Wed Dec 31 19:00:00 1969
--- ./9fs/9fs_subr.c Fri Nov 26 12:28:17 1999
***************
*** 0 ****
--- 1,240 ----
+ #include <sys/param.h>
+ #include <sys/sockio.h>
+ #include <sys/proc.h>
+ #include <sys/vnode.h>
+ #include <sys/kernel.h>
+ #include <sys/sysctl.h>
+ #include <sys/malloc.h>
+ #include <sys/mount.h>
+ #include <sys/mbuf.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/systm.h>
+ #include <sys/protosw.h>
+ #include <sys/syslog.h>
+
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_zone.h>
+
+ #include <net/if.h>
+ #include <net/route.h>
+ #include <netinet/in.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ vm_zone_t u9fsuser_zone;
+ LIST_HEAD(u9fsuserhashhead, u9fsuser) * u9fsuidhashtbl, * u9fsunamehashtbl;
+ u_long u9fsuidhash;
+ u_long u9fsunamehash;
+ MALLOC_DEFINE(M_U9FSBITS, "U9FS bits", "U9FS tag/fid maps");
+
+ static int u9fs_hashname __P((char * name));
+
+ void u9fs_uhinit()
+ {
+ u9fsuser_zone = zinit("U9FSUSER", sizeof(struct u9fsuser), 0, 0, 1);
+ u9fsuidhashtbl = phashinit(U9FS_USER_HASHSIZE, M_U9FSHASH, &u9fsuidhash);
+ u9fsunamehashtbl = phashinit(U9FS_USER_HASHSIZE, M_U9FSHASH, &u9fsunamehash);
+ }
+
+ void
+ u9fs_id_init(bits)
+ bitstr_t ** bits;
+ {
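+ /* one bit per possible 16-bit id; a set bit means the id is free */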
+ bit_alloc(*bits, 0x10000, M_U9FSBITS, M_WAITOK);
+ bit_nset(*bits, 1, 0xffff); /* we don't use zero */
+ }
+
+ u_short
+ u9fs_id_new(bits)
+ bitstr_t * bits;
+ {
+ int v;
+
+ bit_ffs(bits, 0x10000, &v);
+ if( v < 0 )
+ panic("no more u9fs bits!");
+
+ bit_clear(bits, v);
+ return ((u_short)v);
+ }
+
+ void
+ u9fs_id_free(bits, v)
+ bitstr_t * bits;
+ u_short v;
+ {
+ bit_set(bits, v);
+ }
+
+
+ static int u9fs_hashname(char * cp)
+ {
+ int h = 0;
+
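+ /* force NUL termination, then sum the bytes as a simple string hash */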
+ cp[U9FS_NAMELEN-1] = 0;
+ do
+ h += *cp;
+ while ( *cp++ );
+
+ return h;
+ }
+
+ void u9fs_hashuser(uid_t uid, char * name)
+ {
+ int h;
+ struct u9fsuser * u9p, *u9p2;
+ struct u9fsuserhashhead * u9hp;
+
+ if( u9fs_name2uid(name) != 65534 ) /* already hashed by previous mount */
+ return;
+
+ u9p = zalloc(u9fsuser_zone);
+ bzero(u9p, sizeof(*u9p));
+ u9p->u_uid = uid;
+ strncpy(u9p->u_name, name, U9FS_NAMELEN);
+ u9hp = & u9fsuidhashtbl[uid % u9fsuidhash];
+ LIST_INSERT_HEAD(u9hp, u9p, u_hash);
+
+ u9p2 = zalloc(u9fsuser_zone);
+ bcopy(u9p, u9p2, sizeof(*u9p));
+ h = u9fs_hashname(name);
+ u9hp = & u9fsunamehashtbl[h%u9fsunamehash];
+ LIST_INSERT_HEAD(u9hp, u9p2, u_hash);
+ }
+
+ /* name must be at least U9FS_NAMELEN long! */
+ struct u9fsuser * u9fs_finduser(uid_t uid)
+ {
+ struct u9fsuser * u9p;
+ struct u9fsuserhashhead * u9hp;
+
+ u9hp = & u9fsuidhashtbl[uid % u9fsuidhash];
+ LIST_FOREACH(u9p, u9hp, u_hash)
+ if( u9p->u_uid == uid )
+ break;
+
+ return u9p;
+ }
+
+ uid_t u9fs_name2uid(char *name)
+ {
+ struct u9fsuser * u9p;
+ struct u9fsuserhashhead * u9hp;
+ int h;
+
+ h = u9fs_hashname(name);
+ u9hp = & u9fsunamehashtbl[h%u9fsunamehash];
+ LIST_FOREACH(u9p, u9hp, u_hash)
+ if( strcmp(u9p->u_name, name) == 0 )
+ break;
+
+ if( u9p )
+ return u9p->u_uid;
+ else
+ return 65534; /* nobody */
+ }
+
+ /*
+ * copies a uio scatter/gather list to an mbuf chain.
+ */
+ int
+ u9fs_uiotombuf(uiop, mq, siz)
+ register struct uio *uiop;
+ struct mbuf **mq;
+ int siz;
+ {
+ register struct mbuf *m;
+ struct mbuf * top = NULL, **mp; /* NULL in case siz is 0 */
+ int mlen, len, error = 0;
+
+ mp = & top;
+ while(siz) {
+ MGET(m, M_WAIT, MT_DATA);
+ mlen = MLEN;
+ if (siz >= MINCLSIZE) {
+ MCLGET(m, M_WAIT);
+ if ((m->m_flags & M_EXT))
+ mlen = MCLBYTES;
+ }
+ len = min(mlen, siz);
+ error = uiomove(mtod(m, caddr_t), (int)len, uiop);
+ siz -= len;
+ m->m_len = len;
+ *mp = m;
+ if (error)
+ goto release;
+ mp = &m->m_next;
+ }
+ *mq = top;
+ return 0;
+
+ release:
+ if( top )
+ m_freem(top);
+
+ return error;
+ }
+
+ /*
+ * copies mbuf chain to the uio scatter/gather list
+ */
+ int
+ u9fs_mbuftouio(m, uiop, siz)
+ struct mbuf *m;
+ register struct uio *uiop;
+ int siz;
+ {
+ register char *mbufcp, *uiocp;
+ register int xfer, left, len;
+ long uiosiz;
+
+ mbufcp = mtod(m, char *);
+ len = m->m_len;
+ while (siz > 0) {
+ if (uiop->uio_iovcnt <= 0 || uiop->uio_iov == NULL)
+ return (EFBIG);
+ left = uiop->uio_iov->iov_len;
+ uiocp = uiop->uio_iov->iov_base;
+ if (left > siz)
+ left = siz;
+ uiosiz = left;
+ while (left > 0) {
+ while (len == 0) {
+ m = m->m_next;
+ if (m == NULL)
+ return (EBADRPC);
+ mbufcp = mtod(m, caddr_t);
+ len = m->m_len;
+ }
+ xfer = (left > len) ? len : left;
+ if (uiop->uio_segflg == UIO_SYSSPACE)
+ bcopy(mbufcp, uiocp, xfer);
+ else
+ copyout(mbufcp, uiocp, xfer);
+ left -= xfer;
+ len -= xfer;
+ mbufcp += xfer;
+ uiocp += xfer;
+ uiop->uio_offset += xfer;
+ uiop->uio_resid -= xfer;
+ }
+ if (uiop->uio_iov->iov_len <= siz) {
+ uiop->uio_iovcnt--;
+ uiop->uio_iov++;
+ } else {
+ uiop->uio_iov->iov_base += uiosiz;
+ uiop->uio_iov->iov_len -= uiosiz;
+ }
+ siz -= uiosiz;
+ }
+ return (0);
+ }
+
diff -N -c -r /usr/src/sys/9fs/9fs_vfsops.c ./9fs/9fs_vfsops.c
*** /usr/src/sys/9fs/9fs_vfsops.c Wed Dec 31 19:00:00 1969
--- ./9fs/9fs_vfsops.c Mon May 22 16:33:47 2000
***************
*** 0 ****
--- 1,639 ----
+ /*
+ * Copyright (c) 1989, 1993, 1995
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)u9fs_vfsops.c 8.12 (Berkeley) 5/20/95
+ * $Id: u9fs_vfsops.c,v 1.79 1998/12/04 22:54:54 archie Exp $
+ */
+
+ #include <sys/param.h>
+ #include <sys/sockio.h>
+ #include <sys/proc.h>
+ #include <sys/vnode.h>
+ #include <sys/kernel.h>
+ #include <sys/sysctl.h>
+ #include <sys/malloc.h>
+ #include <sys/mount.h>
+ #include <sys/mbuf.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/systm.h>
+ #include <sys/protosw.h>
+ #include <sys/syslog.h>
+
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_zone.h>
+
+ #include <net/if.h>
+ #include <net/route.h>
+ #include <netinet/in.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ vm_zone_t u9fsmount_zone;
+
+ static int u9fs_mount __P(( struct mount *mp, char *path, caddr_t data,
+ struct nameidata *ndp, struct proc *p));
+ static int u9fs_start __P(( struct mount *mp, int flags,
+ struct proc *p));
+ static int u9fs_unmount __P(( struct mount *mp, int mntflags,
+ struct proc *p));
+ static int u9fs_root __P(( struct mount *mp, struct vnode **vpp));
+ static int u9fs_quotactl __P(( struct mount *mp, int cmds, uid_t uid,
+ caddr_t arg, struct proc *p));
+ static int u9fs_statfs __P(( struct mount *mp, struct statfs *sbp,
+ struct proc *p));
+ static int u9fs_sync __P(( struct mount *mp, int waitfor,
+ struct ucred *cred, struct proc *p));
+ static int u9fs_vptofh __P(( struct vnode *vp, struct fid *fhp));
+ static int u9fs_fhtovp __P((struct mount *mp, struct fid *fhp,
+ struct sockaddr *nam, struct vnode **vpp,
+ int *exflagsp, struct ucred **credanonp));
+ static int u9fs_vget __P((struct mount *, ino_t, struct vnode **));
+ static int u9fs_init __P((struct vfsconf *vfsp));
+ int u9fs_uninit __P((struct vfsconf *vfsp));
+
+ /* */
+ static int mountu9fs __P((struct u9fs_args *,struct mount *,
+ struct sockaddr *,char *,char *,struct vnode **, struct proc *p));
+ static int u9fs_iosize __P((struct u9fsmount *nmp));
+ static void u9fs_decode_args __P((struct u9fsmount *nmp, struct u9fs_args *argp, struct proc *p));
+
+ /*
+ * u9fs vfs operations.
+ */
+ static struct vfsops u9fs_vfsops = {
+ u9fs_mount,
+ u9fs_start,
+ u9fs_unmount,
+ u9fs_root,
+ u9fs_quotactl,
+ u9fs_statfs,
+ u9fs_sync,
+ u9fs_vget,
+ u9fs_fhtovp,
+ u9fs_vptofh,
+ u9fs_init,
+ u9fs_uninit,
+ 0
+ };
+ VFS_SET(u9fs_vfsops, u9fs, VFCF_NETWORK);
+
+ /*
+ * u9fs statfs call
+ */
+ static int
+ u9fs_statfs(mp, sbp, p)
+ struct mount *mp;
+ register struct statfs *sbp;
+ struct proc *p;
+ {
+ /* the file server sits on a WORM with effectively
+ infinite storage; 9P has no statfs call */
+ return 0;
+ }
+
+ /*
+ * Common code for mount and mountroot
+ */
+ static int
+ mountu9fs(argp, mp, nam, pth, hst, vpp, p)
+ register struct u9fs_args *argp;
+ register struct mount *mp;
+ struct sockaddr *nam;
+ char *pth, *hst;
+ struct vnode **vpp;
+ struct proc *p;
+ {
+ register struct u9fsmount *nmp;
+ struct u9fsnode *np;
+ int error;
+ struct vattr attrs;
+ struct u9fsreq req, rep;
+ char * mntpoint;
+ struct u9fsuser * u9p;
+ struct socket * so;
+
+ if (mp->mnt_flag & MNT_UPDATE) {
+ #if 0
+ nmp = VFSTONFS(mp);
+ /* update paths, file handles, etc, here XXX */
+ FREE(nam, M_SONAME);
+ #endif
+ return (0);
+ } else {
+ nmp = zalloc(u9fsmount_zone);
+ bzero((caddr_t)nmp, sizeof (struct u9fsmount));
+ #if 0
+ TAILQ_INIT(&nmp->nm_uidlruhead);
+ TAILQ_INIT(&nmp->nm_bufq);
+ #endif
+ mp->mnt_data = (qaddr_t)nmp;
+ }
+ vfs_getnewfsid(mp);
+ nmp->nm_mountp = mp;
+
+ nmp->nm_maxfilesize = (u_int64_t)0xffffffffffffffffLL;
+
+ nmp->nm_wsize = U9FS_MAXFDATA;
+ nmp->nm_rsize = U9FS_MAXFDATA;
+ nmp->nm_readdirsize = U9FS_MAXDDATA;
+ bcopy(hst, mp->mnt_stat.f_mntfromname, MNAMELEN);
+ bcopy(pth, mp->mnt_stat.f_mntonname, MNAMELEN);
+ nmp->nm_nam = nam;
+
+ mntpoint = index(hst, '/');
+ if( mntpoint )
+ mntpoint++;
+ else
+ mntpoint = ""; /* no tree named after the host; attach to the default */
+
+ /* Set up the sockets and per-host congestion */
+ nmp->nm_sotype = argp->sotype;
+ nmp->nm_soproto = argp->proto;
+
+ u9fs_decode_args(nmp, argp, p);
+
+ lockinit(& nmp->nm_lock, PVFS, "u9fsmount", 0, 0);
+ u9fs_id_init(&nmp->nm_tags);
+ u9fs_id_init(&nmp->nm_fids);
+ TAILQ_INIT(&nmp->nm_reqq);
+
+ if ((error = u9fs_connect_9fs(nmp)))
+ goto bad;
+
+ /* "Tnop 1", "Tsession 1 0", "Tattach 1 1 none main 0 0", */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+
+ req.r_type = Tnop;
+ error = u9fs_request(& req, & rep, 1);
+ if( error )
+ goto bad;
+
+ req.r_type = Tsession;
+ /* bzero(req.r_chal, sizeof(req.r_chal)); */
+ u9auth_genchal(req.r_chal);
+ error = u9fs_request(& req, & rep, 1);
+ if( error )
+ goto bad;
+
+ if( argp->authaddr ) {
+ /* get tickets from the auth server */
+ error = u9fs_connect_9auth(nmp, argp, & so);
+ if( error )
+ goto bad;
+ u9p = u9fs_finduser(u9fs_name2uid(argp->user));
+ error = u9auth_gettickets(so, & rep, argp->user, u9p->u_ckey,
+ req.r_ticket, req.r_auth, p);
+ u9fs_disconnect(so);
+ if( error )
+ goto bad;
+ }
+
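+ /* attach as the requested user; the aname (the part of the host
+ spec after '/') selects the served tree */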
+ req.r_type = Tattach;
+ req.r_fid = u9fs_id_new(nmp->nm_fids);
+ strcpy(req.r_uname, argp->user);
+ strcpy(req.r_aname, mntpoint);
+ error = u9fs_request(& req, & rep, 1);
+ if( error )
+ goto bad;
+ nmp->nm_fh = rep.r_qid.path;
+ nmp->nm_fid = req.r_fid;
+ /* XXX: we should have checked our challenge to the server! */
+
+ /*
+ * This is silly, but it has to be set so that vinifod() works.
+ * We do not want to do an u9fs_statfs() here since we can get
+ * stuck on a dead server and we are holding a lock on the mount
+ * point.
+ */
+ mp->mnt_stat.f_iosize = u9fs_iosize(nmp);
+
+ /*
+ * A reference count is needed on the u9fsnode representing the
+ * remote root. If this object is not persistent, then backward
+ * traversals of the mount point (i.e. "..") will not work if
+ * the u9fsnode gets flushed out of the cache. Ufs does not have
+ * this problem, because one can identify root inodes by their
+ * number == ROOTINO (2).
+ */
+ error = u9fs_nget(mp, nmp->nm_fh, &np, p);
+ if (error)
+ goto bad;
+ np->n_fid = nmp->nm_fid;
+
+ nmp->nm_authuid = p->p_ucred->cr_uid;
+
+ *vpp = U9FSTOV(np);
+
+ /*
+ * Get file attributes for the mountpoint. This has the side
+ * effect of filling in (*vpp)->v_type with the correct value.
+ */
+ VOP_GETATTR(*vpp, &attrs, p->p_ucred, p);
+
+ /*
+ * Lose the lock but keep the ref.
+ */
+ VOP_UNLOCK(*vpp, 0, p);
+
+ return (0);
+ bad:
+ u9fs_disconnect(nmp->nm_so);
+ zfree(u9fsmount_zone, nmp);
+ FREE(nam, M_SONAME);
+ return (error);
+ }
+
+ /*
+ * VFS Operations.
+ *
+ * mount system call
+ * It seems a bit dumb to copyinstr() the host and path here and then
+ * bcopy() them in mountu9fs(), but I wanted to detect errors before
+ * doing the sockargs() call because sockargs() allocates an mbuf and
+ * an error after that means that I have to release the mbuf.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_mount(mp, path, data, ndp, p)
+ struct mount *mp;
+ char *path;
+ caddr_t data;
+ struct nameidata *ndp;
+ struct proc *p;
+ {
+ int error;
+ struct u9fs_args args;
+ struct sockaddr *nam;
+ struct vnode *vp;
+ char pth[MNAMELEN], hst[MNAMELEN];
+ size_t len;
+
+ if( path == NULL )
+ return (EOPNOTSUPP);
+
+ error = copyin(data, (caddr_t)&args, sizeof (struct u9fs_args));
+ if (error)
+ return (error);
+
+ if (args.version != U9FS_ARGSVERSION)
+ return (EPROGMISMATCH);
+
+ if (mp->mnt_flag & MNT_UPDATE) {
+ #if 0
+ register struct u9fsmount *nmp = VFSTONFS(mp);
+
+ if (nmp == NULL)
+ return (EIO);
+ /*
+ * When doing an update, we can't change from or to
+ * v3 and/or nqu9fs, or change cookie translation
+ */
+ args.flags = (args.flags &
+ ~(NFSMNT_NFSV3|NFSMNT_NQNFS /*|NFSMNT_XLATECOOKIE*/)) |
+ (nmp->nm_flag &
+ (NFSMNT_NFSV3|NFSMNT_NQNFS /*|NFSMNT_XLATECOOKIE*/));
+ u9fs_decode_args(nmp, &args, p);
+ #endif
+ return (0);
+ }
+
+ error = copyinstr(path, pth, MNAMELEN-1, &len);
+ if (error)
+ return (error);
+ bzero(&pth[len], MNAMELEN - len);
+ error = copyinstr(args.hostname, hst, MNAMELEN-1, &len);
+ if (error)
+ return (error);
+ bzero(&hst[len], MNAMELEN - len);
+ /* sockargs() call must be after above copyin() calls */
+ error = getsockaddr(&nam, (caddr_t)args.addr, args.addrlen);
+ if (error)
+ return (error);
+ error = mountu9fs(&args, mp, nam, pth, hst, &vp, p);
+ return (error);
+ }
+
+ /*
+ * unmount system call
+ */
+ static int
+ u9fs_unmount(mp, mntflags, p)
+ struct mount *mp;
+ int mntflags;
+ struct proc *p;
+ {
+ register struct u9fsmount *nmp;
+ struct u9fsnode *np;
+ struct vnode *vp;
+ int error, flags = 0;
+
+ if (mntflags & MNT_FORCE)
+ flags |= FORCECLOSE;
+ nmp = VFSTOU9FS(mp);
+
+ if( p->p_ucred->cr_uid != nmp->nm_authuid )
+ return (EPERM);
+
+ /*
+ * Goes something like this..
+ * - Check for activity on the root vnode (other than ourselves).
+ * - Call vflush() to clear out vnodes for this file system,
+ * except for the root vnode.
+ * - Decrement reference on the vnode representing remote root.
+ * - Close the socket
+ * - Free up the data structures
+ */
+ /*
+ * We need to decrement the ref. count on the u9fsnode representing
+ * the remote root. See comment in mountu9fs(). The VFS unmount()
+ * has done vput on this vnode, otherwise we would get deadlock!
+ */
+ error = u9fs_nget(mp, nmp->nm_fh, &np, p);
+ if (error)
+ return(error);
+ vp = U9FSTOV(np);
+ if (vp->v_usecount > 2) {
+ vput(vp);
+ return (EBUSY);
+ }
+
+ error = vflush(mp, vp, flags);
+ if (error) {
+ vput(vp);
+ return (error);
+ }
+
+ /*
+ * We are now committed to the unmount.
+ */
+ /*
+ * There are two reference counts and one lock to get rid of here.
+ */
+ vput(vp);
+ vrele(vp);
+ vgone(vp);
+ u9fs_disconnect(nmp->nm_so);
+ FREE(nmp->nm_nam, M_SONAME);
+
+ zfree(u9fsmount_zone, nmp);
+ return (0);
+ }
+
+ /*
+ * Return root of a filesystem
+ */
+ static int
+ u9fs_root(mp, vpp)
+ struct mount *mp;
+ struct vnode **vpp;
+ {
+ register struct vnode *vp;
+ struct u9fsmount *nmp;
+ struct u9fsnode *np;
+ int error;
+
+ nmp = VFSTOU9FS(mp);
+ error = u9fs_nget(mp, nmp->nm_fh, &np, curproc); /* XXX */
+ if (error)
+ return (error);
+ vp = U9FSTOV(np);
+ if (vp->v_type == VNON)
+ vp->v_type = VDIR;
+ vp->v_flag |= VROOT;
+ *vpp = vp;
+ return (0);
+ }
+
+ extern int syncprt;
+
+ /*
+ * Flush out the buffer cache
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_sync(mp, waitfor, cred, p)
+ struct mount *mp;
+ int waitfor;
+ struct ucred *cred;
+ struct proc *p;
+ {
+ /* no cache yet */
+ return 0;
+ }
+
+ /*
+ * U9FS flat namespace lookup.
+ * Currently unsupported.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_vget(mp, ino, vpp)
+ struct mount *mp;
+ ino_t ino;
+ struct vnode **vpp;
+ {
+
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * At this point, this should never happen
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
+ register struct mount *mp;
+ struct fid *fhp;
+ struct sockaddr *nam;
+ struct vnode **vpp;
+ int *exflagsp;
+ struct ucred **credanonp;
+ {
+
+ return (EINVAL);
+ }
+
+ /*
+ * Vnode pointer to File handle, should never happen either
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_vptofh(vp, fhp)
+ struct vnode *vp;
+ struct fid *fhp;
+ {
+
+ return (EINVAL);
+ }
+
+ /*
+ * Vfs start routine, a no-op.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_start(mp, flags, p)
+ struct mount *mp;
+ int flags;
+ struct proc *p;
+ {
+
+ return (0);
+ }
+
+ /*
+ * Do operations associated with quotas, not supported
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_quotactl(mp, cmd, uid, arg, p)
+ struct mount *mp;
+ int cmd;
+ uid_t uid;
+ caddr_t arg;
+ struct proc *p;
+ {
+
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * Called once to initialize data structures...
+ */
+ int
+ u9fs_init(vfsp)
+ struct vfsconf *vfsp;
+ {
+ u9fsmount_zone = zinit("U9FSMOUNT", sizeof(struct u9fsmount), 0, 0, 1);
+ u9fs_nhinit(); /* Init the u9fsnode table */
+ u9fs_uhinit();
+ return 0;
+ }
+
+ int
+ u9fs_uninit(vfsp)
+ struct vfsconf *vfsp;
+ {
+ return 0;
+ }
+
+ static int
+ u9fs_iosize(nmp)
+ struct u9fsmount* nmp;
+ {
+ int iosize;
+
+ /*
+ * Calculate the size used for io buffers. Use the larger
+ * of the two sizes to minimise u9fs requests but make sure
+ * that it is at least one VM page to avoid wasting buffer
+ * space.
+ */
+ iosize = max(nmp->nm_rsize, nmp->nm_wsize);
+ if (iosize < PAGE_SIZE) iosize = PAGE_SIZE;
+ return iosize;
+ }
+
+ static void
+ u9fs_decode_args(nmp, argp, p)
+ struct u9fsmount *nmp;
+ struct u9fs_args *argp;
+ struct proc * p;
+ {
+ int s, i;
+ int maxio;
+ struct p9user * p9p, p9u;
+ struct u9fsuser * u9p;
+
+ s = splnet();
+ /* Update flags atomically. Don't change the lock bits. */
+ nmp->nm_flag = argp->flags | nmp->nm_flag;
+ splx(s);
+
+ maxio = U9FS_MAXFDATA;
+
+ if (argp->wsize > 0) {
+ nmp->nm_wsize = argp->wsize;
+ /* Round down to multiple of blocksize */
+ nmp->nm_wsize &= ~(U9FS_FABLKSIZE - 1);
+ if (nmp->nm_wsize <= 0)
+ nmp->nm_wsize = U9FS_FABLKSIZE;
+ }
+ if (nmp->nm_wsize > maxio)
+ nmp->nm_wsize = maxio;
+ if (nmp->nm_wsize > MAXBSIZE)
+ nmp->nm_wsize = MAXBSIZE;
+
+ if (argp->rsize > 0) {
+ nmp->nm_rsize = argp->rsize;
+ /* Round down to multiple of blocksize */
+ nmp->nm_rsize &= ~(U9FS_FABLKSIZE - 1);
+ if (nmp->nm_rsize <= 0)
+ nmp->nm_rsize = U9FS_FABLKSIZE;
+ }
+ if (nmp->nm_rsize > maxio)
+ nmp->nm_rsize = maxio;
+ if (nmp->nm_rsize > MAXBSIZE)
+ nmp->nm_rsize = MAXBSIZE;
+
+ if (argp->readdirsize > 0) {
+ nmp->nm_readdirsize = argp->readdirsize;
+ }
+ if (nmp->nm_readdirsize > maxio)
+ nmp->nm_readdirsize = maxio;
+ if (nmp->nm_readdirsize > nmp->nm_rsize)
+ nmp->nm_readdirsize = nmp->nm_rsize;
+
+ if( argp->nusers ) {
+ p9p = argp->users;
+ for(i = 0; i < argp->nusers; i++) {
+ copyin(p9p, &p9u, sizeof(p9u));
+ u9fs_hashuser(p9u.p9_uid, p9u.p9_name);
+ p9p ++;
+ }
+ printf("%d p9users loaded\n", argp->nusers);
+ }
+
+ if( (u9p = u9fs_finduser(u9fs_name2uid(argp->user))) ) {
+ bcopy(argp->key, u9p->u_ckey, U9AUTH_DESKEYLEN);
+ }
+ }
diff -N -c -r /usr/src/sys/9fs/9fs_vnops.c ./9fs/9fs_vnops.c
*** /usr/src/sys/9fs/9fs_vnops.c Wed Dec 31 19:00:00 1969
--- ./9fs/9fs_vnops.c Mon May 22 11:40:00 2000
***************
*** 0 ****
--- 1,1794 ----
+ /*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Rick Macklem at The University of Guelph.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)u9fs_vnops.c 8.16 (Berkeley) 5/27/95
+ * $Id: u9fs_vnops.c,v 1.116.2.3 1999/02/13 08:03:47 dillon Exp $
+ */
+
+
+ /*
+ * vnode op calls for 9FS
+ */
+
+ #include "opt_inet.h"
+
+ #include <sys/param.h>
+ #include <sys/kernel.h>
+ #include <sys/systm.h>
+ #include <sys/resourcevar.h>
+ #include <sys/proc.h>
+ #include <sys/mount.h>
+ #include <sys/buf.h>
+ #include <sys/malloc.h>
+ #include <sys/mbuf.h>
+ #include <sys/namei.h>
+ #include <sys/socket.h>
+ #include <sys/vnode.h>
+ #include <sys/dirent.h>
+ #include <sys/fcntl.h>
+ #include <sys/lockf.h>
+ #include <sys/stat.h>
+ #include <sys/sysctl.h>
+
+ #include <vm/vm.h>
+ #include <vm/vm_extern.h>
+ #include <vm/vm_zone.h>
+ #include <vm/vm_prot.h>
+ #include <vm/vm_page.h>
+ #include <vm/vm_object.h>
+ #include <vm/vm_pager.h>
+ #include <vm/vnode_pager.h>
+
+ #include <net/if.h>
+ #include <netinet/in.h>
+ #include <netinet/in_var.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ #define u9fs_poll vop_nopoll
+ static int u9fs_lookup __P((struct vop_lookup_args *));
+ static int u9fs_create __P((struct vop_create_args *));
+ static int u9fs_mknod __P((struct vop_mknod_args *));
+ static int u9fs_open __P((struct vop_open_args *));
+ static int u9fs_close __P((struct vop_close_args *));
+ static int u9fs_access __P((struct vop_access_args *));
+ static int u9fs_getattr __P((struct vop_getattr_args *));
+ static int u9fs_setattr __P((struct vop_setattr_args *));
+ static int u9fs_read __P((struct vop_read_args *));
+ static int u9fs_mmap __P((struct vop_mmap_args *));
+ static int u9fs_fsync __P((struct vop_fsync_args *));
+ static int u9fs_remove __P((struct vop_remove_args *));
+ static int u9fs_link __P((struct vop_link_args *));
+ static int u9fs_rename __P((struct vop_rename_args *));
+ static int u9fs_mkdir __P((struct vop_mkdir_args *));
+ static int u9fs_rmdir __P((struct vop_rmdir_args *));
+ static int u9fs_symlink __P((struct vop_symlink_args *));
+ static int u9fs_readdir __P((struct vop_readdir_args *));
+ static int u9fs_bmap __P((struct vop_bmap_args *));
+ static int u9fs_strategy __P((struct vop_strategy_args *));
+ static int u9fs_readlink __P((struct vop_readlink_args *));
+ static int u9fs_print __P((struct vop_print_args *));
+ static int u9fs_advlock __P((struct vop_advlock_args *));
+ static int u9fs_bwrite __P((struct vop_bwrite_args *));
+ static int u9fs_abortop __P((struct vop_abortop_args *));
+ static int u9fs_getpages __P((struct vop_getpages_args *));
+ static int u9fs_putpages __P((struct vop_putpages_args *));
+ static int u9fs_inactive __P((struct vop_inactive_args *));
+ static int u9fs_reclaim __P((struct vop_reclaim_args *));
+ static int u9fs_write __P((struct vop_write_args *));
+
+ /*
+ * Global vfs data structures for u9fs
+ */
+ vop_t **u9fs_vnodeop_p;
+ static struct vnodeopv_entry_desc u9fs_vnodeop_entries[] = {
+ { &vop_default_desc, (vop_t *) vop_defaultop },
+ { &vop_abortop_desc, (vop_t *) u9fs_abortop },
+ { &vop_access_desc, (vop_t *) u9fs_access },
+ { &vop_advlock_desc, (vop_t *) u9fs_advlock },
+ { &vop_bmap_desc, (vop_t *) u9fs_bmap },
+ { &vop_bwrite_desc, (vop_t *) u9fs_bwrite },
+ { &vop_close_desc, (vop_t *) u9fs_close },
+ { &vop_create_desc, (vop_t *) u9fs_create },
+ { &vop_fsync_desc, (vop_t *) u9fs_fsync },
+ { &vop_getattr_desc, (vop_t *) u9fs_getattr },
+ { &vop_getpages_desc, (vop_t *) u9fs_getpages },
+ { &vop_putpages_desc, (vop_t *) u9fs_putpages },
+ { &vop_inactive_desc, (vop_t *) u9fs_inactive },
+ { &vop_lease_desc, (vop_t *) vop_null },
+ { &vop_link_desc, (vop_t *) u9fs_link },
+ { &vop_lock_desc, (vop_t *) vop_sharedlock },
+ { &vop_lookup_desc, (vop_t *) u9fs_lookup },
+ { &vop_mkdir_desc, (vop_t *) u9fs_mkdir },
+ { &vop_mknod_desc, (vop_t *) u9fs_mknod },
+ { &vop_mmap_desc, (vop_t *) u9fs_mmap },
+ { &vop_open_desc, (vop_t *) u9fs_open },
+ { &vop_poll_desc, (vop_t *) vop_nopoll },
+ { &vop_print_desc, (vop_t *) u9fs_print },
+ { &vop_read_desc, (vop_t *) u9fs_read },
+ { &vop_readdir_desc, (vop_t *) u9fs_readdir },
+ { &vop_readlink_desc, (vop_t *) u9fs_readlink },
+ { &vop_reclaim_desc, (vop_t *) u9fs_reclaim },
+ { &vop_remove_desc, (vop_t *) u9fs_remove },
+ { &vop_rename_desc, (vop_t *) u9fs_rename },
+ { &vop_rmdir_desc, (vop_t *) u9fs_rmdir },
+ { &vop_setattr_desc, (vop_t *) u9fs_setattr },
+ { &vop_strategy_desc, (vop_t *) u9fs_strategy },
+ { &vop_symlink_desc, (vop_t *) u9fs_symlink },
+ { &vop_write_desc, (vop_t *) u9fs_write },
+ { NULL, NULL }
+ };
+ static struct vnodeopv_desc u9fs_vnodeop_opv_desc =
+ { &u9fs_vnodeop_p, u9fs_vnodeop_entries };
+ VNODEOP_SET(u9fs_vnodeop_opv_desc);
+
+ extern vm_zone_t u9fsnode_zone;
+
+ static int u9fs_trunc(struct vnode * vp, struct ucred * cred, struct proc * p);
+ static void u9fs_free_fid __P((u_short fid, struct u9fsmount * nmp, struct proc * p));
+ static void u9fs_updtcache __P((struct u9fsnode *, struct u9fsreq *));
+
+ #define DIRHDSIZ (sizeof (struct dirent) - (MAXNAMLEN + 1))
+
+ /* open returns a qid, used for cache consistency checks */
+ static void
+ u9fs_updtcache(struct u9fsnode * np, struct u9fsreq * rep)
+ {
+ if( rep->r_type != Rerror )
+ np->n_dir.dir_qid = rep->r_qid;
+ }
+
+ static int
+ u9fs_trunc(vp, cred, p)
+ register struct vnode * vp;
+ struct ucred * cred;
+ struct proc * p;
+ {
+ struct u9fsnode *np = VTOU9FS(vp);
+ struct u9fsmount * nmp = VFSTOU9FS(vp->v_mount);
+ int error;
+ u_short newfid;
+ struct u9fsreq req, rep;
+ u_char mode;
+
+ /*
+ * Disallow write attempts on filesystems mounted read-only;
+ * unless the file is a socket, fifo, or a block or character
+ * device resident on the filesystem.
+ */
+ if ( (vp->v_mount->mnt_flag & MNT_RDONLY)) {
+ switch (vp->v_type) {
+ case VREG:
+ case VDIR:
+ case VLNK:
+ return (EROFS);
+ default:
+ break;
+ }
+ }
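+ /* 9P has no separate truncate call; clone the fid and open it
+ for writing with the truncate bit set */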
+ mode = U9P_MODE_WR | U9P_MODE_TRUNC;
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ newfid = u9fs_id_new(nmp->nm_fids);
+ req.r_type = Tclone;
+ req.r_fid = np->n_fid;
+ req.r_newfid = newfid;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ req.r_type = Topen;
+ req.r_fid = newfid;
+ req.r_mode = mode;
+ error = u9fs_request(&req, &rep, 1);
+ if( !error )
+ u9fs_vinvalbuf(vp, 0, cred, p, 0);
+ if( error || np->n_wrfid ) {
+ u9fs_free_fid(newfid, nmp, p);
+ return error;
+ }
+
+ if( !U9P_PERM_EXCL(np->n_dir.dir_mode))
+ np->n_wrfid = newfid;
+ else
+ u9fs_free_fid(newfid, nmp, p);
+
+ return (0);
+ }
+
+ /*
+ * u9fs access vnode op.
+ */
+ static int
+ u9fs_access(ap)
+ struct vop_access_args /* {
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ struct u9fsnode *np = VTOU9FS(vp);
+ struct u9fsmount * nmp = VFSTOU9FS(vp->v_mount);
+ struct proc * p = ap->a_p;
+ int error, a_mode = ap->a_mode;
+ u_short * fidp = 0, *fidp2 = 0, newfid;
+ struct u9fsreq req, rep;
+ u_char mode;
+ struct ucred * cred = ap->a_cred;
+
+ /* XXX: for the moment, only the user who authenticated the mount has access */
+ if( cred->cr_uid != nmp->nm_authuid )
+ return (EPERM);
+
+ /*
+ * Disallow write attempts on filesystems mounted read-only;
+ * unless the file is a socket, fifo, or a block or character
+ * device resident on the filesystem.
+ */
+ if ((a_mode & VWRITE) && (vp->v_mount->mnt_flag & MNT_RDONLY)) {
+ switch (vp->v_type) {
+ case VREG:
+ case VDIR:
+ case VLNK:
+ return (EROFS);
+ default:
+ break;
+ }
+ }
+
+ /* we can't probe an exclusive-use file by opening it here */
+ if( U9P_PERM_EXCL(np->n_dir.dir_mode) )
+ return 0;
+
+ /* check permission by actually opening it */
+ /* translate mode */
+ mode = 0;
+ if( a_mode & VREAD ) {
+ fidp = &np->n_rdfid;
+ mode = U9P_MODE_RD;
+ }
+ if( a_mode & VWRITE ) {
+ fidp = &np->n_wrfid;
+ mode = U9P_MODE_WR;
+ }
+ if( (a_mode & (VREAD|VWRITE)) == (VREAD|VWRITE) ) {
+ fidp2 = &np->n_rdfid;
+ mode = U9P_MODE_RDWR;
+ }
+
+ if( a_mode & VEXEC ) {
+ fidp = &np->n_rdfid;
+ if( vp->v_type == VREG )
+ mode = U9P_MODE_EX;
+ }
+
+ if( fidp2 == 0 )
+ fidp2 = fidp;
+
+ /* open fid mode */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ newfid = u9fs_id_new(nmp->nm_fids);
+ req.r_type = Tclone;
+ req.r_fid = np->n_fid;
+ req.r_newfid = newfid;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ req.r_type = Topen;
+ req.r_fid = newfid;
+ req.r_mode = mode;
+ error = u9fs_request(&req, &rep, 1);
+ u9fs_updtcache(np, &rep);
+ if( error || (*fidp && *fidp2 ) ) {
+ u9fs_free_fid(newfid, nmp, p);
+ return error;
+ }
+
+ *fidp = *fidp2 = newfid;
+
+ return (0);
+ }
+
+ /*
+ * u9fs open vnode op
+ * Check to see if the type is ok
+ * and that deletion is not in progress.
+ * For paged in text files, you will need to flush the page cache
+ * if consistency is lost.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_open(ap)
+ struct vop_open_args /* {
+ struct vnode *a_vp;
+ int a_mode;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ struct u9fsnode *np = VTOU9FS(vp);
+ int error=0, a_mode = ap->a_mode;
+ u_short * fidp = 0, *fidp2 = 0, newfid;
+ struct u9fsmount * nmp = VFSTOU9FS(vp->v_mount);
+ struct proc * p = ap->a_p;
+ struct u9fsreq req, rep;
+ u_char mode;
+ struct ucred * cred = ap->a_cred;
+
+ /* assume access permissions have been checked via VOP_ACCESS */
+ /* the file is actually opened already, except in the read/write case */
+
+ if( a_mode & (O_EXCL|O_SHLOCK|O_EXLOCK) ) {
+ #if 0 /* XXX: what can we do here? */
+ return (EOPNOTSUPP);
+ #endif
+ }
+
+ /* translate mode */
+ mode = 0;
+ if( a_mode & FREAD ) {
+ fidp = &np->n_rdfid;
+ mode = U9P_MODE_RD;
+ }
+ if( a_mode & FWRITE ) {
+ fidp = &np->n_wrfid;
+ mode = U9P_MODE_WR;
+ }
+ if( (a_mode & (FREAD|FWRITE)) == (FREAD|FWRITE) ) {
+ fidp2 = & np->n_rdfid;
+ mode = U9P_MODE_RDWR;
+ }
+ if( fidp2 == 0)
+ fidp2 = fidp;
+
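+ /* exclusive-use files may only be open once; if we already hold an
+ open fid, refuse this open */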
+ if( U9P_PERM_EXCL(np->n_dir.dir_mode) ) {
+ if( *fidp || *fidp2 )
+ return ENOLCK;
+
+ /* open fid mode */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ newfid = u9fs_id_new(nmp->nm_fids);
+ req.r_type = Tclone;
+ req.r_fid = np->n_fid;
+ req.r_newfid = newfid;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ req.r_type = Topen;
+ req.r_fid = newfid;
+ req.r_mode = mode;
+ error = u9fs_request(&req, &rep, 1);
+ if( error ) {
+ u9fs_free_fid(newfid, nmp, p);
+ return error;
+ }
+ u9fs_updtcache(np, &rep);
+
+ *fidp = *fidp2 = newfid;
+ }
+
+ if( *fidp == 0 )
+ panic("open");
+
+ if( *fidp2 == 0 ) {
+ /* open fid mode */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ newfid = u9fs_id_new(nmp->nm_fids);
+ req.r_type = Tclone;
+ req.r_fid = np->n_fid;
+ req.r_newfid = newfid;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ req.r_type = Topen;
+ req.r_fid = newfid;
+ req.r_mode = mode;
+ error = u9fs_request(&req, &rep, 1);
+ if( error ) {
+ u9fs_free_fid(newfid, nmp, p);
+ return error;
+ }
+ u9fs_updtcache(np, &rep);
+ *fidp2 = newfid;
+ }
+
+ if( np->n_qid.vers != np->n_dir.dir_qid.vers ) /* content changed */
+ u9fs_vinvalbuf(vp, 0, cred, p, 0);
+
+ return 0;
+ }
+
+ /*
+ * u9fs close vnode op
+ * What a network-filesystem client should do upon close after writing
+ * is debatable. Most clients push delayed writes to the server on
+ * close, both so that write errors can be reported back to the process
+ * doing the close system call and to put a worst-case bound on cache
+ * inconsistency between clients sharing the file.
+ * This client does no write-behind caching yet, so close only has to
+ * release the fids that were opened for exclusive-use files; ordinary
+ * fids stay cached on the u9fsnode for reuse.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_close(ap)
+ struct vop_close_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ int a_fflag;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ int fflag = ap->a_fflag;
+ struct vnode * vp = ap->a_vp;
+ struct u9fsnode * np = VTOU9FS(vp);
+ struct u9fsmount * nmp = VFSTOU9FS(vp->v_mount);
+ struct proc * p = ap->a_p;
+
+ if( U9P_PERM_EXCL(np->n_dir.dir_mode) ) {
+ if( (fflag & FREAD) ) {
+ u9fs_free_fid(np->n_rdfid, nmp, p);
+ np->n_rdfid = 0;
+ }
+
+ if( (fflag & FWRITE) == FWRITE ) {
+ u9fs_free_fid(np->n_wrfid, nmp, p);
+ np->n_wrfid = 0;
+ }
+
+ if( (fflag & (FREAD|FWRITE)) == (FREAD|FWRITE) )
+ np->n_wrfid = 0;
+ }
+
+ return 0;
+ }
+
+ /*
+ * u9fs getattr call from vfs.
+ */
+ static int
+ u9fs_getattr(ap)
+ struct vop_getattr_args /* {
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ register struct u9fsnode *np = VTOU9FS(vp);
+ int error = 0;
+ struct u9fsreq req, rep;
+ struct u9fsmount * nmp = VFSTOU9FS(vp->v_mount);
+ struct u9fsdir * dir;
+ struct vattr * vap = ap->a_vap;
+
+ /*
+ * Update local times for special files.
+ */
+ if (np->n_flag & (NACC | NUPD))
+ np->n_flag |= NCHG;
+ #if 0
+ /*
+ * First look in the cache.
+ */
+ if (u9fs_getattrcache(vp, ap->a_vap) == 0)
+ return (0);
+ #endif
+ if( np->n_fid == 0 )
+ panic("u9fs_getattr");
+
+ /* stat fid */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = ap->a_p;
+ req.r_type = Tstat;
+ req.r_fid = np->n_fid;
+ error = u9fs_request(& req, & rep, 1);
+ if( error )
+ return error;
+
+ /* fill in vattr */
+ dir = & np->n_dir;
+ u9p_m2d(rep.r_stat, dir);
+
+ bzero(vap, sizeof(*vap));
+ /* the plan9 file system has no other types. */
+ /* XXX: we have not dealt with devices yet */
+ if( U9P_PERM_CHDIR(dir->dir_mode) )
+ vap->va_type = VDIR;
+ else
+ vap->va_type = VREG;
+
+ vap->va_mode = U9P_PERM_ALL(dir->dir_mode);
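+ /* plan 9 has no hard links, so report a link count of 1 */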
+ vap->va_nlink = 1;
+ vap->va_uid = u9fs_name2uid(dir->dir_uid);
+ vap->va_gid = u9fs_name2uid(dir->dir_gid);
+ vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
+ vap->va_fileid = dir->dir_qid.path;
+ vap->va_size = np->n_size = dir->dir_length;
+ vap->va_blocksize = PAGE_SIZE;
+ vap->va_atime.tv_sec = dir->dir_atime;
+ vap->va_atime.tv_nsec = 0;
+ vap->va_mtime.tv_sec = dir->dir_mtime;
+ vap->va_mtime.tv_nsec = 0;
+ vap->va_ctime.tv_sec = dir->dir_mtime;
+ vap->va_ctime.tv_nsec = 0;
+ vap->va_gen = VNOVAL;
+ vap->va_flags = 0;
+ vap->va_bytes = vap->va_size;
+ vap->va_filerev = dir->dir_qid.vers;
+
+ vp->v_type = vap->va_type;
+ vp->v_tag = VT_U9FS;
+
+ return (error);
+ }
+
+ /*
+ * u9fs setattr call.
+ */
+ static int
+ u9fs_setattr(ap)
+ struct vop_setattr_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_vp;
+ struct vattr *a_vap;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ register struct u9fsnode *np = VTOU9FS(vp);
+ register struct vattr *vap = ap->a_vap;
+ int error = 0;
+ struct u9fsmount * nmp = VFSTOU9FS(vp->v_mount);
+ struct u9fsdir dir;
+ struct u9fsuser * u9p;
+ struct vattr attr;
+ struct u9fsreq req, rep;
+
+ if( vp->v_mount->mnt_flag & MNT_RDONLY )
+ return (EROFS);
+
+ if( vap->va_nlink != VNOVAL || vap->va_uid != VNOVAL ||
+ vap->va_fsid != VNOVAL || vap->va_fileid != VNOVAL ||
+ #if 0
+ vap->va_size != VNOVAL || vap->va_blocksize != VNOVAL ||
+ #endif
+ vap->va_atime.tv_sec != VNOVAL || vap->va_ctime.tv_sec != VNOVAL ||
+ vap->va_gen != VNOVAL ||
+ vap->va_flags != VNOVAL || vap->va_bytes != VNOVAL ) {
+ #if 0
+ printf("%d %d %d %d %d %d %d %d %d %d %d\n", vap->va_nlink, vap->va_uid, vap->va_fsid,
+ vap->va_fileid, vap->va_size, vap->va_blocksize,
+ vap->va_atime.tv_sec, vap->va_ctime.tv_sec, vap->va_gen,
+ vap->va_flags, vap->va_bytes);
+ printf("unsupported setattr\n");
+ /* touch tries to change ctime first.
+ * if fails, it touches the first byte
+ */
+ #endif
+ return (EOPNOTSUPP);
+ }
+
+ if( vap->va_size == 0 )
+ u9fs_trunc(vp, ap->a_cred, ap->a_p);
+
+ bcopy(&np->n_dir, &dir, sizeof(dir));
+ if( vap->va_mode != (mode_t)VNOVAL ) {
+ dir.dir_mode = U9P_PERM_NONPERM(dir.dir_mode)|U9P_PERM_ALL(vap->va_mode);
+ }
+ if( vap->va_gid != VNOVAL ) {
+ if( (u9p = u9fs_finduser(vap->va_gid)) == 0 )
+ return (EINVAL);
+ strncpy(dir.dir_gid, u9p->u_name, U9FS_NAMELEN); /* new group name goes into the wstat entry */
+ }
+ if( vap->va_mtime.tv_sec != VNOVAL ) {
+ dir.dir_mtime = vap->va_mtime.tv_sec;
+ }
+
+ /* wstat fid */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = ap->a_p;
+ req.r_type = Twstat;
+ req.r_fid = np->n_fid;
+ u9p_d2m(&dir, req.r_stat);
+ error = u9fs_request(& req, & rep, 1);
+ if( error )
+ return error;
+ VOP_GETATTR(vp, &attr, ap->a_cred, ap->a_p);
+
+ return 0;
+ }
+
+ /*
+ * u9fs lookup call, one step at a time...
+ * First look in cache
+ * If not found, unlock the directory u9fsnode and do the rpc
+ */
+ static int
+ u9fs_lookup(ap)
+ struct vop_lookup_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ } */ *ap;
+ {
+ struct componentname *cnp = ap->a_cnp;
+ struct vnode *dvp = ap->a_dvp;
+ struct vnode **vpp = ap->a_vpp;
+ int flags = cnp->cn_flags;
+ struct vnode *newvp;
+ struct u9fsmount *nmp;
+ long len;
+ u9fsfh_t fh;
+ struct u9fsnode *np;
+ int lockparent, wantparent, error = 0;
+ struct proc *p = cnp->cn_proc;
+ struct u9fsreq req, rep;
+ u_short newfid;
+ struct vattr attrs;
+
+ *vpp = NULLVP;
+ if ((flags & ISLASTCN) && (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
+ (cnp->cn_nameiop == DELETE || cnp->cn_nameiop == RENAME))
+ return (EROFS);
+ if (dvp->v_type != VDIR)
+ return (ENOTDIR);
+ lockparent = flags & LOCKPARENT;
+ wantparent = flags & (LOCKPARENT|WANTPARENT);
+ nmp = VFSTOU9FS(dvp->v_mount);
+ np = VTOU9FS(dvp);
+ #if 0
+ if ((error = cache_lookup(dvp, vpp, cnp)) && error != ENOENT) {
+ struct vattr vattr;
+ int vpid;
+
+ if (error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, p)) {
+ *vpp = NULLVP;
+ return (error);
+ }
+
+ newvp = *vpp;
+ vpid = newvp->v_id;
+ /*
+ * See the comment starting `Step through' in ufs/ufs_lookup.c
+ * for an explanation of the locking protocol
+ */
+ if (dvp == newvp) {
+ VREF(newvp);
+ error = 0;
+ } else if (flags & ISDOTDOT) {
+ VOP_UNLOCK(dvp, 0, p);
+ error = vget(newvp, LK_EXCLUSIVE, p);
+ if (!error && lockparent && (flags & ISLASTCN))
+ error = vn_lock(dvp, LK_EXCLUSIVE, p);
+ } else {
+ error = vget(newvp, LK_EXCLUSIVE, p);
+ if (!lockparent || error || !(flags & ISLASTCN))
+ VOP_UNLOCK(dvp, 0, p);
+ }
+ if (!error) {
+ if (vpid == newvp->v_id) {
+ if (!VOP_GETATTR(newvp, &vattr, cnp->cn_cred, p)
+ && vattr.va_ctime.tv_sec == VTOU9FS(newvp)->n_ctime) {
+ u9fsstats.lookupcache_hits++;
+ if (cnp->cn_nameiop != LOOKUP &&
+ (flags & ISLASTCN))
+ cnp->cn_flags |= SAVENAME;
+ return (0);
+ }
+ cache_purge(newvp);
+ }
+ vput(newvp);
+ if (lockparent && dvp != newvp && (flags & ISLASTCN))
+ VOP_UNLOCK(dvp, 0, p);
+ }
+ error = vn_lock(dvp, LK_EXCLUSIVE, p);
+ *vpp = NULLVP;
+ if (error)
+ return (error);
+ }
+ #endif
+ error = 0;
+ newvp = NULLVP;
+ len = cnp->cn_namelen;
+
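+ /* Tclwalk clones the directory fid and walks one name component in a single round trip */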
+ /* Tclwalk tag fid newfid name */
+ bzero(&req, sizeof(req));
+ req.r_procp = p;
+ req.r_nmp = nmp;
+ req.r_type = Tclwalk;
+ req.r_fid = np->n_fid;
+ newfid = req.r_newfid = u9fs_id_new(nmp->nm_fids);
+ bcopy(cnp->cn_nameptr, req.r_name, len);
+ if( (error = u9fs_request(&req, &rep, 1)) ) {
+ u9fs_id_free(nmp->nm_fids, newfid);
+ return error;
+ }
+
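+ /* a zero qid path is taken to mean the name was not found in the directory */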
+ fh = rep.r_qid.path;
+ if( fh == 0 ) {
+ u9fs_id_free(nmp->nm_fids, newfid);
+ error = ENOENT;
+ goto lastcheck;
+ }
+
+ /*
+ * Handle RENAME case...
+ */
+ if (cnp->cn_nameiop == RENAME && wantparent && (flags & ISLASTCN)) {
+ #if 0
+ /* XXX: I don't understand this. rename foo foo? */
+ if (U9FS_CMPFH(np, fhp, fhsize)) {
+ m_freem(mrep);
+ return (EISDIR);
+ }
+ #endif
+ error = u9fs_nget(dvp->v_mount, fh, &np, p);
+ if (error)
+ goto fail;
+
+ if ( np->n_fid )
+ u9fs_free_fid(newfid, nmp, p);
+ else
+ np->n_fid = newfid;
+
+ newvp = U9FSTOV(np);
+ *vpp = newvp;
+ cnp->cn_flags |= SAVENAME;
+ if (!lockparent)
+ VOP_UNLOCK(dvp, 0, p);
+ return (0);
+ }
+
+ if (flags & ISDOTDOT) {
+ VOP_UNLOCK(dvp, 0, p);
+ error = u9fs_nget(dvp->v_mount, fh, &np, p);
+ if (error) {
+ vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, p);
+ goto fail;
+ }
+ if( np->n_fid )
+ u9fs_free_fid(newfid, nmp, p);
+ else
+ np->n_fid = req.r_newfid;
+
+ newvp = U9FSTOV(np);
+ if (lockparent && (flags & ISLASTCN) &&
+ (error = vn_lock(dvp, LK_EXCLUSIVE, p))) {
+ vput(newvp);
+ return (error);
+ }
+ } else if (np->n_qid.path == fh) {
+ u9fs_free_fid(newfid, nmp, p);
+ VREF(dvp);
+ newvp = dvp;
+ } else {
+ error = u9fs_nget(dvp->v_mount, fh, &np, p);
+ if (error)
+ goto fail;
+
+ if( np->n_fid )
+ u9fs_free_fid(newfid, nmp, p);
+ else
+ np->n_fid = req.r_newfid;
+
+ if (!lockparent || !(flags & ISLASTCN))
+ VOP_UNLOCK(dvp, 0, p);
+ newvp = U9FSTOV(np);
+
+ VOP_GETATTR(newvp, & attrs, p->p_ucred, p);
+ }
+
+ if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
+ cnp->cn_flags |= SAVENAME;
+ #if 0
+ if ((cnp->cn_flags & MAKEENTRY) &&
+ (cnp->cn_nameiop != DELETE || !(flags & ISLASTCN))) {
+ np->n_ctime = np->n_vattr.va_ctime.tv_sec;
+ cache_enter(dvp, newvp, cnp);
+ }
+ #endif
+ *vpp = newvp;
+ lastcheck:
+ if (error) {
+ if (newvp != NULLVP) {
+ vrele(newvp);
+ *vpp = NULLVP;
+ }
+ if ((cnp->cn_nameiop == CREATE || cnp->cn_nameiop == RENAME) &&
+ (flags & ISLASTCN) && error == ENOENT) {
+ if (!lockparent)
+ VOP_UNLOCK(dvp, 0, p);
+ if (dvp->v_mount->mnt_flag & MNT_RDONLY)
+ error = EROFS;
+ else
+ error = EJUSTRETURN;
+ }
+ if (cnp->cn_nameiop != LOOKUP && (flags & ISLASTCN))
+ cnp->cn_flags |= SAVENAME;
+ }
+ return (error);
+
+ fail:
+ u9fs_free_fid(newfid, nmp, p);
+ return (error);
+ }
+
+ /*
+ * u9fs read call.
+ * Just call u9fs_bioread() to do the work.
+ */
+ static int
+ u9fs_read(ap)
+ struct vop_read_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+
+ if (vp->v_type != VREG)
+ return (EPERM);
+ return (u9fs_bioread(vp, ap->a_uio, ap->a_ioflag, ap->a_cred, 0));
+ }
+
+ /*
+ * u9fs readlink call
+ */
+ static int
+ u9fs_readlink(ap)
+ struct vop_readlink_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ } */ *ap;
+ {
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * u9fs mknod vop
+  * Currently unsupported.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_mknod(ap)
+ struct vop_mknod_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap;
+ {
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * u9fs file create call
+ */
+ static int
+ u9fs_create(ap)
+ struct vop_create_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap;
+ {
+ register struct vnode *dvp = ap->a_dvp;
+ register struct vattr *vap = ap->a_vap;
+ register struct componentname *cnp = ap->a_cnp;
+ struct u9fsnode *np = (struct u9fsnode *)0;
+ struct vnode *newvp = (struct vnode *)0;
+ int error = 0, len;
+ struct vattr vattr;
+ struct u9fsreq req, rep;
+ struct u9fsmount *nmp;
+ u9fsfh_t fh;
+ struct proc * p;
+ int pfid;
+
+ #if 0
+ /*
+ * Oops, not for me..
+ */
+ if (vap->va_type == VSOCK)
+ return (u9fs_mknodrpc(dvp, ap->a_vpp, cnp, vap));
+ #endif
+
+ if (error = VOP_GETATTR(dvp, &vattr, cnp->cn_cred, cnp->cn_proc)) {
+ VOP_ABORTOP(dvp, cnp);
+ return (error);
+ }
+
+ nmp = VFSTOU9FS(dvp->v_mount);
+ np = VTOU9FS(dvp);
+ p = cnp->cn_proc;
+
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+
+ req.r_type = Tclone;
+ pfid = req.r_fid = np->n_fid;
+ req.r_newfid = u9fs_id_new(nmp->nm_fids);
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+
+ req.r_type = Tcreate;
+ req.r_fid = req.r_newfid;
+ len = cnp->cn_namelen;
+ if( len > U9FS_NAMELEN )
+ len = U9FS_NAMELEN;
+ strncpy(req.r_name, cnp->cn_nameptr, len);
+   req.r_name[U9FS_NAMELEN-1] = 0;
+ req.r_perm = U9P_PERM_ALL(vap->va_mode);
+ if( vap->va_type == VDIR ) {
+ req.r_perm |= 0x80000000;
+ req.r_mode = U9P_MODE_RD;
+ } else
+ req.r_mode = U9P_MODE_WR | U9P_MODE_TRUNC;
+ if(vap->va_vaflags & VA_EXCLUSIVE)
+ req.r_mode = U9P_MODE_EX;
+
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+
+ fh = rep.r_qid.path;
+ u9fs_nget(dvp->v_mount, fh, &np, p);
+ newvp = U9FSTOV(np);
+ if( vap->va_type == VDIR )
+ np->n_rdfid = req.r_fid;
+ else
+ np->n_wrfid = req.r_fid;
+
+ req.r_type = Tclwalk;
+ req.r_fid = pfid;
+ req.r_newfid = u9fs_id_new(nmp->nm_fids);
+ /* r_name is already filled */
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ np->n_fid = req.r_newfid;
+ VOP_GETATTR(newvp, & vattr, p->p_ucred, p);
+
+ *ap->a_vpp = newvp;
+ zfree(namei_zone, cnp->cn_pnbuf);
+
+ return 0;
+ }
+
+ /*
+  * u9fs file remove call.
+  * Unlike the NFS client, no silly-rename is attempted for files that
+  * are still in use; the Tremove request is simply sent to the server.
+ */
+ static int
+ u9fs_remove(ap)
+ struct vop_remove_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode * a_dvp;
+ struct vnode * a_vp;
+ struct componentname * a_cnp;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ register struct componentname *cnp = ap->a_cnp;
+ struct u9fsnode *np;
+ struct u9fsreq req, rep;
+ struct u9fsmount *nmp;
+ struct proc * p;
+ int error;
+
+ nmp = VFSTOU9FS(vp->v_mount);
+ np = VTOU9FS(vp);
+ p = cnp->cn_proc;
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ req.r_type = Tremove;
+ req.r_fid = np->n_fid;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ zfree(namei_zone, cnp->cn_pnbuf);
+ return 0;
+ }
+
+ /*
+ * u9fs file rename call
+ */
+ static int
+ u9fs_rename(ap)
+ struct vop_rename_args /* {
+ struct vnode *a_fdvp;
+ struct vnode *a_fvp;
+ struct componentname *a_fcnp;
+ struct vnode *a_tdvp;
+ struct vnode *a_tvp;
+ struct componentname *a_tcnp;
+ } */ *ap;
+ {
+ register struct vnode *fvp = ap->a_fvp;
+ register struct vnode *tvp = ap->a_tvp;
+ register struct vnode *fdvp = ap->a_fdvp;
+ register struct vnode *tdvp = ap->a_tdvp;
+ register struct componentname *tcnp = ap->a_tcnp;
+ register struct componentname *fcnp = ap->a_fcnp;
+ int error, len;
+ struct u9fsmount * nmp;
+ struct u9fsreq req, rep;
+ struct u9fsdir dir;
+ struct u9fsnode * np;
+
+   /* we can't do cross-directory renames, renames onto an existing file,
+      or renames on a read-only mount */
+   if( fdvp != tdvp || tvp != 0 || fvp->v_mount->mnt_flag & MNT_RDONLY ){
+     printf("u9fs_rename: unsupported rename\n");
+ error = EOPNOTSUPP;
+ goto out;
+ }
+
+ nmp = VFSTOU9FS(fvp->v_mount);
+ np = VTOU9FS(fvp);
+
+ bcopy(&np->n_dir, &dir, sizeof(dir));
+ len = tcnp->cn_namelen;
+ if( len > U9FS_NAMELEN )
+ len = U9FS_NAMELEN;
+ strncpy(dir.dir_name, tcnp->cn_nameptr, len);
+ dir.dir_name[U9FS_NAMELEN-1] = 0;
+
+ /* stat fid */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = fcnp->cn_proc;
+ req.r_type = Twstat;
+ req.r_fid = np->n_fid;
+ u9p_d2m(&dir, req.r_stat);
+ error = u9fs_request(& req, & rep, 1);
+
+ out:
+ if (tdvp == tvp)
+ vrele(tdvp);
+ else
+ vput(tdvp);
+ if (tvp)
+ vput(tvp);
+ vrele(fdvp);
+ vrele(fvp);
+
+ return error;
+ }
+
+ /*
+ * u9fs hard link create call
+ */
+ static int
+ u9fs_link(ap)
+ struct vop_link_args /* {
+ struct vnode *a_tdvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap;
+ {
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * u9fs symbolic link create call
+ */
+ static int
+ u9fs_symlink(ap)
+ struct vop_symlink_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ char *a_target;
+ } */ *ap;
+ {
+ return (EOPNOTSUPP);
+ }
+
+ /*
+ * u9fs make dir call
+ */
+ static int
+ u9fs_mkdir(ap)
+ struct vop_mkdir_args /* {
+ struct vnode *a_dvp;
+ struct vnode **a_vpp;
+ struct componentname *a_cnp;
+ struct vattr *a_vap;
+ } */ *ap;
+ {
+ struct vop_create_args cap;
+
+ cap.a_dvp = ap->a_dvp;
+ cap.a_vpp = ap->a_vpp;
+ cap.a_cnp = ap->a_cnp;
+ cap.a_vap = ap->a_vap;
+ return u9fs_create(&cap);
+ }
+
+ /*
+ * u9fs remove directory call
+ */
+ static int
+ u9fs_rmdir(ap)
+ struct vop_rmdir_args /* {
+ struct vnode *a_dvp;
+ struct vnode *a_vp;
+ struct componentname *a_cnp;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ register struct componentname *cnp = ap->a_cnp;
+ struct u9fsnode *np;
+ struct u9fsreq req, rep;
+ struct u9fsmount *nmp;
+ struct proc * p;
+ int error;
+
+ nmp = VFSTOU9FS(vp->v_mount);
+ np = VTOU9FS(vp);
+ p = cnp->cn_proc;
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ req.r_type = Tremove;
+ req.r_fid = np->n_fid;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ return error;
+ u9fs_id_free(nmp->nm_fids, np->n_fid);
+ np->n_fid = 0;
+ zfree(namei_zone, cnp->cn_pnbuf);
+ return 0;
+ }
+
+ /*
+ * u9fs readdir call
+ */
+ static int
+ u9fs_readdir(ap)
+ struct vop_readdir_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ struct ucred *a_cred;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ register struct uio *uio = ap->a_uio;
+ int error;
+
+ if (vp->v_type != VDIR)
+ return (EPERM);
+
+ /*
+ * Call u9fs_bioread() to do the real work.
+ */
+ error = u9fs_bioread(vp, uio, 0, ap->a_cred, 0);
+
+ return (error);
+ }
+
+ /*
+  * Kludge City...
+  * - make u9fs_bmap() essentially a no-op that does no translation
+  * - do u9fs_strategy() by doing I/O with u9fs_readrpc/u9fs_writerpc.
+  * (Maybe I could use the process's page mapping, but I was concerned that
+  *  Kernel Write might not be enabled, figured copyout() would do a lot
+  *  more work than bcopy(), and it currently happens in the context of
+  *  the swapper process (2).)
+ */
+ static int
+ u9fs_bmap(ap)
+ struct vop_bmap_args /* {
+ struct vnode *a_vp;
+ daddr_t a_bn;
+ struct vnode **a_vpp;
+ daddr_t *a_bnp;
+ int *a_runp;
+ int *a_runb;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+
+ if (ap->a_vpp != NULL)
+ *ap->a_vpp = vp;
+ if (ap->a_bnp != NULL)
+ *ap->a_bnp = ap->a_bn * btodb(vp->v_mount->mnt_stat.f_iosize);
+ if (ap->a_runp != NULL)
+ *ap->a_runp = 0;
+ if (ap->a_runb != NULL)
+ *ap->a_runb = 0;
+ return (0);
+ }
+
+ /*
+ * Strategy routine.
+  * Async requests are not supported (u9fs_strategy() panics on B_ASYNC);
+  * just call u9fs_doio() to perform the request synchronously.
+ */
+ static int
+ u9fs_strategy(ap)
+ struct vop_strategy_args *ap;
+ {
+ register struct buf *bp = ap->a_bp;
+ struct ucred *cr;
+ struct proc *p;
+ int error = 0;
+
+ if (bp->b_flags & B_PHYS)
+ panic("nfs physio");
+ if (bp->b_flags & B_ASYNC)
+ panic("u9fs async");
+
+ p = curproc; /* XXX */
+ if (bp->b_flags & B_READ)
+ cr = bp->b_rcred;
+ else
+ cr = bp->b_wcred;
+ error = u9fs_doio(bp, cr, p);
+ return (error);
+ }
+
+ /*
+ * Mmap a file
+ *
+ * NB Currently unsupported.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_mmap(ap)
+ struct vop_mmap_args /* {
+ struct vnode *a_vp;
+ int a_fflags;
+ struct ucred *a_cred;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ return (EINVAL);
+ }
+
+ /*
+  * fsync vnode op. Nothing to flush: the write cache is blocking, so data
+  * is already on the server by the time a write returns.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_fsync(ap)
+ struct vop_fsync_args /* {
+ struct vnodeop_desc *a_desc;
+ struct vnode * a_vp;
+ struct ucred * a_cred;
+ int a_waitfor;
+ struct proc * a_p;
+ } */ *ap;
+ {
+ /* we have a blocking writeback cache */
+ return 0;
+ }
+
+ /*
+ * U9FS advisory byte-level locks.
+ * Currently unsupported.
+ */
+ static int
+ u9fs_advlock(ap)
+ struct vop_advlock_args /* {
+ struct vnode *a_vp;
+ caddr_t a_id;
+ int a_op;
+ struct flock *a_fl;
+ int a_flags;
+ } */ *ap;
+ {
+ register struct u9fsnode *np = VTOU9FS(ap->a_vp);
+
+ /*
+ 	 * Advisory byte-range locks are handled purely locally via
+ 	 * lf_advlock(); they are never propagated to the 9P server.
+ */
+ return (lf_advlock(ap, &(np->n_lockf), np->n_size));
+ }
+
+ /*
+  * Print out the contents of a u9fsnode. Not implemented; panics if called.
+ */
+ static int
+ u9fs_print(ap)
+ struct vop_print_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+ {
+ panic("u9fs_print");
+ return 0;
+ }
+
+ /*
+  * bwrite vnode op. Not expected to be reached; panics if called.
+ */
+ static int
+ u9fs_bwrite(ap)
+ struct vop_bwrite_args /* {
+ struct vnode *a_bp;
+ } */ *ap;
+ {
+ panic("u9fs_bwrite");
+ return 0;
+ }
+
+ /*
+ * Vnode op for VM getpages.
+ */
+ static int
+ u9fs_getpages(ap)
+ struct vop_getpages_args /* {
+ struct vnode *a_vp;
+ vm_page_t *a_m;
+ int a_count;
+ int a_reqpage;
+ vm_ooffset_t a_offset;
+ } */ *ap;
+ {
+ int i, error, nextoff, size, toff, npages, count;
+ struct uio uio;
+ struct iovec iov;
+ vm_offset_t kva;
+ struct buf *bp;
+ struct vnode *vp;
+ struct proc *p;
+ struct ucred *cred;
+ struct u9fsmount *nmp;
+ vm_page_t *pages;
+
+ vp = ap->a_vp;
+ p = curproc; /* XXX */
+ cred = curproc->p_ucred; /* XXX */
+ nmp = VFSTOU9FS(vp->v_mount);
+ pages = ap->a_m;
+ count = ap->a_count;
+
+ if (vp->v_object == NULL) {
+ printf("u9fs_getpages: called with non-merged cache vnode??\n");
+ return VM_PAGER_ERROR;
+ }
+
+ /*
+ * We use only the kva address for the buffer, but this is extremely
+ 	 * convenient and fast.
+ */
+ bp = getpbuf();
+
+ npages = btoc(count);
+ kva = (vm_offset_t) bp->b_data;
+ pmap_qenter(kva, pages, npages);
+
+ iov.iov_base = (caddr_t) kva;
+ iov.iov_len = count;
+ uio.uio_iov = &iov;
+ uio.uio_iovcnt = 1;
+ uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
+ uio.uio_resid = count;
+ uio.uio_segflg = UIO_SYSSPACE;
+ uio.uio_rw = UIO_READ;
+ uio.uio_procp = p;
+
+ error = u9fs_readrpc(vp, &uio, cred);
+ pmap_qremove(kva, npages);
+
+ relpbuf(bp);
+
+ if (error && (uio.uio_resid == count))
+ return VM_PAGER_ERROR;
+
+ size = count - uio.uio_resid;
+
+ for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
+ vm_page_t m;
+ nextoff = toff + PAGE_SIZE;
+ m = pages[i];
+
+ m->flags &= ~PG_ZERO;
+
+ if (nextoff <= size) {
+ m->valid = VM_PAGE_BITS_ALL;
+ m->dirty = 0;
+ } else {
+ int nvalid = ((size + DEV_BSIZE - 1) - toff) & ~(DEV_BSIZE - 1);
+ vm_page_set_validclean(m, 0, nvalid);
+ }
+
+ if (i != ap->a_reqpage) {
+ /*
+ * Whether or not to leave the page activated is up in
+ * the air, but we should put the page on a page queue
+ 			 * somewhere (it already is in the object).  Empirical
+ 			 * results show that deactivating pages is best.
+ */
+
+ /*
+ * Just in case someone was asking for this page we
+ * now tell them that it is ok to use.
+ */
+ if (!error) {
+ if (m->flags & PG_WANTED)
+ vm_page_activate(m);
+ else
+ vm_page_deactivate(m);
+ vm_page_wakeup(m);
+ } else {
+ vnode_pager_freepage(m);
+ }
+ }
+ }
+ return 0;
+ }
+
+ /*
+ * Vnode op for VM putpages.
+ */
+ static int
+ u9fs_putpages(ap)
+ struct vop_putpages_args /* {
+ struct vnode *a_vp;
+ vm_page_t *a_m;
+ int a_count;
+ int a_sync;
+ int *a_rtvals;
+ vm_ooffset_t a_offset;
+ } */ *ap;
+ {
+ panic("u9fs_putpages");
+ return 0;
+ }
+
+ static int
+ u9fs_inactive(ap)
+ struct vop_inactive_args /* {
+ struct vnode *a_vp;
+ struct proc *a_p;
+ } */ *ap;
+ {
+ VOP_UNLOCK(ap->a_vp, 0, ap->a_p);
+ return 0;
+ }
+
+ /*
+ * Reclaim an u9fsnode so that it can be used for other purposes.
+ */
+ static int
+ u9fs_reclaim(ap)
+ struct vop_reclaim_args /* {
+ struct vnode *a_vp;
+ } */ *ap;
+ {
+ register struct vnode *vp = ap->a_vp;
+ register struct u9fsnode *np = VTOU9FS(vp);
+ register struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
+ struct proc * p = curproc;
+
+ /* some vnodes do not have fids due to previous access failure */
+ if( np->n_fid ) {
+ /* clunk fids */
+ u9fs_free_fid(np->n_fid, nmp, p);
+ if( np->n_rdfid )
+ u9fs_free_fid(np->n_rdfid, nmp, p);
+ if( np->n_wrfid )
+ u9fs_free_fid(np->n_wrfid, nmp, p);
+ }
+
+ LIST_REMOVE(np, n_hash);
+ cache_purge(vp);
+ zfree(u9fsnode_zone, vp->v_data);
+ vp->v_data = (void *)0;
+
+ return (0);
+ }
+
+ /*
+ * Vnode op for write using bio
+ */
+ static int
+ u9fs_write(ap)
+ struct vop_write_args /* {
+ struct vnode *a_vp;
+ struct uio *a_uio;
+ int a_ioflag;
+ struct ucred *a_cred;
+ } */ *ap;
+ {
+ if (ap->a_vp->v_type != VREG)
+ return (EIO);
+
+ return u9fs_biowrite(ap->a_vp, ap->a_uio, ap->a_ioflag, ap->a_cred);
+ }
+
+ /*
+  * u9fs abort op, called after namei() when a CREATE/DELETE isn't actually
+ * done. Currently nothing to do.
+ */
+ /* ARGSUSED */
+ static int
+ u9fs_abortop(ap)
+ struct vop_abortop_args /* {
+ struct vnode *a_dvp;
+ struct componentname *a_cnp;
+ } */ *ap;
+ {
+ return (0);
+ }
+
+ /*
+ * u9fs write call
+ */
+ int
+ u9fs_writerpc(vp, uiop, cred)
+ register struct vnode *vp;
+ register struct uio *uiop;
+ struct ucred *cred;
+ {
+ struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
+ int error = 0, len, tsiz, rlen;
+ struct u9fsreq req, rep;
+ struct u9fsnode * np = VTOU9FS(vp);
+ struct proc * p = uiop->uio_procp;
+ struct mbuf * top;
+
+ tsiz = uiop->uio_resid;
+ if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
+ return (EFBIG);
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ req.r_type = Twrite;
+ req.r_fid = np->n_wrfid;
+ while (tsiz > 0) {
+ len = (tsiz > nmp->nm_wsize) ? nmp->nm_wsize : tsiz;
+ req.r_offset = uiop->uio_offset;
+ req.r_count = len;
+ error = u9fs_uiotombuf(uiop, &top, len);
+ if( error )
+ break;
+ req.r_data = (char *)top;
+ error = u9fs_request(&req, &rep, 1);
+ if( error )
+ break;
+ rlen = rep.r_count;
+ if( rlen < len ) {
+ error = EIO;
+ break;
+ }
+ tsiz -= len;
+
+     /* Each write message increments the qid version by one on the server;
+        track it locally so that our write cache is not flushed needlessly. */
+ if( np->n_qid.vers )
+ np->n_qid.vers++;
+ else
+ np->n_qid.vers = np->n_dir.dir_qid.vers + 1;
+ }
+ if (error)
+ uiop->uio_resid = tsiz;
+ return (error);
+ }
+
+ /*
+ * Readdir rpc call.
+ * Called from below the buffer cache by u9fs_doio().
+ */
+ int
+ u9fs_readdirrpc(vp, uiop, cred)
+ struct vnode *vp;
+ register struct uio *uiop;
+ struct ucred *cred;
+ {
+ register int len, left;
+ register struct dirent *dp;
+ struct u9fsmount *nmp = VFSTOU9FS(vp->v_mount);
+ struct u9fsnode *np = VTOU9FS(vp);
+ int error = 0, tlen, more_dirs = 1, bigenough;
+ struct u9fsreq req, rep;
+ int count;
+ struct u9fsdir u9dir;
+
+ bigenough = uiop->uio_resid >= sizeof(struct dirent);
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_type = Tread;
+ req.r_fid = np->n_rdfid;
+ req.r_count = nmp->nm_readdirsize;
+ while ( more_dirs && bigenough ) {
+ req.r_offset = uiop->uio_offset;
+ error = u9fs_request(&req, &rep, 0);
+ if( error )
+ return error;
+
+ count = rep.r_count;
+ more_dirs = (count == req.r_count);
+ len = 0;
+ dp = (struct dirent *)uiop->uio_iov->iov_base;
+ left = uiop->uio_resid;
+ while( len < count ) {
+ /* XXX: too conservative, but OK */
+ if( left < sizeof(*dp) ) {
+ bigenough = 0;
+ break;
+ }
+ if( u9p_m_m2d(&req.r_mrep, & u9dir) ) {
+ printf("u9p_m_m2d failed!\n");
+ return (EIO);
+ }
+
+ dp->d_fileno = u9dir.dir_qid.path;
+ if( U9P_PERM_CHDIR(u9dir.dir_mode) )
+ dp->d_type = DT_DIR;
+ else
+ dp->d_type = DT_REG;
+ u9dir.dir_name[U9FS_NAMELEN-1] = 0; /* just to be sure */
+ dp->d_namlen = strlen(u9dir.dir_name);
+ memcpy(dp->d_name, u9dir.dir_name, dp->d_namlen+1);
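+ 			/* round the record length up to a 4-byte boundary,
+ 			   leaving room for the terminating NUL */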
+ tlen = DIRHDSIZ + dp->d_namlen + 4;
+ tlen = tlen - (tlen & 0x3);
+ dp->d_reclen = tlen;
+ dp = (struct dirent *)(((char *)dp) + tlen);
+ left -= tlen;
+ len += sizeof(u9dir);
+ }
+ tlen = uiop->uio_resid - left;
+ uiop->uio_resid = left;
+ uiop->uio_iov->iov_base += tlen;
+ uiop->uio_iov->iov_len -= tlen;
+ uiop->uio_offset += len;
+ m_freem(req.r_mrep);
+ }
+ return 0;
+ }
+
+ /*
+ * u9fs read rpc call
+ * Ditto above
+ */
+ int
+ u9fs_readrpc(vp, uiop, cred)
+ register struct vnode *vp;
+ struct uio *uiop;
+ struct ucred *cred;
+ {
+ struct u9fsmount *nmp;
+ struct u9fsnode *np = VTOU9FS(vp);
+ int error = 0, len, retlen, tsiz;
+ struct u9fsreq req, rep;
+
+ nmp = VFSTOU9FS(vp->v_mount);
+ tsiz = uiop->uio_resid;
+ if (uiop->uio_offset + tsiz > nmp->nm_maxfilesize)
+ return (EFBIG);
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_type = Tread;
+ req.r_fid = np->n_rdfid;
+ while (tsiz > 0) {
+ len = (tsiz > nmp->nm_rsize) ? nmp->nm_rsize : tsiz;
+ req.r_count = len;
+ req.r_offset = uiop->uio_offset;
+ error = u9fs_request(&req, &rep, 0);
+ if( error )
+ return error;
+ retlen = rep.r_count;
+ if( retlen && (error = u9fs_mbuftouio(req.r_mrep, uiop, retlen)) ) {
+ m_freem(req.r_mrep);
+ return error;
+ }
+
+ m_freem(req.r_mrep);
+ req.r_mrep = 0;
+ tsiz -= retlen;
+ if (retlen < len)
+ tsiz = 0;
+ }
+ return (0);
+ }
+
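+ /*
+  * Clunk a fid on the server and return it to the mount's fid allocator.
+  */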
+ static void u9fs_free_fid(fid, nmp, p)
+ u_short fid;
+ struct u9fsmount * nmp;
+ struct proc * p;
+ {
+ struct u9fsreq req, rep;
+
+ /* clunk fid */
+ bzero(&req, sizeof(req));
+ req.r_nmp = nmp;
+ req.r_procp = p;
+ req.r_type = Tclunk;
+ req.r_fid = fid;
+ u9fs_request(&req, &rep, 1);
+ u9fs_id_free(nmp->nm_fids, fid);
+ }
diff -N -c -r /usr/src/sys/9fs/9p.c ./9fs/9p.c
*** /usr/src/sys/9fs/9p.c Wed Dec 31 19:00:00 1969
--- ./9fs/9p.c Thu Nov 25 15:04:16 1999
***************
*** 0 ****
--- 1,974 ----
+ #include <sys/param.h>
+ #include <sys/systm.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <netinet/in.h>
+ #include <sys/mbuf.h>
+ #include <sys/malloc.h>
+ #include <sys/vnode.h>
+ #include <sys/mount.h>
+
+ #include <9fs/bitstring.h>
+ #include <9fs/9p.h>
+ #include <9fs/9auth.h>
+ #include <9fs/9fs.h>
+
+ int u9p_usetcp = 0;
+ struct u9fs_reqq u9fs_reqq;
+
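+ /*
+  * The 9P wire format is little-endian.  The N2Hxxx/H2Nxxx macros decode
+  * and encode fixed-size fields, advancing the cursor p as a side effect.
+  */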
+ #define N2HCHAR(x) x = *p++
+ #define N2HSHORT(x) x = (p[0] | (p[1]<<8)); p += 2
+ #define N2HLONG(x) x = (p[0] | (p[1]<<8) |\
+ (p[2]<<16) | (p[3]<<24)); p += 4
+ #define N2HQUAD(x) x = (u_int64_t)(p[0] | (p[1]<<8) |\
+ (p[2]<<16) | (p[3]<<24)) |\
+ ((u_int64_t)(p[4] | (p[5]<<8) |\
+ (p[6]<<16) | (p[7]<<24)) << 32); p += 8
+ #define N2HSTRING(x,n) bcopy(p, x, n); p += n
+
+ #define H2NCHAR(x) *p++ = x
+ #define H2NSHORT(x) p[0]=x; p[1]=x>>8; p += 2
+ #define H2NLONG(x) p[0]=x; p[1]=x>>8; p[2]=x>>16; p[3]=x>>24; p += 4
+ #define H2NQUAD(x) p[0]=x; p[1]=x>>8;\
+ p[2]=x>>16; p[3]=x>>24;\
+ p[4]=x>>32; p[5]=x>>40;\
+ p[6]=x>>48; p[7]=x>>56;\
+ p += 8
+ #define H2NSTRING(x,n) bcopy(x, p, n); p += n
+
+ static void u9p_print __P((u_char * m, int len, struct u9fsreq * f));
+
+ static char * u9p_types[] = {
+ "Tnop",
+ "Rnop",
+ "Tosession",
+ "Rosession",
+ "Terror",
+ "Rerror",
+ "Tflush",
+ "Rflush",
+ "Toattach",
+ "Roattach",
+ "Tclone",
+ "Rclone",
+ "Twalk",
+ "Rwalk",
+ "Topen",
+ "Ropen",
+ "Tcreate",
+ "Rcreate",
+ "Tread",
+ "Rread",
+ "Twrite",
+ "Rwrite",
+ "Tclunk",
+ "Rclunk",
+ "Tremove",
+ "Rremove",
+ "Tstat",
+ "Rstat",
+ "Twstat",
+ "Rwstat",
+ "Tclwalk",
+ "Rclwalk",
+ "Tauth",
+ "Rauth",
+ "Tsession",
+ "Rsession",
+ "Tattach",
+ "Rattach",
+ "Ttunnel",
+ "Rtunnel",
+ "Tmax"
+ };
+
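+ /* Decode a flat n-byte 9P message at ap into f.  Returns n on success,
+    0 if the type is unknown or the length does not match. */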
+ int u9p_m2s(char *ap, int n, struct u9fsreq *f)
+ {
+ u_char *p;
+
+ p = (u_char*)ap;
+ N2HCHAR(f->r_type);
+ N2HSHORT(f->r_tag);
+ switch(f->r_type)
+ {
+ default:
+ return 0;
+
+ case Tnop:
+ case Tosession:
+ break;
+
+ case Tsession:
+ N2HSTRING(f->r_chal, sizeof(f->r_chal));
+ break;
+
+ case Tflush:
+ N2HSHORT(f->r_oldtag);
+ break;
+
+ case Tattach:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_uname, sizeof(f->r_uname));
+ N2HSTRING(f->r_aname, sizeof(f->r_aname));
+ N2HSTRING(f->r_ticket, sizeof(f->r_ticket));
+ N2HSTRING(f->r_auth, sizeof(f->r_auth));
+ break;
+
+ case Toattach:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_uname, sizeof(f->r_uname));
+ N2HSTRING(f->r_aname, sizeof(f->r_aname));
+ N2HSTRING(f->r_ticket, U9FS_NAMELEN);
+ break;
+
+ case Tauth:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_uname, sizeof(f->r_uname));
+ N2HSTRING(f->r_ticket, 8+U9FS_NAMELEN);
+ break;
+
+ case Tclone:
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_newfid);
+ break;
+
+ case Twalk:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_name, sizeof(f->r_name));
+ break;
+
+ case Topen:
+ N2HSHORT(f->r_fid);
+ N2HCHAR(f->r_mode);
+ break;
+
+ case Tcreate:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_name, sizeof(f->r_name));
+ N2HLONG(f->r_perm);
+ N2HCHAR(f->r_mode);
+ break;
+
+ case Tread:
+ N2HSHORT(f->r_fid);
+ N2HQUAD(f->r_offset);
+ N2HSHORT(f->r_count);
+ break;
+
+ case Twrite:
+ N2HSHORT(f->r_fid);
+ N2HQUAD(f->r_offset);
+ N2HSHORT(f->r_count);
+ p++; /* pad(1) */
+ f->r_data = (char*)p; p += f->r_count;
+ break;
+
+ case Ttunnel:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Tclunk:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Tremove:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Tstat:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Twstat:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_stat, sizeof(f->r_stat));
+ break;
+
+ case Tclwalk:
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_newfid);
+ N2HSTRING(f->r_name, sizeof(f->r_name));
+ break;
+ /*
+ */
+ case Rnop:
+ case Rosession:
+ break;
+
+ case Rsession:
+ N2HSTRING(f->r_chal, sizeof(f->r_chal));
+ N2HSTRING(f->r_authid, sizeof(f->r_authid));
+ N2HSTRING(f->r_authdom, sizeof(f->r_authdom));
+ break;
+
+ case Rerror:
+ N2HSTRING(f->r_ename, sizeof(f->r_ename));
+ break;
+
+ case Rflush:
+ break;
+
+ case Rattach:
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ N2HSTRING(f->r_rauth, sizeof(f->r_rauth));
+ break;
+
+ case Roattach:
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ break;
+
+ case Rauth:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_ticket, 8+8+7+7);
+ break;
+
+ case Rclone:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Rwalk:
+ case Rclwalk:
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ break;
+
+ case Ropen:
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ break;
+
+ case Rcreate:
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ break;
+
+ case Rread:
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_count);
+ p++; /* pad(1) */
+ f->r_data = (char*)p; p += f->r_count;
+ break;
+
+ case Rwrite:
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_count);
+ break;
+
+ case Rtunnel:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Rclunk:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Rremove:
+ N2HSHORT(f->r_fid);
+ break;
+
+ case Rstat:
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_stat, sizeof(f->r_stat));
+ break;
+
+ case Rwstat:
+ N2HSHORT(f->r_fid);
+ break;
+ }
+ if((u_char*)ap+n == p)
+ return n;
+ return 0;
+ }
+
+ void u9p_print(u_char * m, int len, struct u9fsreq * f)
+ {
+ struct u9fsreq u9fsreq;
+
+ if( f == 0 )
+ f = & u9fsreq;
+
+ if( len < 3 ) {
+ printf("truncated-9p %d", len);
+ return;
+ }
+
+ if( u9p_m2s((char *)m, len, f) == 0 )
+ return;
+
+ printf("%s tag %d ", u9p_types[f->r_type-Tnop], f->r_tag);
+
+ switch( f->r_type ) {
+ default:
+ return;
+
+ case Tnop:
+ case Tosession:
+ case Toattach:
+ case Tauth:
+ break;
+
+ case Tsession:
+ case Rsession:
+ printf("chal 0x%x 0x%x", *(u_int *)&f->r_chal[0], *(u_int *)&f->r_chal[4]);
+ break;
+
+ case Tflush:
+ printf("oldtag %d", f->r_oldtag);
+ break;
+
+ case Tclone:
+ printf("fid %d newfid %d", f->r_fid, f->r_newfid);
+ break;
+
+ case Twalk:
+ printf("fid %d name %s", f->r_fid, f->r_name);
+ break;
+
+ case Topen:
+ printf("fid %d %c", f->r_fid, f->r_mode);
+ break;
+
+ case Tcreate:
+ printf("fid %d name %s perm 0x%x mode %c", f->r_fid,
+ f->r_name, f->r_perm, f->r_mode);
+ break;
+
+ case Tread:
+ case Twrite:
+ printf("fid %d offset 0x%llx count %d", f->r_fid,
+ f->r_offset, f->r_count);
+ break;
+
+ case Tattach:
+ case Ttunnel:
+ case Tclunk:
+ case Tremove:
+ case Tstat:
+ case Twstat:
+ case Rclone:
+ case Rtunnel:
+ case Rclunk:
+ case Rremove:
+ case Rstat:
+ case Rwstat:
+ printf("fid %d", f->r_fid);
+ break;
+
+ case Tclwalk:
+ printf("fid %d ", f->r_fid);
+ printf("newfid %d ", f->r_newfid);
+ printf("name %s", f->r_name);
+ break;
+ /*
+ */
+ case Rnop:
+ case Rosession:
+ case Rflush:
+ case Roattach:
+ case Rauth:
+ break;
+
+ case Rerror:
+ printf("ename %s", f->r_ename);
+ break;
+
+ case Rattach:
+ case Rwalk:
+ case Rclwalk:
+ case Ropen:
+ case Rcreate:
+ printf("fid %d ", f->r_fid);
+ printf("qid 0x%x 0x%x", f->r_qid.path, f->r_qid.vers);
+ break;
+
+ case Rread:
+ printf("fid %d count %d ", f->r_fid, f->r_count);
+ break;
+
+ case Rwrite:
+ printf("fid %d count %d", f->r_fid, f->r_count);
+ break;
+ }
+ }
+
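+ /* Encode f into the flat buffer at ap and return the number of bytes
+    written.  Variable data (Twrite/Rread payload, Rstat stat) is copied
+    only when copydata is set. */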
+ int
+ u9p_s2m(struct u9fsreq *f, char *ap, int copydata)
+ {
+ u_char *p;
+
+ p = (u_char*)ap;
+ H2NCHAR(f->r_type);
+ H2NSHORT(f->r_tag);
+ switch(f->r_type)
+ {
+ default:
+ return 0;
+
+ case Tosession:
+ case Tnop:
+ break;
+
+ case Tsession:
+ H2NSTRING(f->r_chal, sizeof(f->r_chal));
+ break;
+
+ case Tflush:
+ H2NSHORT(f->r_oldtag);
+ break;
+
+ case Tattach:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_uname, sizeof(f->r_uname));
+ H2NSTRING(f->r_aname, sizeof(f->r_aname));
+ H2NSTRING(f->r_ticket, sizeof(f->r_ticket));
+ H2NSTRING(f->r_auth, sizeof(f->r_auth));
+ break;
+
+ case Toattach:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_uname, sizeof(f->r_uname));
+ H2NSTRING(f->r_aname, sizeof(f->r_aname));
+ H2NSTRING(f->r_ticket, U9FS_NAMELEN);
+ break;
+
+ case Tauth:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_uname, sizeof(f->r_uname));
+ H2NSTRING(f->r_ticket, 8+U9FS_NAMELEN);
+ break;
+
+ case Tclone:
+ H2NSHORT(f->r_fid);
+ H2NSHORT(f->r_newfid);
+ break;
+
+ case Twalk:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_name, sizeof(f->r_name));
+ break;
+
+ case Topen:
+ H2NSHORT(f->r_fid);
+ H2NCHAR(f->r_mode);
+ break;
+
+ case Tcreate:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_name, sizeof(f->r_name));
+ H2NLONG(f->r_perm);
+ H2NCHAR(f->r_mode);
+ break;
+
+ case Tread:
+ H2NSHORT(f->r_fid);
+ H2NQUAD(f->r_offset);
+ H2NSHORT(f->r_count);
+ break;
+
+ case Twrite:
+ H2NSHORT(f->r_fid);
+ H2NQUAD(f->r_offset);
+ H2NSHORT(f->r_count);
+ p++; /* pad(1) */
+ if( copydata ) {
+ H2NSTRING(f->r_data, f->r_count);
+ }
+ break;
+
+ case Ttunnel:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Tclunk:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Tremove:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Tstat:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Twstat:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_stat, sizeof(f->r_stat));
+ break;
+
+ case Tclwalk:
+ H2NSHORT(f->r_fid);
+ H2NSHORT(f->r_newfid);
+ H2NSTRING(f->r_name, sizeof(f->r_name));
+ break;
+ /*
+ */
+ case Rosession:
+ case Rnop:
+ break;
+
+ case Rsession:
+ H2NSTRING(f->r_chal, sizeof(f->r_chal));
+ H2NSTRING(f->r_authid, sizeof(f->r_authid));
+ H2NSTRING(f->r_authdom, sizeof(f->r_authdom));
+ break;
+
+ case Rerror:
+ H2NSTRING(f->r_ename, sizeof(f->r_ename));
+ break;
+
+ case Rflush:
+ break;
+
+ case Rattach:
+ H2NSHORT(f->r_fid);
+ H2NLONG(f->r_qid.path);
+ H2NLONG(f->r_qid.vers);
+ H2NSTRING(f->r_rauth, sizeof(f->r_rauth));
+ break;
+
+ case Roattach:
+ H2NSHORT(f->r_fid);
+ H2NLONG(f->r_qid.path);
+ H2NLONG(f->r_qid.vers);
+ break;
+
+ case Rauth:
+ H2NSHORT(f->r_fid);
+ H2NSTRING(f->r_ticket, 8+8+7+7);
+ break;
+
+ case Rclone:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Rwalk:
+ case Rclwalk:
+ H2NSHORT(f->r_fid);
+ H2NLONG(f->r_qid.path);
+ H2NLONG(f->r_qid.vers);
+ break;
+
+ case Ropen:
+ H2NSHORT(f->r_fid);
+ H2NLONG(f->r_qid.path);
+ H2NLONG(f->r_qid.vers);
+ break;
+
+ case Rcreate:
+ H2NSHORT(f->r_fid);
+ H2NLONG(f->r_qid.path);
+ H2NLONG(f->r_qid.vers);
+ break;
+
+ case Rread:
+ H2NSHORT(f->r_fid);
+ H2NSHORT(f->r_count);
+ p++; /* pad(1) */
+ if( copydata ) {
+ H2NSTRING(f->r_data, f->r_count);
+ }
+ break;
+
+ case Rwrite:
+ H2NSHORT(f->r_fid);
+ H2NSHORT(f->r_count);
+ break;
+
+ case Rtunnel:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Rclunk:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Rremove:
+ H2NSHORT(f->r_fid);
+ break;
+
+ case Rstat:
+ H2NSHORT(f->r_fid);
+ if( copydata )
+ H2NSTRING(f->r_stat, sizeof(f->r_stat));
+ break;
+
+ case Rwstat:
+ H2NSHORT(f->r_fid);
+ break;
+ }
+ return p - (u_char*)ap;
+ }
+
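+ /* Decode a U9FS_DIRLEN-byte stat buffer at ap into the directory entry f. */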
+ int
+ u9p_m2d(char *ap, struct u9fsdir *f)
+ {
+ u_char *p;
+
+ p = (u_char*)ap;
+ N2HSTRING(f->dir_name, sizeof(f->dir_name));
+ N2HSTRING(f->dir_uid, sizeof(f->dir_uid));
+ N2HSTRING(f->dir_gid, sizeof(f->dir_gid));
+ N2HLONG(f->dir_qid.path);
+ N2HLONG(f->dir_qid.vers);
+ N2HLONG(f->dir_mode);
+ N2HLONG(f->dir_atime);
+ N2HLONG(f->dir_mtime);
+ N2HQUAD(f->dir_length);
+ N2HSHORT(f->dir_type);
+ N2HSHORT(f->dir_dev);
+ return p - (u_char*)ap;
+ }
+
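+ /* Encode the directory entry f into the U9FS_DIRLEN-byte buffer at ap. */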
+ int
+ u9p_d2m(struct u9fsdir *f, char *ap)
+ {
+ u_char *p;
+
+ p = (u_char*)ap;
+ H2NSTRING(f->dir_name, sizeof(f->dir_name));
+ H2NSTRING(f->dir_uid, sizeof(f->dir_uid));
+ H2NSTRING(f->dir_gid, sizeof(f->dir_gid));
+ H2NLONG(f->dir_qid.path);
+ H2NLONG(f->dir_qid.vers);
+ H2NLONG(f->dir_mode);
+ H2NLONG(f->dir_atime);
+ H2NLONG(f->dir_mtime);
+ H2NQUAD(f->dir_length);
+ H2NSHORT(f->dir_type);
+ H2NSHORT(f->dir_dev);
+ return p - (u_char*)ap;
+ }
+
+ /* parse 9P types */
+ int u9p_type(char * t)
+ {
+ int i;
+
+ for(i = 0; i < sizeof(u9p_types)/sizeof(u9p_types[0]); i++) {
+ if( strcmp(u9p_types[i], t) == 0 )
+ return (i+Tnop);
+ }
+ return 0;
+ }
+
+ /* m is freed if shorter than s */
+ #if 1
+ #define U9P_PULLUP(m,s) if( (*(m))->m_len < (s) && ((*(m)) = m_pullup((*(m)),(s))) == 0 ) return 1; p = mtod((*(m)), u_char *)
+ #else
+ #define U9P_PULLUP(m,s) if( (*(m))->m_len < (s) && ((*(m)) = m_pullup((*(m)),(s))) == 0 ) panic("PULLUP"); p = mtod((*(m)), u_char *)
+ #endif
+
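+ /* trim s bytes, already decoded, from the front of the leading mbuf */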
+ #define U9P_ADJ(m,s) (*(m))->m_len -= (s); (*(m))->m_data += (s)
+
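+ /* peek at the tag of the 9P message at the head of *m without consuming it */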
+ u_short u9p_m_tag(struct mbuf ** m)
+ {
+ char * p;
+ u_short t;
+
+ U9P_PULLUP(m,3);
+ p = mtod(*m, char *);
+ p++;
+ N2HSHORT(t);
+
+ return t;
+ }
+
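+ /* Decode a 9P message directly from the mbuf chain *m, trimming fields as
+    they are consumed.  Returns 0 on success; on failure the chain has been
+    freed and 1 is returned. */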
+ int
+ u9p_m_m2s(struct mbuf **m, struct u9fsreq *f)
+ {
+ u_char *p;
+
+ U9P_PULLUP(m,3);
+ N2HCHAR(f->r_type);
+ N2HSHORT(f->r_tag);
+ U9P_ADJ(m, sizeof(f->r_type)+sizeof(f->r_tag));
+
+ switch(f->r_type) {
+ default:
+ goto drop;
+
+ case Tnop:
+ break;
+
+ case Tsession:
+ U9P_PULLUP(m,sizeof(f->r_chal));
+ N2HSTRING(f->r_chal, sizeof(f->r_chal));
+ U9P_ADJ(m, sizeof(f->r_chal));
+ break;
+
+ case Tflush:
+ U9P_PULLUP(m,sizeof(f->r_oldtag));
+ N2HSHORT(f->r_oldtag);
+     U9P_ADJ(m, sizeof(f->r_oldtag));
+ break;
+
+ case Tattach:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_uname)+sizeof(f->r_aname));
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_uname, sizeof(f->r_uname));
+ N2HSTRING(f->r_aname, sizeof(f->r_aname));
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_uname)+sizeof(f->r_aname));
+
+ U9P_PULLUP(m, sizeof(f->r_ticket)+sizeof(f->r_auth));
+ N2HSTRING(f->r_ticket, sizeof(f->r_ticket));
+ N2HSTRING(f->r_auth, sizeof(f->r_auth));
+ U9P_ADJ(m, sizeof(f->r_ticket)+sizeof(f->r_auth));
+ break;
+
+ case Tclone:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_newfid));
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_newfid);
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_newfid));
+ break;
+
+ case Twalk:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_name));
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_name, sizeof(f->r_name));
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_name));
+ break;
+
+ case Topen:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_mode));
+ N2HSHORT(f->r_fid);
+ N2HCHAR(f->r_mode);
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_mode));
+ break;
+
+ case Tcreate:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_name)
+ +sizeof(f->r_perm)+sizeof(f->r_mode));
+ N2HSHORT(f->r_fid);
+ N2HSTRING(f->r_name, sizeof(f->r_name));
+ N2HLONG(f->r_perm);
+ N2HCHAR(f->r_mode);
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_name)
+ +sizeof(f->r_perm)+sizeof(f->r_mode));
+ break;
+
+ case Tread:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_offset)+sizeof(f->r_count));
+ N2HSHORT(f->r_fid);
+ N2HQUAD(f->r_offset);
+ N2HSHORT(f->r_count);
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_offset)+sizeof(f->r_count));
+ break;
+
+ case Twrite:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_offset)+sizeof(f->r_count));
+ N2HSHORT(f->r_fid);
+ N2HQUAD(f->r_offset);
+ N2HSHORT(f->r_count);
+ p++; /* pad(1) */
+ f->r_data = (char*)p; p += f->r_count;
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_offset)+sizeof(f->r_count)+1);
+ break;
+
+ case Tclunk:
+ case Tremove:
+ case Tstat:
+ U9P_PULLUP(m, sizeof(f->r_fid));
+ N2HSHORT(f->r_fid);
+ U9P_ADJ(m, sizeof(f->r_fid));
+ break;
+
+ case Twstat:
+ U9P_PULLUP(m, sizeof(f->r_fid));
+ N2HSHORT(f->r_fid);
+ m_copydata(*m, sizeof(f->r_fid), sizeof(f->r_stat), f->r_stat);
+ m_adj(*m, sizeof(f->r_fid)+sizeof(f->r_stat));
+ break;
+
+ case Tclwalk:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_newfid)+sizeof(f->r_name));
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_newfid);
+ N2HSTRING(f->r_name, sizeof(f->r_name));
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_newfid)+sizeof(f->r_name));
+ break;
+ /*
+ */
+ case Rnop:
+ break;
+
+ case Rsession:
+     U9P_PULLUP(m, sizeof(f->r_chal)+sizeof(f->r_authid)+sizeof(f->r_authdom));
+ N2HSTRING(f->r_chal, sizeof(f->r_chal));
+ N2HSTRING(f->r_authid, sizeof(f->r_authid));
+ N2HSTRING(f->r_authdom, sizeof(f->r_authdom));
+     U9P_ADJ(m, sizeof(f->r_chal)+sizeof(f->r_authid)+sizeof(f->r_authdom));
+ break;
+
+ case Rerror:
+ U9P_PULLUP(m, sizeof(f->r_ename));
+ N2HSTRING(f->r_ename, sizeof(f->r_ename));
+ U9P_ADJ(m, sizeof(f->r_ename));
+ break;
+
+ case Rflush:
+ break;
+
+ case Rattach:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_qid.path)
+ +sizeof(f->r_qid.vers)+sizeof(f->r_rauth));
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ N2HSTRING(f->r_rauth, sizeof(f->r_rauth));
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_qid.path)
+ +sizeof(f->r_qid.vers)+sizeof(f->r_rauth));
+ break;
+
+ case Rclone:
+ U9P_PULLUP(m, sizeof(f->r_fid));
+ N2HSHORT(f->r_fid);
+ U9P_ADJ(m, sizeof(f->r_fid));
+ break;
+
+ case Rwalk:
+ case Rclwalk:
+ case Ropen:
+ case Rcreate:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_qid.path)
+ +sizeof(f->r_qid.vers));
+ N2HSHORT(f->r_fid);
+ N2HLONG(f->r_qid.path);
+ N2HLONG(f->r_qid.vers);
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_qid.path)
+ +sizeof(f->r_qid.vers));
+ break;
+
+ case Rread:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_count));
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_count);
+ p++; /* pad(1) */
+ f->r_data = (char*)p; p += f->r_count;
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_count)+1);
+ break;
+
+ case Rwrite:
+ U9P_PULLUP(m, sizeof(f->r_fid)+sizeof(f->r_count));
+ N2HSHORT(f->r_fid);
+ N2HSHORT(f->r_count);
+ U9P_ADJ(m, sizeof(f->r_fid)+sizeof(f->r_count));
+ break;
+
+ case Rclunk:
+ case Rremove:
+ case Rwstat:
+ U9P_PULLUP(m, sizeof(f->r_fid));
+ N2HSHORT(f->r_fid);
+ U9P_ADJ(m, sizeof(f->r_fid));
+ break;
+
+ case Rstat:
+ U9P_PULLUP(m, sizeof(f->r_fid));
+ N2HSHORT(f->r_fid);
+ m_copydata(*m, sizeof(f->r_fid), sizeof(f->r_stat), f->r_stat);
+ m_adj(*m, sizeof(f->r_fid)+sizeof(f->r_stat));
+ break;
+
+ }
+ return 0;
+
+ drop:
+ m_freem(*m);
+ return 1;
+ }
+
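+ /* Encode f into a freshly allocated mbuf.  For Twrite/Rread, f->r_data is
+    taken to be an mbuf chain holding the payload and is appended. */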
+ struct mbuf *
+ u9p_m_s2m (struct u9fsreq *f)
+ {
+ register struct mbuf * m;
+ struct mbuf * m0;
+ char * ap;
+ int sz;
+
+ /* we want one contiguous piece */
+ if( f->r_type == Tattach || f->r_type == Rstat || f->r_type == Twstat )
+ sz = 146; /* sizeof a Tattach */
+ else
+     sz = 87;  /* sizeof an Rsession, the largest of the remaining messages */
+
+ MGETHDR(m, M_WAIT, MT_DATA);
+ if( sz > MHLEN )
+ MCLGET(m, M_WAIT);
+ m->m_len = 0;
+
+ if ( M_TRAILINGSPACE(m) < sz )
+ panic("u9p_m_s2m");
+
+ ap = mtod(m, char *);
+ m->m_len = u9p_s2m(f, ap, 0);
+ m->m_pkthdr.len = m->m_len;
+
+ /* append data mbufs */
+ switch ( f->r_type ) {
+ default:
+ break;
+ case Twrite:
+ case Rread:
+ m0 = (struct mbuf *)f->r_data;
+ m->m_next = m0;
+ m->m_pkthdr.len += f->r_count;
+ break;
+ }
+
+ return m;
+ }
+
+ int
+ u9p_m_m2d (struct mbuf **m, struct u9fsdir *f)
+ {
+ u_char *p;
+
+ U9P_PULLUP(m, sizeof(f->dir_name)+sizeof(f->dir_uid)+sizeof(f->dir_gid));
+ N2HSTRING(f->dir_name, sizeof(f->dir_name));
+ N2HSTRING(f->dir_uid, sizeof(f->dir_uid));
+ N2HSTRING(f->dir_gid, sizeof(f->dir_gid));
+ U9P_ADJ(m, sizeof(f->dir_name)+sizeof(f->dir_uid)+sizeof(f->dir_gid));
+
+ U9P_PULLUP(m, sizeof(f->dir_qid)+sizeof(f->dir_mode)
+ +sizeof(f->dir_atime)+sizeof(f->dir_mtime)
+ +sizeof(f->dir_length)+sizeof(f->dir_type)+sizeof(f->dir_dev));
+ N2HLONG(f->dir_qid.path);
+ N2HLONG(f->dir_qid.vers);
+ N2HLONG(f->dir_mode);
+ N2HLONG(f->dir_atime);
+ N2HLONG(f->dir_mtime);
+ N2HQUAD(f->dir_length);
+ N2HSHORT(f->dir_type);
+ N2HSHORT(f->dir_dev);
+ U9P_ADJ(m, sizeof(f->dir_qid)+sizeof(f->dir_mode)
+ +sizeof(f->dir_atime)+sizeof(f->dir_mtime)
+ +sizeof(f->dir_length)+sizeof(f->dir_type)+sizeof(f->dir_dev));
+
+ return 0;
+ }
+
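+ /* Encode the directory entry f into a freshly allocated mbuf. */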
+ struct mbuf * u9p_m_d2m (struct u9fsdir *f)
+ {
+ char * ap;
+ struct mbuf * m;
+ MGET(m, M_WAIT, MT_DATA);
+ MCLGET(m, M_WAIT);
+ m->m_len = 0;
+
+ if ( M_TRAILINGSPACE(m) < sizeof(struct u9fsdir) )
+ panic("u9p_m_d2m");
+
+ ap = mtod(m, char *);
+ m->m_len = u9p_d2m(f, ap);
+
+ return m;
+ }
diff -N -c -r /usr/src/sys/9fs/9p.h ./9fs/9p.h
*** /usr/src/sys/9fs/9p.h Wed Dec 31 19:00:00 1969
--- ./9fs/9p.h Thu Nov 25 15:45:46 1999
***************
*** 0 ****
--- 1,183 ----
+ #ifndef _9FS_9P_H_
+ #define _9FS_9P_H_
+
+
+ #define U9FS_AUTHLEN 13
+ #define U9FS_NAMELEN 28
+ #define U9FS_TICKETLEN 72
+ #define U9FS_ERRLEN 64
+ #define U9FS_DOMLEN 48
+ #define U9FS_CHALLEN 8
+ #define U9FS_DIRLEN 116
+ #define U9FS_MAXFDATA 8192
+ #define U9FS_MAXDDATA (((int)U9FS_MAXFDATA/U9FS_DIRLEN)*U9FS_DIRLEN)
+
+ #define U9P_MODE_RD 0x0
+ #define U9P_MODE_WR 0x1
+ #define U9P_MODE_RDWR 0x2
+ #define U9P_MODE_EX 0x3
+ #define U9P_MODE_TRUNC 0x10
+ #define U9P_MODE_CLOSE 0x40
+
+ #define U9P_PERM_CHDIR(m) (0x80000000&(m))
+ #define U9P_PERM_OWNER(m) ((m)&0x7)
+ #define U9P_PERM_GROUP(m) (((m)>>3)&0x7)
+ #define U9P_PERM_OTHER(m) (((m)>>6)&0x7)
+ #define U9P_PERM_ALL(m) ((m)&0777)
+ #define U9P_PERM_EXCL(m) ((m)&0x20000000)
+ #define U9P_PERM_APPEND(m) ((m)&0x40000000)
+ #define U9P_PERM_NONPERM(m) ((m)&0xfffffe00)
+
+ /* this is too small */
+ typedef u_int32_t u9fsfh_t;
+
+ struct u9fs_qid {
+ u9fsfh_t path;
+ u_int32_t vers;
+ };
+
+ struct u9fsreq {
+ TAILQ_ENTRY(u9fsreq) r_chain;
+ struct u9fsreq * r_rep;
+ struct mbuf * r_mrep;
+ struct proc *r_procp; /* Proc that did I/O system call */
+ struct u9fsmount *r_nmp;
+
+ /* actual content of the 9P message */
+ char r_type;
+ short r_fid;
+ u_short r_tag;
+ union {
+ struct {
+ u_short oldtag; /* Tflush */
+ struct u9fs_qid qid; /* Rattach, Rwalk, Ropen, Rcreate */
+ char rauth[U9FS_AUTHLEN]; /* Rattach */
+ } u1;
+ struct {
+ char uname[U9FS_NAMELEN]; /* Tattach */
+ char aname[U9FS_NAMELEN]; /* Tattach */
+ char ticket[U9FS_TICKETLEN]; /* Tattach */
+ char auth[U9FS_AUTHLEN]; /* Tattach */
+ } u2;
+ struct {
+ char ename[U9FS_ERRLEN]; /* Rerror */
+ char authid[U9FS_NAMELEN]; /* Rsession */
+ char authdom[U9FS_DOMLEN]; /* Rsession */
+ char chal[U9FS_CHALLEN]; /* Tsession/Rsession */
+ } u3;
+ struct {
+ u_int32_t perm; /* Tcreate */
+ short newfid; /* Tclone, Tclwalk */
+ char name[U9FS_NAMELEN]; /* Twalk, Tclwalk, Tcreate */
+ char mode; /* Tcreate, Topen */
+ } u4;
+ struct {
+ u_int64_t offset; /* Tread, Twrite */
+ u_short count; /* Tread, Twrite, Rread */
+ char *data; /* Twrite, Rread */
+ } u5;
+ char stat[U9FS_DIRLEN]; /* Twstat, Rstat */
+ } u;
+ };
+
+ #define r_oldtag u.u1.oldtag
+ #define r_qid u.u1.qid
+ #define r_rauth u.u1.rauth
+ #define r_uname u.u2.uname
+ #define r_aname u.u2.aname
+ #define r_ticket u.u2.ticket
+ #define r_auth u.u2.auth
+ #define r_ename u.u3.ename
+ #define r_authid u.u3.authid
+ #define r_authdom u.u3.authdom
+ #define r_chal u.u3.chal
+ #define r_perm u.u4.perm
+ #define r_newfid u.u4.newfid
+ #define r_name u.u4.name
+ #define r_mode u.u4.mode
+ #define r_offset u.u5.offset
+ #define r_count u.u5.count
+ #define r_data u.u5.data
+ #define r_stat u.stat
+
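+ /*
+  * On the wire a directory entry is name[28] uid[28] gid[28] qid.path[4]
+  * qid.vers[4] mode[4] atime[4] mtime[4] length[8] type[2] dev[2],
+  * i.e. U9FS_DIRLEN (116) bytes; see u9p_d2m()/u9p_m2d().
+  */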
+ struct u9fsdir {
+ char dir_name[U9FS_NAMELEN];
+ char dir_uid[U9FS_NAMELEN];
+ char dir_gid[U9FS_NAMELEN];
+ struct u9fs_qid dir_qid;
+ u_int32_t dir_mode;
+ u_int32_t dir_atime;
+ u_int32_t dir_mtime;
+ union {
+ u_int64_t length;
+ struct { /* little endian */
+ u_int32_t llength;
+ u_int32_t hlength;
+ } l;
+ } u;
+ u_short dir_type;
+ u_short dir_dev;
+ };
+
+ #define dir_length u.length
+ #define dir_llength u.l.llength
+ #define dir_hlength u.l.hlength
+
+ enum
+ {
+ Tnop = 50,
+ Rnop,
+ Tosession = 52, /* illegal */
+ Rosession, /* illegal */
+ Terror = 54, /* illegal */
+ Rerror,
+ Tflush = 56,
+ Rflush,
+ Toattach = 58, /* illegal */
+ Roattach, /* illegal */
+ Tclone = 60,
+ Rclone,
+ Twalk = 62,
+ Rwalk,
+ Topen = 64,
+ Ropen,
+ Tcreate = 66,
+ Rcreate,
+ Tread = 68,
+ Rread,
+ Twrite = 70,
+ Rwrite,
+ Tclunk = 72,
+ Rclunk,
+ Tremove = 74,
+ Rremove,
+ Tstat = 76,
+ Rstat,
+ Twstat = 78,
+ Rwstat,
+ Tclwalk = 80,
+ Rclwalk,
+ Tauth = 82, /* illegal */
+ Rauth, /* illegal */
+ Tsession = 84,
+ Rsession,
+ Tattach = 86,
+ Rattach,
+ Ttunnel = 88,
+ Rtunnel,
+ Tmax
+ };
+
+ int u9p_m2s __P((char *ap, int n, struct u9fsreq *f));
+ int u9p_s2m __P((struct u9fsreq *f, char *ap, int copydata));
+ int u9p_m2d __P((char *ap, struct u9fsdir *f));
+ int u9p_d2m __P((struct u9fsdir *f, char *ap));
+ int u9p_type __P((char * t));
+
+ int u9p_m_m2s __P((struct mbuf **m, struct u9fsreq *f));
+ struct mbuf * u9p_m_s2m __P((struct u9fsreq *f));
+ int u9p_m_m2d __P((struct mbuf **m, struct u9fsdir *f));
+ struct mbuf * u9p_m_d2m __P((struct u9fsdir *f));
+ u_short u9p_m_tag __P((struct mbuf **m));
+
+ #endif
diff -N -c -r /usr/src/sys/9fs/bitstring.h ./9fs/bitstring.h
*** /usr/src/sys/9fs/bitstring.h Wed Dec 31 19:00:00 1969
--- ./9fs/bitstring.h Thu Oct 21 12:34:50 1999
***************
*** 0 ****
--- 1,143 ----
+ /*
+ * Copyright (c) 1989, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * Paul Vixie.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ * must display the following acknowledgement:
+ * This product includes software developed by the University of
+ * California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)bitstring.h 8.1 (Berkeley) 7/19/93
+ */
+
+ #ifndef _BITSTRING_H_
+ #define _BITSTRING_H_
+
+ typedef unsigned char bitstr_t;
+
+ /* internal macros */
+ /* byte of the bitstring bit is in */
+ #define _bit_byte(bit) \
+ ((bit) >> 3)
+
+ /* mask for the bit within its byte */
+ #define _bit_mask(bit) \
+ (1 << ((bit)&0x7))
+
+ /* external macros */
+ /* bytes in a bitstring of nbits bits */
+ #define bitstr_size(nbits) \
+ ((((nbits) - 1) >> 3) + 1)
+
+ /* allocate a bitstring */
+ #define bit_alloc(space, nbits, type, flags) \
+ MALLOC((space), bitstr_t *, \
+ (u_int)bitstr_size(nbits)*sizeof(bitstr_t), (type), (flags))
+
+ /* allocate a bitstring on the stack */
+ #define bit_decl(name, nbits) \
+ (name)[bitstr_size(nbits)]
+
+ /* is bit N of bitstring name set? */
+ #define bit_test(name, bit) \
+ ((name)[_bit_byte(bit)] & _bit_mask(bit))
+
+ /* set bit N of bitstring name */
+ #define bit_set(name, bit) \
+ (name)[_bit_byte(bit)] |= _bit_mask(bit)
+
+ /* clear bit N of bitstring name */
+ #define bit_clear(name, bit) \
+ (name)[_bit_byte(bit)] &= ~_bit_mask(bit)
+
+ /* clear bits start ... stop in bitstring */
+ #define bit_nclear(name, start, stop) { \
+ register bitstr_t *_name = name; \
+ register int _start = start, _stop = stop; \
+ register int _startbyte = _bit_byte(_start); \
+ register int _stopbyte = _bit_byte(_stop); \
+ if (_startbyte == _stopbyte) { \
+ _name[_startbyte] &= ((0xff >> (8 - (_start&0x7))) | \
+ (0xff << ((_stop&0x7) + 1))); \
+ } else { \
+ _name[_startbyte] &= 0xff >> (8 - (_start&0x7)); \
+ while (++_startbyte < _stopbyte) \
+ _name[_startbyte] = 0; \
+ _name[_stopbyte] &= 0xff << ((_stop&0x7) + 1); \
+ } \
+ }
+
+ /* set bits start ... stop in bitstring */
+ #define bit_nset(name, start, stop) { \
+ register bitstr_t *_name = name; \
+ register int _start = start, _stop = stop; \
+ register int _startbyte = _bit_byte(_start); \
+ register int _stopbyte = _bit_byte(_stop); \
+ if (_startbyte == _stopbyte) { \
+ _name[_startbyte] |= ((0xff << (_start&0x7)) & \
+ (0xff >> (7 - (_stop&0x7)))); \
+ } else { \
+ _name[_startbyte] |= 0xff << ((_start)&0x7); \
+ while (++_startbyte < _stopbyte) \
+ _name[_startbyte] = 0xff; \
+ _name[_stopbyte] |= 0xff >> (7 - (_stop&0x7)); \
+ } \
+ }
+
+ /* find first bit clear in name */
+ #define bit_ffc(name, nbits, value) { \
+ register bitstr_t *_name = name; \
+ register int _byte, _nbits = nbits; \
+ register int _stopbyte = _bit_byte(_nbits), _value = -1; \
+ for (_byte = 0; _byte <= _stopbyte; ++_byte) \
+ if (_name[_byte] != 0xff) { \
+ _value = _byte << 3; \
+ for (_stopbyte = _name[_byte]; (_stopbyte&0x1); \
+ ++_value, _stopbyte >>= 1); \
+ break; \
+ } \
+ *(value) = _value; \
+ }
+
+ /* find first bit set in name */
+ #define bit_ffs(name, nbits, value) { \
+ register bitstr_t *_name = name; \
+ register int _byte, _nbits = nbits; \
+ register int _stopbyte = _bit_byte(_nbits), _value = -1; \
+ for (_byte = 0; _byte <= _stopbyte; ++_byte) \
+ if (_name[_byte]) { \
+ _value = _byte << 3; \
+ for (_stopbyte = _name[_byte]; !(_stopbyte&0x1); \
+ ++_value, _stopbyte >>= 1); \
+ break; \
+ } \
+ *(value) = _value; \
+ }
+
+ #endif /* !_BITSTRING_H_ */
diff -N -c -r /usr/src/sys/conf/files ./conf/files
*** /usr/src/sys/conf/files Fri Apr 30 15:32:40 1999
--- ./conf/files Thu Nov 25 15:34:34 1999
***************
*** 535,540 ****
--- 535,541 ----
netinet/tcp_timer.c optional inet
netinet/tcp_usrreq.c optional inet
netinet/udp_usrreq.c optional inet
+ netinet/il.c optional il
netipx/ipx.c optional ipx
netipx/ipx_cksum.c optional ipx
netipx/ipx_input.c optional ipx
***************
*** 571,576 ****
--- 572,586 ----
nfs/nfs_syscalls.c optional nfs
nfs/nfs_vfsops.c optional nfs
nfs/nfs_vnops.c optional nfs
+ 9fs/9fs_vfsops.c optional u9fs
+ 9fs/9fs_vnops.c optional u9fs
+ 9fs/9p.c optional u9fs
+ 9fs/9auth.c optional u9fs
+ 9fs/9crypt.c optional u9fs
+ 9fs/9fs_subr.c optional u9fs
+ 9fs/9fs_socket.c optional u9fs
+ 9fs/9fs_bio.c optional u9fs
+ 9fs/9fs_node.c optional u9fs
nfs/bootp_subr.c optional bootp
nfs/krpc_subr.c optional bootp
pccard/pccard.c optional card
diff -N -c -r /usr/src/sys/conf/options ./conf/options
*** /usr/src/sys/conf/options Tue May 11 01:35:28 1999
--- ./conf/options Mon Oct 11 19:59:14 1999
***************
*** 202,207 ****
--- 202,208 ----
BRIDGE opt_bdg.h
MROUTING opt_mrouting.h
INET opt_inet.h
+ IL opt_inet.h
IPDIVERT
DUMMYNET opt_ipdn.h
IPFIREWALL opt_ipfw.h
***************
*** 314,319 ****
--- 315,322 ----
NFS_MUIDHASHSIZ opt_nfs.h
NFS_NOSERVER opt_nfs.h
NFS_DEBUG opt_nfs.h
+
+ U9FS
# give bktr an opt_bktr.h file
OVERRIDE_CARD opt_bktr.h
diff -N -c -r /usr/src/sys/i386/conf/IL ./i386/conf/IL
*** /usr/src/sys/i386/conf/IL Wed Dec 31 19:00:00 1969
--- ./i386/conf/IL Sat Oct 23 14:01:36 1999
***************
*** 0 ****
--- 1,234 ----
+ #
+ # GENERIC -- Generic machine with WD/AHx/NCR/BTx family disks
+ #
+ # For more information read the handbook part System Administration ->
+ # Configuring the FreeBSD Kernel -> The Configuration File.
+ # The handbook is available in /usr/share/doc/handbook or online as
+ # latest version from the FreeBSD World Wide Web server
+ # <URL:http://www.FreeBSD.ORG/>
+ #
+ # An exhaustive list of options and more detailed explanations of the
+ # device lines is present in the ./LINT configuration file. If you are
+ # in doubt as to the purpose or necessity of a line, check first in LINT.
+ #
+ # $Id: GENERIC,v 1.143.2.2 1999/02/15 02:50:07 des Exp $
+
+ machine "i386"
+ cpu "I586_CPU"
+ cpu "I686_CPU"
+ ident GENERIC
+ maxusers 128
+
+ #options DDB
+ options IL # plan9's IL
+ options "U9FS" # plan9's 9fs client
+ options INET #InterNETworking
+ options FFS #Berkeley Fast Filesystem
+ options FFS_ROOT #FFS usable as root device [keep this!]
+ options MFS #Memory Filesystem
+ options MFS_ROOT #MFS usable as root device, "MFS" req'ed
+ options NFS #Network Filesystem
+ options NFS_ROOT #NFS usable as root device, "NFS" req'ed
+ options "CD9660" #ISO 9660 Filesystem
+ options "CD9660_ROOT" #CD-ROM usable as root. "CD9660" req'ed
+ options PROCFS #Process filesystem
+ options FDESC #File descriptor filesystem
+ options "COMPAT_43" #Compatible with BSD 4.3 [KEEP THIS!]
+ options SCSI_DELAY=15000 #Be pessimistic about Joe SCSI device
+ options UCONSOLE #Allow users to grab the console
+ options FAILSAFE #Be conservative
+ options USERCONFIG #boot -c editor
+ options VISUAL_USERCONFIG #visual boot -c editor
+ options NMBCLUSTERS=4096
+ options MAXFILES=10000
+
+ config kernel root on wd0
+
+ # To make an SMP kernel, the next two are needed
+ #options SMP # Symmetric MultiProcessor Kernel
+ #options APIC_IO # Symmetric (APIC) I/O
+ # Optionally these may need tweaked, (defaults shown):
+ #options NCPU=2 # number of CPUs
+ #options NBUS=4 # number of busses
+ #options NAPIC=1 # number of IO APICs
+ #options NINTR=24 # number of INTs
+
+ controller isa0
+ controller eisa0
+ controller pci0
+
+ controller fdc0 at isa? port "IO_FD1" bio irq 6 drq 2
+ disk fd0 at fdc0 drive 0
+ disk fd1 at fdc0 drive 1
+
+ options "CMD640" # work around CMD640 chip deficiency
+ controller wdc0 at isa? port "IO_WD1" bio irq 14 flags 0xa0ff vector wdintr
+ disk wd0 at wdc0 drive 0
+ disk wd1 at wdc0 drive 1
+
+ controller wdc1 at isa? port "IO_WD2" bio irq 15 flags 0xa0ff vector wdintr
+ disk wd2 at wdc1 drive 0
+ disk wd3 at wdc1 drive 1
+
+ options ATAPI #Enable ATAPI support for IDE bus
+ options ATAPI_STATIC #Don't do it as an LKM
+ #device acd0 #IDE CD-ROM
+ #device wfd0 #IDE Floppy (e.g. LS-120)
+
+ # A single entry for any of these controllers (ncr, ahb, ahc) is
+ # sufficient for any number of installed devices.
+ #controller ncr0
+ #controller ahb0
+ #controller ahc0
+ #controller isp0
+
+ # This controller offers a number of configuration options, too many to
+ # document here - see the LINT file in this directory and look up the
+ # dpt0 entry there for much fuller documentation on this.
+ controller dpt0
+
+ #controller adv0 at isa? port ? cam irq ?
+ #controller adw0
+ #controller bt0 at isa? port ? cam irq ?
+ #controller aha0 at isa? port ? cam irq ?
+ #controller aic0 at isa? port 0x340 bio irq 11
+
+ controller scbus0
+
+ device da0
+
+ device sa0
+
+ device pass0
+
+ device cd0 #Only need one of these, the code dynamically grows
+
+ #device wt0 at isa? port 0x300 bio irq 5 drq 1
+ #device mcd0 at isa? port 0x300 bio irq 10
+
+ #controller matcd0 at isa? port 0x230 bio
+
+ #device scd0 at isa? port 0x230 bio
+
+ # atkbdc0 controls both the keyboard and the PS/2 mouse
+ controller atkbdc0 at isa? port IO_KBD tty
+ device atkbd0 at isa? tty irq 1
+ device psm0 at isa? tty irq 12
+
+ device vga0 at isa? port ? conflicts
+
+ # splash screen/screen saver
+ pseudo-device splash
+
+ # syscons is the default console driver, resembling an SCO console
+ device sc0 at isa? tty
+ # Enable this and PCVT_FREEBSD for pcvt vt220 compatible console driver
+ #device vt0 at isa? tty
+ #options XSERVER # support for X server
+ #options FAT_CURSOR # start with block cursor
+ # If you have a ThinkPAD, uncomment this along with the rest of the PCVT lines
+ #options PCVT_SCANSET=2 # IBM keyboards are non-std
+
+ device npx0 at isa? port IO_NPX irq 13
+
+ #
+ # Laptop support (see LINT for more options)
+ #
+ device apm0 at isa? disable flags 0x31 # Advanced Power Management
+
+ # PCCARD (PCMCIA) support
+ #controller card0
+ #device pcic0 at card?
+ #device pcic1 at card?
+
+ device sio0 at isa? port "IO_COM1" flags 0x10 tty irq 4
+ device sio1 at isa? port "IO_COM2" tty irq 3
+ device sio2 at isa? disable port "IO_COM3" tty irq 5
+ device sio3 at isa? disable port "IO_COM4" tty irq 9
+
+ # Parallel port
+ device ppc0 at isa? port? net irq 7
+ controller ppbus0
+ device nlpt0 at ppbus?
+ device plip0 at ppbus?
+ device ppi0 at ppbus?
+ #controller vpo0 at ppbus?
+
+ #
+ # The following Ethernet NICs are all PCI devices.
+ #
+ device ax0 # ASIX AX88140A
+ device de0 # DEC/Intel DC21x4x (``Tulip'')
+ device fxp0 # Intel EtherExpress PRO/100B (82557, 82558)
+ device mx0 # Macronix 98713/98715/98725 (``PMAC'')
+ device pn0 # Lite-On 82c168/82c169 (``PNIC'')
+ device rl0 # RealTek 8129/8139
+ device tl0 # Texas Instruments ThunderLAN
+ device tx0 # SMC 9432TX (83c170 ``EPIC'')
+ device vr0 # VIA Rhine, Rhine II
+ device vx0 # 3Com 3c590, 3c595 (``Vortex'')
+ device wb0 # Winbond W89C840F
+ device xl0 # 3Com 3c90x (``Boomerang'', ``Cyclone'')
+
+ # Order is important here due to intrusive probes, do *not* alphabetize
+ # this list of network interfaces until the probes have been fixed.
+ # Right now it appears that the ie0 must be probed before ep0. See
+ # revision 1.20 of this file.
+
+ #device ed0 at isa? port 0x280 net irq 10 iomem 0xd8000
+ #device ie0 at isa? port 0x300 net irq 10 iomem 0xd0000
+ #device ep0 at isa? port 0x300 net irq 10
+ #device ex0 at isa? port? net irq?
+ #device fe0 at isa? port 0x300 net irq ?
+ #device le0 at isa? port 0x300 net irq 5 iomem 0xd0000
+ #device lnc0 at isa? port 0x280 net irq 10 drq 0
+ #device ze0 at isa? port 0x300 net irq 10 iomem 0xd8000
+ #device zp0 at isa? port 0x300 net irq 10 iomem 0xd8000
+ #device cs0 at isa? port 0x300 net irq ?
+
+ pseudo-device loop
+ pseudo-device ether
+ pseudo-device sl 1
+ pseudo-device ppp 1
+ pseudo-device tun 1
+ pseudo-device pty 32
+ pseudo-device gzip # Exec gzipped a.out's
+
+ # KTRACE enables the system-call tracing facility ktrace(2).
+ # This adds 4 KB bloat to your kernel, and slightly increases
+ # the costs of each syscall.
+ options KTRACE #kernel tracing
+
+ # This provides support for System V shared memory and message queues.
+ #
+ options SYSVSHM
+ options SYSVMSG
+
+ # The `bpfilter' pseudo-device enables the Berkeley Packet Filter. Be
+ # aware of the legal and administrative consequences of enabling this
+ # option. The number of devices determines the maximum number of
+ # simultaneous BPF client programs that can run.
+ pseudo-device bpfilter 4 #Berkeley packet filter
+
+
+ # USB support
+ #controller uhci0
+ #controller ohci0
+ #controller usb0
+ #
+ # for the moment we have to specify the priorities of the device
+ # drivers explicitly by the ordering in the list below. This will
+ # be changed in the future.
+ #
+ #device ums0
+ #device ukbd0
+ #device ulpt0
+ #device uhub0
+ #device ucom0
+ #device umodem0
+ #device hid0
+ #device ugen0
+
+ #
+ #options USB_DEBUG
+ #options USBVERBOSE
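+ 
+ # Note: the IL protocol entry added to in_proto.c below is wrapped in
+ # #ifdef IL, so building it presumably also requires an "options IL"
+ # line in the kernel config and a matching entry in the kernel options
+ # file (not shown here).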
diff -N -c -r /usr/src/sys/netinet/il.c ./netinet/il.c
*** /usr/src/sys/netinet/il.c Wed Dec 31 19:00:00 1969
--- ./netinet/il.c Tue Nov 23 19:16:13 1999
***************
*** 0 ****
--- 1,1147 ----
+ #include <unistd.h>
+ #include <ctype.h>
+ #include <sys/types.h>
+ #include <sys/param.h>
+ #include <sys/time.h>
+ #include <sys/systm.h>
+ #include <vm/vm_zone.h>
+
+ #include <sys/malloc.h>
+ #include <machine/param.h>
+ #include <sys/mbuf.h>
+ #include <sys/protosw.h>
+ #include <sys/socket.h>
+ #include <sys/socketvar.h>
+ #include <sys/proc.h>
+ #include <net/if.h>
+ #include <net/route.h>
+ #include <netinet/in_systm.h>
+ #include <netinet/in.h>
+ #include <netinet/in_var.h>
+ #include <netinet/if_ether.h>
+ #include <netinet/ip.h>
+ #include <netinet/ip_var.h>
+ #include <netinet/in_pcb.h>
+ #include <errno.h>
+
+ #include <netinet/il.h>
+ #include <netinet/il_var.h>
+
+ struct ilpcb * il_drop(struct ilpcb *ilpcb, int errno0);
+ static struct ilpcb * il_close(struct ilpcb *ilpcb);
+
+ /* kernel protocol states needed */
+ static struct inpcbhead ilb;
+ static struct inpcbinfo ilbinfo;
+
+ u_long il_sendspace = 1024*64;
+ u_long il_recvspace = 1024*64;
+
+ /*
+ * Target size of the IL PCB hash tables. Must be a power of two.
+ *
+ * Unlike TCP's tcbhashsize, there is currently no kernel environment
+ * variable or sysctl to override this value.
+ */
+ #ifndef ILBHASHSIZE
+ #define ILBHASHSIZE 512
+ #endif
+
+ enum /* Connection state */
+ {
+ ILS_CLOSED,
+ ILS_SYNCER,
+ ILS_SYNCEE,
+ ILS_ESTABLISHED,
+ ILS_LISTENING,
+ ILS_CLOSING,
+ ILS_OPENING, /* only for file server */
+ };
+
+ char *ilstates[] =
+ {
+ "Closed",
+ "Syncer",
+ "Syncee",
+ "Established",
+ "Listening",
+ "Closing",
+ "Opening", /* only for file server */
+ };
+
+ enum /* Packet types */
+ {
+ ILT_SYNC,
+ ILT_DATA,
+ ILT_DATAQUERY,
+ ILT_ACK,
+ ILT_QUERY,
+ ILT_STATE,
+ ILT_CLOSE
+ };
+
+ char *iltype[] =
+ {
+ "sync",
+ "data",
+ "dataquery",
+ "ack",
+ "query",
+ "state",
+ "close",
+ };
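+ 
+ /*
+ * How the packet types are used below: SYNC carries the initial sequence
+ * id during connection setup, DATA carries a payload, DATAQUERY is a data
+ * retransmission that also asks the peer for a STATE report, ACK and STATE
+ * report the receiver's progress (STATE is sent in reply to QUERY or
+ * DATAQUERY), and CLOSE tears the connection down.
+ */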
+
+ /*
+ * This is the actual shape of what we allocate using the zone
+ * allocator. Doing it this way allows us to protect both structures
+ * using the same generation count, and also eliminates the overhead
+ * of allocating ilpcbs separately. By hiding the structure here,
+ * we avoid changing most of the rest of the code (although it needs
+ * to be changed, eventually, for greater efficiency).
+ */
+ #define ALIGNMENT 32
+ #define ALIGNM1 (ALIGNMENT - 1)
+ struct inp_ilpcb {
+ union {
+ struct inpcb inp;
+ char align[(sizeof(struct inpcb) + ALIGNM1) & ~ALIGNM1];
+ } inp_tp_u;
+ struct ilpcb ilpcb;
+ };
+ #undef ALIGNMENT
+ #undef ALIGNM1
+
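+ /*
+ * Reassembly queue helpers: segq is a singly linked list of out-of-order
+ * packets (still carrying their IP and IL headers), chained through
+ * m_nextpkt and kept sorted by IL sequence id; duplicates are freed on
+ * insert.
+ */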
+ static __inline struct mbuf * il_segq_top(struct ilpcb * ilpcb)
+ {
+ return (ilpcb->segq);
+ }
+
+ static __inline void il_segq_dequeue(struct ilpcb * ilpcb)
+ {
+ struct mbuf * m = ilpcb->segq;
+ ilpcb->segq = m->m_nextpkt;
+ m->m_nextpkt = 0;
+ }
+
+ static __inline void il_segq_insert(struct ilpcb * ilpcb, struct mbuf * m, u_long seq, struct ilhdr * il)
+ {
+ u_long pseq;
+ struct mbuf * mp, * mq;
+
+ m->m_pkthdr.header = il;
+
+ mp = 0;
+ mq = ilpcb->segq;
+ while ( mq ) {
+ il = mq->m_pkthdr.header;
+ pseq = ntohl(*(u_long *)il->ilid);
+ if( pseq > seq )
+ break;
+ if( pseq == seq ) { /* we already got this packet */
+ m_freem(m);
+ return;
+ }
+ mp = mq;
+ mq = mq->m_nextpkt;
+ }
+
+ if( mp == 0 ) {
+ m->m_nextpkt = ilpcb->segq;
+ ilpcb->segq = m;
+ return;
+ }
+ mp->m_nextpkt = m;
+ m->m_nextpkt = mq;
+ }
+
+ void il_init()
+ {
+ LIST_INIT(&ilb);
+ ilbinfo.listhead = &ilb;
+ ilbinfo.hashbase = hashinit(ILBHASHSIZE, M_PCB, &ilbinfo.hashmask);
+ ilbinfo.porthashbase = hashinit(ILBHASHSIZE, M_PCB,
+ &ilbinfo.porthashmask);
+ ilbinfo.ipi_zone = zinit("ilpcb", sizeof(struct inp_ilpcb), maxsockets,
+ ZONE_INTERRUPT, 0);
+ }
+
+ /* fill in il header and cksum, ip src/dst addresses */
+ static int il_output(struct ilpcb * ilpcb, struct mbuf *m, int type, u_long seq, u_char spec)
+ {
+ struct ilhdr * il;
+ struct ip * ip;
+ int illen;
+ struct inpcb * inp;
+ struct socket * so;
+
+ /* XXX: check total size is less than IP_MAXPACKET */
+
+ 	if( m == 0 ) {
+ 		/* retransmission: copy the oldest record still in the send buffer */
+ 		inp = ilpcb->inpcb;
+ 		so = inp->inp_socket;
+ 		m = m_copypacket(so->so_snd.sb_mb, M_DONTWAIT);
+ 		if( m == 0 )
+ 			return ENOBUFS;
+ 	}
+
+ /*
+ * Calculate data length and get a mbuf
+ * for IL and IP headers.
+ */
+ illen = m->m_pkthdr.len; /* size of il payload */
+ M_PREPEND(m, sizeof(struct ip) + sizeof(struct ilhdr), M_DONTWAIT);
+ if( m == 0 )
+ return ENOBUFS;
+
+ ip = mtod(m, struct ip *);
+ il = (struct ilhdr *) (ip+1);
+ bzero(ip, sizeof(*ip));
+
+ ip->ip_p = IPPROTO_IL;
+ ip->ip_src = ilpcb->inpcb->inp_laddr;
+ ip->ip_dst = ilpcb->inpcb->inp_faddr;
+ ip->ip_len = m->m_pkthdr.len;
+ ip->ip_ttl = ilpcb->inpcb->inp_ip_ttl; /* XXX */
+ ip->ip_tos = ilpcb->inpcb->inp_ip_tos; /* XXX */
+
+ *(u_short *)il->illen = htons(illen + sizeof(struct ilhdr));
+ il->iltype = type;
+ il->ilspec = spec;
+ *(u_short *)il->ilsrc = ilpcb->inpcb->inp_lport;
+ *(u_short *)il->ildst = ilpcb->inpcb->inp_fport;
+ if ( type != ILT_SYNC )
+ *(u_long *)il->ilid = htonl(seq);
+ else
+ *(u_long *)il->ilid = htonl(ilpcb->start);
+
+ if( type != ILT_ACK && type != ILT_STATE) {
+ if( ilpcb->rxt_timer == 0 )
+ ilpcb->rxt_timer = ilpcb->rxt_timer_cur;
+ if( ilpcb->death_timer == 0 )
+ ilpcb->death_timer = ilpcb->death_timer_cur;
+ }
+
+ *(u_long *)il->ilack = htonl(ilpcb->recvd);
+ il->ilsum[0] = il->ilsum[1] = 0;
+
+ /* IL checksum does not cover IP header */
+ m->m_data += sizeof(struct ip);
+ m->m_len -= sizeof(struct ip);
+ *(u_short *)il->ilsum = in_cksum(m, illen + sizeof(struct ilhdr));
+ m->m_data -= sizeof(struct ip);
+ m->m_len += sizeof(struct ip);
+
+ return ip_output(m, ilpcb->inpcb->inp_options, &ilpcb->inpcb->inp_route,
+ ilpcb->inpcb->inp_socket->so_options & SO_DONTROUTE ,0);
+ }
+
+ static int il_send_empty(struct ilpcb * ilpcb, int type, u_char spec)
+ {
+ struct mbuf * m0;
+
+ 	MGETHDR(m0, M_DONTWAIT, MT_DATA);
+ 	if( m0 == 0 )		/* may fail with M_DONTWAIT */
+ 		return ENOBUFS;
+ 	m0->m_len = 0;
+ m0->m_pkthdr.len = 0;
+ MH_ALIGN(m0, 0); /* leave space for the packet header */
+
+ return il_output(ilpcb, m0, type, ilpcb->next, spec);
+ }
+
+ static int il_respond(struct ilpcb * ilpcb, struct ip * ip, struct ilhdr *il, int type, u_char spec)
+ {
+ struct mbuf * m;
+ int illen;
+ struct ip * ip0;
+ struct ilhdr *il0;
+ struct route * ro;
+ struct route sro;
+
+ if( ilpcb ) {
+ ro = & ilpcb->inpcb->inp_route;
+ } else {
+ ro = &sro;
+ bzero(ro, sizeof *ro);
+ }
+
+ 	MGETHDR(m, M_DONTWAIT, MT_DATA);
+ 	if( m == 0 )		/* may fail with M_DONTWAIT */
+ 		return ENOBUFS;
+ 	m->m_len = 0;
+ m->m_pkthdr.len = 0;
+ MH_ALIGN(m, 0); /* leave space for the packet header */
+ illen = m->m_pkthdr.len; /* size of il payload */
+ M_PREPEND(m, sizeof(struct ip) + sizeof(struct ilhdr), M_DONTWAIT);
+ if( m == 0 )
+ return ENOBUFS;
+
+ ip0 = mtod(m, struct ip *);
+ il0 = (struct ilhdr *) (ip0+1);
+ bzero(ip0, sizeof(*ip0));
+
+ ip0->ip_p = IPPROTO_IL;
+ ip0->ip_src = ip->ip_dst;
+ ip0->ip_dst = ip->ip_src;
+ ip0->ip_ttl = ip_defttl;
+ ip0->ip_len = sizeof(struct ip) + sizeof(struct ilhdr);
+ *(u_short *)il0->illen = htons(illen + sizeof(struct ilhdr));
+ il0->iltype = type;
+ il0->ilspec = spec;
+ bcopy(il->ilsrc, il0->ildst, 2);
+ bcopy(il->ildst, il0->ilsrc, 2);
+ *(u_long *)il0->ilid = 0;
+ bcopy(il->ilid, il0->ilack, 4);
+ il0->ilsum[0] = il0->ilsum[1] = 0;
+
+ /* IL checksum does not cover IP header */
+ m->m_data += sizeof(struct ip);
+ m->m_len -= sizeof(struct ip);
+ *(u_short *)il0->ilsum = in_cksum(m, illen + sizeof(struct ilhdr));
+ m->m_data -= sizeof(struct ip);
+ m->m_len += sizeof(struct ip);
+
+ return ip_output(m, 0, ro, 0 ,0);
+ }
+
+ static struct ilpcb *
+ il_newconn(struct ilpcb * ilpcb, struct in_addr ti_dst, u_short ti_dport,
+ struct in_addr ti_src, u_short ti_sport)
+ {
+ register struct ilpcb * ilpcb0;
+ struct socket *so2, * so;
+ struct inpcb * inp;
+ struct sockaddr_in sin;
+
+ so = ilpcb->inpcb->inp_socket;
+ so2 = sonewconn(so, 0);
+ if (so2 == 0) {
+ so2 = sodropablereq(so);
+ if (so2) {
+ il_drop(sotoilpcb(so2), ETIMEDOUT);
+ so2 = sonewconn(so, 0);
+ }
+ if (!so2)
+ return 0;
+ }
+ so = so2;
+
+ inp = (struct inpcb *)so->so_pcb;
+ inp->inp_laddr = ti_dst;
+ inp->inp_lport = ti_dport;
+ if (in_pcbinshash(inp) != 0) {
+ /*
+ * Undo the assignments above if we failed to put
+ * the PCB on the hash lists.
+ */
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ inp->inp_lport = 0;
+
+ soabort(so);
+ return 0;
+ }
+
+ bzero((char *)&sin, sizeof(sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof(sin);
+ sin.sin_addr = ti_src;
+ sin.sin_port = ti_sport;
+ if (in_pcbconnect(inp, (struct sockaddr *)&sin, &proc0)) {
+ inp->inp_laddr.s_addr = INADDR_ANY;
+ soabort(so);
+ return 0;
+ }
+
+ ilpcb0 = intoilpcb(inp);
+ ilpcb0->state = ILS_LISTENING;
+
+ return ilpcb0;
+ }
+
+ /* ack processing */
+ static void il_proc_ack(struct ilpcb * ilpcb, struct socket * so, u_long ack)
+ {
+ if( ack >= ilpcb->unacked ) {
+ ilpcb->rxt_timer = 0;
+ ilpcb->death_timer = 0;
+
+ /* the rxt timer is not prop. to RTT */
+ /* reset it so that the first rxt is always 1 second */
+ ilpcb->rxt_timer_cur = 2;
+
+ if( ack >= ilpcb->next )
+ ack = ilpcb->next - 1;
+ while (ilpcb->unacked <= ack ) {
+ sbdroprecord(&so->so_snd);
+ ilpcb->unacked++;
+ }
+ if( ilpcb->unacked != ilpcb->next ) {
+ ilpcb->rxt_timer = ilpcb->rxt_timer_cur;
+ ilpcb->death_timer = ilpcb->death_timer_cur; /* do we need this here? */
+ }
+ sowwakeup(so);
+ }
+ }
+
+ static int il_proc_data(struct ilpcb * ilpcb, struct socket * so, struct mbuf * m, u_long seq, int spec)
+ {
+ struct mbuf * m0;
+ struct ip * ip;
+ int hlen = sizeof(struct ip) + sizeof(struct ilhdr);
+ struct ilhdr * il;
+ int needack = 0;
+
+ ip = mtod(m, struct ip *);
+ il = (struct ilhdr *)(ip+1);
+ if( seq == ilpcb->recvd + 1 ) {
+ needack = 1;
+ while(1) {
+ ilpcb->recvd = seq;
+
+ m->m_len -= hlen;
+ m->m_pkthdr.len -= hlen;
+ m->m_data += hlen;
+ sbappendrecord(&so->so_rcv, m);
+
+ if( (m0 = il_segq_top(ilpcb)) == 0 )
+ break;
+ ip = mtod(m0, struct ip *);
+ il = (struct ilhdr *)(ip+1);
+ seq = ntohl(*(u_long *)il->ilid);
+ if( seq != ilpcb->recvd + 1 )
+ break;
+ il_segq_dequeue(ilpcb);
+ m = m0;
+ 		}
+ sorwakeup(so);
+ } else {
+ if( seq > ilpcb->recvd )
+ il_segq_insert(ilpcb, m, seq, il);
+ else
+ m_freem(m);
+ }
+
+ return needack;
+ }
+
+ /* assume we only have one connection */
+ void il_input(struct mbuf * m, int iphlen)
+ {
+ struct ilhdr * il;
+ struct ilpcb * ilpcb = 0;
+ int len, type;
+ u_long seq, ack;
+ struct ip * ip;
+ struct inpcb * inp;
+ u_short sport, dport;
+ struct socket * so;
+ u_char spec;
+
+ /*
+ * Strip IP options, if any; should skip this,
+ * make available to user, and use on returned packets,
+ * but we don't yet have a way to check the checksum
+ * with options still present.
+ */
+ if (iphlen > sizeof (struct ip)) {
+ ip_stripoptions(m, (struct mbuf *)0);
+ iphlen = sizeof(struct ip);
+ }
+
+ /*
+ * Get IP and IL header together in first mbuf.
+ */
+ ip = mtod(m, struct ip *);
+ if (m->m_len < iphlen + sizeof(struct ilhdr)) {
+ if ((m = m_pullup(m, iphlen + sizeof(struct ilhdr))) == 0) {
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+ il = (struct ilhdr *)((caddr_t)ip + iphlen);
+
+ len = ntohs(*(u_short *)il->illen);
+ seq = ntohl(*(u_long *)il->ilid);
+ ack = ntohl(*(u_long *)il->ilack);
+ sport = *(u_short *)il->ilsrc;
+ dport = *(u_short *)il->ildst;
+ type = il->iltype;
+ spec = il->ilspec;
+
+ inp = in_pcblookup_hash(&ilbinfo, ip->ip_src, sport, ip->ip_dst, dport, 1);
+ if ( inp == 0 && type == ILT_SYNC )
+ goto dropwithrest;
+ if( inp == 0 )
+ goto drop;
+
+ ilpcb = intoilpcb(inp);
+ if( ilpcb == 0 )
+ goto drop;
+
+ so = inp->inp_socket;
+ if( type == ILT_QUERY ) { /* XXX: can we use the same mbuf to send? */
+ il_send_empty(ilpcb, ILT_STATE, il->ilspec);
+ goto drop;
+ }
+
+ again:
+ /* FSM transition */
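+ 	/*
+ 	 * Connection setup as implemented below: the active side (ILS_SYNCER)
+ 	 * sends a SYNC carrying its start id; the listener creates a new pcb,
+ 	 * enters ILS_SYNCEE and answers with its own SYNC; each side moves to
+ 	 * ILS_ESTABLISHED once its start id has been acknowledged, the active
+ 	 * side confirming with an ACK.
+ 	 */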
+ switch( ilpcb->state ) {
+ case ILS_SYNCER:
+ if( ack != ilpcb->start )
+ goto drop;
+ switch( type ) {
+ case ILT_SYNC:
+ ilpcb->unacked++;
+ ilpcb->recvd = seq;
+ il_send_empty(ilpcb, ILT_ACK, 0);
+ ilpcb->state = ILS_ESTABLISHED;
+ ilpcb->rxt_timer = 0;
+ ilpcb->death_timer = 0;
+ soisconnected(inp->inp_socket);
+ break;
+ case ILT_CLOSE:
+ il_drop(ilpcb, ECONNREFUSED);
+ break;
+ }
+ break;
+
+ case ILS_LISTENING:
+ 		if( type == ILT_SYNC && ack == 0 && (so->so_options & SO_ACCEPTCONN) ) {
+ 			ilpcb = il_newconn(ilpcb, ip->ip_dst, dport, ip->ip_src, sport);
+ 			if( ilpcb == 0 )	/* could not allocate a new connection */
+ 				goto drop;
+ 
+ 			ilpcb->next = ilpcb->start = random();
+ ilpcb->unacked = ilpcb->next;
+ ilpcb->rstart = ilpcb->recvd = seq;
+ ilpcb->state = ILS_SYNCEE;
+ il_send_empty(ilpcb, ILT_SYNC, 0);
+ ilpcb->next++;
+ } else
+ il_respond(ilpcb, ip, il, ILT_CLOSE, 0);
+ break;
+
+ case ILS_SYNCEE:
+ if( ack == ilpcb->start ) {
+ ilpcb->rxt_timer = 0;
+ ilpcb->unacked++;
+ ilpcb->state = ILS_ESTABLISHED;
+ soisconnected(so);
+ goto again;
+ break;
+ }
+ if( type == ILT_SYNC && seq == ilpcb->recvd && ack == 0 )
+ il_send_empty(ilpcb, ILT_SYNC, 0);
+ break;
+
+ case ILS_ESTABLISHED:
+ il_proc_ack(ilpcb, so, ack);
+ switch( type ) {
+ case ILT_DATA:
+ if( il_proc_data(ilpcb, so, m, seq, spec) )
+ ilpcb->flags |= ILF_NEEDACK;
+ goto done;
+ break;
+ case ILT_DATAQUERY:
+ il_proc_data(ilpcb, so, m, seq, spec);
+ il_send_empty(ilpcb, ILT_STATE, spec);
+ goto done;
+ break;
+ case ILT_CLOSE:
+ if( ack < ilpcb->next && ack >= ilpcb->start ) {
+ if( ilpcb->recvd+1 == seq )
+ ilpcb->recvd = seq;
+ il_send_empty(ilpcb, ILT_CLOSE, 0);
+ ilpcb->state = ILS_CLOSING;
+ }
+ break;
+ case ILT_STATE:
+ if( ack < ilpcb->rxt_max ) {
+ ilpcb->rxt_max = ilpcb->next;
+ il_output(ilpcb, 0, ILT_DATAQUERY, ilpcb->unacked, 1);
+ }
+ break;
+ case ILT_SYNC:
+ il_send_empty(ilpcb, ILT_ACK, 0);
+ break;
+ }
+ break;
+
+ case ILS_CLOSED:
+ goto drop;
+ break;
+
+ case ILS_CLOSING:
+ if( type == ILT_CLOSE ) {
+ if( ilpcb->recvd+1 == seq )
+ ilpcb->recvd = seq;
+ il_send_empty(ilpcb, ILT_CLOSE, 0);
+ ilpcb->state = ILS_CLOSED;
+ il_close(ilpcb);
+ }
+ break;
+ }
+
+ m_freem(m);
+ done:
+ return;
+
+ dropwithrest:
+ il_respond(ilpcb, ip, il, ILT_CLOSE, 0);
+ drop:
+ m_freem(m);
+ }
+
+ static void il_sendseqinit(struct ilpcb * ilpcb)
+ {
+ ilpcb->start = ilpcb->next = random();
+ ilpcb->unacked = ilpcb->next;
+ ilpcb->state = ILS_SYNCER;
+ ilpcb->next++;
+ }
+
+ static void il_rxt_timeout(struct ilpcb * ilpcb)
+ {
+ switch ( ilpcb->state ) {
+ case ILS_ESTABLISHED:
+ il_output(ilpcb, 0, ILT_DATAQUERY, ilpcb->unacked, 1);
+ ilpcb->rxtot++;
+ break;
+ case ILS_SYNCER:
+ case ILS_SYNCEE:
+ il_send_empty(ilpcb, ILT_SYNC, 0);
+ break;
+ case ILS_CLOSING:
+ il_send_empty(ilpcb, ILT_CLOSE, 0);
+ break;
+ }
+ ilpcb->rxt_timer = ilpcb->rxt_timer_cur;
+ }
+
+ void il_ctlinput(int cmd, struct sockaddr *sa, void *vip)
+ {}
+
+ int il_ctloutput(struct socket *so, struct sockopt *sopt)
+ { return 0; }
+
+ void il_drain()
+ {}
+
+ void il_slowtimo()
+ {
+ struct ilpcb * ilpcb;
+ struct inpcb * inp;
+ int s;
+
+ s = splnet();
+ for(inp = ilb.lh_first; inp; inp = inp->inp_list.le_next) {
+ ilpcb = intoilpcb(inp);
+ if(ilpcb->death_timer && --ilpcb->death_timer == 0 )
+ il_drop(ilpcb, ETIMEDOUT);
+
+ if(ilpcb->rxt_timer && --ilpcb->rxt_timer == 0 ) {
+ ilpcb->rxt_timer_cur <<= 1;
+ il_rxt_timeout(ilpcb);
+ }
+ }
+ splx(s);
+ }
+
+ void il_fasttimo()
+ {
+ struct ilpcb * ilpcb;
+ struct inpcb * inp;
+ int s;
+
+ s = splnet();
+ for(inp = ilb.lh_first; inp; inp = inp->inp_list.le_next) {
+ ilpcb = intoilpcb(inp);
+ if(ilpcb->flags & ILF_NEEDACK) {
+ ilpcb->flags &= ~ILF_NEEDACK;
+ il_send_empty(ilpcb, ILT_ACK, 0);
+ }
+ }
+ splx(s);
+ }
+
+ static struct ilpcb * il_newilpcb(struct inpcb * inp)
+ {
+ struct inp_ilpcb *it;
+ register struct ilpcb *ilpcb;
+
+ it = (struct inp_ilpcb *)inp;
+ ilpcb = &it->ilpcb;
+ bzero((char *) ilpcb, sizeof(struct ilpcb));
+
+ ilpcb->state = ILS_CLOSED;
+ ilpcb->inpcb = inp;
+ ilpcb->rxt_timer_cur = 2;
+ ilpcb->death_timer_cur = 20;
+
+ ilpcb->inpcb = inp; /* XXX */
+ inp->inp_ip_ttl = ip_defttl;
+ inp->inp_ppcb = (caddr_t)ilpcb;
+ return (ilpcb); /* XXX */
+ }
+
+ /*
+ * Common subroutine to open an IL connection to the remote host specified
+ * by struct sockaddr_in in nam. Call in_pcbbind to assign a local port
+ * number if needed, and in_pcbladdr to do the routing and to choose a
+ * local host address (interface). Check by hand that no existing PCB
+ * already claims the resulting address/port pair, then fill in the
+ * foreign address and rehash the PCB.
+ * Finally seed the sequence space and enter the ILS_SYNCER state;
+ * the caller sends the initial SYNC packet.
+ */
+ static int
+ il_connect(struct ilpcb *ilpcb, struct sockaddr *nam, struct proc *p)
+ {
+ struct inpcb *inp = ilpcb->inpcb, *oinp;
+ struct socket *so = inp->inp_socket;
+ struct sockaddr_in *sin = (struct sockaddr_in *)nam;
+ struct sockaddr_in *ifaddr;
+ int error;
+
+ if (inp->inp_lport == 0) {
+ error = in_pcbbind(inp, (struct sockaddr *)0, p);
+ if (error)
+ return error;
+ }
+
+ 	/*
+ 	 * Do the routing and the duplicate-connection check by hand
+ 	 * rather than calling in_pcbconnect; this mirrors the TCP code
+ 	 * this routine was adapted from (IL has no TIME_WAIT state).
+ 	 */
+ error = in_pcbladdr(inp, nam, &ifaddr);
+ if (error)
+ return error;
+ oinp = in_pcblookup_hash(inp->inp_pcbinfo,
+ sin->sin_addr, sin->sin_port,
+ inp->inp_laddr.s_addr != INADDR_ANY ? inp->inp_laddr
+ : ifaddr->sin_addr,
+ inp->inp_lport, 0);
+ if (oinp) {
+ return EADDRINUSE;
+ }
+ if (inp->inp_laddr.s_addr == INADDR_ANY)
+ inp->inp_laddr = ifaddr->sin_addr;
+ inp->inp_faddr = sin->sin_addr;
+ inp->inp_fport = sin->sin_port;
+ in_pcbrehash(inp);
+
+ #if 0
+ ilpcb->t_template = tcp_template(tp);
+ if (ilpcb->t_template == 0) {
+ in_pcbdisconnect(inp);
+ return ENOBUFS;
+ }
+ #endif
+
+ soisconnecting(so);
+ il_sendseqinit(ilpcb);
+
+ return 0;
+ }
+
+ static int il_usr_send(struct socket *so, int flags, struct mbuf * m, struct sockaddr *addr, struct mbuf *control, struct proc *p)
+ {
+ struct ilpcb * ilpcb;
+ struct inpcb * inp = sotoinpcb(so);
+ int error;
+ struct mbuf * m0;
+
+ if (inp == 0) {
+ m_freem(m);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ if (sbspace(&so->so_snd) < -512) {
+ m_freem(m);
+ error = ENOBUFS;
+ goto out;
+ }
+
+ sbappendrecord(&so->so_snd, m);
+ m0 = m_copypacket(m, M_DONTWAIT);
+ error = il_output(ilpcb, m0, ILT_DATA, ilpcb->next++, 0);
+
+ out:
+ return error;
+ }
+
+ static int il_usr_attach(struct socket *so, int proto, struct proc *p)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb *ilpcb = 0;
+
+ if (inp) {
+ error = EISCONN;
+ goto out;
+ }
+
+ if (so->so_snd.sb_hiwat == 0 || so->so_rcv.sb_hiwat == 0) {
+ error = soreserve(so, il_sendspace, il_recvspace);
+ if (error)
+ goto out;
+ }
+
+ error = in_pcballoc(so, &ilbinfo, p);
+
+ if (error)
+ goto out;
+
+ inp = sotoinpcb(so);
+ ilpcb = il_newilpcb(inp);
+ if (ilpcb == 0) {
+ int nofd = so->so_state & SS_NOFDREF; /* XXX */
+
+ so->so_state &= ~SS_NOFDREF; /* don't free the socket yet */
+ in_pcbdetach(inp);
+ so->so_state |= nofd;
+ error = ENOBUFS;
+ goto out;
+ }
+ ilpcb->state = ILS_CLOSED;
+ ilpcb->segq = 0;
+
+ out:
+ splx(s);
+ return error;
+
+ }
+
+ static int il_usr_bind(struct socket *so, struct sockaddr *nam, struct proc *p)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb *ilpcb;
+ struct sockaddr_in *sinp;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ /*
+ * Must check for multicast addresses and disallow binding
+ * to them.
+ */
+ sinp = (struct sockaddr_in *)nam;
+ if (sinp->sin_family == AF_INET &&
+ IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
+ error = EAFNOSUPPORT;
+ goto out;
+ }
+ error = in_pcbbind(inp, nam, p);
+ out: splx(s);
+ return error;
+ }
+
+ /*
+ * Initiate connection to peer.
+ * Enter the ILS_SYNCER state, mark the socket as connecting,
+ * seed the output sequence space, and send the initial SYNC packet.
+ */
+ static int
+ il_usr_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb *ilpcb;
+ struct sockaddr_in *sinp;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ /*
+ * Must disallow IL connections to multicast addresses.
+ */
+ sinp = (struct sockaddr_in *)nam;
+ if (sinp->sin_family == AF_INET
+ && IN_MULTICAST(ntohl(sinp->sin_addr.s_addr))) {
+ error = EAFNOSUPPORT;
+ goto out;
+ }
+
+ if ((error = il_connect(ilpcb, nam, p)) != 0)
+ goto out;
+
+ error = il_send_empty(ilpcb, ILT_SYNC, 0);
+
+ out: splx(s);
+ return error;
+ }
+
+ /*
+ * Close an IL control block:
+ *	discard all space held by the connection (the reassembly queue),
+ *	discard the internet protocol block,
+ *	wake up any sleepers.
+ */
+ static struct ilpcb *
+ il_close(struct ilpcb *ilpcb)
+ {
+ register struct mbuf *q;
+ register struct mbuf *nq;
+ struct inpcb *inp = ilpcb->inpcb;
+ struct socket *so = inp->inp_socket;
+
+ /* free the reassembly queue, if any */
+ for (q = ilpcb->segq; q; q = nq) {
+ nq = q->m_nextpkt;
+ ilpcb->segq = nq;
+ m_freem(q);
+ }
+ inp->inp_ppcb = NULL;
+ soisdisconnected(so);
+ in_pcbdetach(inp);
+ return ((struct ilpcb *)0);
+ }
+
+ /*
+ * User issued close, and wish to trail through shutdown states:
+ * if the connection is closed or still listening, just discard it.
+ * Otherwise send a CLOSE packet to the peer and enter the ILS_CLOSING
+ * state; the control block is freed when the peer's CLOSE comes back
+ * (see the ILS_CLOSING case in il_input).
+ */
+ static struct ilpcb *
+ il_usrclosed(struct ilpcb *ilpcb)
+ {
+
+ switch (ilpcb->state) {
+ case ILS_CLOSED:
+ case ILS_LISTENING:
+ ilpcb->state = ILS_CLOSED;
+ ilpcb = il_close(ilpcb);
+ break;
+
+ case ILS_SYNCER:
+ case ILS_SYNCEE:
+ case ILS_ESTABLISHED:
+ il_send_empty(ilpcb, ILT_CLOSE, 0);
+ ilpcb->state = ILS_CLOSING;
+ break;
+
+ case ILS_CLOSING:
+ break;
+ }
+ return (ilpcb);
+ }
+
+ /*
+ * Drop an IL connection, reporting the specified error.
+ * If the connection state is ILS_SYNCEE, ILS_ESTABLISHED or ILS_CLOSING,
+ * a CLOSE packet is sent to the peer.
+ */
+ struct ilpcb *
+ il_drop(ilpcb, errno0)
+ register struct ilpcb *ilpcb;
+ int errno0;
+ {
+ struct socket *so = ilpcb->inpcb->inp_socket;
+
+ switch(ilpcb->state) {
+ case ILS_SYNCEE:
+ case ILS_ESTABLISHED:
+ case ILS_CLOSING:
+ il_send_empty(ilpcb, ILT_CLOSE, 0);
+ default:
+ break;
+ }
+ ilpcb->state = ILS_CLOSED;
+ so->so_error = errno0;
+ return (il_close(ilpcb));
+ }
+
+ /*
+ * Initiate (or continue) disconnect.
+ * Mark the socket as disconnecting, flush unread input data, and
+ * switch states via il_usrclosed, which sends a CLOSE packet to the
+ * peer when the connection has gotten far enough to need one.
+ */
+ static struct ilpcb *
+ il_disconnect(struct ilpcb *ilpcb)
+ {
+ struct socket *so = ilpcb->inpcb->inp_socket;
+
+ soisdisconnecting(so);
+ sbflush(&so->so_rcv);
+ ilpcb = il_usrclosed(ilpcb);
+
+ return (ilpcb);
+ }
+
+
+ /*
+ * pru_detach() detaches the IL protocol from the socket.
+ * If the protocol state is non-embryonic, then can't
+ * do this directly: have to initiate a pru_disconnect(),
+ * which may finish later; embryonic control blocks can just
+ * be discarded here.
+ */
+ static int
+ il_usr_detach(struct socket *so)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb *ilpcb;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL; /* XXX */
+ }
+ ilpcb = intoilpcb(inp);
+ ilpcb = il_disconnect(ilpcb);
+ splx(s);
+ return error;
+ }
+
+ /*
+ * Mark the connection as being incapable of further output.
+ */
+ static int
+ il_usr_shutdown(struct socket *so)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb *ilpcb;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ socantsendmore(so);
+ ilpcb = il_usrclosed(ilpcb);
+ splx(s);
+ return error;
+ }
+
+ /*
+ * Initiate disconnect from peer.
+ * If the connection never passed the embryonic stage, just drop it;
+ * otherwise mark the socket disconnecting, drain unread data, switch
+ * state to reflect the user close, and send a CLOSE packet to the
+ * peer. The socket is really disconnected when the peer's CLOSE
+ * comes back (see the ILS_CLOSING case in il_input).
+ */
+ static int
+ il_usr_disconnect(struct socket *so)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb * ilpcb;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ il_disconnect(ilpcb);
+ splx(s);
+ return error;
+ }
+
+ /*
+ * Abort the IL connection.
+ */
+ static int
+ il_usr_abort(struct socket *so)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb * ilpcb;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ ilpcb = il_drop(ilpcb, ECONNABORTED);
+ splx(s);
+ return error;
+
+ }
+
+ /*
+ * Prepare to accept connections.
+ */
+ static int
+ il_usr_listen(struct socket *so, struct proc *p)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb *ilpcb;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ if (inp->inp_lport == 0)
+ error = in_pcbbind(inp, (struct sockaddr *)0, p);
+ if (error == 0)
+ ilpcb->state = ILS_LISTENING;
+
+ splx(s);
+ return error;
+ }
+
+ /*
+ * Accept a connection. Essentially all the work is
+ * done at higher levels; just return the address
+ * of the peer, storing through addr.
+ */
+ static int
+ il_usr_accept(struct socket *so, struct sockaddr **nam)
+ {
+ int s = splnet();
+ int error = 0;
+ struct inpcb *inp = sotoinpcb(so);
+ struct ilpcb * ilpcb;
+
+ if (inp == 0) {
+ splx(s);
+ return EINVAL;
+ }
+ ilpcb = intoilpcb(inp);
+
+ in_setpeeraddr(so, nam);
+ splx(s);
+ return error;
+ }
+
+ /* xxx - should be const */
+ struct pr_usrreqs il_usrreqs = {
+ il_usr_abort, il_usr_accept, il_usr_attach, il_usr_bind,
+ il_usr_connect, pru_connect2_notsupp, in_control, il_usr_detach,
+ il_usr_disconnect, il_usr_listen, in_setpeeraddr, pru_rcvd_notsupp,
+ pru_rcvoob_notsupp, il_usr_send, pru_sense_null, il_usr_shutdown,
+ in_setsockaddr, sosend, soreceive, sopoll
+ };
diff -N -c -r /usr/src/sys/netinet/il.h ./netinet/il.h
*** /usr/src/sys/netinet/il.h Wed Dec 31 19:00:00 1969
--- ./netinet/il.h Thu Sep 30 11:24:51 1999
***************
*** 0 ****
--- 1,17 ----
+
+ #ifndef NETINET_IL_H_
+ #define NETINET_IL_H_
+
+ struct ilhdr
+ {
+ u_char ilsum[2]; /* Checksum including header */
+ u_char illen[2]; /* Packet length */
+ u_char iltype; /* Packet type */
+ u_char ilspec; /* Special */
+ u_char ilsrc[2]; /* Src port */
+ u_char ildst[2]; /* Dst port */
+ u_char ilid[4]; /* Sequence id */
+ u_char ilack[4]; /* Acked sequence */
+ };
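+ 
+ /*
+ * The header is 18 bytes on the wire. illen, ilid and ilack are stored
+ * in network byte order (il.c converts them with htons/htonl); ilsrc and
+ * ildst carry the in_pcb port numbers, which are already in network order.
+ */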
+
+ #endif
diff -N -c -r /usr/src/sys/netinet/il_var.h ./netinet/il_var.h
*** /usr/src/sys/netinet/il_var.h Wed Dec 31 19:00:00 1969
--- ./netinet/il_var.h Thu Oct 7 10:45:05 1999
***************
*** 0 ****
--- 1,46 ----
+ #ifndef NETINET_IL_VAR_H_
+ #define NETINET_IL_VAR_H_
+
+ struct ilpcb /* Control block */
+ {
+ int state; /* Connection state */
+ struct inpcb * inpcb; /* back pointer to internet pcb */
+ u_long unacked;
+
+ #define ILF_NEEDACK 1
+ u_long flags;
+
+ u_long rxt_max;
+ int rxt_timer; /* number of ticks to the next timeout */
+ int rxt_timer_cur; /* current rxt timer period */
+
+ int death_timer;
+ int death_timer_cur;
+
+ u_long next; /* Id of next to send */
+ u_long recvd; /* Last packet received */
+
+ u_long start; /* Local start id */
+ u_long rstart; /* Remote start id */
+ int rxtot; /* number of retransmits on this connection */
+
+ struct mbuf * segq;
+ };
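+ 
+ /*
+ * The timers above count protocol slow-timeout ticks: il_slowtimo
+ * decrements rxt_timer and death_timer (nominally every 500 ms), and
+ * rxt_timer_cur is doubled on each retransmit timeout for exponential
+ * backoff, starting from 2 ticks (see il_newilpcb and il_proc_ack).
+ */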
+
+ #define intoilpcb(ip) ((struct ilpcb *)(ip)->inp_ppcb)
+ #define sotoilpcb(so) (intoilpcb(sotoinpcb(so)))
+
+ #ifdef KERNEL
+ void il_init __P((void));
+ void il_input __P((struct mbuf * m, int iphlen));
+ void il_slowtimo __P((void));
+ void il_fasttimo __P((void));
+ void il_ctlinput __P((int cmd, struct sockaddr *sa, void *vip));
+ int il_ctloutput __P((struct socket *so, struct sockopt *sopt));
+ void il_drain __P((void));
+
+ extern struct pr_usrreqs il_usrreqs;
+
+ #endif
+
+ #endif
diff -N -c -r /usr/src/sys/netinet/in_proto.c ./netinet/in_proto.c
*** /usr/src/sys/netinet/in_proto.c Sat Aug 22 23:07:14 1998
--- ./netinet/in_proto.c Wed Oct 6 17:55:12 1999
***************
*** 36,41 ****
--- 36,42 ----
#include "opt_ipdivert.h"
#include "opt_ipx.h"
+ #include "opt_inet.h"
#include <sys/param.h>
#include <sys/kernel.h>
***************
*** 71,76 ****
--- 72,82 ----
#include <netns/ns_if.h>
#endif
+ #ifdef IL
+ #include <netinet/il.h>
+ #include <netinet/il_var.h>
+ #endif
+
extern struct domain inetdomain;
static struct pr_usrreqs nousrreqs;
***************
*** 161,166 ****
--- 167,181 ----
0,
0, 0, 0, 0,
&rip_usrreqs
+ },
+ #endif
+ #ifdef IL
+ { SOCK_SEQPACKET, &inetdomain, IPPROTO_IL,
+ PR_CONNREQUIRED|PR_IMPLOPCL|PR_WANTRCVD|PR_ATOMIC,
+ il_input, 0, il_ctlinput, il_ctloutput,
+ 0,
+ il_init, il_fasttimo, il_slowtimo, il_drain,
+ &il_usrreqs
},
#endif
/* raw wildcard */
diff -N -c -r /usr/src/sys/sys/vnode.h ./sys/vnode.h
*** /usr/src/sys/sys/vnode.h Sat Mar 20 04:37:49 1999
--- ./sys/vnode.h Fri Oct 15 17:44:42 1999
***************
*** 62,68 ****
enum vtagtype {
VT_NON, VT_UFS, VT_NFS, VT_MFS, VT_PC, VT_LFS, VT_LOFS, VT_FDESC,
VT_PORTAL, VT_NULL, VT_UMAP, VT_KERNFS, VT_PROCFS, VT_AFS, VT_ISOFS,
! VT_UNION, VT_MSDOSFS, VT_DEVFS, VT_TFS, VT_VFS, VT_CODA, VT_NTFS
};
/*
--- 62,68 ----
enum vtagtype {
VT_NON, VT_UFS, VT_NFS, VT_MFS, VT_PC, VT_LFS, VT_LOFS, VT_FDESC,
VT_PORTAL, VT_NULL, VT_UMAP, VT_KERNFS, VT_PROCFS, VT_AFS, VT_ISOFS,
! VT_UNION, VT_MSDOSFS, VT_DEVFS, VT_TFS, VT_VFS, VT_CODA, VT_NTFS, VT_U9FS
};
/*
|