[2413] in Release_Engineering

home help back first fref pref prev next nref lref last post

[Donald T. Davis: ]

daemon@ATHENA.MIT.EDU (Richard Basch)
Sun Jul 15 19:56:25 1990

Date: Sun, 15 Jul 90 19:55:49 -0400
To: rel-eng@MIT.EDU
From: Richard Basch <probe@MIT.EDU>


Still verifying... but here is the submission...

-Richard


------- Forwarded Message

From: don@ATHENA.MIT.EDU
Date: Fri, 13 Jul 90 22:12:04 -0400
To: ed@cayman.com, probe@ATHENA.MIT.EDU
Cc: jik@ATHENA.MIT.EDU

Ed and Richard, here are the differences (diff -c old new) and the new
version of vm_text.c. It does run, and I've tracked its performance with
pstat -x in order to ensure that it was doing as I intended.
Please audit ASAP if you can; sorry about the last-minute notice,
but it took me a day to find the last bug: I thought the list-stitching
stuff was broken, and debugged accordingly, but it was the cache-count
tracking that was broken. You had to be there. Thanks for the help to you both.
							-don

*** /source/bsd-4.3/common/sys/sys/vm_text.c	Fri Jun  1 12:19:43 1990
--- /site/sys/sys/vm_text.c	Fri Jul 13 21:39:30 1990
***************
*** 66,76 ****
  	else \
  		(xp)->x_forw->x_back = &(xp)->x_forw; \
  }
! #define FREE_AT_TAIL(xp) { \
  	(xp)->x_back = xtail; \
  	*xtail = (xp); \
  	xtail = &(xp)->x_forw; \
  	/* x_forw is NULL */ \
  }
  #define	ALLOC(xp) { \
  	*((xp)->x_back) = (xp)->x_forw; \
--- 66,79 ----
  	else \
  		(xp)->x_forw->x_back = &(xp)->x_forw; \
  }
! #define CACHE_AT_TAIL(xp) { \
! 	if (!xcache != !x_lru) panic("text cache size 1"); \
! 	xcache++; \
  	(xp)->x_back = xtail; \
  	*xtail = (xp); \
  	xtail = &(xp)->x_forw; \
  	/* x_forw is NULL */ \
+ 	if (!x_lru) x_lru = (struct text *)xtail; \
  }
  #define	ALLOC(xp) { \
  	*((xp)->x_back) = (xp)->x_forw; \
***************
*** 81,86 ****
--- 84,97 ----
  	(xp)->x_forw = NULL; \
  	(xp)->x_back = NULL; \
  }
+ #define FREE_CACHE_LRU(xp,msg) { \
+ /*	if (!x_lru)			panic("text cache underflow"); \
+ 	if (!(xp)->x_forw && \
+ 	    &(xp)->x_forw != xtail)	panic("text cache- bad null link"); \
+ 	if (x_lru != (xp))		panic("freeing non-lru text"); */ \
+ 	if (!xcache != !(xp)->x_forw)	panic("text cache size 2"); \
+ 	x_lru = (xp)->x_forw; \
+ }
  
  /*
   * We place free text table entries on a free list.
***************
*** 91,107 ****
   * For machines with limited swap space, this may result
   * in filling up swap, and thus we allow a limit
   * to be placed on the number of text images to cache.
!  * (In that case, really should change the algorithm
!  * for freeing a text when the cache is full;
!  * should free least-recently-used text rather than current one.)
   */
  struct	text *xhead, **xtail;		/* text table free list */
  int	xcache;				/* number of "sticky" texts retained */
  /*int	maxtextcache = -1;		/* maximum number of "sticky" texts */
! /*int	maxtextcache = 10;		/* maximum number of "sticky" texts */
! /* We are disabling caching as it is too much of a performance hit */
! int	maxtextcache = 0;		/* maximum number of "sticky" texts */
  struct	xstats xstats;			/* cache statistics */
  
  /*
   * initialize text table
--- 102,116 ----
   * For machines with limited swap space, this may result
   * in filling up swap, and thus we allow a limit
   * to be placed on the number of text images to cache.
!  * In that case, we free least-recently-used text.
   */
  struct	text *xhead, **xtail;		/* text table free list */
+ struct	text *x_lru;			/* least-recently-used cached text */
  int	xcache;				/* number of "sticky" texts retained */
  /*int	maxtextcache = -1;		/* maximum number of "sticky" texts */
! int	maxtextcache = 10;		/* maximum number of "sticky" texts */
  struct	xstats xstats;			/* cache statistics */
+ static	xuntext();
  
  /*
   * initialize text table
***************
*** 111,118 ****
  	register struct text *xp;
  
  	xtail = &xhead;
  	for (xp = text; xp < textNTEXT; xp++)
! 		FREE_AT_TAIL(xp);
  	if (maxtextcache < 0 || maxtextcache > ntext)
  		maxtextcache = ntext;
  }
--- 120,128 ----
  	register struct text *xp;
  
  	xtail = &xhead;
+ 	x_lru =  xhead = NULL;
  	for (xp = text; xp < textNTEXT; xp++)
! 		FREE_AT_HEAD(xp);
  	if (maxtextcache < 0 || maxtextcache > ntext)
  		maxtextcache = ntext;
  }
***************
*** 140,146 ****
  #else
          if (--xp->x_count == 0 && (xp->x_iptr->i_mode & ISVTX) == 0) {
  #endif
! 		if (xcache >= maxtextcache || xp->x_flag & XTRC ||
  #if defined(NFS) || defined(VFS)
  		    vattr.va_nlink == 0) {			/* XXX */
  #else
--- 150,156 ----
  #else
          if (--xp->x_count == 0 && (xp->x_iptr->i_mode & ISVTX) == 0) {
  #endif
! 		if (xp->x_flag & XTRC ||
  #if defined(NFS) || defined(VFS)
  		    vattr.va_nlink == 0) {			/* XXX */
  #else
***************
*** 160,169 ****
  				xstats.free_cacheswap++;
  				xp->x_flag |= XUNUSED;
  			}
- 			xcache++;
  			xstats.free_cache++;
  			xccdec(xp, u.u_procp);
! 			FREE_AT_TAIL(xp);
  		}
  	} else {
  		xccdec(xp, u.u_procp);
--- 170,183 ----
  				xstats.free_cacheswap++;
  				xp->x_flag |= XUNUSED;
  			}
  			xstats.free_cache++;
  			xccdec(xp, u.u_procp);
! 			CACHE_AT_TAIL(xp);
! 			if ( xcache > maxtextcache) { /* cache overflow */
! 			    xuntext(x_lru);
! 			    xcache--;
! 			    FREE_CACHE_LRU(x_lru,"ovrflw");
! 			}
  		}
  	} else {
  		xccdec(xp, u.u_procp);
***************
*** 235,243 ****
  		X_LOCK(xp);
  		if (xp->x_back) {
  			xstats.alloc_cachehit++;
  			ALLOC(xp);
  			xp->x_flag &= ~XUNUSED;
- 			xcache--;
  		} else
  			xstats.alloc_inuse++;
  		xp->x_count++;
--- 249,260 ----
  		X_LOCK(xp);
  		if (xp->x_back) {
  			xstats.alloc_cachehit++;
+ 			xcache--;
+ 			if (xp == x_lru){
+ 			    FREE_CACHE_LRU(xp,"hit");
+ 			}
  			ALLOC(xp);
  			xp->x_flag &= ~XUNUSED;
  		} else
  			xstats.alloc_inuse++;
  		xp->x_count++;
***************
*** 257,262 ****
--- 274,283 ----
  		psignal(u.u_procp, SIGKILL);
  		return;
  	}
+ 	if (xp == x_lru) {
+ 		xcache--;
+ 		FREE_CACHE_LRU(xp,"flush");
+ 	}
  	ALLOC(xp);
  #if defined(NFS) || defined(VFS)
  	if (xp->x_vptr) {
***************
*** 267,273 ****
  		if (xp->x_flag & XUNUSED)
  			xstats.alloc_unused++;
  		xuntext(xp);
- 		xcache--;
  	}
  	xp->x_flag = XLOAD|XLOCK;
  	if (pagi) {
--- 288,293 ----
***************
*** 448,457 ****
  			if (xp->x_count == 0)
  			        if (xp->x_forw == NULL &&
  				    xp->x_back == NULL) {
! 				        FREE_AT_HEAD(xp);
  			        }
! 			        else
! 			                xcache--;
  		}
  	if (vfsp != NULL)
  	        mpurgevfs(vfsp);
--- 468,482 ----
  			if (xp->x_count == 0)
  			        if (xp->x_forw == NULL &&
  				    xp->x_back == NULL) {
! 				    FREE_AT_HEAD(xp);
  			        }
! 			        else if (xcache--, xp == x_lru) {
! 				    FREE_CACHE_LRU(xp,"unmount");
! 				}
! 				else {
! 				    ALLOC(xp);
! 				    FREE_AT_HEAD(xp);
! 				}
  		}
  	if (vfsp != NULL)
  	        mpurgevfs(vfsp);



+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
vm_text.c follows:
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/*
 * 5799-CGZ (C) COPYRIGHT IBM CORPORATION  1986,1987
 * LICENSED MATERIALS - PROPERTY OF IBM
 * REFER TO COPYRIGHT INSTRUCTIONS FORM NUMBER G120-2083
 */
/* $Header: /site/sys/sys/RCS/vm_text.c,v 1.11 90/06/01 12:19:29 epeisach Exp Locker: don $ */
/* $ACIS:vm_text.c 9.1$ */
/* $Source: /site/sys/sys/RCS/vm_text.c,v $ */

#ifndef lint
static char *rcsid = "$Header: /site/sys/sys/RCS/vm_text.c,v 1.11 90/06/01 12:19:29 epeisach Exp Locker: don $";
#endif

/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)vm_text.c	7.1 (Berkeley) 6/5/86
 */

#include "../machine/pte.h"
#ifdef ibm032
#include "../machine/mmu.h"
#endif ibm032

#include "param.h"
#include "systm.h"
#include "map.h"
#if !defined(NFS) && !defined(VFS)
#include "dir.h"
#include "inode.h"
#endif
#include "user.h"
#include "proc.h"
#include "text.h"
#include "buf.h"
#include "seg.h"
#include "vm.h"
#include "cmap.h"
#include "uio.h"
#include "exec.h"
#if defined(NFS) || defined(VFS)
#include "vfs.h"
#include "vnode.h"
#endif /* NFS || VFS */

/*
 * X_LOCK: acquire the per-text lock, sleeping at priority PSWP
 * (setting XWANT) until any current holder drops XLOCK.
 */
#define X_LOCK(xp) { \
	while ((xp)->x_flag & XLOCK) { \
		(xp)->x_flag |= XWANT; \
		sleep((caddr_t)(xp), PSWP); \
	} \
	(xp)->x_flag |= XLOCK; \
}
/*
 * XUNLOCK: drop the per-text lock and wake any sleeper that set
 * XWANT while waiting in X_LOCK.
 */
#define	XUNLOCK(xp) { \
	if ((xp)->x_flag & XWANT) \
		wakeup((caddr_t)(xp)); \
	(xp)->x_flag &= ~(XLOCK|XWANT); \
}
/*
 * FREE_AT_HEAD: push a fully-freed text entry on the front of the
 * free list (xhead/xtail).  Entries at the head carry no cached
 * image and are reused (by xalloc) before the cached ones at the tail.
 */
#define FREE_AT_HEAD(xp) { \
	(xp)->x_forw = xhead; \
	xhead = (xp); \
	(xp)->x_back = &xhead; \
	if (xtail == &xhead) \
		xtail = &(xp)->x_forw; \
	else \
		(xp)->x_forw->x_back = &(xp)->x_forw; \
}
/*
 * CACHE_AT_TAIL: append a still-valid ("sticky") text at the tail of
 * the free list and bump xcache.  The tail end is ordered least- to
 * most-recently freed; x_lru tracks the first cached entry.
 * The panic cross-checks the invariant xcache==0 <=> x_lru==NULL.
 * NOTE(review): the x_lru assignment casts xtail (== &(xp)->x_forw
 * after the update) to struct text *, which equals (xp) only if
 * x_forw is the first member of struct text -- confirm against text.h.
 */
#define CACHE_AT_TAIL(xp) { \
	if (!xcache != !x_lru) panic("text cache size 1"); \
	xcache++; \
	(xp)->x_back = xtail; \
	*xtail = (xp); \
	xtail = &(xp)->x_forw; \
	/* x_forw is NULL */ \
	if (!x_lru) x_lru = (struct text *)xtail; \
}
/*
 * ALLOC: unlink a text from the free list (either end), leaving
 * x_forw/x_back NULL so list membership can be tested via x_back.
 */
#define	ALLOC(xp) { \
	*((xp)->x_back) = (xp)->x_forw; \
	if ((xp)->x_forw) \
		(xp)->x_forw->x_back = (xp)->x_back; \
	else \
		xtail = (xp)->x_back; \
	(xp)->x_forw = NULL; \
	(xp)->x_back = NULL; \
}
/*
 * FREE_CACHE_LRU: advance x_lru past (xp), which the caller is
 * dropping from the cached portion of the list; the caller
 * decrements xcache itself.  The heavier sanity panics are
 * commented out; the remaining one checks that the cache count
 * empties exactly when the LRU chain does.
 * NOTE(review): the x_lru update reads (xp)->x_forw as "next cached
 * text", so it is only valid while (xp) is still linked in place.
 * The msg argument is currently unused (reserved for the panics).
 */
#define FREE_CACHE_LRU(xp,msg) { \
/*	if (!x_lru)			panic("text cache underflow"); \
	if (!(xp)->x_forw && \
	    &(xp)->x_forw != xtail)	panic("text cache- bad null link"); \
	if (x_lru != (xp))		panic("freeing non-lru text"); */ \
	if (!xcache != !(xp)->x_forw)	panic("text cache size 2"); \
	x_lru = (xp)->x_forw; \
}

/*
 * We place free text table entries on a free list.
 * All text images are treated as "sticky,"
 * and are placed on the free list (as an LRU cache) when unused.
 * They may be reclaimed from the free list until reused.
 * Files marked sticky are locked into the table, and are never freed.
 * For machines with limited swap space, this may result
 * in filling up swap, and thus we allow a limit
 * to be placed on the number of text images to cache.
 * In that case, we free least-recently-used text.
 *
 * List layout: plain free entries cluster at xhead; cached ("sticky")
 * entries sit at the xtail end in LRU-to-MRU order.  x_lru points at
 * the least-recently-used cached entry and xcache counts the cached
 * entries (bounded by maxtextcache).
 */
struct	text *xhead, **xtail;		/* text table free list */
struct	text *x_lru;			/* least-recently-used cached text */
int	xcache;				/* number of "sticky" texts retained */
/*int	maxtextcache = -1;		/* maximum number of "sticky" texts */
int	maxtextcache = 10;		/* maximum number of "sticky" texts */
struct	xstats xstats;			/* cache statistics */
static	xuntext();			/* forward declaration (implicit int) */

/*
 * initialize text table: chain every entry onto the free list with
 * FREE_AT_HEAD (no cached images exist yet, so x_lru and xcache
 * start out empty/zero) and clamp maxtextcache to the table size.
 */
xinit()
{
	register struct text *xp;

	xtail = &xhead;
	x_lru =  xhead = NULL;
	for (xp = text; xp < textNTEXT; xp++)
		FREE_AT_HEAD(xp);
	if (maxtextcache < 0 || maxtextcache > ntext)
		maxtextcache = ntext;
}

/*
 * relinquish use of the shared text segment
 * of a process.
 * When the last user goes away: a traced or unlinked image is torn
 * down immediately; otherwise the text is kept on the free list as
 * a cache entry (CACHE_AT_TAIL), evicting the LRU cached text if
 * the cache limit is exceeded.
 */
xfree()
{
	register struct text *xp;
#if defined(NFS) || defined(VFS)
	register struct vnode *vp;
	struct vattr vattr;
#endif /* NFS || VFS */

	if ((xp = u.u_procp->p_textp) == NULL)
		return;
	xstats.free++;
	X_LOCK(xp);
#if defined(NFS) || defined(VFS)
	vp = xp->x_vptr;
	VOP_GETATTR(vp, &vattr, u.u_cred);
	if (--xp->x_count == 0 && (vattr.va_mode & VSVTX) == 0) {
#else
        if (--xp->x_count == 0 && (xp->x_iptr->i_mode & ISVTX) == 0) {
#endif
		/*
		 * Last user gone and file not sticky-bit protected.
		 * Traced (XTRC) or unlinked files are useless to cache:
		 * release core and swap now and free the entry outright.
		 */
		if (xp->x_flag & XTRC ||
#if defined(NFS) || defined(VFS)
		    vattr.va_nlink == 0) {			/* XXX */
#else
                    xp->x_iptr->i_nlink == 0) {                 /* XXX */
#endif /* NFS || VFS */
			xp->x_rssize -= vmemfree(tptopte(u.u_procp, 0), 
			   (int)u.u_tsize);
			if (xp->x_rssize != 0)
				panic("xfree rssize");
			while (xp->x_poip)
				sleep((caddr_t)&xp->x_poip, PSWP+1);
			/* drop the lock by hand: xuntext() takes X_LOCK itself */
			xp->x_flag &= ~XLOCK;
			xuntext(xp);
			FREE_AT_HEAD(xp);
		} else {
			if (xp->x_flag & XWRIT) {
				xstats.free_cacheswap++;
				xp->x_flag |= XUNUSED;
			}
			xstats.free_cache++;
			xccdec(xp, u.u_procp);
			CACHE_AT_TAIL(xp);
			if ( xcache > maxtextcache) { /* cache overflow */
			    /*
			     * Evict the least-recently-used cached text.
			     * NOTE(review): xuntext() takes X_LOCK on its
			     * argument; if maxtextcache were 0 the LRU
			     * would be xp itself, which we already hold
			     * locked here -- confirm maxtextcache can
			     * never be configured to 0.
			     */
			    xuntext(x_lru);
			    xcache--;
			    FREE_CACHE_LRU(x_lru,"ovrflw");
			}
		}
	} else {
		/* still shared (or sticky): just drop our in-core reference */
		xccdec(xp, u.u_procp);
		xstats.free_inuse++;
	}
#ifndef ibm032
	xunlink(u.u_procp);
#else ibm032
	xunlink(u.u_procp, 0);
#endif ibm032
	XUNLOCK(xp);
	u.u_procp->p_textp = NULL;
}

/*
 * Attach to a shared text segment.
 * If there is no shared text, just return.
 * If there is, hook up to it:
 * if it is not currently being used, it has to be read
 * in from the vnode (vp); the written bit is set to force it
 * to be written out as appropriate.
 * If it is being used, but is not currently in core,
 * a swap has to be done to get it back.
 *
 * A text already cached on the free list (x_back != NULL) is
 * reclaimed in place; otherwise a fresh entry is taken from the
 * head of the free list, flushing any cached image it still holds.
 */
#ifdef P0INVALID

#if defined(NFS) || defined(VFS)
xalloc(vp, ep, pagi, pziv)
#else
xalloc(ip, ep, pagi, pziv)
#endif /* NFS || VFS */

#else /* P0INVALID */

#if defined(NFS) || defined(VFS)
xalloc(vp, ep, pagi)
#else
xalloc(ip, ep, pagi)
#endif /* NFS || VFS */

#endif /* P0INVALID */

#if defined(NFS) || defined(VFS)
	register struct vnode *vp;
#else
	register struct inode *ip;
#endif
	struct exec *ep;
{
	register struct text *xp;
	register size_t ts;

	if (ep->a_text == 0)
		return;
	xstats.alloc++;
	/* First, look for an existing text for this vnode/inode. */
#if defined(NFS) || defined(VFS)
	while ((xp = vp->v_text) != NULL) {
#else
        while ((xp = ip->i_text) != NULL) {
#endif /* NFS || VFS */
		if (xp->x_flag&XLOCK) {
			/*
			 * Wait for text to be unlocked,
			 * then start over (may have changed state).
			 */
			xwait(xp);
			continue;
		}
		X_LOCK(xp);
		if (xp->x_back) {
			/* on the free list: reclaim it from the LRU cache */
			xstats.alloc_cachehit++;
			xcache--;
			if (xp == x_lru){
			    FREE_CACHE_LRU(xp,"hit");
			}
			ALLOC(xp);
			xp->x_flag &= ~XUNUSED;
		} else
			xstats.alloc_inuse++;
		xp->x_count++;
		u.u_procp->p_textp = xp;
		xlink(u.u_procp);
#ifdef P0INVALID
		settprot(RO, pziv);
#else
		settprot(RO);
#endif
		XUNLOCK(xp);
		return;
	}
	/* No existing text: take the entry at the head of the free list. */
	xp = xhead;
	if (xp == NULL) {
		tablefull("text");
		psignal(u.u_procp, SIGKILL);
		return;
	}
	if (xp == x_lru) {
		/* the head entry is the LRU cached text: uncache it */
		xcache--;
		FREE_CACHE_LRU(xp,"flush");
	}
	ALLOC(xp);
#if defined(NFS) || defined(VFS)
	if (xp->x_vptr) {
#else
	if (xp->x_iptr) {
#endif /* NFS || VFS */
		/* entry still holds a cached image: flush it before reuse */
		xstats.alloc_cacheflush++;
		if (xp->x_flag & XUNUSED)
			xstats.alloc_unused++;
		xuntext(xp);
	}
	xp->x_flag = XLOAD|XLOCK;
	if (pagi) {
#ifdef P0INVALID
		if (pziv) xp->x_flag |= XPZIV;
#endif
#if defined(NFS) || defined(VFS)
		xp->x_flag |= XPAGV;
#else
		xp->x_flag |= XPAGI;
#endif
	}
	ts = clrnd(btoc(ep->a_text));
	xp->x_size = ts;
	if (vsxalloc(xp) == NULL) {
		swkill(u.u_procp, "xalloc: no swap space");
		FREE_AT_HEAD(xp);
		return;
	}
	xp->x_count = 1;
	xp->x_ccount = 0;
	xp->x_rssize = 0;
#if defined(NFS) || defined(VFS)
	xp->x_vptr = vp;
	vp->v_flag |= VTEXT;
	vp->v_text = xp;
	VN_HOLD(vp);
#else
        xp->x_iptr = ip;
        ip->i_flag |= ITEXT;
        ip->i_text = xp;
        ip->i_count++;
#endif /* NFS || VFS */
	u.u_procp->p_textp = xp;
	xlink(u.u_procp);
	if (pagi == 0) {
		/* not demand-paged: read the whole text image in now */
#ifdef P0INVALID
		settprot(RW, 0);
#else
		settprot(RW);
#endif
		u.u_procp->p_flag |= SKEEP;
#if defined(NFS) || defined(VFS)
		(void) vn_rdwr(UIO_READ, vp,
			(caddr_t)ctob(tptov(u.u_procp, 0)),
                        (int)ep->a_text,
				(off_t)(((ep->a_magic == 0413)
#ifdef P0INVALID
					 || (ep->a_magic == 0420)
#endif P0INVALID
					 )?
                                	CLBYTES:
                                	sizeof (struct exec)),
			UIO_USERSPACE, IO_UNIT, (int *)0);
#else
                (void) rdwri(UIO_READ, ip,
                        (caddr_t)ctob(tptov(u.u_procp, 0)),
#ifdef VICE
                        (int)ep->a_text, offset,
#else
                        (int)ep->a_text, (off_t)sizeof (struct exec),
#endif
                        2, (int *)0);
#endif /* NFS || VFS */

		u.u_procp->p_flag &= ~SKEEP;
	}
#ifdef P0INVALID
	settprot(RO, pziv);
#else
	settprot(RO);
#endif
	/* XWRIT forces the freshly-read image out to swap when released */
	xp->x_flag |= XWRIT;
	xp->x_flag &= ~XLOAD;
	XUNLOCK(xp);
}

/*
 * Lock and unlock a text segment from swapping
 * (may sleep in X_LOCK until the current holder drops XLOCK).
 */
xlock(xp)
	register struct text *xp;
{

	X_LOCK(xp);
}

/*
 * Wait for xp to be unlocked if it is currently locked.
 * Acquires then immediately releases the lock, so it returns
 * only once the lock was observed free.
 */
xwait(xp)
register struct text *xp;
{

	X_LOCK(xp);
	XUNLOCK(xp);
}

/* Release the per-text lock taken by xlock()/X_LOCK. */
xunlock(xp)
register struct text *xp;
{

	XUNLOCK(xp);
}

/*
 * Decrement the in-core usage count of a shared text segment,
 * which must be locked.  When the count drops to zero,
 * free the core space.
 */
xccdec(xp, p)
	register struct text *xp;
	register struct proc *p;
{

	if (--xp->x_ccount == 0) {
		if (xp->x_flag & XWRIT) {
			/* dirty: push the pages (and, for demand-paged
			 * texts, the page table) out to swap */
			vsswap(p, tptopte(p, 0), CTEXT, 0, (int)xp->x_size,
			    (struct dmap *)0);
#if defined(NFS) || defined(VFS)
			if (xp->x_flag & XPAGV)
#else
			if (xp->x_flag & XPAGI)
#endif /* NFS || VFS */
				(void)swap(p, xp->x_ptdaddr,
				    (caddr_t)tptopte(p, 0),
				    (int)xp->x_size * sizeof (struct pte),
#if defined(NFS) || defined(VFS)
				    B_WRITE, B_PAGET, swapdev_vp, 0);
#else
				    B_WRITE, B_PAGET, swapdev, 0);
#endif /* NFS || VFS */
			xp->x_flag &= ~XWRIT;
		} else
			/* clean: just release the resident pages */
			xp->x_rssize -= vmemfree(tptopte(p, 0),
			    (int)xp->x_size);
		if (xp->x_rssize != 0)
			panic("text rssize");
	}
}

/*
 * Detach a process from the in-core text.
 * External interface to xccdec, used when swapping out a process.
 * Takes the text lock itself (unlike xccdec, whose caller must).
 */
xdetach(xp, p)
	register struct text *xp;
	struct proc *p;
{

	if (xp && xp->x_ccount != 0) {
		X_LOCK(xp);
		xccdec(xp, p);
#ifndef ibm032
		xunlink(p);
#else ibm032
		xunlink(p, 0);
#endif ibm032
		XUNLOCK(xp);
	}
}

/*
 * Free the swap image of all unused saved-text text segments
 * which are from virtual filesystem vfsp  (used by umount system call).
 * A NULL vfsp matches every filesystem.
 */

#if defined(NFS) || defined(VFS)
xumount(vfsp)
	register struct vfs *vfsp;
{
	register struct text *xp;

	for (xp = text; xp < textNTEXT; xp++) 
		if (xp->x_vptr != NULL && (xp->x_vptr->v_vfsp == vfsp ||
					   vfsp == NULL)) {
			xuntext(xp);
			if (xp->x_count == 0)
				/* x_forw/x_back both NULL means the entry is
				 * off-list entirely: make it a plain free one */
			        if (xp->x_forw == NULL &&
				    xp->x_back == NULL) {
				    FREE_AT_HEAD(xp);
			        }
				/* otherwise it was a cached entry; the comma
				 * expression drops the cache count whether or
				 * not it is the LRU.  NOTE(review): the LRU
				 * case leaves the entry linked in place (only
				 * x_lru is advanced), while the non-LRU case
				 * relinks it at the head -- confirm both
				 * leave the list consistent. */
			        else if (xcache--, xp == x_lru) {
				    FREE_CACHE_LRU(xp,"unmount");
				}
				else {
				    ALLOC(xp);
				    FREE_AT_HEAD(xp);
				}
		}
	if (vfsp != NULL)
	        mpurgevfs(vfsp);
}
/*
 * remove a shared text segment from the text table, if possible.
 * (VFS flavor: keyed by vnode.)
 */

xrele(vp)
	register struct vnode *vp;
{

	if (vp->v_flag & VTEXT)
		xuntext(vp->v_text);
}

#else

/*
 * Free the text images backed by device dev (NODEV matches all).
 * NOTE(review): unlike the VFS variant, this path performs no
 * free-list or xcache/x_lru maintenance after xuntext() -- verify
 * the non-VFS configuration is still supported with the LRU cache.
 */
xumount(dev)
        register dev_t dev;
{
        register struct text *xp;

        for (xp = text; xp < textNTEXT; xp++)
                if (xp->x_iptr != NULL &&
                    (dev == xp->x_iptr->i_dev || dev == NODEV))
                        xuntext(xp);
}

/*
 * remove a shared text segment from the text table, if possible.
 * (inode flavor, non-VFS kernels.)
 */
xrele(ip)
        register struct inode *ip;
{

        if (ip->i_flag & ITEXT)
                xuntext(ip->i_text);
}

#endif /* NFS || VFS */


/*
 * remove text image from the text table.
 * the use count must be zero.
 * Releases the swap image and detaches the vnode/inode; does NOT
 * touch the free list or the xcache/x_lru bookkeeping -- callers
 * handle that.  NOTE(review): sleeps in X_LOCK, so callers must not
 * already hold the lock on the same text.
 */
xuntext(xp)
	register struct text *xp;
{
#if defined(NFS) || defined(VFS)
	register struct vnode *vp;
#else
	register struct inode *ip;
#endif /* NFS || VFS */

	X_LOCK(xp);
	if (xp->x_count == 0) {
#if defined(NFS) || defined(VFS)
		vp = xp->x_vptr;
		xp->x_vptr = (struct vnode *)0;
#else
                ip = xp->x_iptr;
                xp->x_iptr = NULL;
#endif /* NFS || VFS */
		vsxfree(xp, ctod(xp->x_size));
#if defined(NFS) || defined(VFS)
		vp->v_flag &= ~(VTEXT | VTEXTMOD);
		vp->v_text = NULL;
		VN_RELE(vp);
#else
                ip->i_flag &= ~ITEXT;
                ip->i_text = NULL;
                irele(ip);
#endif /* NFS || VFS */
	}
	XUNLOCK(xp);
}

/*
 * Add a process to those sharing a text segment by
 * getting the page tables and then linking to x_caddr.
 * Pushes p onto the front of the x_caddr chain and bumps x_ccount.
 */
xlink(p)
	register struct proc *p;
{
	register struct text *xp = p->p_textp;

	if (xp == 0)
		return;
	vinitpt(p);
	p->p_xlink = xp->x_caddr;
	xp->x_caddr = p;
	xp->x_ccount++;

#ifdef ibm032
       /* switch to new address space for shared text */

       p->p_sid0 = make410sid( xp );
       if( p == u.u_procp )
               set_segreg( 0, p->p_sid0 );
#endif ibm032
}

/*
 * Remove a process from the x_caddr share chain of its text.
 * Panics if p is not found on the chain.  Does NOT decrement
 * x_ccount -- that is xccdec's job.
 */
#ifndef ibm032
xunlink(p)
#else ibm032
xunlink(p, isxrepl)
#endif ibm032
	register struct proc *p;
{
	register struct text *xp = p->p_textp;
	register struct proc *q;

	if (xp == 0)
		return;
#ifdef ibm032

       /* switch from address space for shared text
	* (skipped when called from xrepl, which keeps the text) */

       if (isxrepl==0) {
               p->p_sid0 = makeP0sid(p);
               if (p == u.u_procp)
                       set_segreg(0, p->p_sid0);
	}

#endif ibm032
	if (xp->x_caddr == p) {
		/* p is at the head of the chain */
		xp->x_caddr = p->p_xlink;
		p->p_xlink = 0;
		return;
	}
	for (q = xp->x_caddr; q->p_xlink; q = q->p_xlink)
		if (q->p_xlink == p) {
			q->p_xlink = p->p_xlink;
			p->p_xlink = 0;
			return;
		}
	panic("lost text");
}

/*
 * Replace p by q in a text incore linked list.
 * Used by vfork(), internally.
 * x_ccount is left alone: q inherits p's in-core reference
 * (xunlink removes p from the chain without decrementing it).
 */
xrepl(p, q)
	struct proc *p, *q;
{
	register struct text *xp = q->p_textp;

	if (xp == 0)
		return;
#ifndef ibm032
	xunlink(p);
#else ibm032
	xunlink(p, 1);
#endif ibm032
	q->p_xlink = xp->x_caddr;
	xp->x_caddr = q;
}

#if defined(NFS) || defined(VFS)
int xkillcnt = 0;	/* running count of processes killed here */

/*
 * Invalidate the text associated with vp.
 * Purge in core cache of pages associated with vp and kill all active
 * processes.  Only demand-paged (XPAGV) texts are scanned; the loop
 * stops after the first matching text entry.
 */
xinval(vp)
	register struct vnode *vp;
{
	register struct text *xp;
	register struct proc *p;

	mpurge(vp);
	for (xp = text; xp < textNTEXT; xp++) {
		if ((xp->x_flag & XPAGV) && (xp->x_vptr == vp)) {
			for (p = xp->x_caddr; p; p = p->p_xlink) {
				/*
				 * swkill without uprintf
				 */
				printf(
				   "pid %d killed due to text modification\n",
				   p->p_pid);
				psignal(p, SIGKILL);
				p->p_flag |= SULOCK;
				xkillcnt++;
			}
			break;
		}
	}
}
#endif /* NFS || VFS */

------- End Forwarded Message

home help back first fref pref prev next nref lref last post