Diffstat (limited to 'ufs')
-rw-r--r--   ufs/Makefile   |   3
-rw-r--r--   ufs/alloc.c    |  38
-rw-r--r--   ufs/dir.c      |   8
-rw-r--r--   ufs/hyper.c    |  10
-rw-r--r--   ufs/inode.c    |  44
-rw-r--r--   ufs/main.c     |  10
-rw-r--r--   ufs/pager.c    | 105
-rw-r--r--   ufs/pokeloc.c  |  14
-rw-r--r--   ufs/sizes.c    |  47
-rw-r--r--   ufs/ufs.h      |  13
10 files changed, 170 insertions, 122 deletions
diff --git a/ufs/Makefile b/ufs/Makefile
index a1cfc4c9..b66a6259 100644
--- a/ufs/Makefile
+++ b/ufs/Makefile
@@ -24,7 +24,8 @@ SRCS = alloc.c consts.c dir.c hyper.c inode.c main.c pager.c \
   sizes.c subr.c tables.c bmap.c pokeloc.c xinl.c
 OBJS = $(SRCS:.c=.o)
-HURDLIBS = diskfs iohelp fshelp store pager threads ports ihash shouldbeinlibc
+HURDLIBS = diskfs iohelp fshelp store pager ports ihash shouldbeinlibc
+OTHERLIBS = -lpthread
 include ../Makeconf
diff --git a/ufs/alloc.c b/ufs/alloc.c
index d8f92255..9056a8b7 100644
--- a/ufs/alloc.c
+++ b/ufs/alloc.c
@@ -64,7 +64,7 @@ the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */
 extern u_long nextgennumber;
-spin_lock_t alloclock = SPIN_LOCK_INITIALIZER;
+pthread_spinlock_t alloclock = PTHREAD_SPINLOCK_INITIALIZER;
 /* Forward declarations */
 static u_long ffs_hashalloc (struct node *, int, long, int,
@@ -244,7 +244,7 @@ ffs_alloc(register struct node *np,
   }
   assert (cred);
 #endif /* DIAGNOSTIC */
-  spin_lock (&alloclock);
+  pthread_spin_lock (&alloclock);
   if (size == fs->fs_bsize && fs->fs_cstotal.cs_nbfree == 0)
     goto nospace;
   if (cred && !idvec_contains (cred->user->uids, 0)
@@ -263,7 +263,7 @@ ffs_alloc(register struct node *np,
   bno = (daddr_t)ffs_hashalloc(np, cg, (long)bpref, size,
                                (u_long (*)())ffs_alloccg);
   if (bno > 0) {
-    spin_unlock (&alloclock);
+    pthread_spin_unlock (&alloclock);
     np->dn_stat.st_blocks += btodb(size);
     np->dn_set_ctime = 1;
     np->dn_set_mtime = 1;
@@ -278,7 +278,7 @@ ffs_alloc(register struct node *np,
   (void) chkdq(ip, (long)-btodb(size), cred, FORCE);
 #endif
 nospace:
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   printf ("file system full");
   /* ffs_fserr(fs, cred->cr_uid, "file system full"); */
   /* uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); */
@@ -321,7 +321,7 @@ ffs_realloccg(register struct node *np,
     panic("ffs_realloccg: missing credential\n");
 #endif /* DIAGNOSTIC */
-  spin_lock (&alloclock);
+  pthread_spin_lock (&alloclock);
   if (!idvec_contains (cred->user->uids, 0)
       && freespace(fs, fs->fs_minfree) <= 0)
@@ -356,7 +356,7 @@ ffs_realloccg(register struct node *np,
   bno = ffs_fragextend(np, cg, (long)bprev, osize, nsize);
   if (bno) {
     assert (bno == bprev);
-    spin_unlock (&alloclock);
+    pthread_spin_unlock (&alloclock);
     np->dn_stat.st_blocks += btodb(nsize - osize);
     np->dn_set_ctime = 1;
     np->dn_set_mtime = 1;
@@ -430,7 +430,7 @@ ffs_realloccg(register struct node *np,
   if (nsize < request)
     ffs_blkfree(np, bno + numfrags(fs, nsize),
                 (long)(request - nsize));
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   np->dn_stat.st_blocks += btodb(nsize - osize);
   np->dn_set_mtime = 1;
   np->dn_set_ctime = 1;
@@ -457,7 +457,7 @@ nospace:
   /*
    * no space available
    */
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   printf ("file system full");
   /* ffs_fserr(fs, cred->cr_uid, "file system full"); */
   /* uprintf("\n%s: write failed, file system is full\n", fs->fs_fsmnt); */
@@ -660,11 +660,11 @@ diskfs_alloc_node (struct node *dir,
   fs = sblock;
-  spin_lock (&alloclock);
+  pthread_spin_lock (&alloclock);
   if (fs->fs_cstotal.cs_nifree == 0)
     {
-      spin_unlock (&alloclock);
+      pthread_spin_unlock (&alloclock);
      goto noinodes;
     }
@@ -678,7 +678,7 @@ diskfs_alloc_node (struct node *dir,
   cg = ino_to_cg(fs, ipref);
   ino = (ino_t)ffs_hashalloc(dir, cg, (long)ipref, mode, ffs_nodealloccg);
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   if (ino == 0)
     goto noinodes;
   error = diskfs_cached_lookup (ino, &np);
@@ -694,12 +694,12 @@ diskfs_alloc_node (struct node *dir,
   /*
    * Set up a new generation number for this inode.
    */
-  spin_lock (&gennumberlock);
+  pthread_spin_lock (&gennumberlock);
   sex = diskfs_mtime->seconds;
   if (++nextgennumber < (u_long)sex)
     nextgennumber = sex;
   np->dn_stat.st_gen = nextgennumber;
-  spin_unlock (&gennumberlock);
+  pthread_spin_unlock (&gennumberlock);
   *npp = np;
   alloc_sync (np);
@@ -773,11 +773,11 @@ ffs_blkpref(struct node *np,
   daddr_t nextblk;
   fs = sblock;
-  spin_lock (&alloclock);
+  pthread_spin_lock (&alloclock);
   if (indx % fs->fs_maxbpg == 0 || bap[indx - 1] == 0) {
     if (lbn < NDADDR) {
       cg = ino_to_cg(fs, np->dn->number);
-      spin_unlock (&alloclock);
+      pthread_spin_unlock (&alloclock);
       return (fs->fs_fpg * cg + fs->fs_frag);
     }
     /*
@@ -796,19 +796,19 @@ ffs_blkpref(struct node *np,
     for (cg = startcg; cg < fs->fs_ncg; cg++)
       if (csum[cg].cs_nbfree >= avgbfree) {
         fs->fs_cgrotor = cg;
-        spin_unlock (&alloclock);
+        pthread_spin_unlock (&alloclock);
         return (fs->fs_fpg * cg + fs->fs_frag);
       }
     for (cg = 0; cg <= startcg; cg++)
       if (csum[cg].cs_nbfree >= avgbfree) {
         fs->fs_cgrotor = cg;
-        spin_unlock (&alloclock);
+        pthread_spin_unlock (&alloclock);
         return (fs->fs_fpg * cg + fs->fs_frag);
       }
-    spin_unlock (&alloclock);
+    pthread_spin_unlock (&alloclock);
     return 0;
   }
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   /*
    * One or more previous blocks have been laid out.  If less
    * than fs_maxcontig previous blocks are contiguous, the
diff --git a/ufs/dir.c b/ufs/dir.c
--- a/ufs/dir.c
+++ b/ufs/dir.c
@@ -244,9 +244,9 @@ diskfs_lookup_hard (struct node *dp, const char *name, enum lookup_type type,
       /* Drop what we *thought* was .. (but isn't any more) and
          try *again*. */
       diskfs_nput (np);
-      mutex_unlock (&dp->lock);
+      pthread_mutex_unlock (&dp->lock);
       err = diskfs_cached_lookup (inum, &np);
-      mutex_lock (&dp->lock);
+      pthread_mutex_lock (&dp->lock);
       if (err)
         goto out;
       retry_dotdot = inum;
@@ -259,9 +259,9 @@ diskfs_lookup_hard (struct node *dp, const char *name, enum lookup_type type,
       /* Lock them in the proper order, and then
          repeat the directory scan to see if this is still right.  */
-      mutex_unlock (&dp->lock);
+      pthread_mutex_unlock (&dp->lock);
       err = diskfs_cached_lookup (inum, &np);
-      mutex_lock (&dp->lock);
+      pthread_mutex_lock (&dp->lock);
       if (err)
         goto out;
       retry_dotdot = inum;
diff --git a/ufs/hyper.c b/ufs/hyper.c
index ece327a2..95013895 100644
--- a/ufs/hyper.c
+++ b/ufs/hyper.c
@@ -280,7 +280,7 @@ diskfs_set_hypermetadata (int wait, int clean)
 {
   error_t err;
-  spin_lock (&alloclock);
+  pthread_spin_lock (&alloclock);
   if (csum_dirty)
     {
@@ -316,7 +316,7 @@ diskfs_set_hypermetadata (int wait, int clean)
       if (err)
         {
-          spin_unlock (&alloclock);
+          pthread_spin_unlock (&alloclock);
           return err;
         }
@@ -337,7 +337,7 @@ diskfs_set_hypermetadata (int wait, int clean)
       wait = 1;   /* must be synchronous */
     }
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   /* Update the superblock if necessary (clean bit was just set).  */
   copy_sblock ();
@@ -355,7 +355,7 @@ copy_sblock ()
   err = diskfs_catch_exception ();
   assert_perror (err);
-  spin_lock (&alloclock);
+  pthread_spin_lock (&alloclock);
   if (sblock_dirty)
     {
@@ -386,7 +386,7 @@ copy_sblock ()
       sblock_dirty = 0;
     }
-  spin_unlock (&alloclock);
+  pthread_spin_unlock (&alloclock);
   diskfs_end_catch_exception ();
 }
diff --git a/ufs/inode.c b/ufs/inode.c
index 77a45edb..066eb1e5 100644
--- a/ufs/inode.c
+++ b/ufs/inode.c
@@ -35,7 +35,7 @@ static struct node *nodehash[INOHSZ];
 static error_t read_disknode (struct node *np);
-spin_lock_t gennumberlock = SPIN_LOCK_INITIALIZER;
+pthread_spinlock_t gennumberlock = PTHREAD_SPINLOCK_INITIALIZER;
 /* Initialize the inode hash table. */
 void
@@ -55,15 +55,15 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
   struct node *np;
   error_t err;
-  spin_lock (&diskfs_node_refcnt_lock);
+  pthread_spin_lock (&diskfs_node_refcnt_lock);
   for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
     {
       if (np->dn->number != inum)
         continue;
       np->references++;
-      spin_unlock (&diskfs_node_refcnt_lock);
-      mutex_lock (&np->lock);
+      pthread_spin_unlock (&diskfs_node_refcnt_lock);
+      pthread_mutex_lock (&np->lock);
       *npp = np;
       return 0;
     }
@@ -74,30 +74,32 @@ diskfs_cached_lookup (ino_t inum, struct node **npp)
   dn->dirents = 0;
   dn->dir_idx = 0;
-  rwlock_init (&dn->allocptrlock);
+  pthread_rwlock_init (&dn->allocptrlock, NULL);
+  pthread_mutex_init (&dn->waitlock, NULL);
+  pthread_cond_init (&dn->waitcond, NULL);
   dn->dirty = 0;
   dn->fileinfo = 0;
   np = diskfs_make_node (dn);
   np->cache_id = inum;
-  mutex_lock (&np->lock);
+  pthread_mutex_lock (&np->lock);
   dn->hnext = nodehash[INOHASH(inum)];
   if (dn->hnext)
     dn->hnext->dn->hprevp = &dn->hnext;
   dn->hprevp = &nodehash[INOHASH(inum)];
   nodehash[INOHASH(inum)] = np;
-  spin_unlock (&diskfs_node_refcnt_lock);
+  pthread_spin_unlock (&diskfs_node_refcnt_lock);
   err = read_disknode (np);
   if (!diskfs_check_readonly () && !np->dn_stat.st_gen)
     {
-      spin_lock (&gennumberlock);
+      pthread_spin_lock (&gennumberlock);
       if (++nextgennumber < diskfs_mtime->seconds)
         nextgennumber = diskfs_mtime->seconds;
       np->dn_stat.st_gen = nextgennumber;
-      spin_unlock (&gennumberlock);
+      pthread_spin_unlock (&gennumberlock);
       np->dn_set_ctime = 1;
     }
@@ -117,14 +119,14 @@ ifind (ino_t inum)
 {
   struct node *np;
-  spin_lock (&diskfs_node_refcnt_lock);
+  pthread_spin_lock (&diskfs_node_refcnt_lock);
   for (np = nodehash[INOHASH(inum)]; np; np = np->dn->hnext)
     {
       if (np->dn->number != inum)
         continue;
       assert (np->references);
-      spin_unlock (&diskfs_node_refcnt_lock);
+      pthread_spin_unlock (&diskfs_node_refcnt_lock);
       return np;
     }
   assert (0);
@@ -168,7 +170,7 @@ diskfs_lost_hardrefs (struct node *np)
   if (np->dn->fileinfo)
     {
-      spin_lock (&_libports_portrefcntlock);
+      pthread_spin_lock (&_libports_portrefcntlock);
       pi = (struct port_info *) np->dn->fileinfo->p;
       if (pi->refcnt == 1)
         {
@@ -178,7 +180,7 @@ diskfs_lost_hardrefs (struct node *np)
              can't happen as long as we hold NP locked.  So we can safely
              unlock _libports_portrefcntlock for the following call. */
-          spin_unlock (&_libports_portrefcntlock);
+          pthread_spin_unlock (&_libports_portrefcntlock);
           /* Right now the node is locked with no hard refs;
              this is an anomalous situation.  Before messing with
@@ -197,7 +199,7 @@ diskfs_lost_hardrefs (struct node *np)
           diskfs_nput (np);
         }
       else
-        spin_unlock (&_libports_portrefcntlock);
+        pthread_spin_unlock (&_libports_portrefcntlock);
     }
 #endif
 }
@@ -440,7 +442,7 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
   /* Acquire a reference on all the nodes in the hash table
      and enter them into a list on the stack. */
-  spin_lock (&diskfs_node_refcnt_lock);
+  pthread_spin_lock (&diskfs_node_refcnt_lock);
   for (n = 0; n < INOHSZ; n++)
     for (np = nodehash[n]; np; np = np->dn->hnext)
       {
@@ -450,16 +452,16 @@ diskfs_node_iterate (error_t (*fun)(struct node *))
         i->np = np;
         list = i;
       }
-  spin_unlock (&diskfs_node_refcnt_lock);
+  pthread_spin_unlock (&diskfs_node_refcnt_lock);
   err = 0;
   for (i = list; i; i = i->next)
     {
       if (!err)
         {
-          mutex_lock (&i->np->lock);
+          pthread_mutex_lock (&i->np->lock);
           err = (*fun)(i->np);
-          mutex_unlock (&i->np->lock);
+          pthread_mutex_unlock (&i->np->lock);
         }
       diskfs_nrele (i->np);
     }
@@ -646,14 +648,14 @@ diskfs_S_file_get_storage_info (struct protid *cred,
     return EOPNOTSUPP;
   np = cred->po->np;
-  mutex_lock (&np->lock);
+  pthread_mutex_lock (&np->lock);
   /* See if this file fits in the direct block pointers.  If not, punt
      for now.  (Reading indir blocks is a pain, and I'm postponing
      pain.)  XXX */
   if (np->allocsize > NDADDR * sblock->fs_bsize)
     {
-      mutex_unlock (&np->lock);
+      pthread_mutex_unlock (&np->lock);
       return EINVAL;
     }
@@ -685,7 +687,7 @@ diskfs_S_file_get_storage_info (struct protid *cred,
     }
   diskfs_end_catch_exception ();
-  mutex_unlock (&np->lock);
+  pthread_mutex_unlock (&np->lock);
   if (! err)
     err = store_clone (store, &file_store);
diff --git a/ufs/main.c b/ufs/main.c
--- a/ufs/main.c
+++ b/ufs/main.c
@@ -46,19 +46,19 @@ warp_root (void)
   error_t err;
   err = diskfs_cached_lookup (2, &diskfs_root_node);
   assert (!err);
-  mutex_unlock (&diskfs_root_node->lock);
+  pthread_mutex_unlock (&diskfs_root_node->lock);
 }
 /* XXX */
-struct mutex printf_lock = MUTEX_INITIALIZER;
+pthread_mutex_t printf_lock = PTHREAD_MUTEX_INITIALIZER;
 int printf (const char *fmt, ...)
 {
   va_list arg;
   int done;
   va_start (arg, fmt);
-  mutex_lock (&printf_lock);
+  pthread_mutex_lock (&printf_lock);
   done = vprintf (fmt, arg);
-  mutex_unlock (&printf_lock);
+  pthread_mutex_unlock (&printf_lock);
   va_end (arg);
   return done;
 }
@@ -195,7 +195,7 @@ main (int argc, char **argv)
   /* SET HOST NAME */
   /* And this thread is done with its work. */
-  cthread_exit (0);
+  pthread_exit (NULL);
   return 0;
 }
diff --git a/ufs/pager.c b/ufs/pager.c
index 3038932d..1e3d140c 100644
--- a/ufs/pager.c
+++ b/ufs/pager.c
@@ -21,9 +21,9 @@
 #include <unistd.h>
 #include <hurd/store.h>
-spin_lock_t node2pagelock = SPIN_LOCK_INITIALIZER;
+pthread_spinlock_t node2pagelock = PTHREAD_SPINLOCK_INITIALIZER;
-spin_lock_t unlocked_pagein_lock = SPIN_LOCK_INITIALIZER;
+pthread_spinlock_t unlocked_pagein_lock = PTHREAD_SPINLOCK_INITIALIZER;
 #ifdef DONT_CACHE_MEMORY_OBJECTS
 #define MAY_CACHE 0
@@ -47,11 +47,11 @@ find_address (struct user_pager_info *upi,
               vm_address_t offset,
               daddr_t *addr,
               int *disksize,
-              struct rwlock **nplock,
+              pthread_rwlock_t **nplock,
               int isread)
 {
   error_t err;
-  struct rwlock *lock;
+  pthread_rwlock_t *lock;
   assert (upi->type == DISK || upi->type == FILE_DATA);
@@ -81,37 +81,49 @@ find_address (struct user_pager_info *upi,
          I think this is sufficiently rare to put it off for the
          time being.)  */
-      spin_lock (&unlocked_pagein_lock);
+      pthread_spin_lock (&unlocked_pagein_lock);
       if (offset >= upi->allow_unlocked_pagein
           && (offset + vm_page_size
               <= upi->allow_unlocked_pagein + upi->unlocked_pagein_length))
         {
-          spin_unlock (&unlocked_pagein_lock);
+          pthread_spin_unlock (&unlocked_pagein_lock);
          *nplock = 0;
          goto have_lock;
         }
-      spin_unlock (&unlocked_pagein_lock);
+      pthread_spin_unlock (&unlocked_pagein_lock);
       /* Block on the rwlock if necessary; but when we wake up, don't
          acquire it; check again from the top.  This is mutated
          inline from rwlock.h.  */
       lock = &np->dn->allocptrlock;
-      mutex_lock (&lock->master);
-      if (lock->readers == -1 || lock->writers_waiting)
+      /*
+         TD - Why do we lock first?
+         To prevent this nigh-impossible scenario:
+         1) trylock - writer has lock
+         2) switch back to writer before we lock waitlock
+         3) writer finishes: releases lock and broadcasts
+         4) we wait for a condition that will never get broadcast
+         With this, either we get the lock, or we get the signal.
+      */
+      pthread_mutex_lock (&np->dn->waitlock);
+      if (pthread_rwlock_tryrdlock (lock))
         {
-          lock->readers_waiting++;
-          condition_wait (&lock->wakeup, &lock->master);
-          lock->readers_waiting--;
-          mutex_unlock (&lock->master);
+          /*
+             TD - we now don't block on the rwlock. Instead, we wait on a
+             condition that will be signalled when the lock is unlocked,
+             or when it is safe not to lock the page. We don't spin on an
+             invariant, as spurius wakeups can do no harm.
+          */
+          pthread_cond_wait (&np->dn->waitcond, &np->dn->waitlock);
+          pthread_mutex_unlock (&np->dn->waitlock);
          goto try_again;
         }
-      lock->readers++;
-      mutex_unlock (&lock->master);
+      pthread_mutex_unlock (&np->dn->waitlock);
       *nplock = lock;
     }
   else
     {
-      rwlock_reader_lock (&np->dn->allocptrlock);
+      pthread_rwlock_rdlock (&np->dn->allocptrlock);
       *nplock = &np->dn->allocptrlock;
     }
@@ -120,7 +132,7 @@ find_address (struct user_pager_info *upi,
   if (offset >= np->allocsize)
     {
       if (*nplock)
-        rwlock_reader_unlock (*nplock);
+        pthread_rwlock_unlock (*nplock);
       if (isread)
         return EIO;
       else
@@ -138,7 +150,7 @@ find_address (struct user_pager_info *upi,
       err = fetch_indir_spec (np, lblkno (sblock, offset), indirs);
       if (err && *nplock)
-        rwlock_reader_unlock (*nplock);
+        pthread_rwlock_unlock (*nplock);
       else
         {
           if (indirs[0].bno)
@@ -162,7 +174,7 @@ pager_read_page (struct user_pager_info *pager,
                  int *writelock)
 {
   error_t err;
-  struct rwlock *nplock;
+  pthread_rwlock_t *nplock;
   daddr_t addr;
   int disksize;
@@ -193,7 +205,7 @@ pager_read_page (struct user_pager_info *pager,
     }
   if (nplock)
-    rwlock_reader_unlock (nplock);
+    pthread_rwlock_unlock (nplock);
   return err;
 }
@@ -207,7 +219,7 @@ pager_write_page (struct user_pager_info *pager,
 {
   daddr_t addr;
   int disksize;
-  struct rwlock *nplock;
+  pthread_rwlock_t *nplock;
   error_t err;
   err = find_address (pager, page, &addr, &disksize, &nplock, 0);
@@ -226,7 +238,7 @@ pager_write_page (struct user_pager_info *pager,
     err = 0;
   if (nplock)
-    rwlock_reader_unlock (nplock);
+    pthread_rwlock_unlock (nplock);
   return err;
 }
@@ -268,7 +280,7 @@ pager_unlock_page (struct user_pager_info *pager,
   dn = np->dn;
   di = dino (dn->number);
-  rwlock_writer_lock (&dn->allocptrlock);
+  pthread_rwlock_wrlock (&dn->allocptrlock);
   /* If this is the last block, we don't let it get unlocked. */
   if (address + __vm_page_size
@@ -276,21 +288,31 @@ pager_unlock_page (struct user_pager_info *pager,
     {
       printf ("attempt to unlock at last block denied\n");
       fflush (stdout);
-      rwlock_writer_unlock (&dn->allocptrlock);
+      pthread_rwlock_unlock (&dn->allocptrlock);
+      /* Wake up any remaining sleeping readers.  Wow, so much work.... */
+      pthread_mutex_lock (&dn->waitlock);
+      pthread_cond_broadcast (&dn->waitcond);
+      pthread_mutex_unlock (&dn->waitlock);
       return EIO;
     }
   err = fetch_indir_spec (np, lblkno (sblock, address), indirs);
   if (err)
     {
-      rwlock_writer_unlock (&dn->allocptrlock);
+      pthread_rwlock_unlock (&dn->allocptrlock);
+      pthread_mutex_lock (&dn->waitlock);
+      pthread_cond_broadcast (&dn->waitcond);
+      pthread_mutex_unlock (&dn->waitlock);
       return EIO;
     }
   err = diskfs_catch_exception ();
   if (err)
     {
-      rwlock_writer_unlock (&dn->allocptrlock);
+      pthread_rwlock_unlock (&dn->allocptrlock);
+      pthread_mutex_lock (&dn->waitlock);
+      pthread_cond_broadcast (&dn->waitcond);
+      pthread_mutex_unlock (&dn->waitlock);
       return EIO;
     }
@@ -421,7 +443,10 @@ pager_unlock_page (struct user_pager_info *pager,
  out:
   diskfs_end_catch_exception ();
-  rwlock_writer_unlock (&dn->allocptrlock);
+  pthread_rwlock_unlock (&dn->allocptrlock);
+  pthread_mutex_lock (&dn->waitlock);
+  pthread_cond_broadcast (&dn->waitcond);
+  pthread_mutex_unlock (&dn->waitlock);
   return err;
 }
@@ -452,10 +477,10 @@ pager_clear_user_data (struct user_pager_info *upi)
   /* XXX Do the right thing for the disk pager here too. */
   if (upi->type == FILE_DATA)
     {
-      spin_lock (&node2pagelock);
+      pthread_spin_lock (&node2pagelock);
       if (upi->np->dn->fileinfo == upi)
         upi->np->dn->fileinfo = 0;
-      spin_unlock (&node2pagelock);
+      pthread_spin_unlock (&node2pagelock);
       diskfs_nrele_light (upi->np);
     }
   free (upi);
@@ -491,11 +516,11 @@ diskfs_file_update (struct node *np,
   struct dirty_indir *d, *tmp;
   struct user_pager_info *upi;
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   upi = np->dn->fileinfo;
   if (upi)
     ports_port_ref (upi->p);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   if (upi)
     {
@@ -522,11 +547,11 @@ flush_node_pager (struct node *node)
   struct disknode *dn = node->dn;
   struct dirty_indir *dirty = dn->dirty;
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   upi = dn->fileinfo;
   if (upi)
     ports_port_ref (upi->p);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   if (upi)
     {
@@ -558,7 +583,7 @@ diskfs_get_filemap (struct node *np, vm_prot_t prot)
           && (!direct_symlink_extension
               || np->dn_stat.st_size >= sblock->fs_maxsymlinklen)));
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   do
     if (!np->dn->fileinfo)
       {
@@ -575,7 +600,7 @@ diskfs_get_filemap (struct node *np, vm_prot_t prot)
           {
             diskfs_nrele_light (np);
             free (upi);
-            spin_unlock (&node2pagelock);
+            pthread_spin_unlock (&node2pagelock);
             return MACH_PORT_NULL;
           }
         np->dn->fileinfo = upi;
@@ -596,7 +621,7 @@ diskfs_get_filemap (struct node *np, vm_prot_t prot)
       }
   while (right == MACH_PORT_NULL);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   mach_port_insert_right (mach_task_self (), right, right,
                           MACH_MSG_TYPE_MAKE_SEND);
@@ -611,11 +636,11 @@ drop_pager_softrefs (struct node *np)
 {
   struct user_pager_info *upi;
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   upi = np->dn->fileinfo;
   if (upi)
     ports_port_ref (upi->p);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   if (MAY_CACHE && upi)
     pager_change_attributes (upi->p, 0, MEMORY_OBJECT_COPY_DELAY, 0);
@@ -630,11 +655,11 @@ allow_pager_softrefs (struct node *np)
 {
   struct user_pager_info *upi;
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   upi = np->dn->fileinfo;
   if (upi)
     ports_port_ref (upi->p);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   if (MAY_CACHE && upi)
     pager_change_attributes (upi->p, 1, MEMORY_OBJECT_COPY_DELAY, 0);
diff --git a/ufs/pokeloc.c b/ufs/pokeloc.c
index 267aa106..e1d5ffcb 100644
--- a/ufs/pokeloc.c
+++ b/ufs/pokeloc.c
@@ -28,7 +28,7 @@ struct pokeloc
 };
 struct pokeloc *pokelist;
-spin_lock_t pokelistlock = SPIN_LOCK_INITIALIZER;
+pthread_spinlock_t pokelistlock = PTHREAD_SPINLOCK_INITIALIZER;
 /* Remember that data here on the disk has been modified. */
 void
@@ -41,10 +41,10 @@ record_poke (void *loc, vm_size_t length)
   pl->offset = trunc_page (offset);
   pl->length = round_page (offset + length) - pl->offset;
-  spin_lock (&pokelistlock);
+  pthread_spin_lock (&pokelistlock);
   pl->next = pokelist;
   pokelist = pl;
-  spin_unlock (&pokelistlock);
+  pthread_spin_unlock (&pokelistlock);
 }
 /* Get rid of any outstanding pokes. */
@@ -53,10 +53,10 @@ flush_pokes ()
 {
   struct pokeloc *pl;
-  spin_lock (&pokelistlock);
+  pthread_spin_lock (&pokelistlock);
   pl = pokelist;
   pokelist = 0;
-  spin_unlock (&pokelistlock);
+  pthread_spin_unlock (&pokelistlock);
   while (pl)
     {
@@ -72,7 +72,7 @@ sync_disk (int wait)
 {
   struct pokeloc *pl, *tmp;
-  spin_lock (&pokelistlock);
+  pthread_spin_lock (&pokelistlock);
   for (pl = pokelist; pl; pl = tmp)
     {
       pager_sync_some (diskfs_disk_pager, pl->offset, pl->length, wait);
@@ -80,6 +80,6 @@ sync_disk (int wait)
       free (pl);
     }
   pokelist = 0;
-  spin_unlock (&pokelistlock);
+  pthread_spin_unlock (&pokelistlock);
 }
diff --git a/ufs/sizes.c b/ufs/sizes.c
index 58cbfc98..e3d51b7d 100644
--- a/ufs/sizes.c
+++ b/ufs/sizes.c
@@ -91,11 +91,11 @@ diskfs_truncate (struct node *np,
      immediately.  (We are implicitly changing the data to zeros
      and doing it without the kernel's immediate knowledge;
      accordingl we must help out the kernel thusly.) */
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   upi = np->dn->fileinfo;
   if (upi)
     ports_port_ref (upi->p);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   if (upi)
     {
@@ -115,7 +115,7 @@ diskfs_truncate (struct node *np,
       ports_port_deref (upi->p);
     }
-  rwlock_writer_lock (&np->dn->allocptrlock);
+  pthread_rwlock_wrlock (&np->dn->allocptrlock);
   /* Update the size on disk; fsck will finish freeing blocks if necessary
      should we crash. */
@@ -250,7 +250,13 @@ diskfs_truncate (struct node *np,
   np->dn_set_ctime = 1;
   diskfs_node_update (np, 1);
-  rwlock_writer_unlock (&np->dn->allocptrlock);
+  pthread_rwlock_unlock (&np->dn->allocptrlock);
+  /* Wake up any remaining sleeping readers.
+     This sequence of three calls is now necessary whenever we acquire a write
+     lock on allocptrlock.  If we do not, we may leak some readers. */
+  pthread_mutex_lock (&np->dn->waitlock);
+  pthread_cond_broadcast (&np->dn->waitcond);
+  pthread_mutex_unlock (&np->dn->waitlock);
   /* At this point the last block (as defined by np->allocsize)
      might not be allocated.  We need to allocate it to maintain
@@ -275,11 +281,11 @@ diskfs_truncate (struct node *np,
   diskfs_end_catch_exception ();
   /* Now we can permit delayed copies again. */
-  spin_lock (&node2pagelock);
+  pthread_spin_lock (&node2pagelock);
   upi = np->dn->fileinfo;
   if (upi)
     ports_port_ref (upi->p);
-  spin_unlock (&node2pagelock);
+  pthread_spin_unlock (&node2pagelock);
   if (upi)
     {
       pager_change_attributes (upi->p, MAY_CACHE,
@@ -415,15 +421,23 @@ block_extended (struct node *np,
   assert_perror (err);
   /* Allow these pageins to occur even though we're holding the lock */
-  spin_lock (&unlocked_pagein_lock);
+  pthread_spin_lock (&unlocked_pagein_lock);
   np->dn->fileinfo->allow_unlocked_pagein = lbn * sblock->fs_bsize;
   np->dn->fileinfo->unlocked_pagein_length = round_page (old_size);
-  spin_unlock (&unlocked_pagein_lock);
+  pthread_spin_unlock (&unlocked_pagein_lock);
   /* Make sure all waiting pageins see this change. */
-  mutex_lock (&np->dn->allocptrlock.master);
-  condition_broadcast (&np->dn->allocptrlock.wakeup);
-  mutex_unlock (&np->dn->allocptrlock.master);
+  /* BDD - Is this sane? */
+  /* TD - No... no it wasn't.  But, it looked right. */
+  /*
+     This new code should, SHOULD, behave as the original code did.
+     This will wake up all readers waiting on the lock.  This code favors
+     strongly writers, but, as of making this change, pthreads favors
+     writers, and cthreads did favor writers.
+  */
+  pthread_mutex_lock (&np->dn->waitlock);
+  pthread_cond_broadcast (&np->dn->waitcond);
+  pthread_mutex_unlock (&np->dn->waitlock);
   /* Force the pages in core and make sure they are dirty */
   for (pokeaddr = (int *)mapaddr;
@@ -432,10 +446,10 @@ block_extended (struct node *np,
     *pokeaddr = *pokeaddr;
   /* Turn off the special pagein permission */
-  spin_lock (&unlocked_pagein_lock);
+  pthread_spin_lock (&unlocked_pagein_lock);
   np->dn->fileinfo->allow_unlocked_pagein = 0;
   np->dn->fileinfo->unlocked_pagein_length = 0;
-  spin_unlock (&unlocked_pagein_lock);
+  pthread_spin_unlock (&unlocked_pagein_lock);
   /* Undo mapping */
   mach_port_deallocate (mach_task_self (), mapobj);
@@ -497,7 +511,7 @@ diskfs_grow (struct node *np,
   if (size == 0)
     size = sblock->fs_bsize;
-  rwlock_writer_lock (&np->dn->allocptrlock);
+  pthread_rwlock_wrlock (&np->dn->allocptrlock);
   /* The old last block of the file. */
   olbn = lblkno (sblock, np->allocsize - 1);
@@ -680,7 +694,10 @@ diskfs_grow (struct node *np,
       np->allocsize = newallocsize;
     }
-  rwlock_writer_unlock (&np->dn->allocptrlock);
+  pthread_rwlock_unlock (&np->dn->allocptrlock);
+  pthread_mutex_lock (&np->dn->waitlock);
+  pthread_cond_broadcast (&np->dn->waitcond);
+  pthread_mutex_unlock (&np->dn->waitlock);
   if (need_sync)
     diskfs_file_update (np, 1);
diff --git a/ufs/ufs.h b/ufs/ufs.h
--- a/ufs/ufs.h
+++ b/ufs/ufs.h
@@ -25,6 +25,7 @@
 #include <hurd/diskfs.h>
 #include <sys/mman.h>
 #include <assert.h>
+#include <pthread.h>
 #include <features.h>
 #include "fs.h"
 #include "dinode.h"
@@ -54,7 +55,9 @@ struct disknode
   /* Links on hash list. */
   struct node *hnext, **hprevp;
-  struct rwlock allocptrlock;
+  pthread_rwlock_t allocptrlock;
+  pthread_mutex_t waitlock;
+  pthread_cond_t waitcond;
   struct dirty_indir *dirty;
@@ -112,14 +115,14 @@ extern struct csum *csum;
 int sblock_dirty;
 int csum_dirty;
-spin_lock_t node2pagelock;
+pthread_spinlock_t node2pagelock;
-spin_lock_t alloclock;
+pthread_spinlock_t alloclock;
-spin_lock_t gennumberlock;
+pthread_spinlock_t gennumberlock;
 u_long nextgennumber;
-spin_lock_t unlocked_pagein_lock;
+pthread_spinlock_t unlocked_pagein_lock;
 /* The compat_mode specifies whether or not we write extensions onto the
    disk. */
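The central change above is the replacement of the cthreads rwlock internals (its master mutex and wakeup condition) with a pthread_rwlock_t plus a helper mutex/condition pair: readers in find_address try the read lock without blocking and, if a writer holds it, sleep until the writer's broadcast; every path that drops the write lock must then broadcast on that condition. The following is a minimal, self-contained sketch of that pattern only, not the ufs sources themselves; struct allocptr_lock, reader_acquire, and writer_release are invented names for illustration, standing in for dn->allocptrlock, dn->waitlock, and dn->waitcond.

/* Sketch of the tryrdlock-or-wait pattern adopted by this commit.
   Assumes POSIX threads only; the demo in main() is illustrative.  */

#include <pthread.h>
#include <stdio.h>

struct allocptr_lock
{
  pthread_rwlock_t rwlock;    /* plays the role of dn->allocptrlock */
  pthread_mutex_t waitlock;   /* plays the role of dn->waitlock */
  pthread_cond_t waitcond;    /* plays the role of dn->waitcond */
};

/* Reader side: the equivalent of the retry loop in find_address.
   The trylock happens while WAITLOCK is held, so a writer cannot
   release the rwlock and broadcast between our failed trylock and
   our pthread_cond_wait.  */
static void
reader_acquire (struct allocptr_lock *l)
{
  for (;;)
    {
      pthread_mutex_lock (&l->waitlock);
      if (pthread_rwlock_tryrdlock (&l->rwlock) == 0)
        {
          /* Got the read lock; drop the helper mutex and proceed.  */
          pthread_mutex_unlock (&l->waitlock);
          return;
        }
      /* A writer holds the lock: sleep until it broadcasts, then retry
         from the top (spurious wakeups just cause another trylock).  */
      pthread_cond_wait (&l->waitcond, &l->waitlock);
      pthread_mutex_unlock (&l->waitlock);
    }
}

static void
reader_release (struct allocptr_lock *l)
{
  pthread_rwlock_unlock (&l->rwlock);
}

/* Writer side: every write-unlock must be followed by this broadcast,
   or readers parked in reader_acquire would sleep forever.  */
static void
writer_release (struct allocptr_lock *l)
{
  pthread_rwlock_unlock (&l->rwlock);
  pthread_mutex_lock (&l->waitlock);
  pthread_cond_broadcast (&l->waitcond);
  pthread_mutex_unlock (&l->waitlock);
}

int
main (void)
{
  struct allocptr_lock l;
  pthread_rwlock_init (&l.rwlock, NULL);
  pthread_mutex_init (&l.waitlock, NULL);
  pthread_cond_init (&l.waitcond, NULL);

  pthread_rwlock_wrlock (&l.rwlock);   /* a writer takes the lock...  */
  writer_release (&l);                 /* ...and releases + broadcasts */

  reader_acquire (&l);                 /* a reader now gets in at once */
  puts ("reader acquired the lock");
  reader_release (&l);
  return 0;
}

As the comment added in sizes.c stresses, the unlock/broadcast sequence in writer_release has to follow every acquisition of the write lock (diskfs_truncate, diskfs_grow, pager_unlock_page, and all of its error exits); omitting it on any path would strand waiting readers.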