1
0
mirror of https://github.com/openbsd/src.git synced 2024-12-22 07:27:59 -08:00

revert "try to simplify the locking code around busy maps"

anton@ and syzkaller have trouble with it.
This commit is contained in:
dlg 2024-10-21 06:07:33 +00:00
parent 2372b7954f
commit 5793e77b11
2 changed files with 60 additions and 57 deletions

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: uvm_map.c,v 1.331 2024/10/20 11:28:17 dlg Exp $ */
/* $OpenBSD: uvm_map.c,v 1.332 2024/10/21 06:07:33 dlg Exp $ */
/* $NetBSD: uvm_map.c,v 1.86 2000/11/27 08:40:03 chs Exp $ */
/*
@@ -5201,77 +5201,68 @@ out:
boolean_t
vm_map_lock_try_ln(struct vm_map *map, char *file, int line)
{
int rv;
boolean_t rv;
if (map->flags & VM_MAP_INTRSAFE) {
if (!mtx_enter_try(&map->mtx))
return FALSE;
rv = mtx_enter_try(&map->mtx);
} else {
struct proc *busy;
mtx_enter(&map->flags_lock);
busy = map->busy;
if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
mtx_leave(&map->flags_lock);
return (FALSE);
}
mtx_leave(&map->flags_lock);
if (busy != NULL && busy != curproc)
return FALSE;
rv = rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP);
if (rv != 0)
return FALSE;
/* to be sure, to be sure */
mtx_enter(&map->flags_lock);
busy = map->busy;
mtx_leave(&map->flags_lock);
if (busy != NULL && busy != curproc) {
rw_exit(&map->lock);
return FALSE;
rv = (rw_enter(&map->lock, RW_WRITE|RW_NOSLEEP) == 0);
/* check if the lock is busy and back out if we won the race */
if (rv) {
mtx_enter(&map->flags_lock);
if ((map->flags & VM_MAP_BUSY) &&
(map->busy != curproc)) {
rw_exit(&map->lock);
rv = FALSE;
}
mtx_leave(&map->flags_lock);
}
}
map->timestamp++;
LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
uvm_tree_sanity(map, file, line);
uvm_tree_size_chk(map, file, line);
if (rv) {
map->timestamp++;
LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
uvm_tree_sanity(map, file, line);
uvm_tree_size_chk(map, file, line);
}
return TRUE;
return (rv);
}
void
vm_map_lock_ln(struct vm_map *map, char *file, int line)
{
if ((map->flags & VM_MAP_INTRSAFE) == 0) {
mtx_enter(&map->flags_lock);
for (;;) {
while (map->busy != NULL && map->busy != curproc) {
map->nbusy++;
msleep_nsec(&map->busy, &map->mtx,
do {
mtx_enter(&map->flags_lock);
tryagain:
while ((map->flags & VM_MAP_BUSY) &&
(map->busy != curproc)) {
map->flags |= VM_MAP_WANTLOCK;
msleep_nsec(&map->flags, &map->flags_lock,
PVM, vmmapbsy, INFSLP);
map->nbusy--;
}
mtx_leave(&map->flags_lock);
rw_enter_write(&map->lock);
/* to be sure, to be sure */
mtx_enter(&map->flags_lock);
if (map->busy != NULL && map->busy != curproc) {
/* go around again */
rw_exit_write(&map->lock);
} else {
/* we won */
break;
}
} while (rw_enter(&map->lock, RW_WRITE|RW_SLEEPFAIL) != 0);
/* check if the lock is busy and back out if we won the race */
mtx_enter(&map->flags_lock);
if ((map->flags & VM_MAP_BUSY) && (map->busy != curproc)) {
rw_exit(&map->lock);
goto tryagain;
}
mtx_leave(&map->flags_lock);
} else {
mtx_enter(&map->mtx);
}
if (map->busy != curproc) {
KASSERT(map->busy == NULL);
if (map->busy != curproc)
map->timestamp++;
}
LPRINTF(("map lock: %p (at %s %d)\n", map, file, line));
uvm_tree_sanity(map, file, line);
uvm_tree_size_chk(map, file, line);
@ -5323,24 +5314,25 @@ vm_map_busy_ln(struct vm_map *map, char *file, int line)
mtx_enter(&map->flags_lock);
map->busy = curproc;
map->flags |= VM_MAP_BUSY;
mtx_leave(&map->flags_lock);
}
void
vm_map_unbusy_ln(struct vm_map *map, char *file, int line)
{
unsigned int nbusy;
int oflags;
KASSERT((map->flags & VM_MAP_INTRSAFE) == 0);
KASSERT(map->busy == curproc);
mtx_enter(&map->flags_lock);
nbusy = map->nbusy;
oflags = map->flags;
map->busy = NULL;
map->flags &= ~(VM_MAP_BUSY|VM_MAP_WANTLOCK);
mtx_leave(&map->flags_lock);
if (nbusy > 0)
wakeup(&map->busy);
if (oflags & VM_MAP_WANTLOCK)
wakeup(&map->flags);
}
void

View File

@@ -1,4 +1,4 @@
/* $OpenBSD: uvm_map.h,v 1.91 2024/10/20 11:28:17 dlg Exp $ */
/* $OpenBSD: uvm_map.h,v 1.92 2024/10/21 06:07:33 dlg Exp $ */
/* $NetBSD: uvm_map.h,v 1.24 2001/02/18 21:19:08 chs Exp $ */
/*
@@ -214,6 +214,17 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
* map is write-locked. may be tested
* without asserting `flags_lock'.
*
* VM_MAP_BUSY r/w; may only be set when map is
* write-locked, may only be cleared by
* thread which set it, map read-locked
* or write-locked. must be tested
* while `flags_lock' is asserted.
*
* VM_MAP_WANTLOCK r/w; may only be set when the map
* is busy, and thread is attempting
* to write-lock. must be tested
* while `flags_lock' is asserted.
*
* VM_MAP_GUARDPAGES r/o; must be specified at map
* initialization time.
* If set, guards will appear between
@@ -246,7 +257,6 @@ RBT_PROTOTYPE(uvm_map_addr, vm_map_entry, daddrs.addr_entry,
* a atomic operations
* I immutable after creation or exec(2)
* v `vm_map_lock' (this map `lock' or `mtx')
* f flags_lock
*/
struct vm_map {
struct pmap *pmap; /* [I] Physical map */
@@ -256,10 +266,9 @@ struct vm_map {
vsize_t size; /* virtual size */
int ref_count; /* [a] Reference count */
int flags; /* [f] flags */
int flags; /* flags */
unsigned int timestamp; /* Version number */
struct proc *busy; /* [f] thread holding map busy*/
unsigned int nbusy; /* [f] waiters for busy */
struct proc *busy; /* [v] thread holding map busy*/
vaddr_t min_offset; /* [I] First address in map. */
vaddr_t max_offset; /* [I] Last address in map. */
@@ -314,6 +323,8 @@ struct vm_map {
#define VM_MAP_PAGEABLE 0x01 /* ro: entries are pageable */
#define VM_MAP_INTRSAFE 0x02 /* ro: interrupt safe map */
#define VM_MAP_WIREFUTURE 0x04 /* rw: wire future mappings */
#define VM_MAP_BUSY 0x08 /* rw: map is busy */
#define VM_MAP_WANTLOCK 0x10 /* rw: want to write-lock */
#define VM_MAP_GUARDPAGES 0x20 /* rw: add guard pgs to map */
#define VM_MAP_ISVMSPACE 0x40 /* ro: map is a vmspace */
#define VM_MAP_PINSYSCALL_ONCE 0x100 /* rw: pinsyscall done */