Skip to content

Commit

Permalink
vflush: Speed up reclaim by doing less in the loop
Browse files Browse the repository at this point in the history
This removes releasing and re-acquiring the `vnode_all_list_lock` inside
the loop, as that is not needed. It also only enters the `v_mutex` of
nodes that are not yet marked `VNODE_DEAD`.

This converts part of the loop into a new function called
`flush_file_objects` to make it more readable.

This also removes the restart of the loop, which is safe because
`vnode_all_list_lock` is held for the entire iteration and never released.

Signed-off-by: Axel Gembe <[email protected]>
  • Loading branch information
EchterAgo committed Nov 9, 2023
1 parent 39a519d commit e588afe
Showing 1 changed file with 87 additions and 102 deletions.
189 changes: 87 additions & 102 deletions module/os/windows/spl/spl-vnode.c
Original file line number Diff line number Diff line change
Expand Up @@ -1537,6 +1537,64 @@ mount_count_nodes(struct mount *mp, int flags)
return (count);
}

static void
flush_file_objects(struct vnode *rvp)
{
// Release the AVL tree
// Attempt to flush out any caches;

FILE_OBJECT *fileobject;
vnode_fileobjects_t *node;
int Status;

// Make sure we don't call vnode_flushcache() again from IRP_MJ_CLOSE.
rvp->v_flags |= VNODE_FLUSHING;

if (avl_is_empty(&rvp->v_fileobjects))
return;

for (node = avl_first(&rvp->v_fileobjects); node != NULL;
node = AVL_NEXT(&rvp->v_fileobjects, node)) {
fileobject = node->fileobject;

Check failure on line 1558 in module/os/windows/spl/spl-vnode.c

View workflow job for this annotation

GitHub Actions / checkstyle

non-continuation indented 4 spaces

// Because the CC* calls can re-enter ZFS, we need to
// release the lock, and because we release the lock the
// while has to start from the top each time. We release
// the node at end of this while.

try {
Status = ObReferenceObjectByPointer(fileobject, 0,
*IoFileObjectType, KernelMode);
} except(EXCEPTION_EXECUTE_HANDLER) {
Status = GetExceptionCode();
}

// Try to lock fileobject before we use it.
if (NT_SUCCESS(Status)) {
// Let go of mutex, as flushcache will re-enter
// (IRP_MJ_CLEANUP)
mutex_exit(&rvp->v_mutex);
node->remove = vnode_flushcache(rvp, fileobject, TRUE);
ObDereferenceObject(fileobject);
mutex_enter(&rvp->v_mutex);
} // if ObReferenceObjectByPointer
} // for

// Remove any nodes we successfully closed.
restart_remove_closed:
for (node = avl_first(&rvp->v_fileobjects); node != NULL;
node = AVL_NEXT(&rvp->v_fileobjects, node)) {
if (node->remove) {
avl_remove(&rvp->v_fileobjects, node);
kmem_free(node, sizeof (*node));
goto restart_remove_closed;
}
}

dprintf("vp %p has %d fileobject(s) remaining\n", rvp,
avl_numnodes(&rvp->v_fileobjects));
}

static void
print_reclaim_stats(boolean_t init, int reclaims)
{
Expand Down Expand Up @@ -1586,129 +1644,56 @@ vflush(struct mount *mp, struct vnode *skipvp, int flags)
// FORCECLOSE : release everything, force unmount

// if mp is NULL, we are reclaiming nodes, until threshold
int isbusy = 0;
int reclaims = 0;
vnode_fileobjects_t *node;
struct vnode *rvp;
int Status;
boolean_t filesonly = B_TRUE;

dprintf("vflush start\n");

mutex_enter(&vnode_all_list_lock);

filesanddirs:
print_reclaim_stats(B_TRUE, 0);

while (1) {
for (rvp = list_head(&vnode_all_list);
rvp;
rvp = list_next(&vnode_all_list, rvp)) {

// skip vnodes not belonging to this mount
if (mp && rvp->v_mount != mp)
continue;
filesanddirs:
for (rvp = list_head(&vnode_all_list); rvp;
rvp = list_next(&vnode_all_list, rvp)) {
// skip vnodes not belonging to this mount
if (mp && rvp->v_mount != mp)
continue;

if (filesonly && vnode_isdir(rvp))
continue;
if (filesonly && vnode_isdir(rvp))
continue;

// If we aren't FORCE and asked to SKIPROOT, and node
// is MARKROOT, then go to next.
if (!(flags & FORCECLOSE)) {
if ((flags & SKIPROOT))
if (rvp->v_flags & VNODE_MARKROOT)
continue;
// If we aren't FORCE and asked to SKIPROOT, and node
// is MARKROOT, then go to next.
if (!(flags & FORCECLOSE)) {
if ((flags & SKIPROOT))
if (rvp->v_flags & VNODE_MARKROOT)
continue;
#if 0 // when we use SYSTEM vnodes
if ((flags & SKIPSYSTEM))
if (rvp->v_flags & VNODE_MARKSYSTEM)
continue;
if ((flags & SKIPSYSTEM))
if (rvp->v_flags & VNODE_MARKSYSTEM)
continue;
#endif
}
// We are to remove this node, even if ROOT - unmark it.
mutex_exit(&vnode_all_list_lock);

// Release the AVL tree
// KIRQL OldIrql;

// Attempt to flush out any caches;
mutex_enter(&rvp->v_mutex);
// Make sure we don't call vnode_cacheflush() again
// from IRP_MJ_CLOSE.
rvp->v_flags |= VNODE_FLUSHING;

for (node = avl_first(&rvp->v_fileobjects);
node != NULL;
node = AVL_NEXT(&rvp->v_fileobjects, node)) {
FILE_OBJECT *fileobject = node->fileobject;

// Because the CC* calls can re-enter ZFS, we need to
// release the lock, and because we release the lock the
// while has to start from the top each time. We release
// the node at end of this while.

try {
Status = ObReferenceObjectByPointer(
fileobject,
0,
*IoFileObjectType,
KernelMode);
} except(EXCEPTION_EXECUTE_HANDLER) {
Status = GetExceptionCode();
}

// Try to lock fileobject before we use it.
if (NT_SUCCESS(Status)) {
int ok;

// Let go of mutex, as flushcache will re-enter
// (IRP_MJ_CLEANUP)
mutex_exit(&rvp->v_mutex);
node->remove = vnode_flushcache(rvp,
fileobject, TRUE);

ObDereferenceObject(fileobject);

mutex_enter(&rvp->v_mutex);
}
// We are to remove this node, even if ROOT - unmark it.

} // if ObReferenceObjectByPointer
} // for
if (rvp->v_flags & VNODE_DEAD) {
continue;
}

// Remove any nodes we successfully closed.
restart:
for (node = avl_first(&rvp->v_fileobjects);
node != NULL;
node = AVL_NEXT(&rvp->v_fileobjects, node)) {
if (node->remove) {
avl_remove(&rvp->v_fileobjects, node);
kmem_free(node, sizeof (*node));
goto restart;
}
}
mutex_enter(&rvp->v_mutex);

dprintf("vp %p has %d fileobject(s) remaining\n", rvp,
avl_numnodes(&rvp->v_fileobjects));
flush_file_objects(rvp);

// vnode_recycle_int() will call mutex_exit(&rvp->v_mutex);
// vnode_recycle_int() will exit v_mutex
// re-check flags, due to releasing locks
isbusy = 1;
if (!(rvp->v_flags & VNODE_DEAD))
isbusy = vnode_recycle_int(rvp,
(flags & FORCECLOSE) | VNODELOCKED);
else
mutex_exit(&rvp->v_mutex);

mutex_enter(&vnode_all_list_lock);

if (!isbusy) {
reclaims++;
print_reclaim_stats(B_FALSE, reclaims);
break; // must restart loop if unlinked node
}
if (!vnode_recycle_int(rvp, (flags & FORCECLOSE) |
VNODELOCKED)) {
reclaims++;
print_reclaim_stats(B_FALSE, reclaims);
}

// If the end of the list was reached, stop entirely
if (!rvp)
break;
}

if (filesonly) {
Expand Down

0 comments on commit e588afe

Please sign in to comment.