From ba667e22df89ec00019100c5aab3529fa402f33c Mon Sep 17 00:00:00 2001
From: Axel Gembe
Date: Thu, 9 Nov 2023 19:54:01 +0700
Subject: [PATCH] vflush: Speed up reclaim by doing less in the loop

This removes the dropping and re-taking of `vnode_all_list_lock` inside
the loop, as that is not needed. It also only enters the `v_mutex` of
nodes that are not yet `VNODE_DEAD`.

This converts part of the loop into a new function called
`flush_file_objects` to make it more readable. It also removes the
restart of the loop, which is safe because `vnode_all_list_lock` is
never unlocked.

Signed-off-by: Axel Gembe
---
 module/os/windows/spl/spl-vnode.c | 189 ++++++++++++++----------------
 1 file changed, 87 insertions(+), 102 deletions(-)

diff --git a/module/os/windows/spl/spl-vnode.c b/module/os/windows/spl/spl-vnode.c
index 6103e1632ebb..f7675555df6c 100644
--- a/module/os/windows/spl/spl-vnode.c
+++ b/module/os/windows/spl/spl-vnode.c
@@ -1537,6 +1537,64 @@ mount_count_nodes(struct mount *mp, int flags)
 	return (count);
 }
 
+static void
+flush_file_objects(struct vnode *rvp)
+{
+	// Flush the caches of every fileobject attached to this vnode,
+	// then release the entries we managed to close from the AVL tree.
+
+	FILE_OBJECT *fileobject;
+	vnode_fileobjects_t *node;
+	int Status;
+
+	// Make sure we don't call vnode_flushcache() again from IRP_MJ_CLOSE.
+	rvp->v_flags |= VNODE_FLUSHING;
+
+	if (avl_is_empty(&rvp->v_fileobjects))
+		return;
+
+	for (node = avl_first(&rvp->v_fileobjects); node != NULL;
+	    node = AVL_NEXT(&rvp->v_fileobjects, node)) {
+		fileobject = node->fileobject;
+
+		// Because the CC* calls can re-enter ZFS, we drop
+		// v_mutex around vnode_flushcache() below. Flushed
+		// nodes are only marked here; they are removed from
+		// the tree in a separate pass afterwards.
+
+		try {
+			Status = ObReferenceObjectByPointer(fileobject, 0,
+			    *IoFileObjectType, KernelMode);
+		} except(EXCEPTION_EXECUTE_HANDLER) {
+			Status = GetExceptionCode();
+		}
+
+		// Try to lock fileobject before we use it.
+		if (NT_SUCCESS(Status)) {
+			// Let go of mutex, as flushcache will re-enter
+			// (IRP_MJ_CLEANUP)
+			mutex_exit(&rvp->v_mutex);
+			node->remove = vnode_flushcache(rvp, fileobject, TRUE);
+			ObDereferenceObject(fileobject);
+			mutex_enter(&rvp->v_mutex);
+		} // if ObReferenceObjectByPointer
+	} // for
+
+	// Remove any nodes we successfully closed.
+restart_remove_closed:
+	for (node = avl_first(&rvp->v_fileobjects); node != NULL;
+	    node = AVL_NEXT(&rvp->v_fileobjects, node)) {
+		if (node->remove) {
+			avl_remove(&rvp->v_fileobjects, node);
+			kmem_free(node, sizeof (*node));
+			goto restart_remove_closed;
+		}
+	}
+
+	dprintf("vp %p has %d fileobject(s) remaining\n", rvp,
+	    avl_numnodes(&rvp->v_fileobjects));
+}
+
 static void
 print_reclaim_stats(boolean_t init, int reclaims)
 {
@@ -1586,129 +1644,56 @@ vflush(struct mount *mp, struct vnode *skipvp, int flags)
 	// FORCECLOSE : release everything, force unmount
 	// if mp is NULL, we are reclaiming nodes, until threshold
-	int isbusy = 0;
 	int reclaims = 0;
 	vnode_fileobjects_t *node;
 	struct vnode *rvp;
-	int Status;
 	boolean_t filesonly = B_TRUE;
 	dprintf("vflush start\n");
 	mutex_enter(&vnode_all_list_lock);
-filesanddirs:
 	print_reclaim_stats(B_TRUE, 0);
-	while (1) {
-		for (rvp = list_head(&vnode_all_list);
-		    rvp;
-		    rvp = list_next(&vnode_all_list, rvp)) {
-
-			// skip vnodes not belonging to this mount
-			if (mp && rvp->v_mount != mp)
-				continue;
+filesanddirs:
+	for (rvp = list_head(&vnode_all_list); rvp;
+	    rvp = list_next(&vnode_all_list, rvp)) {
+		// skip vnodes not belonging to this mount
+		if (mp && rvp->v_mount != mp)
+			continue;
-			if (filesonly && vnode_isdir(rvp))
-				continue;
+		if (filesonly && vnode_isdir(rvp))
+			continue;
-			// If we aren't FORCE and asked to SKIPROOT, and node
-			// is MARKROOT, then go to next.
-			if (!(flags & FORCECLOSE)) {
-				if ((flags & SKIPROOT))
-					if (rvp->v_flags & VNODE_MARKROOT)
-						continue;
+		// If we aren't FORCE and asked to SKIPROOT, and node
+		// is MARKROOT, then go to next.
+		if (!(flags & FORCECLOSE)) {
+			if ((flags & SKIPROOT))
+				if (rvp->v_flags & VNODE_MARKROOT)
+					continue;
 #if 0 // when we use SYSTEM vnodes
-				if ((flags & SKIPSYSTEM))
-					if (rvp->v_flags & VNODE_MARKSYSTEM)
-						continue;
+			if ((flags & SKIPSYSTEM))
+				if (rvp->v_flags & VNODE_MARKSYSTEM)
+					continue;
 #endif
-			}
-			// We are to remove this node, even if ROOT - unmark it.
-			mutex_exit(&vnode_all_list_lock);
-
-			// Release the AVL tree
-			// KIRQL OldIrql;
-
-			// Attempt to flush out any caches;
-			mutex_enter(&rvp->v_mutex);
-			// Make sure we don't call vnode_cacheflush() again
-			// from IRP_MJ_CLOSE.
-			rvp->v_flags |= VNODE_FLUSHING;
-
-			for (node = avl_first(&rvp->v_fileobjects);
-			    node != NULL;
-			    node = AVL_NEXT(&rvp->v_fileobjects, node)) {
-				FILE_OBJECT *fileobject = node->fileobject;
-
-				// Because the CC* calls can re-enter ZFS, we need to
-				// release the lock, and because we release the lock the
-				// while has to start from the top each time. We release
-				// the node at end of this while.
-
-				try {
-					Status = ObReferenceObjectByPointer(
-					    fileobject,
-					    0,
-					    *IoFileObjectType,
-					    KernelMode);
-				} except(EXCEPTION_EXECUTE_HANDLER) {
-					Status = GetExceptionCode();
-				}
-
-				// Try to lock fileobject before we use it.
-				if (NT_SUCCESS(Status)) {
-					int ok;
-
-					// Let go of mutex, as flushcache will re-enter
-					// (IRP_MJ_CLEANUP)
-					mutex_exit(&rvp->v_mutex);
-					node->remove = vnode_flushcache(rvp,
-					    fileobject, TRUE);
-
-					ObDereferenceObject(fileobject);
-
-					mutex_enter(&rvp->v_mutex);
+		}
+		// We are to remove this node, even if ROOT - unmark it.
-				} // if ObReferenceObjectByPointer
-			} // for
+		if (rvp->v_flags & VNODE_DEAD) {
+			continue;
+		}
-			// Remove any nodes we successfully closed.
-restart:
-			for (node = avl_first(&rvp->v_fileobjects);
-			    node != NULL;
-			    node = AVL_NEXT(&rvp->v_fileobjects, node)) {
-				if (node->remove) {
-					avl_remove(&rvp->v_fileobjects, node);
-					kmem_free(node, sizeof (*node));
-					goto restart;
-				}
-			}
+		mutex_enter(&rvp->v_mutex);
-			dprintf("vp %p has %d fileobject(s) remaining\n", rvp,
-			    avl_numnodes(&rvp->v_fileobjects));
+		flush_file_objects(rvp);
-			// vnode_recycle_int() will call mutex_exit(&rvp->v_mutex);
+		// vnode_recycle_int() will exit v_mutex
 		// re-check flags, due to releasing locks
-			isbusy = 1;
-			if (!(rvp->v_flags & VNODE_DEAD))
-				isbusy = vnode_recycle_int(rvp,
-				    (flags & FORCECLOSE) | VNODELOCKED);
-			else
-				mutex_exit(&rvp->v_mutex);
-
-			mutex_enter(&vnode_all_list_lock);
-
-			if (!isbusy) {
-				reclaims++;
-				print_reclaim_stats(B_FALSE, reclaims);
-				break; // must restart loop if unlinked node
-			}
+		if (!vnode_recycle_int(rvp, (flags & FORCECLOSE) |
+		    VNODELOCKED)) {
+			reclaims++;
+			print_reclaim_stats(B_FALSE, reclaims);
 		}
-
-		// If the end of the list was reached, stop entirely
-		if (!rvp)
-			break;
 	}
 
 	if (filesonly) {
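
A condensed sketch of the reshaped vflush() walk described in the commit
message, for illustration only (it is not part of the patch): the
files-then-directories passes, SKIPROOT/SKIPSYSTEM handling and the statistics
printing are elided, and all names are the ones used in the driver code above.

	// Illustrative fragment, assuming the declarations from vflush().
	mutex_enter(&vnode_all_list_lock);
	for (rvp = list_head(&vnode_all_list); rvp;
	    rvp = list_next(&vnode_all_list, rvp)) {
		if (mp && rvp->v_mount != mp)
			continue;	// vnode belongs to another mount
		if (rvp->v_flags & VNODE_DEAD)
			continue;	// already reclaimed; v_mutex is never taken
		mutex_enter(&rvp->v_mutex);
		flush_file_objects(rvp);	// drops and retakes v_mutex internally
		// vnode_recycle_int() consumes v_mutex (VNODELOCKED) and returns
		// nonzero if the vnode is still busy.
		if (!vnode_recycle_int(rvp, (flags & FORCECLOSE) | VNODELOCKED))
			reclaims++;
	}
	mutex_exit(&vnode_all_list_lock);	// list lock held across the whole walk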