diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 9ef94fcc44b..e34ff40c1da 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -2,7 +2,7 @@ "build": { "dockerfile": "Dockerfile", "args": { - "GEOS_TPL_TAG": "251-99" + "GEOS_TPL_TAG": "255-118" } }, "runArgs": [ diff --git a/.github/workflows/build_and_test.yml b/.github/workflows/build_and_test.yml index dee5f516a81..f9b423b95a2 100644 --- a/.github/workflows/build_and_test.yml +++ b/.github/workflows/build_and_test.yml @@ -12,6 +12,10 @@ on: CMAKE_BUILD_TYPE: required: true type: string + CODE_COVERAGE: + required: false + type: boolean + default: false DOCKER_IMAGE_TAG: required: true type: string @@ -127,6 +131,10 @@ jobs: script_args+=(--cmake-build-type ${{ inputs.CMAKE_BUILD_TYPE }}) script_args+=(${{ inputs.BUILD_AND_TEST_CLI_ARGS }}) + if ${{ inputs.CODE_COVERAGE }} == 'true'; then + script_args+=(--code-coverage) + fi + # In case of integrated tests run, we still want to send the results to the cloud for inspection. # While for standard build (if even possible), pushing a failed build would be pointless. # GHA set `-e` to bash scripts by default to fail asap, @@ -148,5 +156,13 @@ jobs: echo "Download the bundle at https://storage.googleapis.com/${{ inputs.GCP_BUCKET }}/${DATA_BASENAME}" fi fi - exit ${EXIT_STATUS} + + - name: Upload coverage to Codecov + if: inputs.CODE_COVERAGE + uses: codecov/codecov-action@v3 + with: + files: geos_coverage.info.cleaned + fail_ci_if_error: true + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} \ No newline at end of file diff --git a/.github/workflows/ci_tests.yml b/.github/workflows/ci_tests.yml index 4863884a573..11784f7afa2 100644 --- a/.github/workflows/ci_tests.yml +++ b/.github/workflows/ci_tests.yml @@ -1,5 +1,10 @@ name: GEOS CI -on: pull_request + +on: + push: + branches: + - develop + pull_request: # Cancels in-progress workflows for a PR when updated concurrency: @@ -12,12 +17,11 @@ jobs: # Jobs will be cancelled if PR is a draft. # PR status must be "Open" to run CI. - is_pull_request_a_draft: + is_not_draft_pull_request: # Everywhere in this workflow, we use the most recent ubuntu distribution available in Github Actions # to ensure maximum support of google cloud's sdk. runs-on: ubuntu-22.04 outputs: - NUM_ASSIGNEES: ${{ steps.extract_pr_info.outputs.NUM_ASSIGNEES }} DOCKER_IMAGE_TAG: ${{ steps.extract_docker_image_tag.outputs.DOCKER_IMAGE_TAG }} LABELS: ${{ steps.extract_pr_info.outputs.LABELS }} steps: @@ -34,7 +38,6 @@ jobs: if [[ $draft_status == true ]]; then exit 1 ; fi # If the workflow is meant to continue, we extract additional information for the json of the pr. - echo "NUM_ASSIGNEES=$(echo ${pr_json} | jq '.assignees | length')" >> "$GITHUB_OUTPUT" echo "LABELS=$(echo ${pr_json} | jq -crM '[.labels[].name]')" >> "$GITHUB_OUTPUT" # The TPL tag is contained in the codespaces configuration to avoid duplications. - name: Checkout .devcontainer/devcontainer.json @@ -53,19 +56,22 @@ jobs: # PR must be assigned to be merged. # This job will fail if this is not the case. - is_pull_request_assigned: - needs: [is_pull_request_a_draft] + if_not_unassigned_pull_request: + needs: [is_not_draft_pull_request] runs-on: ubuntu-22.04 steps: - - name: Check that the PR is assigned + - name: If this is a PR, Check that it is assigned run: | - echo "There are ${{ needs.is_pull_request_a_draft.outputs.NUM_ASSIGNEES }} on this PR." 
- ${{ needs.is_pull_request_a_draft.outputs.NUM_ASSIGNEES > 0 }} + if [[ ${{github.event_name}} != 'pull_request' ]]; then exit 0 ; fi + pr_json=$(curl -H "Accept: application/vnd.github+json" https://api.github.com/repos/${{ github.repository }}/pulls/${{ github.event.number }}) + NUM_ASSIGNEES=$(echo ${pr_json} | jq '.assignees | length') + echo "There are ${NUM_ASSIGNEES} assignees on this PR." + if [[ $NUM_ASSIGNEES == 0 ]]; then exit 1 ; fi # Validates that the PR is still pointing to the HEAD of the main branch of the submodules repositories. # (There are exceptions, read the script about those). are_submodules_in_sync: - needs: [is_pull_request_a_draft] + needs: [is_not_draft_pull_request] runs-on: ubuntu-22.04 steps: # The integrated test submodule repository contains large data (using git lfs). @@ -82,7 +88,7 @@ jobs: check_code_style_and_documentation: name: ${{ matrix.name }} - needs: [is_pull_request_a_draft] + needs: [is_not_draft_pull_request] strategy: fail-fast : false matrix: @@ -97,7 +103,7 @@ jobs: with: BUILD_AND_TEST_CLI_ARGS: ${{ matrix.BUILD_AND_TEST_ARGS }} CMAKE_BUILD_TYPE: Release - DOCKER_IMAGE_TAG: ${{ needs.is_pull_request_a_draft.outputs.DOCKER_IMAGE_TAG }} + DOCKER_IMAGE_TAG: ${{ needs.is_not_draft_pull_request.outputs.DOCKER_IMAGE_TAG }} DOCKER_REPOSITORY: geosx/ubuntu20.04-gcc9 RUNS_ON: ubuntu-22.04 USE_SCCACHE: false @@ -106,7 +112,7 @@ jobs: # Those are quite fast and can efficiently benefit from the `sccache' tool to make them even faster. cpu_builds: name: ${{ matrix.name }} - needs: [is_pull_request_a_draft] + needs: [is_not_draft_pull_request] strategy: # In-progress jobs will not be cancelled if there is a failure fail-fast : false @@ -161,7 +167,7 @@ jobs: uses: ./.github/workflows/build_and_test.yml with: CMAKE_BUILD_TYPE: ${{ matrix.CMAKE_BUILD_TYPE }} - DOCKER_IMAGE_TAG: ${{ needs.is_pull_request_a_draft.outputs.DOCKER_IMAGE_TAG }} + DOCKER_IMAGE_TAG: ${{ needs.is_not_draft_pull_request.outputs.DOCKER_IMAGE_TAG }} DOCKER_REPOSITORY: ${{ matrix.DOCKER_REPOSITORY }} ENABLE_HYPRE: ${{ matrix.ENABLE_HYPRE }} ENABLE_TRILINOS: ${{ matrix.ENABLE_TRILINOS }} @@ -174,29 +180,45 @@ jobs: # Note: The integrated tests are optional and are (for the moment) run for convenience only. 
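Note on the coverage flag wired into build_and_test.yml above: the added step expands the boolean directly ("if ${{ inputs.CODE_COVERAGE }} == 'true'; then"), which only works because the expansion yields the shell builtins true/false. A quoted string comparison is the more defensive spelling; a minimal sketch, not part of the patch:

    # Hedged sketch: compare the templated input as a quoted string,
    # so the test remains a plain [[ ... ]] comparison whatever it expands to.
    if [[ "${{ inputs.CODE_COVERAGE }}" == "true" ]]; then
      script_args+=(--code-coverage)
    fi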
run_integrated_tests: needs: - - is_pull_request_a_draft + - is_not_draft_pull_request - cpu_builds - if: "${{ contains( fromJSON( needs.is_pull_request_a_draft.outputs.LABELS ), 'ci: run integrated tests') }}" + if: "${{ contains( fromJSON( needs.is_not_draft_pull_request.outputs.LABELS ), 'ci: run integrated tests') }}" uses: ./.github/workflows/build_and_test.yml secrets: inherit with: BUILD_AND_TEST_CLI_ARGS: --build-exe-only BUILD_TYPE: integrated_tests CMAKE_BUILD_TYPE: Release - DOCKER_IMAGE_TAG: ${{ needs.is_pull_request_a_draft.outputs.DOCKER_IMAGE_TAG }} + DOCKER_IMAGE_TAG: ${{ needs.is_not_draft_pull_request.outputs.DOCKER_IMAGE_TAG }} DOCKER_REPOSITORY: geosx/ubuntu22.04-gcc11 ENABLE_HYPRE: ON ENABLE_TRILINOS: OFF GCP_BUCKET: geosx/integratedTests RUNS_ON: ubuntu-22.04 - + + code_coverage: + needs: + - is_not_draft_pull_request + uses: ./.github/workflows/build_and_test.yml + secrets: inherit + with: + BUILD_AND_TEST_CLI_ARGS: "--no-run-unit-tests" + CMAKE_BUILD_TYPE: Debug + CODE_COVERAGE: true + DOCKER_IMAGE_TAG: ${{ needs.is_not_draft_pull_request.outputs.DOCKER_IMAGE_TAG }} + DOCKER_REPOSITORY: geosx/ubuntu22.04-gcc11 + ENABLE_HYPRE: ON + ENABLE_TRILINOS: OFF + GCP_BUCKET: geosx/ubuntu22.04-gcc11 + RUNS_ON: ubuntu-22.04 + # If the 'ci: ready to be merged' PR label is found, the cuda jobs run immediately along side linux jobs. # Note: CUDA jobs should only be run if PR is ready to merge. cuda_builds: name: ${{ matrix.name }} needs: - - is_pull_request_a_draft - if: "${{ contains( fromJSON( needs.is_pull_request_a_draft.outputs.LABELS ), 'ci: ready to be merged') }}" + - is_not_draft_pull_request + if: "${{ contains( fromJSON( needs.is_not_draft_pull_request.outputs.LABELS ), 'ci: ready to be merged') }}" strategy: # In-progress jobs will not be cancelled if there is a failure fail-fast : false @@ -237,7 +259,7 @@ jobs: with: BUILD_AND_TEST_CLI_ARGS: ${{ matrix.BUILD_AND_TEST_CLI_ARGS }} CMAKE_BUILD_TYPE: ${{ matrix.CMAKE_BUILD_TYPE }} - DOCKER_IMAGE_TAG: ${{ needs.is_pull_request_a_draft.outputs.DOCKER_IMAGE_TAG }} + DOCKER_IMAGE_TAG: ${{ needs.is_not_draft_pull_request.outputs.DOCKER_IMAGE_TAG }} DOCKER_REPOSITORY: ${{ matrix.DOCKER_REPOSITORY }} ENABLE_HYPRE_DEVICE: ${{ matrix.ENABLE_HYPRE_DEVICE }} ENABLE_HYPRE: ${{ matrix.ENABLE_HYPRE }} @@ -250,7 +272,7 @@ jobs: check_that_all_jobs_succeeded: runs-on: ubuntu-22.04 needs: - - is_pull_request_assigned + - if_not_unassigned_pull_request - are_submodules_in_sync - check_code_style_and_documentation - cpu_builds @@ -259,7 +281,7 @@ jobs: steps: - run: | ${{ - needs.is_pull_request_assigned.result == 'success' && + needs.if_not_unassigned_pull_request.result == 'success' && needs.are_submodules_in_sync.result == 'success' && needs.check_code_style_and_documentation.result == 'success' && needs.cpu_builds.result == 'success' && diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f905724ba95..033f29acf68 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,6 +21,10 @@ stages: - git submodule set-url integratedTests git@github.com:GEOS-DEV/integratedTests.git - git submodule set-url src/coreComponents/fileIO/coupling/hdf5_interface https://github.com/GEOS-DEV/hdf5_interface.git + # Clean up directory and submodules when pre-initialized from previous CI run + - git clean -x -f -d + - git submodule foreach --recursive git clean -x -f -d + # Update submodules - git submodule update --init --recursive src/cmake/blt - git submodule update --init --recursive src/coreComponents/LvArray diff --git a/README.md b/README.md index 
f4561b7b60c..ee7671ba61e 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,7 @@ [![DOI](https://zenodo.org/badge/131810578.svg)](https://zenodo.org/badge/latestdoi/131810578) +[![codecov](https://codecov.io/github/GEOS-DEV/GEOS/graph/badge.svg?token=0VTEHPQG58)](https://codecov.io/github/GEOS-DEV/GEOS) +![CI](https://github.com/GEOS-DEV/GEOS/actions/workflows/ci_tests.yml/badge.svg) +[![docs](https://readthedocs.com/projects/geosx-geosx/badge/?version=latest)](https://geosx-geosx.readthedocs-hosted.com/en/latest/) Welcome to the GEOS project! ----------------------------- diff --git a/host-configs/LBL/cori-gcc@8.1.0.cmake b/host-configs/LBL/cori-gcc@8.1.0.cmake index 8b46e4e6a08..8b6f5af6517 100644 --- a/host-configs/LBL/cori-gcc@8.1.0.cmake +++ b/host-configs/LBL/cori-gcc@8.1.0.cmake @@ -24,7 +24,6 @@ set(MPI_Fortran_COMPILER ${MPI_HOME}/bin/mpifort CACHE PATH "") set(MPIEXEC /usr/bin/srun CACHE PATH "") set(MPIEXEC_NUMPROC_FLAG "-n" CACHE STRING "") -set(GEOSX_ENABLE_FPE OFF CACHE BOOL "" FORCE) set(GEOSX_TPL_DIR "/global/project/projectdirs/m1411/GEOSX/tpls/install-cori-gcc\@8.1.0-release-24-07-20" CACHE PATH "" ) diff --git a/host-configs/LBL/cori-intel.cmake b/host-configs/LBL/cori-intel.cmake index 87b03329eed..85044651337 100644 --- a/host-configs/LBL/cori-intel.cmake +++ b/host-configs/LBL/cori-intel.cmake @@ -24,7 +24,6 @@ set(MPI_Fortran_COMPILER "ftn" CACHE PATH "" FORCE) set(MPIEXEC "/usr/bin/srun" CACHE PATH "") set(MPIEXEC_NUMPROC_FLAG "-n" CACHE STRING "") -set(GEOSX_ENABLE_FPE OFF CACHE BOOL "" FORCE) set(GEOSX_TPL_DIR "/global/project/projectdirs/m1411/GEOSX/tpls/install-cori-intel-release-22-07-20" CACHE PATH "" ) set(GEOSX_LINK_PREPEND_FLAG "-Wl,--whole-archive" CACHE STRING "" FORCE) diff --git a/host-configs/TOTAL/pangea3-gcc8.4.1-openmpi-4.1.2.cmake b/host-configs/TOTAL/pangea3-gcc8.4.1-openmpi-4.1.2.cmake index ee75f9079b7..c2b8e96c328 100644 --- a/host-configs/TOTAL/pangea3-gcc8.4.1-openmpi-4.1.2.cmake +++ b/host-configs/TOTAL/pangea3-gcc8.4.1-openmpi-4.1.2.cmake @@ -1,7 +1,5 @@ # hostconfig for pangea3 # -# export MODULEPATH=/workrd/SCR/NUM/geosx_num/module_files:$MODULEPATH -# module load cmake/3.21.4 gcc/8.4.1 cuda/11.0.3 ompi/4.1.2 openblas/0.3.18 python4geosx/p3/gcc8.4.1-ompi4.1.2 # set(CONFIG_NAME "pangea3-gcc8.4.1-ompi-4.1.2" CACHE PATH "") @@ -56,7 +54,7 @@ if (DEFINED ENV{CUDA_ROOT}) set(CMAKE_CUDA_COMPILER ${CUDA_TOOLKIT_ROOT_DIR}/bin/nvcc CACHE STRING "") set(CUDA_ARCH sm_70 CACHE STRING "") set(CMAKE_CUDA_ARCHITECTURES 70 CACHE STRING "") - set(CMAKE_CUDA_FLAGS "-restrict -arch ${CUDA_ARCH} --expt-extended-lambda -Werror cross-execution-space-call,reorder,deprecated-declarations" CACHE STRING "") + set(CMAKE_CUDA_FLAGS "-restrict -arch ${CUDA_ARCH} --expt-relaxed-constexpr --expt-extended-lambda -Werror cross-execution-space-call,reorder,deprecated-declarations" CACHE STRING "") set(CMAKE_CUDA_FLAGS_RELEASE "-O3 -DNDEBUG -Xcompiler -DNDEBUG -Xcompiler -O3 -Xcompiler -mcpu=powerpc64le -Xcompiler -mtune=powerpc64le" CACHE STRING "") set(CMAKE_CUDA_FLAGS_RELWITHDEBINFO "-g -lineinfo ${CMAKE_CUDA_FLAGS_RELEASE}" CACHE STRING "") set(CMAKE_CUDA_FLAGS_DEBUG "-g -G -O0 -Xcompiler -O0" CACHE STRING "") @@ -64,7 +62,7 @@ if (DEFINED ENV{CUDA_ROOT}) # Uncomment this line to make nvcc output register usage for each kernel. 
# set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --resource-usage" CACHE STRING "" FORCE) else() - message(FATAL_ERROR "You must have CUDA_ROOT environment variable set, we advise loading module cuda/11.0.3") + message(FATAL_ERROR "You must have CUDA_ROOT environment variable set, we advise loading module cuda/11.5.0") endif() # GTEST options @@ -108,7 +106,7 @@ set(ENABLE_PETSC OFF CACHE BOOL "") set(ENABLE_HYPRE ON CACHE BOOL "") set(ENABLE_HYPRE_DEVICE "CUDA" CACHE BOOL "") -# activate workaround for fmt formatter -set(ENABLE_FMT_CONST_FORMATTER_WORKAROUND ON CACHE BOOL "") +# disable benchmarks, they are incompatible with P3's nvcc version (cuda 11.5.0) +set(ENABLE_BENCHMARKS OFF CACHE BOOL "") include( ${CMAKE_CURRENT_LIST_DIR}/../tpls.cmake ) diff --git a/inputFiles/compositionalMultiphaseFlow/4comp_2ph_1d.xml b/inputFiles/compositionalMultiphaseFlow/4comp_2ph_1d.xml index e2bbab3190d..556e49fdea4 100644 --- a/inputFiles/compositionalMultiphaseFlow/4comp_2ph_1d.xml +++ b/inputFiles/compositionalMultiphaseFlow/4comp_2ph_1d.xml @@ -7,6 +7,8 @@ logLevel="1" discretization="fluidTPFA" targetRegions="{ Region1 }" + initialDt="1e5" + targetFlowCFL="2" temperature="297.15"> + timeFrequency="5e6" + target="/Outputs/vtkOutput"/> - - - - - + diff --git a/inputFiles/compositionalMultiphaseFlow/co2_flux_3d.xml b/inputFiles/compositionalMultiphaseFlow/co2_flux_3d.xml index f3334b3e9d4..e0b3cecde11 100644 --- a/inputFiles/compositionalMultiphaseFlow/co2_flux_3d.xml +++ b/inputFiles/compositionalMultiphaseFlow/co2_flux_3d.xml @@ -119,6 +119,7 @@ @@ -23,7 +24,7 @@ @@ -99,6 +100,11 @@ + + + diff --git a/inputFiles/compositionalMultiphaseFlow/pvdo.txt b/inputFiles/compositionalMultiphaseFlow/pvdo.txt index df0be305c70..ce761452b11 100644 --- a/inputFiles/compositionalMultiphaseFlow/pvdo.txt +++ b/inputFiles/compositionalMultiphaseFlow/pvdo.txt @@ -1,8 +1,8 @@ # P[Pa] Bo[m3/sm3] Visc(Pa.s) -2000000 1.02 0.000975 -5000000 1.03 0.00091 -10000000 1.04 0.00083 -20000000 1.05 0.000695 -30000000 1.07 0.000594 -40000000 1.08 0.00051 -50000000.7 1.09 0.000449 +2000000 1.09 0.000449 +5000000 1.08 0.00051 +10000000 1.07 0.000594 +20000000 1.05 0.000695 +30000000 1.04 0.00083 +40000000 1.03 0.00091 +50000000.7 1.02 0.000975 diff --git a/inputFiles/compositionalMultiphaseWell/benchmarks/Class09Pb3/class09_pb3_benchmark.xml b/inputFiles/compositionalMultiphaseWell/benchmarks/Class09Pb3/class09_pb3_benchmark.xml index 95b03293b68..d0c07591e87 100644 --- a/inputFiles/compositionalMultiphaseWell/benchmarks/Class09Pb3/class09_pb3_benchmark.xml +++ b/inputFiles/compositionalMultiphaseWell/benchmarks/Class09Pb3/class09_pb3_benchmark.xml @@ -14,7 +14,7 @@ diff --git a/inputFiles/hydraulicFracturing/kgdViscosityDominated_poroelastic_base.xml b/inputFiles/hydraulicFracturing/kgdViscosityDominated_poroelastic_base.xml index 85e24c5015f..76c2d059736 100644 --- a/inputFiles/hydraulicFracturing/kgdViscosityDominated_poroelastic_base.xml +++ b/inputFiles/hydraulicFracturing/kgdViscosityDominated_poroelastic_base.xml @@ -39,7 +39,8 @@ targetRegions="{ Domain }" nodeBasedSIF="1" rockToughness="1e4" - mpiCommOrder="1"/> + mpiCommOrder="1" + isPoroelastic="1"/> diff --git a/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_benchmark.xml b/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_benchmark.xml index 166fa20cf2e..dff5dd1605c 100644 --- a/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_benchmark.xml +++ 
b/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_benchmark.xml @@ -69,7 +69,8 @@ targetRegions="{ Domain }" nodeBasedSIF="1" rockToughness="3.0e6" - mpiCommOrder="1"/> + mpiCommOrder="1" + isPoroelastic="1"/> diff --git a/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_smoke.xml b/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_smoke.xml index 9959607e44a..bab9396dd71 100644 --- a/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_smoke.xml +++ b/inputFiles/hydraulicFracturing/pennyShapedToughnessDominated_poroelastic_smoke.xml @@ -44,7 +44,8 @@ targetRegions="{ Domain }" nodeBasedSIF="1" rockToughness="3.0e6" - mpiCommOrder="1"/> + mpiCommOrder="1" + isPoroelastic="1"/> diff --git a/inputFiles/hydraulicFracturing/pennyShapedViscosityDominated_poroelastic_smoke.xml b/inputFiles/hydraulicFracturing/pennyShapedViscosityDominated_poroelastic_smoke.xml index a330b0c1102..f162d0da562 100644 --- a/inputFiles/hydraulicFracturing/pennyShapedViscosityDominated_poroelastic_smoke.xml +++ b/inputFiles/hydraulicFracturing/pennyShapedViscosityDominated_poroelastic_smoke.xml @@ -44,7 +44,8 @@ targetRegions="{ Domain }" nodeBasedSIF="1" rockToughness="0.3e6" - mpiCommOrder="1"/> + mpiCommOrder="1" + isPoroelastic="1"/> diff --git a/inputFiles/hydraulicFracturing/pknViscosityDominated_poroelastic_smoke.xml b/inputFiles/hydraulicFracturing/pknViscosityDominated_poroelastic_smoke.xml index e78204ea5d2..1d2262b7a46 100644 --- a/inputFiles/hydraulicFracturing/pknViscosityDominated_poroelastic_smoke.xml +++ b/inputFiles/hydraulicFracturing/pknViscosityDominated_poroelastic_smoke.xml @@ -14,7 +14,7 @@ flowSolverName="SinglePhaseFlow" surfaceGeneratorName="SurfaceGen" logLevel="1" - targetRegions="{ Fracture }" + targetRegions="{ Domain, Fracture }" contactRelationName="fractureContact" maxNumResolves="5" initialDt="0.1"> @@ -44,7 +44,8 @@ targetRegions="{ Domain }" nodeBasedSIF="1" rockToughness="0.1e6" - mpiCommOrder="1"/> + mpiCommOrder="1" + isPoroelastic="1"/> diff --git a/inputFiles/multiphaseFlowFractures/deadoil_3ph_corey_2d_impermeableFault.xml b/inputFiles/multiphaseFlowFractures/deadoil_3ph_corey_2d_impermeableFault.xml index 3b037407799..3ba4ba2a899 100644 --- a/inputFiles/multiphaseFlowFractures/deadoil_3ph_corey_2d_impermeableFault.xml +++ b/inputFiles/multiphaseFlowFractures/deadoil_3ph_corey_2d_impermeableFault.xml @@ -15,6 +15,7 @@ targetRegions="{ Domain, Fracture }" initialDt="1e4"> + file="../../../GEOSDATA/DataSets/mandelMeshes/mandel_prism6_0430_cells.vtu"/> diff --git a/inputFiles/poromechanics/PoroElastic_staircase_co2_3d_sequential.xml b/inputFiles/poromechanics/PoroElastic_staircase_co2_3d_sequential.xml index eef1b2b0eb9..2775e1326bf 100755 --- a/inputFiles/poromechanics/PoroElastic_staircase_co2_3d_sequential.xml +++ b/inputFiles/poromechanics/PoroElastic_staircase_co2_3d_sequential.xml @@ -13,7 +13,7 @@ solidSolverName="linearElasticity" reservoirAndWellsSolverName="reservoirAndWells" logLevel="1" - targetRegions="{ channel, barrier, wellRegion1, wellRegion2 }"> + targetRegions="{ channel, barrier }"> + targetRegions="{ channel, barrier }"> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/PoroViscoDruckerPrager_smoke.xml b/inputFiles/poromechanics/PoroViscoDruckerPrager_smoke.xml new file mode 100644 index 00000000000..892d3a9fa74 
--- /dev/null +++ b/inputFiles/poromechanics/PoroViscoDruckerPrager_smoke.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/PoroViscoExtendedDruckerPrager_base.xml b/inputFiles/poromechanics/PoroViscoExtendedDruckerPrager_base.xml new file mode 100644 index 00000000000..072aa138f0a --- /dev/null +++ b/inputFiles/poromechanics/PoroViscoExtendedDruckerPrager_base.xml @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/PoroViscoExtendedDruckerPrager_smoke.xml b/inputFiles/poromechanics/PoroViscoExtendedDruckerPrager_smoke.xml new file mode 100644 index 00000000000..4e81b81c03b --- /dev/null +++ b/inputFiles/poromechanics/PoroViscoExtendedDruckerPrager_smoke.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/PoroViscoModifiedCamClay_base.xml b/inputFiles/poromechanics/PoroViscoModifiedCamClay_base.xml new file mode 100644 index 00000000000..b48274d0c7a --- /dev/null +++ b/inputFiles/poromechanics/PoroViscoModifiedCamClay_base.xml @@ -0,0 +1,228 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/PoroViscoModifiedCamClay_smoke.xml b/inputFiles/poromechanics/PoroViscoModifiedCamClay_smoke.xml new file mode 100644 index 00000000000..8a0581c357c --- /dev/null +++ b/inputFiles/poromechanics/PoroViscoModifiedCamClay_smoke.xml @@ -0,0 +1,52 @@ + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/nonlinearAcceleration/smallEggModel/smallEggModel.xml b/inputFiles/poromechanics/nonlinearAcceleration/smallEggModel/smallEggModel.xml new file mode 100755 index 00000000000..a8259a6e66d --- /dev/null +++ b/inputFiles/poromechanics/nonlinearAcceleration/smallEggModel/smallEggModel.xml @@ -0,0 +1,296 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanics/nonlinearAcceleration/validationCase/validationCase.xml b/inputFiles/poromechanics/nonlinearAcceleration/validationCase/validationCase.xml new file mode 100755 index 00000000000..af9a6c20b5c --- /dev/null +++ b/inputFiles/poromechanics/nonlinearAcceleration/validationCase/validationCase.xml @@ -0,0 +1,503 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_large.xml b/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_large.xml index 4c1baeb73dd..7b99a61756a 100644 --- a/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_large.xml +++ b/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_large.xml @@ -67,7 +67,7 @@ diff --git a/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_small.xml 
b/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_small.xml index a065fc3d739..a523346a32e 100644 --- a/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_small.xml +++ b/inputFiles/poromechanicsFractures/PoroElastic_efem-edfm_eggModel_small.xml @@ -67,7 +67,7 @@ diff --git a/inputFiles/singlePhaseFlow/3D_10x10x10_thermalCompressible_base.xml b/inputFiles/singlePhaseFlow/3D_10x10x10_thermalCompressible_base.xml index 44920972449..33608531c56 100644 --- a/inputFiles/singlePhaseFlow/3D_10x10x10_thermalCompressible_base.xml +++ b/inputFiles/singlePhaseFlow/3D_10x10x10_thermalCompressible_base.xml @@ -80,7 +80,7 @@ compressibility="5e-10" thermalExpansionCoeff="3e-4" viscosibility="0.0" - volumetricHeatCapacity="4.0e3" + specificHeatCapacity="4.0e3" referenceInternalEnergy="1.1e6"/> diff --git a/inputFiles/thermalMultiphaseFlow/co2_thermal_2d.xml b/inputFiles/thermalMultiphaseFlow/co2_thermal_2d.xml index 75b8298e2dd..19b770ede18 100644 --- a/inputFiles/thermalMultiphaseFlow/co2_thermal_2d.xml +++ b/inputFiles/thermalMultiphaseFlow/co2_thermal_2d.xml @@ -113,6 +113,7 @@ + defaultDrainedLinearTEC="3e-7"/> @@ -43,7 +43,7 @@ name="rockPorosity" grainBulkModulus="1.0e27" defaultReferencePorosity="0.2" - defaultThermalExpansionCoefficient="3e-7"/> + defaultPorosityTEC="3e-7"/> @@ -64,7 +64,7 @@ compressibility="0.0" thermalExpansionCoeff="0.0" viscosibility="0.0" - volumetricHeatCapacity="1.672e2" + specificHeatCapacity="1.672e2" referenceInternalEnergy="0.001"/> diff --git a/inputFiles/thermoPoromechanics/ThermoPoroElastic_staircase_co2_smoke.xml b/inputFiles/thermoPoromechanics/ThermoPoroElastic_staircase_co2_smoke.xml index 4b4f424cd89..97101133740 100644 --- a/inputFiles/thermoPoromechanics/ThermoPoroElastic_staircase_co2_smoke.xml +++ b/inputFiles/thermoPoromechanics/ThermoPoroElastic_staircase_co2_smoke.xml @@ -165,7 +165,7 @@ defaultDensity="2650" defaultBulkModulus="5.e9" defaultPoissonRatio="0.25" - defaultThermalExpansionCoefficient="1e-5"/> + defaultDrainedLinearTEC="1e-5"/> @@ -173,7 +173,7 @@ name="rockPorosityChannel" grainBulkModulus="1.0e27" defaultReferencePorosity="0.2" - defaultThermalExpansionCoefficient="1e-5"/> + defaultPorosityTEC="1e-5"/> + defaultDrainedLinearTEC="1e-5"/> @@ -194,7 +194,7 @@ name="rockPorosityBarrier" grainBulkModulus="1.0e27" defaultReferencePorosity="0.05" - defaultThermalExpansionCoefficient="1e-5"/> + defaultPorosityTEC="1e-5"/> @@ -240,7 +240,7 @@ compressibility="1.0e-10" thermalExpansionCoeff="0.0" viscosibility="0.0" - volumetricHeatCapacity="1.672e2" + specificHeatCapacity="1.672e2" referenceInternalEnergy="0.001"/> @@ -55,7 +55,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="acousticDensity" scale="1.0" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/acous3D_Q3_small_base.xml b/inputFiles/wavePropagation/acous3D_Q3_small_base.xml index 9b8ec3c4e95..26f0631f8fa 100644 --- a/inputFiles/wavePropagation/acous3D_Q3_small_base.xml +++ b/inputFiles/wavePropagation/acous3D_Q3_small_base.xml @@ -63,7 +63,7 @@ name="cellVelocity" initialCondition="1" objectPath="mesh/FE2/ElementRegions/Region/cb" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="1500" setNames="{ all }"/> @@ -71,7 +71,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE2/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="acousticDensity" scale="1" setNames="{ all }"/> diff --git 
a/inputFiles/wavePropagation/acous3D_Q5_abc_smoke.xml b/inputFiles/wavePropagation/acous3D_Q5_abc_smoke.xml index b221f455741..bd39714adc3 100644 --- a/inputFiles/wavePropagation/acous3D_Q5_abc_smoke.xml +++ b/inputFiles/wavePropagation/acous3D_Q5_abc_smoke.xml @@ -2,7 +2,7 @@ - + diff --git a/inputFiles/wavePropagation/acous3D_Q5_firstOrder_small_base.xml b/inputFiles/wavePropagation/acous3D_Q5_firstOrder_small_base.xml index 6f88e1e2e20..32018f54c95 100644 --- a/inputFiles/wavePropagation/acous3D_Q5_firstOrder_small_base.xml +++ b/inputFiles/wavePropagation/acous3D_Q5_firstOrder_small_base.xml @@ -47,7 +47,7 @@ name="cellVelocity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="1500.0" setNames="{ all }"/> @@ -55,7 +55,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="acousticDensity" scale="1.0" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/acous3D_Q5_small_base.xml b/inputFiles/wavePropagation/acous3D_Q5_small_base.xml index 1ecc941b4d7..0ad60b621bf 100644 --- a/inputFiles/wavePropagation/acous3D_Q5_small_base.xml +++ b/inputFiles/wavePropagation/acous3D_Q5_small_base.xml @@ -63,7 +63,7 @@ name="cellVelocity" initialCondition="1" objectPath="mesh/FE2/ElementRegions/Region/cb" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="1500" setNames="{ all }"/> @@ -71,8 +71,8 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE2/ElementRegions/Region/cb" - fieldName="mediumDensity" - scale="1500" + fieldName="acousticDensity" + scale="1" setNames="{ all }"/> @@ -96,7 +96,7 @@ @@ -55,7 +55,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="acousticDensity" scale="1.0" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/acous3D_pml_smoke.xml b/inputFiles/wavePropagation/acous3D_pml_smoke.xml index 8d94dd3b873..7e2b978d31d 100644 --- a/inputFiles/wavePropagation/acous3D_pml_smoke.xml +++ b/inputFiles/wavePropagation/acous3D_pml_smoke.xml @@ -181,7 +181,7 @@ name="cellVelocity1" initialCondition="1" objectPath="ElementRegions/interiorDomain" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="2000" setNames="{ all }"/> @@ -189,7 +189,7 @@ name="cellDensity1" initialCondition="1" objectPath="ElementRegions/interiorDomain" - fieldName="mediumDensity" + fieldName="acousticDensity" scale="1" setNames="{ all }"/> @@ -198,7 +198,7 @@ name="cellVelocity2" initialCondition="1" objectPath="ElementRegions/pmlDomain" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="2000" setNames="{ all }"/> @@ -206,7 +206,7 @@ name="cellDensity2" initialCondition="1" objectPath="ElementRegions/pmlDomain" - fieldName="mediumDensity" + fieldName="acousticDensity" scale="1" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/acous3D_small_base.xml b/inputFiles/wavePropagation/acous3D_small_base.xml index 6b611740154..694ced3e1e0 100644 --- a/inputFiles/wavePropagation/acous3D_small_base.xml +++ b/inputFiles/wavePropagation/acous3D_small_base.xml @@ -63,7 +63,7 @@ name="cellVelocity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="1500" setNames="{ all }"/> @@ -71,7 +71,7 @@ name="cellDensity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="acousticDensity" 
scale="1" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/acous3D_vti_smoke.xml b/inputFiles/wavePropagation/acous3D_vti_smoke.xml index c3da486c9b3..ef9127fe278 100644 --- a/inputFiles/wavePropagation/acous3D_vti_smoke.xml +++ b/inputFiles/wavePropagation/acous3D_vti_smoke.xml @@ -162,7 +162,7 @@ name="cellVelocity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="1500" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/acouselas3D_Q2_abc_smoke.xml b/inputFiles/wavePropagation/acouselas3D_Q2_abc_smoke.xml new file mode 100644 index 00000000000..ddbc4a0e177 --- /dev/null +++ b/inputFiles/wavePropagation/acouselas3D_Q2_abc_smoke.xml @@ -0,0 +1,154 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/wavePropagation/benchmarks/acous3D_benchmark_base.xml b/inputFiles/wavePropagation/benchmarks/acous3D_benchmark_base.xml index 3ae51015bdb..d8a475ea54f 100644 --- a/inputFiles/wavePropagation/benchmarks/acous3D_benchmark_base.xml +++ b/inputFiles/wavePropagation/benchmarks/acous3D_benchmark_base.xml @@ -84,7 +84,7 @@ name="initialPressure" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="pressure_n" scale="0.0"/> @@ -92,7 +92,7 @@ name="initialPressure_nm1" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="pressure_nm1" scale="0.0"/> @@ -100,8 +100,8 @@ diff --git a/inputFiles/wavePropagation/benchmarks/acouselas3D.xml b/inputFiles/wavePropagation/benchmarks/acouselas3D.xml new file mode 100644 index 00000000000..7ee7cc3e86e --- /dev/null +++ b/inputFiles/wavePropagation/benchmarks/acouselas3D.xml @@ -0,0 +1,215 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/inputFiles/wavePropagation/benchmarks/elas3D_benchmark_base.xml b/inputFiles/wavePropagation/benchmarks/elas3D_benchmark_base.xml index a1759a7b731..fc3b967238b 100644 --- a/inputFiles/wavePropagation/benchmarks/elas3D_benchmark_base.xml +++ b/inputFiles/wavePropagation/benchmarks/elas3D_benchmark_base.xml @@ -118,7 +118,7 @@ name="initialdisplacementnx" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="displacementx_n" scale="0.0"/> @@ -126,7 +126,7 @@ name="initialdisplacementny" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="displacementy_n" scale="0.0"/> @@ -134,7 +134,7 @@ name="initialdisplacementnz" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="displacementz_n" scale="0.0"/> @@ -142,7 +142,7 @@ name="initialdisplacementnm1x" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="displacementx_nm1" scale="0.0"/> @@ -150,7 +150,7 @@ name="initialdisplacementnm1y" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="displacementy_nm1" scale="0.0"/> @@ -158,7 +158,7 @@ name="initialdisplacementnm1z" initialCondition="1" setNames="{ all }" - objectPath="nodeManager" + objectPath="mesh/FE1/nodeManager" fieldName="displacementz_nm1" scale="0.0"/> @@ -167,24 +167,24 @@ @@ -220,7 +220,8 @@ + levelNames="{ FE1 }" + plotLevel="3"/> @@ -141,7 +141,7 @@ 
name="cellVelocityVs" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="2000" setNames="{ all }"/> @@ -149,7 +149,7 @@ name="cellDensity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="2000" setNames="{ all }"/> @@ -158,6 +158,7 @@ diff --git a/inputFiles/wavePropagation/elas3D_Q3_firstOrder_small_base.xml b/inputFiles/wavePropagation/elas3D_Q3_firstOrder_small_base.xml index e3fca7e9744..9c833d44be1 100644 --- a/inputFiles/wavePropagation/elas3D_Q3_firstOrder_small_base.xml +++ b/inputFiles/wavePropagation/elas3D_Q3_firstOrder_small_base.xml @@ -64,7 +64,7 @@ name="cellVelocityVp" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVp" + fieldName="elasticVelocityVp" scale="1500" setNames="{ all }"/> @@ -72,7 +72,7 @@ name="cellVelocityVs" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="1060" setNames="{ all }"/> @@ -80,7 +80,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="1" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/elas3D_Q3_small_base.xml b/inputFiles/wavePropagation/elas3D_Q3_small_base.xml index 04155600218..c8e1063beb9 100644 --- a/inputFiles/wavePropagation/elas3D_Q3_small_base.xml +++ b/inputFiles/wavePropagation/elas3D_Q3_small_base.xml @@ -147,7 +147,7 @@ name="cellVelocityVp" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVp" + fieldName="elasticVelocityVp" scale="1500" setNames="{ all }"/> @@ -155,7 +155,7 @@ name="cellVelocityVs" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="1060" setNames="{ all }"/> @@ -163,7 +163,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="1" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/elas3D_Q5_firstOrder_small_base.xml b/inputFiles/wavePropagation/elas3D_Q5_firstOrder_small_base.xml index 2e215fd1406..b360569bb0e 100644 --- a/inputFiles/wavePropagation/elas3D_Q5_firstOrder_small_base.xml +++ b/inputFiles/wavePropagation/elas3D_Q5_firstOrder_small_base.xml @@ -64,7 +64,7 @@ name="cellVelocityVp" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVp" + fieldName="elasticVelocityVp" scale="1500" setNames="{ all }"/> @@ -72,7 +72,7 @@ name="cellVelocityVs" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="1060" setNames="{ all }"/> @@ -80,7 +80,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="1" setNames="{ all }"/> @@ -99,17 +99,17 @@ @@ -155,7 +155,7 @@ name="cellVelocityVs" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="1060" setNames="{ all }"/> @@ -163,7 +163,7 @@ name="cellDensity" initialCondition="1" objectPath="mesh/FE1/ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="1" setNames="{ all }"/> diff --git 
a/inputFiles/wavePropagation/elas3D_firstOrder_small_base.xml b/inputFiles/wavePropagation/elas3D_firstOrder_small_base.xml index 8833ceb5454..7c3fa5783a6 100644 --- a/inputFiles/wavePropagation/elas3D_firstOrder_small_base.xml +++ b/inputFiles/wavePropagation/elas3D_firstOrder_small_base.xml @@ -64,7 +64,7 @@ name="cellVelocityVp" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocityVp" + fieldName="elasticVelocityVp" scale="1500" setNames="{ all }"/> @@ -72,7 +72,7 @@ name="cellVelocityVs" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="1060" setNames="{ all }"/> @@ -80,7 +80,7 @@ name="cellDensity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="1" setNames="{ all }"/> diff --git a/inputFiles/wavePropagation/elas3D_small_base.xml b/inputFiles/wavePropagation/elas3D_small_base.xml index 0ed1a918810..2f79d894055 100644 --- a/inputFiles/wavePropagation/elas3D_small_base.xml +++ b/inputFiles/wavePropagation/elas3D_small_base.xml @@ -154,7 +154,7 @@ name="cellVelocityVp" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocityVp" + fieldName="elasticVelocityVp" scale="1500" setNames="{ all }"/> @@ -162,7 +162,7 @@ name="cellVelocityVs" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocityVs" + fieldName="elasticVelocityVs" scale="1060" setNames="{ all }"/> @@ -170,7 +170,7 @@ name="cellDensity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumDensity" + fieldName="elasticDensity" scale="1" setNames="{ all }"/> @@ -198,6 +198,7 @@ diff --git a/inputFiles/wellbore/CasedThermoElasticWellbore_ImperfectInterfaces_base.xml b/inputFiles/wellbore/CasedThermoElasticWellbore_ImperfectInterfaces_base.xml index 666e35b8712..bb3a18f4cdb 100644 --- a/inputFiles/wellbore/CasedThermoElasticWellbore_ImperfectInterfaces_base.xml +++ b/inputFiles/wellbore/CasedThermoElasticWellbore_ImperfectInterfaces_base.xml @@ -256,7 +256,7 @@ compressibility="5e-10" thermalExpansionCoeff="1e-10" viscosibility="0.0" - volumetricHeatCapacity="1" + specificHeatCapacity="1" referenceInternalEnergy="1"/> @@ -266,21 +266,21 @@ defaultDensity="7500" defaultBulkModulus="159.4202899e9" defaultShearModulus="86.61417323e9" - defaultThermalExpansionCoefficient="1.2e-5"/> + defaultDrainedLinearTEC="1.2e-5"/> + defaultDrainedLinearTEC="2.0e-5"/> + defaultDrainedLinearTEC="2.0e-5"/> diff --git a/inputFiles/wellbore/CasedThermoElasticWellbore_base.xml b/inputFiles/wellbore/CasedThermoElasticWellbore_base.xml index 94c6bf9017c..133d834fdd3 100644 --- a/inputFiles/wellbore/CasedThermoElasticWellbore_base.xml +++ b/inputFiles/wellbore/CasedThermoElasticWellbore_base.xml @@ -165,7 +165,7 @@ compressibility="5e-10" thermalExpansionCoeff="1e-10" viscosibility="0.0" - volumetricHeatCapacity="1" + specificHeatCapacity="1" referenceInternalEnergy="1"/> @@ -175,21 +175,21 @@ defaultDensity="7500" defaultBulkModulus="159.4202899e9" defaultShearModulus="86.61417323e9" - defaultThermalExpansionCoefficient="1.2e-5"/> + defaultDrainedLinearTEC="1.2e-5"/> + defaultDrainedLinearTEC="2.0e-5"/> + defaultDrainedLinearTEC="2.0e-5"/> diff --git a/inputFiles/wellbore/ThermoPoroElasticWellbore_base.xml b/inputFiles/wellbore/ThermoPoroElasticWellbore_base.xml index f264c696b33..7f2318765f8 100644 --- a/inputFiles/wellbore/ThermoPoroElasticWellbore_base.xml +++ 
b/inputFiles/wellbore/ThermoPoroElasticWellbore_base.xml @@ -67,7 +67,7 @@ name="rockPorosity" defaultReferencePorosity="0.001" grainBulkModulus="23.5e9" - defaultThermalExpansionCoefficient="4e-5"/> + defaultPorosityTEC="4e-5"/> @@ -100,7 +100,7 @@ compressibility="5e-10" thermalExpansionCoeff="3e-4" viscosibility="0.0" - volumetricHeatCapacity="1" + specificHeatCapacity="1" referenceInternalEnergy="1"/> @@ -110,7 +110,7 @@ defaultDensity="2700" defaultBulkModulus="20.7e9" defaultShearModulus="12.4e9" - defaultThermalExpansionCoefficient="4e-5"/> + defaultDrainedLinearTEC="4e-5"/> diff --git a/integratedTests b/integratedTests index 490987ae0c4..d92088aa880 160000 --- a/integratedTests +++ b/integratedTests @@ -1 +1 @@ -Subproject commit 490987ae0c4f2c0b25a889766d2f1060ffd7ace8 +Subproject commit d92088aa88028e976408dcfc75fd0e932974c90a diff --git a/scripts/ci_build_and_test_in_container.sh b/scripts/ci_build_and_test_in_container.sh index 0c504566933..a77e3fc0ae2 100755 --- a/scripts/ci_build_and_test_in_container.sh +++ b/scripts/ci_build_and_test_in_container.sh @@ -34,6 +34,8 @@ Usage: $0 Request for the build of geos only. --cmake-build-type ... One of Debug, Release, RelWithDebInfo and MinSizeRel. Forwarded to CMAKE_BUILD_TYPE. + --code-coverage + run a code build and test. --data-basename output.tar.gz If some data needs to be extracted from the build, the argument will define the tarball. Has to be a `tar.gz`. --exchange-dir /path/to/exchange @@ -64,7 +66,7 @@ exit 1 or_die cd $(dirname $0)/.. # Parsing using getopt -args=$(or_die getopt -a -o h --long build-exe-only,cmake-build-type:,data-basename:,exchange-dir:,host-config:,install-dir-basename:,no-install-schema,no-run-unit-tests,repository:,run-integrated-tests,sccache-credentials:,test-code-style,test-documentation,help -- "$@") +args=$(or_die getopt -a -o h --long build-exe-only,cmake-build-type:,code-coverage,data-basename:,exchange-dir:,host-config:,install-dir-basename:,no-install-schema,no-run-unit-tests,repository:,run-integrated-tests,sccache-credentials:,test-code-style,test-documentation,help -- "$@") # Variables with default values BUILD_EXE_ONLY=false @@ -74,6 +76,7 @@ RUN_UNIT_TESTS=true RUN_INTEGRATED_TESTS=false TEST_CODE_STYLE=false TEST_DOCUMENTATION=false +CODE_COVERAGE=false eval set -- ${args} while : @@ -101,6 +104,7 @@ do --no-run-unit-tests) RUN_UNIT_TESTS=false; shift;; --repository) GEOS_SRC_DIR=$2; shift 2;; --run-integrated-tests) RUN_INTEGRATED_TESTS=true; shift;; + --code-coverage) CODE_COVERAGE=true; shift;; --sccache-credentials) SCCACHE_CREDS=$2; shift 2;; --test-code-style) TEST_CODE_STYLE=true; shift;; --test-documentation) TEST_DOCUMENTATION=true; shift;; @@ -153,6 +157,14 @@ if [[ "${RUN_INTEGRATED_TESTS}" = true ]]; then ATS_CMAKE_ARGS="-DATS_ARGUMENTS=\"--machine openmpi --ats openmpi_mpirun=/usr/bin/mpirun --ats openmpi_args=--allow-run-as-root --ats openmpi_procspernode=2 --ats openmpi_maxprocs=2\" -DPython3_ROOT_DIR=${ATS_PYTHON_HOME}" fi + +if [[ "${CODE_COVERAGE}" = true ]]; then + or_die apt-get update + or_die apt-get install -y lcov +fi + + + # The -DBLT_MPI_COMMAND_APPEND="--allow-run-as-root;--oversubscribe" option is added for OpenMPI. # # OpenMPI prevents from running as `root` user by default. 
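A note on the coverage path requested via --code-coverage: lcov is installed above, and further down this script builds the coreComponents_coverage target (declared through BLT's blt_add_code_coverage_target when ENABLE_COVERAGE is set, see the GeosxMacros.cmake hunk below) and copies out coreComponents_coverage.info.cleaned. A coverage target of that kind typically reduces to an lcov capture-and-filter sequence along these lines (a hedged sketch; the exact commands and filter patterns are assumptions, not what BLT generates verbatim):

    # Hypothetical lcov sequence behind a *_coverage target:
    lcov --capture --directory . --output-file coreComponents_coverage.info
    lcov --remove coreComponents_coverage.info '/usr/*' --output-file coreComponents_coverage.info.cleaned
    # The .cleaned tracefile is what the script copies to geos_coverage.info.cleaned
    # and build_and_test.yml then uploads to Codecov.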
@@ -175,6 +187,7 @@ or_die python3 scripts/config-build.py \ --ninja \ -DBLT_MPI_COMMAND_APPEND='"--allow-run-as-root;--oversubscribe"' \ -DGEOSX_INSTALL_SCHEMA=${GEOSX_INSTALL_SCHEMA} \ + -DENABLE_COVERAGE=$([[ "${CODE_COVERAGE}" = true ]] && echo 1 || echo 0) \ ${SCCACHE_CMAKE_ARGS} \ ${ATS_CMAKE_ARGS} @@ -207,6 +220,16 @@ else fi fi +if [[ ! -z "${SCCACHE_CREDS}" ]]; then + echo "sccache post-build state" + or_die ${SCCACHE} --show-adv-stats +fi + +if [[ "${CODE_COVERAGE}" = true ]]; then + or_die ninja coreComponents_coverage + cp -r ${GEOSX_BUILD_DIR}/coreComponents_coverage.info.cleaned ${GEOS_SRC_DIR}/geos_coverage.info.cleaned +fi + # Run the unit tests (excluding previously ran checks). if [[ "${RUN_UNIT_TESTS}" = true ]]; then or_die ctest --output-on-failure -E "testUncrustifyCheck|testDoxygenCheck" @@ -235,11 +258,6 @@ if [[ "${RUN_INTEGRATED_TESTS}" = true ]]; then or_die gzip ${DATA_EXCHANGE_DIR}/${DATA_BASENAME_WE}.tar fi -if [[ ! -z "${SCCACHE_CREDS}" ]]; then - echo "sccache final state" - or_die ${SCCACHE} --show-adv-stats -fi - # If we're here, either everything went OK or we have to deal with the integrated tests manually. if [[ ! -z "${INTEGRATED_TEST_EXIT_STATUS+x}" ]]; then echo "Exiting the build process with exit status ${INTEGRATED_TEST_EXIT_STATUS} from the integrated tests." diff --git a/scripts/setupPythonEnvironment.bash b/scripts/setupPythonEnvironment.bash index e017b4f3cac..8732edf3130 100755 --- a/scripts/setupPythonEnvironment.bash +++ b/scripts/setupPythonEnvironment.bash @@ -3,12 +3,19 @@ # Configuration SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -PACKAGE_DIR=$SCRIPT_DIR/../src/coreComponents/python/modules -declare -a TARGET_PACKAGES=("$PACKAGE_DIR/geosx_mesh_tools_package" - "$PACKAGE_DIR/geosx_xml_tools_package" - "$PACKAGE_DIR/hdf5_wrapper_package" - "$PACKAGE_DIR/pygeosx_tools_package" - "$SCRIPT_DIR/../integratedTests/scripts/geos_ats_package") +PYTHON_TARGET= +BIN_DIR= +PACKAGE_DIR= +TMP_CLONE_DIR= +PIP_CMD="pip --disable-pip-version-check" + + +declare -a TARGET_PACKAGES=("geosx_mesh_tools_package" + "geosx_mesh_doctor" + "geosx_xml_tools_package" + "hdf5_wrapper_package" + "pygeosx_tools_package" + "geos_ats_package") declare -a LINK_SCRIPTS=("preprocess_xml" "format_xml" "convert_abaqus" @@ -19,11 +26,6 @@ declare -a LINK_SCRIPTS=("preprocess_xml" # Read input arguments -PYTHON_TARGET= -BIN_DIR= -PIP_CMD="pip --disable-pip-version-check" - - if [[ -z "${VERBOSE}" ]] then VERBOSE=false @@ -45,6 +47,10 @@ case $key in BIN_DIR="$2" shift # past argument ;; + -d|--pkg-dir) + PACKAGE_DIR="$2" + shift # past argument + ;; -v|--verbose) VERBOSE=true shift # past argument @@ -54,6 +60,7 @@ case $key in echo "Python environment setup options:" echo "-p/--python-target \"Target parent python bin\"" echo "-b/--bin-dir \"Directory to link new scripts\"" + echo "-d/--pkg-dir \"Directory containing target python packages\"" echo "-v/--verbose \"Increase verbosity level\"" echo "" exit @@ -84,18 +91,36 @@ then fi +# Check for a predefined package directory +echo "Checking for python packages..." +if [[ -z "${PACKAGE_DIR}" ]] +then + echo "Cloning the GEOS python package repository..." + TMP_CLONE_DIR=$(mktemp -d) + PACKAGE_DIR=$TMP_CLONE_DIR/geosPythonPackages + git clone --depth 1 --branch main --single-branch https://github.com/GEOS-DEV/geosPythonPackages.git $PACKAGE_DIR +elif [ ! -d "${PACKAGE_DIR}/geosx_xml_tools_package" ] +then + echo "The specified package directory does not contain the expected targets." 
+ echo "The path specified with -d/--pkg-dir should point to a copy of the geosPythonPackages repository." + exit 1 +fi + + # Install packages echo "Installing python packages..." for p in "${TARGET_PACKAGES[@]}" do - if [ -d "$p" ] + if [ -d "$PACKAGE_DIR/$p" ] then echo " $p" + + # Try installing the package if $VERBOSE - INSTALL_MSG=$($PYTHON_TARGET -m $PIP_CMD install $p) + INSTALL_MSG=$($PYTHON_TARGET -m $PIP_CMD install $PACKAGE_DIR/$p) INSTALL_RC=$? then - INSTALL_MSG=$($PYTHON_TARGET -m $PIP_CMD install $p 2>&1) + INSTALL_MSG=$($PYTHON_TARGET -m $PIP_CMD install $PACKAGE_DIR/$p 2>&1) INSTALL_RC=$? fi @@ -189,5 +214,12 @@ then fi fi + +if [[ ! -z "${TMP_CLONE_DIR}" ]] +then + rm -rf $TMP_CLONE_DIR +fi + + echo "Done!" diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index 847adc5a11f..7041bf98ff1 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -229,6 +229,65 @@ install( FILES ${CMAKE_BINARY_DIR}/schema.xsd DESTINATION ${CMAKE_INSTALL_DATAROOTDIR}/${CMAKE_PROJECT_NAME}/schema OPTIONAL) +################################ +# Add python environment setup +################################ +if ( Python3_EXECUTABLE ) + message(STATUS "Found python version ${Python3_VERSION}") + if (${Python3_VERSION} VERSION_LESS "3.6.0") + message(STATUS "Note: try setting Python3_ROOT_DIR and/or Python3_EXECUTABLE in your host config to the appropriate version.") + message( FATAL_ERROR "Building the GEOSX python tools requires Python >= 3.6." ) + endif() + + # Select the version of python to target + if( ENABLE_PYGEOSX ) + set( PYTHON_POST_EXECUTABLE ${CMAKE_BINARY_DIR}/lib/PYGEOSX/bin/python CACHE PATH "" FORCE ) + else() + set( PYTHON_POST_EXECUTABLE ${Python3_EXECUTABLE} CACHE PATH "" FORCE ) + endif() + + # Check for the virtualenv package + execute_process( + COMMAND ${Python3_EXECUTABLE} -c "import virtualenv" + RESULT_VARIABLE VIRTUALENV_AVAILABLE + ) + + if (NOT ${VIRTUALENV_AVAILABLE} EQUAL 0) + message(WARNING "The \"virtualenv\" package was not found in the target python environment (${Python3_EXECUTABLE}). 
This package may be required to build PYGEOSX or the python development environment.") + endif() + + # Build targets + set( GEOSX_PYTHON_TOOLS_BINS + "${CMAKE_BINARY_DIR}/bin/preprocess_xml" + "${CMAKE_BINARY_DIR}/bin/format_xml" ) + + add_custom_command( OUTPUT ${GEOSX_PYTHON_TOOLS_BINS} + COMMAND bash ${CMAKE_SOURCE_DIR}/../scripts/setupPythonEnvironment.bash -p ${PYTHON_POST_EXECUTABLE} -b ${CMAKE_BINARY_DIR}/bin + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + ) + + add_custom_target( geosx_python_tools + DEPENDS ${GEOSX_PYTHON_TOOLS_BINS} ) + + add_custom_target( geosx_python_tools_test + COMMAND ${CMAKE_BINARY_DIR}/python/geosx/bin/test_geosx_xml_tools + COMMAND rm -r ${CMAKE_BINARY_DIR}/python/geosx_xml_tools_tests* + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/python + DEPENDS geosx_python_tools + ) + + add_custom_target( geosx_format_all_xml_files + COMMAND bash ${CMAKE_SOURCE_DIR}/../scripts/formatXMLFiles.bash -g ${CMAKE_BINARY_DIR}/bin/format_xml ${CMAKE_SOURCE_DIR} ${CMAKE_SOURCE_DIR}/../examples + WORKING_DIRECTORY ${CMAKE_BINARY_DIR} + DEPENDS geosx_xml_tools + ) + +else() + message(WARNING "Building the GEOSX python tools requires Python >= 3.6.") + message(STATUS "If you need these, try setting Python3_ROOT_DIR and/or Python3_EXECUTABLE in your host config.") +endif() + + ################################ # Add integratedTests ################################ diff --git a/src/cmake/GeosxMacros.cmake b/src/cmake/GeosxMacros.cmake index 81264a61c57..af5105678ee 100644 --- a/src/cmake/GeosxMacros.cmake +++ b/src/cmake/GeosxMacros.cmake @@ -53,6 +53,12 @@ macro( geosx_add_code_checks ) UNCRUSTIFY_CFG_FILE ${PROJECT_SOURCE_DIR}/uncrustify.cfg ) endif() + if (ENABLE_COVERAGE) + blt_add_code_coverage_target( NAME ${arg_PREFIX}_coverage + RUNNER ctest -E 'blt_gtest_smoke|blt_mpi_smoke|testUncrustifyCheck|testDoxygenCheck' + SOURCE_DIRECTORIES ${PROJECT_SOURCE_DIR}/coreComponents ) + endif() + endmacro( geosx_add_code_checks ) ##------------------------------------------------------------------------------ diff --git a/src/cmake/GeosxOptions.cmake b/src/cmake/GeosxOptions.cmake index b1530b390cc..c0dd7d5075d 100644 --- a/src/cmake/GeosxOptions.cmake +++ b/src/cmake/GeosxOptions.cmake @@ -4,10 +4,8 @@ message( "CMAKE_SYSTEM_NAME = ${CMAKE_SYSTEM_NAME}" ) message( "CMAKE_HOST_APPLE = ${CMAKE_HOST_APPLE}" ) ### OPTIONS ### -option( GEOSX_ENABLE_FPE "" ON ) option( GEOS_ENABLE_TESTS "" ON ) - -option( ENABLE_CALIPER "" OFF ) +option( ENABLE_CALIPER "Enables Caliper instrumentation" OFF ) option( ENABLE_MATHPRESSO "" ON ) @@ -122,8 +120,8 @@ message( "CMAKE_CXX_COMPILER_ID = ${CMAKE_CXX_COMPILER_ID}" ) blt_append_custom_compiler_flag( FLAGS_VAR CMAKE_CXX_FLAGS DEFAULT "${OpenMP_CXX_FLAGS}" ) blt_append_custom_compiler_flag( FLAGS_VAR CMAKE_CXX_FLAGS - GNU "-Wall -Wextra -Wpedantic -pedantic-errors -Wshadow -Wfloat-equal -Wcast-align -Wcast-qual" - CLANG "-Wall -Wextra -Wpedantic -pedantic-errors -Wshadow -Wfloat-equal -Wno-cast-align -Wcast-qual" + GNU "-Wpedantic -pedantic-errors -Wshadow -Wfloat-equal -Wcast-align -Wcast-qual" + CLANG "-Wpedantic -pedantic-errors -Wshadow -Wfloat-equal -Wno-cast-align -Wcast-qual" ) blt_append_custom_compiler_flag( FLAGS_VAR CMAKE_CXX_FLAGS_DEBUG diff --git a/src/coreComponents/CMakeLists.txt b/src/coreComponents/CMakeLists.txt index 2de7ccd0068..81653fb0b8f 100644 --- a/src/coreComponents/CMakeLists.txt +++ b/src/coreComponents/CMakeLists.txt @@ -16,8 +16,7 @@ set( subdirs fileIO physicsSolvers events - mainInterface - python ) + mainInterface ) unset( 
parallelDeps ) @@ -107,8 +106,7 @@ if ( TARGET geosx_core ) endif() geosx_add_code_checks( PREFIX coreComponents - EXCLUDES cmake - src/coreComponents/constitutive/PVTPackage ) + EXCLUDES cmake constitutive/PVTPackage ) if( ENABLE_UNCRUSTIFY ) add_test( NAME testUncrustifyCheck diff --git a/src/coreComponents/LvArray b/src/coreComponents/LvArray index 2c370fb5b48..52aac7fd723 160000 --- a/src/coreComponents/LvArray +++ b/src/coreComponents/LvArray @@ -1 +1 @@ -Subproject commit 2c370fb5b485e94ca549dbd7a769ddb8f40d2605 +Subproject commit 52aac7fd7233365a01206634f4d8b1b2b5be8e15 diff --git a/src/coreComponents/codingUtilities/CMakeLists.txt b/src/coreComponents/codingUtilities/CMakeLists.txt index 784e4aa0455..c51ea1ca849 100644 --- a/src/coreComponents/codingUtilities/CMakeLists.txt +++ b/src/coreComponents/codingUtilities/CMakeLists.txt @@ -32,8 +32,6 @@ set_source_files_properties( Parsing.cpp PROPERTIES LANGUAGE CXX ) target_include_directories( codingUtilities PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks(PREFIX codingUtilities ) - if( GEOS_ENABLE_TESTS ) add_subdirectory(tests) diff --git a/src/coreComponents/codingUtilities/UnitTestUtilities.hpp b/src/coreComponents/codingUtilities/UnitTestUtilities.hpp index cc5280b15a7..3755134d630 100644 --- a/src/coreComponents/codingUtilities/UnitTestUtilities.hpp +++ b/src/coreComponents/codingUtilities/UnitTestUtilities.hpp @@ -87,14 +87,14 @@ ::testing::AssertionResult checkRelativeErrorFormat( const char *, const char *, { real64 const delta = std::abs( v1 - v2 ); real64 const value = std::max( std::abs( v1 ), std::abs( v2 ) ); - if( delta > absTol && delta > relTol * value ) + if( delta > absTol && delta > relTol * (value + 1.0) ) { return ::testing::AssertionFailure() << std::scientific << std::setprecision( 5 ) - << " relative error: " << delta / value + << " error norm: " << delta / (value + 1.0) << " (" << v1 << " vs " << v2 << ")," - << " exceeds " << relTol <<". " + << " exceeds " << relTol << ". 
" << " absolute error: " << delta << " exeeds " - << absTol < +VECTOR_TYPE axpy( VECTOR_TYPE const & vec1, + VECTOR_TYPE const & vec2, + SCALAR_TYPE const alpha ) +{ + GEOS_ASSERT( vec1.size() == vec2.size() ); + const localIndex N = vec1.size(); + VECTOR_TYPE result( N ); + RAJA::forall< parallelHostPolicy >( RAJA::TypedRangeSegment< localIndex >( 0, N ), + [&] GEOS_HOST ( localIndex const i ) + { + result[i] = vec1[i] + alpha * vec2[i]; + } ); + return result; +} + +template< typename VECTOR_TYPE, typename SCALAR_TYPE > +VECTOR_TYPE scale( VECTOR_TYPE const & vec, + SCALAR_TYPE const scalarMult ) +{ + const localIndex N = vec.size(); + VECTOR_TYPE result( N ); + RAJA::forall< parallelHostPolicy >( RAJA::TypedRangeSegment< localIndex >( 0, N ), + [&] GEOS_HOST ( localIndex const i ) + { + result[i] = scalarMult * vec[i]; + } ); + return result; +} + +template< typename VECTOR_TYPE > +real64 dot( VECTOR_TYPE const & vec1, + VECTOR_TYPE const & vec2 ) +{ + GEOS_ASSERT( vec1.size() == vec2.size()); + RAJA::ReduceSum< parallelHostReduce, real64 > result( 0.0 ); + const localIndex N = vec1.size(); + RAJA::forall< parallelHostPolicy >( RAJA::TypedRangeSegment< localIndex >( 0, N ), + [&] GEOS_HOST ( localIndex const i ) + { + result += vec1[i] * vec2[i]; + } ); + return result.get(); +} + } // namespace geos #endif /* GEOS_CODINGUTILITIES_UTILITIES_H_ */ diff --git a/src/coreComponents/common/CMakeLists.txt b/src/coreComponents/common/CMakeLists.txt index f555696f508..f8a0ca5345d 100644 --- a/src/coreComponents/common/CMakeLists.txt +++ b/src/coreComponents/common/CMakeLists.txt @@ -44,6 +44,7 @@ set( common_sources MpiWrapper.cpp Path.cpp initializeEnvironment.cpp + Units.cpp ) set( dependencyList ${parallelDeps} lvarray pugixml::pugixml RAJA chai conduit::conduit fmt::fmt ) @@ -78,8 +79,6 @@ blt_add_library( NAME common target_include_directories( common PUBLIC ${CMAKE_BINARY_DIR}/include ) target_include_directories( common PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks(PREFIX common ) - if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) endif() diff --git a/src/coreComponents/common/GEOS_RAJA_Interface.hpp b/src/coreComponents/common/GEOS_RAJA_Interface.hpp index 82a238f69e7..2aad742dc77 100644 --- a/src/coreComponents/common/GEOS_RAJA_Interface.hpp +++ b/src/coreComponents/common/GEOS_RAJA_Interface.hpp @@ -32,7 +32,7 @@ namespace geos auto const hostMemorySpace = LvArray::MemorySpace::host; -using serialPolicy = RAJA::loop_exec; +using serialPolicy = RAJA::seq_exec; using serialAtomic = RAJA::seq_atomic; using serialReduce = RAJA::seq_reduce; diff --git a/src/coreComponents/common/PhysicsConstants.hpp b/src/coreComponents/common/PhysicsConstants.hpp index 1d884741a4d..410e45fbc16 100644 --- a/src/coreComponents/common/PhysicsConstants.hpp +++ b/src/coreComponents/common/PhysicsConstants.hpp @@ -16,8 +16,8 @@ * @file PhysicsConstants.hpp * @brief Regroups useful constants that are globally used for math and physics computations. 
*/ -#ifndef GEOS_MATH_PHYSICSCONSTANTS_HPP_ -#define GEOS_MATH_PHYSICSCONSTANTS_HPP_ +#ifndef GEOS_COMMON_PHYSICSCONSTANTS_HPP_ +#define GEOS_COMMON_PHYSICSCONSTANTS_HPP_ namespace geos { @@ -25,15 +25,23 @@ namespace geos namespace constants { - /** * @brief Zero degree Celsius in Kelvin */ constexpr double zeroDegreesCelsiusInKelvin = 273.15; +/** + * @brief Shorthand for pi + */ +constexpr double pi = 3.141592653589793238; + +/** + * @brief Universal gas constant + */ +constexpr double gasConstant = 8.31446261815324; } // end namespace constants } // end namespace geos -#endif //GEOS_MATH_PHYSICSCONSTANTS_HPP_ +#endif //GEOS_COMMON_PHYSICSCONSTANTS_HPP_ diff --git a/src/coreComponents/common/Units.cpp b/src/coreComponents/common/Units.cpp new file mode 100644 index 00000000000..ae2b5541a22 --- /dev/null +++ b/src/coreComponents/common/Units.cpp @@ -0,0 +1,92 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2018-2020 TotalEnergies + * Copyright (c) 2019- GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ +/** + * @file Units.cpp + */ + +#include "Units.hpp" + +namespace geos +{ + +namespace units +{ + + +TimeFormatInfo::TimeFormatInfo( double const totalSeconds, int const years, int const days, + int const hours, int const minutes, int const seconds ): + m_totalSeconds( totalSeconds ), + m_years( years ), + m_days( days ), + m_hours( hours ), + m_minutes( minutes ), + m_seconds( seconds ) +{} + +string TimeFormatInfo::toString() const +{ + std::ostringstream oss; + if( m_years != 0 ) + { + oss << m_years << "y, " << m_days << "d, "; + } + else if( m_days != 0 ) + { + oss << m_days << "d, "; + } + oss << GEOS_FMT( "{:0>2}h{:0>2}m{:0>2}s ({} s)", + m_hours, m_minutes, m_seconds, m_totalSeconds ); + return oss.str(); +} + +std::ostream & operator<<( std::ostream & os, TimeFormatInfo const & info ) +{ + os << info.toString(); + return os; +} + + +template< typename DURATION > +TimeFormatInfo TimeFormatInfo::fromDuration( DURATION const value ) +{ + using namespace std::chrono; + + auto const totalYears = duration_cast< units::Years >( value ); + auto const daysOut = duration_cast< units::Days >( value - totalYears ); + auto const hoursOut = duration_cast< hours >( value - totalYears - daysOut ); + auto const minutesOut = duration_cast< minutes >( value - totalYears - daysOut - hoursOut ); + auto const secondsOut = duration_cast< seconds >( value - totalYears - daysOut - hoursOut - minutesOut ); + + return TimeFormatInfo( duration< double >( value ).count(), int( totalYears.count() ), + int( daysOut.count() ), int( hoursOut.count() ), + int( minutesOut.count() ), int( secondsOut.count() ) ); +} +// available specializations +template TimeFormatInfo TimeFormatInfo::fromDuration< SystemClock::duration >( SystemClock::duration duration ); + +TimeFormatInfo TimeFormatInfo::fromSeconds( double const seconds ) +{ + int totalYears = int( seconds / YearSeconds ); + int daysOut = int( ( seconds - totalYears * YearSeconds ) / DaySeconds ); + int hoursOut = int( ( seconds - totalYears * YearSeconds - 
daysOut * DaySeconds ) / HourSeconds ); + int minutesOut = int( ( seconds - totalYears * YearSeconds - daysOut * DaySeconds - hoursOut * HourSeconds ) / MinuteSeconds ); + int secondsOut = int( seconds - totalYears * YearSeconds - daysOut * DaySeconds - hoursOut * HourSeconds - minutesOut * MinuteSeconds ); + + return TimeFormatInfo( seconds, totalYears, daysOut, hoursOut, minutesOut, secondsOut ); +} + + +} // end namespace units + +} // end namespace geos diff --git a/src/coreComponents/common/Units.hpp b/src/coreComponents/common/Units.hpp index f71f799c074..43e8fc680e3 100644 --- a/src/coreComponents/common/Units.hpp +++ b/src/coreComponents/common/Units.hpp @@ -33,12 +33,14 @@ namespace units * @return the input Kelvin degrees converted in Celsius * @param kelvin degrees input */ +GEOS_HOST_DEVICE inline constexpr double convertKToC( double kelvin ) { return kelvin - constants::zeroDegreesCelsiusInKelvin; } /** * @return the input Celsius degrees converted in Kelvin * @param celsius degrees input */ +GEOS_HOST_DEVICE inline constexpr double convertCToK( double celsius ) { return celsius + constants::zeroDegreesCelsiusInKelvin; } @@ -98,6 +100,10 @@ constexpr inline std::string_view getDescription( Unit unit ) case TemperatureInC: return "temperature [C]"; case Distance: return "distance [m]"; case Time: return "time [s]"; + case Viscosity: return "viscosity [Pa*s]"; + case Enthalpy: return "enthalpy [J/kg]"; + case Density: return "density [kg/m3]"; + case Solubility: return "solubility [g/L]"; } } @@ -141,8 +147,102 @@ inline string formatValue( real64 value, Unit unit ) } + +/// Clock in use in GEOS to manipulate system times. +using SystemClock = std::chrono::system_clock; + +/// One year = 365.2425 days (= 146097 / 400) following the Gregorian calendar and the C++ convention. +using YearDaysRatio = std::ratio< 146097, 400 >; +/// Day helper duration type, equivalent to C++20 std::chrono::days. +using Days = std::chrono::duration< int64_t, std::ratio_multiply< std::ratio< 24 >, std::chrono::hours::period > >; +/// Year helper duration type, equivalent to C++20 std::chrono::years. +using Years = std::chrono::duration< int64_t, std::ratio_multiply< YearDaysRatio, Days::period > >; + +/// Days in one year (following the Gregorian calendar and the C++ convention) = 365.2425 days (= 146097 / 400). +static constexpr double YearDays = ( double )YearDaysRatio::num / YearDaysRatio::den; +/// Seconds in a minute +static constexpr double MinuteSeconds = 60.0; +/// Seconds in an hour +static constexpr double HourSeconds = 60.0 * MinuteSeconds; +/// Seconds in a day +static constexpr double DaySeconds = 24.0 * HourSeconds; +/// Seconds in a year +static constexpr double YearSeconds = YearDays * DaySeconds; + + +/** + * @brief Stores information useful for formatting duration strings, 
based on the geos::units time constants + */ +struct TimeFormatInfo +{ + /// Total time (including the decimal part) this instance represents in seconds + double m_totalSeconds = 0.0; + /// Number of integral years to show + int m_years = 0; + /// Number of integral days to show + int m_days = 0; + /// Number of integral hours to show + int m_hours = 0; + /// Number of integral minutes to show + int m_minutes = 0; + /// Number of integral seconds to show + int m_seconds = 0; + + + /** + * @brief Construct a TimeFormatInfo from raw data (which must be coherent) + * @param totalSeconds The total time (including the decimal part) this instance represents in seconds + * @param years Number of integral years to show + * @param days Number of integral days to show + * @param hours Number of integral hours to show + * @param minutes Number of integral minutes to show + * @param seconds Number of integral seconds to show + */ + TimeFormatInfo( double totalSeconds, int years, int days, int hours, int minutes, int seconds ); + /** + * @return A TimeFormatInfo constructed from the seconds to represent + * @param seconds the total time to represent in seconds (including the decimal part) + */ + static TimeFormatInfo fromSeconds( double const seconds ); + /** + * @return A TimeFormatInfo constructed from a standard typed duration value + * @param duration the duration to represent, in SystemClock::duration type + * (more types can be added by adding std::chrono::duration template specialisations). + */ + template< typename DURATION > static TimeFormatInfo fromDuration( DURATION duration ); + + /** + * @brief Insert the string representation information in the provided stream. + */ + friend std::ostream & operator<<( std::ostream & os, TimeFormatInfo const & ctx ); + + /** + * @return a user-friendly string representation of this structure. + */ + string toString() const; +}; + + } // end namespace units } // end namespace geos + +/** + * @brief Formatter to be able to directly use a TimeFormatInfo as a GEOS_FMT() argument + */ +template<> +struct GEOS_FMT_NS::formatter< geos::units::TimeFormatInfo > : GEOS_FMT_NS::formatter< std::string > +{ + /** + * @brief Format the specified TimeFormatInfo to a string. 
+ * @param durationData the TimeFormatInfo object to format + * @param ctx formatting state consisting of the formatting arguments and the output iterator + * @return iterator to the output buffer + */ + auto format( geos::units::TimeFormatInfo const & durationData, format_context & ctx ) + { + return GEOS_FMT_NS::formatter< std::string >::format( durationData.toString(), ctx ); + } +}; + #endif //GEOS_MATH_PHYSICSCONSTANTS_HPP_ diff --git a/src/coreComponents/common/unitTests/CMakeLists.txt b/src/coreComponents/common/unitTests/CMakeLists.txt index 0367171706e..814fd512cec 100644 --- a/src/coreComponents/common/unitTests/CMakeLists.txt +++ b/src/coreComponents/common/unitTests/CMakeLists.txt @@ -3,7 +3,8 @@ set( gtest_geosx_tests testDataTypes.cpp testFixedSizeDeque.cpp testTypeDispatch.cpp - testLifoStorage.cpp ) + testLifoStorage.cpp + testUnits.cpp ) if ( ENABLE_CALIPER ) list( APPEND gtest_geosx_tests diff --git a/src/coreComponents/common/unitTests/testUnits.cpp b/src/coreComponents/common/unitTests/testUnits.cpp new file mode 100644 index 00000000000..2ab6ef2cbc3 --- /dev/null +++ b/src/coreComponents/common/unitTests/testUnits.cpp @@ -0,0 +1,136 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2018-2020 TotalEnergies + * Copyright (c) 2019- GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + +#include "common/Units.hpp" + +#include <gtest/gtest.h> + +using namespace geos; +using namespace geos::units; + + +struct DurationCase +{ + string m_expectedString; + SystemClock::duration m_systemDuration; + double m_simDuration; + + template< class DURATION > + DurationCase( string_view expectedString, DURATION durationValue ): + m_expectedString( expectedString ), + m_systemDuration( std::chrono::duration_cast< SystemClock::duration >( durationValue ) ), + m_simDuration( std::chrono::duration_cast< std::chrono::duration< double > >( durationValue ).count() ) + {} +}; + +TEST( Units, SystemDurationFormatTest ) +{ + using namespace std::chrono; + + std::vector< DurationCase > durationCases = { + + DurationCase( + "00h00m00s (1.11e-07 s)", + nanoseconds( 111 ) ), + + DurationCase( + "00h00m00s (0.000111 s)", + microseconds( 111 ) ), + + DurationCase( + "00h00m00s (0.111 s)", + milliseconds( 111 ) ), + + DurationCase( + "00h02m25s (145.016 s)", + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "22h25m45s (80745.016 s)", + hours( 20 ) + minutes( 145 ) + seconds( 45 ) + milliseconds( 16 ) ), + + DurationCase( + "20d, 12h02m25s (1771345.016 s)", + hours( long( 24 * 20.5 ) ) + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "20d, 12h02m25s (1771345.016 s)", + Days( 20 ) + hours( 12 ) + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "1y, 0d, 00h00m00s (31556952 s)", + Years( 1 ) ), + + DurationCase( + "1y, 0d, 12h00m00s (31600152 s)", + Years( 1 ) + hours( 12 ) ), + + DurationCase( + "1y, 20d, 12h02m25s (33328297.016 s)", + Years( 1 ) + hours( long( 24 * 20.5 ) ) + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "1y, 20d, 12h02m25s (33328297.016 s)", + Years( 1 ) + 
Days( 20 ) + hours( 12 ) + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "12y, 362d, 05h49m12s (409981176 s)", + Years( 13 ) - Days( 3 ) ), + + DurationCase( + "13y, 365d, 05h48m12s (441797268 s)", + Years( 14 ) - minutes( 1 ) ), + + DurationCase( + "14y, 0d, 00h00m00s (441797328 s)", + Years( 14 ) ), + + DurationCase( + "14y, 0d, 12h00m00s (441840528 s)", + Years( 14 ) + hours( 12 ) ), + + DurationCase( + "100y, 20d, 12h02m25s (3157466545.016 s)", + Years( 100 ) + Days( 20 ) + hours( 12 ) + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "292y, 20d, 12h02m25s (9216401329.016 s)", + Years( 292 ) + Days( 20 ) + hours( 12 ) + seconds( 145 ) + milliseconds( 16 ) ), + + DurationCase( + "5500y, 20d, 12h02m25s (173565007345 s)", + Years( 5500 ) + Days( 20 ) + hours( 12 ) + seconds( 145 ) ), + + }; + + const SystemClock::duration maxDuration = SystemClock::duration::max(); + const string errorInfo = GEOS_FMT( "(Max possible duration = {} s)", + duration_cast< seconds >( maxDuration ).count() ); + + // Durations of more than 292 years are not supported by the SystemClock type. + double maxSystemTime = duration_cast< seconds, double, std::ratio< 1 > >( SystemClock::duration::max() ).count(); + + for( DurationCase const & durationCase : durationCases ) + { + // testing "double" typed time (which has a limit that is way higher than the test cases) + EXPECT_STREQ( durationCase.m_expectedString.c_str(), + TimeFormatInfo::fromSeconds( durationCase.m_simDuration ).toString().c_str() ) << errorInfo; + + if( durationCase.m_simDuration <= maxSystemTime ) + { + EXPECT_STREQ( durationCase.m_expectedString.c_str(), + TimeFormatInfo::fromDuration( durationCase.m_systemDuration ).toString().c_str() ) << errorInfo; + } + } +} diff --git a/src/coreComponents/constitutive/CMakeLists.txt b/src/coreComponents/constitutive/CMakeLists.txt index 9e2ddd521c9..47e573b6f13 100644 --- a/src/coreComponents/constitutive/CMakeLists.txt +++ b/src/coreComponents/constitutive/CMakeLists.txt @@ -315,6 +315,3 @@ target_include_directories( constitutive PUBLIC ${CMAKE_SOURCE_DIR}/coreComponen if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) endif( ) - -geosx_add_code_checks( PREFIX constitutive - EXCLUDES PVTPackage ) diff --git a/src/coreComponents/constitutive/ConstitutivePassThru.hpp b/src/coreComponents/constitutive/ConstitutivePassThru.hpp index ef6a51fc069..0f97f509309 100644 --- a/src/coreComponents/constitutive/ConstitutivePassThru.hpp +++ b/src/coreComponents/constitutive/ConstitutivePassThru.hpp @@ -269,9 +269,9 @@ struct ConstitutivePassThru< PorousSolidBase > PorousSolid< ModifiedCamClay >, PorousSolid< DelftEgg >, PorousSolid< DruckerPrager >, - //PorousSolid< DuvautLionsSolid< DruckerPrager > >, - // PorousSolid< DuvautLionsSolid< DruckerPragerExtended > >, - //PorousSolid< DuvautLionsSolid< ModifiedCamClay > >, + PorousSolid< DuvautLionsSolid< DruckerPrager > >, + PorousSolid< DuvautLionsSolid< DruckerPragerExtended > >, + PorousSolid< DuvautLionsSolid< ModifiedCamClay > >, PorousSolid< ElasticIsotropic >, PorousSolid< ElasticTransverseIsotropic >, PorousSolid< ElasticIsotropicPressureDependent >, @@ -359,9 +359,9 @@ struct ConstitutivePassThru< CoupledSolidBase > PorousSolid< ModifiedCamClay >, PorousSolid< DelftEgg >, PorousSolid< DruckerPrager >, - //PorousSolid< DuvautLionsSolid< DruckerPrager > >, - //PorousSolid< DuvautLionsSolid< DruckerPragerExtended > >, - //PorousSolid< DuvautLionsSolid< ModifiedCamClay > >, + PorousSolid< DuvautLionsSolid< DruckerPrager > >, + PorousSolid< 
DuvautLionsSolid< DruckerPragerExtended > >, + PorousSolid< DuvautLionsSolid< ModifiedCamClay > >, PorousSolid< ElasticIsotropic >, PorousSolid< ElasticTransverseIsotropic >, PorousSolid< ElasticIsotropicPressureDependent >, @@ -385,9 +385,9 @@ struct ConstitutivePassThru< CoupledSolidBase > PorousSolid< ModifiedCamClay >, PorousSolid< DelftEgg >, PorousSolid< DruckerPrager >, - //PorousSolid< DuvautLionsSolid< DruckerPrager > >, - //PorousSolid< DuvautLionsSolid< DruckerPragerExtended > >, - //PorousSolid< DuvautLionsSolid< ModifiedCamClay > >, + PorousSolid< DuvautLionsSolid< DruckerPrager > >, + PorousSolid< DuvautLionsSolid< DruckerPragerExtended > >, + PorousSolid< DuvautLionsSolid< ModifiedCamClay > >, PorousSolid< ElasticIsotropic >, PorousSolid< ElasticTransverseIsotropic >, PorousSolid< ElasticIsotropicPressureDependent >, diff --git a/src/coreComponents/constitutive/capillaryPressure/BrooksCoreyCapillaryPressure.cpp b/src/coreComponents/constitutive/capillaryPressure/BrooksCoreyCapillaryPressure.cpp index 5e11be9caed..10518b167d8 100644 --- a/src/coreComponents/constitutive/capillaryPressure/BrooksCoreyCapillaryPressure.cpp +++ b/src/coreComponents/constitutive/capillaryPressure/BrooksCoreyCapillaryPressure.cpp @@ -88,7 +88,7 @@ void BrooksCoreyCapillaryPressure::postProcessInput() if( m_phaseTypes[ip] != CapillaryPressureBase::REFERENCE_PHASE ) { - GEOS_THROW_IF_LT_MSG( m_phaseCapPressureExponentInv[ip], 1.0, + GEOS_THROW_IF_LE_MSG( m_phaseCapPressureExponentInv[ip], 0.0, errorMsg( viewKeyStruct::phaseCapPressureExponentInvString() ), InputError ); GEOS_THROW_IF_LT_MSG( m_phaseEntryPressure[ip], 0.0, diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/CO2BrineFluid.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/CO2BrineFluid.cpp index 276a632f21f..55c3c2a048e 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/CO2BrineFluid.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/CO2BrineFluid.cpp @@ -84,6 +84,8 @@ CO2BrineFluid< PHASE1, PHASE2, FLASH >:: CO2BrineFluid( string const & name, Group * const parent ): MultiFluidBase( name, parent ) { + enableLogLevelInput(); + registerWrapper( viewKeyStruct::phasePVTParaFilesString(), &m_phasePVTParaFiles ). setInputFlag( InputFlags::REQUIRED ). setRestartFlags( RestartFlags::NO_WRITE ). 
@@ -311,8 +313,10 @@ void CO2BrineFluid< PHASE1, PHASE2, FLASH >::createPVTModels() InputError ); // then, we are ready to instantiate the phase models - m_phase1 = std::make_unique< PHASE1 >( getName() + "_phaseModel1", phase1InputParams, m_componentNames, m_componentMolarWeight ); - m_phase2 = std::make_unique< PHASE2 >( getName() + "_phaseModel2", phase2InputParams, m_componentNames, m_componentMolarWeight ); + m_phase1 = std::make_unique< PHASE1 >( getName() + "_phaseModel1", phase1InputParams, m_componentNames, m_componentMolarWeight, + getLogLevel() > 0 && logger::internal::rank==0 ); + m_phase2 = std::make_unique< PHASE2 >( getName() + "_phaseModel2", phase2InputParams, m_componentNames, m_componentMolarWeight, + getLogLevel() > 0 && logger::internal::rank==0 ); // 2) Create the flash model { @@ -336,7 +340,8 @@ void CO2BrineFluid< PHASE1, PHASE2, FLASH >::createPVTModels() strs, m_phaseNames, m_componentNames, - m_componentMolarWeight ); + m_componentMolarWeight, + getLogLevel() > 0 && logger::internal::rank==0 ); } } else diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/PhaseModel.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/PhaseModel.hpp index 1fd0fea36a6..9b8d794ee58 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/PhaseModel.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/PhaseModel.hpp @@ -57,19 +57,23 @@ struct PhaseModel PhaseModel( string const & phaseModelName, array1d< array1d< string > > const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ) + array1d< real64 > const & componentMolarWeight, + bool const printTable ) : density( phaseModelName + "_" + Density::catalogName(), inputParams[InputParamOrder::DENSITY], componentNames, - componentMolarWeight ), + componentMolarWeight, + printTable ), viscosity( phaseModelName + "_" + Viscosity::catalogName(), inputParams[InputParamOrder::VISCOSITY], componentNames, - componentMolarWeight ), + componentMolarWeight, + printTable ), enthalpy( phaseModelName + "_" + Enthalpy::catalogName(), inputParams[InputParamOrder::ENTHALPY], componentNames, - componentMolarWeight ) + componentMolarWeight, + printTable ) {} /// The phase density model diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.cpp index 4707c3179d3..8c21a92c8f6 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.cpp @@ -183,7 +183,8 @@ TableFunction const * makeBrineEnthalpyTable( string_array const & inputParams, BrineEnthalpy::BrineEnthalpy( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) @@ -196,6 +197,11 @@ BrineEnthalpy::BrineEnthalpy( string const & name, m_CO2EnthalpyTable = makeCO2EnthalpyTable( inputParams, m_functionName, FunctionManager::getInstance() ); m_brineEnthalpyTable = makeBrineEnthalpyTable( inputParams, m_functionName, FunctionManager::getInstance() ); + if( printTable ) + { + m_CO2EnthalpyTable->print( m_CO2EnthalpyTable->getName() ); + m_brineEnthalpyTable->print( m_brineEnthalpyTable->getName() ); + } } void 
BrineEnthalpy::checkTablesParameters( real64 const pressure, @@ -219,7 +225,7 @@ BrineEnthalpy::createKernelWrapper() const m_waterIndex ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, BrineEnthalpy, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, BrineEnthalpy, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.hpp index 4f63eb07e36..1017ed50fea 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/BrineEnthalpy.hpp @@ -90,7 +90,8 @@ class BrineEnthalpy : public PVTFunctionBase BrineEnthalpy( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); static string catalogName() { return "BrineEnthalpy"; } diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.cpp index afde18ff57b..eb2875471d9 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.cpp @@ -251,7 +251,8 @@ TableFunction const * makeCO2EnthalpyTable( string_array const & inputParams, CO2Enthalpy::CO2Enthalpy( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) @@ -260,6 +261,8 @@ CO2Enthalpy::CO2Enthalpy( string const & name, m_CO2Index = PVTFunctionHelpers::findName( componentNames, expectedCO2ComponentNames, "componentNames" ); m_CO2EnthalpyTable = makeCO2EnthalpyTable( inputParams, m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_CO2EnthalpyTable->print( m_CO2EnthalpyTable->getName() ); } @@ -301,7 +304,7 @@ CO2Enthalpy::createKernelWrapper() const m_CO2Index ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, CO2Enthalpy, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, CO2Enthalpy, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.hpp index 96d335fd66f..5ce85a9645d 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Enthalpy.hpp @@ -78,7 +78,8 @@ class CO2Enthalpy : public PVTFunctionBase CO2Enthalpy( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); static string catalogName() { return "CO2Enthalpy"; } 
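Side note on the new printTable argument threaded through the hunks above: each PVT function and flash model constructor now receives a boolean that decides whether the generated property table is echoed, and CO2BrineFluid derives it from getLogLevel() > 0 && logger::internal::rank==0 so tables are printed at most once, on the first MPI rank, and only when logging is enabled. The following standalone C++ sketch is not GEOS code; SimpleTable and SimplePVTFunction are hypothetical stand-ins for TableFunction and a PVTFunctionBase subclass, used only to illustrate the pattern of passing such a flag at construction time.

// Standalone sketch (hypothetical names), illustrating the printTable constructor flag.
#include <iostream>
#include <string>
#include <vector>

struct SimpleTable
{
  std::string name;
  std::vector< double > values;

  // Stand-in for TableFunction::print(): dump the tabulated values to stdout.
  void print( std::string const & title ) const
  {
    std::cout << title << ":";
    for( double const v : values )
    {
      std::cout << " " << v;
    }
    std::cout << "\n";
  }
};

class SimplePVTFunction
{
public:
  // The trailing boolean mirrors the new printTable argument:
  // the table is always built, but only echoed when requested.
  SimplePVTFunction( std::string const & name, bool const printTable )
    : m_table{ name + "_table", { 1.0, 2.0, 3.0 } }
  {
    if( printTable )
    {
      m_table.print( m_table.name );
    }
  }

private:
  SimpleTable m_table;
};

int main()
{
  int const rank = 0;      // stand-in for logger::internal::rank
  int const logLevel = 1;  // stand-in for getLogLevel()

  // Same guard shape as in CO2BrineFluid::createPVTModels(): print only on rank 0 with logging on.
  SimplePVTFunction density( "brineDensity", logLevel > 0 && rank == 0 );
  return 0;
}

Deciding whether to print at construction time, rather than re-checking the log level inside every table, matches how the patch wires the flag from CO2BrineFluid through PhaseModel into each density, viscosity, and enthalpy model.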
diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.cpp index 623ceafaafa..262ba55c081 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.cpp @@ -20,7 +20,6 @@ #include "constitutive/fluid/multifluid/CO2Brine/functions/CO2EOSSolver.hpp" #include "constitutive/fluid/multifluid/CO2Brine/functions/PVTFunctionHelpers.hpp" -#include "constitutive/fluid/multifluid/MultiFluidConstants.hpp" #include "functions/FunctionManager.hpp" #include "common/Units.hpp" @@ -42,7 +41,7 @@ namespace constexpr real64 P_Pa_f = 1e+5; constexpr real64 P_c = 73.773 * P_Pa_f; constexpr real64 T_c = 304.1282; -constexpr real64 Rgas = MultiFluidConstants::gasConstant; +constexpr real64 Rgas = constants::gasConstant; constexpr real64 V_c = Rgas*T_c/P_c; // these coefficients are in Table (A1) of Duan and Sun (2003) @@ -246,7 +245,7 @@ TableFunction const * makeSolubilityTable( string_array const & inputParams, array1d< real64 > values( tableCoords.nPressures() * tableCoords.nTemperatures() ); calculateCO2Solubility( functionName, tolerance, tableCoords, salinity, values ); - string const tableName = functionName + "_table"; + string const tableName = functionName + "_co2Dissolution_table"; if( functionManager.hasGroup< TableFunction >( tableName ) ) { return functionManager.getGroupPointer< TableFunction >( tableName ); @@ -262,13 +261,43 @@ TableFunction const * makeSolubilityTable( string_array const & inputParams, } } +TableFunction const * makeVapourisationTable( string_array const & inputParams, + string const & functionName, + FunctionManager & functionManager ) +{ + // initialize the (p,T) coordinates + PTTableCoordinates tableCoords; + PVTFunctionHelpers::initializePropertyTable( inputParams, tableCoords ); + + // Currently initialise to all zeros + + array1d< real64 > values( tableCoords.nPressures() * tableCoords.nTemperatures() ); + values.zero(); + + string const tableName = functionName + "_waterVaporization_table"; + if( functionManager.hasGroup< TableFunction >( tableName ) ) + { + return functionManager.getGroupPointer< TableFunction >( tableName ); + } + else + { + TableFunction * const vapourisationTable = dynamicCast< TableFunction * >( functionManager.createChild( "TableFunction", tableName ) ); + vapourisationTable->setTableCoordinates( tableCoords.getCoords(), + { units::Pressure, units::TemperatureInC } ); + vapourisationTable->setTableValues( values, units::Solubility ); + vapourisationTable->setInterpolationMethod( TableFunction::InterpolationType::Linear ); + return vapourisationTable; + } +} + } // namespace CO2Solubility::CO2Solubility( string const & name, string_array const & inputParams, string_array const & phaseNames, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): FlashModelBase( name, componentNames, componentMolarWeight ) @@ -293,6 +322,12 @@ CO2Solubility::CO2Solubility( string const & name, m_phaseLiquidIndex = PVTFunctionHelpers::findName( phaseNames, expectedWaterPhaseNames, "phaseNames" ); m_CO2SolubilityTable = makeSolubilityTable( inputParams, m_modelName, FunctionManager::getInstance() ); + m_WaterVapourisationTable = makeVapourisationTable( inputParams, m_modelName, FunctionManager::getInstance() 
); + if( printTable ) + { + m_CO2SolubilityTable->print( m_CO2SolubilityTable->getName() ); + m_WaterVapourisationTable->print( m_WaterVapourisationTable->getName() ); + } } void CO2Solubility::checkTablesParameters( real64 const pressure, @@ -300,19 +335,22 @@ void CO2Solubility::checkTablesParameters( real64 const pressure, { m_CO2SolubilityTable->checkCoord( pressure, 0 ); m_CO2SolubilityTable->checkCoord( temperature, 1 ); + m_WaterVapourisationTable->checkCoord( pressure, 0 ); + m_WaterVapourisationTable->checkCoord( temperature, 1 ); } CO2Solubility::KernelWrapper CO2Solubility::createKernelWrapper() const { return KernelWrapper( m_componentMolarWeight, *m_CO2SolubilityTable, + *m_WaterVapourisationTable, m_CO2Index, m_waterIndex, m_phaseGasIndex, m_phaseLiquidIndex ); } -REGISTER_CATALOG_ENTRY( FlashModelBase, CO2Solubility, string const &, string_array const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( FlashModelBase, CO2Solubility, string const &, string_array const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // end namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.hpp index bd12c9a7870..c2ca26b3f5c 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/CO2Solubility.hpp @@ -46,17 +46,21 @@ class CO2SolubilityUpdate final : public FlashModelBaseUpdate CO2SolubilityUpdate( arrayView1d< real64 const > const & componentMolarWeight, TableFunction const & CO2SolubilityTable, + TableFunction const & waterVapourisationTable, integer const CO2Index, integer const waterIndex, integer const phaseGasIndex, integer const phaseLiquidIndex ) : FlashModelBaseUpdate( componentMolarWeight ), m_CO2SolubilityTable( CO2SolubilityTable.createKernelWrapper() ), + //m_WaterVapourisationTable( waterVapourisationTable.createKernelWrapper() ), m_CO2Index( CO2Index ), m_waterIndex( waterIndex ), m_phaseGasIndex( phaseGasIndex ), m_phaseLiquidIndex( phaseLiquidIndex ) - {} + { + GEOS_UNUSED_VAR( waterVapourisationTable ); + } template< int USD1 > GEOS_HOST_DEVICE @@ -70,13 +74,20 @@ class CO2SolubilityUpdate final : public FlashModelBaseUpdate { FlashModelBaseUpdate::move( space, touch ); m_CO2SolubilityTable.move( space, touch ); + //m_WaterVapourisationTable.move( space, touch ); } protected: + /// Expected number of components + static constexpr integer numComps = 2; + /// Table with CO2 solubility tabulated as a function (P,T) TableFunction::KernelWrapper m_CO2SolubilityTable; + /// Table with water vapourisation as a function (P,T) + //TableFunction::KernelWrapper m_WaterVapourisationTable; + /// Index of the CO2 phase integer m_CO2Index; @@ -99,7 +110,8 @@ class CO2Solubility : public FlashModelBase string_array const & inputParams, string_array const & phaseNames, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); static string catalogName() { return "CO2Solubility"; } @@ -124,6 +136,9 @@ class CO2Solubility : public FlashModelBase /// Table to compute solubility as a function of pressure and temperature TableFunction const * m_CO2SolubilityTable; + /// Table to compute water vapourisation as a function of pressure and 
temperature + TableFunction const * m_WaterVapourisationTable; + /// Index of the CO2 component integer m_CO2Index; @@ -149,112 +164,173 @@ CO2SolubilityUpdate::compute( real64 const & pressure, { using Deriv = multifluid::DerivativeOffset; - // solubility mol/kg(water) X = Csat/W + // Solubility of CO2 is read from the tables in the form of moles of CO2 per kg of water + // Solubility of water is read from the tables in the form of moles of water per kg of CO2 real64 const input[2] = { pressure, temperature }; - real64 solubilityDeriv[2]{}; - real64 solubility = m_CO2SolubilityTable.compute( input, solubilityDeriv ); - solubility *= m_componentMolarWeight[m_waterIndex]; - for( integer ic = 0; ic < 2; ++ic ) + real64 co2SolubilityDeriv[2]{}; + real64 watSolubilityDeriv[2]{0.0, 0.0}; + real64 co2Solubility = m_CO2SolubilityTable.compute( input, co2SolubilityDeriv ); + real64 watSolubility = 0.0; //m_WaterVapourisationTable.compute( input, watSolubilityDeriv ); + + // Convert the solubility to mole/mole + co2Solubility *= m_componentMolarWeight[m_waterIndex]; + watSolubility *= m_componentMolarWeight[m_CO2Index]; + for( integer const ic : {Deriv::dP, Deriv::dT} ) { - solubilityDeriv[ic] *= m_componentMolarWeight[m_waterIndex]; + co2SolubilityDeriv[ic] *= m_componentMolarWeight[m_waterIndex]; + watSolubilityDeriv[ic] *= m_componentMolarWeight[m_CO2Index]; } - // Y = C/W = z/(1-z) - real64 Y = 0.0; - real64 dY_dCompFrac[2]{}; + real64 const z_co2 = compFraction[m_CO2Index]; + real64 const z_wat = compFraction[m_waterIndex]; - if( compFraction[m_CO2Index] > 1.0 - minForDivision ) + real64 const determinant = 1.0 - co2Solubility*watSolubility; + + GEOS_ERROR_IF_LT_MSG ( LvArray::math::abs( determinant ), minForDivision, + GEOS_FMT( "Failed to calculate solubility at pressure {} Pa and temperature {} C.", pressure, temperature ) ); + + real64 invDeterminant = 0.0; + real64 invDeterminantDeriv[] = { 0.0, 0.0 }; + + invDeterminant = 1.0 / determinant; + for( integer const ic : {Deriv::dP, Deriv::dT} ) { - Y = compFraction[m_CO2Index] / minForDivision; - dY_dCompFrac[m_CO2Index] = 1.0 / minForDivision; - dY_dCompFrac[m_waterIndex] = 0.0; + invDeterminantDeriv[ic] = invDeterminant*invDeterminant*(co2Solubility*watSolubilityDeriv[ic] + watSolubility*co2SolubilityDeriv[ic]); + } + + real64 x_co2 = co2Solubility * (z_wat - watSolubility * z_co2) * invDeterminant; + real64 x_co2Deriv[4]{ 0.0, 0.0, 0.0, 0.0 }; + if( minForDivision < x_co2 ) + { + // Pressure and temperature derivatives + for( integer const ic : {Deriv::dP, Deriv::dT} ) + { + x_co2Deriv[ic] = co2SolubilityDeriv[ic] * (z_wat - watSolubility * z_co2) * invDeterminant + - co2Solubility * watSolubilityDeriv[ic] * z_co2 * invDeterminant + + co2Solubility * (z_wat - watSolubility * z_co2) * invDeterminantDeriv[ic]; + } + // Composition derivatives + x_co2Deriv[Deriv::dC+m_CO2Index] = -co2Solubility * watSolubility * invDeterminant; + x_co2Deriv[Deriv::dC+m_waterIndex] = co2Solubility * invDeterminant; } else { - real64 const oneMinusCompFracInv = 1.0 / (1.0 - compFraction[m_CO2Index]); - Y = compFraction[m_CO2Index] * oneMinusCompFracInv; - dY_dCompFrac[m_CO2Index] = oneMinusCompFracInv * oneMinusCompFracInv; - dY_dCompFrac[m_waterIndex] = 0.0; + x_co2 = 0.0; } - auto setZero = []( real64 & val ){ val = 0.0; }; - LvArray::forValuesInSlice( phaseFraction.derivs, setZero ); - LvArray::forValuesInSlice( phaseCompFraction.derivs, setZero ); - - if( Y < solubility ) + real64 y_wat = watSolubility * (z_co2 - x_co2); + real64 y_watDeriv[4]{ 0.0, 0.0, 
0.0, 0.0 }; + if( minForDivision < y_wat ) { - // liquid phase only - - // 1) Compute phase fractions - - phaseFraction.value[m_phaseLiquidIndex] = 1.0; - phaseFraction.value[m_phaseGasIndex] = 0.0; - - // 2) Compute phase component fractions - - phaseCompFraction.value[m_phaseGasIndex][m_CO2Index] = 1.0; - phaseCompFraction.value[m_phaseGasIndex][m_waterIndex] = 0.0; - for( localIndex ic = 0; ic < 2; ++ic ) + // Pressure and temperature derivatives + for( integer const ic : {Deriv::dP, Deriv::dT} ) { - phaseCompFraction.value[m_phaseLiquidIndex][ic] = compFraction[ic]; - - for( localIndex jc = 0; jc < 2; ++jc ) - { - phaseCompFraction.derivs[m_phaseLiquidIndex][ic][Deriv::dC+jc] = (ic == jc ) ? 1.0 : 0.0; - phaseCompFraction.derivs[m_phaseGasIndex][ic][Deriv::dC+jc] = 0.0; - } + y_watDeriv[ic] = watSolubilityDeriv[ic] * (z_co2 - x_co2) - watSolubility * x_co2Deriv[ic]; } + // Composition derivatives + y_watDeriv[Deriv::dC+m_CO2Index] = watSolubility*(1.0 - x_co2Deriv[Deriv::dC+m_CO2Index]); + y_watDeriv[Deriv::dC+m_waterIndex] = -watSolubility * x_co2Deriv[Deriv::dC+m_waterIndex]; } else { - // two-phase flow + y_wat = 0.0; + } - // 1) Compute phase fractions + // Liquid and vapour phase fractions + real64 const L = x_co2 + z_wat - y_wat; + real64 const V = y_wat + z_co2 - x_co2; // = 1 - L; - // liquid phase fraction = (Csat + W) / (C + W) = (Csat/W + 1) / (C/W + 1) - real64 const onePlusYInv = 1.0 / ( 1.0 + Y ); - phaseFraction.value[m_phaseLiquidIndex] = (solubility + 1.0) * onePlusYInv; + if( minForDivision < L && minForDivision < V ) + { + // Two phases - phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dP] = solubilityDeriv[0] * onePlusYInv; - phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dT] = solubilityDeriv[1] * onePlusYInv; - phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dC+m_CO2Index] = - -dY_dCompFrac[m_CO2Index] * phaseFraction.value[m_phaseLiquidIndex] * onePlusYInv; - phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dC+m_waterIndex] = - -dY_dCompFrac[m_waterIndex] * phaseFraction.value[m_phaseLiquidIndex] * onePlusYInv; + // 1) Compute phase fractions and derivatives - phaseFraction.value[m_phaseGasIndex] = 1.0 - phaseFraction.value[m_phaseLiquidIndex]; + real64 const dL_dP = x_co2Deriv[Deriv::dP] - y_watDeriv[Deriv::dP]; + real64 const dL_dT = x_co2Deriv[Deriv::dT] - y_watDeriv[Deriv::dT]; + real64 const dL_dzco2 = x_co2Deriv[Deriv::dC+m_CO2Index] - y_watDeriv[Deriv::dC+m_CO2Index]; + real64 const dL_dzwat = x_co2Deriv[Deriv::dC+m_waterIndex] + 1.0 - y_watDeriv[Deriv::dC+m_waterIndex]; - phaseFraction.derivs[m_phaseGasIndex][Deriv::dP] = -phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dP]; - phaseFraction.derivs[m_phaseGasIndex][Deriv::dT] = -phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dT]; - phaseFraction.derivs[m_phaseGasIndex][Deriv::dC+m_CO2Index] = -phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dC+m_CO2Index]; - phaseFraction.derivs[m_phaseGasIndex][Deriv::dC+m_waterIndex] = -phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dC+m_waterIndex]; + real64 const dV_dP = y_watDeriv[Deriv::dP] - x_co2Deriv[Deriv::dP]; + real64 const dV_dT = y_watDeriv[Deriv::dT] - x_co2Deriv[Deriv::dT]; + real64 const dV_dzco2 = y_watDeriv[Deriv::dC+m_CO2Index] + 1.0 - x_co2Deriv[Deriv::dC+m_CO2Index]; + real64 const dV_dzwat = y_watDeriv[Deriv::dC+m_waterIndex] - x_co2Deriv[Deriv::dC+m_waterIndex]; - // 2) Compute phase component fractions + phaseFraction.value[m_phaseLiquidIndex] = L; - // liquid phase composition CO2 = Csat / (Csat + W) = (Csat/W) / (Csat/W + 1) - real64 const 
onePlusSolubilityInv = 1.0 / ( 1.0 + solubility ); - phaseCompFraction.value[m_phaseLiquidIndex][m_CO2Index] = solubility * onePlusSolubilityInv; + phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dP] = dL_dP; + phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dT] = dL_dT; + phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dC+m_CO2Index] = dL_dzco2; + phaseFraction.derivs[m_phaseLiquidIndex][Deriv::dC+m_waterIndex] = dL_dzwat; - phaseCompFraction.derivs[m_phaseLiquidIndex][m_CO2Index][Deriv::dP] = solubilityDeriv[0] * (onePlusSolubilityInv*onePlusSolubilityInv); - phaseCompFraction.derivs[m_phaseLiquidIndex][m_CO2Index][Deriv::dT] = solubilityDeriv[1] * (onePlusSolubilityInv*onePlusSolubilityInv); + phaseFraction.value[m_phaseGasIndex] = V; - phaseCompFraction.value[m_phaseLiquidIndex][m_waterIndex] = 1.0 - phaseCompFraction.value[m_phaseLiquidIndex][m_CO2Index]; + phaseFraction.derivs[m_phaseGasIndex][Deriv::dP] = dV_dP; + phaseFraction.derivs[m_phaseGasIndex][Deriv::dT] = dV_dT; + phaseFraction.derivs[m_phaseGasIndex][Deriv::dC+m_CO2Index] = dV_dzco2; + phaseFraction.derivs[m_phaseGasIndex][Deriv::dC+m_waterIndex] = dV_dzwat; - phaseCompFraction.derivs[m_phaseLiquidIndex][m_waterIndex][Deriv::dP] = -phaseCompFraction.derivs[m_phaseLiquidIndex][m_CO2Index][Deriv::dP]; - phaseCompFraction.derivs[m_phaseLiquidIndex][m_waterIndex][Deriv::dT] = -phaseCompFraction.derivs[m_phaseLiquidIndex][m_CO2Index][Deriv::dT]; + // 2) Compute phase component fractions and derivatives - // gas phase composition CO2 = 1.0 + // 2.1) Assigning the number of moles in each phase + phaseCompFraction.value[m_phaseLiquidIndex][m_CO2Index] = x_co2; + phaseCompFraction.value[m_phaseLiquidIndex][m_waterIndex] = z_wat - y_wat; + phaseCompFraction.value[m_phaseGasIndex][m_CO2Index] = z_co2 - x_co2; + phaseCompFraction.value[m_phaseGasIndex][m_waterIndex] = y_wat; - phaseCompFraction.value[m_phaseGasIndex][m_CO2Index] = 1.0; - phaseCompFraction.value[m_phaseGasIndex][m_waterIndex] = 0.0; + for( integer const kc : {Deriv::dP, Deriv::dT, Deriv::dC+m_CO2Index, Deriv::dC+m_waterIndex} ) + { + phaseCompFraction.derivs[m_phaseLiquidIndex][m_CO2Index][kc] = x_co2Deriv[kc]; + phaseCompFraction.derivs[m_phaseLiquidIndex][m_waterIndex][kc] = -y_watDeriv[kc]; + phaseCompFraction.derivs[m_phaseGasIndex][m_CO2Index][kc] = -x_co2Deriv[kc]; + phaseCompFraction.derivs[m_phaseGasIndex][m_waterIndex][kc] = y_watDeriv[kc]; + } + phaseCompFraction.derivs[m_phaseLiquidIndex][m_waterIndex][Deriv::dC+m_waterIndex] += 1.0; + phaseCompFraction.derivs[m_phaseGasIndex][m_CO2Index][Deriv::dC+m_CO2Index] += 1.0; - phaseCompFraction.derivs[m_phaseGasIndex][m_CO2Index][Deriv::dP] = 0.0; - phaseCompFraction.derivs[m_phaseGasIndex][m_waterIndex][Deriv::dT] = 0.0; - phaseCompFraction.derivs[m_phaseGasIndex][m_CO2Index][Deriv::dP] = 0.0; - phaseCompFraction.derivs[m_phaseGasIndex][m_waterIndex][Deriv::dT] = 0.0; - // phaseCompFraction does not depend on globalComponentFraction + // 2.2) Divide by the number of moles in the phase to get the phase mole fraction + // Update: phaseCompFraction[ip][jc] <- phaseCompFraction[ip][jc] / phaseFraction[ip] + for( integer const ip : {m_phaseLiquidIndex, m_phaseGasIndex} ) + { + real64 const invFractionSqr = 1.0 / (phaseFraction.value[ip] * phaseFraction.value[ip]); + for( integer const jc : {m_CO2Index, m_waterIndex} ) + { + for( integer const kc : {Deriv::dP, Deriv::dT, Deriv::dC+m_CO2Index, Deriv::dC+m_waterIndex} ) + { + phaseCompFraction.derivs[ip][jc][kc] = ( 
phaseCompFraction.derivs[ip][jc][kc]*phaseFraction.value[ip] + - phaseCompFraction.value[ip][jc]*phaseFraction.derivs[ip][kc])*invFractionSqr; + } + phaseCompFraction.value[ip][jc] /= phaseFraction.value[ip]; + } + } + } + else + { + // Single phase: Select the present phase + integer const activePhase = minForDivision < L ? m_phaseLiquidIndex : m_phaseGasIndex; + + // Zero out everything to start + auto setZero = []( real64 & val ){ val = 0.0; }; + LvArray::forValuesInSlice( phaseFraction.value, setZero ); + LvArray::forValuesInSlice( phaseCompFraction.value, setZero ); + LvArray::forValuesInSlice( phaseFraction.derivs, setZero ); + LvArray::forValuesInSlice( phaseCompFraction.derivs, setZero ); + + // 1) Compute phase fractions + phaseFraction.value[activePhase] = 1.0; + + // 2) Compute phase component fractions + // Setup default values which will be overridden for the active phase + phaseCompFraction.value[m_phaseGasIndex][m_CO2Index] = 1.0; + phaseCompFraction.value[m_phaseLiquidIndex][m_waterIndex] = 1.0; + // Set the global composition as the composition of the active phase + for( integer ic = 0; ic < numComps; ++ic ) + { + phaseCompFraction.value[activePhase][ic] = compFraction[ic]; + phaseCompFraction.derivs[activePhase][ic][Deriv::dC+ic] = 1.0; + } } } diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.cpp index beb3fa855ff..ec24045a0fa 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.cpp @@ -36,7 +36,8 @@ namespace PVTProps EzrokhiBrineDensity::EzrokhiBrineDensity( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) @@ -50,6 +51,11 @@ EzrokhiBrineDensity::EzrokhiBrineDensity( string const & name, makeCoefficients( inputPara ); m_waterSatDensityTable = PureWaterProperties::makeSaturationDensityTable( m_functionName, FunctionManager::getInstance() ); m_waterSatPressureTable = PureWaterProperties::makeSaturationPressureTable( m_functionName, FunctionManager::getInstance() ); + if( printTable ) + { + m_waterSatDensityTable->print( m_waterSatDensityTable->getName() ); + m_waterSatPressureTable->print( m_waterSatPressureTable->getName() ); + } } void EzrokhiBrineDensity::makeCoefficients( string_array const & inputPara ) @@ -96,7 +102,7 @@ EzrokhiBrineDensity::createKernelWrapper() const m_coef2 ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, EzrokhiBrineDensity, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, EzrokhiBrineDensity, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // end namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.hpp index 9acdd42e7e8..0f66d19ba4e 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineDensity.hpp @@ -107,7 
+107,8 @@ class EzrokhiBrineDensity : public PVTFunctionBase EzrokhiBrineDensity( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); virtual ~EzrokhiBrineDensity() override = default; @@ -201,20 +202,23 @@ void EzrokhiBrineDensityUpdate::compute( real64 const & pressure, // compute only common part of derivatives w.r.t. CO2 and water phase compositions // later to be multiplied by (phaseComposition[m_waterIndex]) and ( -phaseComposition[m_CO2Index] ) respectively real64 const exponent_dPhaseComp = coefPhaseComposition * m_componentMolarWeight[m_CO2Index] * m_componentMolarWeight[m_waterIndex] * waterMWInv * waterMWInv; - real64 const exponentPowered = useMass ? pow( 10, exponent ) : pow( 10, exponent ) / m_componentMolarWeight[m_waterIndex]; + real64 exponentPowered = pow( 10, exponent ); value = waterDensity * exponentPowered; real64 const dValueCoef = LvArray::math::log( 10 ) * value; - - real64 const dValue_dPhaseComp = dValueCoef * exponent_dPhaseComp; dValue[Deriv::dP] = dValueCoef * exponent_dPressure + waterDensity_dPressure * exponentPowered; dValue[Deriv::dT] = dValueCoef * exponent_dTemperature + waterDensity_dTemperature * exponentPowered; // here, we multiply common part of derivatives by specific coefficients + real64 const dValue_dPhaseComp = dValueCoef * exponent_dPhaseComp; dValue[Deriv::dC+m_CO2Index] = dValue_dPhaseComp * phaseComposition[m_waterIndex] * dPhaseComposition[m_CO2Index][Deriv::dC+m_CO2Index]; dValue[Deriv::dC+m_waterIndex] = dValue_dPhaseComp * ( -phaseComposition[m_CO2Index] ) * dPhaseComposition[m_waterIndex][Deriv::dC+m_waterIndex]; + if( !useMass ) + { + divideByPhaseMolarWeight( phaseComposition, dPhaseComposition, value, dValue ); + } } } // end namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.cpp index 426a63c9a72..ec63aa192a9 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.cpp @@ -36,7 +36,8 @@ namespace PVTProps EzrokhiBrineViscosity::EzrokhiBrineViscosity( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) @@ -49,6 +50,8 @@ EzrokhiBrineViscosity::EzrokhiBrineViscosity( string const & name, makeCoefficients( inputPara ); m_waterViscosityTable = PureWaterProperties::makeSaturationViscosityTable( m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_waterViscosityTable->print( m_waterViscosityTable->getName() ); } void EzrokhiBrineViscosity::makeCoefficients( string_array const & inputPara ) @@ -90,7 +93,7 @@ EzrokhiBrineViscosity::createKernelWrapper() const m_coef2 ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, EzrokhiBrineViscosity, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, EzrokhiBrineViscosity, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // end namespace PVTProps diff --git 
a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.hpp index 2afdb6873aa..f007f573f83 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/EzrokhiBrineViscosity.hpp @@ -96,7 +96,8 @@ class EzrokhiBrineViscosity : public PVTFunctionBase EzrokhiBrineViscosity( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); virtual ~EzrokhiBrineViscosity() override = default; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.cpp index 6320be58bda..9f7ed16bc4a 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.cpp @@ -34,9 +34,8 @@ namespace PVTProps namespace { -void fenghourCO2ViscosityFunction( real64 const & temperatureCent, - real64 const & density, - real64 & viscosity ) +real64 fenghourCO2ViscosityFunction( real64 const & temperatureCent, + real64 const & density ) { constexpr real64 espar = 251.196; constexpr real64 esparInv = 1.0 / espar; @@ -67,7 +66,7 @@ void fenghourCO2ViscosityFunction( real64 const & temperatureCent, real64 const vxcess = density * (d11 + density * (d21 + d2*d2*(d64 / (Tred*Tred*Tred) + d2*(d81 + d82/Tred)))); // equation (1) of Fenghour and Wakeham (1998) - viscosity = 1e-6 * (vlimit + vxcess + vcrit); + return 1e-6 * (vlimit + vxcess + vcrit); } void calculateCO2Viscosity( PTTableCoordinates const & tableCoords, @@ -82,9 +81,8 @@ void calculateCO2Viscosity( PTTableCoordinates const & tableCoords, { for( localIndex j = 0; j < nTemperatures; ++j ) { - fenghourCO2ViscosityFunction( tableCoords.getTemperature( j ), - densities[j*nPressures+i], - viscosities[j*nPressures+i] ); + real64 const T = tableCoords.getTemperature( j ); + viscosities[j*nPressures+i] = fenghourCO2ViscosityFunction( T, densities[j*nPressures+i] ); } } } @@ -137,12 +135,15 @@ TableFunction const * makeViscosityTable( string_array const & inputParams, FenghourCO2Viscosity::FenghourCO2Viscosity( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ) + array1d< real64 > const & componentMolarWeight, + bool const printTable ) : PVTFunctionBase( name, componentNames, componentMolarWeight ) { m_CO2ViscosityTable = makeViscosityTable( inputParams, m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_CO2ViscosityTable->print( m_CO2ViscosityTable->getName() ); } void FenghourCO2Viscosity::checkTablesParameters( real64 const pressure, @@ -159,7 +160,7 @@ FenghourCO2Viscosity::createKernelWrapper() const *m_CO2ViscosityTable ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, FenghourCO2Viscosity, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, FenghourCO2Viscosity, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // end namespace PVTProps diff --git 
a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.hpp index 1aa208afc3f..e8205e8745b 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FenghourCO2Viscosity.hpp @@ -74,7 +74,8 @@ class FenghourCO2Viscosity : public PVTFunctionBase FenghourCO2Viscosity( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); virtual ~FenghourCO2Viscosity() override = default; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FlashModelBase.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FlashModelBase.hpp index f7de275e572..314f7498dde 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FlashModelBase.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/FlashModelBase.hpp @@ -82,7 +82,8 @@ class FlashModelBase string_array const &, string_array const &, string_array const &, - array1d< real64 > const & >; + array1d< real64 > const &, + bool const >; static typename CatalogInterface::CatalogType & getCatalog() { static CatalogInterface::CatalogType catalog; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/NoOpPVTFunction.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/NoOpPVTFunction.hpp index 67b57bda644..47721c43539 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/NoOpPVTFunction.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/NoOpPVTFunction.hpp @@ -69,12 +69,13 @@ class NoOpPVTFunction : public PVTFunctionBase NoOpPVTFunction( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ) + array1d< real64 > const & componentMolarWeight, + bool const printTable ) : PVTFunctionBase( name, componentNames, componentMolarWeight ) { - GEOS_UNUSED_VAR( inputPara ); + GEOS_UNUSED_VAR( inputPara, printTable ); } virtual ~NoOpPVTFunction() override = default; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PVTFunctionBase.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PVTFunctionBase.hpp index bff7ea00f7a..7bfea2c71a8 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PVTFunctionBase.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PVTFunctionBase.hpp @@ -54,6 +54,43 @@ class PVTFunctionBaseUpdate protected: + template< int USD > + GEOS_HOST_DEVICE + real64 computePhaseMolarWeight( arraySlice1d< real64 const, USD > const & phaseComposition ) const + { + integer const numComp = phaseComposition.size(); + real64 MT = 0.0; + for( integer i = 0; i < numComp; i++ ) + { + MT += phaseComposition[i] * m_componentMolarWeight[i]; + } + return MT; + } + + template< int USD1, int USD2, int USD3 > + GEOS_HOST_DEVICE + void divideByPhaseMolarWeight( arraySlice1d< real64 const, USD1 > const & phaseComposition, + arraySlice2d< real64 const, USD2 > const & dPhaseComposition, + real64 & value, arraySlice1d< real64, USD3 > const & dValue ) const + { + integer const numComp = 
phaseComposition.size(); + integer const numDerivs = dValue.size(); + + real64 const MT = computePhaseMolarWeight( phaseComposition ); + + value /= MT; + + for( int der = 0; der < numDerivs; der++ ) + { + real64 dMT = 0.0; + for( int ic = 0; ic < numComp; ic++ ) + { + dMT += dPhaseComposition[ic][der] * m_componentMolarWeight[ic]; + } + dValue[der] = ( dValue[der] - value * dMT ) / MT; // value is already divided by MT + } + } + /// Array storing the component molar weights arrayView1d< real64 const > m_componentMolarWeight; @@ -79,7 +116,8 @@ class PVTFunctionBase string const &, array1d< string > const &, array1d< string > const &, - array1d< real64 > const & >; + array1d< real64 > const &, + bool const >; static typename CatalogInterface::CatalogType & getCatalog() { static CatalogInterface::CatalogType catalog; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.cpp index 5b16d60080d..0cb55b9d879 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.cpp @@ -61,9 +61,10 @@ void calculateBrineDensity( PTTableCoordinates const & tableCoords, for( localIndex j = 0; j < nTemperatures; ++j ) { + real64 const T = tableCoords.getTemperature( j ); // see Phillips et al. (1981), equations (4) and (5), pages 14 and 15 real64 const x = c1 * exp( a1 * salinity ) - + c2 * exp( a2 * tableCoords.getTemperature( j ) ) + + c2 * exp( a2 * T ) + c3 * exp( a3 * P ); densities[j*nPressures+i] = (AA + BB * x + CC * x * x + DD * x * x * x) * 1000.0; } @@ -89,7 +90,7 @@ void calculatePureWaterDensity( PTTableCoordinates const & tableCoords, for( localIndex i = 0; i < nPressures; ++i ) { - real64 const P = tableCoords.getPressure( i ) / 1e5; + real64 const P = tableCoords.getPressure( i ); for( localIndex j = 0; j < nTemperatures; ++j ) { @@ -112,14 +113,15 @@ TableFunction const * makeDensityTable( string_array const & inputParams, string const & functionName, FunctionManager & functionManager ) { + GEOS_THROW_IF_LT_MSG( inputParams.size(), 9, + GEOS_FMT( "{}: insufficient number of model parameters", functionName ), + InputError ); + // initialize the (p,T) coordinates PTTableCoordinates tableCoords; PVTFunctionHelpers::initializePropertyTable( inputParams, tableCoords ); // initialize salinity - GEOS_THROW_IF_LT_MSG( inputParams.size(), 9, - GEOS_FMT( "{}: insufficient number of model parameters", functionName ), - InputError ); real64 salinity; try { @@ -168,7 +170,8 @@ TableFunction const * makeDensityTable( string_array const & inputParams, PhillipsBrineDensity::PhillipsBrineDensity( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) @@ -180,6 +183,8 @@ PhillipsBrineDensity::PhillipsBrineDensity( string const & name, m_waterIndex = PVTFunctionHelpers::findName( componentNames, expectedWaterComponentNames, "componentNames" ); m_brineDensityTable = makeDensityTable( inputParams, m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_brineDensityTable->print( m_brineDensityTable->getName() ); } PhillipsBrineDensity::KernelWrapper @@ -198,7 +203,7 @@ void 
PhillipsBrineDensity::checkTablesParameters( real64 const pressure, m_brineDensityTable->checkCoord( temperature, 1 ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, PhillipsBrineDensity, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, PhillipsBrineDensity, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.hpp index 26381194127..85208dd7e70 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineDensity.hpp @@ -84,7 +84,8 @@ class PhillipsBrineDensity : public PVTFunctionBase PhillipsBrineDensity( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); static string catalogName() { return "PhillipsBrineDensity"; } @@ -187,37 +188,22 @@ void PhillipsBrineDensityUpdate::compute( real64 const & pressure, // Brine density // equation (1) from Garcia (2001) - if( useMass ) + value = density + + m_componentMolarWeight[m_CO2Index] * conc + - concDensVol; + dValue[Deriv::dP] = densityDeriv[0] + + m_componentMolarWeight[m_CO2Index] * dConc_dPres + - dConcDensVol_dPres; + dValue[Deriv::dT] = densityDeriv[1] + + m_componentMolarWeight[m_CO2Index] * dConc_dTemp + - dConcDensVol_dTemp; + dValue[Deriv::dC+m_CO2Index] = m_componentMolarWeight[m_CO2Index] * dConc_dComp[m_CO2Index] + - dConcDensVol_dComp[m_CO2Index]; + dValue[Deriv::dC+m_waterIndex] = m_componentMolarWeight[m_CO2Index] * dConc_dComp[m_waterIndex] + - dConcDensVol_dComp[m_waterIndex]; + if( !useMass ) { - value = density - + m_componentMolarWeight[m_CO2Index] * conc - - concDensVol; - dValue[Deriv::dP] = densityDeriv[0] - + m_componentMolarWeight[m_CO2Index] * dConc_dPres - - dConcDensVol_dPres; - dValue[Deriv::dT] = densityDeriv[1] - + m_componentMolarWeight[m_CO2Index] * dConc_dTemp - - dConcDensVol_dTemp; - dValue[Deriv::dC+m_CO2Index] = m_componentMolarWeight[m_CO2Index] * dConc_dComp[m_CO2Index] - - dConcDensVol_dComp[m_CO2Index]; - dValue[Deriv::dC+m_waterIndex] = m_componentMolarWeight[m_CO2Index] * dConc_dComp[m_waterIndex] - - dConcDensVol_dComp[m_waterIndex]; - } - else - { - value = density / m_componentMolarWeight[m_waterIndex] - + conc - - concDensVol / m_componentMolarWeight[m_waterIndex]; - dValue[Deriv::dP] = densityDeriv[0] / m_componentMolarWeight[m_waterIndex] - + dConc_dPres - - dConcDensVol_dPres / m_componentMolarWeight[m_waterIndex]; - dValue[Deriv::dT] = densityDeriv[1] / m_componentMolarWeight[m_waterIndex] - + dConc_dTemp - - dConcDensVol_dTemp / m_componentMolarWeight[m_waterIndex]; - dValue[Deriv::dC+m_CO2Index] = dConc_dComp[m_CO2Index] - - dConcDensVol_dComp[m_CO2Index] / m_componentMolarWeight[m_waterIndex]; - dValue[Deriv::dC+m_waterIndex] = dConc_dComp[m_waterIndex] - - dConcDensVol_dComp[m_waterIndex] / m_componentMolarWeight[m_waterIndex]; + divideByPhaseMolarWeight( phaseComposition, dPhaseComposition, value, dValue ); } } diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.cpp 
b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.cpp index 8c818d5fe60..0c8c8466256 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.cpp @@ -35,12 +35,15 @@ namespace PVTProps PhillipsBrineViscosity::PhillipsBrineViscosity( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) { m_waterViscosityTable = PureWaterProperties::makeSaturationViscosityTable( m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_waterViscosityTable->print( m_waterViscosityTable->getName() ); makeCoefficients( inputPara ); } @@ -88,7 +91,7 @@ PhillipsBrineViscosity::createKernelWrapper() const m_coef1 ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, PhillipsBrineViscosity, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, PhillipsBrineViscosity, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // end namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.hpp index 90b2440c399..8edc4419d64 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PhillipsBrineViscosity.hpp @@ -82,7 +82,8 @@ class PhillipsBrineViscosity : public PVTFunctionBase PhillipsBrineViscosity( string const & name, string_array const & inputPara, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); virtual ~PhillipsBrineViscosity() override = default; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PureWaterProperties.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PureWaterProperties.cpp index 94d1bda2a57..af9a4f2c6da 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PureWaterProperties.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/PureWaterProperties.cpp @@ -36,9 +36,10 @@ PureWaterProperties::makeSaturationViscosityTable( string const & functionName, array1d< array1d< real64 > > temperatures; array1d< real64 > viscosities; + integer const nValues = 26; temperatures.resize( 1 ); - temperatures[0].resize( 26 ); - viscosities.resize( 26 ); + temperatures[0].resize( nValues ); + viscosities.resize( nValues ); temperatures[0][0] = 0.01; temperatures[0][1] = 10; @@ -116,9 +117,10 @@ PureWaterProperties::makeSaturationDensityTable( string const & functionName, array1d< array1d< real64 > > temperatures; array1d< real64 > densities; + integer const nValues = 26; temperatures.resize( 1 ); - temperatures[0].resize( 26 ); - densities.resize( 26 ); + temperatures[0].resize( nValues ); + densities.resize( nValues ); temperatures[0][0] = 0.01; temperatures[0][1] = 10; @@ -196,9 +198,10 @@ PureWaterProperties::makeSaturationPressureTable( string const & functionName, array1d< 
array1d< real64 > > temperatures; array1d< real64 > pressures; + integer const nValues = 26; temperatures.resize( 1 ); - temperatures[0].resize( 26 ); - pressures.resize( 26 ); + temperatures[0].resize( nValues ); + pressures.resize( nValues ); temperatures[0][0] = 0.01; temperatures[0][1] = 10; diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.cpp index 3ca6b6aee1f..fff330ed92e 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.cpp @@ -272,7 +272,8 @@ void SpanWagnerCO2Density::calculateCO2Density( string const & functionName, SpanWagnerCO2Density::SpanWagnerCO2Density( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, componentNames, componentMolarWeight ) @@ -281,6 +282,8 @@ SpanWagnerCO2Density::SpanWagnerCO2Density( string const & name, m_CO2Index = PVTFunctionHelpers::findName( componentNames, expectedCO2ComponentNames, "componentNames" ); m_CO2DensityTable = makeDensityTable( inputParams, m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_CO2DensityTable->print( m_CO2DensityTable->getName() ); } void SpanWagnerCO2Density::checkTablesParameters( real64 const pressure, @@ -298,7 +301,7 @@ SpanWagnerCO2Density::createKernelWrapper() const m_CO2Index ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, SpanWagnerCO2Density, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, SpanWagnerCO2Density, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.hpp index d1b2d266fe5..181559cd7bd 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/SpanWagnerCO2Density.hpp @@ -79,7 +79,8 @@ class SpanWagnerCO2Density : public PVTFunctionBase SpanWagnerCO2Density( string const &, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); static string catalogName() { return "SpanWagnerCO2Density"; } diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.cpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.cpp index e222d98b9a7..244bdcd73c5 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.cpp @@ -35,13 +35,16 @@ namespace PVTProps WaterDensity::WaterDensity( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ): + array1d< real64 > const & componentMolarWeight, + bool const printTable ): PVTFunctionBase( name, 
componentNames, componentMolarWeight ) { GEOS_UNUSED_VAR( inputParams ); m_waterDensityTable = PureWaterProperties::makeSaturationDensityTable( m_functionName, FunctionManager::getInstance() ); + if( printTable ) + m_waterDensityTable->print( m_waterDensityTable->getName() ); } void WaterDensity::checkTablesParameters( real64 const pressure, @@ -58,7 +61,7 @@ WaterDensity::createKernelWrapper() const *m_waterDensityTable ); } -REGISTER_CATALOG_ENTRY( PVTFunctionBase, WaterDensity, string const &, string_array const &, string_array const &, array1d< real64 > const & ) +REGISTER_CATALOG_ENTRY( PVTFunctionBase, WaterDensity, string const &, string_array const &, string_array const &, array1d< real64 > const &, bool const ) } // namespace PVTProps diff --git a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.hpp b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.hpp index 8cc49429498..e1bb09c0276 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/CO2Brine/functions/WaterDensity.hpp @@ -74,10 +74,10 @@ class WaterDensity : public PVTFunctionBase WaterDensity( string const & name, string_array const & inputParams, string_array const & componentNames, - array1d< real64 > const & componentMolarWeight ); + array1d< real64 > const & componentMolarWeight, + bool const printTable ); static string catalogName() { return "WaterDensity"; } - virtual string getCatalogName() const final { return catalogName(); } /** diff --git a/src/coreComponents/constitutive/fluid/multifluid/MultiFluidConstants.hpp b/src/coreComponents/constitutive/fluid/multifluid/MultiFluidConstants.hpp index b0c6585e41b..4f69f3efa17 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/MultiFluidConstants.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/MultiFluidConstants.hpp @@ -20,6 +20,7 @@ #define GEOS_CONSTITUTIVE_FLUID_MULTIFLUID_MULTIFLUIDCONSTANTS_HPP_ #include "LvArray/src/Macros.hpp" +#include "common/PhysicsConstants.hpp" namespace geos { @@ -40,16 +41,6 @@ struct MultiFluidConstants */ static constexpr integer MAX_NUM_PHASES = 4; - /** - * @brief Shorthand for pi - */ - static constexpr real64 pi = 3.141592653589793238; - - /** - * @brief Universal gas constant - */ - static constexpr real64 gasConstant = 8.31446261815324; - /** * @brief Epsilon used in the calculations to check against zero */ diff --git a/src/coreComponents/constitutive/fluid/multifluid/PVTDriverRunTest.hpp b/src/coreComponents/constitutive/fluid/multifluid/PVTDriverRunTest.hpp index ee9c5324a25..da522d27913 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/PVTDriverRunTest.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/PVTDriverRunTest.hpp @@ -64,14 +64,16 @@ void PVTDriver::runTest( FLUID_TYPE & fluid, arrayView2d< real64 > const & table // note: column indexing should be kept consistent with output file header below. 
integer const numSteps = m_numSteps; + integer const outputCompressibility = m_outputCompressibility; + integer const outputPhaseComposition = m_outputPhaseComposition; using ExecPolicy = typename FLUID_TYPE::exec_policy; forAll< ExecPolicy >( composition.size( 0 ), - [this, numPhases, numComponents, numSteps, kernelWrapper, - table, composition] + [ outputCompressibility, outputPhaseComposition, numPhases, numComponents, numSteps, kernelWrapper, + table, composition] GEOS_HOST_DEVICE ( localIndex const i ) { // Index for start of phase properties - integer const PHASE = m_outputCompressibility != 0 ? TEMP + 3 : TEMP + 2; + integer const PHASE = outputCompressibility != 0 ? TEMP + 3 : TEMP + 2; // Temporary space for phase mole fractions stackArray1d< real64, constitutive::MultiFluidBase::MAX_NUM_COMPONENTS > phaseComposition( numComponents ); @@ -81,7 +83,7 @@ void PVTDriver::runTest( FLUID_TYPE & fluid, arrayView2d< real64 > const & table kernelWrapper.update( i, 0, table( n, PRES ), table( n, TEMP ), composition[i] ); table( n, TEMP + 1 ) = kernelWrapper.totalDensity()( i, 0 ); - if( m_outputCompressibility != 0 ) + if( outputCompressibility != 0 ) { table( n, TEMP + 2 ) = kernelWrapper.totalCompressibility( i, 0 ); } @@ -92,7 +94,7 @@ void PVTDriver::runTest( FLUID_TYPE & fluid, arrayView2d< real64 > const & table table( n, PHASE + p + numPhases ) = kernelWrapper.phaseDensity()( i, 0, p ); table( n, PHASE + p + 2 * numPhases ) = kernelWrapper.phaseViscosity()( i, 0, p ); } - if( m_outputPhaseComposition != 0 ) + if( outputPhaseComposition != 0 ) { for( integer p = 0; p < numPhases; ++p ) { diff --git a/src/coreComponents/constitutive/fluid/multifluid/blackOil/BlackOilFluid.hpp b/src/coreComponents/constitutive/fluid/multifluid/blackOil/BlackOilFluid.hpp index bfd8bfedce4..52716ec7a4e 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/blackOil/BlackOilFluid.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/blackOil/BlackOilFluid.hpp @@ -340,7 +340,7 @@ BlackOilFluid::KernelWrapper:: real64 phaseMolecularWeight[NP_BO]{}; real64 dPhaseMolecularWeight[NP_BO][NC_BO+2]{}; - // 1. Convert to mass if necessary + // 1. 
Convert to moles if necessary if( m_useMass ) { @@ -529,8 +529,8 @@ BlackOilFluid::KernelWrapper:: phaseFraction.value[ipWater] = zw; // oil - phaseCompFraction.value[ipOil][icOil] = zo; - phaseCompFraction.value[ipOil][icGas] = zg; + phaseCompFraction.value[ipOil][icOil] = zo / ( 1 - zw ); + phaseCompFraction.value[ipOil][icGas] = zg / ( 1 - zw ); phaseCompFraction.value[ipOil][icWater] = 0.0; // gas @@ -542,8 +542,10 @@ BlackOilFluid::KernelWrapper:: { phaseFraction.derivs[ipOil][Deriv::dC+icWater] = -1.0; phaseFraction.derivs[ipWater][Deriv::dC+icWater] = 1.0; - phaseCompFraction.derivs[ipOil][icOil][Deriv::dC+icOil] = 1.0; - phaseCompFraction.derivs[ipOil][icGas][Deriv::dC+icGas] = 1.0; + phaseCompFraction.derivs[ipOil][icOil][Deriv::dC+icOil] = 1 / ( 1 - zw ); + phaseCompFraction.derivs[ipOil][icOil][Deriv::dC+icWater] = zo / (( 1 - zw )*( 1 - zw )); + phaseCompFraction.derivs[ipOil][icGas][Deriv::dC+icGas] = 1 / ( 1 - zw ); + phaseCompFraction.derivs[ipOil][icGas][Deriv::dC+icWater] = zg / (( 1 - zw )*( 1 - zw )); } } } diff --git a/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CompositionalProperties.cpp b/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CompositionalProperties.cpp index 2f9e760574a..165801fa177 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CompositionalProperties.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CompositionalProperties.cpp @@ -44,7 +44,7 @@ void CompositionalProperties::computeMolarDensity( integer const numComps, real64 & molarDensity ) { - real64 vEos = MultiFluidConstants::gasConstant * temperature * compressibilityFactor / pressure; + real64 vEos = constants::gasConstant * temperature * compressibilityFactor / pressure; real64 vCorrected = vEos; for( integer ic = 0; ic < numComps; ++ic ) @@ -91,17 +91,17 @@ void CompositionalProperties::computeMolarDensity( integer const numComps, real64 dvCorrected_dx = 0.0; // Pressure derivative - dvCorrected_dx = MultiFluidConstants::gasConstant * temperature * (dCompressibilityFactor_dp - compressibilityFactor / pressure) / pressure; + dvCorrected_dx = constants::gasConstant * temperature * (dCompressibilityFactor_dp - compressibilityFactor / pressure) / pressure; dMolarDensity_dp = -molarDensity * molarDensity * dvCorrected_dx; // Temperature derivative - dvCorrected_dx = MultiFluidConstants::gasConstant * (temperature * dCompressibilityFactor_dt + compressibilityFactor) / pressure; + dvCorrected_dx = constants::gasConstant * (temperature * dCompressibilityFactor_dt + compressibilityFactor) / pressure; dMolarDensity_dt = -molarDensity * molarDensity * dvCorrected_dx; // Composition derivative for( integer ic = 0; ic < numComps; ++ic ) { - dvCorrected_dx = MultiFluidConstants::gasConstant * temperature * dCompressibilityFactor_dz[ic] / pressure + volumeShift[ic]; + dvCorrected_dx = constants::gasConstant * temperature * dCompressibilityFactor_dz[ic] / pressure + volumeShift[ic]; dMolarDensity_dz[ic] = -molarDensity * molarDensity * dvCorrected_dx; } } diff --git a/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CubicEOSPhaseModel.hpp b/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CubicEOSPhaseModel.hpp index 2cfbaae1dac..8adfa65457e 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CubicEOSPhaseModel.hpp +++ 
b/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/CubicEOSPhaseModel.hpp @@ -627,8 +627,8 @@ solveCubicPolynomial( real64 const & m3, real64 const theta = acos( r / sqrt( qCubed ) ); real64 const qSqrt = sqrt( q ); roots[0] = -2 * qSqrt * cos( theta / 3 ) - a1 / 3; - roots[1] = -2 * qSqrt * cos( ( theta + 2 * MultiFluidConstants::pi ) / 3 ) - a1 / 3; - roots[2] = -2 * qSqrt * cos( ( theta + 4 * MultiFluidConstants::pi ) / 3 ) - a1 / 3; + roots[1] = -2 * qSqrt * cos( ( theta + 2 * constants::pi ) / 3 ) - a1 / 3; + roots[2] = -2 * qSqrt * cos( ( theta + 4 * constants::pi ) / 3 ) - a1 / 3; numRoots = 3; } // one real root diff --git a/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/RachfordRice.hpp b/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/RachfordRice.hpp index ed60c11f8c7..af7c05bb428 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/RachfordRice.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/compositional/functions/RachfordRice.hpp @@ -98,48 +98,37 @@ struct RachfordRice real64 currentError = 1 / epsilon; // step 2: start the SSI loop - real64 funcXMin = 0.0; + // Evaluate at the bounds + real64 funcXMin = evaluate( kValues, feed, presentComponentIds, xMin ); + real64 funcXMax = evaluate( kValues, feed, presentComponentIds, xMax ); real64 funcXMid = 0.0; - real64 funcXMax = 0.0; - bool recomputeMin = true; - bool recomputeMax = true; + + // If the bound values are the same sign then we have a trivial solution + if( 0.0 < funcXMin * funcXMax ) + { + gasPhaseMoleFraction = (0.0 < funcXMin) ? 1.0 : 0.0; + return gasPhaseMoleFraction; + } + integer SSIIteration = 0; while( ( currentError > SSITolerance ) && ( SSIIteration < maxSSIIterations ) ) { real64 const xMid = 0.5 * ( xMin + xMax ); - if( recomputeMin ) - { - funcXMin = evaluate( kValues, feed, presentComponentIds, xMin ); - } - if( recomputeMax ) - { - funcXMax = evaluate( kValues, feed, presentComponentIds, xMax ); - } funcXMid = evaluate( kValues, feed, presentComponentIds, xMid ); - if( ( funcXMin < 0 ) && ( funcXMax < 0 ) ) - { - return gasPhaseMoleFraction = 0.0; - } - else if( ( funcXMin > 1 ) && ( funcXMax > 1 ) ) - { - return gasPhaseMoleFraction = 1.0; - } - else if( funcXMin * funcXMid < 0.0 ) + if( 0.0 < funcXMax * funcXMid ) { xMax = xMid; - recomputeMax = true; - recomputeMin = false; + funcXMax = funcXMid; } - else if( funcXMax * funcXMid < 0.0 ) + else if( 0.0 < funcXMin * funcXMid ) { xMin = xMid; - recomputeMax = false; - recomputeMin = true; + funcXMin = funcXMid; } - currentError = LvArray::math::min( LvArray::math::abs( funcXMax - funcXMin ), + currentError = LvArray::math::min( LvArray::math::abs( funcXMid ), LvArray::math::abs( xMax - xMin ) ); SSIIteration++; @@ -151,26 +140,28 @@ struct RachfordRice // step 3: start the Newton loop integer newtonIteration = 0; real64 newtonValue = gasPhaseMoleFraction; + real64 funcNewton = evaluate( kValues, feed, presentComponentIds, newtonValue ); while( ( currentError > newtonTolerance ) && ( newtonIteration < maxNewtonIterations ) ) { - real64 const deltaNewton = -evaluate( kValues, feed, presentComponentIds, newtonValue ) - / evaluateDerivative( kValues, feed, presentComponentIds, newtonValue ); - currentError = LvArray::math::abs( deltaNewton ) / LvArray::math::abs( newtonValue ); + real64 deltaNewton = -funcNewton / evaluateDerivative( kValues, feed, presentComponentIds, newtonValue ); // test if we are stepping out of the [xMin;xMax] interval 
if( newtonValue + deltaNewton < xMin ) { - newtonValue = 0.5 * ( newtonValue + xMin ); + deltaNewton = 0.5 * ( xMin - newtonValue ); } else if( newtonValue + deltaNewton > xMax ) { - newtonValue = 0.5 * ( newtonValue + xMax ); - } - else - { - newtonValue = newtonValue + deltaNewton; + deltaNewton = 0.5 * ( xMax - newtonValue ); } + + newtonValue = newtonValue + deltaNewton; + + funcNewton = evaluate( kValues, feed, presentComponentIds, newtonValue ); + + currentError = LvArray::math::min( LvArray::math::abs( funcNewton ), + LvArray::math::abs( deltaNewton ) ); newtonIteration++; // TODO: add warning if max number of Newton iterations is reached diff --git a/src/coreComponents/constitutive/fluid/multifluid/compositional/models/ComponentProperties.hpp b/src/coreComponents/constitutive/fluid/multifluid/compositional/models/ComponentProperties.hpp index 903eba1c28a..2dec4ae3c57 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/compositional/models/ComponentProperties.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/compositional/models/ComponentProperties.hpp @@ -62,7 +62,7 @@ class ComponentProperties final * @brief Get the number of components * @return The number of components */ - integer getNumberOfComponents() const { return m_componentNames.size( 0 ); } + integer getNumberOfComponents() const { return m_componentNames.size(); } struct KernelWrapper { diff --git a/src/coreComponents/constitutive/fluid/multifluid/compositional/models/FunctionBase.hpp b/src/coreComponents/constitutive/fluid/multifluid/compositional/models/FunctionBase.hpp index 81dbb177da7..3ccb6c97f8a 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/compositional/models/FunctionBase.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/compositional/models/FunctionBase.hpp @@ -45,8 +45,8 @@ enum class FunctionType : integer class FunctionBaseUpdate { public: - - FunctionBaseUpdate() = default; + GEOS_HOST_DEVICE FunctionBaseUpdate(){} + GEOS_HOST_DEVICE FunctionBaseUpdate( FunctionBaseUpdate const & ){} /** * @brief Move the KernelWrapper to the given execution space, optionally touching it. 
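[Reviewer aside] The reworked RachfordRice::solve above evaluates the residual at the bracket endpoints once, returns the trivial solution when both endpoint values share a sign, reuses the midpoint value to update the bracket instead of re-evaluating the bounds, and clamps the Newton step so the iterate stays inside [xMin, xMax]. The following is a minimal standalone sketch of that bracketing-plus-Newton pattern only; the residual rr, its derivative, the tolerances, and the iteration caps are illustrative stand-ins and not the GEOS implementation.

#include <cmath>
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in Rachford-Rice residual F(V) = sum_i z_i (K_i - 1) / (1 + V (K_i - 1)).
double rr( std::vector< double > const & K, std::vector< double > const & z, double V )
{
  double f = 0.0;
  for( std::size_t i = 0; i < K.size(); ++i )
  {
    double const c = K[i] - 1.0;
    f += z[i] * c / ( 1.0 + V * c );
  }
  return f;
}

// Analytical derivative of the stand-in residual (always negative away from K_i = 1).
double rrDeriv( std::vector< double > const & K, std::vector< double > const & z, double V )
{
  double df = 0.0;
  for( std::size_t i = 0; i < K.size(); ++i )
  {
    double const c = K[i] - 1.0;
    double const d = 1.0 + V * c;
    df -= z[i] * c * c / ( d * d );
  }
  return df;
}

double solveSketch( std::vector< double > const & K, std::vector< double > const & z )
{
  double xMin = 0.0, xMax = 1.0;
  double fMin = rr( K, z, xMin );   // endpoints evaluated once, then cached
  double fMax = rr( K, z, xMax );

  // Same-sign endpoint values: no root inside [0,1], clamp to the trivial solution.
  if( fMin * fMax > 0.0 )
  {
    return fMin > 0.0 ? 1.0 : 0.0;
  }

  // Bisection phase: one residual evaluation per pass, cached value replaces the
  // endpoint whose sign matches the midpoint.
  double xMid = 0.5 * ( xMin + xMax );
  for( int it = 0; it < 20; ++it )
  {
    xMid = 0.5 * ( xMin + xMax );
    double const fMid = rr( K, z, xMid );
    if( fMid * fMax > 0.0 ) { xMax = xMid; fMax = fMid; }
    else                    { xMin = xMid; fMin = fMid; }
    if( std::fabs( fMid ) < 1.0e-3 || std::fabs( xMax - xMin ) < 1.0e-3 ) break;
  }

  // Newton refinement: halve the step toward the violated bound so the iterate
  // never leaves the bracket established by the bisection phase.
  double x = xMid;
  double f = rr( K, z, x );
  for( int it = 0; it < 20 && std::fabs( f ) > 1.0e-12; ++it )
  {
    double dx = -f / rrDeriv( K, z, x );
    if( x + dx < xMin )      dx = 0.5 * ( xMin - x );
    else if( x + dx > xMax ) dx = 0.5 * ( xMax - x );
    x += dx;
    f = rr( K, z, x );
    if( std::fabs( dx ) < 1.0e-12 ) break;
  }
  return x;
}

int main()
{
  std::vector< double > const K{ 2.5, 0.3 };
  std::vector< double > const z{ 0.4, 0.6 };
  std::printf( "vapor fraction = %f\n", solveSketch( K, z ) );
  return 0;
}

Caching the endpoint residuals keeps the bisection cost at one evaluation per step, and clamping the Newton update (rather than projecting onto the bound) mirrors the behavior added in the hunk above while guaranteeing the iterate remains bracketed.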
diff --git a/src/coreComponents/constitutive/fluid/multifluid/reactive/ReactiveBrineFluid.cpp b/src/coreComponents/constitutive/fluid/multifluid/reactive/ReactiveBrineFluid.cpp index 132834e5cef..f118a6a6819 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/reactive/ReactiveBrineFluid.cpp +++ b/src/coreComponents/constitutive/fluid/multifluid/reactive/ReactiveBrineFluid.cpp @@ -193,7 +193,8 @@ void ReactiveBrineFluid< PHASE > ::createPVTModels() InputError ); // then, we are ready to instantiate the phase models - m_phase = std::make_unique< PHASE >( getName() + "_phaseModel1", phase1InputParams, m_componentNames, m_componentMolarWeight ); + m_phase = std::make_unique< PHASE >( getName() + "_phaseModel1", phase1InputParams, m_componentNames, m_componentMolarWeight, + getLogLevel() > 0 && logger::internal::rank==0 ); } template< typename PHASE > diff --git a/src/coreComponents/constitutive/fluid/multifluid/reactive/chemicalReactions/KineticReactions.hpp b/src/coreComponents/constitutive/fluid/multifluid/reactive/chemicalReactions/KineticReactions.hpp index 7cd552f12a6..9b470dff5c0 100644 --- a/src/coreComponents/constitutive/fluid/multifluid/reactive/chemicalReactions/KineticReactions.hpp +++ b/src/coreComponents/constitutive/fluid/multifluid/reactive/chemicalReactions/KineticReactions.hpp @@ -22,7 +22,7 @@ #include "ReactionsBase.hpp" #include "constitutive/fluid/multifluid/Layouts.hpp" -#include "constitutive/fluid/multifluid/MultiFluidConstants.hpp" +#include "common/PhysicsConstants.hpp" namespace geos { @@ -43,7 +43,7 @@ class KineticReactions : public ReactionsBase { public: - static constexpr real64 RConst = MultiFluidConstants::gasConstant; + static constexpr real64 RConst = constants::gasConstant; KernelWrapper( integer const numPrimarySpecies, integer const numSecondarySpecies, diff --git a/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.cpp b/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.cpp index ed7a92429e2..96ffa8bf5f7 100644 --- a/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.cpp +++ b/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.cpp @@ -39,10 +39,10 @@ ThermalCompressibleSinglePhaseFluid::ThermalCompressibleSinglePhaseFluid( string setInputFlag( InputFlags::OPTIONAL ). setDescription( "Fluid thermal expansion coefficient. Unit: 1/K" ); - registerWrapper( viewKeyStruct::volumetricHeatCapacityString(), &m_volumetricHeatCapacity ). + registerWrapper( viewKeyStruct::specificHeatCapacityString(), &m_specificHeatCapacity ). setApplyDefaultValue( 0.0 ). setInputFlag( InputFlags::OPTIONAL ). - setDescription( "Fluid volumetric heat capacity. Unit: J/kg/K" ); + setDescription( "Fluid heat capacity. Unit: J/kg/K" ); registerWrapper( viewKeyStruct::referenceTemperatureString(), &m_referenceTemperature ). setApplyDefaultValue( 0.0 ). 
@@ -83,7 +83,7 @@ void ThermalCompressibleSinglePhaseFluid::postProcessInput() }; checkNonnegative( m_thermalExpansionCoeff, viewKeyStruct::thermalExpansionCoeffString() ); - checkNonnegative( m_volumetricHeatCapacity, viewKeyStruct::volumetricHeatCapacityString() ); + checkNonnegative( m_specificHeatCapacity, viewKeyStruct::specificHeatCapacityString() ); checkNonnegative( m_referenceInternalEnergy, viewKeyStruct::referenceInternalEnergyString() ); // Due to the way update wrapper is currently implemented, we can only support one model type @@ -101,7 +101,7 @@ ThermalCompressibleSinglePhaseFluid::createKernelWrapper() { return KernelWrapper( KernelWrapper::DensRelationType( m_referencePressure, m_referenceTemperature, m_referenceDensity, m_compressibility, -m_thermalExpansionCoeff ), KernelWrapper::ViscRelationType( m_referencePressure, m_referenceViscosity, m_viscosibility ), - KernelWrapper::IntEnergyRelationType( m_referenceTemperature, m_referenceInternalEnergy, m_volumetricHeatCapacity/m_referenceInternalEnergy ), + KernelWrapper::IntEnergyRelationType( m_referenceTemperature, m_referenceInternalEnergy, m_specificHeatCapacity/m_referenceInternalEnergy ), m_density, m_dDensity_dPressure, m_dDensity_dTemperature, diff --git a/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.hpp b/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.hpp index f3940c72efb..e20130bf9fb 100644 --- a/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.hpp +++ b/src/coreComponents/constitutive/fluid/singlefluid/ThermalCompressibleSinglePhaseFluid.hpp @@ -240,7 +240,7 @@ class ThermalCompressibleSinglePhaseFluid : public CompressibleSinglePhaseFluid struct viewKeyStruct : public CompressibleSinglePhaseFluid::viewKeyStruct { static constexpr char const * thermalExpansionCoeffString() { return "thermalExpansionCoeff"; } - static constexpr char const * volumetricHeatCapacityString() { return "volumetricHeatCapacity"; } + static constexpr char const * specificHeatCapacityString() { return "specificHeatCapacity"; } static constexpr char const * referenceTemperatureString() { return "referenceTemperature"; } static constexpr char const * referenceInternalEnergyString() { return "referenceInternalEnergy"; } static constexpr char const * internalEnergyModelTypeString() { return "internalEnergyModelType"; } @@ -256,7 +256,7 @@ class ThermalCompressibleSinglePhaseFluid : public CompressibleSinglePhaseFluid real64 m_thermalExpansionCoeff; /// scalar fluid volumetric heat capacity coefficient - real64 m_volumetricHeatCapacity; + real64 m_specificHeatCapacity; /// reference temperature parameter real64 m_referenceTemperature; diff --git a/src/coreComponents/constitutive/solid/CoupledSolidBase.hpp b/src/coreComponents/constitutive/solid/CoupledSolidBase.hpp index 28ef6d3be21..35306907a4e 100644 --- a/src/coreComponents/constitutive/solid/CoupledSolidBase.hpp +++ b/src/coreComponents/constitutive/solid/CoupledSolidBase.hpp @@ -180,21 +180,22 @@ class CoupledSolidBase : public ConstitutiveBase } /** - * @brief Const/non-mutable accessor for the mean stress increment at the previous sequential iteration + * @brief Const/non-mutable accessor for the mean total stress increment + * (with respect to the previous converged time level) at the previous sequential iteration * @return Accessor */ - arrayView2d< real64 const > const getMeanEffectiveStressIncrement_k() const + arrayView2d< real64 const > const 
getMeanTotalStressIncrement_k() const { - return getBasePorosityModel().getMeanEffectiveStressIncrement_k(); + return getBasePorosityModel().getMeanTotalStressIncrement_k(); } /** - * @brief Non-const accessor for the mean stress increment at the previous sequential iteration + * @brief Non-const accessor for the mean total stress increment at the previous sequential iteration * @return Accessor */ - arrayView1d< real64 > const getAverageMeanEffectiveStressIncrement_k() + arrayView1d< real64 > const getAverageMeanTotalStressIncrement_k() { - return getBasePorosityModel().getAverageMeanEffectiveStressIncrement_k(); + return getBasePorosityModel().getAverageMeanTotalStressIncrement_k(); } @@ -232,6 +233,20 @@ class CoupledSolidBase : public ConstitutiveBase SolidInternalEnergy const & getSolidInternalEnergyModel() const { return this->getParent().template getGroup< SolidInternalEnergy >( m_solidInternalEnergyModelName ); } + /** + * @brief get a PorosityBase constant reference to the porosity model + * return a constant PorosityBase reference to the porosity model + */ + PorosityBase const & getBasePorosityModel() const + { return this->getParent().template getGroup< PorosityBase >( m_porosityModelName ); } + + /** + * @brief get a PorosityBase reference to the porosity model + * return a PorosityBase reference to the porosity model + */ + PorosityBase & getBasePorosityModel() + { return this->getParent().template getGroup< PorosityBase >( m_porosityModelName ); } + protected: /// the name of the solid model @@ -248,21 +263,6 @@ class CoupledSolidBase : public ConstitutiveBase private: - /** - * @brief get a PorosityBase constant reference to the porosity model - * return a constant PorosityBase reference to the porosity model - */ - PorosityBase const & getBasePorosityModel() const - { return this->getParent().template getGroup< PorosityBase >( m_porosityModelName ); } - - /** - * @brief get a PorosityBase reference to the porosity model - * return a PorosityBase reference to the porosity model - */ - PorosityBase & getBasePorosityModel() - { return this->getParent().template getGroup< PorosityBase >( m_porosityModelName ); } - - /** * @brief get a Permeability base constant reference to the permeability model * return a constant PermeabilityBase reference to the permeability model diff --git a/src/coreComponents/constitutive/solid/ElasticOrthotropic.hpp b/src/coreComponents/constitutive/solid/ElasticOrthotropic.hpp index 42a42d545a5..56731703729 100644 --- a/src/coreComponents/constitutive/solid/ElasticOrthotropic.hpp +++ b/src/coreComponents/constitutive/solid/ElasticOrthotropic.hpp @@ -158,7 +158,7 @@ class ElasticOrthotropicUpdates : public SolidBaseUpdates GEOS_HOST_DEVICE virtual real64 getShearModulus( localIndex const k ) const override final { - return std::max( std::max( m_c44[k], m_c55[k] ), m_c66[k] ); + return LvArray::math::max( LvArray::math::max( m_c44[k], m_c55[k] ), m_c66[k] ); } private: diff --git a/src/coreComponents/constitutive/solid/ElasticTransverseIsotropic.hpp b/src/coreComponents/constitutive/solid/ElasticTransverseIsotropic.hpp index 3e361f892d1..de8833cbcb0 100644 --- a/src/coreComponents/constitutive/solid/ElasticTransverseIsotropic.hpp +++ b/src/coreComponents/constitutive/solid/ElasticTransverseIsotropic.hpp @@ -150,7 +150,7 @@ class ElasticTransverseIsotropicUpdates : public SolidBaseUpdates GEOS_HOST_DEVICE virtual real64 getShearModulus( localIndex const k ) const override final { - return std::max( m_c44[k], m_c66[k] ); + return LvArray::math::max( 
m_c44[k], m_c66[k] ); } diff --git a/src/coreComponents/constitutive/solid/PorousSolid.cpp b/src/coreComponents/constitutive/solid/PorousSolid.cpp index b4c8627a196..ed9cadb7bca 100644 --- a/src/coreComponents/constitutive/solid/PorousSolid.cpp +++ b/src/coreComponents/constitutive/solid/PorousSolid.cpp @@ -56,9 +56,9 @@ typedef PorousSolid< DruckerPragerExtended > PorousDruckerPragerExtended; typedef PorousSolid< Damage< ElasticIsotropic > > PorousDamageElasticIsotropic; typedef PorousSolid< DamageSpectral< ElasticIsotropic > > PorousDamageSpectralElasticIsotropic; typedef PorousSolid< DamageVolDev< ElasticIsotropic > > PorousDamageVolDevElasticIsotropic; -//typedef PorousSolid< DuvautLionsSolid< DruckerPrager > > PorousViscoDruckerPrager; -//typedef PorousSolid< DuvautLionsSolid< DruckerPragerExtended > > PorousViscoDruckerPragerExtended; -//typedef PorousSolid< DuvautLionsSolid< ModifiedCamClay > > PorousViscoModifiedCamClay; +typedef PorousSolid< DuvautLionsSolid< DruckerPrager > > PorousViscoDruckerPrager; +typedef PorousSolid< DuvautLionsSolid< DruckerPragerExtended > > PorousViscoDruckerPragerExtended; +typedef PorousSolid< DuvautLionsSolid< ModifiedCamClay > > PorousViscoModifiedCamClay; typedef PorousSolid< ModifiedCamClay > PorousModifiedCamClay; @@ -72,9 +72,9 @@ REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousDamageElasticIsotropic, string c REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousDamageSpectralElasticIsotropic, string const &, Group * const ) REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousDamageVolDevElasticIsotropic, string const &, Group * const ) REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousModifiedCamClay, string const &, Group * const ) -//REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousViscoDruckerPrager, string const &, Group * const ) -//REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousViscoDruckerPragerExtended, string const &, Group * const ) -//REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousViscoModifiedCamClay, string const &, Group * const ) +REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousViscoDruckerPrager, string const &, Group * const ) +REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousViscoDruckerPragerExtended, string const &, Group * const ) +REGISTER_CATALOG_ENTRY( ConstitutiveBase, PorousViscoModifiedCamClay, string const &, Group * const ) } diff --git a/src/coreComponents/constitutive/solid/PorousSolid.hpp b/src/coreComponents/constitutive/solid/PorousSolid.hpp index 3c035cc49f4..67a16bef411 100644 --- a/src/coreComponents/constitutive/solid/PorousSolid.hpp +++ b/src/coreComponents/constitutive/solid/PorousSolid.hpp @@ -147,11 +147,15 @@ class PorousSolidUpdates : public CoupledSolidUpdates< SOLID_TYPE, BiotPorosity, dTotalStress_dTemperature, // To pass something here stiffness ); - // Compute effective stress increment for the porosity + // Compute total stress increment for the porosity update GEOS_UNUSED_VAR( pressure_n, temperature_n ); real64 const bulkModulus = m_solidUpdate.getBulkModulus( k ); real64 const meanEffectiveStressIncrement = bulkModulus * ( strainIncrement[0] + strainIncrement[1] + strainIncrement[2] ); - m_porosityUpdate.updateMeanEffectiveStressIncrement( k, q, meanEffectiveStressIncrement ); + real64 const biotCoefficient = m_porosityUpdate.getBiotCoefficient( k ); + real64 const thermalExpansionCoefficient = m_solidUpdate.getThermalExpansionCoefficient( k ); + real64 const meanTotalStressIncrement = meanEffectiveStressIncrement - biotCoefficient * ( pressure - pressure_n ) + - 3 * thermalExpansionCoefficient * 
bulkModulus * ( temperature - temperature_n ); + m_porosityUpdate.updateMeanTotalStressIncrement( k, q, meanTotalStressIncrement ); } /** @@ -333,7 +337,7 @@ class PorousSolid : public CoupledSolid< SOLID_TYPE, BiotPorosity, ConstantPerme * @brief Catalog name * @return Static catalog string */ - static string catalogName() { return string( "Porous" ) + SOLID_TYPE::m_catalogNameString; } + static string catalogName() { return string( "Porous" ) + SOLID_TYPE::catalogName(); } /** * @brief Get catalog name diff --git a/src/coreComponents/constitutive/solid/SolidBase.hpp b/src/coreComponents/constitutive/solid/SolidBase.hpp index fb91c53c330..e91a3323242 100644 --- a/src/coreComponents/constitutive/solid/SolidBase.hpp +++ b/src/coreComponents/constitutive/solid/SolidBase.hpp @@ -557,11 +557,13 @@ class SolidBase : public constitutive::ConstitutiveBase static constexpr char const * defaultDensityString() { return "defaultDensity"; } ///< Default density key static constexpr char const * thermalExpansionCoefficientString() { return "thermalExpansionCoefficient"; } // Thermal expansion // coefficient key - static constexpr char const * defaultThermalExpansionCoefficientString() { return "defaultThermalExpansionCoefficient"; } // Default - // thermal - // expansion - // coefficient - // key + static constexpr char const * defaultThermalExpansionCoefficientString() { return "defaultDrainedLinearTEC"; } // Default + // drained + // linear + // thermal + // expansion + // coefficient + // key }; /** diff --git a/src/coreComponents/constitutive/solid/porosity/BiotPorosity.cpp b/src/coreComponents/constitutive/solid/porosity/BiotPorosity.cpp index 1575a666a92..1494de982b9 100644 --- a/src/coreComponents/constitutive/solid/porosity/BiotPorosity.cpp +++ b/src/coreComponents/constitutive/solid/porosity/BiotPorosity.cpp @@ -46,9 +46,9 @@ BiotPorosity::BiotPorosity( string const & name, Group * const parent ): registerField( fields::porosity::thermalExpansionCoefficient{}, &m_thermalExpansionCoefficient ); - registerField( fields::porosity::meanEffectiveStressIncrement_k{}, &m_meanEffectiveStressIncrement_k ); + registerField( fields::porosity::meanTotalStressIncrement_k{}, &m_meanTotalStressIncrement_k ); - registerField( fields::porosity::averageMeanEffectiveStressIncrement_k{}, &m_averageMeanEffectiveStressIncrement_k ); + registerField( fields::porosity::averageMeanTotalStressIncrement_k{}, &m_averageMeanTotalStressIncrement_k ); registerWrapper( viewKeyStruct::solidBulkModulusString(), &m_bulkModulus ). setApplyDefaultValue( 1e-6 ). 
@@ -60,7 +60,7 @@ void BiotPorosity::allocateConstitutiveData( dataRepository::Group & parent, { PorosityBase::allocateConstitutiveData( parent, numConstitutivePointsPerParentIndex ); - m_meanEffectiveStressIncrement_k.resize( 0, numConstitutivePointsPerParentIndex ); + m_meanTotalStressIncrement_k.resize( 0, numConstitutivePointsPerParentIndex ); } void BiotPorosity::postProcessInput() @@ -95,15 +95,15 @@ void BiotPorosity::initializeState() const void BiotPorosity::saveConvergedState() const { PorosityBase::saveConvergedState(); - m_meanEffectiveStressIncrement_k.zero(); - m_averageMeanEffectiveStressIncrement_k.zero(); + m_meanTotalStressIncrement_k.zero(); + m_averageMeanTotalStressIncrement_k.zero(); } void BiotPorosity::ignoreConvergedState() const { PorosityBase::ignoreConvergedState(); - m_meanEffectiveStressIncrement_k.zero(); - m_averageMeanEffectiveStressIncrement_k.zero(); + m_meanTotalStressIncrement_k.zero(); + m_averageMeanTotalStressIncrement_k.zero(); } diff --git a/src/coreComponents/constitutive/solid/porosity/BiotPorosity.hpp b/src/coreComponents/constitutive/solid/porosity/BiotPorosity.hpp index 428cad81576..918efea6b67 100644 --- a/src/coreComponents/constitutive/solid/porosity/BiotPorosity.hpp +++ b/src/coreComponents/constitutive/solid/porosity/BiotPorosity.hpp @@ -52,8 +52,8 @@ class BiotPorosityUpdates : public PorosityBaseUpdates arrayView1d< real64 > const & referencePorosity, arrayView1d< real64 > const & biotCoefficient, arrayView1d< real64 > const & thermalExpansionCoefficient, - arrayView2d< real64 > const & meanEffectiveStressIncrement_k, - arrayView1d< real64 const > const & averageMeanEffectiveStressIncrement_k, + arrayView2d< real64 > const & meanTotalStressIncrement_k, + arrayView1d< real64 > const & averageMeanTotalStressIncrement_k, arrayView1d< real64 > const & bulkModulus, real64 const & grainBulkModulus ): PorosityBaseUpdates( newPorosity, porosity_n, @@ -65,8 +65,8 @@ class BiotPorosityUpdates : public PorosityBaseUpdates m_thermalExpansionCoefficient( thermalExpansionCoefficient ), m_biotCoefficient( biotCoefficient ), m_bulkModulus( bulkModulus ), - m_meanEffectiveStressIncrement_k( meanEffectiveStressIncrement_k ), - m_averageMeanEffectiveStressIncrement_k( averageMeanEffectiveStressIncrement_k ) + m_meanTotalStressIncrement_k( meanTotalStressIncrement_k ), + m_averageMeanTotalStressIncrement_k( averageMeanTotalStressIncrement_k ) {} GEOS_HOST_DEVICE @@ -105,9 +105,7 @@ class BiotPorosityUpdates : public PorosityBaseUpdates GEOS_HOST_DEVICE void computePorosity( real64 const & deltaPressureFromBeginningOfTimeStep, - real64 const & deltaPressureFromLastIteration, real64 const & deltaTemperatureFromBeginningOfTimeStep, - real64 const & deltaTemperatureFromLastIteration, real64 const & porosity_n, real64 const & referencePorosity, real64 & porosity, @@ -115,7 +113,7 @@ class BiotPorosityUpdates : public PorosityBaseUpdates real64 & dPorosity_dTemperature, real64 const & biotCoefficient, real64 const & thermalExpansionCoefficient, - real64 const & meanEffectiveStressIncrement_k, + real64 const & averageMeanTotalStressIncrement_k, real64 const & bulkModulus ) const { real64 const biotSkeletonModulusInverse = (biotCoefficient - referencePorosity) / m_grainBulkModulus; @@ -123,17 +121,18 @@ class BiotPorosityUpdates : public PorosityBaseUpdates real64 const fixedStressPressureCoefficient = biotCoefficient * biotCoefficient / bulkModulus; real64 const fixedStressTemperatureCoefficient = 3 * biotCoefficient * thermalExpansionCoefficient; + // total 
stress formulation for porosity update porosity = porosity_n - + biotCoefficient * meanEffectiveStressIncrement_k / bulkModulus // change due to stress increment (at the previous - // sequential iteration) + + biotCoefficient * averageMeanTotalStressIncrement_k / bulkModulus // change due to stress increment (at the previous + // sequential iteration) + biotSkeletonModulusInverse * deltaPressureFromBeginningOfTimeStep // change due to pressure increment - porosityThermalExpansion * deltaTemperatureFromBeginningOfTimeStep; // change due to temperature increment dPorosity_dPressure = biotSkeletonModulusInverse; dPorosity_dTemperature = -porosityThermalExpansion; // Fixed-stress part - porosity += fixedStressPressureCoefficient * deltaPressureFromLastIteration // fixed-stress pressure term - + fixedStressTemperatureCoefficient * deltaTemperatureFromLastIteration; // fixed-stress temperature term + porosity += fixedStressPressureCoefficient * deltaPressureFromBeginningOfTimeStep // fixed-stress pressure term + + fixedStressTemperatureCoefficient * deltaTemperatureFromBeginningOfTimeStep; // fixed-stress temperature term dPorosity_dPressure += fixedStressPressureCoefficient; dPorosity_dTemperature += fixedStressTemperatureCoefficient; } @@ -142,21 +141,17 @@ class BiotPorosityUpdates : public PorosityBaseUpdates virtual void updateFromPressureAndTemperature( localIndex const k, localIndex const q, real64 const & pressure, // current - real64 const & pressure_k, // last iteration (for sequential) + real64 const & GEOS_UNUSED_PARAM( pressure_k ), // last iteration (for sequential) real64 const & pressure_n, // last time step real64 const & temperature, - real64 const & temperature_k, + real64 const & GEOS_UNUSED_PARAM( temperature_k ), real64 const & temperature_n ) const override final { real64 const deltaPressureFromBeginningOfTimeStep = pressure - pressure_n; - real64 const deltaPressureFromLastIteration = pressure - pressure_k; real64 const deltaTemperatureFromBeginningOfTimeStep = temperature - temperature_n; - real64 const deltaTemperatureFromLastIteration = temperature - temperature_k; computePorosity( deltaPressureFromBeginningOfTimeStep, - deltaPressureFromLastIteration, deltaTemperatureFromBeginningOfTimeStep, - deltaTemperatureFromLastIteration, m_porosity_n[k][q], m_referencePorosity[k], m_newPorosity[k][q], @@ -164,7 +159,7 @@ class BiotPorosityUpdates : public PorosityBaseUpdates m_dPorosity_dTemperature[k][q], m_biotCoefficient[k], m_thermalExpansionCoefficient[k], - m_averageMeanEffectiveStressIncrement_k[k], + m_averageMeanTotalStressIncrement_k[k], m_bulkModulus[k] ); } @@ -177,11 +172,11 @@ class BiotPorosityUpdates : public PorosityBaseUpdates } GEOS_HOST_DEVICE - void updateMeanEffectiveStressIncrement( localIndex const k, - localIndex const q, - real64 const & meanEffectiveStressIncrement ) const + void updateMeanTotalStressIncrement( localIndex const k, + localIndex const q, + real64 const & meanTotalStressIncrement ) const { - m_meanEffectiveStressIncrement_k[k][q] = meanEffectiveStressIncrement; + m_meanTotalStressIncrement_k[k][q] = meanTotalStressIncrement; } protected: @@ -198,11 +193,11 @@ class BiotPorosityUpdates : public PorosityBaseUpdates /// View on the bulk modulus (updated by PorousSolid) arrayView1d< real64 > const m_bulkModulus; - /// View on the mean stress increment at quadrature points (updated by PorousSolid) - arrayView2d< real64 > const m_meanEffectiveStressIncrement_k; + /// View on the mean total stress increment at quadrature points (updated by 
PorousSolid) + arrayView2d< real64 > const m_meanTotalStressIncrement_k; - /// View on the average mean stress increment - arrayView1d< real64 const > const m_averageMeanEffectiveStressIncrement_k; + /// View on the average mean total stress increment + arrayView1d< real64 > const m_averageMeanTotalStressIncrement_k; }; @@ -222,13 +217,13 @@ class BiotPorosity : public PorosityBase { static constexpr char const *grainBulkModulusString() { return "grainBulkModulus"; } - static constexpr char const *meanEffectiveStressIncrementString() { return "meanEffectiveStressIncrement"; } + static constexpr char const *meanTotalStressIncrementString() { return "meanTotalStressIncrement"; } - static constexpr char const *averageMeanEffectiveStressIncrementString() { return "averageMeanEffectiveStressIncrement"; } + static constexpr char const *averageMeanTotalStressIncrementString() { return "averageMeanTotalStressIncrement"; } static constexpr char const *solidBulkModulusString() { return "solidBulkModulus"; } - static constexpr char const *defaultThermalExpansionCoefficientString() { return "defaultThermalExpansionCoefficient"; } + static constexpr char const *defaultThermalExpansionCoefficientString() { return "defaultPorosityTEC"; } } viewKeys; virtual void initializeState() const override final; @@ -242,14 +237,21 @@ class BiotPorosity : public PorosityBase return m_biotCoefficient.toViewConst(); } - virtual arrayView1d< real64 > const getAverageMeanEffectiveStressIncrement_k() override final + virtual arrayView1d< real64 > const getAverageMeanTotalStressIncrement_k() override final { - return m_averageMeanEffectiveStressIncrement_k.toView(); + return m_averageMeanTotalStressIncrement_k.toView(); } - virtual arrayView2d< real64 const > const getMeanEffectiveStressIncrement_k() const override final + virtual arrayView2d< real64 const > const getMeanTotalStressIncrement_k() const override final { - return m_meanEffectiveStressIncrement_k.toViewConst(); + return m_meanTotalStressIncrement_k.toViewConst(); + } + + GEOS_HOST_DEVICE + void updateAverageMeanTotalStressIncrement( localIndex const k, + real64 const & averageMeanTotalStressIncrement ) const + { + m_averageMeanTotalStressIncrement_k[ k ] = averageMeanTotalStressIncrement; } using KernelWrapper = BiotPorosityUpdates; @@ -268,8 +270,8 @@ class BiotPorosity : public PorosityBase m_referencePorosity, m_biotCoefficient, m_thermalExpansionCoefficient, - m_meanEffectiveStressIncrement_k, - m_averageMeanEffectiveStressIncrement_k, + m_meanTotalStressIncrement_k, + m_averageMeanTotalStressIncrement_k, m_bulkModulus, m_grainBulkModulus ); } @@ -290,11 +292,11 @@ class BiotPorosity : public PorosityBase /// Bulk modulus (updated in the update class, not read in input) array1d< real64 > m_bulkModulus; - /// Mean stress increment (updated in the update class, not read in input) - array2d< real64 > m_meanEffectiveStressIncrement_k; + /// Mean total stress increment (updated in the update class, not read in input) + array2d< real64 > m_meanTotalStressIncrement_k; - /// Average mean stress increment (not read in input) - array1d< real64 > m_averageMeanEffectiveStressIncrement_k; + /// Average mean total stress increment (not read in input) + array1d< real64 > m_averageMeanTotalStressIncrement_k; /// Grain bulk modulus (read from XML) real64 m_grainBulkModulus; diff --git a/src/coreComponents/constitutive/solid/porosity/PorosityBase.hpp b/src/coreComponents/constitutive/solid/porosity/PorosityBase.hpp index 4a8ca5ea528..4485e30c089 100644 --- 
a/src/coreComponents/constitutive/solid/porosity/PorosityBase.hpp +++ b/src/coreComponents/constitutive/solid/porosity/PorosityBase.hpp @@ -266,29 +266,39 @@ class PorosityBase : public ConstitutiveBase } /** - * @brief Const/non-mutable accessor for the mean stress increment at the previous sequential iteration + * @brief Const/non-mutable accessor for the mean total stress increment at the previous sequential iteration * @return Accessor */ - virtual arrayView2d< real64 const > const getMeanEffectiveStressIncrement_k() const + virtual arrayView2d< real64 const > const getMeanTotalStressIncrement_k() const { - GEOS_ERROR( "getMeanEffectiveStressIncrement_k() not implemented for this model" ); + GEOS_ERROR( "getMeanTotalStressIncrement_k() not implemented for this model" ); array2d< real64 > out; return out.toViewConst(); } /** - * @brief Non-const accessor for the mean stress increment at the previous sequential iteration + * @brief Non-const accessor for the average mean total stress increment at the previous sequential iteration * @return Accessor */ - virtual arrayView1d< real64 > const getAverageMeanEffectiveStressIncrement_k() + virtual arrayView1d< real64 > const getAverageMeanTotalStressIncrement_k() { - GEOS_ERROR( "getAverageMeanEffectiveStressIncrement_k() not implemented for this model" ); + GEOS_ERROR( "getAverageMeanTotalStressIncrement_k() not implemented for this model" ); array1d< real64 > out; return out.toView(); } + GEOS_HOST_DEVICE + inline + void savePorosity( localIndex const k, + localIndex const q, + real64 const & porosity, + real64 const & dPorosity_dPressure ) const + { + m_newPorosity[k][q] = porosity; + m_dPorosity_dPressure[k][q] = dPorosity_dPressure; + } using KernelWrapper = PorosityBaseUpdates; diff --git a/src/coreComponents/constitutive/solid/porosity/PorosityFields.hpp b/src/coreComponents/constitutive/solid/porosity/PorosityFields.hpp index 6bfb2226e56..f36a43b440f 100644 --- a/src/coreComponents/constitutive/solid/porosity/PorosityFields.hpp +++ b/src/coreComponents/constitutive/solid/porosity/PorosityFields.hpp @@ -94,21 +94,21 @@ DECLARE_FIELD( thermalExpansionCoefficient, WRITE_AND_READ, "Thermal expansion coefficient" ); -DECLARE_FIELD( meanEffectiveStressIncrement_k, - "meanEffectiveStressIncrement_k", +DECLARE_FIELD( meanTotalStressIncrement_k, + "meanTotalStressIncrement_k", array2d< real64 >, 0, NOPLOT, NO_WRITE, - "Mean effective stress increment at quadrature points at the previous sequential iteration" ); + "Mean total stress increment at quadrature points at the previous sequential iteration" ); -DECLARE_FIELD( averageMeanEffectiveStressIncrement_k, - "averageMeanEffectiveStressIncrement_k", +DECLARE_FIELD( averageMeanTotalStressIncrement_k, + "averageMeanTotalStressIncrement_k", array1d< real64 >, 0, NOPLOT, NO_WRITE, - "Mean effective stress increment averaged over quadrature points at the previous sequential iteration" ); + "Mean total stress increment averaged over quadrature points at the previous sequential iteration" ); diff --git a/src/coreComponents/constitutive/unitTests/CMakeLists.txt b/src/coreComponents/constitutive/unitTests/CMakeLists.txt index 9c50765fa2c..add6c460a3f 100644 --- a/src/coreComponents/constitutive/unitTests/CMakeLists.txt +++ b/src/coreComponents/constitutive/unitTests/CMakeLists.txt @@ -12,7 +12,7 @@ set( gtest_geosx_tests testCubicEOS.cpp testRachfordRice.cpp ) -set( dependencyList gtest constitutive ${parallelDeps} ) +set( dependencyList gtest blas lapack constitutive ${parallelDeps} ) if( 
ENABLE_CUDA_NVTOOLSEXT ) list( APPEND dependencyList CUDA::nvToolsExt ) diff --git a/src/coreComponents/constitutive/unitTests/testRachfordRice.cpp b/src/coreComponents/constitutive/unitTests/testRachfordRice.cpp index 8f6014096cf..0b7f3230735 100644 --- a/src/coreComponents/constitutive/unitTests/testRachfordRice.cpp +++ b/src/coreComponents/constitutive/unitTests/testRachfordRice.cpp @@ -110,8 +110,45 @@ TEST( RachfordRiceTest, testRachfordRiceTwoComponents ) presentComponentIds.toSliceConst() ); checkRelativeError( vaporFraction4, expectedVaporFraction4, relTol ); -} + //////////////////////////////////////// + + kValues[0] = 0.9; + kValues[1] = 1.09733; + + feed[0] = 1.0e-10; + feed[1] = 1.0 - feed[0]; + + presentComponentIds[0] = 0; + presentComponentIds[1] = 1; + + real64 const expectedVaporFraction5 = 1; + real64 const vaporFraction5 = + RachfordRice::solve( kValues.toSliceConst(), + feed.toSliceConst(), + presentComponentIds.toSliceConst() ); + + checkRelativeError( vaporFraction5, expectedVaporFraction5, relTol ); + + //////////////////////////////////////// + + kValues[0] = 1.09733; + kValues[1] = 0.9; + + feed[0] = 1.0e-10; + feed[1] = 1.0 - feed[0]; + + presentComponentIds[0] = 0; + presentComponentIds[1] = 1; + + real64 const expectedVaporFraction6 = 0.0; + real64 const vaporFraction6 = + RachfordRice::solve( kValues.toSliceConst(), + feed.toSliceConst(), + presentComponentIds.toSliceConst() ); + + checkRelativeError( vaporFraction6, expectedVaporFraction6, relTol ); +} TEST( RachfordRiceTest, testRachfordRiceFourComponents ) { diff --git a/src/coreComponents/dataRepository/CMakeLists.txt b/src/coreComponents/dataRepository/CMakeLists.txt index bea120432a7..1b882eb42e5 100644 --- a/src/coreComponents/dataRepository/CMakeLists.txt +++ b/src/coreComponents/dataRepository/CMakeLists.txt @@ -59,8 +59,6 @@ blt_add_library( NAME dataRepository target_include_directories( dataRepository PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX dataRepository ) - if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) diff --git a/src/coreComponents/denseLinearAlgebra/CMakeLists.txt b/src/coreComponents/denseLinearAlgebra/CMakeLists.txt index 9077e6f68d2..3edb50f95d5 100644 --- a/src/coreComponents/denseLinearAlgebra/CMakeLists.txt +++ b/src/coreComponents/denseLinearAlgebra/CMakeLists.txt @@ -22,4 +22,3 @@ if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) endif( ) -geosx_add_code_checks( PREFIX denseLinearAlgebra ) diff --git a/src/coreComponents/discretizationMethods/CMakeLists.txt b/src/coreComponents/discretizationMethods/CMakeLists.txt index 515347cd410..e038df90710 100644 --- a/src/coreComponents/discretizationMethods/CMakeLists.txt +++ b/src/coreComponents/discretizationMethods/CMakeLists.txt @@ -22,4 +22,3 @@ blt_add_library( NAME discretizationMethods target_include_directories( discretizationMethods PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX discretizationMethods ) diff --git a/src/coreComponents/events/CMakeLists.txt b/src/coreComponents/events/CMakeLists.txt index 87311ad735c..8e87a2f62c6 100644 --- a/src/coreComponents/events/CMakeLists.txt +++ b/src/coreComponents/events/CMakeLists.txt @@ -35,4 +35,3 @@ blt_add_library( NAME events target_include_directories( events PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX events ) diff --git a/src/coreComponents/events/EventManager.cpp b/src/coreComponents/events/EventManager.cpp index d06353fae8d..3ebb6c2af44 100644 --- 
a/src/coreComponents/events/EventManager.cpp +++ b/src/coreComponents/events/EventManager.cpp @@ -21,6 +21,7 @@ #include "common/TimingMacros.hpp" #include "events/EventBase.hpp" #include "mesh/mpiCommunications/CommunicationTools.hpp" +#include "common/Units.hpp" namespace geos { @@ -38,6 +39,7 @@ EventManager::EventManager( string const & name, m_dt(), m_cycle(), m_currentSubEvent(), + // TODO: default to TimeOutputFormat::full? m_timeOutputFormat( TimeOutputFormat::seconds ) { setInputFlags( InputFlags::REQUIRED ); @@ -224,39 +226,48 @@ bool EventManager::run( DomainPartition & domain ) void EventManager::outputTime() const { + // The formatting here is a work in progress. + GEOS_LOG_RANK_0( GEOS_FMT( "\n" + "------------------- TIMESTEP START -------------------\n" + " - Time: {}\n" + " - Delta Time: {}\n" + " - Cycle: {}\n" + "------------------------------------------------------\n\n", + units::TimeFormatInfo::fromSeconds( m_time ), + units::TimeFormatInfo::fromSeconds( m_dt ), + m_cycle )); + + // We keep the old output format for compatibility with current log-reading scripts. if( m_timeOutputFormat==TimeOutputFormat::full ) { - integer const yearsOut = m_time / YEAR; - integer const daysOut = (m_time - yearsOut * YEAR) / DAY; - integer const hoursOut = (m_time - yearsOut * YEAR - daysOut * DAY) / HOUR; - integer const minutesOut = (m_time - yearsOut * YEAR - daysOut * DAY - hoursOut * HOUR) / MINUTE; - integer const secondsOut = m_time - yearsOut * YEAR - daysOut * DAY - hoursOut * HOUR - minutesOut * MINUTE; + units::TimeFormatInfo info = units::TimeFormatInfo::fromSeconds( m_time ); - GEOS_LOG_RANK_0( GEOS_FMT( "Time: {} years, {} days, {} hrs, {} min, {} s, dt: {} s, Cycle: {}", yearsOut, daysOut, hoursOut, minutesOut, secondsOut, m_dt, m_cycle ) ); + GEOS_LOG_RANK_0( GEOS_FMT( "Time: {} years, {} days, {} hrs, {} min, {} s, dt: {} s, Cycle: {}\n", + info.m_years, info.m_days, info.m_hours, info.m_minutes, info.m_seconds, m_dt, m_cycle ) ); } else if( m_timeOutputFormat==TimeOutputFormat::years ) { - real64 const yearsOut = m_time / YEAR; - GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} years, dt: {} s, Cycle: {}", yearsOut, m_dt, m_cycle ) ); + real64 const yearsOut = m_time / units::YearSeconds; + GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} years, dt: {} s, Cycle: {}\n", yearsOut, m_dt, m_cycle ) ); } else if( m_timeOutputFormat==TimeOutputFormat::days ) { - real64 const daysOut = m_time / DAY; - GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} days, dt: {} s, Cycle: {}", daysOut, m_dt, m_cycle ) ); + real64 const daysOut = m_time / units::DaySeconds; + GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} days, dt: {} s, Cycle: {}\n", daysOut, m_dt, m_cycle ) ); } else if( m_timeOutputFormat==TimeOutputFormat::hours ) { - real64 const hoursOut = m_time / HOUR; - GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} hrs, dt: {} s, Cycle: {}", hoursOut, m_dt, m_cycle ) ); + real64 const hoursOut = m_time / units::HourSeconds; + GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} hrs, dt: {} s, Cycle: {}\n", hoursOut, m_dt, m_cycle ) ); } else if( m_timeOutputFormat==TimeOutputFormat::minutes ) { - real64 const minutesOut = m_time / MINUTE; - GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} min, dt: {} s, Cycle: {}", minutesOut, m_dt, m_cycle ) ); + real64 const minutesOut = m_time / units::MinuteSeconds; + GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:.2f} min, dt: {} s, Cycle: {}\n", minutesOut, m_dt, m_cycle ) ); } else if( m_timeOutputFormat == TimeOutputFormat::seconds ) { - GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:4.2e} s, dt: {} s, Cycle:
{}", m_time, m_dt, m_cycle ) ); + GEOS_LOG_RANK_0( GEOS_FMT( "Time: {:4.2e} s, dt: {} s, Cycle: {}\n", m_time, m_dt, m_cycle ) ); } else { diff --git a/src/coreComponents/events/EventManager.hpp b/src/coreComponents/events/EventManager.hpp index a573ee15e7a..c839346d19a 100644 --- a/src/coreComponents/events/EventManager.hpp +++ b/src/coreComponents/events/EventManager.hpp @@ -126,15 +126,6 @@ class EventManager : public dataRepository::Group full }; - /// seconds in a minute - static constexpr integer MINUTE = 60; - /// seconds in a hour - static constexpr integer HOUR = 60 * MINUTE; - /// seconds in a day - static constexpr integer DAY = 24 * HOUR; - /// seconds in a year - static constexpr integer YEAR = 365 * DAY; - private: diff --git a/src/coreComponents/fieldSpecification/CMakeLists.txt b/src/coreComponents/fieldSpecification/CMakeLists.txt index a3c2e1d37e0..10ad2e8ec1a 100644 --- a/src/coreComponents/fieldSpecification/CMakeLists.txt +++ b/src/coreComponents/fieldSpecification/CMakeLists.txt @@ -37,4 +37,3 @@ blt_add_library( NAME fieldSpecification target_include_directories( fieldSpecification PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX fieldSpecification ) diff --git a/src/coreComponents/fileIO/CMakeLists.txt b/src/coreComponents/fileIO/CMakeLists.txt index 2d7fe04730c..c479846cc79 100644 --- a/src/coreComponents/fileIO/CMakeLists.txt +++ b/src/coreComponents/fileIO/CMakeLists.txt @@ -88,5 +88,4 @@ blt_add_library( NAME fileIO target_include_directories( fileIO PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks(PREFIX fileIO ) diff --git a/src/coreComponents/fileIO/Outputs/VTKOutput.cpp b/src/coreComponents/fileIO/Outputs/VTKOutput.cpp index d6c912d7bac..b9b6256bc1a 100644 --- a/src/coreComponents/fileIO/Outputs/VTKOutput.cpp +++ b/src/coreComponents/fileIO/Outputs/VTKOutput.cpp @@ -36,6 +36,7 @@ VTKOutput::VTKOutput( string const & name, m_plotLevel(), m_onlyPlotSpecifiedFieldNames(), m_fieldNames(), + m_levelNames(), m_writer( getOutputDirectory() + '/' + m_plotFileRoot ) { enableLogLevelInput(); @@ -70,6 +71,10 @@ VTKOutput::VTKOutput( string const & name, setInputFlag( InputFlags::OPTIONAL ). setDescription( "Names of the fields to output. If this attribute is specified, GEOSX outputs all the fields specified by the user, regardless of their `plotLevel`" ); + registerWrapper( viewKeysStruct::levelNames, &m_levelNames ). + setInputFlag( InputFlags::OPTIONAL ). + setDescription( "Names of mesh levels to output." ); + registerWrapper( viewKeysStruct::binaryString, &m_writeBinaryData ). setApplyDefaultValue( m_writeBinaryData ). setInputFlag( InputFlags::OPTIONAL ). 
@@ -88,6 +93,7 @@ void VTKOutput::postProcessInput() { m_writer.setOutputLocation( getOutputDirectory(), m_plotFileRoot ); m_writer.setFieldNames( m_fieldNames.toViewConst() ); + m_writer.setLevelNames( m_levelNames.toViewConst() ); m_writer.setOnlyPlotSpecifiedFieldNamesFlag( m_onlyPlotSpecifiedFieldNames ); string const fieldNamesString = viewKeysStruct::fieldNames; diff --git a/src/coreComponents/fileIO/Outputs/VTKOutput.hpp b/src/coreComponents/fileIO/Outputs/VTKOutput.hpp index 93abd3e865d..c87b92a0513 100644 --- a/src/coreComponents/fileIO/Outputs/VTKOutput.hpp +++ b/src/coreComponents/fileIO/Outputs/VTKOutput.hpp @@ -94,6 +94,7 @@ class VTKOutput : public OutputBase static constexpr auto outputRegionTypeString = "outputRegionType"; static constexpr auto onlyPlotSpecifiedFieldNames = "onlyPlotSpecifiedFieldNames"; static constexpr auto fieldNames = "fieldNames"; + static constexpr auto levelNames = "levelNames"; } vtkOutputViewKeys; /// @endcond @@ -120,6 +121,9 @@ class VTKOutput : public OutputBase /// array of names of the fields to output array1d< string > m_fieldNames; + /// array of names of the mesh levels to output (an empty array means all levels are saved) + array1d< string > m_levelNames; + /// VTK output mode vtk::VTKOutputMode m_writeBinaryData = vtk::VTKOutputMode::BINARY; diff --git a/src/coreComponents/fileIO/coupling/hdf5_interface b/src/coreComponents/fileIO/coupling/hdf5_interface index 5136554439e..7ee534586a6 160000 --- a/src/coreComponents/fileIO/coupling/hdf5_interface +++ b/src/coreComponents/fileIO/coupling/hdf5_interface @@ -1 +1 @@ -Subproject commit 5136554439e791dc5e948f2a74ede31c4c697ef5 +Subproject commit 7ee534586a6ea995532eb4e3cfc7da4e5ccff64a diff --git a/src/coreComponents/fileIO/doc/InputXMLFiles.rst b/src/coreComponents/fileIO/doc/InputXMLFiles.rst index 0a9267bf70c..fa5087e2cab 100644 --- a/src/coreComponents/fileIO/doc/InputXMLFiles.rst +++ b/src/coreComponents/fileIO/doc/InputXMLFiles.rst @@ -198,7 +198,7 @@ Advanced XML Features ================================= The `geosx_xml_tools` python package adds a set of advanced features to the GEOS xml format: units, parameters, and symbolic expressions. -See :ref:`PythonToolsSetup` for details on setup instructions, and :ref:`XMLToolsPackage` for package API details. +See `Python Tools Setup `_ for details on setup instructions, and `XML Parser Documentation `_ for package API details.
Usage diff --git a/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.cpp b/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.cpp index 1016b332851..fce83637687 100644 --- a/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.cpp +++ b/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.cpp @@ -1156,14 +1156,24 @@ void VTKPolyDataWriterInterface::writeVtmFile( integer const cycle, { if( meshLevel.isShallowCopy() ) - { return; + + string const & meshLevelName = meshLevel.getName(); + + if( !m_levelNames.empty()) + { + if( m_levelNames.find( meshLevelName ) == m_levelNames.end()) + return; } + string const & meshBodyName = meshBody.getName(); + ElementRegionManager const & elemManager = meshLevel.getElemManager(); + ParticleManager const & particleManager = meshLevel.getParticleManager(); - string const meshPath = joinPath( getCycleSubFolder( cycle ), meshBody.getName(), meshLevel.getName() ); + string const meshPath = joinPath( getCycleSubFolder( cycle ), meshBodyName, meshLevelName ); + int const mpiSize = MpiWrapper::commSize(); auto addElementRegion = [&]( ElementRegionBase const & region ) @@ -1180,8 +1190,9 @@ void VTKPolyDataWriterInterface::writeVtmFile( integer const cycle, auto addParticleRegion = [&]( ParticleRegionBase const & region ) { - std::vector< string > const blockPath{ meshBody.getName(), meshLevel.getName(), region.getCatalogName(), region.getName() }; - string const regionPath = joinPath( meshPath, region.getName() ); + string const & regionName = region.getName(); + std::vector< string > const blockPath{ meshBodyName, meshLevelName, region.getCatalogName(), regionName }; + string const regionPath = joinPath( meshPath, regionName ); for( int i = 0; i < mpiSize; i++ ) { string const dataSetName = getRankFileName( i ); @@ -1290,15 +1301,20 @@ void VTKPolyDataWriterInterface::write( real64 const time, { if( meshLevel.isShallowCopy() ) - { return; + + string const & meshLevelName = meshLevel.getName(); + + if( !m_levelNames.empty()) + { + if( m_levelNames.find( meshLevelName ) == m_levelNames.end()) + return; } ElementRegionManager const & elemManager = meshLevel.getElemManager(); ParticleManager const & particleManager = meshLevel.getParticleManager(); NodeManager const & nodeManager = meshLevel.getNodeManager(); EmbeddedSurfaceNodeManager const & embSurfNodeManager = meshLevel.getEmbSurfNodeManager(); - string const & meshLevelName = meshLevel.getName(); string const & meshBodyName = meshBody.getName(); if( m_requireFieldRegistrationCheck && !m_fieldNames.empty() ) diff --git a/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.hpp b/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.hpp index 8d1fc54290a..1baa383407c 100644 --- a/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.hpp +++ b/src/coreComponents/fileIO/vtk/VTKPolyDataWriterInterface.hpp @@ -148,6 +148,14 @@ class VTKPolyDataWriterInterface m_fieldNames.insert( fieldNames.begin(), fieldNames.end() ); } + /** + * @brief Set the names of the mesh levels to output + * @param[in] levelNames the mesh levels to output (an empty array means all levels are saved) + */ + void setLevelNames( arrayView1d< string const > const & levelNames ) + { + m_levelNames.insert( levelNames.begin(), levelNames.end() ); + } /** * @brief Main method of this class. Write all the files for one time step. 
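For context, a minimal sketch of how the new levelNames attribute might appear in a GEOS input deck; the surrounding element layout, the output name and the Level0 level name are illustrative assumptions, only the levelNames attribute itself is introduced by this change:

  <Outputs>
    <VTK
      name="vtkOutput"
      levelNames="{ Level0 }"/>
  </Outputs>

Leaving levelNames empty (or omitting it) keeps the existing behavior of writing every mesh level.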
@@ -318,6 +326,9 @@ class VTKPolyDataWriterInterface /// Names of the fields to output std::set< string > m_fieldNames; + /// Names of the mesh levels to output (an empty array means all levels are saved) + std::set< string > m_levelNames; + /// The previousCycle integer m_previousCycle; diff --git a/src/coreComponents/finiteElement/CMakeLists.txt b/src/coreComponents/finiteElement/CMakeLists.txt index da3e81afabb..d8ade51e37e 100644 --- a/src/coreComponents/finiteElement/CMakeLists.txt +++ b/src/coreComponents/finiteElement/CMakeLists.txt @@ -50,7 +50,6 @@ blt_add_library( NAME finiteElement target_include_directories( finiteElement PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX finiteElement ) if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) diff --git a/src/coreComponents/finiteVolume/CMakeLists.txt b/src/coreComponents/finiteVolume/CMakeLists.txt index e5ba8874456..9fc47c9411b 100644 --- a/src/coreComponents/finiteVolume/CMakeLists.txt +++ b/src/coreComponents/finiteVolume/CMakeLists.txt @@ -51,4 +51,3 @@ blt_add_library( NAME finiteVolume target_include_directories( finiteVolume PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX finiteVolume ) diff --git a/src/coreComponents/functions/CMakeLists.txt b/src/coreComponents/functions/CMakeLists.txt index 8ad064d8df8..f28de10f61f 100644 --- a/src/coreComponents/functions/CMakeLists.txt +++ b/src/coreComponents/functions/CMakeLists.txt @@ -45,4 +45,3 @@ target_include_directories( functions PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) endif() -geosx_add_code_checks( PREFIX functions ) diff --git a/src/coreComponents/functions/TableFunction.cpp b/src/coreComponents/functions/TableFunction.cpp index 98584bd0c1c..892b2289627 100644 --- a/src/coreComponents/functions/TableFunction.cpp +++ b/src/coreComponents/functions/TableFunction.cpp @@ -180,6 +180,77 @@ void TableFunction::checkCoord( real64 const coord, localIndex const dim ) const SimulationError ); } +void TableFunction::print( std::string const & filename ) const +{ + std::ofstream os( filename + ".csv" ); + + integer const numDimensions = LvArray::integerConversion< integer >( m_coordinates.size() ); + + if( numDimensions != 2 ) + { + // print header + + for( integer d = 0; d < numDimensions; d++ ) + { + os << units::getDescription( getDimUnit( d )) << ","; + } + os << units::getDescription( m_valueUnit ) << "\n"; + + // print values + + // prepare dividers + std::vector< integer > div( numDimensions ); + div[0] = 1; + for( integer d = 1; d < numDimensions; d++ ) + { + div[d] = div[d-1] * m_coordinates[d-1].size(); + } + // loop through all the values + for( integer v = 0; v < m_values.size(); v++ ) + { + // find coords indices + std::vector< integer > idx( numDimensions ); + integer r = v; + for( integer d = numDimensions-1; d >= 0; d-- ) + { + idx[d] = r / div[d]; + r = r % div[d]; + } + // finally print out in right order + for( integer d = 0; d < numDimensions; d++ ) + { + arraySlice1d< real64 const > const coords = m_coordinates[d]; + os << coords[idx[d]] << ","; + } + os << m_values[v] << "\n"; + } + } + else // numDimensions == 2 + { + arraySlice1d< real64 const > const coordsX = m_coordinates[0]; + arraySlice1d< real64 const > const coordsY = m_coordinates[1]; + integer const nX = coordsX.size(); + integer const nY = coordsY.size(); + os<, localIndex > ProblemManager:: if( feDiscretization != nullptr ) { - elemRegion.forElementSubRegions< CellElementSubRegion, 
FaceElementSubRegion >( [&]( auto & subRegion ) + elemRegion.forElementSubRegions< CellElementSubRegion >( [&]( auto & subRegion ) { std::unique_ptr< finiteElement::FiniteElementBase > newFE = feDiscretization->factory( subRegion.getElementType() ); @@ -961,6 +961,20 @@ map< std::tuple< string, string, string, string >, localIndex > ProblemManager:: numQuadraturePointsInList = std::max( numQuadraturePointsInList, numQuadraturePoints ); } ); } ); + + // For now, FaceElementSubRegions do not have a finite element type associated with them. They do not need one yet, and + // it would have to be a heterogeneous one because these subregions are usually heterogeneous. + elemRegion.forElementSubRegions< FaceElementSubRegion >( [&]( FaceElementSubRegion const & subRegion ) + { + localIndex & numQuadraturePointsInList = regionQuadrature[ std::make_tuple( meshBodyName, + meshLevel.getName(), + regionName, + subRegion.getName() ) ]; + + localIndex const numQuadraturePoints = 1; + + numQuadraturePointsInList = std::max( numQuadraturePointsInList, numQuadraturePoints ); + } ); } else //if( fvFluxApprox != nullptr ) { diff --git a/src/coreComponents/math/CMakeLists.txt b/src/coreComponents/math/CMakeLists.txt index 3dd93cfc12a..290f23808db 100644 --- a/src/coreComponents/math/CMakeLists.txt +++ b/src/coreComponents/math/CMakeLists.txt @@ -14,4 +14,3 @@ blt_add_library( NAME math target_include_directories( math INTERFACE ${CMAKE_SOURCE_DIR}/coreComponents ) -geosx_add_code_checks( PREFIX math ) diff --git a/src/coreComponents/mesh/CMakeLists.txt b/src/coreComponents/mesh/CMakeLists.txt index 32c815d0c34..52b2fd69b60 100644 --- a/src/coreComponents/mesh/CMakeLists.txt +++ b/src/coreComponents/mesh/CMakeLists.txt @@ -192,4 +192,3 @@ if( GEOS_ENABLE_TESTS ) add_subdirectory( unitTests ) endif( ) -geosx_add_code_checks( PREFIX mesh ) diff --git a/src/coreComponents/mesh/CellElementRegion.cpp b/src/coreComponents/mesh/CellElementRegion.cpp index e1ff92e4940..3d614db6606 100644 --- a/src/coreComponents/mesh/CellElementRegion.cpp +++ b/src/coreComponents/mesh/CellElementRegion.cpp @@ -28,7 +28,8 @@ CellElementRegion::CellElementRegion( string const & name, Group * const parent setInputFlag( InputFlags::REQUIRED ); registerWrapper( viewKeyStruct::coarseningRatioString(), &m_coarseningRatio ). - setInputFlag( InputFlags::OPTIONAL ); + setInputFlag( InputFlags::OPTIONAL ).
+ setApplyDefaultValue( 0.0 ); } CellElementRegion::~CellElementRegion() diff --git a/src/coreComponents/physicsSolvers/CMakeLists.txt b/src/coreComponents/physicsSolvers/CMakeLists.txt index 547cce5990c..def7de24ea1 100644 --- a/src/coreComponents/physicsSolvers/CMakeLists.txt +++ b/src/coreComponents/physicsSolvers/CMakeLists.txt @@ -21,8 +21,8 @@ set( physicsSolvers_headers fluidFlow/IsothermalCompositionalMultiphaseBaseKernels.hpp fluidFlow/ThermalCompositionalMultiphaseBaseKernels.hpp fluidFlow/CompositionalMultiphaseFVM.hpp - fluidFlow/IsothermalCompositionalMultiphaseFVMKernels.hpp - fluidFlow/IsothermalCompositionalMultiphaseFVMKernelUtilities.hpp + fluidFlow/IsothermalCompositionalMultiphaseFVMKernels.hpp + fluidFlow/IsothermalCompositionalMultiphaseFVMKernelUtilities.hpp fluidFlow/ThermalCompositionalMultiphaseFVMKernels.hpp fluidFlow/CompositionalMultiphaseHybridFVM.hpp fluidFlow/CompositionalMultiphaseHybridFVMKernels.hpp @@ -32,7 +32,7 @@ set( physicsSolvers_headers fluidFlow/ReactiveCompositionalMultiphaseOBLKernels.hpp fluidFlow/FlowSolverBase.hpp fluidFlow/FlowSolverBaseFields.hpp - fluidFlow/FlowSolverBaseKernels.hpp + fluidFlow/FlowSolverBaseKernels.hpp fluidFlow/FluxKernelsHelper.hpp fluidFlow/HybridFVMHelperKernels.hpp fluidFlow/proppantTransport/ProppantTransport.hpp @@ -66,6 +66,7 @@ set( physicsSolvers_headers multiphysics/CompositionalMultiphaseReservoirAndWells.hpp multiphysics/CoupledReservoirAndWellsBase.hpp multiphysics/CoupledSolver.hpp + multiphysics/PoromechanicsSolver.hpp multiphysics/FlowProppantTransportSolver.hpp multiphysics/HydrofractureSolver.hpp multiphysics/HydrofractureSolverKernels.hpp @@ -79,11 +80,11 @@ set( physicsSolvers_headers multiphysics/poromechanicsKernels/PoromechanicsBase.hpp multiphysics/poromechanicsKernels/SinglePhasePoromechanics.hpp multiphysics/poromechanicsKernels/SinglePhasePoromechanics_impl.hpp - multiphysics/poromechanicsKernels/SinglePhasePoromechanicsConformingFractures.hpp + multiphysics/poromechanicsKernels/SinglePhasePoromechanicsConformingFractures.hpp multiphysics/poromechanicsKernels/SinglePhasePoromechanicsEFEM.hpp multiphysics/poromechanicsKernels/SinglePhasePoromechanicsEFEM_impl.hpp multiphysics/poromechanicsKernels/SinglePhasePoromechanicsFractures.hpp - multiphysics/poromechanicsKernels/SinglePhasePoromechanicsEmbeddedFractures.hpp + multiphysics/poromechanicsKernels/SinglePhasePoromechanicsEmbeddedFractures.hpp multiphysics/poromechanicsKernels/ThermalMultiphasePoromechanics.hpp multiphysics/poromechanicsKernels/ThermalMultiphasePoromechanics_impl.hpp multiphysics/poromechanicsKernels/ThermalSinglePhasePoromechanics.hpp @@ -125,6 +126,8 @@ set( physicsSolvers_headers surfaceGeneration/ParallelTopologyChange.hpp surfaceGeneration/SurfaceGenerator.hpp surfaceGeneration/SurfaceGeneratorFields.hpp + surfaceGeneration/kernels/surfaceGenerationKernels.hpp + surfaceGeneration/kernels/surfaceGenerationKernelsHelpers.hpp wavePropagation/WaveSolverBase.hpp wavePropagation/WaveSolverUtils.hpp wavePropagation/WaveSolverBaseFields.hpp @@ -137,7 +140,9 @@ set( physicsSolvers_headers wavePropagation/AcousticFirstOrderWaveEquationSEM.hpp wavePropagation/AcousticFirstOrderWaveEquationSEMKernel.hpp wavePropagation/AcousticVTIWaveEquationSEM.hpp - wavePropagation/AcousticVTIWaveEquationSEMKernel.hpp ) + wavePropagation/AcousticVTIWaveEquationSEMKernel.hpp + wavePropagation/AcousticElasticWaveEquationSEM.hpp + wavePropagation/AcousticElasticWaveEquationSEMKernel.hpp ) # Specify solver sources set( physicsSolvers_sources @@ -199,7 
+204,8 @@ set( physicsSolvers_sources wavePropagation/ElasticWaveEquationSEM.cpp wavePropagation/ElasticFirstOrderWaveEquationSEM.cpp wavePropagation/AcousticFirstOrderWaveEquationSEM.cpp - wavePropagation/AcousticVTIWaveEquationSEM.cpp ) + wavePropagation/AcousticVTIWaveEquationSEM.cpp + wavePropagation/AcousticElasticWaveEquationSEM.cpp ) include( solidMechanics/kernels/SolidMechanicsKernels.cmake) include( multiphysics/poromechanicsKernels/PoromechanicsKernels.cmake) @@ -225,5 +231,4 @@ if( externalComponentDeps ) target_include_directories( physicsSolvers PUBLIC ${CMAKE_SOURCE_DIR}/externalComponents ) endif() -geosx_add_code_checks( PREFIX physicsSolvers ) diff --git a/src/coreComponents/physicsSolvers/FieldStatisticsBase.hpp b/src/coreComponents/physicsSolvers/FieldStatisticsBase.hpp index a15ba7778aa..fc1f4a2c5a3 100644 --- a/src/coreComponents/physicsSolvers/FieldStatisticsBase.hpp +++ b/src/coreComponents/physicsSolvers/FieldStatisticsBase.hpp @@ -23,6 +23,7 @@ #include "physicsSolvers/PhysicsSolverManager.hpp" #include "mainInterface/ProblemManager.hpp" #include "mesh/MeshLevel.hpp" +#include "fileIO/Outputs/OutputBase.hpp" namespace geos { @@ -45,7 +46,8 @@ class FieldStatisticsBase : public TaskBase FieldStatisticsBase( const string & name, Group * const parent ) : TaskBase( name, parent ), - m_solver( nullptr ) + m_solver( nullptr ), + m_outputDir( joinPath( OutputBase::getOutputDirectory(), name ) ) { enableLogLevelInput(); @@ -54,6 +56,11 @@ class FieldStatisticsBase : public TaskBase setRTTypeName( rtTypes::CustomTypes::groupNameRef ). setInputFlag( dataRepository::InputFlags::REQUIRED ). setDescription( "Name of the " + SOLVER::coupledSolverAttributePrefix() + " solver" ); + + this->registerWrapper( viewKeyStruct::writeCSVFlagString(), &m_writeCSV ). + setApplyDefaultValue( 0 ). + setInputFlag( dataRepository::InputFlags::OPTIONAL ). + setDescription( "Write statistics into a CSV file" ); } /** @@ -85,11 +92,33 @@ class FieldStatisticsBase : public TaskBase getDataContext(), m_solverName, LvArray::system::demangleType< SOLVER >() ), InputError ); + + // create dir for output + if( m_writeCSV > 0 ) + { + if( MpiWrapper::commRank() == 0 ) + { + makeDirsForPath( m_outputDir ); + } + // wait till the dir is created by rank 0 + MPI_Barrier( MPI_COMM_WORLD ); + } } + struct viewKeyStruct + { + static constexpr char const * writeCSVFlagString() { return "writeCSV"; } + }; + /// Pointer to the physics solver SOLVER * m_solver; + // Output directory + string const m_outputDir; + + // Flag to enable writing CSV output + integer m_writeCSV; + private: /// Name of the solver diff --git a/src/coreComponents/physicsSolvers/NonlinearSolverParameters.cpp b/src/coreComponents/physicsSolvers/NonlinearSolverParameters.cpp index 96ced600bbd..af316a23062 100644 --- a/src/coreComponents/physicsSolvers/NonlinearSolverParameters.cpp +++ b/src/coreComponents/physicsSolvers/NonlinearSolverParameters.cpp @@ -154,6 +154,11 @@ NonlinearSolverParameters::NonlinearSolverParameters( string const & name, setApplyDefaultValue( 0 ). setDescription( "Flag to decide whether to iterate between sequentially coupled solvers or not." ); + this->registerWrapper( viewKeysStruct::nonlinearAccelerationTypeString(), &m_nonlinearAccelerationType ). + setApplyDefaultValue( NonlinearAccelerationType::None ). + setInputFlag( dataRepository::InputFlags::OPTIONAL ). + setDescription( "Nonlinear acceleration type for sequential solver." 
); + } void NonlinearSolverParameters::postProcessInput() diff --git a/src/coreComponents/physicsSolvers/NonlinearSolverParameters.hpp b/src/coreComponents/physicsSolvers/NonlinearSolverParameters.hpp index 99e5dd52634..2a41b03d7d4 100644 --- a/src/coreComponents/physicsSolvers/NonlinearSolverParameters.hpp +++ b/src/coreComponents/physicsSolvers/NonlinearSolverParameters.hpp @@ -128,6 +128,7 @@ class NonlinearSolverParameters : public dataRepository::Group static constexpr char const * couplingTypeString() { return "couplingType"; } static constexpr char const * sequentialConvergenceCriterionString() { return "sequentialConvergenceCriterion"; } static constexpr char const * subcyclingOptionString() { return "subcycling"; } + static constexpr char const * nonlinearAccelerationTypeString() { return "nonlinearAccelerationType"; } } viewKeys; /** @@ -167,6 +168,15 @@ class NonlinearSolverParameters : public dataRepository::Group NumberOfNonlinearIterations ///< convergence achieved when the subproblems convergence is achieved in less than minNewtonIteration }; + /** + * @brief Nonlinear acceleration type + */ + enum class NonlinearAccelerationType : integer + { + None, ///< no acceleration + Aitken ///< Aitken acceleration + }; + /** * @brief Calculates the upper limit for the number of iterations to allow a * decrease to the next time step. @@ -305,6 +315,9 @@ class NonlinearSolverParameters : public dataRepository::Group /// Flag to specify whether subcycling is allowed or not in sequential schemes integer m_subcyclingOption; + /// Type of nonlinear acceleration for sequential solver + NonlinearAccelerationType m_nonlinearAccelerationType; + /// Value used to make sure that residual normalizers are not too small when computing residual norm real64 m_minNormalizer = 1e-12; }; @@ -326,6 +339,10 @@ ENUM_STRINGS( NonlinearSolverParameters::SequentialConvergenceCriterion, "ResidualNorm", "NumberOfNonlinearIterations" ); +ENUM_STRINGS( NonlinearSolverParameters::NonlinearAccelerationType, + "None", + "Aitken" ); + } /* namespace geos */ #endif /* GEOS_PHYSICSSOLVERS_NONLINEARSOLVERPARAMETERS_HPP_ */ diff --git a/src/coreComponents/physicsSolvers/SolverBase.cpp b/src/coreComponents/physicsSolvers/SolverBase.cpp index a20cdddfaf7..5ea5eea1754 100644 --- a/src/coreComponents/physicsSolvers/SolverBase.cpp +++ b/src/coreComponents/physicsSolvers/SolverBase.cpp @@ -273,7 +273,7 @@ bool SolverBase::execute( real64 const time_n, if( getLogLevel() >= 1 && dtRemaining > 0.0 ) { - GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}: sub-step = {}, accepted dt = {}, remaining dt = {}", getName(), subStep, dtAccepted, dtRemaining ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}: sub-step = {}, accepted dt = {}, next dt = {}, remaining dt = {}", getName(), subStep, dtAccepted, nextDt, dtRemaining ) ); } } @@ -292,32 +292,34 @@ real64 SolverBase::setNextDt( real64 const & currentDt, real64 const nextDtNewton = setNextDtBasedOnNewtonIter( currentDt ); real64 const nextDtStateChange = setNextDtBasedOnStateChange( currentDt, domain ); - if( nextDtNewton < nextDtStateChange ) // time step size decided based on convergence + if( nextDtNewton < nextDtStateChange ) // time step size decided based on convergence { integer const iterDecreaseLimit = m_nonlinearSolverParameters.timeStepDecreaseIterLimit(); integer const iterIncreaseLimit = m_nonlinearSolverParameters.timeStepIncreaseIterLimit(); if( nextDtNewton > currentDt ) { - GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}: Newton solver converged in less than {} iterations, time-step 
required will be increased.", - getName(), iterIncreaseLimit ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( + "{}: Newton solver converged in less than {} iterations, time-step required will be increased.", + getName(), iterIncreaseLimit )); } else if( nextDtNewton < currentDt ) { - GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}: Newton solver converged in more than {} iterations, time-step required will be decreased.", - getName(), iterDecreaseLimit ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( + "{}: Newton solver converged in more than {} iterations, time-step required will be decreased.", + getName(), iterDecreaseLimit )); } } - else // time step size decided based on state change + else // time step size decided based on state change { if( nextDtStateChange > currentDt ) { GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}: Time-step required will be increased based on state change.", - getName() ) ); + getName())); } else if( nextDtStateChange < currentDt ) { GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}: Time-step required will be decreased based on state change.", - getName() ) ); + getName())); } } @@ -355,6 +357,15 @@ real64 SolverBase::setNextDtBasedOnNewtonIter( real64 const & currentDt ) return nextDt; } + +real64 SolverBase::setNextDtBasedOnCFL( const geos::real64 & currentDt, geos::DomainPartition & domain ) +{ + GEOS_UNUSED_VAR( currentDt, domain ); + return LvArray::NumericLimits< real64 >::max; // i.e., not implemented +} + + + real64 SolverBase::linearImplicitStep( real64 const & time_n, real64 const & dt, integer const GEOS_UNUSED_PARAM( cycleNumber ), @@ -809,7 +820,7 @@ real64 SolverBase::nonlinearImplicitStep( real64 const & time_n, } else { - GEOS_ERROR( getDataContext() << ": Nonconverged solutions not allowed. Terminating..." ); + GEOS_ERROR( "Nonconverged solutions not allowed. Terminating..." 
); } } diff --git a/src/coreComponents/physicsSolvers/SolverBase.hpp b/src/coreComponents/physicsSolvers/SolverBase.hpp index 413113b0633..f6174cd9b57 100644 --- a/src/coreComponents/physicsSolvers/SolverBase.hpp +++ b/src/coreComponents/physicsSolvers/SolverBase.hpp @@ -153,7 +153,7 @@ class SolverBase : public ExecutableGroup * @param[in] currentDt the current time step size * @return the prescribed time step size */ - real64 setNextDtBasedOnNewtonIter( real64 const & currentDt ); + virtual real64 setNextDtBasedOnNewtonIter( real64 const & currentDt ); /** * @brief function to set the next dt based on state change * @param [in] currentDt the current time step size * @param[in] domain the domain object * @return the prescribed time step size */ virtual real64 setNextDtBasedOnStateChange( real64 const & currentDt, DomainPartition & domain ); + /** + * @brief function to set the next dt based on the CFL condition + * @param[in] currentDt the current time step size + * @param[in] domain the domain object + * @return the prescribed time step size + */ + virtual real64 setNextDtBasedOnCFL( real64 const & currentDt, + DomainPartition & domain ); + + + /** * @brief Entry function for an explicit time integration step * @param time_n time at the beginning of the step diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.cpp index e3b21b01324..6db5fe77bfa 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.cpp @@ -44,6 +44,7 @@ #include "physicsSolvers/fluidFlow/CompositionalMultiphaseBaseFields.hpp" #include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" #include "physicsSolvers/fluidFlow/IsothermalCompositionalMultiphaseBaseKernels.hpp" +#include "physicsSolvers/fluidFlow/IsothermalCompositionalMultiphaseFVMKernels.hpp" #include "physicsSolvers/fluidFlow/ThermalCompositionalMultiphaseBaseKernels.hpp" #if defined( __INTEL_COMPILER ) @@ -125,6 +126,12 @@ CompositionalMultiphaseBase::CompositionalMultiphaseBase( const string & name, setApplyDefaultValue( 1 ). setDescription( "Flag indicating whether local (cell-wise) chopping of negative compositions is allowed" ); + this->registerWrapper( viewKeyStruct::targetFlowCFLString(), &m_targetFlowCFL ). + setApplyDefaultValue( -1. ). + setInputFlag( InputFlags::OPTIONAL ). + setDescription( "Target CFL condition `CFL condition `_ " + "when computing the next timestep." ); + this->registerWrapper( viewKeyStruct::useTotalMassEquationString(), &m_useTotalMassEquation ). setSizedFromParent( 0 ). setInputFlag( InputFlags::OPTIONAL ). @@ -249,51 +256,61 @@ void CompositionalMultiphaseBase::registerDataOnMesh( Group & meshBodies ) [&]( localIndex const, ElementSubRegionBase & subRegion ) { + if( m_hasCapPressure ) { - if( m_hasCapPressure ) - { - subRegion.registerWrapper< string >( viewKeyStruct::capPressureNamesString() ). - setPlotLevel( PlotLevel::NOPLOT ). - setRestartFlags( RestartFlags::NO_WRITE ). - setSizedFromParent( 0 ). - setDescription( "Name of the capillary pressure constitutive model to use" ).
- reference(); - - string & capPresName = subRegion.getReference< string >( viewKeyStruct::capPressureNamesString() ); - capPresName = getConstitutiveName< CapillaryPressureBase >( subRegion ); - GEOS_THROW_IF( capPresName.empty(), - GEOS_FMT( "{}: Capillary pressure model not found on subregion {}", - getDataContext(), subRegion.getDataContext() ), - InputError ); - } - if( m_hasDiffusion ) - { - subRegion.registerWrapper< string >( viewKeyStruct::diffusionNamesString() ). - setPlotLevel( PlotLevel::NOPLOT ). - setRestartFlags( RestartFlags::NO_WRITE ). - setSizedFromParent( 0 ). - setDescription( "Name of the diffusion constitutive model to use" ); - - string & diffusionName = subRegion.getReference< string >( viewKeyStruct::diffusionNamesString() ); - diffusionName = getConstitutiveName< DiffusionBase >( subRegion ); - GEOS_THROW_IF( diffusionName.empty(), - GEOS_FMT( "Diffusion model not found on subregion {}", subRegion.getName() ), - InputError ); - } - if( m_hasDispersion ) - { - subRegion.registerWrapper< string >( viewKeyStruct::dispersionNamesString() ). - setPlotLevel( PlotLevel::NOPLOT ). - setRestartFlags( RestartFlags::NO_WRITE ). - setSizedFromParent( 0 ). - setDescription( "Name of the dispersion constitutive model to use" ); - - string & dispersionName = subRegion.getReference< string >( viewKeyStruct::dispersionNamesString() ); - dispersionName = getConstitutiveName< DispersionBase >( subRegion ); - GEOS_THROW_IF( dispersionName.empty(), - GEOS_FMT( "Dispersion model not found on subregion {}", subRegion.getName() ), - InputError ); - } + subRegion.registerWrapper< string >( viewKeyStruct::capPressureNamesString() ). + setPlotLevel( PlotLevel::NOPLOT ). + setRestartFlags( RestartFlags::NO_WRITE ). + setSizedFromParent( 0 ). + setDescription( "Name of the capillary pressure constitutive model to use" ). + reference(); + + string & capPresName = subRegion.getReference< string >( viewKeyStruct::capPressureNamesString() ); + capPresName = getConstitutiveName< CapillaryPressureBase >( subRegion ); + GEOS_THROW_IF( capPresName.empty(), + GEOS_FMT( "{}: Capillary pressure model not found on subregion {}", + getDataContext(), subRegion.getDataContext() ), + InputError ); + } + + if( m_hasDiffusion ) + { + subRegion.registerWrapper< string >( viewKeyStruct::diffusionNamesString() ). + setPlotLevel( PlotLevel::NOPLOT ). + setRestartFlags( RestartFlags::NO_WRITE ). + setSizedFromParent( 0 ). + setDescription( "Name of the diffusion constitutive model to use" ); + + string & diffusionName = subRegion.getReference< string >( viewKeyStruct::diffusionNamesString() ); + diffusionName = getConstitutiveName< DiffusionBase >( subRegion ); + GEOS_THROW_IF( diffusionName.empty(), + GEOS_FMT( "Diffusion model not found on subregion {}", subRegion.getName() ), + InputError ); + } + + if( m_hasDispersion ) + { + subRegion.registerWrapper< string >( viewKeyStruct::dispersionNamesString() ). + setPlotLevel( PlotLevel::NOPLOT ). + setRestartFlags( RestartFlags::NO_WRITE ). + setSizedFromParent( 0 ). + setDescription( "Name of the dispersion constitutive model to use" ); + + string & dispersionName = subRegion.getReference< string >( viewKeyStruct::dispersionNamesString() ); + dispersionName = getConstitutiveName< DispersionBase >( subRegion ); + GEOS_THROW_IF( dispersionName.empty(), + GEOS_FMT( "Dispersion model not found on subregion {}", subRegion.getName() ), + InputError ); + } + + if( m_targetFlowCFL > 0 ) + { + subRegion.registerField< fields::flow::phaseOutflux >( getName() ). 
+ reference().resizeDimension< 1 >( m_numPhases ); + subRegion.registerField< fields::flow::componentOutflux >( getName() ). + reference().resizeDimension< 1 >( m_numComponents ); + subRegion.registerField< fields::flow::phaseCFLNumber >( getName() ); + subRegion.registerField< fields::flow::componentCFLNumber >( getName() ); } string const & fluidName = subRegion.getReference< string >( viewKeyStruct::fluidNamesString() ); @@ -577,12 +594,12 @@ void CompositionalMultiphaseBase::validateConstitutiveModels( DomainPartition co } ); } -void CompositionalMultiphaseBase::updateComponentFraction( ObjectManagerBase & dataGroup ) const +void CompositionalMultiphaseBase::updateGlobalComponentFraction( ObjectManagerBase & dataGroup ) const { GEOS_MARK_FUNCTION; isothermalCompositionalMultiphaseBaseKernels:: - ComponentFractionKernelFactory:: + GlobalComponentFractionKernelFactory:: createAndLaunch< parallelDevicePolicy<> >( m_numComponents, dataGroup ); @@ -712,7 +729,7 @@ real64 CompositionalMultiphaseBase::updateFluidState( ObjectManagerBase & subReg { GEOS_MARK_FUNCTION; - updateComponentFraction( subRegion ); + updateGlobalComponentFraction( subRegion ); updateFluidModel( subRegion ); real64 const maxDeltaPhaseVolFrac = updatePhaseVolumeFraction( subRegion ); updateRelPermModel( subRegion ); @@ -725,6 +742,7 @@ real64 CompositionalMultiphaseBase::updateFluidState( ObjectManagerBase & subReg } void CompositionalMultiphaseBase::initializeFluidState( MeshLevel & mesh, + DomainPartition & domain, arrayView1d< string const > const & regionNames ) { GEOS_MARK_FUNCTION; @@ -763,6 +781,9 @@ void CompositionalMultiphaseBase::initializeFluidState( MeshLevel & mesh, } ); + // With the initial component densities defined, check whether they need to be corrected (chopped) to avoid zero diagonal entries in the Jacobian, etc. + chopNegativeDensities( domain ); + // for some reason CUDA does not want the host_device lambda to be defined inside the generic lambda // I need the exact type of the subRegion for updateSolidflowProperties to work well.
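// The subRegion-specific loop below then updates the solid and flow properties so that they are consistent with these initial (possibly chopped) component densities.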
mesh.getElemManager().forElementSubRegions< CellElementSubRegion, @@ -1194,7 +1215,7 @@ void CompositionalMultiphaseBase::initializePostInitialConditionsPreSubGroups() } ); // Initialize primary variables from applied initial conditions - initializeFluidState( mesh, regionNames ); + initializeFluidState( mesh, domain, regionNames ); mesh.getElemManager().forElementRegions< SurfaceElementRegion >( regionNames, [&]( localIndex const, @@ -1452,6 +1473,15 @@ void CompositionalMultiphaseBase::applySourceFluxBC( real64 const time, { return; } + if( !subRegion.hasWrapper( dofKey ) ) + { + if( fs.getLogLevel() >= 1 ) + { + GEOS_LOG_RANK( GEOS_FMT( "{}: trying to apply SourceFlux, but its targetSet named '{}' intersects with non-simulated region named '{}'.", + getDataContext(), setName, subRegion.getName() ) ); + } + return; + } arrayView1d< globalIndex const > const dofNumber = subRegion.getReference< array1d< globalIndex > >( dofKey ); arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); @@ -1928,6 +1958,7 @@ void CompositionalMultiphaseBase::chopNegativeDensities( DomainPartition & domai using namespace isothermalCompositionalMultiphaseBaseKernels; integer const numComp = m_numComponents; + real64 const minCompDens = m_minCompDens; forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, MeshLevel & mesh, @@ -1948,9 +1979,9 @@ void CompositionalMultiphaseBase::chopNegativeDensities( DomainPartition & domai { for( integer ic = 0; ic < numComp; ++ic ) { - if( compDens[ei][ic] < m_minCompDens ) + if( compDens[ei][ic] < minCompDens ) { - compDens[ei][ic] = m_minCompDens; + compDens[ei][ic] = minCompDens; } } } @@ -2002,8 +2033,9 @@ real64 CompositionalMultiphaseBase::setNextDtBasedOnStateChange( real64 const & { if( ghostRank[ei] < 0 ) { - subRegionMaxPresChange.max( LvArray::math::abs( pres[ei] - pres_n[ei] ) / LvArray::math::max( pres_n[ei], LvArray::NumericLimits< real64 >::epsilon ) ); - subRegionMaxTempChange.max( LvArray::math::abs( temp[ei] - temp_n[ei] ) / LvArray::math::max( temp_n[ei], LvArray::NumericLimits< real64 >::epsilon ) ); + // switch from relative to absolute when pressure less than 1 + subRegionMaxPresChange.max( LvArray::math::abs( pres[ei] - pres_n[ei] ) / LvArray::math::max( LvArray::math::abs( pres_n[ei] ), 1.0 ) ); + subRegionMaxTempChange.max( LvArray::math::abs( temp[ei] - temp_n[ei] ) / LvArray::math::max( LvArray::math::abs( temp_n[ei] ), 1.0 ) ); for( integer ip = 0; ip < numPhase; ++ip ) { subRegionMaxPhaseVolFracChange.max( LvArray::math::abs( phaseVolFrac[ei][ip] - phaseVolFrac_n[ei][ip] ) ); @@ -2047,6 +2079,172 @@ real64 CompositionalMultiphaseBase::setNextDtBasedOnStateChange( real64 const & return std::min( std::min( nextDtPressure, nextDtPhaseVolFrac ), nextDtTemperature ); } +real64 CompositionalMultiphaseBase::setNextDtBasedOnCFL( const geos::real64 & currentDt, geos::DomainPartition & domain ) +{ + + real64 maxPhaseCFL, maxCompCFL; + + computeCFLNumbers( domain, currentDt, maxPhaseCFL, maxCompCFL ); + + GEOS_LOG_LEVEL_RANK_0( 1, getName() << ": Max phase CFL number: " << maxPhaseCFL ); + GEOS_LOG_LEVEL_RANK_0( 1, getName() << ": Max component CFL number: " << maxCompCFL ); + + return std::min( m_targetFlowCFL*currentDt/maxCompCFL, m_targetFlowCFL*currentDt/maxPhaseCFL ); + +} + +void CompositionalMultiphaseBase::computeCFLNumbers( geos::DomainPartition & domain, const geos::real64 & dt, + geos::real64 & maxPhaseCFL, geos::real64 & maxCompCFL ) +{ + GEOS_MARK_FUNCTION; + + integer const numPhases = numFluidPhases(); + 
integer const numComps = numFluidComponents(); + + // Step 1: reset the arrays involved in the computation of CFL numbers + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView2d< real64, compflow::USD_PHASE > const & phaseOutflux = + subRegion.getField< fields::flow::phaseOutflux >(); + arrayView2d< real64, compflow::USD_COMP > const & compOutflux = + subRegion.getField< fields::flow::componentOutflux >(); + phaseOutflux.zero(); + compOutflux.zero(); + } ); + + // Step 2: compute the total volumetric outflux for each reservoir cell by looping over faces + NumericalMethodsManager & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase & fluxApprox = fvManager.getFluxApproximation( getDiscretizationName() ); + + isothermalCompositionalMultiphaseFVMKernels:: + CFLFluxKernel::CompFlowAccessors compFlowAccessors( mesh.getElemManager(), getName() ); + isothermalCompositionalMultiphaseFVMKernels:: + CFLFluxKernel::MultiFluidAccessors multiFluidAccessors( mesh.getElemManager(), getName() ); + isothermalCompositionalMultiphaseFVMKernels:: + CFLFluxKernel::PermeabilityAccessors permeabilityAccessors( mesh.getElemManager(), getName() ); + isothermalCompositionalMultiphaseFVMKernels:: + CFLFluxKernel::RelPermAccessors relPermAccessors( mesh.getElemManager(), getName() ); + + // TODO: find a way to compile with this modifiable accessors in CompFlowAccessors, and remove them from here + ElementRegionManager::ElementViewAccessor< arrayView2d< real64, compflow::USD_PHASE > > const phaseOutfluxAccessor = + mesh.getElemManager().constructViewAccessor< array2d< real64, compflow::LAYOUT_PHASE >, + arrayView2d< real64, compflow::USD_PHASE > >( fields::flow::phaseOutflux::key() ); + + ElementRegionManager::ElementViewAccessor< arrayView2d< real64, compflow::USD_COMP > > const compOutfluxAccessor = + mesh.getElemManager().constructViewAccessor< array2d< real64, compflow::LAYOUT_COMP >, + arrayView2d< real64, compflow::USD_COMP > >( fields::flow::componentOutflux::key() ); + + + fluxApprox.forAllStencils( mesh, [&] ( auto & stencil ) + { + + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + // While this kernel is waiting for a factory class, pass all the accessors here + isothermalCompositionalMultiphaseBaseKernels::KernelLaunchSelector1 + < isothermalCompositionalMultiphaseFVMKernels::CFLFluxKernel >( numComps, + numPhases, + dt, + stencilWrapper, + compFlowAccessors.get( fields::flow::pressure{} ), + compFlowAccessors.get( fields::flow::gravityCoefficient{} ), + compFlowAccessors.get( fields::flow::phaseVolumeFraction{} ), + permeabilityAccessors.get( fields::permeability::permeability{} ), + permeabilityAccessors.get( fields::permeability::dPerm_dPressure{} ), + relPermAccessors.get( fields::relperm::phaseRelPerm{} ), + multiFluidAccessors.get( fields::multifluid::phaseViscosity{} ), + multiFluidAccessors.get( fields::multifluid::phaseDensity{} ), + multiFluidAccessors.get( fields::multifluid::phaseMassDensity{} ), + multiFluidAccessors.get( fields::multifluid::phaseCompFraction{} ), + phaseOutfluxAccessor.toNestedView(), + compOutfluxAccessor.toNestedView() ); + } ); + } ); + + // Step 3: finalize the (cell-based) 
computation of the CFL numbers + real64 localMaxPhaseCFLNumber = 0.0; + real64 localMaxCompCFLNumber = 0.0; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView2d< real64 const, compflow::USD_PHASE > const & phaseOutflux = + subRegion.getField< fields::flow::phaseOutflux >(); + arrayView2d< real64 const, compflow::USD_COMP > const & compOutflux = + subRegion.getField< fields::flow::componentOutflux >(); + + arrayView1d< real64 > const & phaseCFLNumber = subRegion.getField< fields::flow::phaseCFLNumber >(); + arrayView1d< real64 > const & compCFLNumber = subRegion.getField< fields::flow::componentCFLNumber >(); + + arrayView1d< real64 const > const & volume = subRegion.getElementVolume(); + + arrayView2d< real64 const, compflow::USD_COMP > const & compDens = + subRegion.getField< fields::flow::globalCompDensity >(); + arrayView2d< real64 const, compflow::USD_COMP > const compFrac = + subRegion.getField< fields::flow::globalCompFraction >(); + arrayView2d< real64, compflow::USD_PHASE > const phaseVolFrac = + subRegion.getField< fields::flow::phaseVolumeFraction >(); + + Group const & constitutiveModels = subRegion.getGroup( ElementSubRegionBase::groupKeyStruct::constitutiveModelsString() ); + + string const & fluidName = subRegion.getReference< string >( CompositionalMultiphaseBase::viewKeyStruct::fluidNamesString() ); + MultiFluidBase const & fluid = constitutiveModels.getGroup< MultiFluidBase >( fluidName ); + arrayView3d< real64 const, multifluid::USD_PHASE > const & phaseVisc = fluid.phaseViscosity(); + + string const & relpermName = subRegion.getReference< string >( CompositionalMultiphaseBase::viewKeyStruct::relPermNamesString() ); + RelativePermeabilityBase const & relperm = constitutiveModels.getGroup< RelativePermeabilityBase >( relpermName ); + arrayView3d< real64 const, relperm::USD_RELPERM > const & phaseRelPerm = relperm.phaseRelPerm(); + arrayView4d< real64 const, relperm::USD_RELPERM_DS > const & dPhaseRelPerm_dPhaseVolFrac = relperm.dPhaseRelPerm_dPhaseVolFraction(); + + string const & solidName = subRegion.getReference< string >( CompositionalMultiphaseBase::viewKeyStruct::solidNamesString() ); + CoupledSolidBase const & solid = constitutiveModels.getGroup< CoupledSolidBase >( solidName ); + arrayView2d< real64 const > const & porosity = solid.getPorosity(); + + real64 subRegionMaxPhaseCFLNumber = 0.0; + real64 subRegionMaxCompCFLNumber = 0.0; + + isothermalCompositionalMultiphaseBaseKernels::KernelLaunchSelector2 + < isothermalCompositionalMultiphaseFVMKernels::CFLKernel >( numComps, numPhases, + subRegion.size(), + volume, + porosity, + compDens, + compFrac, + phaseVolFrac, + phaseRelPerm, + dPhaseRelPerm_dPhaseVolFrac, + phaseVisc, + phaseOutflux, + compOutflux, + phaseCFLNumber, + compCFLNumber, + subRegionMaxPhaseCFLNumber, + subRegionMaxCompCFLNumber ); + + localMaxPhaseCFLNumber = LvArray::math::max( localMaxPhaseCFLNumber, subRegionMaxPhaseCFLNumber ); + localMaxCompCFLNumber = LvArray::math::max( localMaxCompCFLNumber, subRegionMaxCompCFLNumber ); + + } ); + } ); + + maxPhaseCFL = MpiWrapper::max( localMaxPhaseCFLNumber ); + maxCompCFL = MpiWrapper::max( localMaxCompCFLNumber ); + +} + + void CompositionalMultiphaseBase::resetStateToBeginningOfStep( DomainPartition & domain ) { GEOS_MARK_FUNCTION; @@ -2242,7 +2440,16 @@ void 
CompositionalMultiphaseBase::updateState( DomainPartition & domain ) } ); } ); - GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( " {}: Max phase volume fraction change = {}", getName(), maxDeltaPhaseVolFrac ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( " {}: Max phase volume fraction change = {}", getName(), fmt::format( "{:.{}f}", maxDeltaPhaseVolFrac, 4 ) ) ); +} + +real64 CompositionalMultiphaseBase::setNextDt( const geos::real64 & currentDt, geos::DomainPartition & domain ) +{ + + if( m_targetFlowCFL<0 ) + return SolverBase::setNextDt( currentDt, domain ); + else + return setNextDtBasedOnCFL( currentDt, domain ); } } // namespace geos diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.hpp b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.hpp index a90c23fe6b4..5491db5e2d4 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseBase.hpp @@ -108,10 +108,10 @@ class CompositionalMultiphaseBase : public FlowSolverBase DomainPartition & domain ) override; /** - * @brief Recompute component fractions from primary variables (component densities) + * @brief Recompute global component fractions from primary variables (component densities) * @param dataGroup the group storing the required fields */ - void updateComponentFraction( ObjectManagerBase & dataGroup ) const; + void updateGlobalComponentFraction( ObjectManagerBase & dataGroup ) const; /** * @brief Recompute phase volume fractions (saturations) from constitutive and primary variables @@ -244,6 +244,8 @@ class CompositionalMultiphaseBase : public FlowSolverBase static constexpr char const * targetRelativePresChangeString() { return "targetRelativePressureChangeInTimeStep"; } static constexpr char const * targetRelativeTempChangeString() { return "targetRelativeTemperatureChangeInTimeStep"; } static constexpr char const * targetPhaseVolFracChangeString() { return "targetPhaseVolFractionChangeInTimeStep"; } + static constexpr char const * targetFlowCFLString() { return "targetFlowCFL"; } + // nonlinear solver parameters @@ -265,7 +267,7 @@ class CompositionalMultiphaseBase : public FlowSolverBase * from prescribed intermediate values (i.e. 
global densities from global fractions) * and any applicable hydrostatic equilibration of the domain */ - void initializeFluidState( MeshLevel & mesh, arrayView1d< string const > const & regionNames ); + void initializeFluidState( MeshLevel & mesh, DomainPartition & domain, arrayView1d< string const > const & regionNames ); /** * @brief Compute the hydrostatic equilibrium using the compositions and temperature input tables @@ -356,6 +358,20 @@ class CompositionalMultiphaseBase : public FlowSolverBase virtual real64 setNextDtBasedOnStateChange( real64 const & currentDt, DomainPartition & domain ) override; + void computeCFLNumbers( DomainPartition & domain, real64 const & dt, real64 & maxPhaseCFL, real64 & maxCompCFL ); + + /** + * @brief function to set the next time step size + * @param[in] currentDt the current time step size + * @param[in] domain the domain object + * @return the prescribed time step size + */ + real64 setNextDt( real64 const & currentDt, + DomainPartition & domain ) override; + + virtual real64 setNextDtBasedOnCFL( real64 const & currentDt, + DomainPartition & domain ) override; + virtual void initializePostInitialConditionsPreSubGroups() override; integer useTotalMassEquation() const { return m_useTotalMassEquation; } @@ -366,6 +382,7 @@ class CompositionalMultiphaseBase : public FlowSolverBase virtual void initializePreSubGroups() override; + /** * @brief Utility function that checks the consistency of the constitutive models * @param[in] domain the domain partition @@ -460,6 +477,9 @@ class CompositionalMultiphaseBase : public FlowSolverBase /// name of the fluid constitutive model used as a reference for component/phase description string m_referenceFluidModelName; + /// the targeted CFL for timestep + real64 m_targetFlowCFL; + private: /** diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.hpp b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.hpp index b44138bbb67..82776f70485 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.hpp @@ -180,14 +180,6 @@ class CompositionalMultiphaseFVM : public CompositionalMultiphaseBase virtual void initializePreSubGroups() override; - /** - * @brief Compute the largest CFL number in the domain - * @param dt the time step size - * @param domain the domain containing the mesh and fields - */ - void - computeCFLNumbers( real64 const & dt, DomainPartition & domain ); - struct DBCParameters { /// Flag to enable Dissipation Based Continuation Method diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.cpp b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.cpp index 5890a4665e7..5f7bf339c4b 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.cpp @@ -109,7 +109,37 @@ void CompositionalMultiphaseStatistics::registerDataOnMesh( Group & meshBodies ) regionStatistics.phaseMass.resizeDimension< 0 >( numPhases ); regionStatistics.trappedPhaseMass.resizeDimension< 0 >( numPhases ); regionStatistics.immobilePhaseMass.resizeDimension< 0 >( numPhases ); - regionStatistics.dissolvedComponentMass.resizeDimension< 0, 1 >( numPhases, numComps ); + regionStatistics.componentMass.resizeDimension< 0, 1 >( numPhases, numComps ); + + // write output header + if( m_writeCSV > 0 && 
MpiWrapper::commRank() == 0 ) + { + std::ofstream outputFile( m_outputDir + "/" + regionNames[i] + ".csv" ); + integer const useMass = m_solver->getReference< integer >( CompositionalMultiphaseBase::viewKeyStruct::useMassFlagString() ); + string const massUnit = useMass ? "kg" : "mol"; + outputFile << + "Time [s],Min pressure [Pa],Average pressure [Pa],Max pressure [Pa],Min delta pressure [Pa],Max delta pressure [Pa]," << + "Min temperature [K],Average temperature [K],Max temperature [K],Total dynamic pore volume [rm^3]"; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << ",Phase " << ip << " dynamic pore volume [rm^3]"; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << ",Phase " << ip << " mass [" << massUnit << "]"; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << ",Trapped phase " << ip << " mass (metric 1) [" << massUnit << "]"; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << ",Non-trapped phase " << ip << " mass (metric 1) [" << massUnit << "]"; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << ",Immobile phase " << ip << " mass (metric 2) [" << massUnit << "]"; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << ",Mobile phase " << ip << " mass (metric 2) [" << massUnit << "]"; + for( integer ip = 0; ip < numPhases; ++ip ) + { + for( integer ic = 0; ic < numComps; ++ic ) + outputFile << ",Component " << ic << " (phase " << ip << ") mass [" << massUnit << "]"; + } + outputFile << std::endl; + outputFile.close(); + } } } @@ -130,7 +160,7 @@ void CompositionalMultiphaseStatistics::registerDataOnMesh( Group & meshBodies ) } ); } -bool CompositionalMultiphaseStatistics::execute( real64 const GEOS_UNUSED_PARAM( time_n ), +bool CompositionalMultiphaseStatistics::execute( real64 const time_n, real64 const dt, integer const GEOS_UNUSED_PARAM( cycleNumber ), integer const GEOS_UNUSED_PARAM( eventCounter ), @@ -143,19 +173,22 @@ bool CompositionalMultiphaseStatistics::execute( real64 const GEOS_UNUSED_PARAM( { if( m_computeRegionStatistics ) { - computeRegionStatistics( mesh, regionNames ); + // current time is time_n + dt + computeRegionStatistics( time_n + dt, mesh, regionNames ); } } ); if( m_computeCFLNumbers ) { - computeCFLNumbers( dt, domain ); + // current time is time_n + dt + computeCFLNumbers( time_n + dt, dt, domain ); } return false; } -void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mesh, +void CompositionalMultiphaseStatistics::computeRegionStatistics( real64 const time, + MeshLevel & mesh, arrayView1d< string const > const & regionNames ) const { GEOS_MARK_FUNCTION; @@ -188,7 +221,7 @@ void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mes regionStatistics.phaseMass.setValues< serialPolicy >( 0.0 ); regionStatistics.trappedPhaseMass.setValues< serialPolicy >( 0.0 ); regionStatistics.immobilePhaseMass.setValues< serialPolicy >( 0.0 ); - regionStatistics.dissolvedComponentMass.setValues< serialPolicy >( 0.0 ); + regionStatistics.componentMass.setValues< serialPolicy >( 0.0 ); } // Step 2: increment the average/min/max quantities for all the subRegions @@ -237,7 +270,7 @@ void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mes array1d< real64 > subRegionTrappedPhaseMass( numPhases ); array1d< real64 > subRegionImmobilePhaseMass( numPhases ); array1d< real64 > subRegionRelpermPhaseMass( numPhases ); - array2d< real64 > subRegionDissolvedComponentMass( numPhases, numComps ); + array2d< real64 > subRegionComponentMass( numPhases, 
numComps ); isothermalCompositionalMultiphaseBaseKernels:: StatisticsKernel:: @@ -270,7 +303,7 @@ void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mes subRegionPhaseMass.toView(), subRegionTrappedPhaseMass.toView(), subRegionImmobilePhaseMass.toView(), - subRegionDissolvedComponentMass.toView() ); + subRegionComponentMass.toView() ); ElementRegionBase & region = elemManager.getRegion( subRegion.getParent().getParent().getName() ); RegionStatistics & regionStatistics = region.getReference< RegionStatistics >( viewKeyStruct::regionStatisticsString() ); @@ -314,7 +347,7 @@ void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mes for( integer ic = 0; ic < numComps; ++ic ) { - regionStatistics.dissolvedComponentMass[ip][ic] += subRegionDissolvedComponentMass[ip][ic]; + regionStatistics.componentMass[ip][ic] += subRegionComponentMass[ip][ic]; } } @@ -343,13 +376,25 @@ void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mes regionStatistics.totalPoreVolume += regionStatistics.phasePoreVolume[ip]; for( integer ic = 0; ic < numComps; ++ic ) { - regionStatistics.dissolvedComponentMass[ip][ic] = MpiWrapper::sum( regionStatistics.dissolvedComponentMass[ip][ic] ); + regionStatistics.componentMass[ip][ic] = MpiWrapper::sum( regionStatistics.componentMass[ip][ic] ); } } regionStatistics.averagePressure = MpiWrapper::sum( regionStatistics.averagePressure ); - regionStatistics.averagePressure /= regionStatistics.totalUncompactedPoreVolume; regionStatistics.averageTemperature = MpiWrapper::sum( regionStatistics.averageTemperature ); - regionStatistics.averageTemperature /= regionStatistics.totalUncompactedPoreVolume; + if( regionStatistics.totalUncompactedPoreVolume > 0 ) + { + real64 const invTotalUncompactedPoreVolume = 1.0 / regionStatistics.totalUncompactedPoreVolume; + regionStatistics.averagePressure *= invTotalUncompactedPoreVolume; + regionStatistics.averageTemperature *= invTotalUncompactedPoreVolume; + } + else + { + regionStatistics.averagePressure = 0.0; + regionStatistics.averageTemperature = 0.0; + GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] + << ": Cannot compute average pressure & temperature because region pore volume is zero." ); + } + // helpers to report statistics array1d< real64 > nonTrappedPhaseMass( numPhases ); @@ -363,191 +408,73 @@ void CompositionalMultiphaseStatistics::computeRegionStatistics( MeshLevel & mes integer const useMass = m_solver->getReference< integer >( CompositionalMultiphaseBase::viewKeyStruct::useMassFlagString() ); string const massUnit = useMass ? 
"kg" : "mol"; - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Pressure (min, average, max): " - << regionStatistics.minPressure << ", " << regionStatistics.averagePressure << ", " << regionStatistics.maxPressure << " Pa" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Delta pressure (min, max): " - << regionStatistics.minDeltaPressure << ", " << regionStatistics.maxDeltaPressure << " Pa" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Temperature (min, average, max): " - << regionStatistics.minTemperature << ", " << regionStatistics.averageTemperature << ", " << regionStatistics.maxTemperature << " K" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Total dynamic pore volume: " << regionStatistics.totalPoreVolume << " rm^3" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Phase dynamic pore volumes: " << regionStatistics.phasePoreVolume << " rm^3" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Phase mass: " << regionStatistics.phaseMass << " " << massUnit ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Pressure (min, average, max): {}, {}, {} Pa", + getName(), regionNames[i], time, regionStatistics.minPressure, regionStatistics.averagePressure, regionStatistics.maxPressure ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Delta pressure (min, max): {}, {} Pa", + getName(), regionNames[i], time, regionStatistics.minDeltaPressure, regionStatistics.maxDeltaPressure ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Temperature (min, average, max): {}, {}, {} K", + getName(), regionNames[i], time, regionStatistics.minTemperature, regionStatistics.averageTemperature, regionStatistics.maxTemperature ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Total dynamic pore volume: {} rm^3", + getName(), regionNames[i], time, regionStatistics.totalPoreVolume ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Phase dynamic pore volume: {} rm^3", + getName(), regionNames[i], time, regionStatistics.phasePoreVolume ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Phase mass: {} {}", + getName(), regionNames[i], time, regionStatistics.phaseMass, massUnit ) ); // metric 1: trapping computed with the Land trapping coefficient (similar to Eclipse) - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Trapped phase mass (metric 1): " << regionStatistics.trappedPhaseMass << " " << massUnit ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Non-trapped phase mass (metric 1): " << nonTrappedPhaseMass << " " << massUnit ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Trapped phase mass (metric 1): {} {}", + getName(), regionNames[i], time, regionStatistics.trappedPhaseMass, massUnit ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Non-trapped phase mass (metric 1): {} {}", + getName(), regionNames[i], time, nonTrappedPhaseMass, massUnit ) ); // metric 2: immobile phase mass computed with a threshold on relative permeability - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Immobile phase mass (metric 2): " << regionStatistics.immobilePhaseMass << " " << massUnit ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Mobile phase mass (metric 2): " << mobilePhaseMass << " " << massUnit ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Immobile phase mass 
(metric 2): {} {}", + getName(), regionNames[i], time, regionStatistics.immobilePhaseMass, massUnit ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Mobile phase mass (metric 2): {} {}", + getName(), regionNames[i], time, mobilePhaseMass, massUnit ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Component mass: {} {}", + getName(), regionNames[i], time, regionStatistics.componentMass, massUnit ) ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Dissolved component mass: " << regionStatistics.dissolvedComponentMass << " " << massUnit ); + if( m_writeCSV > 0 && MpiWrapper::commRank() == 0 ) + { + std::ofstream outputFile( m_outputDir + "/" + regionNames[i] + ".csv", std::ios_base::app ); + outputFile << time << "," << regionStatistics.minPressure << "," << regionStatistics.averagePressure << "," << regionStatistics.maxPressure << "," << + regionStatistics.minDeltaPressure << "," << regionStatistics.maxDeltaPressure << "," << regionStatistics.minTemperature << "," << + regionStatistics.averageTemperature << "," << regionStatistics.maxTemperature << "," << regionStatistics.totalPoreVolume; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << "," << regionStatistics.phasePoreVolume[ip]; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << "," << regionStatistics.phaseMass[ip]; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << "," << regionStatistics.trappedPhaseMass[ip]; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << "," << nonTrappedPhaseMass[ip]; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << "," << regionStatistics.immobilePhaseMass[ip]; + for( integer ip = 0; ip < numPhases; ++ip ) + outputFile << "," << mobilePhaseMass[ip]; + for( integer ip = 0; ip < numPhases; ++ip ) + { + for( integer ic = 0; ic < numComps; ++ic ) + outputFile << "," << regionStatistics.componentMass[ip][ic]; + } + outputFile << std::endl; + outputFile.close(); + } } } -void CompositionalMultiphaseStatistics::computeCFLNumbers( real64 const & dt, +void CompositionalMultiphaseStatistics::computeCFLNumbers( real64 const time, + real64 const dt, DomainPartition & domain ) const { GEOS_MARK_FUNCTION; + real64 maxPhaseCFL, maxCompCFL; + m_solver->computeCFLNumbers( domain, dt, maxPhaseCFL, maxCompCFL ); - integer const numPhases = m_solver->numFluidPhases(); - integer const numComps = m_solver->numFluidComponents(); - - // Step 1: reset the arrays involved in the computation of CFL numbers - m_solver->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - mesh.getElemManager().forElementSubRegions( regionNames, - [&]( localIndex const, - ElementSubRegionBase & subRegion ) - { - arrayView2d< real64, compflow::USD_PHASE > const & phaseOutflux = - subRegion.getField< fields::flow::phaseOutflux >(); - arrayView2d< real64, compflow::USD_COMP > const & compOutflux = - subRegion.getField< fields::flow::componentOutflux >(); - phaseOutflux.zero(); - compOutflux.zero(); - } ); - - // Step 2: compute the total volumetric outflux for each reservoir cell by looping over faces - NumericalMethodsManager & numericalMethodManager = domain.getNumericalMethodManager(); - FiniteVolumeManager & fvManager = numericalMethodManager.getFiniteVolumeManager(); - FluxApproximationBase & fluxApprox = fvManager.getFluxApproximation( m_solver->getDiscretizationName() ); - - isothermalCompositionalMultiphaseFVMKernels:: - 
CFLFluxKernel::CompFlowAccessors compFlowAccessors( mesh.getElemManager(), getName() ); - isothermalCompositionalMultiphaseFVMKernels:: - CFLFluxKernel::MultiFluidAccessors multiFluidAccessors( mesh.getElemManager(), getName() ); - isothermalCompositionalMultiphaseFVMKernels:: - CFLFluxKernel::PermeabilityAccessors permeabilityAccessors( mesh.getElemManager(), getName() ); - isothermalCompositionalMultiphaseFVMKernels:: - CFLFluxKernel::RelPermAccessors relPermAccessors( mesh.getElemManager(), getName() ); - - // TODO: find a way to compile with this modifiable accessors in CompFlowAccessors, and remove them from here - ElementRegionManager::ElementViewAccessor< arrayView2d< real64, compflow::USD_PHASE > > const phaseOutfluxAccessor = - mesh.getElemManager().constructViewAccessor< array2d< real64, compflow::LAYOUT_PHASE >, - arrayView2d< real64, compflow::USD_PHASE > >( fields::flow::phaseOutflux::key() ); - - ElementRegionManager::ElementViewAccessor< arrayView2d< real64, compflow::USD_COMP > > const compOutfluxAccessor = - mesh.getElemManager().constructViewAccessor< array2d< real64, compflow::LAYOUT_COMP >, - arrayView2d< real64, compflow::USD_COMP > >( fields::flow::componentOutflux::key() ); - - - fluxApprox.forAllStencils( mesh, [&] ( auto & stencil ) - { - - typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); - - // While this kernel is waiting for a factory class, pass all the accessors here - isothermalCompositionalMultiphaseBaseKernels::KernelLaunchSelector1 - < isothermalCompositionalMultiphaseFVMKernels::CFLFluxKernel >( numComps, - numPhases, - dt, - stencilWrapper, - compFlowAccessors.get( fields::flow::pressure{} ), - compFlowAccessors.get( fields::flow::gravityCoefficient{} ), - compFlowAccessors.get( fields::flow::phaseVolumeFraction{} ), - permeabilityAccessors.get( fields::permeability::permeability{} ), - permeabilityAccessors.get( fields::permeability::dPerm_dPressure{} ), - relPermAccessors.get( fields::relperm::phaseRelPerm{} ), - multiFluidAccessors.get( fields::multifluid::phaseViscosity{} ), - multiFluidAccessors.get( fields::multifluid::phaseDensity{} ), - multiFluidAccessors.get( fields::multifluid::phaseMassDensity{} ), - multiFluidAccessors.get( fields::multifluid::phaseCompFraction{} ), - phaseOutfluxAccessor.toNestedView(), - compOutfluxAccessor.toNestedView() ); - } ); - } ); - - // Step 3: finalize the (cell-based) computation of the CFL numbers - real64 localMaxPhaseCFLNumber = 0.0; - real64 localMaxCompCFLNumber = 0.0; - - m_solver->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - mesh.getElemManager().forElementSubRegions( regionNames, - [&]( localIndex const, - ElementSubRegionBase & subRegion ) - { - arrayView2d< real64 const, compflow::USD_PHASE > const & phaseOutflux = - subRegion.getField< fields::flow::phaseOutflux >(); - arrayView2d< real64 const, compflow::USD_COMP > const & compOutflux = - subRegion.getField< fields::flow::componentOutflux >(); - - arrayView1d< real64 > const & phaseCFLNumber = subRegion.getField< fields::flow::phaseCFLNumber >(); - arrayView1d< real64 > const & compCFLNumber = subRegion.getField< fields::flow::componentCFLNumber >(); - - arrayView1d< real64 const > const & volume = subRegion.getElementVolume(); - - arrayView2d< real64 const, compflow::USD_COMP > const & compDens = - subRegion.getField< fields::flow::globalCompDensity >(); - arrayView2d< real64 const, 
compflow::USD_COMP > const compFrac = - subRegion.getField< fields::flow::globalCompFraction >(); - arrayView2d< real64, compflow::USD_PHASE > const phaseVolFrac = - subRegion.getField< fields::flow::phaseVolumeFraction >(); - - Group const & constitutiveModels = subRegion.getGroup( ElementSubRegionBase::groupKeyStruct::constitutiveModelsString() ); - - string const & fluidName = subRegion.getReference< string >( CompositionalMultiphaseBase::viewKeyStruct::fluidNamesString() ); - MultiFluidBase const & fluid = constitutiveModels.getGroup< MultiFluidBase >( fluidName ); - arrayView3d< real64 const, multifluid::USD_PHASE > const & phaseVisc = fluid.phaseViscosity(); - - string const & relpermName = subRegion.getReference< string >( CompositionalMultiphaseBase::viewKeyStruct::relPermNamesString() ); - RelativePermeabilityBase const & relperm = constitutiveModels.getGroup< RelativePermeabilityBase >( relpermName ); - arrayView3d< real64 const, relperm::USD_RELPERM > const & phaseRelPerm = relperm.phaseRelPerm(); - arrayView4d< real64 const, relperm::USD_RELPERM_DS > const & dPhaseRelPerm_dPhaseVolFrac = relperm.dPhaseRelPerm_dPhaseVolFraction(); - - string const & solidName = subRegion.getReference< string >( CompositionalMultiphaseBase::viewKeyStruct::solidNamesString() ); - CoupledSolidBase const & solid = constitutiveModels.getGroup< CoupledSolidBase >( solidName ); - arrayView2d< real64 const > const & porosity = solid.getPorosity(); - - real64 subRegionMaxPhaseCFLNumber = 0.0; - real64 subRegionMaxCompCFLNumber = 0.0; - - isothermalCompositionalMultiphaseBaseKernels::KernelLaunchSelector2 - < isothermalCompositionalMultiphaseFVMKernels::CFLKernel >( numComps, numPhases, - subRegion.size(), - volume, - porosity, - compDens, - compFrac, - phaseVolFrac, - phaseRelPerm, - dPhaseRelPerm_dPhaseVolFrac, - phaseVisc, - phaseOutflux, - compOutflux, - phaseCFLNumber, - compCFLNumber, - subRegionMaxPhaseCFLNumber, - subRegionMaxCompCFLNumber ); - - localMaxPhaseCFLNumber = LvArray::math::max( localMaxPhaseCFLNumber, subRegionMaxPhaseCFLNumber ); - localMaxCompCFLNumber = LvArray::math::max( localMaxCompCFLNumber, subRegionMaxCompCFLNumber ); - - } ); - } ); - - real64 const globalMaxPhaseCFLNumber = MpiWrapper::max( localMaxPhaseCFLNumber ); - real64 const globalMaxCompCFLNumber = MpiWrapper::max( localMaxCompCFLNumber ); - - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ": Max phase CFL number: " << globalMaxPhaseCFLNumber ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ": Max component CFL number: " << globalMaxCompCFLNumber ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{} (time {} s): Max phase CFL number: {}", getName(), time, maxPhaseCFL ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{} (time {} s): Max component CFL number: {}", getName(), time, maxCompCFL ) ); } diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.hpp b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.hpp index 97abb1abb5e..a4a46ad166a 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseStatistics.hpp @@ -115,26 +115,30 @@ class CompositionalMultiphaseStatistics : public FieldStatisticsBase< Compositio array1d< real64 > trappedPhaseMass; /// immobile region phase mass array1d< real64 > immobilePhaseMass; - /// dissolved region component mass - array2d< real64 > dissolvedComponentMass; + /// region component mass + array2d< real64 > componentMass; }; /** * @brief 
Compute some statistics on the reservoir (average field pressure, etc) + * @param[in] time current time * @param[in] mesh the mesh level object * @param[in] regionNames the array of target region names */ - void computeRegionStatistics( MeshLevel & mesh, + void computeRegionStatistics( real64 const time, + MeshLevel & mesh, arrayView1d< string const > const & regionNames ) const; /** * @brief Compute CFL numbers + * @param[in] time current time * @param[in] dt the time step size * @param[in] domain the domain partition */ - void computeCFLNumbers( real64 const & dt, + void computeCFLNumbers( real64 const time, + real64 const dt, DomainPartition & domain ) const; void postProcessInput() override; diff --git a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp index 02bed4b1b0d..62e21cd33f3 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp @@ -212,14 +212,7 @@ void FlowSolverBase::saveConvergedState( ElementSubRegionBase & subRegion ) cons arrayView1d< real64 > const temp_n = subRegion.template getField< fields::flow::temperature_n >(); temp_n.setValues< parallelDevicePolicy<> >( temp ); - GEOS_THROW_IF( subRegion.hasField< fields::flow::pressure_k >() != - subRegion.hasField< fields::flow::temperature_k >(), - GEOS_FMT( "`{}` and `{}` must be either both existing or both non-existing on subregion {}", - fields::flow::pressure_k::key(), fields::flow::temperature_k::key(), subRegion.getName() ), - std::runtime_error ); - - if( subRegion.hasField< fields::flow::pressure_k >() && - subRegion.hasField< fields::flow::temperature_k >() ) + if( m_isFixedStressPoromechanicsUpdate ) { arrayView1d< real64 > const pres_k = subRegion.template getField< fields::flow::pressure_k >(); arrayView1d< real64 > const temp_k = subRegion.template getField< fields::flow::temperature_k >(); @@ -230,11 +223,8 @@ void FlowSolverBase::saveConvergedState( ElementSubRegionBase & subRegion ) cons void FlowSolverBase::saveIterationState( ElementSubRegionBase & subRegion ) const { - if( !( subRegion.hasField< fields::flow::pressure_k >() && - subRegion.hasField< fields::flow::temperature_k >() ) ) - { + if( !m_isFixedStressPoromechanicsUpdate ) return; - } arrayView1d< real64 const > const pres = subRegion.template getField< fields::flow::pressure >(); arrayView1d< real64 const > const temp = subRegion.template getField< fields::flow::temperature >(); @@ -472,18 +462,11 @@ void FlowSolverBase::updatePorosityAndPermeability( CellElementSubRegion & subRe string const & solidName = subRegion.getReference< string >( viewKeyStruct::solidNamesString() ); CoupledSolidBase & porousSolid = subRegion.template getConstitutiveModel< CoupledSolidBase >( solidName ); - GEOS_THROW_IF( subRegion.hasField< fields::flow::pressure_k >() != - subRegion.hasField< fields::flow::temperature_k >(), - GEOS_FMT( "`{}` and `{}` must be either both existing or both non-existing on subregion {}", - fields::flow::pressure_k::key(), fields::flow::temperature_k::key(), subRegion.getName() ), - std::runtime_error ); - constitutive::ConstitutivePassThru< CoupledSolidBase >::execute( porousSolid, [=, &subRegion] ( auto & castedPorousSolid ) { typename TYPEOFREF( castedPorousSolid ) ::KernelWrapper porousWrapper = castedPorousSolid.createKernelUpdates(); - if( subRegion.hasField< fields::flow::pressure_k >() && // for sequential simulations - subRegion.hasField< fields::flow::temperature_k 
>() ) + if( m_isFixedStressPoromechanicsUpdate ) // for sequential simulations { arrayView1d< real64 const > const & pressure_k = subRegion.getField< fields::flow::pressure_k >(); arrayView1d< real64 const > const & temperature_k = subRegion.getField< fields::flow::temperature_k >(); diff --git a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp index 61f8b0f5f5a..8fcb9d89522 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp @@ -63,8 +63,6 @@ class FlowSolverBase : public SolverBase virtual void registerDataOnMesh( Group & MeshBodies ) override; - localIndex numDofPerCell() const { return m_numDofPerCell; } - struct viewKeyStruct : SolverBase::viewKeyStruct { // misc inputs diff --git a/src/coreComponents/physicsSolvers/fluidFlow/IsothermalCompositionalMultiphaseBaseKernels.hpp b/src/coreComponents/physicsSolvers/fluidFlow/IsothermalCompositionalMultiphaseBaseKernels.hpp index 2afbf350956..878878f3c78 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/IsothermalCompositionalMultiphaseBaseKernels.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/IsothermalCompositionalMultiphaseBaseKernels.hpp @@ -139,15 +139,15 @@ void kernelLaunchSelectorCompSwitch( T value, LAMBDA && lambda ) } // namespace internal -/******************************** ComponentFractionKernel ********************************/ +/******************************** GlobalComponentFractionKernel ********************************/ /** - * @class ComponentFractionKernel + * @class GlobalComponentFractionKernel * @tparam NUM_COMP number of fluid components * @brief Define the interface for the update kernel in charge of computing the phase volume fractions */ template< integer NUM_COMP > -class ComponentFractionKernel : public PropertyKernelBase< NUM_COMP > +class GlobalComponentFractionKernel : public PropertyKernelBase< NUM_COMP > { public: @@ -159,7 +159,7 @@ class ComponentFractionKernel : public PropertyKernelBase< NUM_COMP > * @param[in] subRegion the element subregion * @param[in] fluid the fluid model */ - ComponentFractionKernel( ObjectManagerBase & subRegion ) + GlobalComponentFractionKernel( ObjectManagerBase & subRegion ) : Base(), m_compDens( subRegion.getField< fields::flow::globalCompDensity >() ), m_compFrac( subRegion.getField< fields::flow::globalCompFraction >() ), @@ -219,9 +219,9 @@ class ComponentFractionKernel : public PropertyKernelBase< NUM_COMP > }; /** - * @class ComponentFractionKernelFactory + * @class GlobalComponentFractionKernelFactory */ -class ComponentFractionKernelFactory +class GlobalComponentFractionKernelFactory { public: @@ -240,8 +240,8 @@ class ComponentFractionKernelFactory internal::kernelLaunchSelectorCompSwitch( numComp, [&] ( auto NC ) { integer constexpr NUM_COMP = NC(); - ComponentFractionKernel< NUM_COMP > kernel( subRegion ); - ComponentFractionKernel< NUM_COMP >::template launch< POLICY >( subRegion.size(), kernel ); + GlobalComponentFractionKernel< NUM_COMP > kernel( subRegion ); + GlobalComponentFractionKernel< NUM_COMP >::template launch< POLICY >( subRegion.size(), kernel ); } ); } diff --git a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBase.cpp index 6ec5d7c9854..c94e705ba3e 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBase.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBase.cpp 
@@ -1028,6 +1028,15 @@ void SinglePhaseBase::applySourceFluxBC( real64 const time_n, { return; } + if( !subRegion.hasWrapper( dofKey ) ) + { + if( fs.getLogLevel() >= 1 ) + { + GEOS_LOG_RANK( GEOS_FMT( "{}: trying to apply SourceFlux, but its targetSet named '{}' intersects with non-simulated region named '{}'.", + getDataContext(), setName, subRegion.getName() ) ); + } + return; + } arrayView1d< globalIndex const > const dofNumber = subRegion.getReference< array1d< globalIndex > >( dofKey ); arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); diff --git a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBaseKernels.hpp b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBaseKernels.hpp index c275f3a6781..1a53a221166 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBaseKernels.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseBaseKernels.hpp @@ -674,15 +674,21 @@ struct StatisticsKernel arrayView1d< real64 const > const & volume, arrayView1d< real64 const > const & pres, arrayView1d< real64 const > const & deltaPres, + arrayView1d< real64 const > const & temp, arrayView1d< real64 const > const & refPorosity, arrayView2d< real64 const > const & porosity, + arrayView2d< real64 const > const & density, real64 & minPres, real64 & avgPresNumerator, real64 & maxPres, real64 & minDeltaPres, real64 & maxDeltaPres, + real64 & minTemp, + real64 & avgTempNumerator, + real64 & maxTemp, real64 & totalUncompactedPoreVol, - real64 & totalPoreVol ) + real64 & totalPoreVol, + real64 & totalMass ) { RAJA::ReduceMin< parallelDeviceReduce, real64 > subRegionMinPres( LvArray::NumericLimits< real64 >::max ); RAJA::ReduceSum< parallelDeviceReduce, real64 > subRegionAvgPresNumerator( 0.0 ); @@ -691,8 +697,13 @@ struct StatisticsKernel RAJA::ReduceMin< parallelDeviceReduce, real64 > subRegionMinDeltaPres( LvArray::NumericLimits< real64 >::max ); RAJA::ReduceMax< parallelDeviceReduce, real64 > subRegionMaxDeltaPres( -LvArray::NumericLimits< real64 >::max ); + RAJA::ReduceMin< parallelDeviceReduce, real64 > subRegionMinTemp( LvArray::NumericLimits< real64 >::max ); + RAJA::ReduceSum< parallelDeviceReduce, real64 > subRegionAvgTempNumerator( 0.0 ); + RAJA::ReduceMax< parallelDeviceReduce, real64 > subRegionMaxTemp( -LvArray::NumericLimits< real64 >::max ); + RAJA::ReduceSum< parallelDeviceReduce, real64 > subRegionTotalUncompactedPoreVol( 0.0 ); RAJA::ReduceSum< parallelDeviceReduce, real64 > subRegionTotalPoreVol( 0.0 ); + RAJA::ReduceSum< parallelDeviceReduce, real64 > subRegionTotalMass( 0.0 ); forAll< parallelDevicePolicy<> >( size, [=] GEOS_HOST_DEVICE ( localIndex const ei ) { @@ -712,8 +723,13 @@ struct StatisticsKernel subRegionMinDeltaPres.min( deltaPres[ei] ); subRegionMaxDeltaPres.max( deltaPres[ei] ); + subRegionMinTemp.min( temp[ei] ); + subRegionAvgTempNumerator += uncompactedPoreVol * temp[ei]; + subRegionMaxTemp.max( temp[ei] ); + subRegionTotalUncompactedPoreVol += uncompactedPoreVol; subRegionTotalPoreVol += dynamicPoreVol; + subRegionTotalMass += dynamicPoreVol * density[ei][0]; } ); minPres = subRegionMinPres.get(); @@ -723,8 +739,13 @@ struct StatisticsKernel minDeltaPres = subRegionMinDeltaPres.get(); maxDeltaPres = subRegionMaxDeltaPres.get(); + minTemp = subRegionMinTemp.get(); + avgTempNumerator = subRegionAvgTempNumerator.get(); + maxTemp = subRegionMaxTemp.get(); + totalUncompactedPoreVol = subRegionTotalUncompactedPoreVol.get(); totalPoreVol = subRegionTotalPoreVol.get(); + totalMass = subRegionTotalMass.get(); } }; diff --git 
a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseHybridFVM.hpp b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseHybridFVM.hpp index dafb18f6a54..2f124db3319 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseHybridFVM.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseHybridFVM.hpp @@ -176,13 +176,6 @@ class SinglePhaseHybridFVM : public SinglePhaseBase /**@}*/ - - struct viewKeyStruct : SinglePhaseBase::viewKeyStruct - { - // primary face-based field - static constexpr char const * deltaFacePressureString() { return "deltaFacePressure"; } - }; - virtual void initializePreSubGroups() override; virtual void initializePostInitialConditionsPreSubGroups() override; diff --git a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.cpp b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.cpp index 9f711fad10c..c74a9e906b1 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.cpp @@ -59,12 +59,23 @@ void SinglePhaseStatistics::registerDataOnMesh( Group & meshBodies ) region.registerWrapper< RegionStatistics >( viewKeyStruct::regionStatisticsString() ). setRestartFlags( RestartFlags::NO_WRITE ); region.excludeWrappersFromPacking( { viewKeyStruct::regionStatisticsString() } ); + + // write output header + if( m_writeCSV > 0 && MpiWrapper::commRank() == 0 ) + { + std::ofstream outputFile( m_outputDir + "/" + regionNames[i] + ".csv" ); + outputFile << + "Time [s],Min pressure [Pa],Average pressure [Pa],Max pressure [Pa],Min delta pressure [Pa],Max delta pressure [Pa]," << + "Min temperature [K],Average temperature [K],Max temperature [K],Total dynamic pore volume [rm^3],Total fluid mass [kg]"; + outputFile << std::endl; + outputFile.close(); + } } } ); } -bool SinglePhaseStatistics::execute( real64 const GEOS_UNUSED_PARAM( time_n ), - real64 const GEOS_UNUSED_PARAM( dt ), +bool SinglePhaseStatistics::execute( real64 const time_n, + real64 const dt, integer const GEOS_UNUSED_PARAM( cycleNumber ), integer const GEOS_UNUSED_PARAM( eventCounter ), real64 const GEOS_UNUSED_PARAM( eventProgress ), @@ -74,12 +85,14 @@ bool SinglePhaseStatistics::execute( real64 const GEOS_UNUSED_PARAM( time_n ), MeshLevel & mesh, arrayView1d< string const > const & regionNames ) { - computeRegionStatistics( mesh, regionNames ); + // current time is time_n + dt + computeRegionStatistics( time_n + dt, mesh, regionNames ); } ); return false; } -void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, +void SinglePhaseStatistics::computeRegionStatistics( real64 const time, + MeshLevel & mesh, arrayView1d< string const > const & regionNames ) const { GEOS_MARK_FUNCTION; @@ -98,8 +111,13 @@ void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, regionStatistics.maxDeltaPressure = -LvArray::NumericLimits< real64 >::max; regionStatistics.minDeltaPressure = LvArray::NumericLimits< real64 >::max; + regionStatistics.averageTemperature = 0.0; + regionStatistics.maxTemperature = -LvArray::NumericLimits< real64 >::max; + regionStatistics.minTemperature = LvArray::NumericLimits< real64 >::max; + regionStatistics.totalPoreVolume = 0.0; regionStatistics.totalUncompactedPoreVolume = 0.0; + regionStatistics.totalMass = 0.0; } // Step 2: increment the average/min/max quantities for all the subRegions @@ -111,6 +129,7 @@ void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, arrayView1d< real64 const > const volume = 
subRegion.getElementVolume(); arrayView1d< real64 const > const pres = subRegion.getField< fields::flow::pressure >(); arrayView1d< real64 const > const deltaPres = subRegion.getField< fields::flow::deltaPressure >(); + arrayView1d< real64 const > const temp = subRegion.getField< fields::flow::temperature >(); string const & solidName = subRegion.getReference< string >( SinglePhaseBase::viewKeyStruct::solidNamesString() ); Group const & constitutiveModels = subRegion.getGroup( ElementSubRegionBase::groupKeyStruct::constitutiveModelsString() ); @@ -118,13 +137,21 @@ void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, arrayView1d< real64 const > const refPorosity = solid.getReferencePorosity(); arrayView2d< real64 const > const porosity = solid.getPorosity(); + string const & fluidName = subRegion.template getReference< string >( FlowSolverBase::viewKeyStruct::fluidNamesString() ); + SingleFluidBase const & fluid = constitutiveModels.getGroup< SingleFluidBase >( fluidName ); + arrayView2d< real64 const > const densities = fluid.density(); + real64 subRegionAvgPresNumerator = 0.0; real64 subRegionMinPres = 0.0; real64 subRegionMaxPres = 0.0; real64 subRegionMinDeltaPres = 0.0; real64 subRegionMaxDeltaPres = 0.0; + real64 subRegionAvgTempNumerator = 0.0; + real64 subRegionMinTemp = 0.0; + real64 subRegionMaxTemp = 0.0; real64 subRegionTotalUncompactedPoreVol = 0.0; real64 subRegionTotalPoreVol = 0.0; + real64 subRegionTotalMass = 0.0; singlePhaseBaseKernels::StatisticsKernel:: launch( subRegion.size(), @@ -132,15 +159,21 @@ void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, volume, pres, deltaPres, + temp, refPorosity, porosity, + densities, subRegionMinPres, subRegionAvgPresNumerator, subRegionMaxPres, subRegionMinDeltaPres, subRegionMaxDeltaPres, + subRegionMinTemp, + subRegionAvgTempNumerator, + subRegionMaxTemp, subRegionTotalUncompactedPoreVol, - subRegionTotalPoreVol ); + subRegionTotalPoreVol, + subRegionTotalMass ); ElementRegionBase & region = elemManager.getRegion( subRegion.getParent().getParent().getName() ); RegionStatistics & regionStatistics = region.getReference< RegionStatistics >( viewKeyStruct::regionStatisticsString() ); @@ -164,9 +197,19 @@ void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, regionStatistics.maxDeltaPressure = subRegionMaxDeltaPres; } + regionStatistics.averageTemperature += subRegionAvgTempNumerator; + if( subRegionMinTemp < regionStatistics.minTemperature ) + { + regionStatistics.minTemperature = subRegionMinTemp; + } + if( subRegionMaxTemp > regionStatistics.maxTemperature ) + { + regionStatistics.maxTemperature = subRegionMaxTemp; + } regionStatistics.totalUncompactedPoreVolume += subRegionTotalUncompactedPoreVol; regionStatistics.totalPoreVolume += subRegionTotalPoreVol; + regionStatistics.totalMass += subRegionTotalMass; } ); // Step 3: synchronize the results over the MPI ranks @@ -176,23 +219,53 @@ void SinglePhaseStatistics::computeRegionStatistics( MeshLevel & mesh, RegionStatistics & regionStatistics = region.getReference< RegionStatistics >( viewKeyStruct::regionStatisticsString() ); regionStatistics.minPressure = MpiWrapper::min( regionStatistics.minPressure ); + regionStatistics.averagePressure = MpiWrapper::sum( regionStatistics.averagePressure ); regionStatistics.maxPressure = MpiWrapper::max( regionStatistics.maxPressure ); + regionStatistics.minDeltaPressure = MpiWrapper::min( regionStatistics.minDeltaPressure ); regionStatistics.maxDeltaPressure = MpiWrapper::max( 
regionStatistics.maxDeltaPressure ); + + regionStatistics.minTemperature = MpiWrapper::min( regionStatistics.minTemperature ); + regionStatistics.averageTemperature = MpiWrapper::sum( regionStatistics.averageTemperature ); + regionStatistics.maxTemperature = MpiWrapper::max( regionStatistics.maxTemperature ); + regionStatistics.totalUncompactedPoreVolume = MpiWrapper::sum( regionStatistics.totalUncompactedPoreVolume ); regionStatistics.totalPoreVolume = MpiWrapper::sum( regionStatistics.totalPoreVolume ); - regionStatistics.averagePressure = MpiWrapper::sum( regionStatistics.averagePressure ); - regionStatistics.averagePressure /= regionStatistics.totalUncompactedPoreVolume; + regionStatistics.totalMass = MpiWrapper::sum( regionStatistics.totalMass ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Pressure (min, average, max): " - << regionStatistics.minPressure << ", " << regionStatistics.averagePressure << ", " << regionStatistics.maxPressure << " Pa" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Delta pressure (min, max): " - << regionStatistics.minDeltaPressure << ", " << regionStatistics.maxDeltaPressure << " Pa" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ", " << regionNames[i] - << ": Total dynamic pore volume: " << regionStatistics.totalPoreVolume << " rm^3" ); + if( regionStatistics.totalUncompactedPoreVolume > 0 ) + { + real64 const invTotalUncompactedPoreVolume = 1.0 / regionStatistics.totalUncompactedPoreVolume; + regionStatistics.averagePressure *= invTotalUncompactedPoreVolume; + regionStatistics.averageTemperature *= invTotalUncompactedPoreVolume; + } + else + { + regionStatistics.averagePressure = 0.0; + regionStatistics.averageTemperature = 0.0; + GEOS_WARNING( GEOS_FMT( "{}, {}: Cannot compute average pressure & temperature because region pore volume is zero.", getName(), regionNames[i] ) ); + } + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Pressure (min, average, max): {}, {}, {} Pa", + getName(), regionNames[i], time, regionStatistics.minPressure, regionStatistics.averagePressure, regionStatistics.maxPressure ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Delta pressure (min, max): {}, {} Pa", + getName(), regionNames[i], time, regionStatistics.minDeltaPressure, regionStatistics.maxDeltaPressure ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Temperature (min, average, max): {}, {}, {} K", + getName(), regionNames[i], time, regionStatistics.minTemperature, regionStatistics.averageTemperature, regionStatistics.maxTemperature ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Total dynamic pore volume: {} rm^3", + getName(), regionNames[i], time, regionStatistics.totalPoreVolume ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{}, {} (time {} s): Total fluid mass: {} kg", + getName(), regionNames[i], time, regionStatistics.totalMass ) ); + + if( m_writeCSV > 0 && MpiWrapper::commRank() == 0 ) + { + std::ofstream outputFile( m_outputDir + "/" + regionNames[i] + ".csv", std::ios_base::app ); + outputFile << time << "," << regionStatistics.minPressure << "," << regionStatistics.averagePressure << "," << regionStatistics.maxPressure << "," << + regionStatistics.minDeltaPressure << "," << regionStatistics.maxDeltaPressure << "," << + regionStatistics.minTemperature << "," << regionStatistics.averageTemperature << "," << regionStatistics.maxTemperature << "," << + regionStatistics.totalPoreVolume << "," << regionStatistics.totalMass << std::endl; + outputFile.close(); + } } } 
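Aside (illustration only, not part of the patch): the statistics changes above accumulate a pore-volume-weighted numerator per sub-region, MPI-sum it, and divide only when the total uncompacted pore volume is positive, otherwise they report zero. A minimal standalone C++ sketch of that pattern, using plain std::vector instead of the GEOS array types and made-up values:

#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  // per-cell uncompacted pore volumes and pressures for a hypothetical region
  std::vector< double > poreVolume{ 1.0, 2.0, 3.0 };
  std::vector< double > pressure{ 1.0e6, 2.0e6, 3.0e6 };

  double totalUncompactedPoreVolume = 0.0;
  double averagePressure = 0.0; // first accumulates the weighted numerator

  for( std::size_t ei = 0; ei < poreVolume.size(); ++ei )
  {
    totalUncompactedPoreVolume += poreVolume[ei];
    averagePressure += poreVolume[ei] * pressure[ei];
  }

  // divide only once, and only if the region actually has pore volume
  if( totalUncompactedPoreVolume > 0 )
  {
    averagePressure /= totalUncompactedPoreVolume;
  }
  else
  {
    averagePressure = 0.0; // empty region: report zero instead of dividing by zero
  }

  std::cout << "Average pressure: " << averagePressure << " Pa\n";
  return 0;
}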
diff --git a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.hpp b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.hpp index 04c0bc28ff7..bfd1360fd92 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseStatistics.hpp @@ -89,6 +89,16 @@ class SinglePhaseStatistics : public FieldStatisticsBase< SinglePhaseBase > /// maximum region delta pressure real64 maxDeltaPressure; + // fluid mass + real64 totalMass; + + /// average region temperature + real64 averageTemperature; + /// minimum region temperature + real64 minTemperature; + /// maximum region temperature + real64 maxTemperature; + /// total region pore volume real64 totalPoreVolume; /// total region uncompacted pore volume @@ -100,7 +110,8 @@ class SinglePhaseStatistics : public FieldStatisticsBase< SinglePhaseBase > * @param[in] mesh the mesh level object * @param[in] regionNames the array of target region names */ - void computeRegionStatistics( MeshLevel & mesh, + void computeRegionStatistics( real64 const time, + MeshLevel & mesh, arrayView1d< string const > const & regionNames ) const; diff --git a/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.cpp b/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.cpp index 9134dddb42c..6620d93c277 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.cpp @@ -251,7 +251,7 @@ void CompositionalMultiphaseWell::registerDataOnMesh( Group & meshBodies ) // write rates output header // the rank that owns the reference well element is responsible - if( getLogLevel() > 0 && subRegion.isLocallyOwned() ) + if( m_writeCSV > 0 && subRegion.isLocallyOwned() ) { string const wellControlsName = wellControls.getName(); string const massUnit = m_useMass ? "kg" : "mol"; @@ -261,9 +261,9 @@ void CompositionalMultiphaseWell::registerDataOnMesh( Group & meshBodies ) integer const numPhase = m_numPhases; // format: time,bhp,total_rate,total_vol_rate,phase0_vol_rate,phase1_vol_rate,... 
std::ofstream outputFile( m_ratesOutputDir + "/" + wellControlsName + ".csv" ); - outputFile << "time [s],bhp [Pa],total rate [" << massUnit << "/s],total " << conditionKey << " volumetric rate [" << unitKey << "m3/s]"; + outputFile << "Time [s],BHP [Pa],Total rate [" << massUnit << "/s],Total " << conditionKey << " Volumetric rate [" << unitKey << "m3/s]"; for( integer ip = 0; ip < numPhase; ++ip ) - outputFile << ",phase" << ip << " " << conditionKey << " volumetric rate [" << unitKey << "m3/s]"; + outputFile << ",Phase" << ip << " " << conditionKey << " volumetric rate [" << unitKey << "m3/s]"; outputFile << std::endl; outputFile.close(); } @@ -543,12 +543,12 @@ void CompositionalMultiphaseWell::initializePostInitialConditionsPreSubGroups() } ); } -void CompositionalMultiphaseWell::updateComponentFraction( WellElementSubRegion & subRegion ) const +void CompositionalMultiphaseWell::updateGlobalComponentFraction( WellElementSubRegion & subRegion ) const { GEOS_MARK_FUNCTION; isothermalCompositionalMultiphaseBaseKernels:: - ComponentFractionKernelFactory:: + GlobalComponentFractionKernelFactory:: createAndLaunch< parallelDevicePolicy<> >( m_numComponents, subRegion ); @@ -886,7 +886,7 @@ void CompositionalMultiphaseWell::updateTotalMassDensity( WellElementSubRegion & void CompositionalMultiphaseWell::updateSubRegionState( WellElementSubRegion & subRegion ) { // update properties - updateComponentFraction( subRegion ); + updateGlobalComponentFraction( subRegion ); // update volumetric rates for the well constraints // note: this must be called before updateFluidModel @@ -1874,18 +1874,25 @@ void CompositionalMultiphaseWell::printRates( real64 const & time_n, string const wellControlsName = wellControls.getName(); // format: time,total_rate,total_vol_rate,phase0_vol_rate,phase1_vol_rate,... 
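Aside (illustration only, not part of the patch): the printRates changes below open the rates stream only when CSV output is requested and guard every write with is_open(), so console logging keeps working even when no file is produced. A small self-contained sketch of that guard pattern; the file name, flag, and values are hypothetical:

#include <fstream>
#include <iostream>

int main()
{
  bool const writeCSV = true; // stands in for the new writeCSV input flag
  double const time = 10.0, bhp = 1.5e7, totalRate = 2.5;

  // only open the file when CSV output is requested
  std::ofstream outputFile;
  if( writeCSV )
  {
    outputFile.open( "well_rates_example.csv", std::ios_base::app );
    outputFile << time;
  }

  // console logging happens unconditionally ...
  std::cout << "BHP: " << bhp << " Pa, total rate: " << totalRate << " kg/s\n";

  // ... while the CSV row is only appended when the file was actually opened
  if( outputFile.is_open() )
  {
    outputFile << "," << bhp << "," << totalRate << std::endl;
    outputFile.close();
  }
  return 0;
}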
- std::ofstream outputFile( m_ratesOutputDir + "/" + wellControlsName + ".csv", std::ios_base::app ); - - outputFile << time_n + dt; + std::ofstream outputFile; + if( m_writeCSV > 0 ) + { + outputFile.open( m_ratesOutputDir + "/" + wellControlsName + ".csv", std::ios_base::app ); + outputFile << time_n + dt; + } if( !wellControls.isWellOpen( time_n + dt ) ) { GEOS_LOG( GEOS_FMT( "{}: well is shut", wellControlsName ) ); - // print all zeros in the rates file - outputFile << ",0.0,0.0,0.0"; - for( integer ip = 0; ip < numPhase; ++ip ) - outputFile << ",0.0"; - outputFile << std::endl; + if( outputFile.is_open()) + { + // print all zeros in the rates file + outputFile << ",0.0,0.0,0.0"; + for( integer ip = 0; ip < numPhase; ++ip ) + outputFile << ",0.0"; + outputFile << std::endl; + outputFile.close(); + } return; } @@ -1924,19 +1931,21 @@ void CompositionalMultiphaseWell::printRates( real64 const & time_n, real64 const currentTotalRate = connRate[iwelemRef]; GEOS_LOG( GEOS_FMT( "{}: BHP (at the specified reference elevation): {} Pa", wellControlsName, currentBHP ) ); - outputFile << "," << currentBHP; GEOS_LOG( GEOS_FMT( "{}: Total rate: {} {}/s; total {} volumetric rate: {} {}m3/s", wellControlsName, currentTotalRate, massUnit, conditionKey, currentTotalVolRate, unitKey ) ); - outputFile << "," << currentTotalRate << "," << currentTotalVolRate; for( integer ip = 0; ip < numPhase; ++ip ) - { GEOS_LOG( GEOS_FMT( "{}: Phase {} {} volumetric rate: {} {}m3/s", wellControlsName, ip, conditionKey, currentPhaseVolRate[ip], unitKey ) ); - outputFile << "," << currentPhaseVolRate[ip]; + if( outputFile.is_open()) + { + outputFile << "," << currentBHP; + outputFile << "," << currentTotalRate << "," << currentTotalVolRate; + for( integer ip = 0; ip < numPhase; ++ip ) + outputFile << "," << currentPhaseVolRate[ip]; + outputFile << std::endl; + outputFile.close(); } - outputFile << std::endl; } ); - outputFile.close(); } ); } ); } diff --git a/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.hpp b/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.hpp index da7f355c931..e0b95e4799b 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/wells/CompositionalMultiphaseWell.hpp @@ -133,10 +133,10 @@ class CompositionalMultiphaseWell : public WellSolverBase /**@}*/ /** - * @brief Recompute component fractions from primary variables (component densities) + * @brief Recompute global component fractions from primary variables (component densities) * @param subRegion the well subregion containing all the primary and dependent fields */ - void updateComponentFraction( WellElementSubRegion & subRegion ) const; + void updateGlobalComponentFraction( WellElementSubRegion & subRegion ) const; /** * @brief Recompute the volumetric rates that are used in the well constraints diff --git a/src/coreComponents/physicsSolvers/fluidFlow/wells/SinglePhaseWell.cpp b/src/coreComponents/physicsSolvers/fluidFlow/wells/SinglePhaseWell.cpp index 6acb1125406..a1f1777c5e3 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/wells/SinglePhaseWell.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/wells/SinglePhaseWell.cpp @@ -90,7 +90,7 @@ void SinglePhaseWell::registerDataOnMesh( Group & meshBodies ) wellControls.registerWrapper< real64 >( viewKeyStruct::dCurrentVolRate_dRateString() ); // write rates output header - if( getLogLevel() > 0 ) + if( m_writeCSV > 0 && 
subRegion.isLocallyOwned()) { string const wellControlsName = wellControls.getName(); integer const useSurfaceConditions = wellControls.useSurfaceConditions(); @@ -98,8 +98,8 @@ void SinglePhaseWell::registerDataOnMesh( Group & meshBodies ) string const unitKey = useSurfaceConditions ? "s" : "r"; // format: time,bhp,total_rate,total_vol_rate std::ofstream outputFile( m_ratesOutputDir + "/" + wellControlsName + ".csv" ); - outputFile << "time [s],bhp [Pa],total rate [kg/s],total " << conditionKey << " volumetric rate [" << unitKey << "m3/s]" << std::endl; + outputFile << "Time [s],BHP [Pa],Total rate [kg/s],Total " << conditionKey << " volumetric rate [" << unitKey << "m3/s]" << std::endl; ( viewKeyStruct::fluidNamesString() ); @@ -1049,15 +1049,22 @@ void SinglePhaseWell::printRates( real64 const & time_n, string const wellControlsName = wellControls.getName(); // format: time,total_rate,total_vol_rate - std::ofstream outputFile( m_ratesOutputDir + "/" + wellControlsName + ".csv", std::ios_base::app ); - - outputFile << time_n + dt; + std::ofstream outputFile; + if( m_writeCSV > 0 ) + { + outputFile.open( m_ratesOutputDir + "/" + wellControlsName + ".csv", std::ios_base::app ); + outputFile << time_n + dt; + } if( !wellControls.isWellOpen( time_n + dt ) ) { GEOS_LOG( GEOS_FMT( "{}: well is shut", wellControlsName ) ); - // print all zeros in the rates file - outputFile << ",0.0,0.0,0.0" << std::endl; + if( outputFile.is_open()) + { + // print all zeros in the rates file + outputFile << ",0.0,0.0,0.0" << std::endl; + outputFile.close(); + } return; } @@ -1083,10 +1090,14 @@ void SinglePhaseWell::printRates( real64 const & time_n, real64 const currentTotalRate = connRate[iwelemRef]; GEOS_LOG( GEOS_FMT( "{}: BHP (at the specified reference elevation): {} Pa", wellControlsName, currentBHP ) ); - outputFile << "," << currentBHP; GEOS_LOG( GEOS_FMT( "{}: Total rate: {} kg/s; total {} volumetric rate: {} {}m3/s", wellControlsName, currentTotalRate, conditionKey, currentTotalVolRate, unitKey ) ); - outputFile << "," << currentTotalRate << "," << currentTotalVolRate << std::endl; + if( outputFile.is_open()) + { + outputFile << "," << currentBHP; + outputFile << "," << currentTotalRate << "," << currentTotalVolRate << std::endl; + outputFile.close(); + } } ); } ); } ); diff --git a/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.cpp index babfb17a4fd..5086f5f2d83 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.cpp @@ -26,6 +26,7 @@ #include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" #include "physicsSolvers/fluidFlow/wells/WellControls.hpp" #include "physicsSolvers/fluidFlow/wells/WellSolverBaseFields.hpp" +#include "fileIO/Outputs/OutputBase.hpp" namespace geos { @@ -38,10 +39,15 @@ WellSolverBase::WellSolverBase( string const & name, : SolverBase( name, parent ), m_numDofPerWellElement( 0 ), m_numDofPerResElement( 0 ), - m_ratesOutputDir( name + "_rates" ) + m_ratesOutputDir( joinPath( OutputBase::getOutputDirectory(), name + "_rates" ) ) { this->getWrapper< string >( viewKeyStruct::discretizationString() ). setInputFlag( InputFlags::FALSE ); + + this->registerWrapper( viewKeyStruct::writeCSVFlagString(), &m_writeCSV ). + setApplyDefaultValue( 0 ). + setInputFlag( dataRepository::InputFlags::OPTIONAL ). 
+ setDescription( "Write rates into a CSV file" ); } Group * WellSolverBase::createChild( string const & childKey, string const & childName ) @@ -72,7 +78,7 @@ void WellSolverBase::postProcessInput() SolverBase::postProcessInput(); // create dir for rates output - if( getLogLevel() > 0 ) + if( m_writeCSV > 0 ) { if( MpiWrapper::commRank() == 0 ) { diff --git a/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.hpp b/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.hpp index f07890a1363..6f53122b71e 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/wells/WellSolverBase.hpp @@ -273,6 +273,7 @@ class WellSolverBase : public SolverBase struct viewKeyStruct : SolverBase::viewKeyStruct { static constexpr char const * fluidNamesString() { return "fluidNames"; } + static constexpr char const * writeCSVFlagString() { return "writeCSV"; } }; private: @@ -311,6 +312,7 @@ class WellSolverBase : public SolverBase /// the number of Degrees of Freedom per reservoir element integer m_numDofPerResElement; + integer m_writeCSV; string const m_ratesOutputDir; }; diff --git a/src/coreComponents/physicsSolvers/multiphysics/CoupledSolver.hpp b/src/coreComponents/physicsSolvers/multiphysics/CoupledSolver.hpp index 4c8b254ee0c..ad49c26ea2c 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/CoupledSolver.hpp +++ b/src/coreComponents/physicsSolvers/multiphysics/CoupledSolver.hpp @@ -318,6 +318,18 @@ class CoupledSolver : public SolverBase return nextDt; } + virtual real64 setNextDtBasedOnNewtonIter( real64 const & currentDt ) override + { + real64 nextDt = SolverBase::setNextDtBasedOnNewtonIter( currentDt ); + forEachArgInTuple( m_solvers, [&]( auto & solver, auto ) + { + real64 const singlePhysicsNextDt = + solver->setNextDtBasedOnNewtonIter( currentDt ); + nextDt = LvArray::math::min( singlePhysicsNextDt, nextDt ); + } ); + return nextDt; + } + virtual void cleanup( real64 const time_n, integer const cycleNumber, integer const eventCounter, @@ -369,10 +381,6 @@ class CoupledSolver : public SolverBase { GEOS_MARK_FUNCTION; - real64 dtReturn = dt; - - real64 dtReturnTemporary; - Timestamp const meshModificationTimestamp = getMeshModificationTimestamp( domain ); // First call Coupled Solver setup (important for poromechanics initialization for sequentially coupled) @@ -397,69 +405,120 @@ class CoupledSolver : public SolverBase } ); NonlinearSolverParameters & solverParams = getNonlinearSolverParameters(); - integer & iter = solverParams.m_numNewtonIterations; - iter = 0; + integer const maxNumberDtCuts = solverParams.m_maxTimeStepCuts; + real64 const dtCutFactor = solverParams.m_timeStepCutFactor; + integer & dtAttempt = solverParams.m_numTimeStepAttempts; + bool isConverged = false; - /// Sequential coupling loop - while( iter < solverParams.m_maxIterNewton ) + // dt may be cut during the course of this step, so we are keeping a local + // value to track the achieved dt for this step. + real64 stepDt = dt; + + // outer loop attempts to apply full timestep, and managed the cutting of the timestep if + // required. 
+ for( dtAttempt = 0; dtAttempt < maxNumberDtCuts; ++dtAttempt ) { - if( iter == 0 ) - { - // Reset the states of all solvers if any of them had to restart - forEachArgInTuple( m_solvers, [&]( auto & solver, auto ) - { - solver->resetStateToBeginningOfStep( domain ); - solver->getSolverStatistics().initializeTimeStepStatistics(); // initialize counters for subsolvers - } ); - resetStateToBeginningOfStep( domain ); - } + // TODO configuration loop - // Increment the solver statistics for reporting purposes - // Pass a "0" as argument (0 linear iteration) to skip the output of linear iteration stats at the end - m_solverStatistics.logNonlinearIteration( 0 ); + // Reset the states of all solvers if any of them had to restart + forEachArgInTuple( m_solvers, [&]( auto & solver, auto ) + { + solver->resetStateToBeginningOfStep( domain ); + solver->getSolverStatistics().initializeTimeStepStatistics(); // initialize counters for subsolvers + } ); + resetStateToBeginningOfStep( domain ); - // Solve the subproblems nonlinearly - forEachArgInTuple( m_solvers, [&]( auto & solver, auto idx ) + integer & iter = solverParams.m_numNewtonIterations; + iter = 0; + /// Sequential coupling loop + while( iter < solverParams.m_maxIterNewton ) { - GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( " Iteration {:2}: {}", iter+1, solver->getName() ) ); - dtReturnTemporary = solver->nonlinearImplicitStep( time_n, - dtReturn, + // Increment the solver statistics for reporting purposes + // Pass a "0" as argument (0 linear iteration) to skip the output of linear iteration stats at the end + m_solverStatistics.logNonlinearIteration( 0 ); + + startSequentialIteration( iter, domain ); + + // Solve the subproblems nonlinearly + forEachArgInTuple( m_solvers, [&]( auto & solver, auto idx ) + { + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( " Iteration {:2}: {}", iter + 1, solver->getName() ) ); + real64 solverDt = solver->nonlinearImplicitStep( time_n, + stepDt, cycleNumber, domain ); - mapSolutionBetweenSolvers( domain, idx() ); + mapSolutionBetweenSolvers( domain, idx() ); - if( dtReturnTemporary < dtReturn ) + if( solverDt < stepDt ) // subsolver had to cut the time step + { + iter = 0; // restart outer loop + stepDt = solverDt; // sync time step + } + } ); + + // Check convergence of the outer loop + isConverged = checkSequentialConvergence( iter, + time_n, + stepDt, + domain ); + + if( isConverged ) + { + // Save Time step statistics for the subsolvers + forEachArgInTuple( m_solvers, [&]( auto & solver, + auto ) + { + solver->getSolverStatistics().saveTimeStepStatistics(); + } ); + break; + } + else { - iter = 0; - dtReturn = dtReturnTemporary; + finishSequentialIteration( iter, domain ); } - } ); - // Check convergence of the outer loop - isConverged = checkSequentialConvergence( iter, - time_n, - dtReturn, - domain ); + ++iter; + } if( isConverged ) { - // Save Time step statistics for the subsolvers - forEachArgInTuple( m_solvers, [&]( auto & solver, auto ) + // get out of time loop + break; + } + else + { + // cut timestep, go back to beginning of step and restart the Newton loop + stepDt *= dtCutFactor; + GEOS_LOG_LEVEL_RANK_0 ( 1, GEOS_FMT( "New dt = {}", stepDt ) ); + + // notify the solver statistics counter that this is a time step cut + m_solverStatistics.logTimeStepCut(); + forEachArgInTuple( m_solvers, [&]( auto & solver, + auto ) { - solver->getSolverStatistics().saveTimeStepStatistics(); + solver->getSolverStatistics().logTimeStepCut(); } ); - break; } - // Add convergence check: - ++iter; } - GEOS_ERROR_IF( !isConverged, 
getDataContext() << ": sequentiallyCoupledSolverStep did not converge!" ); + if( !isConverged ) + { + GEOS_LOG_RANK_0( "Convergence not achieved." ); - implicitStepComplete( time_n, dt, domain ); + if( m_nonlinearSolverParameters.m_allowNonConverged > 0 ) + { + GEOS_LOG_RANK_0( "The accepted solution may be inaccurate." ); + } + else + { + GEOS_ERROR( "Nonconverged solutions not allowed. Terminating..." ); + } + } - return dtReturn; + implicitStepComplete( time_n, stepDt, domain ); + + return stepDt; } /** @@ -570,6 +629,18 @@ class CoupledSolver : public SolverBase NonlinearSolverParameters::viewKeysStruct::lineSearchActionString(), EnumStrings< NonlinearSolverParameters::LineSearchAction >::toString( NonlinearSolverParameters::LineSearchAction::None ) ), InputError ); + + if( m_nonlinearSolverParameters.m_nonlinearAccelerationType != NonlinearSolverParameters::NonlinearAccelerationType::None ) + validateNonlinearAcceleration(); + } + + virtual void validateNonlinearAcceleration() + { + GEOS_THROW ( GEOS_FMT( "{}: Nonlinear acceleration {} is not supported by {} solver '{}'", + getWrapperDataContext( NonlinearSolverParameters::viewKeysStruct::nonlinearAccelerationTypeString() ), + EnumStrings< NonlinearSolverParameters::NonlinearAccelerationType >::toString( m_nonlinearSolverParameters.m_nonlinearAccelerationType ), + getCatalogName(), getName()), + InputError ); } void @@ -581,6 +652,18 @@ class CoupledSolver : public SolverBase } ); } + virtual void startSequentialIteration( integer const & iter, + DomainPartition & domain ) + { + GEOS_UNUSED_VAR( iter, domain ); + } + + virtual void finishSequentialIteration( integer const & iter, + DomainPartition & domain ) + { + GEOS_UNUSED_VAR( iter, domain ); + } + protected: /// Pointers of the single-physics solvers diff --git a/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.cpp b/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.cpp index 29d1ebb0394..8c003debc24 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.cpp +++ b/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.cpp @@ -22,9 +22,9 @@ #include "constitutive/fluid/multifluid/MultiFluidBase.hpp" #include "constitutive/solid/PorousSolid.hpp" -#include "mesh/utilities/AverageOverQuadraturePointsKernel.hpp" #include "physicsSolvers/fluidFlow/CompositionalMultiphaseBase.hpp" #include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" +#include "physicsSolvers/multiphysics/CompositionalMultiphaseReservoirAndWells.hpp" #include "physicsSolvers/multiphysics/poromechanicsKernels/MultiphasePoromechanics.hpp" #include "physicsSolvers/multiphysics/poromechanicsKernels/ThermalMultiphasePoromechanics.hpp" #include "physicsSolvers/solidMechanics/SolidMechanicsFields.hpp" @@ -71,8 +71,7 @@ catalogName() template< typename FLOW_SOLVER > MultiphasePoromechanics< FLOW_SOLVER >::MultiphasePoromechanics( const string & name, Group * const parent ) - : Base( name, parent ), - m_isThermal( 0 ) + : Base( name, parent ) { this->registerWrapper( viewKeyStruct::stabilizationTypeString(), &m_stabilizationType ). setInputFlag( InputFlags::OPTIONAL ). @@ -91,16 +90,6 @@ MultiphasePoromechanics< FLOW_SOLVER >::MultiphasePoromechanics( const string & setInputFlag( InputFlags::OPTIONAL ). setDescription( "Constant multiplier of stabilization strength." ); - this->registerWrapper( viewKeyStruct::isThermalString(), &m_isThermal ). - setApplyDefaultValue( 0 ). - setInputFlag( InputFlags::OPTIONAL ). 
- setDescription( "Flag indicating whether the problem is thermal or not. Set isThermal=\"1\" to enable the thermal coupling" ); - - this->registerWrapper( viewKeyStruct::performStressInitializationString(), &m_performStressInitialization ). - setApplyDefaultValue( false ). - setInputFlag( InputFlags::FALSE ). - setDescription( "Flag to indicate that the solver is going to perform stress initialization" ); - LinearSolverParameters & linearSolverParameters = this->m_linearSolverParameters.get(); linearSolverParameters.mgr.strategy = LinearSolverParameters::MGR::StrategyType::multiphasePoromechanics; linearSolverParameters.mgr.separateComponents = true; @@ -113,7 +102,7 @@ void MultiphasePoromechanics< FLOW_SOLVER >::postProcessInput() { Base::postProcessInput(); - GEOS_ERROR_IF( flowSolver()->catalogName() == "CompositionalMultiphaseReservoir" && + GEOS_ERROR_IF( this->flowSolver()->catalogName() == "CompositionalMultiphaseReservoir" && this->getNonlinearSolverParameters().couplingType() != NonlinearSolverParameters::CouplingType::Sequential, GEOS_FMT( "{}: {} solver is only designed to work for {} = {}", this->getDataContext(), catalogName(), NonlinearSolverParameters::viewKeysStruct::couplingTypeString(), @@ -124,46 +113,26 @@ void MultiphasePoromechanics< FLOW_SOLVER >::postProcessInput() template< typename FLOW_SOLVER > void MultiphasePoromechanics< FLOW_SOLVER >::registerDataOnMesh( Group & meshBodies ) { - SolverBase::registerDataOnMesh( meshBodies ); - - if( this->getNonlinearSolverParameters().m_couplingType == NonlinearSolverParameters::CouplingType::Sequential ) - { - // to let the solid mechanics solver that there is a pressure and temperature RHS in the mechanics solve - solidMechanicsSolver()->enableFixedStressPoromechanicsUpdate(); - // to let the flow solver that saving pressure_k and temperature_k is necessary (for the fixed-stress porosity terms) - flowSolver()->enableFixedStressPoromechanicsUpdate(); - } + Base::registerDataOnMesh( meshBodies ); - this->template forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) + if( m_stabilizationType == StabilizationType::Global || + m_stabilizationType == StabilizationType::Local ) { - ElementRegionManager & elemManager = mesh.getElemManager(); - - elemManager.forElementSubRegions< ElementSubRegionBase >( regionNames, - [&]( localIndex const, - ElementSubRegionBase & subRegion ) + this->template forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) { - subRegion.registerWrapper< string >( viewKeyStruct::porousMaterialNamesString() ). - setPlotLevel( PlotLevel::NOPLOT ). - setRestartFlags( RestartFlags::NO_WRITE ). 
- setSizedFromParent( 0 ); + ElementRegionManager & elemManager = mesh.getElemManager(); - if( m_stabilizationType == StabilizationType::Global || - m_stabilizationType == StabilizationType::Local ) + elemManager.forElementSubRegions< ElementSubRegionBase >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) { subRegion.registerField< fields::flow::macroElementIndex >( this->getName() ); subRegion.registerField< fields::flow::elementStabConstant >( this->getName() ); - } - - if( this->getNonlinearSolverParameters().m_couplingType == NonlinearSolverParameters::CouplingType::Sequential ) - { - // register the bulk density for use in the solid mechanics solver - // ideally we would resize it here as well, but the solid model name is not available yet (see below) - subRegion.registerField< fields::poromechanics::bulkDensity >( this->getName() ); - } + } ); } ); - } ); + } } template< typename FLOW_SOLVER > @@ -175,20 +144,6 @@ void MultiphasePoromechanics< FLOW_SOLVER >::setupCoupling( DomainPartition cons DofManager::Connector::Elem ); } -template< typename FLOW_SOLVER > -void MultiphasePoromechanics< FLOW_SOLVER >::setupDofs( DomainPartition const & domain, - DofManager & dofManager ) const -{ - // note that the order of operations matters a lot here (for instance for the MGR labels) - // we must set up dofs for solid mechanics first, and then for flow - // that's the reason why this function is here and not in CoupledSolvers.hpp - solidMechanicsSolver()->setupDofs( domain, dofManager ); - flowSolver()->setupDofs( domain, dofManager ); - - setupCoupling( domain, dofManager ); -} - - template< typename FLOW_SOLVER > void MultiphasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const GEOS_UNUSED_PARAM( time ), real64 const dt, @@ -214,7 +169,7 @@ void MultiphasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const GEOS_U string const flowDofKey = dofManager.getKey( CompositionalMultiphaseBase::viewKeyStruct::elemDofFieldString() ); - if( m_isThermal ) + if( this->m_isThermal ) { poromechanicsMaxForce = assemblyLaunch< constitutive::PorousSolid< ElasticIsotropic >, // TODO: change once there is a cmake solution @@ -226,9 +181,9 @@ void MultiphasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const GEOS_U localRhs, dt, flowDofKey, - flowSolver()->numFluidComponents(), - flowSolver()->numFluidPhases(), - flowSolver()->useTotalMassEquation(), + this->flowSolver()->numFluidComponents(), + this->flowSolver()->numFluidPhases(), + this->flowSolver()->useTotalMassEquation(), FlowSolverBase::viewKeyStruct::fluidNamesString() ); } else @@ -243,18 +198,18 @@ void MultiphasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const GEOS_U localRhs, dt, flowDofKey, - flowSolver()->numFluidComponents(), - flowSolver()->numFluidPhases(), - flowSolver()->useTotalMassEquation(), + this->flowSolver()->numFluidComponents(), + this->flowSolver()->numFluidPhases(), + this->flowSolver()->useTotalMassEquation(), FlowSolverBase::viewKeyStruct::fluidNamesString() ); } } ); // step 2: apply mechanics solver on its target regions not included in the poromechanics solver target regions - solidMechanicsSolver()->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) + this->solidMechanicsSolver()->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) { // collect the target region of the 
mechanics solver not included in the poromechanics target regions @@ -288,7 +243,7 @@ void MultiphasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const GEOS_U } ); - solidMechanicsSolver()->getMaxForce() = LvArray::math::max( mechanicsMaxForce, poromechanicsMaxForce ); + this->solidMechanicsSolver()->getMaxForce() = LvArray::math::max( mechanicsMaxForce, poromechanicsMaxForce ); // step 3: compute the fluxes (face-based contributions) @@ -296,19 +251,19 @@ void MultiphasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const GEOS_U m_stabilizationType == StabilizationType::Local ) { updateStabilizationParameters( domain ); - flowSolver()->assembleStabilizedFluxTerms( dt, - domain, - dofManager, - localMatrix, - localRhs ); + this->flowSolver()->assembleStabilizedFluxTerms( dt, + domain, + dofManager, + localMatrix, + localRhs ); } else { - flowSolver()->assembleFluxTerms( dt, - domain, - dofManager, - localMatrix, - localRhs ); + this->flowSolver()->assembleFluxTerms( dt, + domain, + dofManager, + localMatrix, + localRhs ); } } @@ -325,54 +280,53 @@ void MultiphasePoromechanics< FLOW_SOLVER >::updateState( DomainPartition & doma [&]( localIndex const, CellElementSubRegion & subRegion ) { - real64 const deltaPhaseVolFrac = flowSolver()->updateFluidState( subRegion ); + real64 const deltaPhaseVolFrac = this->flowSolver()->updateFluidState( subRegion ); maxDeltaPhaseVolFrac = LvArray::math::max( maxDeltaPhaseVolFrac, deltaPhaseVolFrac ); - if( m_isThermal ) + if( this->m_isThermal ) { - flowSolver()->updateSolidInternalEnergyModel( subRegion ); + this->flowSolver()->updateSolidInternalEnergyModel( subRegion ); } } ); } ); - GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( " {}: Max phase volume fraction change: {}", - this->getName(), GEOS_FMT( "{:.{}f}", maxDeltaPhaseVolFrac, 2 ) ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( " {}: Max phase volume fraction change = {}", + this->getName(), GEOS_FMT( "{:.{}f}", maxDeltaPhaseVolFrac, 4 ) ) ); } template< typename FLOW_SOLVER > void MultiphasePoromechanics< FLOW_SOLVER >::initializePostInitialConditionsPreSubGroups() { - SolverBase::initializePostInitialConditionsPreSubGroups(); + Base::initializePostInitialConditionsPreSubGroups(); arrayView1d< string const > const & poromechanicsTargetRegionNames = this->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); -// arrayView1d< string const > const & solidMechanicsTargetRegionNames = -// solidMechanicsSolver()->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); + arrayView1d< string const > const & solidMechanicsTargetRegionNames = + this->solidMechanicsSolver()->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); arrayView1d< string const > const & flowTargetRegionNames = - flowSolver()->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); + this->flowSolver()->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); for( integer i = 0; i < poromechanicsTargetRegionNames.size(); ++i ) { -// Pavel: disabled to avoid false triggering for well regions -// GEOS_THROW_IF( std::find( solidMechanicsTargetRegionNames.begin(), solidMechanicsTargetRegionNames.end(), -// poromechanicsTargetRegionNames[i] ) -// == solidMechanicsTargetRegionNames.end(), -// GEOS_FMT( "{} {}: region {} must be a target region of {}", -// getCatalogName(), getDataContext(), poromechanicsTargetRegionNames[i], -// 
solidMechanicsSolver()->getDataContext() ), -// InputError ); + GEOS_THROW_IF( std::find( solidMechanicsTargetRegionNames.begin(), solidMechanicsTargetRegionNames.end(), + poromechanicsTargetRegionNames[i] ) + == solidMechanicsTargetRegionNames.end(), + GEOS_FMT( "{} {}: region {} must be a target region of {}", + getCatalogName(), this->getDataContext(), poromechanicsTargetRegionNames[i], + this->solidMechanicsSolver()->getDataContext() ), + InputError ); GEOS_THROW_IF( std::find( flowTargetRegionNames.begin(), flowTargetRegionNames.end(), poromechanicsTargetRegionNames[i] ) == flowTargetRegionNames.end(), GEOS_FMT( "{} {}: region `{}` must be a target region of `{}`", - getCatalogName(), this->getDataContext(), poromechanicsTargetRegionNames[i], flowSolver()->getDataContext() ), + getCatalogName(), this->getDataContext(), poromechanicsTargetRegionNames[i], this->flowSolver()->getDataContext() ), InputError ); } - integer & isFlowThermal = flowSolver()->isThermal(); - GEOS_WARNING_IF( m_isThermal && !isFlowThermal, + integer & isFlowThermal = this->flowSolver()->isThermal(); + GEOS_WARNING_IF( this->m_isThermal && !isFlowThermal, GEOS_FMT( "{} {}: The attribute `{}` of the flow solver `{}` is set to 1 since the poromechanics solver is thermal", - getCatalogName(), this->getName(), FlowSolverBase::viewKeyStruct::isThermalString(), flowSolver()->getName() ) ); - isFlowThermal = m_isThermal; + getCatalogName(), this->getName(), FlowSolverBase::viewKeyStruct::isThermalString(), this->flowSolver()->getName() ) ); + isFlowThermal = this->m_isThermal; - if( m_isThermal ) + if( this->m_isThermal ) { this->m_linearSolverParameters.get().mgr.strategy = LinearSolverParameters::MGR::StrategyType::thermalMultiphasePoromechanics; } @@ -381,50 +335,12 @@ void MultiphasePoromechanics< FLOW_SOLVER >::initializePostInitialConditionsPreS template< typename FLOW_SOLVER > void MultiphasePoromechanics< FLOW_SOLVER >::initializePreSubGroups() { - SolverBase::initializePreSubGroups(); + Base::initializePreSubGroups(); GEOS_THROW_IF( m_stabilizationType == StabilizationType::Local, this->getWrapperDataContext( viewKeyStruct::stabilizationTypeString() ) << ": Local stabilization has been disabled temporarily", InputError ); - - DomainPartition & domain = this->template getGroupByPath< DomainPartition >( "/Problem/domain" ); - - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - ElementRegionManager & elementRegionManager = mesh.getElemManager(); - elementRegionManager.forElementSubRegions< ElementSubRegionBase >( regionNames, - [&]( localIndex const, - ElementSubRegionBase & subRegion ) - { - // skip the wells - if( subRegion.getCatalogName() == "wellElementSubRegion" ) - return; - - string & porousName = subRegion.getReference< string >( viewKeyStruct::porousMaterialNamesString() ); - porousName = this->template getConstitutiveName< CoupledSolidBase >( subRegion ); - GEOS_ERROR_IF( porousName.empty(), GEOS_FMT( "{}: Solid model not found on subregion {}", - this->getDataContext(), subRegion.getName() ) ); - - if( subRegion.hasField< fields::poromechanics::bulkDensity >() ) - { - // get the solid model to know the number of quadrature points and resize the bulk density - CoupledSolidBase const & solid = this->template getConstitutiveModel< CoupledSolidBase >( subRegion, porousName ); - subRegion.getField< fields::poromechanics::bulkDensity >().resizeDimension< 1 >( solid.getDensity().size( 1 ) ); 
- } - } ); - } ); -} - -template< typename FLOW_SOLVER > -void MultiphasePoromechanics< FLOW_SOLVER >::implicitStepSetup( real64 const & time_n, - real64 const & dt, - DomainPartition & domain ) -{ - flowSolver()->keepFlowVariablesConstantDuringInitStep( m_performStressInitialization ); - Base::implicitStepSetup( time_n, dt, domain ); } template< typename FLOW_SOLVER > @@ -491,55 +407,6 @@ void MultiphasePoromechanics< FLOW_SOLVER >::updateStabilizationParameters( Doma } ); } -template< typename FLOW_SOLVER > -void MultiphasePoromechanics< FLOW_SOLVER >::mapSolutionBetweenSolvers( DomainPartition & domain, integer const solverType ) -{ - GEOS_MARK_FUNCTION; - - /// After the flow solver - if( solverType == static_cast< integer >( SolverType::Flow ) ) - { - // save pressure and temperature at the end of this iteration - flowSolver()->saveIterationState( domain ); - - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - - mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, - auto & subRegion ) - { - // update the bulk density - // TODO: ideally, we would not recompute the bulk density, but a more general "rhs" containing the body force and the - // pressure/temperature terms - updateBulkDensity( subRegion ); - } ); - } ); - } - - /// After the solid mechanics solver - if( solverType == static_cast< integer >( SolverType::SolidMechanics ) ) - { - // compute the average of the mean stress increment over quadrature points - averageMeanStressIncrement( domain ); - - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - - mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, - auto & subRegion ) - { - // update the porosity after a change in displacement (after mechanics solve) - // or a change in pressure/temperature (after a flow solve) - flowSolver()->updatePorosityAndPermeability( subRegion ); - } ); - } ); - } -} - template< typename FLOW_SOLVER > void MultiphasePoromechanics< FLOW_SOLVER >::updateBulkDensity( ElementSubRegionBase & subRegion ) { @@ -554,54 +421,12 @@ void MultiphasePoromechanics< FLOW_SOLVER >::updateBulkDensity( ElementSubRegion // update the bulk density poromechanicsKernels:: MultiphaseBulkDensityKernelFactory:: - createAndLaunch< parallelDevicePolicy<> >( flowSolver()->numFluidPhases(), + createAndLaunch< parallelDevicePolicy<> >( this->flowSolver()->numFluidPhases(), fluid, solid, subRegion ); } -template< typename FLOW_SOLVER > -void MultiphasePoromechanics< FLOW_SOLVER >::averageMeanStressIncrement( DomainPartition & domain ) -{ - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, - auto & subRegion ) - { - // get the solid model (to access stress increment) - string const solidName = subRegion.template getReference< string >( viewKeyStruct::porousMaterialNamesString() ); - CoupledSolidBase & solid = this->template getConstitutiveModel< CoupledSolidBase >( subRegion, solidName ); - - arrayView2d< real64 const > const meanStressIncrement_k = solid.getMeanEffectiveStressIncrement_k(); - arrayView1d< real64 > const 
averageMeanStressIncrement_k = solid.getAverageMeanEffectiveStressIncrement_k(); - - finiteElement::FiniteElementBase & subRegionFE = - subRegion.template getReference< finiteElement::FiniteElementBase >( solidMechanicsSolver()->getDiscretizationName() ); - - // determine the finite element type - finiteElement::FiniteElementDispatchHandler< BASE_FE_TYPES >:: - dispatch3D( subRegionFE, [&] ( auto const finiteElement ) - { - using FE_TYPE = decltype( finiteElement ); - - // call the factory and launch the kernel - AverageOverQuadraturePoints1DKernelFactory:: - createAndLaunch< CellElementSubRegion, - FE_TYPE, - parallelDevicePolicy<> >( mesh.getNodeManager(), - mesh.getEdgeManager(), - mesh.getFaceManager(), - subRegion, - finiteElement, - meanStressIncrement_k, - averageMeanStressIncrement_k ); - } ); - } ); - } ); -} - template class MultiphasePoromechanics< CompositionalMultiphaseBase >; template class MultiphasePoromechanics< CompositionalMultiphaseReservoirAndWells< CompositionalMultiphaseBase > >; diff --git a/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.hpp b/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.hpp index 6dff80a74c4..67d14459e8f 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.hpp +++ b/src/coreComponents/physicsSolvers/multiphysics/MultiphasePoromechanics.hpp @@ -19,11 +19,8 @@ #ifndef GEOS_PHYSICSSOLVERS_MULTIPHYSICS_MULTIPHASEPOROMECHANICS_HPP_ #define GEOS_PHYSICSSOLVERS_MULTIPHYSICS_MULTIPHASEPOROMECHANICS_HPP_ -#include "physicsSolvers/multiphysics/CoupledSolver.hpp" -#include "constitutive/solid/CoupledSolidBase.hpp" +#include "physicsSolvers/multiphysics/PoromechanicsSolver.hpp" #include "physicsSolvers/fluidFlow/CompositionalMultiphaseBase.hpp" -#include "physicsSolvers/multiphysics/CompositionalMultiphaseReservoirAndWells.hpp" -#include "physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.hpp" namespace geos @@ -45,26 +42,17 @@ ENUM_STRINGS( StabilizationType, } template< typename FLOW_SOLVER > -class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanicsLagrangianFEM > +class MultiphasePoromechanics : public PoromechanicsSolver< FLOW_SOLVER > { public: - using Base = CoupledSolver< FLOW_SOLVER, SolidMechanicsLagrangianFEM >; + using Base = PoromechanicsSolver< FLOW_SOLVER >; using Base::m_solvers; using Base::m_dofManager; using Base::m_localMatrix; using Base::m_rhs; using Base::m_solution; - enum class SolverType : integer - { - Flow = 0, - SolidMechanics = 1 - }; - - /// String used to form the solverName used to register solvers in CoupledSolver - static string coupledSolverAttributePrefix() { return "poromechanics"; } - /** * @brief main constructor for MultiphasePoromechanics Objects * @param name the name of this instantiation of MultiphasePoromechanics in the repository @@ -86,24 +74,6 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic */ string getCatalogName() const override { return catalogName(); } - /** - * @brief accessor for the pointer to the solid mechanics solver - * @return a pointer to the solid mechanics solver - */ - SolidMechanicsLagrangianFEM * solidMechanicsSolver() const - { - return std::get< toUnderlying( SolverType::SolidMechanics ) >( m_solvers ); - } - - /** - * @brief accessor for the pointer to the flow solver - * @return a pointer to the flow solver - */ - FLOW_SOLVER * flowSolver() const - { - return std::get< toUnderlying( SolverType::Flow ) >( m_solvers ); - } - /** * @defgroup 
Solver Interface Functions * @@ -118,13 +88,6 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic virtual void setupCoupling( DomainPartition const & domain, DofManager & dofManager ) const override; - virtual void setupDofs( DomainPartition const & domain, - DofManager & dofManager ) const override; - - virtual void implicitStepSetup( real64 const & time_n, - real64 const & dt, - DomainPartition & domain ) override; - virtual void assembleSystem( real64 const time, real64 const dt, DomainPartition & domain, @@ -142,15 +105,6 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic */ void updateStabilizationParameters( DomainPartition & domain ) const; - /* - * @brief Utility function to set the stress initialization flag - * @param[in] performStressInitialization true if the solver has to initialize stress, false otherwise - */ - void setStressInitialization( integer const performStressInitialization ) - { m_performStressInitialization = performStressInitialization; } - - virtual void mapSolutionBetweenSolvers( DomainPartition & domain, integer const solverType ) override final; - protected: virtual void initializePostInitialConditionsPreSubGroups() override; @@ -159,9 +113,6 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic struct viewKeyStruct : Base::viewKeyStruct { - /// Names of the porous materials - constexpr static char const * porousMaterialNamesString() { return "porousMaterialNames"; } - /// Type of stabilization used in the simulation constexpr static char const * stabilizationTypeString() { return "stabilizationType"; } @@ -170,13 +121,6 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic /// Multiplier on stabilization constexpr static char const * stabilizationMultiplierString() { return "stabilizationMultiplier"; } - - /// Flag to determine whether or not this is aa thermal simulation - constexpr static char const * isThermalString() { return "isThermal"; } - - /// Flag to indicate that the solver is going to perform stress initialization - constexpr static char const * performStressInitializationString() { return "performStressInitialization"; } - }; private: @@ -185,13 +129,7 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic * @brief Helper function to recompute the bulk density * @param[in] subRegion the element subRegion */ - void updateBulkDensity( ElementSubRegionBase & subRegion ); - - /** - * @brief Helper function to average the mean stress increment over quadrature points - * @param[in] domain the domain partition - */ - void averageMeanStressIncrement( DomainPartition & domain ); + virtual void updateBulkDensity( ElementSubRegionBase & subRegion ) override; template< typename CONSTITUTIVE_BASE, typename KERNEL_WRAPPER, @@ -214,12 +152,6 @@ class MultiphasePoromechanics : public CoupledSolver< FLOW_SOLVER, SolidMechanic /// Multiplier on stabilization constant real64 m_stabilizationMultiplier; - /// flag to determine whether or not this is a thermal simulation - integer m_isThermal; - - /// Flag to indicate that the solver is going to perform stress initialization - integer m_performStressInitialization; - }; template< typename FLOW_SOLVER > @@ -257,7 +189,7 @@ real64 MultiphasePoromechanics< FLOW_SOLVER >::assemblyLaunch( MeshLevel & mesh, CONSTITUTIVE_BASE, CellElementSubRegion >( mesh, regionNames, - solidMechanicsSolver()->getDiscretizationName(), + 
this->solidMechanicsSolver()->getDiscretizationName(), materialNamesString, kernelWrapper ); } diff --git a/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsInitialization.cpp b/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsInitialization.cpp index c0c79fe0c8c..cb0e6541120 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsInitialization.cpp +++ b/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsInitialization.cpp @@ -24,6 +24,7 @@ #include "mainInterface/ProblemManager.hpp" #include "physicsSolvers/fluidFlow/SinglePhaseBase.hpp" #include "physicsSolvers/multiphysics/SinglePhaseReservoirAndWells.hpp" +#include "physicsSolvers/multiphysics/CompositionalMultiphaseReservoirAndWells.hpp" namespace geos { diff --git a/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsSolver.hpp b/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsSolver.hpp new file mode 100644 index 00000000000..05eaca5fdef --- /dev/null +++ b/src/coreComponents/physicsSolvers/multiphysics/PoromechanicsSolver.hpp @@ -0,0 +1,471 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2018-2020 TotalEnergies + * Copyright (c) 2019- GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file PoromechanicsSolver.hpp + * + */ + +#ifndef GEOS_PHYSICSSOLVERS_MULTIPHYSICS_POROMECHANICSSOLVER_HPP_ +#define GEOS_PHYSICSSOLVERS_MULTIPHYSICS_POROMECHANICSSOLVER_HPP_ + +#include "physicsSolvers/multiphysics/CoupledSolver.hpp" +#include "physicsSolvers/multiphysics/PoromechanicsFields.hpp" +#include "physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.hpp" +#include "constitutive/solid/CoupledSolidBase.hpp" +#include "constitutive/solid/PorousSolid.hpp" +#include "mesh/utilities/AverageOverQuadraturePointsKernel.hpp" +#include "codingUtilities/Utilities.hpp" + +namespace geos +{ + +template< typename FLOW_SOLVER > +class PoromechanicsSolver : public CoupledSolver< FLOW_SOLVER, + SolidMechanicsLagrangianFEM > +{ +public: + + using Base = CoupledSolver< FLOW_SOLVER, + SolidMechanicsLagrangianFEM >; + using Base::m_solvers; + using Base::m_dofManager; + using Base::m_localMatrix; + using Base::m_rhs; + using Base::m_solution; + + enum class SolverType : integer + { + Flow = 0, + SolidMechanics = 1 + }; + + /// String used to form the solverName used to register solvers in CoupledSolver + static string coupledSolverAttributePrefix() { return "poromechanics"; } + + /** + * @brief main constructor for PoromechanicsSolver objects + * @param name the name of this instantiation of PoromechanicsSolver in the repository + * @param parent the parent group of this instantiation of PoromechanicsSolver + */ + PoromechanicsSolver( const string & name, + dataRepository::Group * const parent ) + : Base( name, parent ), + m_isThermal( 0 ) + { + this->registerWrapper( viewKeyStruct::isThermalString(), &m_isThermal ). + setApplyDefaultValue( 0 ). + setInputFlag( dataRepository::InputFlags::OPTIONAL ). + setDescription( "Flag indicating whether the problem is thermal or not.
Set isThermal=\"1\" to enable the thermal coupling" ); + + this->registerWrapper( viewKeyStruct::performStressInitializationString(), &m_performStressInitialization ). + setApplyDefaultValue( false ). + setInputFlag( dataRepository::InputFlags::FALSE ). + setDescription( "Flag to indicate that the solver is going to perform stress initialization" ); + } + + virtual void initializePreSubGroups() override + { + Base::initializePreSubGroups(); + + DomainPartition & domain = this->template getGroupByPath< DomainPartition >( "/Problem/domain" ); + + this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + ElementRegionManager & elementRegionManager = mesh.getElemManager(); + elementRegionManager.forElementSubRegions< ElementSubRegionBase >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + string & porousName = subRegion.getReference< string >( viewKeyStruct::porousMaterialNamesString() ); + porousName = this->template getConstitutiveName< constitutive::CoupledSolidBase >( subRegion ); + GEOS_THROW_IF( porousName.empty(), + GEOS_FMT( "{} {} : Solid model not found on subregion {}", + this->getCatalogName(), this->getDataContext().toString(), subRegion.getName() ), + InputError ); + + string & porosityModelName = subRegion.getReference< string >( constitutive::CoupledSolidBase::viewKeyStruct::porosityModelNameString() ); + porosityModelName = this->template getConstitutiveName< constitutive::PorosityBase >( subRegion ); + GEOS_THROW_IF( porosityModelName.empty(), + GEOS_FMT( "{} {} : Porosity model not found on subregion {}", + this->catalogName(), this->getDataContext().toString(), subRegion.getName() ), + InputError ); + + + + if( subRegion.hasField< fields::poromechanics::bulkDensity >() ) + { + // get the solid model to know the number of quadrature points and resize the bulk density + constitutive::CoupledSolidBase const & solid = this->template getConstitutiveModel< constitutive::CoupledSolidBase >( subRegion, porousName ); + subRegion.getField< fields::poromechanics::bulkDensity >().resizeDimension< 1 >( solid.getDensity().size( 1 ) ); + } + } ); + } ); + } + + virtual void registerDataOnMesh( dataRepository::Group & meshBodies ) override + { + SolverBase::registerDataOnMesh( meshBodies ); + + if( this->getNonlinearSolverParameters().m_couplingType == NonlinearSolverParameters::CouplingType::Sequential ) + { + // to let the solid mechanics solver know that there is a pressure and temperature RHS in the mechanics solve + solidMechanicsSolver()->enableFixedStressPoromechanicsUpdate(); + // to let the flow solver know that saving pressure_k and temperature_k is necessary (for the fixed-stress porosity terms) + flowSolver()->enableFixedStressPoromechanicsUpdate(); + } + + SolverBase::forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + ElementRegionManager & elemManager = mesh.getElemManager(); + + elemManager.forElementSubRegions< ElementSubRegionBase >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + subRegion.registerWrapper< string >( viewKeyStruct::porousMaterialNamesString() ). + setPlotLevel( dataRepository::PlotLevel::NOPLOT ). + setRestartFlags( dataRepository::RestartFlags::NO_WRITE ). + setSizedFromParent( 0 ); + + // This is needed by the way the surface generator currently does things.
+ subRegion.registerWrapper< string >( constitutive::CoupledSolidBase::viewKeyStruct::porosityModelNameString() ). + setPlotLevel( dataRepository::PlotLevel::NOPLOT ). + setRestartFlags( dataRepository::RestartFlags::NO_WRITE ). + setSizedFromParent( 0 ); + + if( this->getNonlinearSolverParameters().m_couplingType == NonlinearSolverParameters::CouplingType::Sequential ) + { + // register the bulk density for use in the solid mechanics solver + // ideally we would resize it here as well, but the solid model name is not available yet (see below) + subRegion.registerField< fields::poromechanics::bulkDensity >( this->getName() ); + } + } ); + } ); + } + + virtual void implicitStepSetup( real64 const & time_n, + real64 const & dt, + DomainPartition & domain ) override + { + flowSolver()->keepFlowVariablesConstantDuringInitStep( m_performStressInitialization ); + Base::implicitStepSetup( time_n, dt, domain ); + } + + virtual void setupDofs( DomainPartition const & domain, + DofManager & dofManager ) const override + { + // note that the order of operations matters a lot here (for instance for the MGR labels) + // we must set up dofs for solid mechanics first, and then for flow + // that's the reason why this function is here and not in CoupledSolvers.hpp + solidMechanicsSolver()->setupDofs( domain, dofManager ); + flowSolver()->setupDofs( domain, dofManager ); + + this->setupCoupling( domain, dofManager ); + } + + /** + * @brief accessor for the pointer to the solid mechanics solver + * @return a pointer to the solid mechanics solver + */ + SolidMechanicsLagrangianFEM * solidMechanicsSolver() const + { + return std::get< toUnderlying( SolverType::SolidMechanics ) >( m_solvers ); + } + + /** + * @brief accessor for the pointer to the flow solver + * @return a pointer to the flow solver + */ + FLOW_SOLVER * flowSolver() const + { + return std::get< toUnderlying( SolverType::Flow ) >( m_solvers ); + } + + /* + * @brief Utility function to set the stress initialization flag + * @param[in] performStressInitialization true if the solver has to initialize stress, false otherwise + */ + void setStressInitialization( integer const performStressInitialization ) + { + m_performStressInitialization = performStressInitialization; + } + + struct viewKeyStruct : Base::viewKeyStruct + { + /// Names of the porous materials + constexpr static char const * porousMaterialNamesString() { return "porousMaterialNames"; } + + /// Flag to indicate that the simulation is thermal + constexpr static char const * isThermalString() { return "isThermal"; } + + /// Flag to indicate that the solver is going to perform stress initialization + constexpr static char const * performStressInitializationString() { return "performStressInitialization"; } + }; + +protected: + + /* Implementation of Nonlinear Acceleration (Aitken) of averageMeanTotalStressIncrement */ + + void recordAverageMeanTotalStressIncrement( DomainPartition & domain, + array1d< real64 > & averageMeanTotalStressIncrement ) + { + averageMeanTotalStressIncrement.resize( 0 ); + SolverBase::forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) { + // get the solid model (to access stress increment) + string const solidName = subRegion.template getReference< string >( "porousMaterialNames" ); + constitutive::CoupledSolidBase & solid = 
SolverBase::getConstitutiveModel< constitutive::CoupledSolidBase >( + subRegion, solidName ); + + arrayView1d< const real64 > const & averageMeanTotalStressIncrement_k = solid.getAverageMeanTotalStressIncrement_k(); + for( localIndex k = 0; k < localIndex( averageMeanTotalStressIncrement_k.size()); k++ ) + { + averageMeanTotalStressIncrement.emplace_back( averageMeanTotalStressIncrement_k[k] ); + } + } ); + } ); + } + + void applyAcceleratedAverageMeanTotalStressIncrement( DomainPartition & domain, + array1d< real64 > & averageMeanTotalStressIncrement ) + { + integer i = 0; + SolverBase::forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) { + // get the solid model (to access stress increment) + string const solidName = subRegion.template getReference< string >( "porousMaterialNames" ); + constitutive::CoupledSolidBase & solid = SolverBase::getConstitutiveModel< constitutive::CoupledSolidBase >( + subRegion, solidName ); + auto & porosityModel = dynamic_cast< constitutive::BiotPorosity const & >( solid.getBasePorosityModel()); + arrayView1d< real64 > const & averageMeanTotalStressIncrement_k = solid.getAverageMeanTotalStressIncrement_k(); + for( localIndex k = 0; k < localIndex( averageMeanTotalStressIncrement_k.size()); k++ ) + { + porosityModel.updateAverageMeanTotalStressIncrement( k, averageMeanTotalStressIncrement[i] ); + i++; + } + } ); + } ); + } + + real64 computeAitkenRelaxationFactor( array1d< real64 > const & s0, + array1d< real64 > const & s1, + array1d< real64 > const & s1_tilde, + array1d< real64 > const & s2_tilde, + real64 const omega0 ) + { + array1d< real64 > r1 = axpy( s1_tilde, s0, -1.0 ); + array1d< real64 > r2 = axpy( s2_tilde, s1, -1.0 ); + + // diff = r2 - r1 + array1d< real64 > diff = axpy( r2, r1, -1.0 ); + + real64 const denom = dot( diff, diff ); + real64 const numer = dot( r1, diff ); + + real64 omega1 = 1.0; + if( !isZero( denom )) + { + omega1 = -1.0 * omega0 * numer / denom; + } + return omega1; + } + + array1d< real64 > computeUpdate( array1d< real64 > const & s1, + array1d< real64 > const & s2_tilde, + real64 const omega1 ) + { + return axpy( scale( s1, 1.0 - omega1 ), + scale( s2_tilde, omega1 ), + 1.0 ); + } + + void startSequentialIteration( integer const & iter, + DomainPartition & domain ) override + { + if( this->getNonlinearSolverParameters().m_nonlinearAccelerationType == NonlinearSolverParameters::NonlinearAccelerationType::Aitken ) + { + if( iter == 0 ) + { + recordAverageMeanTotalStressIncrement( domain, m_s1 ); + } + else + { + m_s0 = m_s1; + m_s1 = m_s2; + m_s1_tilde = m_s2_tilde; + m_omega0 = m_omega1; + } + } + } + + void finishSequentialIteration( integer const & iter, + DomainPartition & domain ) override + { + if( this->getNonlinearSolverParameters().m_nonlinearAccelerationType == NonlinearSolverParameters::NonlinearAccelerationType::Aitken ) + { + if( iter == 0 ) + { + m_s2 = m_s2_tilde; + m_omega1 = 1.0; + } + else + { + m_omega1 = computeAitkenRelaxationFactor( m_s0, m_s1, m_s1_tilde, m_s2_tilde, m_omega0 ); + m_s2 = computeUpdate( m_s1, m_s2_tilde, m_omega1 ); + applyAcceleratedAverageMeanTotalStressIncrement( domain, m_s2 ); + } + } + } + + virtual void mapSolutionBetweenSolvers( DomainPartition & domain, integer const solverType ) override + { + GEOS_MARK_FUNCTION; + + /// After the flow solver + if( solverType 
== static_cast< integer >( SolverType::Flow ) ) + { + // save pressure and temperature at the end of this iteration + flowSolver()->saveIterationState( domain ); + + this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + + mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + // update the bulk density + // TODO: ideally, we would not recompute the bulk density, but a more general "rhs" containing the body force and the + // pressure/temperature terms + updateBulkDensity( subRegion ); + } ); + } ); + } + + /// After the solid mechanics solver + if( solverType == static_cast< integer >( SolverType::SolidMechanics ) ) + { + // compute the average of the mean total stress increment over quadrature points + averageMeanTotalStressIncrement( domain ); + + this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + + mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + // update the porosity after a change in displacement (after mechanics solve) + // or a change in pressure/temperature (after a flow solve) + flowSolver()->updatePorosityAndPermeability( subRegion ); + } ); + } ); + } + + // needed to perform nonlinear acceleration + if( solverType == static_cast< integer >( SolverType::SolidMechanics ) && + this->getNonlinearSolverParameters().m_nonlinearAccelerationType== NonlinearSolverParameters::NonlinearAccelerationType::Aitken ) + { + recordAverageMeanTotalStressIncrement( domain, m_s2_tilde ); + } + } + + /** + * @brief Helper function to average the mean total stress increment over quadrature points + * @param[in] domain the domain partition + */ + void averageMeanTotalStressIncrement( DomainPartition & domain ) + { + this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + // get the solid model (to access stress increment) + string const solidName = subRegion.template getReference< string >( viewKeyStruct::porousMaterialNamesString() ); + constitutive::CoupledSolidBase & solid = this->template getConstitutiveModel< constitutive::CoupledSolidBase >( subRegion, solidName ); + + arrayView2d< real64 const > const meanTotalStressIncrement_k = solid.getMeanTotalStressIncrement_k(); + arrayView1d< real64 > const averageMeanTotalStressIncrement_k = solid.getAverageMeanTotalStressIncrement_k(); + + finiteElement::FiniteElementBase & subRegionFE = + subRegion.template getReference< finiteElement::FiniteElementBase >( solidMechanicsSolver()->getDiscretizationName() ); + + // determine the finite element type + finiteElement::FiniteElementDispatchHandler< BASE_FE_TYPES >:: + dispatch3D( subRegionFE, [&] ( auto const finiteElement ) + { + using FE_TYPE = decltype( finiteElement ); + + // call the factory and launch the kernel + AverageOverQuadraturePoints1DKernelFactory:: + createAndLaunch< CellElementSubRegion, + FE_TYPE, + parallelDevicePolicy<> >( mesh.getNodeManager(), + mesh.getEdgeManager(), + mesh.getFaceManager(), + subRegion, + finiteElement, + meanTotalStressIncrement_k, + 
averageMeanTotalStressIncrement_k ); + } ); + } ); + } ); + } + + virtual void updateBulkDensity( ElementSubRegionBase & subRegion ) = 0; + + virtual void validateNonlinearAcceleration() override + { + if( MpiWrapper::commSize( MPI_COMM_GEOSX ) > 1 ) + { + GEOS_ERROR( "Nonlinear acceleration is not implemented for MPI runs" ); + } + } + + /// Flag to determine whether or not this is a thermal simulation + integer m_isThermal; + + /// Flag to indicate that the solver is going to perform stress initialization + integer m_performStressInitialization; + + /// Member variables needed for Nonlinear Acceleration ( Aitken ). Naming convention follows ( Jiang & Tchelepi, 2019 ) + array1d< real64 > m_s0; // Accelerated averageMeanTotalStressIncrement @ outer iteration v ( two iterations ago ) + array1d< real64 > m_s1; // Accelerated averageMeanTotalStressIncrement @ outer iteration v + 1 ( previous iteration ) + array1d< real64 > m_s1_tilde; // Unaccelerated averageMeanTotalStressIncrement @ outer iteration v + 1 ( previous iteration ) + array1d< real64 > m_s2; // Accelerated averageMeanTotalStressIncrement @ outer iteration v + 2 ( current iteration ) + array1d< real64 > m_s2_tilde; // Unaccelerated averageMeanTotalStressIncrement @ outer iteration v + 2 ( current iteration ) + real64 m_omega0; // Old Aitken relaxation factor + real64 m_omega1; // New Aitken relaxation factor + +}; + +} /* namespace geos */ + +#endif //GEOS_PHYSICSSOLVERS_MULTIPHYSICS_POROMECHANICSSOLVER_HPP_ diff --git a/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.cpp b/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.cpp index dd87ed0f528..4ab8faa8214 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.cpp +++ b/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.cpp @@ -24,7 +24,6 @@ #include "constitutive/fluid/singlefluid/SingleFluidBase.hpp" #include "linearAlgebra/solvers/BlockPreconditioner.hpp" #include "linearAlgebra/solvers/SeparateComponentPreconditioner.hpp" -#include "mesh/utilities/AverageOverQuadraturePointsKernel.hpp" #include "physicsSolvers/fluidFlow/SinglePhaseBase.hpp" #include "physicsSolvers/multiphysics/SinglePhaseReservoirAndWells.hpp" #include "physicsSolvers/multiphysics/poromechanicsKernels/SinglePhasePoromechanics.hpp" @@ -73,19 +72,8 @@ catalogName() template< typename FLOW_SOLVER > SinglePhasePoromechanics< FLOW_SOLVER >::SinglePhasePoromechanics( const string & name, Group * const parent ) - : Base( name, parent ), - m_isThermal( 0 ) + : Base( name, parent ) { - this->registerWrapper( viewKeyStruct::isThermalString(), &m_isThermal ). - setApplyDefaultValue( 0 ). - setInputFlag( InputFlags::OPTIONAL ). - setDescription( "Flag indicating whether the problem is thermal or not. Set isThermal=\"1\" to enable the thermal coupling" ); - - this->registerWrapper( viewKeyStruct::performStressInitializationString(), &m_performStressInitialization ). - setApplyDefaultValue( false ). - setInputFlag( InputFlags::FALSE ).
- setDescription( "Flag to indicate that the solver is going to perform stress initialization" ); - LinearSolverParameters & linearSolverParameters = this->m_linearSolverParameters.get(); linearSolverParameters.mgr.strategy = LinearSolverParameters::MGR::StrategyType::singlePhasePoromechanics; linearSolverParameters.mgr.separateComponents = true; @@ -98,7 +86,7 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::postProcessInput() { Base::postProcessInput(); - GEOS_ERROR_IF( flowSolver()->catalogName() == "CompositionalMultiphaseReservoir" && + GEOS_ERROR_IF( this->flowSolver()->catalogName() == "SinglePhaseReservoir" && this->getNonlinearSolverParameters().couplingType() != NonlinearSolverParameters::CouplingType::Sequential, GEOS_FMT( "{}: {} solver is only designed to work for {} = {}", this->getName(), catalogName(), NonlinearSolverParameters::viewKeysStruct::couplingTypeString(), @@ -106,46 +94,6 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::postProcessInput() )); } -template< typename FLOW_SOLVER > -void SinglePhasePoromechanics< FLOW_SOLVER >::registerDataOnMesh( Group & meshBodies ) -{ - SolverBase::registerDataOnMesh( meshBodies ); - - if( this->getNonlinearSolverParameters().m_couplingType == NonlinearSolverParameters::CouplingType::Sequential ) - { - // to let the solid mechanics solver that there is a pressure and temperature RHS in the mechanics solve - solidMechanicsSolver()->enableFixedStressPoromechanicsUpdate(); - // to let the flow solver that saving pressure_k and temperature_k is necessary (for the fixed-stress porosity terms) - flowSolver()->enableFixedStressPoromechanicsUpdate(); - } - - this->template forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - - ElementRegionManager & elemManager = mesh.getElemManager(); - - elemManager.forElementSubRegions< ElementSubRegionBase >( regionNames, - [&]( localIndex const, - ElementSubRegionBase & subRegion ) - { - subRegion.registerWrapper< string >( viewKeyStruct::porousMaterialNamesString() ). - setPlotLevel( PlotLevel::NOPLOT ). - setRestartFlags( RestartFlags::NO_WRITE ). 
- setSizedFromParent( 0 ); - - if( this->getNonlinearSolverParameters().m_couplingType == NonlinearSolverParameters::CouplingType::Sequential ) - { - // register the bulk density for use in the solid mechanics solver - // ideally we would resize it here as well, but the solid model name is not available yet (see below) - subRegion.registerField< fields::poromechanics::bulkDensity >( this->getName() ); - } - - } ); - } ); -} - template< typename FLOW_SOLVER > void SinglePhasePoromechanics< FLOW_SOLVER >::setupCoupling( DomainPartition const & GEOS_UNUSED_PARAM( domain ), DofManager & dofManager ) const @@ -155,67 +103,6 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::setupCoupling( DomainPartition con DofManager::Connector::Elem ); } -template< typename FLOW_SOLVER > -void SinglePhasePoromechanics< FLOW_SOLVER >::setupDofs( DomainPartition const & domain, - DofManager & dofManager ) const -{ - // note that the order of operations matters a lot here (for instance for the MGR labels) - // we must set up dofs for solid mechanics first, and then for flow - // that's the reason why this function is here and not in CoupledSolvers.hpp - solidMechanicsSolver()->setupDofs( domain, dofManager ); - flowSolver()->setupDofs( domain, dofManager ); - - setupCoupling( domain, dofManager ); -} - -template< typename FLOW_SOLVER > -void SinglePhasePoromechanics< FLOW_SOLVER >::initializePreSubGroups() -{ - SolverBase::initializePreSubGroups(); - - DomainPartition & domain = this->template getGroupByPath< DomainPartition >( "/Problem/domain" ); - - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - ElementRegionManager & elementRegionManager = mesh.getElemManager(); - elementRegionManager.forElementSubRegions< ElementSubRegionBase >( regionNames, - [&]( localIndex const, - ElementSubRegionBase & subRegion ) - { - // skip the wells - if( subRegion.getCatalogName() == "wellElementSubRegion" ) - return; - - string & porousName = subRegion.getReference< string >( viewKeyStruct::porousMaterialNamesString() ); - porousName = this->template getConstitutiveName< CoupledSolidBase >( subRegion ); - GEOS_THROW_IF( porousName.empty(), - GEOS_FMT( "{} {} : Solid model not found on subregion {}", - getCatalogName(), this->getDataContext().toString(), subRegion.getName() ), - InputError ); - - if( subRegion.hasField< fields::poromechanics::bulkDensity >() ) - { - // get the solid model to know the number of quadrature points and resize the bulk density - CoupledSolidBase const & solid = this->template getConstitutiveModel< CoupledSolidBase >( subRegion, porousName ); - subRegion.getField< fields::poromechanics::bulkDensity >().resizeDimension< 1 >( solid.getDensity().size( 1 ) ); - } - - } ); - } ); - -} - -template< typename FLOW_SOLVER > -void SinglePhasePoromechanics< FLOW_SOLVER >::implicitStepSetup( real64 const & time_n, - real64 const & dt, - DomainPartition & domain ) -{ - flowSolver()->keepFlowVariablesConstantDuringInitStep( m_performStressInitialization ); - Base::implicitStepSetup( time_n, dt, domain ); -} - template< typename FLOW_SOLVER > void SinglePhasePoromechanics< FLOW_SOLVER >::setupSystem( DomainPartition & domain, DofManager & dofManager, @@ -241,35 +128,35 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::setupSystem( DomainPartition & dom template< typename FLOW_SOLVER > void SinglePhasePoromechanics< FLOW_SOLVER >::initializePostInitialConditionsPreSubGroups() { - 
SolverBase::initializePostInitialConditionsPreSubGroups(); + Base::initializePostInitialConditionsPreSubGroups(); arrayView1d< string const > const & poromechanicsTargetRegionNames = this->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); arrayView1d< string const > const & flowTargetRegionNames = - flowSolver()->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); + this->flowSolver()->template getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); for( integer i = 0; i < poromechanicsTargetRegionNames.size(); ++i ) { GEOS_THROW_IF( std::find( flowTargetRegionNames.begin(), flowTargetRegionNames.end(), poromechanicsTargetRegionNames[i] ) == flowTargetRegionNames.end(), GEOS_FMT( "{} {}: region `{}` must be a target region of `{}`", - getCatalogName(), this->getDataContext(), poromechanicsTargetRegionNames[i], flowSolver()->getDataContext() ), + getCatalogName(), this->getDataContext(), poromechanicsTargetRegionNames[i], this->flowSolver()->getDataContext() ), InputError ); } - integer & isFlowThermal = flowSolver()->isThermal(); - GEOS_LOG_RANK_0_IF( m_isThermal && !isFlowThermal, + integer & isFlowThermal = this->flowSolver()->isThermal(); + GEOS_LOG_RANK_0_IF( this->m_isThermal && !isFlowThermal, GEOS_FMT( "{} {}: The attribute `{}` of the flow solver `{}` is set to 1 since the poromechanics solver is thermal", getCatalogName(), this->getName(), - FlowSolverBase::viewKeyStruct::isThermalString(), flowSolver()->getDataContext() ) ); - isFlowThermal = m_isThermal; + FlowSolverBase::viewKeyStruct::isThermalString(), this->flowSolver()->getDataContext() ) ); + isFlowThermal = this->m_isThermal; - if( m_isThermal ) + if( this->m_isThermal ) { this->m_linearSolverParameters.get().mgr.strategy = LinearSolverParameters::MGR::StrategyType::thermalSinglePhasePoromechanics; } else { - if( flowSolver()->getLinearSolverParameters().mgr.strategy == LinearSolverParameters::MGR::StrategyType::singlePhaseHybridFVM ) + if( this->flowSolver()->getLinearSolverParameters().mgr.strategy == LinearSolverParameters::MGR::StrategyType::singlePhaseHybridFVM ) { this->m_linearSolverParameters.get().mgr.strategy = LinearSolverParameters::MGR::StrategyType::hybridSinglePhasePoromechanics; } @@ -295,11 +182,11 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::assembleSystem( real64 const time_ localRhs ); // Step 3: compute the fluxes (face-based contributions) - flowSolver()->assembleFluxTerms( dt, - domain, - dofManager, - localMatrix, - localRhs ); + this->flowSolver()->assembleFluxTerms( dt, + domain, + dofManager, + localMatrix, + localRhs ); } @@ -329,7 +216,7 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::assembleElementBasedTerms( real64 string const flowDofKey = dofManager.getKey( SinglePhaseBase::viewKeyStruct::elemDofFieldString() ); - if( m_isThermal ) + if( this->m_isThermal ) { poromechanicsMaxForce = assemblyLaunch< constitutive::PorousSolid< ElasticIsotropic >, // TODO: change once there is a cmake solution @@ -361,9 +248,9 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::assembleElementBasedTerms( real64 // step 2: apply mechanics solver on its target regions not included in the poromechanics solver target regions - solidMechanicsSolver()->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) + this->solidMechanicsSolver()->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( 
string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) { // collect the target region of the mechanics solver not included in the poromechanics target regions array1d< string > filteredRegionNames; @@ -394,7 +281,7 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::assembleElementBasedTerms( real64 dt ); } ); - solidMechanicsSolver()->getMaxForce() = LvArray::math::max( mechanicsMaxForce, poromechanicsMaxForce ); + this->solidMechanicsSolver()->getMaxForce() = LvArray::math::max( mechanicsMaxForce, poromechanicsMaxForce ); } template< typename FLOW_SOLVER > @@ -406,12 +293,12 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::createPreconditioner() SchurComplementOption::RowsumDiagonalProbing, BlockScalingOption::FrobeniusNorm ); - auto mechPrecond = LAInterface::createPreconditioner( solidMechanicsSolver()->getLinearSolverParameters() ); + auto mechPrecond = LAInterface::createPreconditioner( this->solidMechanicsSolver()->getLinearSolverParameters() ); precond->setupBlock( 0, { { solidMechanics::totalDisplacement::key(), { 3, true } } }, std::make_unique< SeparateComponentPreconditioner< LAInterface > >( 3, std::move( mechPrecond ) ) ); - auto flowPrecond = LAInterface::createPreconditioner( flowSolver()->getLinearSolverParameters() ); + auto flowPrecond = LAInterface::createPreconditioner( this->flowSolver()->getLinearSolverParameters() ); precond->setupBlock( 1, { { flow::pressure::key(), { 1, true } } }, std::move( flowPrecond ) ); @@ -439,65 +326,15 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::updateState( DomainPartition & dom [&]( localIndex const, CellElementSubRegion & subRegion ) { - flowSolver()->updateFluidState( subRegion ); - if( m_isThermal ) + this->flowSolver()->updateFluidState( subRegion ); + if( this->m_isThermal ) { - flowSolver()->updateSolidInternalEnergyModel( subRegion ); + this->flowSolver()->updateSolidInternalEnergyModel( subRegion ); } } ); } ); } -template< typename FLOW_SOLVER > -void SinglePhasePoromechanics< FLOW_SOLVER >::mapSolutionBetweenSolvers( DomainPartition & domain, integer const solverType ) -{ - GEOS_MARK_FUNCTION; - - /// After the flow solver - if( solverType == static_cast< integer >( SolverType::Flow ) ) - { - // save pressure and temperature at the end of this iteration - flowSolver()->saveIterationState( domain ); - - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - - mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, - auto & subRegion ) - { - // update the bulk density - // TODO: ideally, we would not recompute the bulk density, but a more general "rhs" containing the body force and the - // pressure/temperature terms - updateBulkDensity( subRegion ); - - } ); - } ); - } - - /// After the solid mechanics solver - if( solverType == static_cast< integer >( SolverType::SolidMechanics ) ) - { - // compute the average of the mean stress increment over quadrature points - averageMeanStressIncrement( domain ); - - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - - mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, - auto & subRegion ) - { - // update the porosity after a change in displacement (after mechanics solve) - // or a change in pressure/temperature (after a flow 
solve) - flowSolver()->updatePorosityAndPermeability( subRegion ); - } ); - } ); - } -} - template< typename FLOW_SOLVER > void SinglePhasePoromechanics< FLOW_SOLVER >::updateBulkDensity( ElementSubRegionBase & subRegion ) { @@ -517,48 +354,6 @@ void SinglePhasePoromechanics< FLOW_SOLVER >::updateBulkDensity( ElementSubRegio subRegion ); } -template< typename FLOW_SOLVER > -void SinglePhasePoromechanics< FLOW_SOLVER >::averageMeanStressIncrement( DomainPartition & domain ) -{ - this->template forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) - { - mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, - auto & subRegion ) - { - // get the solid model (to access stress increment) - string const solidName = subRegion.template getReference< string >( viewKeyStruct::porousMaterialNamesString() ); - CoupledSolidBase & solid = this->template getConstitutiveModel< CoupledSolidBase >( subRegion, solidName ); - - arrayView2d< real64 const > const meanStressIncrement_k = solid.getMeanEffectiveStressIncrement_k(); - arrayView1d< real64 > const averageMeanStressIncrement_k = solid.getAverageMeanEffectiveStressIncrement_k(); - - finiteElement::FiniteElementBase & subRegionFE = - subRegion.template getReference< finiteElement::FiniteElementBase >( solidMechanicsSolver()->getDiscretizationName() ); - - // determine the finite element type - finiteElement::FiniteElementDispatchHandler< BASE_FE_TYPES >:: - dispatch3D( subRegionFE, [&] ( auto const finiteElement ) - { - using FE_TYPE = decltype( finiteElement ); - - // call the factory and launch the kernel - AverageOverQuadraturePoints1DKernelFactory:: - createAndLaunch< CellElementSubRegion, - FE_TYPE, - parallelDevicePolicy<> >( mesh.getNodeManager(), - mesh.getEdgeManager(), - mesh.getFaceManager(), - subRegion, - finiteElement, - meanStressIncrement_k, - averageMeanStressIncrement_k ); - } ); - } ); - } ); -} - template class SinglePhasePoromechanics< SinglePhaseBase >; template class SinglePhasePoromechanics< SinglePhaseReservoirAndWells< SinglePhaseBase > >; diff --git a/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.hpp b/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.hpp index 060ad19fe5b..52097c53b33 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.hpp +++ b/src/coreComponents/physicsSolvers/multiphysics/SinglePhasePoromechanics.hpp @@ -19,34 +19,24 @@ #ifndef GEOS_PHYSICSSOLVERS_MULTIPHYSICS_SINGLEPHASEPOROMECHANICS_HPP_ #define GEOS_PHYSICSSOLVERS_MULTIPHYSICS_SINGLEPHASEPOROMECHANICS_HPP_ -#include "physicsSolvers/multiphysics/CoupledSolver.hpp" -#include "physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.hpp" +#include "physicsSolvers/multiphysics/PoromechanicsSolver.hpp" + namespace geos { template< typename FLOW_SOLVER > -class SinglePhasePoromechanics : public CoupledSolver< FLOW_SOLVER, - SolidMechanicsLagrangianFEM > +class SinglePhasePoromechanics : public PoromechanicsSolver< FLOW_SOLVER > { public: - using Base = CoupledSolver< FLOW_SOLVER, SolidMechanicsLagrangianFEM >; + using Base = PoromechanicsSolver< FLOW_SOLVER >; using Base::m_solvers; using Base::m_dofManager; using Base::m_localMatrix; using Base::m_rhs; using Base::m_solution; - enum class SolverType : integer - { - Flow = 0, - SolidMechanics = 1 - }; - - /// String used to form the solverName used to register solvers in CoupledSolver - 
static string coupledSolverAttributePrefix() { return "poromechanics"; } - /** * @brief main constructor for SinglePhasePoromechanics objects * @param name the name of this instantiation of SinglePhasePoromechanics in the repository @@ -68,24 +58,6 @@ class SinglePhasePoromechanics : public CoupledSolver< FLOW_SOLVER, */ string getCatalogName() const override { return catalogName(); } - /** - * @brief accessor for the pointer to the solid mechanics solver - * @return a pointer to the solid mechanics solver - */ - SolidMechanicsLagrangianFEM * solidMechanicsSolver() const - { - return std::get< toUnderlying( SolverType::SolidMechanics ) >( m_solvers ); - } - - /** - * @brief accessor for the pointer to the flow solver - * @return a pointer to the flow solver - */ - FLOW_SOLVER * flowSolver() const - { - return std::get< toUnderlying( SolverType::Flow ) >( m_solvers ); - } - /** * @defgroup Solver Interface Functions * @@ -95,18 +67,9 @@ class SinglePhasePoromechanics : public CoupledSolver< FLOW_SOLVER, virtual void postProcessInput() override; - virtual void registerDataOnMesh( dataRepository::Group & MeshBodies ) override; - virtual void setupCoupling( DomainPartition const & domain, DofManager & dofManager ) const override; - virtual void setupDofs( DomainPartition const & domain, - DofManager & dofManager ) const override; - - virtual void implicitStepSetup( real64 const & time_n, - real64 const & dt, - DomainPartition & domain ) override; - virtual void setupSystem( DomainPartition & domain, DofManager & dofManager, CRSMatrix< real64, globalIndex > & localMatrix, @@ -123,43 +86,23 @@ class SinglePhasePoromechanics : public CoupledSolver< FLOW_SOLVER, virtual void updateState( DomainPartition & domain ) override; - /* - * @brief Utility function to set the stress initialization flag - * @param[in] performStressInitialization true if the solver has to initialize stress, false otherwise - */ - void setStressInitialization( integer const performStressInitialization ) - { m_performStressInitialization = performStressInitialization; } - /**@}*/ - virtual void mapSolutionBetweenSolvers( DomainPartition & Domain, integer const idx ) override final; - struct viewKeyStruct : Base::viewKeyStruct { - /// Names of the porous materials - constexpr static char const * porousMaterialNamesString() { return "porousMaterialNames"; } - - /// Flag to indicate that the simulation is thermal - constexpr static char const * isThermalString() { return "isThermal"; } - - /// Flag to indicate that the solver is going to perform stress initialization - constexpr static char const * performStressInitializationString() { return "performStressInitialization"; } + // nothing yet here }; protected: virtual void initializePostInitialConditionsPreSubGroups() override; - virtual void initializePreSubGroups() override; - void assembleElementBasedTerms( real64 const time_n, real64 const dt, DomainPartition & domain, DofManager const & dofManager, CRSMatrixView< real64, globalIndex const > const & localMatrix, arrayView1d< real64 > const & localRhs ); - /// flag to determine whether or not this is a thermal simulation - integer m_isThermal; private: @@ -167,13 +110,7 @@ class SinglePhasePoromechanics : public CoupledSolver< FLOW_SOLVER, * @brief Helper function to recompute the bulk density * @param[in] subRegion the element subRegion */ - void updateBulkDensity( ElementSubRegionBase & subRegion ); - - /** - * @brief Helper function to average the mean stress increment - * @param[in] domain the domain partition - */ - void 
averageMeanStressIncrement( DomainPartition & domain ); + virtual void updateBulkDensity( ElementSubRegionBase & subRegion ) override; void createPreconditioner(); @@ -189,8 +126,6 @@ class SinglePhasePoromechanics : public CoupledSolver< FLOW_SOLVER, real64 const dt, PARAMS && ... params ); - /// Flag to indicate that the solver is going to perform stress initialization - integer m_performStressInitialization; }; template< typename FLOW_SOLVER > @@ -228,7 +163,7 @@ real64 SinglePhasePoromechanics< FLOW_SOLVER >::assemblyLaunch( MeshLevel & mesh CONSTITUTIVE_BASE, CellElementSubRegion >( mesh, regionNames, - solidMechanicsSolver()->getDiscretizationName(), + this->solidMechanicsSolver()->getDiscretizationName(), materialNamesString, kernelWrapper ); } diff --git a/src/coreComponents/physicsSolvers/multiphysics/poromechanicsKernels/PoromechanicsKernels.cmake b/src/coreComponents/physicsSolvers/multiphysics/poromechanicsKernels/PoromechanicsKernels.cmake index 790302aaca9..ff5ee1f49eb 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/poromechanicsKernels/PoromechanicsKernels.cmake +++ b/src/coreComponents/physicsSolvers/multiphysics/poromechanicsKernels/PoromechanicsKernels.cmake @@ -23,7 +23,10 @@ set( porousSolidDispatch PorousSolid PorousSolid PorousSolid> PorousSolid> - PorousSolid> ) + PorousSolid> + PorousSolid> + PorousSolid> + PorousSolid> ) set( finiteElementDispatch H1_Hexahedron_Lagrange1_GaussLegendre2 H1_Wedge_Lagrange1_Gauss6 diff --git a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp index 8fdc3403e29..ec97b4bc55d 100644 --- a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp +++ b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp @@ -22,6 +22,7 @@ #include "mainInterface/ProblemManager.hpp" #include "physicsSolvers/PhysicsSolverManager.hpp" #include "physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.hpp" +#include "fileIO/Outputs/OutputBase.hpp" namespace geos { @@ -58,11 +59,20 @@ void SolidMechanicsStatistics::registerDataOnMesh( Group & meshBodies ) nodeStatistics.minDisplacement.resizeDimension< 0 >( 3 ); nodeStatistics.maxDisplacement.resizeDimension< 0 >( 3 ); + + // write output header + if( m_writeCSV > 0 && MpiWrapper::commRank() == 0 ) + { + std::ofstream outputFile( m_outputDir + "/" + mesh.getName() + "_node_statistics" + ".csv" ); + outputFile << "Time [s],Min displacement X [m],Min displacement Y [m],Min displacement Z [m]," + << "Max displacement X [m],Max displacement Y [m],Max displacement Z [m]" << std::endl; + outputFile.close(); + } } ); } -bool SolidMechanicsStatistics::execute( real64 const GEOS_UNUSED_PARAM( time_n ), - real64 const GEOS_UNUSED_PARAM( dt ), +bool SolidMechanicsStatistics::execute( real64 const time_n, + real64 const dt, integer const GEOS_UNUSED_PARAM( cycleNumber ), integer const GEOS_UNUSED_PARAM( eventCounter ), real64 const GEOS_UNUSED_PARAM( eventProgress ), @@ -72,12 +82,13 @@ bool SolidMechanicsStatistics::execute( real64 const GEOS_UNUSED_PARAM( time_n ) MeshLevel & mesh, arrayView1d< string const > const & ) { - computeNodeStatistics( mesh ); + // current time is time_n + dt + computeNodeStatistics( mesh, time_n + dt ); } ); return false; } -void SolidMechanicsStatistics::computeNodeStatistics( MeshLevel & mesh ) const +void SolidMechanicsStatistics::computeNodeStatistics( MeshLevel & mesh, real64 const time ) const { GEOS_MARK_FUNCTION; @@ 
-102,7 +113,8 @@ void SolidMechanicsStatistics::computeNodeStatistics( MeshLevel & mesh ) const maxDispZ, minDispX, minDispY, - minDispZ] GEOS_HOST_DEVICE ( localIndex const a ) + minDispZ] + GEOS_HOST_DEVICE ( localIndex const a ) { if( ghostRank[a] < 0 ) { @@ -138,14 +150,24 @@ void SolidMechanicsStatistics::computeNodeStatistics( MeshLevel & mesh ) const MpiWrapper::getMpiOp( MpiWrapper::Reduction::Min ), MPI_COMM_GEOSX ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ": Min displacement (X, Y, Z): " - << nodeStatistics.minDisplacement[0] << ", " - << nodeStatistics.minDisplacement[1] << ", " - << nodeStatistics.minDisplacement[2] << " m" ); - GEOS_LOG_LEVEL_RANK_0( 1, getName() << ": Max displacement (X, Y, Z): " - << nodeStatistics.maxDisplacement[0] << ", " - << nodeStatistics.maxDisplacement[1] << ", " - << nodeStatistics.maxDisplacement[2] << " m" ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{} (time {} s): Min displacement (X, Y, Z): {}, {}, {} m", + getName(), time, nodeStatistics.minDisplacement[0], + nodeStatistics.minDisplacement[1], nodeStatistics.minDisplacement[2] ) ); + GEOS_LOG_LEVEL_RANK_0( 1, GEOS_FMT( "{} (time {} s): Max displacement (X, Y, Z): {}, {}, {} m", + getName(), time, nodeStatistics.maxDisplacement[0], + nodeStatistics.maxDisplacement[1], nodeStatistics.maxDisplacement[2] ) ); + + if( m_writeCSV > 0 && MpiWrapper::commRank() == 0 ) + { + std::ofstream outputFile( m_outputDir + "/" + mesh.getName() + "_node_statistics" + ".csv", std::ios_base::app ); + outputFile << time; + for( integer i = 0; i < 3; ++i ) + outputFile << "," << nodeStatistics.minDisplacement[i]; + for( integer i = 0; i < 3; ++i ) + outputFile << "," << nodeStatistics.maxDisplacement[i]; + outputFile << std::endl; + outputFile.close(); + } } REGISTER_CATALOG_ENTRY( TaskBase, diff --git a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.hpp b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.hpp index eeab460666c..77c32e85e6b 100644 --- a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.hpp +++ b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.hpp @@ -66,7 +66,7 @@ class SolidMechanicsStatistics : public FieldStatisticsBase< SolidMechanicsLagra * @brief Compute node-based statistics on the reservoir * @param[in] mesh the mesh level object */ - void computeNodeStatistics( MeshLevel & mesh ) const; + void computeNodeStatistics( MeshLevel & mesh, real64 const time ) const; private: @@ -90,6 +90,7 @@ class SolidMechanicsStatistics : public FieldStatisticsBase< SolidMechanicsLagra }; void registerDataOnMesh( Group & meshBodies ) override; + }; diff --git a/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp b/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp index b29940f75fb..42e8639eab7 100644 --- a/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp +++ b/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp @@ -19,7 +19,6 @@ #include "SurfaceGenerator.hpp" #include "ParallelTopologyChange.hpp" - #include "mesh/mpiCommunications/CommunicationTools.hpp" #include "mesh/mpiCommunications/NeighborCommunicator.hpp" #include "mesh/mpiCommunications/SpatialPartition.hpp" @@ -33,6 +32,7 @@ #include "physicsSolvers/solidMechanics/kernels/SolidMechanicsLagrangianFEMKernels.hpp" #include "physicsSolvers/surfaceGeneration/SurfaceGeneratorFields.hpp" #include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" +#include 
"kernels/surfaceGenerationKernels.hpp" #include @@ -176,7 +176,8 @@ SurfaceGenerator::SurfaceGenerator( const string & name, SolverBase( name, parent ), m_failCriterion( 1 ), // m_maxTurnAngle(91.0), - m_nodeBasedSIF( 0 ), + m_nodeBasedSIF( 1 ), + m_isPoroelastic( 0 ), m_rockToughness( 1.0e99 ), m_mpiCommOrder( 0 ) { @@ -191,6 +192,10 @@ SurfaceGenerator::SurfaceGenerator( const string & name, setInputFlag( InputFlags::OPTIONAL ). setDescription( "Flag for choosing between node or edge based criteria: 1 for node based criterion" ); + registerWrapper( viewKeyStruct::isPoroelasticString(), &m_isPoroelastic ). + setInputFlag( InputFlags::OPTIONAL ). + setDescription( "Flag that defines whether the material is poroelastic or not." ); + registerWrapper( viewKeyStruct::mpiCommOrderString(), &m_mpiCommOrder ). setInputFlag( InputFlags::OPTIONAL ). setDescription( "Flag to enable MPI consistent communication ordering" ); @@ -216,6 +221,23 @@ SurfaceGenerator::SurfaceGenerator( const string & name, setInputFlag( InputFlags::FALSE ); } +void SurfaceGenerator::postProcessInput() +{ + static const std::set< integer > binaryOptions = { 0, 1 }; + + GEOS_ERROR_IF( binaryOptions.count( m_isPoroelastic ) == 0, + getWrapperDataContext( viewKeyStruct::isPoroelasticString() ) << + ": option can be either 0 (false) or 1 (true)" ); + + GEOS_ERROR_IF( binaryOptions.count( m_nodeBasedSIF ) == 0, + getWrapperDataContext( viewKeyStruct::nodeBasedSIFString() ) << + ": option can be either 0 (false) or 1 (true)" ); + + GEOS_ERROR_IF( binaryOptions.count( m_mpiCommOrder ) == 0, + getWrapperDataContext( viewKeyStruct::mpiCommOrderString() ) << + ": option can be either 0 (false) or 1 (true)" ); +} + SurfaceGenerator::~SurfaceGenerator() { // TODO Auto-generated destructor stub @@ -2867,14 +2889,6 @@ void SurfaceGenerator::calculateNodeAndFaceSif( DomainPartition const & domain, elementManager.constructFullMaterialViewAccessor< array3d< real64, solid::STRESS_PERMUTATION >, arrayView3d< real64 const, solid::STRESS_USD > >( SolidBase::viewKeyStruct::stressString(), constitutiveManager ); - - - ElementRegionManager::ElementViewAccessor< arrayView4d< real64 const > > const - dNdX = elementManager.constructViewAccessor< array4d< real64 >, arrayView4d< real64 const > >( keys::dNdX ); - - ElementRegionManager::ElementViewAccessor< arrayView2d< real64 const > > const - detJ = elementManager.constructViewAccessor< array2d< real64 >, arrayView2d< real64 const > >( keys::detJ ); - nodeManager.getField< fields::solidMechanics::totalDisplacement >().move( hostMemorySpace, false ); forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, @@ -2894,322 +2908,313 @@ void SurfaceGenerator::calculateNodeAndFaceSif( DomainPartition const & domain, displacement.move( hostMemorySpace, false ); } ); - for( localIndex const trailingFaceIndex : m_trailingFaces ) -// RAJA::forall< parallelHostPolicy >( RAJA::TypedRangeSegment< localIndex >( 0, m_trailingFaces.size() ), -// [=] GEOS_HOST_DEVICE ( localIndex const trailingFacesCounter ) + // auto nodalForceKernel = surfaceGenerationKernels::createKernel( elementManager, constitutiveManager, + // viewKeyStruct::solidMaterialNameString(), false ); + + surfaceGenerationKernels::kernelSelector( elementManager, + constitutiveManager, + viewKeyStruct::solidMaterialNameString(), + m_isPoroelastic, [&] ( auto nodalForceKernel ) { -// localIndex const trailingFaceIndex = m_trailingFaces[ trailingFacesCounter ]; + for( localIndex const trailingFaceIndex : m_trailingFaces ) + { + // 
RAJA::forall< parallelHostPolicy >( RAJA::TypedRangeSegment< localIndex >( 0, m_trailingFaces.size() ), [=] GEOS_HOST_DEVICE ( + // localIndex const trailingFacesCounter ) + // localIndex const trailingFaceIndex = m_trailingFaces[ trailingFacesCounter ]; + /// TODO: check if a ghost face still has the correct attributes such as normal vector, face center, face index. - real64 const faceNormalVector[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3( faceNormal[trailingFaceIndex] ); - //TODO: check if a ghost face still has the correct attributes such as normal vector, face center, face index. - localIndex_array unpinchedNodeID; - localIndex_array pinchedNodeID; - localIndex_array tipEdgesID; + real64 const faceNormalVector[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3( faceNormal[trailingFaceIndex] ); + localIndex_array unpinchedNodeID; + localIndex_array pinchedNodeID; + localIndex_array tipEdgesID; - for( localIndex const nodeIndex : faceToNodeMap[ trailingFaceIndex ] ) - { - if( m_tipNodes.contains( nodeIndex )) - { - pinchedNodeID.emplace_back( nodeIndex ); - } - else + for( localIndex const nodeIndex : faceToNodeMap[ trailingFaceIndex ] ) { - unpinchedNodeID.emplace_back( nodeIndex ); + if( m_tipNodes.contains( nodeIndex )) + { + pinchedNodeID.emplace_back( nodeIndex ); + } + else + { + unpinchedNodeID.emplace_back( nodeIndex ); + } } - } - for( localIndex const edgeIndex : faceToEdgeMap[ trailingFaceIndex ] ) - { - if( m_tipEdges.contains( edgeIndex )) + for( localIndex const edgeIndex : faceToEdgeMap[ trailingFaceIndex ] ) { - tipEdgesID.emplace_back( edgeIndex ); + if( m_tipEdges.contains( edgeIndex )) + { + tipEdgesID.emplace_back( edgeIndex ); + } } - } - if( unpinchedNodeID.size() < 2 || (unpinchedNodeID.size() == 2 && tipEdgesID.size() < 2) ) - { - for( localIndex const nodeIndex : pinchedNodeID ) + if( unpinchedNodeID.size() < 2 || (unpinchedNodeID.size() == 2 && tipEdgesID.size() < 2) ) { - if( isNodeGhost[nodeIndex] < 0 ) + for( localIndex const nodeIndex : pinchedNodeID ) { - real64 nodeDisconnectForce[3] = { 0 }; - real64 const nodePosition[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3( X[nodeIndex] ); - localIndex tralingNodeID = std::numeric_limits< localIndex >::max(); - localIndex nElemEachSide[2]; - nElemEachSide[0] = 0; - nElemEachSide[1] = 0; - - for( localIndex k=0; k( esr ); + real64 nodeDisconnectForce[3] = { 0 }; + real64 const nodePosition[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3( X[nodeIndex] ); + localIndex tralingNodeID = std::numeric_limits< localIndex >::max(); + localIndex nElemEachSide[2]; + nElemEachSide[0] = 0; + nElemEachSide[1] = 0; + + for( localIndex k=0; k const & elementsToNodes = elementSubRegion.nodeList(); - arrayView2d< real64 const > const & elementCenter = elementSubRegion.getElementCenter().toViewConst(); - real64 K = bulkModulus[er][esr][m_solidMaterialFullIndex[er]][ei]; - real64 G = shearModulus[er][esr][m_solidMaterialFullIndex[er]][ei]; - real64 YoungModulus = 9 * K * G / ( 3 * K + G ); - real64 poissonRatio = ( 3 * K - 2 * G ) / ( 2 * ( 3 * K + G ) ); + CellElementSubRegion const & elementSubRegion = elementManager.getRegion( er ).getSubRegion< CellElementSubRegion >( esr ); - localIndex const numQuadraturePoints = detJ[er][esr].size( 1 ); + arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elementsToNodes = elementSubRegion.nodeList(); + arrayView2d< real64 const > const & elementCenter = elementSubRegion.getElementCenter().toViewConst(); - for( localIndex n=0; n( temp, YoungModulus ); - LvArray::tensorOps::scale< 3 >( temp, 1.0 / (1 - poissonRatio * 
poissonRatio) ); - - LvArray::tensorOps::subtract< 3 >( xEle, nodePosition ); - if( LvArray::tensorOps::AiBi< 3 >( xEle, faceNormalVector ) > 0 ) //TODO: check the sign. + if( elementsToNodes( ei, n ) == nodeIndex ) { - nElemEachSide[0] += 1; - LvArray::tensorOps::add< 3 >( nodeDisconnectForce, temp ); - } - else - { - nElemEachSide[1] +=1; - LvArray::tensorOps::subtract< 3 >( nodeDisconnectForce, temp ); + real64 nodalForce[ 3 ] = {0}; + real64 xEle[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( elementCenter[ei] ); + + nodalForceKernel.calculateSingleNodalForce( er, esr, ei, n, nodalForce ); + + LvArray::tensorOps::subtract< 3 >( xEle, nodePosition ); + if( LvArray::tensorOps::AiBi< 3 >( xEle, faceNormalVector ) > 0 ) //TODO: check the sign. + { + nElemEachSide[0] += 1; + LvArray::tensorOps::add< 3 >( nodeDisconnectForce, nodalForce ); + } + else + { + nElemEachSide[1] +=1; + LvArray::tensorOps::subtract< 3 >( nodeDisconnectForce, nodalForce ); + } } } } - } - if( nElemEachSide[0]>=1 && nElemEachSide[1]>=1 ) - { - LvArray::tensorOps::scale< 3 >( nodeDisconnectForce, 0.5 ); - } + if( nElemEachSide[0]>=1 && nElemEachSide[1]>=1 ) + { + LvArray::tensorOps::scale< 3 >( nodeDisconnectForce, 0.5 ); + } - //Find the trailing node according to the node index and face index - if( unpinchedNodeID.size() == 0 ) //Tet mesh under three nodes pinched scenario. Need to find the other - // trailing face that containing the trailing node. - { - for( localIndex const edgeIndex: faceToEdgeMap[ trailingFaceIndex ] ) + //Find the trailing node according to the node index and face index + if( unpinchedNodeID.size() == 0 ) //Tet mesh under three nodes pinched scenario. Need to find the other + // trailing face that containing the trailing node. { - for( localIndex const faceIndex: edgeToFaceMap[ edgeIndex ] ) + for( localIndex const edgeIndex: faceToEdgeMap[ trailingFaceIndex ] ) { - if( faceIndex != trailingFaceIndex && m_tipFaces.contains( faceIndex )) + for( localIndex const faceIndex: edgeToFaceMap[ edgeIndex ] ) { - for( localIndex const iNode: faceToNodeMap[ faceIndex ] ) + if( faceIndex != trailingFaceIndex && m_tipFaces.contains( faceIndex )) { - if( !m_tipNodes.contains( iNode )) + for( localIndex const iNode: faceToNodeMap[ faceIndex ] ) { - tralingNodeID = iNode; + if( !m_tipNodes.contains( iNode )) + { + tralingNodeID = iNode; + } } } } } - } - if( tralingNodeID == std::numeric_limits< localIndex >::max()) + if( tralingNodeID == std::numeric_limits< localIndex >::max()) + { + GEOS_ERROR( getDataContext() << ": The triangular trailing face has three tip nodes but cannot find the other trailing face containing the trailing node." ); + } + } + else if( unpinchedNodeID.size() == 1 ) { - GEOS_ERROR( getDataContext() << ": The triangular trailing face has three tip nodes but cannot find the other trailing face containing the trailing node." ); + tralingNodeID = unpinchedNodeID[0]; } - } - else if( unpinchedNodeID.size() == 1 ) - { - tralingNodeID = unpinchedNodeID[0]; - } - else if( unpinchedNodeID.size() == 2 ) - { - for( localIndex const edgeIndex : nodeToEdgeMap[ nodeIndex ] ) + else if( unpinchedNodeID.size() == 2 ) { - auto const faceToEdgeMapIterator = faceToEdgeMap[ trailingFaceIndex ]; - if( std::find( faceToEdgeMapIterator.begin(), faceToEdgeMapIterator.end(), edgeIndex ) != faceToEdgeMapIterator.end() && - !m_tipEdges.contains( edgeIndex ) ) + for( localIndex const edgeIndex : nodeToEdgeMap[ nodeIndex ] ) { - tralingNodeID = edgeToNodeMap[edgeIndex][0] == nodeIndex ? 
edgeToNodeMap[edgeIndex][1] : edgeToNodeMap[edgeIndex][0]; + auto const faceToEdgeMapIterator = faceToEdgeMap[ trailingFaceIndex ]; + if( std::find( faceToEdgeMapIterator.begin(), faceToEdgeMapIterator.end(), edgeIndex ) != faceToEdgeMapIterator.end() && + !m_tipEdges.contains( edgeIndex ) ) + { + tralingNodeID = edgeToNodeMap[edgeIndex][0] == nodeIndex ? edgeToNodeMap[edgeIndex][1] : edgeToNodeMap[edgeIndex][0]; + } } } - } - //Calculate SIF for the node. - real64 tipNodeSIF; - real64 tipNodeForce[3]; - real64 trailingNodeDisp[3]; - localIndex theOtherTrailingNodeID; + //Calculate SIF for the node. + real64 tipNodeSIF; + real64 tipNodeForce[3]; + real64 trailingNodeDisp[3]; + localIndex theOtherTrailingNodeID; - if( childNodeIndices[tralingNodeID] == -1 ) - { - theOtherTrailingNodeID = parentNodeIndices[tralingNodeID]; - } - else - { - theOtherTrailingNodeID = childNodeIndices[tralingNodeID]; - } + if( childNodeIndices[tralingNodeID] == -1 ) + { + theOtherTrailingNodeID = parentNodeIndices[tralingNodeID]; + } + else + { + theOtherTrailingNodeID = childNodeIndices[tralingNodeID]; + } - LvArray::tensorOps::copy< 3 >( trailingNodeDisp, displacement[theOtherTrailingNodeID] ); - LvArray::tensorOps::subtract< 3 >( trailingNodeDisp, displacement[tralingNodeID] ); + LvArray::tensorOps::copy< 3 >( trailingNodeDisp, displacement[theOtherTrailingNodeID] ); + LvArray::tensorOps::subtract< 3 >( trailingNodeDisp, displacement[tralingNodeID] ); - //Calculate average young's modulus and poisson ratio for fext. - real64 fExternal[2][3]; - for( localIndex i=0; i<2; ++i ) - { - real64 averageYoungModulus( 0 ), averagePoissonRatio( 0 ); - localIndex nodeID = i == 0 ? tralingNodeID : theOtherTrailingNodeID; - for( localIndex k=0; k( fExternal[i], fext[nodeID] ); - LvArray::tensorOps::scale< 3 >( fExternal[i], averageYoungModulus / (1 - averagePoissonRatio * averagePoissonRatio) ); - } + LvArray::tensorOps::copy< 3 >( fExternal[i], fext[nodeID] ); + LvArray::tensorOps::scale< 3 >( fExternal[i], averageYoungModulus / (1 - averagePoissonRatio * averagePoissonRatio) ); + } - //TODO: The sign of fext here is opposite to the sign of fFaceA in function "CalculateEdgeSIF". - tipNodeForce[0] = nodeDisconnectForce[0] - ( fExternal[0][0] - fExternal[1][0] ) / 2.0; - tipNodeForce[1] = nodeDisconnectForce[1] - ( fExternal[0][1] - fExternal[1][1] ) / 2.0; - tipNodeForce[2] = nodeDisconnectForce[2] - ( fExternal[0][2] - fExternal[1][2] ) / 2.0; + //TODO: The sign of fext here is opposite to the sign of fFaceA in function "CalculateEdgeSIF". 
+ tipNodeForce[0] = nodeDisconnectForce[0] - ( fExternal[0][0] - fExternal[1][0] ) / 2.0; + tipNodeForce[1] = nodeDisconnectForce[1] - ( fExternal[0][1] - fExternal[1][1] ) / 2.0; + tipNodeForce[2] = nodeDisconnectForce[2] - ( fExternal[0][2] - fExternal[1][2] ) / 2.0; // tipNodeForce[0] = nodeDisconnectForce[0]; // tipNodeForce[1] = nodeDisconnectForce[1]; // tipNodeForce[2] = nodeDisconnectForce[2]; - real64 tipArea; - tipArea = faceArea( trailingFaceIndex ); - if( faceToNodeMap.sizeOfArray( trailingFaceIndex ) == 3 ) - { - tipArea *= 2.0; - } + real64 tipArea = faceArea( trailingFaceIndex ); + if( faceToNodeMap.sizeOfArray( trailingFaceIndex ) == 3 ) + { + tipArea *= 2.0; + } - tipNodeSIF = pow( (fabs( tipNodeForce[0] * trailingNodeDisp[0] / 2.0 / tipArea ) + fabs( tipNodeForce[1] * trailingNodeDisp[1] / 2.0 / tipArea ) - + fabs( tipNodeForce[2] * trailingNodeDisp[2] / 2.0 / tipArea )), 0.5 ); + tipNodeSIF = pow( (fabs( tipNodeForce[0] * trailingNodeDisp[0] / 2.0 / tipArea ) + fabs( tipNodeForce[1] * trailingNodeDisp[1] / 2.0 / tipArea ) + + fabs( tipNodeForce[2] * trailingNodeDisp[2] / 2.0 / tipArea )), 0.5 ); - if( LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, faceNormalVector ) < 0.0 ) //In case the aperture is negative with the - // presence of confining stress. - { - tipNodeSIF *= -1; - } + if( LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, faceNormalVector ) < 0.0 ) //In case the aperture is negative with the + // presence of confining stress. + { + tipNodeSIF *= -1; + } - SIFNode_All[nodeIndex].emplace_back( tipNodeSIF ); + SIFNode_All[nodeIndex].emplace_back( tipNodeSIF ); - //Calculate SIF on tip faces connected to this trailing face and the tip node. - for( localIndex const edgeIndex: tipEdgesID ) - { - if( edgeToNodeMap[edgeIndex][0] == nodeIndex || edgeToNodeMap[edgeIndex][1] == nodeIndex ) + //Calculate SIF on tip faces connected to this trailing face and the tip node. 
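// For reference, the loop that follows projects the disconnect force and the opening onto the
// local tip frame (vecTipNorm, vecTip, vecEdge) and forms the same energy-based estimate used
// for the tip node above:
//   SIF_I    = sqrt( |tipForce[0] * tipOpening[0]| / (2 * tipArea) )
//   SIF_II   = sqrt( |tipForce[1] * tipOpening[1]| / (2 * tipArea) )
//   SIF_Face = cos(thetaFace/2) * ( SIF_I * cos^2(thetaFace/2) - 1.5 * SIF_II * sin(thetaFace) )
// where thetaFace is the kink angle between vecTip and the candidate face direction vecFace.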
+ for( localIndex const edgeIndex: tipEdgesID ) { - real64 SIF_I = 0, SIF_II = 0, /*SIF_III,*/ SIF_Face; - real64 vecTipNorm[3], vecTip[3], tipForce[3], tipOpening[3]; + if( edgeToNodeMap[edgeIndex][0] == nodeIndex || edgeToNodeMap[edgeIndex][1] == nodeIndex ) + { + real64 SIF_I = 0, SIF_II = 0, /*SIF_III,*/ SIF_Face; + real64 vecTipNorm[3], vecTip[3], tipForce[3], tipOpening[3]; - LvArray::tensorOps::copy< 3 >( vecTipNorm, faceNormal[trailingFaceIndex] ); - LvArray::tensorOps::subtract< 3 >( vecTipNorm, faceNormal[childFaceIndices[trailingFaceIndex]] ); - LvArray::tensorOps::normalize< 3 >( vecTipNorm ); + LvArray::tensorOps::copy< 3 >( vecTipNorm, faceNormal[trailingFaceIndex] ); + LvArray::tensorOps::subtract< 3 >( vecTipNorm, faceNormal[childFaceIndices[trailingFaceIndex]] ); + LvArray::tensorOps::normalize< 3 >( vecTipNorm ); - real64 vecEdge[3]; - edgeManager.calculateLength( edgeIndex, X, vecEdge ); - LvArray::tensorOps::normalize< 3 >( vecEdge ); + real64 vecEdge[3]; + edgeManager.calculateLength( edgeIndex, X, vecEdge ); + LvArray::tensorOps::normalize< 3 >( vecEdge ); - LvArray::tensorOps::crossProduct( vecTip, vecTipNorm, vecEdge ); - LvArray::tensorOps::normalize< 3 >( vecTip ); - real64 v0[3]; - edgeManager.calculateCenter( edgeIndex, X, v0 ); - LvArray::tensorOps::subtract< 3 >( v0, faceCenter[ trailingFaceIndex ] ); + LvArray::tensorOps::crossProduct( vecTip, vecTipNorm, vecEdge ); + LvArray::tensorOps::normalize< 3 >( vecTip ); + real64 v0[3]; + edgeManager.calculateCenter( edgeIndex, X, v0 ); + LvArray::tensorOps::subtract< 3 >( v0, faceCenter[ trailingFaceIndex ] ); - if( LvArray::tensorOps::AiBi< 3 >( v0, vecTip ) < 0 ) - LvArray::tensorOps::scale< 3 >( vecTip, -1.0 ); + if( LvArray::tensorOps::AiBi< 3 >( v0, vecTip ) < 0 ) + LvArray::tensorOps::scale< 3 >( vecTip, -1.0 ); - tipForce[0] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecTipNorm ) - - ( LvArray::tensorOps::AiBi< 3 >( fExternal[0], vecTipNorm ) - LvArray::tensorOps::AiBi< 3 >( fExternal[1], vecTipNorm ) ) / 2.0; - tipForce[1] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecTip ) - - ( LvArray::tensorOps::AiBi< 3 >( fExternal[0], vecTip ) - LvArray::tensorOps::AiBi< 3 >( fExternal[1], vecTip ) ) / 2.0; - tipForce[2] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecEdge ) - - ( LvArray::tensorOps::AiBi< 3 >( fExternal[0], vecEdge ) - LvArray::tensorOps::AiBi< 3 >( fExternal[1], vecEdge ) ) / 2.0; + tipForce[0] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecTipNorm ) - + ( LvArray::tensorOps::AiBi< 3 >( fExternal[0], vecTipNorm ) - LvArray::tensorOps::AiBi< 3 >( fExternal[1], vecTipNorm ) ) / 2.0; + tipForce[1] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecTip ) - + ( LvArray::tensorOps::AiBi< 3 >( fExternal[0], vecTip ) - LvArray::tensorOps::AiBi< 3 >( fExternal[1], vecTip ) ) / 2.0; + tipForce[2] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecEdge ) - + ( LvArray::tensorOps::AiBi< 3 >( fExternal[0], vecEdge ) - LvArray::tensorOps::AiBi< 3 >( fExternal[1], vecEdge ) ) / 2.0; // tipForce[0] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecTipNorm ); // tipForce[1] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecTip ); // tipForce[2] = LvArray::tensorOps::AiBi< 3 >( nodeDisconnectForce, vecEdge ); - tipOpening[0] = LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, vecTipNorm ); - tipOpening[1] = LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, vecTip ); - tipOpening[2] = LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, vecEdge ); + 
tipOpening[0] = LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, vecTipNorm ); + tipOpening[1] = LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, vecTip ); + tipOpening[2] = LvArray::tensorOps::AiBi< 3 >( trailingNodeDisp, vecEdge ); // if( tipForce[0] > 0.0 ) - { - SIF_I = pow( fabs( tipForce[0] * tipOpening[0] / 2.0 / tipArea ), 0.5 ); - SIF_II = pow( fabs( tipForce[1] * tipOpening[1] / 2.0 / tipArea ), 0.5 ); + { + SIF_I = pow( fabs( tipForce[0] * tipOpening[0] / 2.0 / tipArea ), 0.5 ); + SIF_II = pow( fabs( tipForce[1] * tipOpening[1] / 2.0 / tipArea ), 0.5 ); // SIF_III = pow( fabs( tipForce[2] * tipOpening[2] / 2.0 / tipArea ), 0.5 ); - } + } - if( tipOpening[0] < 0 ) - { - SIF_I *= -1.0; - } + if( tipOpening[0] < 0 ) + { + SIF_I *= -1.0; + } - if( tipForce[1] < 0.0 ) - { - SIF_II *= -1.0; - } + if( tipForce[1] < 0.0 ) + { + SIF_II *= -1.0; + } - for( localIndex const faceIndex: edgeToFaceMap[ edgeIndex ] ) - { - if( m_tipFaces.contains( faceIndex )) + for( localIndex const faceIndex: edgeToFaceMap[ edgeIndex ] ) { - real64 vecFace[ 3 ]; - real64 fc[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( faceCenter[faceIndex] ); + if( m_tipFaces.contains( faceIndex )) + { + real64 vecFace[ 3 ]; + real64 fc[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( faceCenter[faceIndex] ); - //Get the vector in the face and normal to the edge. - real64 udist; + //Get the vector in the face and normal to the edge. + real64 udist; - real64 x0_x1[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3( X[edgeToNodeMap[edgeIndex][0]] ); - real64 x0_fc[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3( fc ); + real64 x0_x1[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3( X[edgeToNodeMap[edgeIndex][0]] ); + real64 x0_fc[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3( fc ); - LvArray::tensorOps::subtract< 3 >( x0_x1, X[edgeToNodeMap[edgeIndex][1]] ); - LvArray::tensorOps::normalize< 3 >( x0_x1 ); - LvArray::tensorOps::subtract< 3 >( x0_fc, X[edgeToNodeMap[edgeIndex][1]] ); - udist = LvArray::tensorOps::AiBi< 3 >( x0_x1, x0_fc ); + LvArray::tensorOps::subtract< 3 >( x0_x1, X[edgeToNodeMap[edgeIndex][1]] ); + LvArray::tensorOps::normalize< 3 >( x0_x1 ); + LvArray::tensorOps::subtract< 3 >( x0_fc, X[edgeToNodeMap[edgeIndex][1]] ); + udist = LvArray::tensorOps::AiBi< 3 >( x0_x1, x0_fc ); - real64 ptPrj[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( x0_x1 ); - LvArray::tensorOps::scale< 3 >( ptPrj, udist ); - LvArray::tensorOps::add< 3 >( ptPrj, X[edgeToNodeMap[edgeIndex][1]] ); - LvArray::tensorOps::copy< 3 >( vecFace, fc ); - LvArray::tensorOps::subtract< 3 >( vecFace, ptPrj ); - LvArray::tensorOps::normalize< 3 >( vecFace ); + real64 ptPrj[ 3 ] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( x0_x1 ); + LvArray::tensorOps::scale< 3 >( ptPrj, udist ); + LvArray::tensorOps::add< 3 >( ptPrj, X[edgeToNodeMap[edgeIndex][1]] ); + LvArray::tensorOps::copy< 3 >( vecFace, fc ); + LvArray::tensorOps::subtract< 3 >( vecFace, ptPrj ); + LvArray::tensorOps::normalize< 3 >( vecFace ); // if( LvArray::tensorOps::AiBi< 3 >( vecTip, vecFace ) > cos( m_maxTurnAngle )) - { - // We multiply this by 0.9999999 to avoid an exception caused by acos a number slightly larger than - // 1. - real64 thetaFace = acos( LvArray::tensorOps::AiBi< 3 >( vecTip, vecFace )*0.999999 ); + { + // We multiply this by 0.9999999 to avoid an exception caused by acos a number slightly larger than + // 1. 
+ real64 thetaFace = acos( LvArray::tensorOps::AiBi< 3 >( vecTip, vecFace )*0.999999 ); - real64 tipCrossFace[ 3 ]; - LvArray::tensorOps::crossProduct( tipCrossFace, vecTip, vecEdge ); + real64 tipCrossFace[ 3 ]; + LvArray::tensorOps::crossProduct( tipCrossFace, vecTip, vecEdge ); - if( LvArray::tensorOps::AiBi< 3 >( tipCrossFace, vecEdge ) < 0.0 ) - { - thetaFace *= -1.0; - } + if( LvArray::tensorOps::AiBi< 3 >( tipCrossFace, vecEdge ) < 0.0 ) + { + thetaFace *= -1.0; + } - SIF_Face = cos( thetaFace / 2.0 ) * - ( SIF_I * cos( thetaFace / 2.0 ) * cos( thetaFace / 2.0 ) - 1.5 * SIF_II * sin( thetaFace ) ); + SIF_Face = cos( thetaFace / 2.0 ) * + ( SIF_I * cos( thetaFace / 2.0 ) * cos( thetaFace / 2.0 ) - 1.5 * SIF_II * sin( thetaFace ) ); - SIFonFace_All[faceIndex].emplace_back( SIF_Face ); + SIFonFace_All[faceIndex].emplace_back( SIF_Face ); + } } } } @@ -3218,7 +3223,7 @@ void SurfaceGenerator::calculateNodeAndFaceSif( DomainPartition const & domain, } } } - } + } ); //wu40: the tip node may be included in multiple trailing faces and SIF of the node/face will be calculated multiple // times. We chose the smaller node SIF and the larger face SIF. @@ -3779,10 +3784,10 @@ int SurfaceGenerator::calculateElementForcesOnEdge( DomainPartition const & doma if(( udist <= edgeLength && udist > 0.0 ) || threeNodesPinched ) { - real64 K = bulkModulus[er][esr][m_solidMaterialFullIndex[er]][ei]; - real64 G = shearModulus[er][esr][m_solidMaterialFullIndex[er]][ei]; - real64 YoungModulus = 9 * K * G / ( 3 * K + G ); - real64 poissonRatio = ( 3 * K - 2 * G ) / ( 2 * ( 3 * K + G ) ); + real64 const K = bulkModulus[er][esr][m_solidMaterialFullIndex[er]][ei]; + real64 const G = shearModulus[er][esr][m_solidMaterialFullIndex[er]][ei]; + real64 const YoungModulus = 9 * K * G / ( 3 * K + G ); + real64 const poissonRatio = ( 3 * K - 2 * G ) / ( 2 * ( 3 * K + G ) ); arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elementsToNodes = elementSubRegion.nodeList(); for( localIndex n=0; n, arrayView4d< real64 const > >( dataRepository::keys::dNdX ) ), + m_detJ( elemManager.constructViewAccessor< array2d< real64 >, arrayView2d< real64 const > >( dataRepository::keys::detJ ) ), + m_bulkModulus( elemManager.constructFullMaterialViewAccessor< array1d< real64 >, arrayView1d< real64 const > >( "bulkModulus", constitutiveManager ) ), + m_shearModulus( elemManager.constructFullMaterialViewAccessor< array1d< real64 >, arrayView1d< real64 const > >( "shearModulus", constitutiveManager ) ), + m_stress( elemManager.constructFullMaterialViewAccessor< array3d< real64, solid::STRESS_PERMUTATION >, + arrayView3d< real64 const, solid::STRESS_USD > >( constitutive::SolidBase::viewKeyStruct::stressString(), + constitutiveManager ) ) + { + m_solidMaterialFullIndex.resize( elemManager.numRegions() ); + elemManager.forElementRegionsComplete< CellElementRegion >( [&]( localIndex regionIndex, + CellElementRegion const & region ) + { + string const & solidMaterialName = region.getSubRegion( 0 ).getReference< string >( solidMaterialKey ); + constitutive::ConstitutiveBase const & solid = constitutiveManager.getConstitutiveRelation< constitutive::ConstitutiveBase >( solidMaterialName ); + m_solidMaterialFullIndex[regionIndex] = solid.getIndexInParent(); + } ); + } + + virtual void + calculateSingleNodalForce( localIndex const er, + localIndex const esr, + localIndex const ei, + localIndex const targetNode, + real64 ( & force )[ 3 ] ) const + { + GEOS_MARK_FUNCTION; + + localIndex const numQuadraturePoints = m_detJ[er][esr].size( 1 ); 
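// For reference, the quadrature loop below accumulates the element's internal-force contribution
// at the target node, as implemented by computeNodalForce() in surfaceGenerationKernelsHelpers.hpp:
//   force[i] -= sum_q  sigma_ij( q ) * dN_targetNode/dx_j( q ) * detJ( q )    (sigma in Voigt storage)
// The accumulated force is then rescaled by scaleNodalForce() from the element's bulk and shear moduli.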
+ + // Loop over quadrature points + for( localIndex q = 0; q < numQuadraturePoints; ++q ) + { + real64 const quadratureStress[6] = LVARRAY_TENSOROPS_INIT_LOCAL_6 ( m_stress[er][esr][m_solidMaterialFullIndex[er]][ei][q] ); + real64 const dNdX[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( m_dNdX[er][esr][ei][q][targetNode] ); + computeNodalForce( quadratureStress, dNdX, m_detJ[er][esr][ei][q], force ); + } + + //wu40: the nodal force need to be weighted by Young's modulus and possion's ratio. + scaleNodalForce( m_bulkModulus[er][esr][m_solidMaterialFullIndex[er]][ei], m_shearModulus[er][esr][m_solidMaterialFullIndex[er]][ei], force ); + } + +protected: + + ElementRegionManager::ElementViewAccessor< arrayView4d< real64 const > > const m_dNdX; + + ElementRegionManager::ElementViewAccessor< arrayView2d< real64 const > > const m_detJ; + + ElementRegionManager::MaterialViewAccessor< arrayView1d< real64 const > > const m_bulkModulus; + + ElementRegionManager::MaterialViewAccessor< arrayView1d< real64 const > > const m_shearModulus; + + ElementRegionManager::MaterialViewAccessor< arrayView3d< real64 const, solid::STRESS_USD > > const m_stress; + + array1d< integer > m_solidMaterialFullIndex; +}; + + +class PoroElasticNodalForceKernel : public NodalForceKernel +{ + +public: + PoroElasticNodalForceKernel( ElementRegionManager const & elemManager, + constitutive::ConstitutiveManager const & constitutiveManager, + string const solidMaterialKey, + string const porosityModelKey ): + NodalForceKernel( elemManager, constitutiveManager, solidMaterialKey ), + m_pressure( elemManager.constructArrayViewAccessor< real64, 1 >( fields::flow::pressure::key() ) ), + m_biotCoefficient( elemManager.constructFullMaterialViewAccessor< array1d< real64 >, arrayView1d< real64 const > >( "biotCoefficient", constitutiveManager ) ) + { + m_porosityMaterialFullIndex.resize( elemManager.numRegions() ); + elemManager.forElementRegionsComplete< CellElementRegion >( [&]( localIndex regionIndex, + CellElementRegion const & region ) + { + string const & porosityModelName = region.getSubRegion( 0 ).getReference< string >( porosityModelKey ); + constitutive::ConstitutiveBase const & porosityModel = constitutiveManager.getConstitutiveRelation< constitutive::ConstitutiveBase >( porosityModelName ); + m_porosityMaterialFullIndex[regionIndex] = porosityModel.getIndexInParent(); + } ); + + } + + void + calculateSingleNodalForce( localIndex const er, + localIndex const esr, + localIndex const ei, + localIndex const targetNode, + real64 ( & force )[ 3 ] ) const override + + { + GEOS_MARK_FUNCTION; + + localIndex const numQuadraturePoints = m_detJ[er][esr].size( 1 ); + + // Loop over quadrature points + for( localIndex q = 0; q < numQuadraturePoints; ++q ) + { + real64 totalStress[6] = LVARRAY_TENSOROPS_INIT_LOCAL_6 ( m_stress[er][esr][m_solidMaterialFullIndex[er]][ei][q] ); + real64 const dNdX[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3 ( m_dNdX[er][esr][ei][q][targetNode] ); + /// TODO: make it work for the thermal case as well + LvArray::tensorOps::symAddIdentity< 3 >( totalStress, -m_biotCoefficient[er][esr][m_porosityMaterialFullIndex[er]][ei] * m_pressure[er][esr][ei] ); + + computeNodalForce( totalStress, dNdX, m_detJ[er][esr][ei][q], force ); + } + + //wu40: the nodal force need to be weighted by Young's modulus and possion's ratio. 
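// (scaleNodalForce() multiplies the accumulated force by E / (1 - nu^2), with
//   E = 9*K*G / (3*K + G)   and   nu = (3*K - 2*G) / (2*(3*K + G))
// computed from the element's bulk modulus K and shear modulus G; here the stress passed to
// computeNodalForce() is the total stress assembled above, i.e. the effective stress shifted by
// -biotCoefficient * pressure on the diagonal.)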
+ scaleNodalForce( m_bulkModulus[er][esr][m_solidMaterialFullIndex[er]][ei], m_shearModulus[er][esr][m_solidMaterialFullIndex[er]][ei], force ); + } + +private: + + ElementRegionManager::ElementViewAccessor< arrayView1d< real64 const > > const m_pressure; + + ElementRegionManager::MaterialViewAccessor< arrayView1d< real64 const > > const m_biotCoefficient; + + array1d< integer > m_porosityMaterialFullIndex; + +}; + +template< typename LAMBDA > +void kernelSelector( ElementRegionManager const & elemManager, + constitutive::ConstitutiveManager const & constitutiveManager, + string const solidMaterialKey, + integer const isPoroelastic, + LAMBDA && lambda ) +{ + if( isPoroelastic == 0 ) + { + lambda( NodalForceKernel( elemManager, constitutiveManager, solidMaterialKey ) ); + } + else + { + string const porosityModelKey = constitutive::CoupledSolidBase::viewKeyStruct::porosityModelNameString(); + lambda( PoroElasticNodalForceKernel( elemManager, constitutiveManager, solidMaterialKey, porosityModelKey ) ); + } + +} + +} // namespace solidMechanicsLagrangianFEMKernels + +} // namespace geos diff --git a/src/coreComponents/physicsSolvers/surfaceGeneration/kernels/surfaceGenerationKernelsHelpers.hpp b/src/coreComponents/physicsSolvers/surfaceGeneration/kernels/surfaceGenerationKernelsHelpers.hpp new file mode 100644 index 00000000000..917ce1f1a8a --- /dev/null +++ b/src/coreComponents/physicsSolvers/surfaceGeneration/kernels/surfaceGenerationKernelsHelpers.hpp @@ -0,0 +1,62 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2018-2020 TotalEnergies + * Copyright (c) 2019- GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file surfaceGenerationKernelsHelpers.hpp + */ + + +#include "common/DataTypes.hpp" +#include "common/TimingMacros.hpp" + +namespace geos +{ + +namespace surfaceGenerationKernelsHelpers +{ + +GEOS_HOST_DEVICE +inline void computeNodalForce( real64 const ( &stress) [ 6 ], + real64 const ( &dNdX) [ 3 ], + real64 const detJ, + real64 ( & force ) [ 3 ] ) +{ + + force[ 0 ] -= ( stress[ 0 ] * dNdX[ 0 ] + + stress[ 5 ] * dNdX[ 1 ] + + stress[ 4 ] * dNdX[ 2 ] ) * detJ; + force[ 1 ] -= ( stress[ 5 ] * dNdX[ 0 ] + + stress[ 1 ] * dNdX[ 1 ] + + stress[ 3 ] * dNdX[ 2 ] ) * detJ; + force[ 2 ] -= ( stress[ 4 ] * dNdX[ 0 ] + + stress[ 3 ] * dNdX[ 1 ] + + stress[ 2 ] * dNdX[ 2 ] ) * detJ; +} + +GEOS_HOST_DEVICE +inline void scaleNodalForce( real64 const bulkModulus, + real64 const shearModulus, + real64 ( & force ) [ 3 ] ) +{ + real64 const YoungModulus = 9 * bulkModulus * shearModulus / ( 3 * bulkModulus + shearModulus ); + real64 const poissonRatio = ( 3 * bulkModulus - 2 * shearModulus ) / ( 2 * ( 3 * bulkModulus + shearModulus ) ); + + LvArray::tensorOps::scale< 3 >( force, YoungModulus ); + LvArray::tensorOps::scale< 3 >( force, 1.0 / (1 - poissonRatio * poissonRatio) ); +} + + +} + +} diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEM.cpp new file mode 100644 index 00000000000..3e0e5743550 --- /dev/null +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEM.cpp @@ -0,0 +1,221 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2018-2020 TotalEnergies + * Copyright (c) 2019- GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file AcousticElasticWaveEquationSEM.cpp + */ + +#include "AcousticElasticWaveEquationSEM.hpp" +#include "AcousticElasticWaveEquationSEMKernel.hpp" +#include "dataRepository/Group.hpp" +#include +#include + +namespace geos +{ +using namespace dataRepository; + +void AcousticElasticWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) +{ + SolverBase::registerDataOnMesh( meshBodies ); + + forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & ) + { + NodeManager & nodeManager = mesh.getNodeManager(); + nodeManager.registerField< fields::CouplingVectorx >( getName() ); + nodeManager.registerField< fields::CouplingVectory >( getName() ); + nodeManager.registerField< fields::CouplingVectorz >( getName() ); + } ); +} + +void AcousticElasticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() +{ + SolverBase::initializePostInitialConditionsPreSubGroups(); + + auto acousSolver = acousticSolver(); + auto elasSolver = elasticSolver(); + + auto acousNodesSet = acousSolver->getSolverNodesSet(); + auto elasNodesSet = elasSolver->getSolverNodesSet(); + + for( auto val : acousNodesSet ) + { + if( elasNodesSet.contains( val ) ) + m_interfaceNodesSet.insert( val ); + } + localIndex const numInterfaceNodes = MpiWrapper::sum( m_interfaceNodesSet.size() ); + GEOS_THROW_IF( numInterfaceNodes == 0, "Failed to compute interface: check xml input (solver order)", std::runtime_error ); + + m_acousRegions = acousSolver->getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); + m_elasRegions = elasSolver->getReference< array1d< string > >( SolverBase::viewKeyStruct::targetRegionsString() ); + + DomainPartition & domain = getGroupByPath< DomainPartition >( "/Problem/domain" ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & GEOS_UNUSED_PARAM( regionNames ) ) + { + NodeManager & nodeManager = mesh.getNodeManager(); + FaceManager & faceManager = mesh.getFaceManager(); + ElementRegionManager & elemManager = mesh.getElemManager(); + + arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords = nodeManager.getField< fields::referencePosition32 >().toViewConst(); + + arrayView2d< real64 const > const faceNormals = faceManager.faceNormal().toViewConst(); + ArrayOfArraysView< localIndex const > const faceToNode = faceManager.nodeList().toViewConst(); + arrayView2d< localIndex const > const faceToSubRegion = faceManager.elementSubRegionList(); + arrayView2d< localIndex const > const faceToRegion = faceManager.elementRegionList(); + arrayView2d< localIndex const > const faceToElement = faceManager.elementList(); + + arrayView1d< real32 > const couplingVectorx = nodeManager.getField< fields::CouplingVectorx >(); + couplingVectorx.zero(); + + arrayView1d< real32 > const couplingVectory = nodeManager.getField< fields::CouplingVectory >(); + couplingVectory.zero(); + + arrayView1d< real32 > const couplingVectorz = nodeManager.getField< fields::CouplingVectorz >(); + couplingVectorz.zero(); + + elemManager.forElementRegions( m_acousRegions, [&] ( localIndex const regionIndex, ElementRegionBase const & elemRegion ) + { + elemRegion.forElementSubRegionsIndex( [&]( localIndex const subRegionIndex, ElementSubRegionBase const & elementSubRegion ) + { + finiteElement::FiniteElementBase 
const & + fe = elementSubRegion.getReference< finiteElement::FiniteElementBase >( getDiscretizationName() ); + + finiteElement::FiniteElementDispatchHandler< SEM_FE_TYPES >::dispatch3D( fe, [&] ( auto const finiteElement ) + { + using FE_TYPE = TYPEOFREF( finiteElement ); + + acousticElasticWaveEquationSEMKernels::CouplingKernel< FE_TYPE > kernelC; + kernelC.template launch< EXEC_POLICY, ATOMIC_POLICY >( faceManager.size(), + nodeCoords, + regionIndex, + subRegionIndex, + faceToSubRegion, + faceToRegion, + faceToElement, + faceToNode, + faceNormals, + couplingVectorx, + couplingVectory, + couplingVectorz ); + } ); + } ); + } ); + } ); +} + +real64 AcousticElasticWaveEquationSEM::solverStep( real64 const & time_n, + real64 const & dt, + int const cycleNumber, + DomainPartition & domain ) +{ + GEOS_MARK_FUNCTION; + + auto acousSolver = acousticSolver(); + auto elasSolver = elasticSolver(); + + SortedArrayView< localIndex const > const interfaceNodesSet = m_interfaceNodesSet.toViewConst(); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & GEOS_UNUSED_PARAM( regionNames ) ) + { + NodeManager & nodeManager = mesh.getNodeManager(); + + arrayView1d< real32 const > const acousticMass = nodeManager.getField< fields::AcousticMassVector >(); + arrayView1d< real32 const > const elasticMass = nodeManager.getField< fields::ElasticMassVector >(); + arrayView1d< localIndex > const acousticFSNodeIndicator = nodeManager.getField< fields::AcousticFreeSurfaceNodeIndicator >(); + arrayView1d< localIndex > const elasticFSNodeIndicator = nodeManager.getField< fields::ElasticFreeSurfaceNodeIndicator >(); + + arrayView1d< real32 const > const p_n = nodeManager.getField< fields::Pressure_n >(); + arrayView1d< real32 const > const ux_nm1 = nodeManager.getField< fields::Displacementx_nm1 >(); + arrayView1d< real32 const > const uy_nm1 = nodeManager.getField< fields::Displacementy_nm1 >(); + arrayView1d< real32 const > const uz_nm1 = nodeManager.getField< fields::Displacementz_nm1 >(); + arrayView1d< real32 const > const ux_n = nodeManager.getField< fields::Displacementx_n >(); + arrayView1d< real32 const > const uy_n = nodeManager.getField< fields::Displacementy_n >(); + arrayView1d< real32 const > const uz_n = nodeManager.getField< fields::Displacementz_n >(); + arrayView1d< real32 const > const atoex = nodeManager.getField< fields::CouplingVectorx >(); + arrayView1d< real32 const > const atoey = nodeManager.getField< fields::CouplingVectory >(); + arrayView1d< real32 const > const atoez = nodeManager.getField< fields::CouplingVectorz >(); + + arrayView1d< real32 > const p_np1 = nodeManager.getField< fields::Pressure_np1 >(); + arrayView1d< real32 > const ux_np1 = nodeManager.getField< fields::Displacementx_np1 >(); + arrayView1d< real32 > const uy_np1 = nodeManager.getField< fields::Displacementy_np1 >(); + arrayView1d< real32 > const uz_np1 = nodeManager.getField< fields::Displacementz_np1 >(); + + real32 const dt2 = pow( dt, 2 ); + + elasSolver->computeUnknowns( time_n, dt, cycleNumber, domain, mesh, m_elasRegions ); + + forAll< EXEC_POLICY >( interfaceNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) + { + localIndex const a = interfaceNodesSet[n]; + if( elasticFSNodeIndicator[a] == 1 ) + return; + + real32 const aux = -p_n[a] / elasticMass[a]; + real32 const localIncrementx = dt2 * atoex[a] * aux; + real32 const localIncrementy = dt2 * atoey[a] * aux; + real32 const localIncrementz = dt2 * atoez[a] * aux; + + 
RAJA::atomicAdd< ATOMIC_POLICY >( &ux_np1[a], localIncrementx ); + RAJA::atomicAdd< ATOMIC_POLICY >( &uy_np1[a], localIncrementy ); + RAJA::atomicAdd< ATOMIC_POLICY >( &uz_np1[a], localIncrementz ); + } ); + + elasSolver->synchronizeUnknowns( time_n, dt, cycleNumber, domain, mesh, m_elasRegions ); + + acousSolver->computeUnknowns( time_n, dt, cycleNumber, domain, mesh, m_acousRegions ); + + forAll< EXEC_POLICY >( interfaceNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) + { + localIndex const a = interfaceNodesSet[n]; + if( acousticFSNodeIndicator[a] == 1 ) + return; + + real32 const localIncrement = ( + atoex[a] * ( ux_np1[a] - 2.0 * ux_n[a] + ux_nm1[a] ) + + atoey[a] * ( uy_np1[a] - 2.0 * uy_n[a] + uy_nm1[a] ) + + atoez[a] * ( uz_np1[a] - 2.0 * uz_n[a] + uz_nm1[a] ) + ) / acousticMass[a]; + + RAJA::atomicAdd< ATOMIC_POLICY >( &p_np1[a], localIncrement ); + } ); + + acousSolver->synchronizeUnknowns( time_n, dt, cycleNumber, domain, mesh, m_acousRegions ); + + acousSolver->prepareNextTimestep( mesh ); + elasSolver->prepareNextTimestep( mesh ); + } ); + + return dt; +} + +void AcousticElasticWaveEquationSEM::cleanup( real64 const time_n, + integer const cycleNumber, + integer const eventCounter, + real64 const eventProgress, + DomainPartition & domain ) +{ + elasticSolver()->cleanup( time_n, cycleNumber, eventCounter, eventProgress, domain ); + acousticSolver()->cleanup( time_n, cycleNumber, eventCounter, eventProgress, domain ); +} + +REGISTER_CATALOG_ENTRY( SolverBase, AcousticElasticWaveEquationSEM, string const &, Group * const ) + +} /* namespace geos */ diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEM.hpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEM.hpp new file mode 100644 index 00000000000..326b248abd8 --- /dev/null +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEM.hpp @@ -0,0 +1,212 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2018-2020 TotalEnergies + * Copyright (c) 2019- GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + + +/** + * @file AcousticElasticWaveEquationSEM.hpp + */ + +#ifndef SRC_CORECOMPONENTS_PHYSICSSOLVERS_WAVEPROPAGATION_ACOUSTICELASTICWAVEEQUATIONSEM_HPP_ +#define SRC_CORECOMPONENTS_PHYSICSSOLVERS_WAVEPROPAGATION_ACOUSTICELASTICWAVEEQUATIONSEM_HPP_ + +#include "physicsSolvers/wavePropagation/ElasticWaveEquationSEM.hpp" +#include "physicsSolvers/wavePropagation/AcousticWaveEquationSEM.hpp" +#include "physicsSolvers/SolverBase.hpp" +#include + +namespace geos +{ + +template< typename ... 
SOLVERS >
+class CoupledWaveSolver : public SolverBase
+{
+
+public:
+
+  /**
+   * @brief main constructor for CoupledWaveSolver objects
+   * @param name the name of this instantiation of CoupledWaveSolver in the repository
+   * @param parent the parent group of this instantiation of CoupledWaveSolver
+   */
+  CoupledWaveSolver( const string & name,
+                     Group * const parent )
+    : SolverBase( name, parent )
+  {
+    forEachArgInTuple( m_solvers, [&]( auto solver, auto idx )
+    {
+      using SolverType = TYPEOFPTR( solver );
+      string const key = SolverType::coupledSolverAttributePrefix() + "SolverName";
+      registerWrapper( key, &m_names[idx()] ).
+        setRTTypeName( rtTypes::CustomTypes::groupNameRef ).
+        setInputFlag( dataRepository::InputFlags::REQUIRED ).
+        setDescription( "Name of the " + SolverType::coupledSolverAttributePrefix() + " solver used by the coupled solver" );
+    } );
+  }
+
+  /// deleted copy constructor
+  CoupledWaveSolver( CoupledWaveSolver const & ) = delete;
+
+  /// default move constructor
+  CoupledWaveSolver( CoupledWaveSolver && ) = default;
+
+  /// deleted assignment operator
+  CoupledWaveSolver & operator=( CoupledWaveSolver const & ) = delete;
+
+  /// deleted move operator
+  CoupledWaveSolver & operator=( CoupledWaveSolver && ) = delete;
+
+  virtual void
+  postProcessInput() override final
+  {
+    SolverBase::postProcessInput();
+
+    forEachArgInTuple( m_solvers, [&]( auto & solver, auto idx )
+    {
+      using SolverPtr = TYPEOFREF( solver );
+      using SolverType = TYPEOFPTR( SolverPtr {} );
+      solver = getParent().template getGroupPointer< SolverType >( m_names[idx()] );
+      GEOS_THROW_IF( solver == nullptr,
+                     GEOS_FMT( "Could not find solver '{}' of type {}",
+                               m_names[idx()], LvArray::system::demangleType< SolverType >() ),
+                     InputError );
+    } );
+  }
+
+protected:
+
+  /// Pointers of the single-physics solvers
+  std::tuple< SOLVERS *... > m_solvers;
+
+  /// Names of the single-physics solvers
+  std::array< string, sizeof...( SOLVERS ) > m_names;
+};
+
+
+class AcousticElasticWaveEquationSEM : public CoupledWaveSolver< AcousticWaveEquationSEM, ElasticWaveEquationSEM >
+{
+public:
+  using Base = CoupledWaveSolver< AcousticWaveEquationSEM, ElasticWaveEquationSEM >;
+  using Base::m_solvers;
+  using wsCoordType = AcousticWaveEquationSEM::wsCoordType;
+
+  enum class SolverType : integer
+  {
+    AcousticWaveEquationSEM = 0,
+    ElasticWaveEquationSEM = 1
+  };
+
+  /// String used to form the solverName used to register solvers in CoupledWaveSolver
+  static string coupledSolverAttributePrefix() { return "acousticelastic"; }
+
+  using EXEC_POLICY = parallelDevicePolicy< >;
+  using ATOMIC_POLICY = AtomicPolicy< EXEC_POLICY >;
+
+  virtual void registerDataOnMesh( Group & meshBodies ) override final;
+
+  /**
+   * @brief main constructor for AcousticElasticWaveEquationSEM objects
+   * @param name the name of this instantiation of AcousticElasticWaveEquationSEM in the repository
+   * @param parent the parent group of this instantiation of AcousticElasticWaveEquationSEM
+   */
+  AcousticElasticWaveEquationSEM( const string & name,
+                                  Group * const parent )
+    : Base( name, parent )
+  { }
+
+  /// Destructor for the class
+  ~AcousticElasticWaveEquationSEM() override {}
+
+  /**
+   * @brief name of this solver type in the object catalog
+   * @return string that contains the catalog name used to generate a new AcousticElasticWaveEquationSEM object through the object catalog.
+   */
+  static string catalogName() { return "AcousticElasticSEM"; }
+
+  /**
+   * @copydoc SolverBase::getCatalogName()
+   */
+  string getCatalogName() const override { return catalogName(); }
+
+  /**
+   * @brief accessor for the pointer to the acoustic wave solver
+   * @return a pointer to the acoustic wave solver
+   */
+  AcousticWaveEquationSEM * acousticSolver() const
+  {
+    return std::get< toUnderlying( SolverType::AcousticWaveEquationSEM ) >( m_solvers );
+  }
+
+  /**
+   * @brief accessor for the pointer to the elastic wave solver
+   * @return a pointer to the elastic wave solver
+   */
+  ElasticWaveEquationSEM * elasticSolver() const
+  {
+    return std::get< toUnderlying( SolverType::ElasticWaveEquationSEM ) >( m_solvers );
+  }
+
+  // (must not be private because it is called from a GEOS_HOST_DEVICE method)
+  virtual real64
+  solverStep( real64 const & time_n,
+              real64 const & dt,
+              integer const cycleNumber,
+              DomainPartition & domain ) override;
+
+  virtual void
+  cleanup( real64 const time_n,
+           integer const cycleNumber,
+           integer const eventCounter,
+           real64 const eventProgress,
+           DomainPartition & domain ) override;
+
+protected:
+
+  virtual void initializePostInitialConditionsPreSubGroups() override;
+
+  SortedArray< localIndex > m_interfaceNodesSet;
+  arrayView1d< string const > m_acousRegions;
+  arrayView1d< string const > m_elasRegions;
+};
+
+namespace fields
+{
+
+DECLARE_FIELD( CouplingVectorx,
+               "couplingVectorx",
+               array1d< real32 >,
+               0,
+               NOPLOT,
+               WRITE_AND_READ,
+               "Coupling term on x." );
+
+DECLARE_FIELD( CouplingVectory,
+               "couplingVectory",
+               array1d< real32 >,
+               0,
+               NOPLOT,
+               WRITE_AND_READ,
+               "Coupling term on y." );
+
+DECLARE_FIELD( CouplingVectorz,
+               "couplingVectorz",
+               array1d< real32 >,
+               0,
+               NOPLOT,
+               WRITE_AND_READ,
+               "Coupling term on z." );
+}
+
+} /* namespace geos */
+
+#endif /* SRC_CORECOMPONENTS_PHYSICSSOLVERS_WAVEPROPAGATION_ACOUSTICELASTICWAVEEQUATIONSEM_HPP_ */
diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEMKernel.hpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEMKernel.hpp
new file mode 100644
index 00000000000..aa3a56b1c0b
--- /dev/null
+++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticElasticWaveEquationSEMKernel.hpp
@@ -0,0 +1,102 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2018-2020 Lawrence Livermore National Security LLC
+ * Copyright (c) 2018-2020 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2018-2020 TotalEnergies
+ * Copyright (c) 2019- GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file AcousticElasticWaveEquationSEMKernel.hpp + */ + +#ifndef SRC_CORECOMPONENTS_PHYSICSSOLVERS_WAVEPROPAGATION_ACOUSTICELASTICWAVEEQUATIONSEMKERNEL_HPP_ +#define SRC_CORECOMPONENTS_PHYSICSSOLVERS_WAVEPROPAGATION_ACOUSTICELASTICWAVEEQUATIONSEMKERNEL_HPP_ + +#include "finiteElement/kernelInterface/KernelBase.hpp" +#if !defined( GEOS_USE_HIP ) +#include "finiteElement/elementFormulations/Qk_Hexahedron_Lagrange_GaussLobatto.hpp" +#endif + +#include + +namespace geos +{ + +namespace acousticElasticWaveEquationSEMKernels +{ + +template< typename FE_TYPE > +struct CouplingKernel +{ + static constexpr localIndex numNodesPerFace = FE_TYPE::numNodesPerFace; + + template< typename EXEC_POLICY, typename ATOMIC_POLICY > + void + launch( localIndex const size, + arrayView2d< WaveSolverBase::wsCoordType const, + nodes::REFERENCE_POSITION_USD > const nodeCoords, + localIndex const regionIndex, + localIndex const subRegionIndex, + arrayView2d< localIndex const > const faceToSubRegion, + arrayView2d< localIndex const > const faceToRegion, + arrayView2d< localIndex const > const faceToElement, + ArrayOfArraysView< localIndex const > const facesToNodes, + arrayView2d< real64 const > const faceNormals, + arrayView1d< real32 > const couplingVectorx, + arrayView1d< real32 > const couplingVectory, + arrayView1d< real32 > const couplingVectorz ) + { + forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const f ) + { + localIndex e0 = faceToElement( f, 0 ), e1 = faceToElement( f, 1 ); + localIndex er0 = faceToRegion( f, 0 ), er1 = faceToRegion( f, 1 ); + localIndex esr0 = faceToSubRegion( f, 0 ), esr1 = faceToSubRegion( f, 1 ); + + if( e0 != -1 && e1 != -1 && er0 != er1 ) // an interface is defined as a transition between regions + { + // check that one of the region is the fluid subregion for the fluid -> solid coupling term + if((er0 == regionIndex && esr0 == subRegionIndex) || (er1 == regionIndex && esr1 == subRegionIndex)) + { + real64 xLocal[ numNodesPerFace ][ 3 ]; + for( localIndex a = 0; a < numNodesPerFace; ++a ) + { + for( localIndex i = 0; i < 3; ++i ) + { + xLocal[a][i] = nodeCoords( facesToNodes( f, a ), i ); + } + } + + // determine normal sign for fluid -> solid coupling + localIndex sgn = er0 == regionIndex ? 1 : (er1 == regionIndex ? 
-1 : 0); + + for( localIndex q = 0; q < numNodesPerFace; ++q ) + { + real64 const aux = FE_TYPE::computeDampingTerm( q, xLocal ); + + real32 const localIncrementx = aux * (sgn * faceNormals( f, 0 )); + real32 const localIncrementy = aux * (sgn * faceNormals( f, 1 )); + real32 const localIncrementz = aux * (sgn * faceNormals( f, 2 )); + + RAJA::atomicAdd< ATOMIC_POLICY >( &couplingVectorx[facesToNodes( f, q )], localIncrementx ); + RAJA::atomicAdd< ATOMIC_POLICY >( &couplingVectory[facesToNodes( f, q )], localIncrementy ); + RAJA::atomicAdd< ATOMIC_POLICY >( &couplingVectorz[facesToNodes( f, q )], localIncrementz ); + } + } + } + } ); + + } +}; + +} /* namespace acousticElasticWaveEquationSEMKernels */ + +} /* namespace geos */ + +#endif /* SRC_CORECOMPONENTS_PHYSICSSOLVERS_WAVEPROPAGATION_ACOUSTICELASTICWAVEEQUATIONSEMKERNEL_HPP_ */ diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEM.cpp index c7ec3d9d330..bdc1f623278 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEM.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEM.cpp @@ -96,19 +96,19 @@ void AcousticFirstOrderWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) nodeManager.registerField< wavesolverfields::Pressure_np1, wavesolverfields::ForcingRHS, - wavesolverfields::MassVector, + wavesolverfields::AcousticMassVector, wavesolverfields::DampingVector, - wavesolverfields::FreeSurfaceNodeIndicator >( getName() ); + wavesolverfields::AcousticFreeSurfaceNodeIndicator >( getName() ); FaceManager & faceManager = mesh.getFaceManager(); - faceManager.registerField< wavesolverfields::FreeSurfaceFaceIndicator >( getName() ); + faceManager.registerField< wavesolverfields::AcousticFreeSurfaceFaceIndicator >( getName() ); ElementRegionManager & elemManager = mesh.getElemManager(); elemManager.forElementSubRegions< CellElementSubRegion >( [&]( CellElementSubRegion & subRegion ) { - subRegion.registerField< wavesolverfields::MediumVelocity >( getName() ); - subRegion.registerField< wavesolverfields::MediumDensity >( getName() ); + subRegion.registerField< wavesolverfields::AcousticVelocity >( getName() ); + subRegion.registerField< wavesolverfields::AcousticDensity >( getName() ); subRegion.registerField< wavesolverfields::Velocity_x >( getName() ); subRegion.registerField< wavesolverfields::Velocity_y >( getName() ); @@ -210,7 +210,6 @@ void AcousticFirstOrderWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLev { using FE_TYPE = TYPEOFREF( finiteElement ); - constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; localIndex const numFacesPerElem = elementSubRegion.numFacesPerElement(); acousticFirstOrderWaveEquationSEMKernels:: @@ -218,7 +217,6 @@ void AcousticFirstOrderWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLev launch< EXEC_POLICY, FE_TYPE > ( elementSubRegion.size(), regionIndex, - numNodesPerElem, numFacesPerElem, X, elemGhostRank, @@ -296,7 +294,7 @@ void AcousticFirstOrderWaveEquationSEM::initializePostInitialConditionsPreSubGro ArrayOfArraysView< localIndex const > const facesToNodes = faceManager.nodeList().toViewConst(); // mass matrix to be computed in this function - arrayView1d< real32 > const mass = nodeManager.getField< wavesolverfields::MassVector >(); + arrayView1d< real32 > const mass = nodeManager.getField< wavesolverfields::AcousticMassVector >(); /// damping matrix to be computed for 
each dof in the boundary of the mesh arrayView1d< real32 > const damping = nodeManager.getField< wavesolverfields::DampingVector >(); @@ -304,15 +302,15 @@ void AcousticFirstOrderWaveEquationSEM::initializePostInitialConditionsPreSubGro mass.zero(); /// get array of indicators: 1 if face is on the free surface; 0 otherwise - arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::AcousticFreeSurfaceFaceIndicator >(); mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, CellElementSubRegion & elementSubRegion ) { arrayView2d< localIndex const, cells::NODE_MAP_USD > const elemsToNodes = elementSubRegion.nodeList(); arrayView2d< localIndex const > const elemsToFaces = elementSubRegion.faceList(); - arrayView1d< real32 const > const velocity = elementSubRegion.getField< wavesolverfields::MediumVelocity >(); - arrayView1d< real32 const > const density = elementSubRegion.getField< wavesolverfields::MediumDensity >(); + arrayView1d< real32 const > const velocity = elementSubRegion.getField< wavesolverfields::AcousticVelocity >(); + arrayView1d< real32 const > const density = elementSubRegion.getField< wavesolverfields::AcousticDensity >(); finiteElement::FiniteElementBase const & fe = elementSubRegion.getReference< finiteElement::FiniteElementBase >( getDiscretizationName() ); @@ -343,7 +341,7 @@ void AcousticFirstOrderWaveEquationSEM::initializePostInitialConditionsPreSubGro } ); } ); - WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_receiverConstants.size( 0 ), m_receiverIsLocal ); + WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_outputSeismoTrace, m_receiverConstants.size( 0 ), m_receiverIsLocal ); } @@ -360,10 +358,10 @@ void AcousticFirstOrderWaveEquationSEM::applyFreeSurfaceBC( real64 const time, D ArrayOfArraysView< localIndex const > const faceToNodeMap = faceManager.nodeList().toViewConst(); /// array of indicators: 1 if a face is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::AcousticFreeSurfaceFaceIndicator >(); /// array of indicators: 1 if a node is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< wavesolverfields::FreeSurfaceNodeIndicator >(); + arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< wavesolverfields::AcousticFreeSurfaceNodeIndicator >(); freeSurfaceFaceIndicator.zero(); @@ -457,7 +455,7 @@ real64 AcousticFirstOrderWaveEquationSEM::explicitStepInternal( real64 const & t arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const X = nodeManager.getField< fields::referencePosition32 >().toViewConst(); - arrayView1d< real32 const > const mass = nodeManager.getField< wavesolverfields::MassVector >(); + arrayView1d< real32 const > const mass = nodeManager.getField< wavesolverfields::AcousticMassVector >(); arrayView1d< real32 const > const damping = nodeManager.getField< wavesolverfields::DampingVector >(); arrayView1d< real32 > const p_np1 = nodeManager.getField< wavesolverfields::Pressure_np1 >(); @@ -468,7 +466,7 @@ real64 AcousticFirstOrderWaveEquationSEM::explicitStepInternal( real64 
const & t CellElementSubRegion & elementSubRegion ) { arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elemsToNodes = elementSubRegion.nodeList(); - arrayView1d< real32 const > const density = elementSubRegion.getField< wavesolverfields::MediumDensity >(); + arrayView1d< real32 const > const density = elementSubRegion.getField< wavesolverfields::AcousticDensity >(); arrayView2d< real32 > const velocity_x = elementSubRegion.getField< wavesolverfields::Velocity_x >(); arrayView2d< real32 > const velocity_y = elementSubRegion.getField< wavesolverfields::Velocity_y >(); arrayView2d< real32 > const velocity_z = elementSubRegion.getField< wavesolverfields::Velocity_z >(); diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEMKernel.hpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEMKernel.hpp index ac793524a2a..9347166af08 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEMKernel.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticFirstOrderWaveEquationSEMKernel.hpp @@ -39,7 +39,6 @@ struct PrecomputeSourceAndReceiverKernel * @tparam EXEC_POLICY execution policy * @tparam FE_TYPE finite element type * @param[in] size the number of cells in the subRegion - * @param[in] numNodesPerElem number of nodes per element * @param[in] numFacesPerElem number of faces per element * @param[in] nodeCoords coordinates of the nodes * @param[in] elemGhostRank rank of the ghost element @@ -68,7 +67,6 @@ struct PrecomputeSourceAndReceiverKernel static void launch( localIndex const size, localIndex const regionIndex, - localIndex const numNodesPerElem, localIndex const numFacesPerElem, arrayView2d< WaveSolverBase::wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords, arrayView1d< integer const > const elemGhostRank, @@ -95,6 +93,7 @@ struct PrecomputeSourceAndReceiverKernel real32 const timeSourceDelay, localIndex const rickerOrder ) { + constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const k ) { @@ -133,7 +132,7 @@ struct PrecomputeSourceAndReceiverKernel sourceIsAccessible[isrc] = 1; sourceElem[isrc] = k; sourceRegion[isrc] = regionIndex; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) @@ -181,7 +180,7 @@ struct PrecomputeSourceAndReceiverKernel rcvElem[ircv] = k; receiverRegion[ircv] = regionIndex; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) @@ -227,26 +226,26 @@ struct MassMatrixKernel arrayView1d< real32 > const mass ) { - forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const k ) + forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const e ) { constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; constexpr localIndex numQuadraturePointsPerElem = FE_TYPE::numQuadraturePoints; - real32 const invC2 = 1.0 / ( density[k] * velocity[k] * velocity[k] ); + real32 const invC2 = 1.0 / ( density[e] * pow( velocity[e], 2 ) ); real64 xLocal[ numNodesPerElem ][ 3 ]; for( localIndex a = 0; a < numNodesPerElem; ++a ) { for( localIndex i = 0; i < 3; ++i ) { - xLocal[a][i] = nodeCoords( elemsToNodes( k, a ), i ); + xLocal[a][i] = nodeCoords( elemsToNodes( e, a ), i ); } } for( localIndex q = 0; q < numQuadraturePointsPerElem; ++q ) { 
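// Mass contribution of quadrature point q: (1 / (rho * c^2)) * computeMassTerm( q, xLocal ),
// accumulated on the GLL node collocated with q, i.e. elemsToNodes( e, q ).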
real32 const localIncrement = invC2 * m_finiteElement.computeMassTerm( q, xLocal ); - RAJA::atomicAdd< ATOMIC_POLICY >( &mass[elemsToNodes[k][q]], localIncrement ); + RAJA::atomicAdd< ATOMIC_POLICY >( &mass[elemsToNodes( e, q )], localIncrement ); } } ); // end loop over element } diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEM.cpp index 8922a92e8cd..08e6b724d8f 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEM.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEM.cpp @@ -69,20 +69,20 @@ void AcousticVTIWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) fields::wavesolverfields::Pressure_q_n, fields::wavesolverfields::Pressure_q_np1, fields::wavesolverfields::ForcingRHS, - fields::wavesolverfields::MassVector, + fields::wavesolverfields::AcousticMassVector, fields::wavesolverfields::DampingVector_p, fields::wavesolverfields::DampingVector_pq, fields::wavesolverfields::DampingVector_q, fields::wavesolverfields::DampingVector_qp, fields::wavesolverfields::StiffnessVector_p, fields::wavesolverfields::StiffnessVector_q, - fields::wavesolverfields::FreeSurfaceNodeIndicator, + fields::wavesolverfields::AcousticFreeSurfaceNodeIndicator, fields::wavesolverfields::LateralSurfaceNodeIndicator, fields::wavesolverfields::BottomSurfaceNodeIndicator >( getName() ); FaceManager & faceManager = mesh.getFaceManager(); - faceManager.registerField< fields::wavesolverfields::FreeSurfaceFaceIndicator >( getName() ); + faceManager.registerField< fields::wavesolverfields::AcousticFreeSurfaceFaceIndicator >( getName() ); faceManager.registerField< fields::wavesolverfields::LateralSurfaceFaceIndicator >( getName() ); faceManager.registerField< fields::wavesolverfields::BottomSurfaceFaceIndicator >( getName() ); @@ -93,7 +93,7 @@ void AcousticVTIWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) subRegion.registerField< fields::wavesolverfields::Delta >( getName() ); subRegion.registerField< fields::wavesolverfields::Epsilon >( getName() ); subRegion.registerField< fields::wavesolverfields::F >( getName() ); - subRegion.registerField< fields::wavesolverfields::MediumVelocity >( getName() ); + subRegion.registerField< fields::wavesolverfields::AcousticVelocity >( getName() ); } ); } ); } @@ -115,7 +115,7 @@ void AcousticVTIWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLevel & me NodeManager const & nodeManager = mesh.getNodeManager(); FaceManager const & faceManager = mesh.getFaceManager(); - arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const X32 = + arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); arrayView2d< real64 const > const faceNormal = faceManager.faceNormal(); @@ -168,16 +168,14 @@ void AcousticVTIWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLevel & me { using FE_TYPE = TYPEOFREF( finiteElement ); - constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; localIndex const numFacesPerElem = elementSubRegion.numFacesPerElement(); acousticVTIWaveEquationSEMKernels:: PrecomputeSourceAndReceiverKernel:: launch< EXEC_POLICY, FE_TYPE > ( elementSubRegion.size(), - numNodesPerElem, numFacesPerElem, - X32, + nodeCoords32, elemGhostRank, elemsToNodes, elemsToFaces, @@ -250,7 +248,7 @@ void AcousticVTIWaveEquationSEM::initializePostInitialConditionsPreSubGroups() 
ArrayOfArraysView< localIndex const > const facesToNodes = faceManager.nodeList().toViewConst(); // mass matrix to be computed in this function - arrayView1d< real32 > const mass = nodeManager.getField< fields::wavesolverfields::MassVector >(); + arrayView1d< real32 > const mass = nodeManager.getField< fields::wavesolverfields::AcousticMassVector >(); mass.zero(); /// damping matrices to be computed for each dof in the boundary of the mesh arrayView1d< real32 > const damping_p = nodeManager.getField< fields::wavesolverfields::DampingVector_p >(); @@ -263,7 +261,7 @@ void AcousticVTIWaveEquationSEM::initializePostInitialConditionsPreSubGroups() damping_qp.zero(); /// get array of indicators: 1 if face is on the free surface; 0 otherwise - arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< fields::wavesolverfields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< fields::wavesolverfields::AcousticFreeSurfaceFaceIndicator >(); arrayView1d< localIndex const > const lateralSurfaceFaceIndicator = faceManager.getField< fields::wavesolverfields::LateralSurfaceFaceIndicator >(); arrayView1d< localIndex const > const bottomSurfaceFaceIndicator = faceManager.getField< fields::wavesolverfields::BottomSurfaceFaceIndicator >(); @@ -273,7 +271,7 @@ void AcousticVTIWaveEquationSEM::initializePostInitialConditionsPreSubGroups() arrayView2d< localIndex const, cells::NODE_MAP_USD > const elemsToNodes = elementSubRegion.nodeList(); arrayView2d< localIndex const > const elemsToFaces = elementSubRegion.faceList(); - arrayView1d< real32 const > const velocity = elementSubRegion.getField< fields::wavesolverfields::MediumVelocity >(); + arrayView1d< real32 const > const velocity = elementSubRegion.getField< fields::wavesolverfields::AcousticVelocity >(); arrayView1d< real32 const > const epsilon = elementSubRegion.getField< fields::wavesolverfields::Epsilon >(); arrayView1d< real32 const > const delta = elementSubRegion.getField< fields::wavesolverfields::Delta >(); arrayView1d< real32 const > const vti_f = elementSubRegion.getField< fields::wavesolverfields::F >(); @@ -314,7 +312,7 @@ void AcousticVTIWaveEquationSEM::initializePostInitialConditionsPreSubGroups() } ); } ); - WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_receiverConstants.size( 0 ), m_receiverIsLocal ); + WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_outputSeismoTrace, m_receiverConstants.size( 0 ), m_receiverIsLocal ); } void AcousticVTIWaveEquationSEM::precomputeSurfaceFieldIndicator( DomainPartition & domain ) @@ -424,10 +422,10 @@ void AcousticVTIWaveEquationSEM::applyFreeSurfaceBC( real64 time, DomainPartitio ArrayOfArraysView< localIndex const > const faceToNodeMap = faceManager.nodeList().toViewConst(); /// array of indicators: 1 if a face is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< fields::wavesolverfields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< fields::wavesolverfields::AcousticFreeSurfaceFaceIndicator >(); /// array of indicators: 1 if a node is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< fields::wavesolverfields::FreeSurfaceNodeIndicator >(); + arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< fields::wavesolverfields::AcousticFreeSurfaceNodeIndicator 
>(); fsManager.apply< FaceManager >( time, domain.getMeshBody( 0 ).getMeshLevel( m_discretizationName ), @@ -553,7 +551,7 @@ real64 AcousticVTIWaveEquationSEM::explicitStepInternal( real64 const & time_n, { NodeManager & nodeManager = mesh.getNodeManager(); - arrayView1d< real32 const > const mass = nodeManager.getField< fields::wavesolverfields::MassVector >(); + arrayView1d< real32 const > const mass = nodeManager.getField< fields::wavesolverfields::AcousticMassVector >(); arrayView1d< real32 const > const damping_p = nodeManager.getField< fields::wavesolverfields::DampingVector_p >(); arrayView1d< real32 const > const damping_q = nodeManager.getField< fields::wavesolverfields::DampingVector_q >(); arrayView1d< real32 const > const damping_pq = nodeManager.getField< fields::wavesolverfields::DampingVector_pq >(); @@ -567,7 +565,7 @@ real64 AcousticVTIWaveEquationSEM::explicitStepInternal( real64 const & time_n, arrayView1d< real32 > const q_n = nodeManager.getField< fields::wavesolverfields::Pressure_q_n >(); arrayView1d< real32 > const q_np1 = nodeManager.getField< fields::wavesolverfields::Pressure_q_np1 >(); - arrayView1d< localIndex const > const freeSurfaceNodeIndicator = nodeManager.getField< fields::wavesolverfields::FreeSurfaceNodeIndicator >(); + arrayView1d< localIndex const > const freeSurfaceNodeIndicator = nodeManager.getField< fields::wavesolverfields::AcousticFreeSurfaceNodeIndicator >(); arrayView1d< localIndex const > const lateralSurfaceNodeIndicator = nodeManager.getField< fields::wavesolverfields::LateralSurfaceNodeIndicator >(); arrayView1d< localIndex const > const bottomSurfaceNodeIndicator = nodeManager.getField< fields::wavesolverfields::BottomSurfaceNodeIndicator >(); arrayView1d< real32 > const stiffnessVector_p = nodeManager.getField< fields::wavesolverfields::StiffnessVector_p >(); diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEMKernel.hpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEMKernel.hpp index bd062c38fa2..52434125c57 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEMKernel.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticVTIWaveEquationSEMKernel.hpp @@ -39,7 +39,6 @@ struct PrecomputeSourceAndReceiverKernel * @tparam EXEC_POLICY execution policy * @tparam FE_TYPE finite element type * @param[in] size the number of cells in the subRegion - * @param[in] numNodesPerElem number of nodes per element * @param[in] numFacesPerElem number of faces per element * @param[in] nodeCoords coordinates of the nodes * @param[in] elemGhostRank the ghost ranks @@ -64,7 +63,6 @@ struct PrecomputeSourceAndReceiverKernel template< typename EXEC_POLICY, typename FE_TYPE > static void launch( localIndex const size, - localIndex const numNodesPerElem, localIndex const numFacesPerElem, arrayView2d< WaveSolverBase::wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords, arrayView1d< integer const > const elemGhostRank, @@ -87,6 +85,7 @@ struct PrecomputeSourceAndReceiverKernel real32 const timeSourceDelay, localIndex const rickerOrder ) { + constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const k ) { @@ -123,7 +122,7 @@ struct PrecomputeSourceAndReceiverKernel coordsOnRefElem ); sourceIsAccessible[isrc] = 1; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) 
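Note on the kernel hunks above (and the matching ones in AcousticFirstOrderWaveEquationSEMKernel.hpp): numNodesPerElem is dropped from the launch() argument list and instead recovered inside the kernel as a constexpr taken from FE_TYPE, so stack arrays such as Ntest are sized at compile time. A minimal standalone sketch of that pattern follows; Q1Hexahedron and the simplified calcN signature are illustrative stand-ins, not the GEOS interfaces.

// Sketch only: derive the per-element node count from the FE type at compile time
// instead of threading it through the launch() argument list.
#include <cstddef>

struct Q1Hexahedron                            // stand-in for a GEOS finite-element formulation
{
  static constexpr std::size_t numNodes = 8;
  static void calcN( double (&N)[ numNodes ] ) // simplified stand-in for FE_TYPE::calcN
  {
    for( std::size_t a = 0; a < numNodes; ++a )
    {
      N[a] = 1.0 / numNodes;
    }
  }
};

template< typename FE_TYPE >
void launch( std::size_t numElems )            // no runtime numNodesPerElem argument anymore
{
  // Compile-time constant taken from the FE formulation.
  constexpr std::size_t numNodesPerElem = FE_TYPE::numNodes;
  for( std::size_t k = 0; k < numElems; ++k )
  {
    double Ntest[ numNodesPerElem ];           // was sized with FE_TYPE::numNodes at each use site
    FE_TYPE::calcN( Ntest );
    (void)Ntest;                               // shape functions would be consumed here
  }
}

int main()
{
  launch< Q1Hexahedron >( 4 );
  return 0;
}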
@@ -170,7 +169,7 @@ struct PrecomputeSourceAndReceiverKernel receiverIsLocal[ircv] = 1; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) @@ -220,7 +219,7 @@ struct MassMatrixKernel constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; constexpr localIndex numQuadraturePointsPerElem = FE_TYPE::numQuadraturePoints; - real32 const invC2 = 1.0 / ( velocity[e] * velocity[e] ); + real32 const invC2 = 1.0 / pow( velocity[e], 2 ); real64 xLocal[ numNodesPerElem ][ 3 ]; for( localIndex a = 0; a < numNodesPerElem; ++a ) { diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.cpp index 6cbadc9ec14..d822d1b5a06 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.cpp @@ -71,10 +71,10 @@ void AcousticWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) fields::Pressure_np1, fields::PressureDoubleDerivative, fields::ForcingRHS, - fields::MassVector, + fields::AcousticMassVector, fields::DampingVector, fields::StiffnessVector, - fields::FreeSurfaceNodeIndicator >( getName() ); + fields::AcousticFreeSurfaceNodeIndicator >( getName() ); /// register PML auxiliary variables only when a PML is specified in the xml if( m_usePML ) @@ -89,14 +89,14 @@ void AcousticWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) } FaceManager & faceManager = mesh.getFaceManager(); - faceManager.registerField< fields::FreeSurfaceFaceIndicator >( getName() ); + faceManager.registerField< fields::AcousticFreeSurfaceFaceIndicator >( getName() ); ElementRegionManager & elemManager = mesh.getElemManager(); elemManager.forElementSubRegions< CellElementSubRegion >( [&]( CellElementSubRegion & subRegion ) { - subRegion.registerField< fields::MediumVelocity >( getName() ); - subRegion.registerField< fields::MediumDensity >( getName() ); + subRegion.registerField< fields::AcousticVelocity >( getName() ); + subRegion.registerField< fields::AcousticDensity >( getName() ); subRegion.registerField< fields::PartialGradient >( getName() ); } ); @@ -106,7 +106,6 @@ void AcousticWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) void AcousticWaveEquationSEM::postProcessInput() { - WaveSolverBase::postProcessInput(); m_pressureNp1AtReceivers.resize( m_nsamplesSeismoTrace, m_receiverCoordinates.size( 0 ) + 1 ); @@ -120,12 +119,11 @@ void AcousticWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLevel & mesh, FaceManager const & faceManager = mesh.getFaceManager(); arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const - X32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); + nodeCoords32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); arrayView2d< real64 const > const faceNormal = faceManager.faceNormal(); arrayView2d< real64 const > const faceCenter = faceManager.faceCenter(); - arrayView2d< real64 const > const sourceCoordinates = m_sourceCoordinates.toViewConst(); arrayView2d< localIndex > const sourceNodeIds = m_sourceNodeIds.toView(); arrayView2d< real64 > const sourceConstants = m_sourceConstants.toView(); @@ -172,7 +170,6 @@ void AcousticWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLevel & mesh, { using FE_TYPE = TYPEOFREF( finiteElement ); - constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; localIndex 
const numFacesPerElem = elementSubRegion.numFacesPerElement(); { @@ -181,9 +178,8 @@ void AcousticWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLevel & mesh, PrecomputeSourceAndReceiverKernel:: launch< EXEC_POLICY, FE_TYPE > ( elementSubRegion.size(), - numNodesPerElem, numFacesPerElem, - X32, + nodeCoords32, elemGhostRank, elemsToNodes, elemsToFaces, @@ -265,7 +261,7 @@ void AcousticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() ArrayOfArraysView< localIndex const > const facesToNodes = faceManager.nodeList().toViewConst(); // mass matrix to be computed in this function - arrayView1d< real32 > const mass = nodeManager.getField< fields::MassVector >(); + arrayView1d< real32 > const mass = nodeManager.getField< fields::AcousticMassVector >(); { GEOS_MARK_SCOPE( mass_zero ); mass.zero(); @@ -277,7 +273,7 @@ void AcousticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() damping.zero(); } /// get array of indicators: 1 if face is on the free surface; 0 otherwise - arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< fields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< fields::AcousticFreeSurfaceFaceIndicator >(); elemManager.forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, CellElementSubRegion & elementSubRegion ) @@ -288,8 +284,10 @@ void AcousticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() arrayView2d< localIndex const, cells::NODE_MAP_USD > const elemsToNodes = elementSubRegion.nodeList(); arrayView2d< localIndex const > const elemsToFaces = elementSubRegion.faceList(); - arrayView1d< real32 const > const velocity = elementSubRegion.getField< fields::MediumVelocity >(); - arrayView1d< real32 const > const density = elementSubRegion.getField< fields::MediumDensity >(); + computeTargetNodeSet( elemsToNodes, elementSubRegion.size(), fe.getNumQuadraturePoints() ); + + arrayView1d< real32 const > const velocity = elementSubRegion.getField< fields::AcousticVelocity >(); + arrayView1d< real32 const > const density = elementSubRegion.getField< fields::AcousticDensity >(); /// Partial gradient if gradient as to be computed arrayView1d< real32 > grad = elementSubRegion.getField< fields::PartialGradient >(); @@ -323,7 +321,7 @@ void AcousticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() } ); } ); - WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_receiverConstants.size( 0 ), m_receiverIsLocal ); + WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_outputSeismoTrace, m_receiverConstants.size( 0 ), m_receiverIsLocal ); } @@ -343,13 +341,13 @@ void AcousticWaveEquationSEM::applyFreeSurfaceBC( real64 time, DomainPartition & ArrayOfArraysView< localIndex const > const faceToNodeMap = faceManager.nodeList().toViewConst(); /// array of indicators: 1 if a face is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< fields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< fields::AcousticFreeSurfaceFaceIndicator >(); /// array of indicators: 1 if a node is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< fields::FreeSurfaceNodeIndicator >(); + arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< fields::AcousticFreeSurfaceNodeIndicator >(); - 
//freeSurfaceFaceIndicator.zero(); - //freeSurfaceNodeIndicator.zero(); + // freeSurfaceFaceIndicator.zero(); + // freeSurfaceNodeIndicator.zero(); fsManager.apply< FaceManager >( time, domain.getMeshBody( 0 ).getMeshLevel( m_discretizationName ), @@ -432,7 +430,7 @@ void AcousticWaveEquationSEM::initializePML() NodeManager & nodeManager = mesh.getNodeManager(); /// WARNING: the array below is one of the PML auxiliary variables arrayView1d< real32 > const indicatorPML = nodeManager.getField< fields::AuxiliaryVar4PML >(); - arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const X32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); + arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); indicatorPML.zero(); real32 xInteriorMin[3]{}; @@ -491,20 +489,20 @@ void AcousticWaveEquationSEM::initializePML() forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) { - xMinGlobal.min( X32[a][0] ); - yMinGlobal.min( X32[a][1] ); - zMinGlobal.min( X32[a][2] ); - xMaxGlobal.max( X32[a][0] ); - yMaxGlobal.max( X32[a][1] ); - zMaxGlobal.max( X32[a][2] ); + xMinGlobal.min( nodeCoords32[a][0] ); + yMinGlobal.min( nodeCoords32[a][1] ); + zMinGlobal.min( nodeCoords32[a][2] ); + xMaxGlobal.max( nodeCoords32[a][0] ); + yMaxGlobal.max( nodeCoords32[a][1] ); + zMaxGlobal.max( nodeCoords32[a][2] ); if( !isZero( indicatorPML[a] - 1.0 )) { - xMinInterior.min( X32[a][0] ); - yMinInterior.min( X32[a][1] ); - zMinInterior.min( X32[a][2] ); - xMaxInterior.max( X32[a][0] ); - yMaxInterior.max( X32[a][1] ); - zMaxInterior.max( X32[a][2] ); + xMinInterior.min( nodeCoords32[a][0] ); + yMinInterior.min( nodeCoords32[a][1] ); + zMinInterior.min( nodeCoords32[a][2] ); + xMaxInterior.max( nodeCoords32[a][0] ); + yMaxInterior.max( nodeCoords32[a][1] ); + zMaxInterior.max( nodeCoords32[a][2] ); } } ); @@ -560,7 +558,7 @@ void AcousticWaveEquationSEM::initializePML() CellElementSubRegion::NodeMapType const & elemToNodes = subRegion.getReference< CellElementSubRegion::NodeMapType >( CellElementSubRegion::viewKeyStruct::nodeListString() ); traits::ViewTypeConst< CellElementSubRegion::NodeMapType > const elemToNodesViewConst = elemToNodes.toViewConst(); - arrayView1d< real32 const > const vel = subRegion.getReference< array1d< real32 > >( fields::MediumVelocity::key()); + arrayView1d< real32 const > const vel = subRegion.getReference< array1d< real32 > >( fields::AcousticVelocity::key()); finiteElement::FiniteElementBase const & fe = subRegion.getReference< finiteElement::FiniteElementBase >( getDiscretizationName() ); @@ -575,7 +573,7 @@ void AcousticWaveEquationSEM::initializePML() waveSpeedPMLKernel< FE_TYPE > kernel( finiteElement ); kernel.template launch< EXEC_POLICY, ATOMIC_POLICY > ( targetSet, - X32, + nodeCoords32, elemToNodesViewConst, vel, xMin, @@ -613,15 +611,15 @@ void AcousticWaveEquationSEM::initializePML() /// add safeguards when PML thickness is negative or too small for( integer i=0; i<3; ++i ) { - if( param.thicknessMinXYZPML[i]<=minThicknessPML ) + if( param.thicknessMinXYZPML[i] <= minThicknessPML ) { - param.thicknessMinXYZPML[i]=LvArray::NumericLimits< real32 >::max; - param.waveSpeedMinXYZPML[i]=0; + param.thicknessMinXYZPML[i] = LvArray::NumericLimits< real32 >::max; + param.waveSpeedMinXYZPML[i] = 0; } if( param.thicknessMaxXYZPML[i]<=minThicknessPML ) { - param.thicknessMaxXYZPML[i]=LvArray::NumericLimits< real32 >::max; - 
param.waveSpeedMaxXYZPML[i]=0; + param.thicknessMaxXYZPML[i] = LvArray::NumericLimits< real32 >::max; + param.waveSpeedMaxXYZPML[i] = 0; } } @@ -666,7 +664,7 @@ void AcousticWaveEquationSEM::applyPML( real64 const time, DomainPartition & dom arrayView2d< real32 > const grad_n = nodeManager.getField< fields::AuxiliaryVar2PML >(); arrayView1d< real32 > const divV_n = nodeManager.getField< fields::AuxiliaryVar3PML >(); arrayView1d< real32 const > const u_n = nodeManager.getField< fields::AuxiliaryVar4PML >(); - arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const X32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); + arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); /// Select the subregions concerned by the PML (specified in the xml by the Field Specification) /// 'targetSet' contains the indices of the elements in a given subregion @@ -690,7 +688,7 @@ void AcousticWaveEquationSEM::applyPML( real64 const time, DomainPartition & dom traits::ViewTypeConst< CellElementSubRegion::NodeMapType > const elemToNodesViewConst = elemToNodes.toViewConst(); /// Array view of the wave speed - arrayView1d< real32 const > const vel = subRegion.getReference< array1d< real32 > >( fields::MediumVelocity::key()); + arrayView1d< real32 const > const vel = subRegion.getReference< array1d< real32 > >( fields::AcousticVelocity::key()); /// Get the object needed to determine the type of the element in the subregion finiteElement::FiniteElementBase const & @@ -723,7 +721,7 @@ void AcousticWaveEquationSEM::applyPML( real64 const time, DomainPartition & dom PMLKernel< FE_TYPE > kernel( finiteElement ); kernel.template launch< EXEC_POLICY, ATOMIC_POLICY > ( targetSet, - X32, + nodeCoords32, elemToNodesViewConst, vel, p_n, @@ -781,7 +779,7 @@ real64 AcousticWaveEquationSEM::explicitStepForward( real64 const & time_n, } forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const nodeIdx ) { - p_dt2[nodeIdx] = (p_np1[nodeIdx] - 2*p_n[nodeIdx] + p_nm1[nodeIdx])/(dt*dt); + p_dt2[nodeIdx] = (p_np1[nodeIdx] - 2*p_n[nodeIdx] + p_nm1[nodeIdx]) / pow( dt, 2 ); } ); if( m_enableLifo ) @@ -820,11 +818,7 @@ real64 AcousticWaveEquationSEM::explicitStepForward( real64 const & time_n, } - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) - { - p_nm1[a] = p_n[a]; - p_n[a] = p_np1[a]; - } ); + prepareNextTimestep( mesh ); } ); return dtOut; @@ -845,7 +839,7 @@ real64 AcousticWaveEquationSEM::explicitStepBackward( real64 const & time_n, { NodeManager & nodeManager = mesh.getNodeManager(); - arrayView1d< real32 const > const mass = nodeManager.getField< fields::MassVector >(); + arrayView1d< real32 const > const mass = nodeManager.getField< fields::AcousticMassVector >(); arrayView1d< real32 > const p_nm1 = nodeManager.getField< fields::Pressure_nm1 >(); arrayView1d< real32 > const p_n = nodeManager.getField< fields::Pressure_n >(); @@ -853,7 +847,7 @@ real64 AcousticWaveEquationSEM::explicitStepBackward( real64 const & time_n, EventManager const & event = getGroupByPath< EventManager >( "/Problem/Events" ); real64 const & maxTime = event.getReference< real64 >( EventManager::viewKeyStruct::maxTimeString() ); - int const maxCycle = int(round( maxTime/dt )); + int const maxCycle = int(round( maxTime / dt )); if( computeGradient && cycleNumber < maxCycle ) { @@ -890,7 +884,7 @@ real64 AcousticWaveEquationSEM::explicitStepBackward( real64 
const & time_n, elemManager.forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, CellElementSubRegion & elementSubRegion ) { - arrayView1d< real32 const > const velocity = elementSubRegion.getField< fields::MediumVelocity >(); + arrayView1d< real32 const > const velocity = elementSubRegion.getField< fields::AcousticVelocity >(); arrayView1d< real32 > grad = elementSubRegion.getField< fields::PartialGradient >(); arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elemsToNodes = elementSubRegion.nodeList(); constexpr localIndex numNodesPerElem = 8; @@ -910,179 +904,217 @@ real64 AcousticWaveEquationSEM::explicitStepBackward( real64 const & time_n, } ); } - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) - { - p_nm1[a] = p_n[a]; - p_n[a] = p_np1[a]; - } ); + prepareNextTimestep( mesh ); } ); return dtOut; } -real64 AcousticWaveEquationSEM::explicitStepInternal( real64 const & time_n, - real64 const & dt, - integer cycleNumber, - DomainPartition & domain ) +void AcousticWaveEquationSEM::prepareNextTimestep( MeshLevel & mesh ) { - GEOS_MARK_FUNCTION; + NodeManager & nodeManager = mesh.getNodeManager(); - GEOS_LOG_RANK_0_IF( dt < epsilonLoc, "Warning! Value for dt: " << dt << "s is smaller than local threshold: " << epsilonLoc ); + arrayView1d< real32 > const p_nm1 = nodeManager.getField< fields::Pressure_nm1 >(); + arrayView1d< real32 > const p_n = nodeManager.getField< fields::Pressure_n >(); + arrayView1d< real32 > const p_np1 = nodeManager.getField< fields::Pressure_np1 >(); - forDiscretizationOnMeshTargets( domain.getMeshBodies(), - [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) + arrayView1d< real32 > const stiffnessVector = nodeManager.getField< fields::StiffnessVector >(); + arrayView1d< real32 > const rhs = nodeManager.getField< fields::ForcingRHS >(); + + SortedArrayView< localIndex const > const solverTargetNodesSet = m_solverTargetNodesSet.toViewConst(); + + forAll< EXEC_POLICY >( solverTargetNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) { - NodeManager & nodeManager = mesh.getNodeManager(); + localIndex const a = solverTargetNodesSet[n]; - arrayView1d< real32 const > const mass = nodeManager.getField< fields::MassVector >(); - arrayView1d< real32 const > const damping = nodeManager.getField< fields::DampingVector >(); + p_nm1[a] = p_n[a]; + p_n[a] = p_np1[a]; - arrayView1d< real32 > const p_nm1 = nodeManager.getField< fields::Pressure_nm1 >(); - arrayView1d< real32 > const p_n = nodeManager.getField< fields::Pressure_n >(); - arrayView1d< real32 > const p_np1 = nodeManager.getField< fields::Pressure_np1 >(); + stiffnessVector[a] = rhs[a] = 0.0; + } ); +} - arrayView1d< localIndex const > const freeSurfaceNodeIndicator = nodeManager.getField< fields::FreeSurfaceNodeIndicator >(); - arrayView1d< real32 > const stiffnessVector = nodeManager.getField< fields::StiffnessVector >(); - arrayView1d< real32 > const rhs = nodeManager.getField< fields::ForcingRHS >(); +void AcousticWaveEquationSEM::computeUnknowns( real64 const & time_n, + real64 const & dt, + integer cycleNumber, + DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) +{ + NodeManager & nodeManager = mesh.getNodeManager(); - bool const usePML = m_usePML; + arrayView1d< real32 const > const mass = nodeManager.getField< fields::AcousticMassVector >(); + arrayView1d< real32 const > const damping = nodeManager.getField< fields::DampingVector >(); - 
auto kernelFactory = acousticWaveEquationSEMKernels::ExplicitAcousticSEMFactory( dt ); + arrayView1d< real32 > const p_nm1 = nodeManager.getField< fields::Pressure_nm1 >(); + arrayView1d< real32 > const p_n = nodeManager.getField< fields::Pressure_n >(); + arrayView1d< real32 > const p_np1 = nodeManager.getField< fields::Pressure_np1 >(); - finiteElement:: - regionBasedKernelApplication< EXEC_POLICY, - constitutive::NullModel, - CellElementSubRegion >( mesh, - regionNames, - getDiscretizationName(), - "", - kernelFactory ); + arrayView1d< localIndex const > const freeSurfaceNodeIndicator = nodeManager.getField< fields::AcousticFreeSurfaceNodeIndicator >(); + arrayView1d< real32 > const stiffnessVector = nodeManager.getField< fields::StiffnessVector >(); + arrayView1d< real32 > const rhs = nodeManager.getField< fields::ForcingRHS >(); - EventManager const & event = getGroupByPath< EventManager >( "/Problem/Events" ); - real64 const & minTime = event.getReference< real64 >( EventManager::viewKeyStruct::minTimeString() ); - integer const cycleForSource = int(round( -minTime/dt + cycleNumber )); - //std::cout<<"cycle GEOSX = "<( mesh, + regionNames, + getDiscretizationName(), + "", + kernelFactory ); - if( !usePML ) + EventManager const & event = getGroupByPath< EventManager >( "/Problem/Events" ); + real64 const & minTime = event.getReference< real64 >( EventManager::viewKeyStruct::minTimeString() ); + integer const cycleForSource = int(round( -minTime / dt + cycleNumber )); + addSourceToRightHandSide( cycleForSource, rhs ); + + /// calculate your time integrators + real64 const dt2 = pow( dt, 2 ); + + SortedArrayView< localIndex const > const solverTargetNodesSet = m_solverTargetNodesSet.toViewConst(); + if( !m_usePML ) + { + GEOS_MARK_SCOPE ( updateP ); + forAll< EXEC_POLICY >( solverTargetNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) + { + localIndex const a = solverTargetNodesSet[n]; + if( freeSurfaceNodeIndicator[a] != 1 ) + { + p_np1[a] = p_n[a]; + p_np1[a] *= 2.0 * mass[a]; + p_np1[a] -= (mass[a] - 0.5 * dt * damping[a]) * p_nm1[a]; + p_np1[a] += dt2 * (rhs[a] - stiffnessVector[a]); + p_np1[a] /= mass[a] + 0.5 * dt * damping[a]; + } + } ); + } + else + { + parametersPML const & param = getReference< parametersPML >( viewKeyStruct::parametersPMLString() ); + arrayView2d< real32 > const v_n = nodeManager.getField< fields::AuxiliaryVar1PML >(); + arrayView2d< real32 > const grad_n = nodeManager.getField< fields::AuxiliaryVar2PML >(); + arrayView1d< real32 > const divV_n = nodeManager.getField< fields::AuxiliaryVar3PML >(); + arrayView1d< real32 > const u_n = nodeManager.getField< fields::AuxiliaryVar4PML >(); + arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const + nodeCoords32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); + + real32 const xMin[3] = {param.xMinPML[0], param.xMinPML[1], param.xMinPML[2]}; + real32 const xMax[3] = {param.xMaxPML[0], param.xMaxPML[1], param.xMaxPML[2]}; + real32 const dMin[3] = {param.thicknessMinXYZPML[0], param.thicknessMinXYZPML[1], param.thicknessMinXYZPML[2]}; + real32 const dMax[3] = {param.thicknessMaxXYZPML[0], param.thicknessMaxXYZPML[1], param.thicknessMaxXYZPML[2]}; + real32 const cMin[3] = {param.waveSpeedMinXYZPML[0], param.waveSpeedMinXYZPML[1], param.waveSpeedMinXYZPML[2]}; + real32 const cMax[3] = {param.waveSpeedMaxXYZPML[0], param.waveSpeedMaxXYZPML[1], param.waveSpeedMaxXYZPML[2]}; + real32 const r = param.reflectivityPML; + + /// apply the main function to update some of the PML 
auxiliary variables + /// Compute (divV) and (B.pressureGrad - C.auxUGrad) vectors for the PML region + applyPML( time_n, domain ); + + GEOS_MARK_SCOPE ( updatePWithPML ); + forAll< EXEC_POLICY >( solverTargetNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) { - GEOS_MARK_SCOPE ( updateP ); - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) + localIndex const a = solverTargetNodesSet[n]; + if( freeSurfaceNodeIndicator[a] != 1 ) { - if( freeSurfaceNodeIndicator[a] != 1 ) + real32 sigma[3]; + real32 xLocal[ 3 ]; + + for( integer i=0; i<3; ++i ) { - p_np1[a] = p_n[a]; - p_np1[a] *= 2.0*mass[a]; - p_np1[a] -= (mass[a]-0.5*dt*damping[a])*p_nm1[a]; - p_np1[a] += dt2*(rhs[a]-stiffnessVector[a]); - p_np1[a] /= mass[a]+0.5*dt*damping[a]; + xLocal[i] = nodeCoords32[a][i]; } - } ); - } - else - { - parametersPML const & param = getReference< parametersPML >( viewKeyStruct::parametersPMLString() ); - arrayView2d< real32 > const v_n = nodeManager.getField< fields::AuxiliaryVar1PML >(); - arrayView2d< real32 > const grad_n = nodeManager.getField< fields::AuxiliaryVar2PML >(); - arrayView1d< real32 > const divV_n = nodeManager.getField< fields::AuxiliaryVar3PML >(); - arrayView1d< real32 > const u_n = nodeManager.getField< fields::AuxiliaryVar4PML >(); - arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const X32 = nodeManager.getField< fields::referencePosition32 >().toViewConst(); - - real32 const xMin[ 3 ] = {param.xMinPML[0], param.xMinPML[1], param.xMinPML[2]}; - real32 const xMax[ 3 ] = {param.xMaxPML[0], param.xMaxPML[1], param.xMaxPML[2]}; - real32 const dMin[ 3 ] = {param.thicknessMinXYZPML[0], param.thicknessMinXYZPML[1], param.thicknessMinXYZPML[2]}; - real32 const dMax[ 3 ] = {param.thicknessMaxXYZPML[0], param.thicknessMaxXYZPML[1], param.thicknessMaxXYZPML[2]}; - real32 const cMin[ 3 ] = {param.waveSpeedMinXYZPML[0], param.waveSpeedMinXYZPML[1], param.waveSpeedMinXYZPML[2]}; - real32 const cMax[ 3 ] = {param.waveSpeedMaxXYZPML[0], param.waveSpeedMaxXYZPML[1], param.waveSpeedMaxXYZPML[2]}; - real32 const r = param.reflectivityPML; - /// apply the main function to update some of the PML auxiliary variables - /// Compute (divV) and (B.pressureGrad - C.auxUGrad) vectors for the PML region - applyPML( time_n, domain ); + acousticWaveEquationSEMKernels::PMLKernelHelper::computeDampingProfilePML( + xLocal, + xMin, + xMax, + dMin, + dMax, + cMin, + cMax, + r, + sigma ); - GEOS_MARK_SCOPE ( updatePWithPML ); - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) - { - if( freeSurfaceNodeIndicator[a] != 1 ) - { - real32 sigma[3]; - real32 xLocal[ 3 ]; + real32 const alpha = sigma[0] + sigma[1] + sigma[2]; - for( integer i=0; i<3; ++i ) - { - xLocal[i] = X32[a][i]; - } + p_np1[a] = dt2 * ((rhs[a] - stiffnessVector[a]) / mass[a] - divV_n[a]) - + (1 - 0.5*alpha*dt)*p_nm1[a] + 2 * p_n[a]; - acousticWaveEquationSEMKernels::PMLKernelHelper::computeDampingProfilePML( - xLocal, - xMin, - xMax, - dMin, - dMax, - cMin, - cMax, - r, - sigma ); + p_np1[a] = p_np1[a] / (1 + 0.5 * alpha * dt); - real32 const alpha = sigma[0] + sigma[1] + sigma[2]; + for( integer i=0; i<3; ++i ) + { + v_n[a][i] = (1 - dt * sigma[i]) * v_n[a][i] - dt * grad_n[a][i]; + } + u_n[a] += dt * p_n[a]; + } + } ); + } +} - p_np1[a] = dt2*( (rhs[a] - stiffnessVector[a])/mass[a] - divV_n[a]) - - (1 - 0.5*alpha*dt)*p_nm1[a] - + 2*p_n[a]; +void AcousticWaveEquationSEM::synchronizeUnknowns( real64 const & time_n, + real64 const & dt, + integer const, 
+ DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & ) +{ + NodeManager & nodeManager = mesh.getNodeManager(); - p_np1[a] = p_np1[a] / (1 + 0.5*alpha*dt); + arrayView1d< real32 > const p_n = nodeManager.getField< fields::Pressure_n >(); + arrayView1d< real32 > const p_np1 = nodeManager.getField< fields::Pressure_np1 >(); - for( integer i=0; i<3; ++i ) - { - v_n[a][i] = (1 - dt*sigma[i])*v_n[a][i] - dt*grad_n[a][i]; - } - u_n[a] += dt*p_n[a]; - } - } ); - } + arrayView1d< real32 > const stiffnessVector = nodeManager.getField< fields::StiffnessVector >(); + arrayView1d< real32 > const rhs = nodeManager.getField< fields::ForcingRHS >(); - /// synchronize pressure fields - FieldIdentifiers fieldsToBeSync; - fieldsToBeSync.addFields( FieldLocation::Node, { fields::Pressure_np1::key() } ); + /// synchronize pressure fields + FieldIdentifiers fieldsToBeSync; + fieldsToBeSync.addFields( FieldLocation::Node, { fields::Pressure_np1::key() } ); - if( usePML ) - { - fieldsToBeSync.addFields( FieldLocation::Node, { - fields::AuxiliaryVar1PML::key(), - fields::AuxiliaryVar4PML::key() } ); - } + if( m_usePML ) + { + fieldsToBeSync.addFields( FieldLocation::Node, { + fields::AuxiliaryVar1PML::key(), + fields::AuxiliaryVar4PML::key() } ); + } - CommunicationTools & syncFields = CommunicationTools::getInstance(); - syncFields.synchronizeFields( fieldsToBeSync, - mesh, - domain.getNeighbors(), - true ); + CommunicationTools & syncFields = CommunicationTools::getInstance(); + syncFields.synchronizeFields( fieldsToBeSync, + mesh, + domain.getNeighbors(), + true ); + /// compute the seismic traces since last step. + arrayView2d< real32 > const pReceivers = m_pressureNp1AtReceivers.toView(); - /// compute the seismic traces since last step. - arrayView2d< real32 > const pReceivers = m_pressureNp1AtReceivers.toView(); - computeAllSeismoTraces( time_n, dt, p_np1, p_n, pReceivers ); - incrementIndexSeismoTrace( time_n ); + computeAllSeismoTraces( time_n, dt, p_np1, p_n, pReceivers ); + incrementIndexSeismoTrace( time_n ); - /// prepare next step - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) - { - stiffnessVector[a] = 0.0; - rhs[a] = 0.0; - } ); + if( m_usePML ) + { + arrayView2d< real32 > const grad_n = nodeManager.getField< fields::AuxiliaryVar2PML >(); + arrayView1d< real32 > const divV_n = nodeManager.getField< fields::AuxiliaryVar3PML >(); + grad_n.zero(); + divV_n.zero(); + } +} - if( usePML ) - { - arrayView2d< real32 > const grad_n = nodeManager.getField< fields::AuxiliaryVar2PML >(); - arrayView1d< real32 > const divV_n = nodeManager.getField< fields::AuxiliaryVar3PML >(); - grad_n.zero(); - divV_n.zero(); - } +real64 AcousticWaveEquationSEM::explicitStepInternal( real64 const & time_n, + real64 const & dt, + integer const cycleNumber, + DomainPartition & domain ) +{ + GEOS_MARK_FUNCTION; + GEOS_LOG_RANK_0_IF( dt < epsilonLoc, "Warning! 
Value for dt: " << dt << "s is smaller than local threshold: " << epsilonLoc ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + computeUnknowns( time_n, dt, cycleNumber, domain, mesh, regionNames ); + synchronizeUnknowns( time_n, dt, cycleNumber, domain, mesh, regionNames ); } ); return dt; diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.hpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.hpp index 9d1f4db8094..67ea9982df2 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEM.hpp @@ -46,6 +46,8 @@ class AcousticWaveEquationSEM : public WaveSolverBase AcousticWaveEquationSEM & operator=( AcousticWaveEquationSEM const & ) = delete; AcousticWaveEquationSEM & operator=( AcousticWaveEquationSEM && ) = delete; + /// String used to form the solverName used to register solvers in CoupledSolver + static string coupledSolverAttributePrefix() { return "acoustic"; } static string catalogName() { return "AcousticSEM"; } /** @@ -117,6 +119,22 @@ class AcousticWaveEquationSEM : public WaveSolverBase integer const cycleNumber, DomainPartition & domain ); + void computeUnknowns( real64 const & time_n, + real64 const & dt, + integer const cycleNumber, + DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ); + + void synchronizeUnknowns( real64 const & time_n, + real64 const & dt, + integer const cycleNumber, + DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ); + + void prepareNextTimestep( MeshLevel & mesh ); + protected: virtual void postProcessInput() override final; @@ -187,8 +205,8 @@ DECLARE_FIELD( ForcingRHS, WRITE_AND_READ, "RHS" ); -DECLARE_FIELD( MassVector, - "massVector", +DECLARE_FIELD( AcousticMassVector, + "acousticMassVector", array1d< real32 >, 0, NOPLOT, @@ -203,16 +221,16 @@ DECLARE_FIELD( DampingVector, WRITE_AND_READ, "Diagonal of the Damping Matrix." ); -DECLARE_FIELD( MediumVelocity, - "mediumVelocity", +DECLARE_FIELD( AcousticVelocity, + "acousticVelocity", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "Medium velocity of the cell" ); -DECLARE_FIELD( MediumDensity, - "mediumDensity", +DECLARE_FIELD( AcousticDensity, + "acousticDensity", array1d< real32 >, 0, NOPLOT, @@ -227,16 +245,16 @@ DECLARE_FIELD( StiffnessVector, WRITE_AND_READ, "Stiffness vector contains R_h*Pressure_n." ); -DECLARE_FIELD( FreeSurfaceFaceIndicator, - "freeSurfaceFaceIndicator", +DECLARE_FIELD( AcousticFreeSurfaceFaceIndicator, + "acousticFreeSurfaceFaceIndicator", array1d< localIndex >, 0, NOPLOT, WRITE_AND_READ, "Free surface indicator, 1 if a face is on free surface 0 otherwise." 
); -DECLARE_FIELD( FreeSurfaceNodeIndicator, - "freeSurfaceNodeIndicator", +DECLARE_FIELD( AcousticFreeSurfaceNodeIndicator, + "acousticFreeSurfaceNodeIndicator", array1d< localIndex >, 0, NOPLOT, diff --git a/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEMKernel.hpp b/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEMKernel.hpp index 09fae59376d..4ca784b5c7f 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEMKernel.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/AcousticWaveEquationSEMKernel.hpp @@ -40,7 +40,6 @@ struct PrecomputeSourceAndReceiverKernel * @tparam EXEC_POLICY execution policy * @tparam FE_TYPE finite element type * @param[in] size the number of cells in the subRegion - * @param[in] numNodesPerElem number of nodes per element * @param[in] nodeCoords coordinates of the nodes * @param[in] elemsToNodes map from element to nodes * @param[in] elemsToFaces map from element to faces @@ -58,7 +57,6 @@ struct PrecomputeSourceAndReceiverKernel template< typename EXEC_POLICY, typename FE_TYPE > static void launch( localIndex const size, - localIndex const numNodesPerElem, localIndex const numFacesPerElem, arrayView2d< WaveSolverBase::wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords, arrayView1d< integer const > const elemGhostRank, @@ -81,6 +79,7 @@ struct PrecomputeSourceAndReceiverKernel real32 const timeSourceDelay, localIndex const rickerOrder ) { + constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const k ) { @@ -117,12 +116,12 @@ struct PrecomputeSourceAndReceiverKernel coordsOnRefElem ); sourceIsAccessible[isrc] = 1; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) { - sourceNodeIds[isrc][a] = elemsToNodes[k][a]; + sourceNodeIds[isrc][a] = elemsToNodes( k, a ); sourceConstants[isrc][a] = Ntest[a]; } @@ -164,12 +163,12 @@ struct PrecomputeSourceAndReceiverKernel receiverIsLocal[ircv] = 1; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) { - receiverNodeIds[ircv][a] = elemsToNodes[k][a]; + receiverNodeIds[ircv][a] = elemsToNodes( k, a ); receiverConstants[ircv][a] = Ntest[a]; } } @@ -216,7 +215,7 @@ struct MassMatrixKernel constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; constexpr localIndex numQuadraturePointsPerElem = FE_TYPE::numQuadraturePoints; - real32 const invC2 = 1.0 / ( density[e] * velocity[e] * velocity[e] ); + real32 const invC2 = 1.0 / ( density[e] * pow( velocity[e], 2 ) ); real64 xLocal[ numNodesPerElem ][ 3 ]; for( localIndex a = 0; a < numNodesPerElem; ++a ) { @@ -385,7 +384,7 @@ struct PMLKernel * @tparam ATOMIC_POLICY the atomic policy * @param[in] targetSet list of cells in the target set * @param[in] nodeCoords coordinates of the nodes - * @param[in] elemToNodesViewConst constant array view of map from element to nodes + * @param[in] elemToNodes constant array view of map from element to nodes * @param[in] velocity cell-wise velocity * @param[in] p_n pressure field at time n * @param[in] v_n PML auxiliary field at time n @@ -404,7 +403,7 @@ struct PMLKernel void launch( SortedArrayView< localIndex const > const targetSet, arrayView2d< WaveSolverBase::wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords, - traits::ViewTypeConst< 
CellElementSubRegion::NodeMapType > const elemToNodesViewConst, + traits::ViewTypeConst< CellElementSubRegion::NodeMapType > const elemToNodes, arrayView1d< real32 const > const velocity, arrayView1d< real32 const > const p_n, arrayView2d< real32 const > const v_n, @@ -452,13 +451,13 @@ struct PMLKernel /// copy from global to local arrays for( localIndex i=0; i( &grad_n[elemToNodesViewConst[k][i]][j], localIncrementArray[j]/numNodesPerElem ); + RAJA::atomicAdd< ATOMIC_POLICY >( &grad_n[elemToNodes( k, i )][j], localIncrementArray[j]/numNodesPerElem ); } /// compute beta.pressure + gamma.u - c^2 * divV where beta and gamma are functions of the damping profile real32 const beta = sigma[0]*sigma[1]+sigma[0]*sigma[2]+sigma[1]*sigma[2]; real32 const gamma = sigma[0]*sigma[1]*sigma[2]; - real32 const localIncrement = beta*p_n[elemToNodesViewConst[k][i]] - + gamma*u_n[elemToNodesViewConst[k][i]] + real32 const localIncrement = beta*p_n[elemToNodes( k, i )] + + gamma*u_n[elemToNodes( k, i )] - c*c*( auxVGrad[0][0] + auxVGrad[1][1] + auxVGrad[2][2] ); - RAJA::atomicAdd< ATOMIC_POLICY >( &divV_n[elemToNodesViewConst[k][i]], localIncrement/numNodesPerElem ); + RAJA::atomicAdd< ATOMIC_POLICY >( &divV_n[elemToNodes( k, i )], localIncrement/numNodesPerElem ); } } ); } @@ -539,7 +538,7 @@ struct waveSpeedPMLKernel * @tparam ATOMIC_POLICY the atomic policy * @param[in] targetSet list of cells in the target set * @param[in] nodeCoords coordinates of the nodes - * @param[in] elemToNodesViewConst constant array view of map from element to nodes + * @param[in] elemToNodes constant array view of map from element to nodes * @param[in] velocity cell-wise velocity * @param[in] xMin coordinate limits of the inner PML boundaries, left-front-top * @param[in] xMax coordinate limits of the inner PML boundaries, right-back-bottom @@ -552,7 +551,7 @@ struct waveSpeedPMLKernel void launch( SortedArrayView< localIndex const > const targetSet, arrayView2d< WaveSolverBase::wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords, - traits::ViewTypeConst< CellElementSubRegion::NodeMapType > const elemToNodesViewConst, + traits::ViewTypeConst< CellElementSubRegion::NodeMapType > const elemToNodes, arrayView1d< real32 const > const velocity, real32 const (&xMin)[3], real32 const (&xMax)[3], @@ -594,7 +593,7 @@ struct waveSpeedPMLKernel { for( localIndex i=0; i() ), m_p_n( nodeManager.getField< fields::Pressure_n >() ), m_stiffnessVector( nodeManager.getField< fields::StiffnessVector >() ), - m_density( elementSubRegion.template getField< fields::MediumDensity >() ), + m_density( elementSubRegion.template getField< fields::AcousticDensity >() ), m_dt( dt ) { GEOS_UNUSED_VAR( edgeManager ); @@ -803,9 +802,9 @@ class ExplicitAcousticSEM : public finiteElement::KernelBase< SUBREGION_TYPE, { m_finiteElementSpace.template computeStiffnessTerm( q, stack.xLocal, [&] ( int i, int j, real64 val ) { - real32 invDensity = 1./m_density[k]; - real32 const localIncrement = invDensity*val*m_p_n[m_elemsToNodes[k][j]]; - RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVector[m_elemsToNodes[k][i]], localIncrement ); + real32 invDensity = 1.0 / m_density[k]; + real32 const localIncrement = invDensity*val*m_p_n[m_elemsToNodes( k, j )]; + RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVector[m_elemsToNodes( k, i )], localIncrement ); } ); } diff --git a/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEM.cpp 
index 1e4364f840c..512ee4e9ded 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEM.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEM.cpp @@ -121,22 +121,22 @@ void ElasticFirstOrderWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) wavesolverfields::Displacementy_np1, wavesolverfields::Displacementz_np1, wavesolverfields::ForcingRHS, - wavesolverfields::MassVector, + wavesolverfields::ElasticMassVector, wavesolverfields::DampingVectorx, wavesolverfields::DampingVectory, wavesolverfields::DampingVectorz, - wavesolverfields::FreeSurfaceNodeIndicator >( getName() ); + wavesolverfields::ElasticFreeSurfaceNodeIndicator >( getName() ); FaceManager & faceManager = mesh.getFaceManager(); - faceManager.registerField< wavesolverfields::FreeSurfaceFaceIndicator >( getName() ); + faceManager.registerField< wavesolverfields::ElasticFreeSurfaceFaceIndicator >( getName() ); ElementRegionManager & elemManager = mesh.getElemManager(); elemManager.forElementSubRegions< CellElementSubRegion >( [&]( CellElementSubRegion & subRegion ) { - subRegion.registerField< wavesolverfields::MediumVelocityVp >( getName() ); - subRegion.registerField< wavesolverfields::MediumVelocityVs >( getName() ); - subRegion.registerField< wavesolverfields::MediumDensity >( getName() ); + subRegion.registerField< wavesolverfields::ElasticVelocityVp >( getName() ); + subRegion.registerField< wavesolverfields::ElasticVelocityVs >( getName() ); + subRegion.registerField< wavesolverfields::ElasticDensity >( getName() ); subRegion.registerField< wavesolverfields::Lambda >( getName() ); subRegion.registerField< wavesolverfields::Mu >( getName() ); @@ -260,7 +260,6 @@ void ElasticFirstOrderWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLeve { using FE_TYPE = TYPEOFREF( finiteElement ); - constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; localIndex const numFacesPerElem = elementSubRegion.numFacesPerElement(); elasticFirstOrderWaveEquationSEMKernels:: @@ -268,7 +267,6 @@ void ElasticFirstOrderWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLeve launch< EXEC_POLICY, FE_TYPE > ( elementSubRegion.size(), regionIndex, - numNodesPerElem, numFacesPerElem, X, elemGhostRank, @@ -351,7 +349,7 @@ void ElasticFirstOrderWaveEquationSEM::initializePostInitialConditionsPreSubGrou ArrayOfArraysView< localIndex const > const facesToNodes = faceManager.nodeList().toViewConst(); // mass matrix to be computed in this function - arrayView1d< real32 > const mass = nodeManager.getField< wavesolverfields::MassVector >(); + arrayView1d< real32 > const mass = nodeManager.getField< wavesolverfields::ElasticMassVector >(); mass.zero(); /// damping matrix to be computed for each dof in the boundary of the mesh arrayView1d< real32 > const dampingx = nodeManager.getField< wavesolverfields::DampingVectorx >(); @@ -362,7 +360,7 @@ void ElasticFirstOrderWaveEquationSEM::initializePostInitialConditionsPreSubGrou dampingz.zero(); /// get array of indicators: 1 if face is on the free surface; 0 otherwise - arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::ElasticFreeSurfaceFaceIndicator >(); mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, CellElementSubRegion & elementSubRegion ) @@ -370,9 +368,9 @@ void 
ElasticFirstOrderWaveEquationSEM::initializePostInitialConditionsPreSubGrou arrayView2d< localIndex const, cells::NODE_MAP_USD > const elemsToNodes = elementSubRegion.nodeList(); arrayView2d< localIndex const > const elemsToFaces = elementSubRegion.faceList(); - arrayView1d< real32 > const density = elementSubRegion.getField< wavesolverfields::MediumDensity >(); - arrayView1d< real32 > const velocityVp = elementSubRegion.getField< wavesolverfields::MediumVelocityVp >(); - arrayView1d< real32 > const velocityVs = elementSubRegion.getField< wavesolverfields::MediumVelocityVs >(); + arrayView1d< real32 > const density = elementSubRegion.getField< wavesolverfields::ElasticDensity >(); + arrayView1d< real32 > const velocityVp = elementSubRegion.getField< wavesolverfields::ElasticVelocityVp >(); + arrayView1d< real32 > const velocityVs = elementSubRegion.getField< wavesolverfields::ElasticVelocityVs >(); finiteElement::FiniteElementBase const & fe = elementSubRegion.getReference< finiteElement::FiniteElementBase >( getDiscretizationName() ); @@ -427,10 +425,10 @@ void ElasticFirstOrderWaveEquationSEM::applyFreeSurfaceBC( real64 const time, Do ArrayOfArraysView< localIndex const > const faceToNodeMap = faceManager.nodeList().toViewConst(); /// set array of indicators: 1 if a face is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< wavesolverfields::ElasticFreeSurfaceFaceIndicator >(); /// set array of indicators: 1 if a node is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< wavesolverfields::FreeSurfaceNodeIndicator >(); + arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< wavesolverfields::ElasticFreeSurfaceNodeIndicator >(); freeSurfaceFaceIndicator.zero(); freeSurfaceNodeIndicator.zero(); @@ -525,7 +523,7 @@ real64 ElasticFirstOrderWaveEquationSEM::explicitStepInternal( real64 const & ti arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const X = nodeManager.getField< fields::referencePosition32 >().toViewConst(); - arrayView1d< real32 const > const mass = nodeManager.getField< wavesolverfields::MassVector >(); + arrayView1d< real32 const > const mass = nodeManager.getField< wavesolverfields::ElasticMassVector >(); arrayView1d< real32 > const dampingx = nodeManager.getField< wavesolverfields::DampingVectorx >(); arrayView1d< real32 > const dampingy = nodeManager.getField< wavesolverfields::DampingVectory >(); arrayView1d< real32 > const dampingz = nodeManager.getField< wavesolverfields::DampingVectorz >(); @@ -541,9 +539,9 @@ real64 ElasticFirstOrderWaveEquationSEM::explicitStepInternal( real64 const & ti arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elemsToNodes = elementSubRegion.nodeList(); - arrayView1d< real32 const > const velocityVp = elementSubRegion.getField< wavesolverfields::MediumVelocityVp >(); - arrayView1d< real32 const > const velocityVs = elementSubRegion.getField< wavesolverfields::MediumVelocityVs >(); - arrayView1d< real32 const > const density = elementSubRegion.getField< wavesolverfields::MediumDensity >(); + arrayView1d< real32 const > const velocityVp = elementSubRegion.getField< wavesolverfields::ElasticVelocityVp >(); + arrayView1d< real32 const > const velocityVs = elementSubRegion.getField< wavesolverfields::ElasticVelocityVs >(); + arrayView1d< 
real32 const > const density = elementSubRegion.getField< wavesolverfields::ElasticDensity >(); arrayView1d< real32 > const lambda = elementSubRegion.getField< wavesolverfields::Lambda >(); arrayView1d< real32 > const mu = elementSubRegion.getField< wavesolverfields::Mu >(); diff --git a/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEMKernel.hpp b/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEMKernel.hpp index c4414f17c0e..80aa82c77b3 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEMKernel.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/ElasticFirstOrderWaveEquationSEMKernel.hpp @@ -62,7 +62,6 @@ struct PrecomputeSourceAndReceiverKernel static void launch( localIndex const size, localIndex const regionIndex, - localIndex const numNodesPerElem, localIndex const numFacesPerElem, arrayView2d< WaveSolverBase::wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords, arrayView1d< integer const > const elemGhostRank, @@ -89,6 +88,7 @@ struct PrecomputeSourceAndReceiverKernel real32 const timeSourceDelay, localIndex const rickerOrder ) { + constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const k ) { @@ -126,7 +126,7 @@ struct PrecomputeSourceAndReceiverKernel sourceIsAccessible[isrc] = 1; sourceElem[isrc] = k; sourceRegion[isrc] = regionIndex; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) @@ -174,7 +174,7 @@ struct PrecomputeSourceAndReceiverKernel rcvElem[ircv] = k; receiverRegion[ircv] = regionIndex; - real64 Ntest[FE_TYPE::numNodes]; + real64 Ntest[numNodesPerElem]; FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) @@ -305,9 +305,9 @@ struct DampingMatrixKernel for( localIndex q = 0; q < numNodesPerFace; ++q ) { real32 const aux = density[e] * m_finiteElement.computeDampingTerm( q, xLocal ); - real32 const localIncrementx = density[e] * (velocityVp[e] * abs( nx ) + velocityVs[e] * sqrt( pow( ny, 2 ) + pow( nz, 2 ) ) ) * aux; - real32 const localIncrementy = density[e] * (velocityVp[e] * abs( ny ) + velocityVs[e] * sqrt( pow( nx, 2 ) + pow( nz, 2 ) ) ) * aux; - real32 const localIncrementz = density[e] * (velocityVp[e] * abs( nz ) + velocityVs[e] * sqrt( pow( nx, 2 ) + pow( ny, 2 ) ) ) * aux; + real32 const localIncrementx = density[e] * (velocityVp[e] * LvArray::math::abs( nx ) + velocityVs[e] * LvArray::math::sqrt( pow( ny, 2 ) + pow( nz, 2 ) ) ) * aux; + real32 const localIncrementy = density[e] * (velocityVp[e] * LvArray::math::abs( ny ) + velocityVs[e] * LvArray::math::sqrt( pow( nx, 2 ) + pow( nz, 2 ) ) ) * aux; + real32 const localIncrementz = density[e] * (velocityVp[e] * LvArray::math::abs( nz ) + velocityVs[e] * LvArray::math::sqrt( pow( nx, 2 ) + pow( ny, 2 ) ) ) * aux; RAJA::atomicAdd< ATOMIC_POLICY >( &dampingx[facesToNodes( f, q )], localIncrementx ); RAJA::atomicAdd< ATOMIC_POLICY >( &dampingy[facesToNodes( f, q )], localIncrementy ); @@ -377,8 +377,8 @@ struct StressComputation } } - mu[k] = density[k] * velocityVs[k] * velocityVs[k]; - lambda[k] = density[k] * velocityVp[k] * velocityVp[k] - 2.0*mu[k]; + mu[k] = density[k] * pow( velocityVs[k], 2 ); + lambda[k] = density[k] * pow( velocityVp[k], 2 ) - 2.0*mu[k]; real32 uelemxx[numNodesPerElem] = {0.0}; real32 uelemyy[numNodesPerElem] = {0.0}; diff --git 
a/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.cpp index 2de2021b306..a8104bb9309 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.cpp @@ -88,49 +88,6 @@ ElasticWaveEquationSEM::~ElasticWaveEquationSEM() // TODO Auto-generated destructor stub } -localIndex ElasticWaveEquationSEM::getNumNodesPerElem() -{ - DomainPartition & domain = getGroupByPath< DomainPartition >( "/Problem/domain" ); - - NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); - - FiniteElementDiscretizationManager const & - feDiscretizationManager = numericalMethodManager.getFiniteElementDiscretizationManager(); - - FiniteElementDiscretization const * const - feDiscretization = feDiscretizationManager.getGroupPointer< FiniteElementDiscretization >( m_discretizationName ); - GEOS_THROW_IF( feDiscretization == nullptr, - getDataContext() << ": FE discretization not found: " << m_discretizationName, - InputError ); - - localIndex numNodesPerElem = 0; - forDiscretizationOnMeshTargets( domain.getMeshBodies(), - [&]( string const &, - MeshLevel const & mesh, - arrayView1d< string const > const & regionNames ) - { - ElementRegionManager const & elemManager = mesh.getElemManager(); - elemManager.forElementRegions( regionNames, - [&] ( localIndex const, - ElementRegionBase const & elemRegion ) - { - elemRegion.forElementSubRegions( [&]( ElementSubRegionBase const & elementSubRegion ) - { - finiteElement::FiniteElementBase const & - fe = elementSubRegion.getReference< finiteElement::FiniteElementBase >( getDiscretizationName() ); - localIndex const numSupportPoints = fe.getNumSupportPoints(); - if( numSupportPoints > numNodesPerElem ) - { - numNodesPerElem = numSupportPoints; - } - } ); - } ); - - - } ); - return numNodesPerElem; -} - void ElasticWaveEquationSEM::initializePreSubGroups() { @@ -139,13 +96,11 @@ void ElasticWaveEquationSEM::initializePreSubGroups() localIndex const numNodesPerElem = getNumNodesPerElem(); localIndex const numSourcesGlobal = m_sourceCoordinates.size( 0 ); - m_sourceNodeIds.resize( numSourcesGlobal, numNodesPerElem ); m_sourceConstantsx.resize( numSourcesGlobal, numNodesPerElem ); m_sourceConstantsy.resize( numSourcesGlobal, numNodesPerElem ); m_sourceConstantsz.resize( numSourcesGlobal, numNodesPerElem ); localIndex const numReceiversGlobal = m_receiverCoordinates.size( 0 ); - m_receiverNodeIds.resize( numReceiversGlobal, numNodesPerElem ); m_receiverConstants.resize( numReceiversGlobal, numNodesPerElem ); } @@ -173,25 +128,25 @@ void ElasticWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) fields::ForcingRHSx, fields::ForcingRHSy, fields::ForcingRHSz, - fields::MassVector, + fields::ElasticMassVector, fields::DampingVectorx, fields::DampingVectory, fields::DampingVectorz, fields::StiffnessVectorx, fields::StiffnessVectory, fields::StiffnessVectorz, - fields::FreeSurfaceNodeIndicator >( getName() ); + fields::ElasticFreeSurfaceNodeIndicator >( getName() ); FaceManager & faceManager = mesh.getFaceManager(); - faceManager.registerField< fields::FreeSurfaceFaceIndicator >( getName() ); + faceManager.registerField< fields::ElasticFreeSurfaceFaceIndicator >( getName() ); ElementRegionManager & elemManager = mesh.getElemManager(); elemManager.forElementSubRegions< CellElementSubRegion >( [&]( CellElementSubRegion & subRegion ) { - 
subRegion.registerField< fields::MediumVelocityVp >( getName() ); - subRegion.registerField< fields::MediumVelocityVs >( getName() ); - subRegion.registerField< fields::MediumDensity >( getName() ); + subRegion.registerField< fields::ElasticVelocityVp >( getName() ); + subRegion.registerField< fields::ElasticVelocityVs >( getName() ); + subRegion.registerField< fields::ElasticDensity >( getName() ); } ); } ); @@ -201,17 +156,8 @@ void ElasticWaveEquationSEM::registerDataOnMesh( Group & meshBodies ) void ElasticWaveEquationSEM::postProcessInput() { - WaveSolverBase::postProcessInput(); - GEOS_ERROR_IF( m_sourceCoordinates.size( 1 ) != 3, - getWrapperDataContext( WaveSolverBase::viewKeyStruct::sourceCoordinatesString() ) << - ": Invalid number of physical coordinates for the sources" ); - - GEOS_ERROR_IF( m_receiverCoordinates.size( 1 ) != 3, - getWrapperDataContext( WaveSolverBase::viewKeyStruct::receiverCoordinatesString() ) << - ": Invalid number of physical coordinates for the receivers" ); - EventManager const & event = getGroupByPath< EventManager >( "/Problem/Events" ); real64 const & maxTime = event.getReference< real64 >( EventManager::viewKeyStruct::maxTimeString() ); real64 dt = 0; @@ -228,20 +174,18 @@ void ElasticWaveEquationSEM::postProcessInput() if( m_dtSeismoTrace > 0 ) { - m_nsamplesSeismoTrace = int( maxTime / m_dtSeismoTrace) + 1; + m_nsamplesSeismoTrace = int( maxTime / m_dtSeismoTrace ) + 1; } else { m_nsamplesSeismoTrace = 0; } - localIndex const nsamples = int(maxTime/dt) + 1; + localIndex const nsamples = int( maxTime / dt ) + 1; + localIndex const numReceiversGlobal = m_receiverCoordinates.size( 0 ); localIndex const numSourcesGlobal = m_sourceCoordinates.size( 0 ); m_sourceIsAccessible.resize( numSourcesGlobal ); - localIndex const numReceiversGlobal = m_receiverCoordinates.size( 0 ); - m_receiverIsLocal.resize( numReceiversGlobal ); - m_displacementXNp1AtReceivers.resize( m_nsamplesSeismoTrace, numReceiversGlobal + 1 ); m_displacementYNp1AtReceivers.resize( m_nsamplesSeismoTrace, numReceiversGlobal + 1 ); m_displacementZNp1AtReceivers.resize( m_nsamplesSeismoTrace, numReceiversGlobal + 1 ); @@ -353,9 +297,9 @@ void ElasticWaveEquationSEM::precomputeSourceAndReceiverTerm( MeshLevel & mesh, } ); } -void ElasticWaveEquationSEM::computeDAS ( arrayView2d< real32 > const xCompRcv, - arrayView2d< real32 > const yCompRcv, - arrayView2d< real32 > const zCompRcv ) +void ElasticWaveEquationSEM::computeDAS( arrayView2d< real32 > const xCompRcv, + arrayView2d< real32 > const yCompRcv, + arrayView2d< real32 > const zCompRcv ) { arrayView2d< real64 const > const linearDASGeometry = m_linearDASGeometry.toViewConst(); @@ -399,9 +343,9 @@ void ElasticWaveEquationSEM::computeDAS ( arrayView2d< real32 > const xCompRcv, { // store strain data in the z-component of the receiver (copied to x after resize) zCompRcv[iSample][ircv] = - cd * ca * ( xCompRcv[iSample][numReceiversGlobal+ircv] - xCompRcv[iSample][ircv] ) - + cd * sa * ( yCompRcv[iSample][numReceiversGlobal+ircv] - yCompRcv[iSample][ircv] ) - + sd * ( zCompRcv[iSample][numReceiversGlobal+ircv] - zCompRcv[iSample][ircv] ); + cd * ca * ( xCompRcv[iSample][numReceiversGlobal+ircv] - xCompRcv[iSample][ircv] ) + + cd * sa * ( yCompRcv[iSample][numReceiversGlobal+ircv] - yCompRcv[iSample][ircv] ) + + sd * ( zCompRcv[iSample][numReceiversGlobal+ircv] - zCompRcv[iSample][ircv] ); zCompRcv[iSample][ircv] /= linearDASGeometry[ircv][2]; } @@ -470,8 +414,7 @@ void ElasticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() 
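Note on the computeDAS block reformatted above: the DAS sample is the displacement difference between the two receivers bounding a fiber segment, projected onto the fiber direction and divided by the segment length. A minimal standalone sketch of that projection, assuming cd/ca and sd/sa are the cosines and sines of the fiber dip and azimuth and that linearDASGeometry[ircv][2] holds the gauge length (assumptions for illustration, not stated by the patch):

#include <cmath>
#include <cstdio>

// Sketch only: strain along a straight fiber segment, estimated as the
// projection of the displacement difference between the segment's two
// receivers onto the fiber direction, divided by the gauge length.
static double dasStrain( double const (&uA)[3], double const (&uB)[3],
                         double dip, double azimuth, double gaugeLength )
{
  double const cd = std::cos( dip ), sd = std::sin( dip );
  double const ca = std::cos( azimuth ), sa = std::sin( azimuth );
  double const du[3] = { uB[0] - uA[0], uB[1] - uA[1], uB[2] - uA[2] };
  // fiber direction t = (cd*ca, cd*sa, sd); strain ~ t . du / gauge
  return ( cd * ca * du[0] + cd * sa * du[1] + sd * du[2] ) / gaugeLength;
}

int main()
{
  double const uA[3] = { 0.0, 0.0, 0.0 };
  double const uB[3] = { 1.0e-6, 0.0, 0.0 };
  std::printf( "strain: %g\n", dasStrain( uA, uB, 0.0, 0.0, 10.0 ) );
  return 0;
}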
DomainPartition & domain = getGroupByPath< DomainPartition >( "/Problem/domain" ); - real64 const time = 0.0; - applyFreeSurfaceBC( time, domain ); + applyFreeSurfaceBC( 0.0, domain ); forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, MeshLevel & mesh, @@ -486,7 +429,7 @@ void ElasticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() arrayView2d< wsCoordType const, nodes::REFERENCE_POSITION_USD > const nodeCoords = nodeManager.getField< fields::referencePosition32 >().toViewConst(); // mass matrix to be computed in this function - arrayView1d< real32 > const mass = nodeManager.getField< fields::MassVector >(); + arrayView1d< real32 > const mass = nodeManager.getField< fields::ElasticMassVector >(); mass.zero(); /// damping matrix to be computed for each dof in the boundary of the mesh arrayView1d< real32 > const dampingx = nodeManager.getField< fields::DampingVectorx >(); @@ -497,7 +440,7 @@ void ElasticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() dampingz.zero(); /// get array of indicators: 1 if face is on the free surface; 0 otherwise - arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< fields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex const > const freeSurfaceFaceIndicator = faceManager.getField< fields::ElasticFreeSurfaceFaceIndicator >(); arrayView1d< integer const > const & facesDomainBoundaryIndicator = faceManager.getDomainBoundaryIndicator(); ArrayOfArraysView< localIndex const > const facesToNodes = faceManager.nodeList().toViewConst(); arrayView2d< real64 const > const faceNormal = faceManager.faceNormal(); @@ -511,9 +454,11 @@ void ElasticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() arrayView2d< localIndex const, cells::NODE_MAP_USD > const elemsToNodes = elementSubRegion.nodeList(); arrayView2d< localIndex const > const elemsToFaces = elementSubRegion.faceList(); - arrayView1d< real32 const > const density = elementSubRegion.getField< fields::MediumDensity >(); - arrayView1d< real32 const > const velocityVp = elementSubRegion.getField< fields::MediumVelocityVp >(); - arrayView1d< real32 const > const velocityVs = elementSubRegion.getField< fields::MediumVelocityVs >(); + computeTargetNodeSet( elemsToNodes, elementSubRegion.size(), fe.getNumQuadraturePoints() ); + + arrayView1d< real32 const > const density = elementSubRegion.getField< fields::ElasticDensity >(); + arrayView1d< real32 const > const velocityVp = elementSubRegion.getField< fields::ElasticVelocityVp >(); + arrayView1d< real32 const > const velocityVs = elementSubRegion.getField< fields::ElasticVelocityVs >(); finiteElement::FiniteElementDispatchHandler< SEM_FE_TYPES >::dispatch3D( fe, [&] ( auto const finiteElement ) { @@ -544,8 +489,8 @@ void ElasticWaveEquationSEM::initializePostInitialConditionsPreSubGroups() } ); } ); - WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_receiverConstants.size( 0 ), m_receiverIsLocal ); - WaveSolverUtils::initTrace( "dasTraceReceiver", getName(), m_linearDASGeometry.size( 0 ), m_receiverIsLocal ); + WaveSolverUtils::initTrace( "seismoTraceReceiver", getName(), m_outputSeismoTrace, m_receiverConstants.size( 0 ), m_receiverIsLocal ); + WaveSolverUtils::initTrace( "dasTraceReceiver", getName(), m_outputSeismoTrace, m_linearDASGeometry.size( 0 ), m_receiverIsLocal ); } @@ -570,10 +515,10 @@ void ElasticWaveEquationSEM::applyFreeSurfaceBC( real64 const time, DomainPartit ArrayOfArraysView< localIndex const > const faceToNodeMap = 
faceManager.nodeList().toViewConst(); /// set array of indicators: 1 if a face is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< fields::FreeSurfaceFaceIndicator >(); + arrayView1d< localIndex > const freeSurfaceFaceIndicator = faceManager.getField< fields::ElasticFreeSurfaceFaceIndicator >(); /// set array of indicators: 1 if a node is on on free surface; 0 otherwise - arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< fields::FreeSurfaceNodeIndicator >(); + arrayView1d< localIndex > const freeSurfaceNodeIndicator = nodeManager.getField< fields::ElasticFreeSurfaceNodeIndicator >(); fsManager.apply( time, @@ -646,127 +591,188 @@ real64 ElasticWaveEquationSEM::explicitStepBackward( real64 const & time_n, return dtOut; } -real64 ElasticWaveEquationSEM::explicitStepInternal( real64 const & time_n, - real64 const & dt, - integer const cycleNumber, - DomainPartition & domain ) +void ElasticWaveEquationSEM::computeUnknowns( real64 const &, + real64 const & dt, + integer const cycleNumber, + DomainPartition &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) { - GEOS_MARK_FUNCTION; + NodeManager & nodeManager = mesh.getNodeManager(); - GEOS_LOG_RANK_0_IF( dt < epsilonLoc, "Warning! Value for dt: " << dt << "s is smaller than local threshold: " << epsilonLoc ); + arrayView1d< real32 const > const mass = nodeManager.getField< fields::ElasticMassVector >(); + arrayView1d< real32 const > const dampingx = nodeManager.getField< fields::DampingVectorx >(); + arrayView1d< real32 const > const dampingy = nodeManager.getField< fields::DampingVectory >(); + arrayView1d< real32 const > const dampingz = nodeManager.getField< fields::DampingVectorz >(); + arrayView1d< real32 > const stiffnessVectorx = nodeManager.getField< fields::StiffnessVectorx >(); + arrayView1d< real32 > const stiffnessVectory = nodeManager.getField< fields::StiffnessVectory >(); + arrayView1d< real32 > const stiffnessVectorz = nodeManager.getField< fields::StiffnessVectorz >(); - forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, - MeshLevel & mesh, - arrayView1d< string const > const & regionNames ) + arrayView1d< real32 > const ux_nm1 = nodeManager.getField< fields::Displacementx_nm1 >(); + arrayView1d< real32 > const uy_nm1 = nodeManager.getField< fields::Displacementy_nm1 >(); + arrayView1d< real32 > const uz_nm1 = nodeManager.getField< fields::Displacementz_nm1 >(); + arrayView1d< real32 > const ux_n = nodeManager.getField< fields::Displacementx_n >(); + arrayView1d< real32 > const uy_n = nodeManager.getField< fields::Displacementy_n >(); + arrayView1d< real32 > const uz_n = nodeManager.getField< fields::Displacementz_n >(); + arrayView1d< real32 > const ux_np1 = nodeManager.getField< fields::Displacementx_np1 >(); + arrayView1d< real32 > const uy_np1 = nodeManager.getField< fields::Displacementy_np1 >(); + arrayView1d< real32 > const uz_np1 = nodeManager.getField< fields::Displacementz_np1 >(); + + /// get array of indicators: 1 if node on free surface; 0 otherwise + arrayView1d< localIndex const > const freeSurfaceNodeIndicator = nodeManager.getField< fields::ElasticFreeSurfaceNodeIndicator >(); + + arrayView1d< real32 > const rhsx = nodeManager.getField< fields::ForcingRHSx >(); + arrayView1d< real32 > const rhsy = nodeManager.getField< fields::ForcingRHSy >(); + arrayView1d< real32 > const rhsz = nodeManager.getField< fields::ForcingRHSz >(); + + auto kernelFactory = 
elasticWaveEquationSEMKernels::ExplicitElasticSEMFactory( dt ); + + finiteElement:: + regionBasedKernelApplication< EXEC_POLICY, + constitutive::NullModel, + CellElementSubRegion >( mesh, + regionNames, + getDiscretizationName(), + "", + kernelFactory ); + + + addSourceToRightHandSide( cycleNumber, rhsx, rhsy, rhsz ); + + real64 const dt2 = pow( dt, 2 ); + SortedArrayView< localIndex const > const solverTargetNodesSet = m_solverTargetNodesSet.toViewConst(); + forAll< EXEC_POLICY >( solverTargetNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) { - NodeManager & nodeManager = mesh.getNodeManager(); + localIndex const a = solverTargetNodesSet[n]; + if( freeSurfaceNodeIndicator[a] != 1 ) + { + ux_np1[a] = ux_n[a]; + ux_np1[a] *= 2.0*mass[a]; + ux_np1[a] -= (mass[a]-0.5*dt*dampingx[a])*ux_nm1[a]; + ux_np1[a] += dt2*(rhsx[a]-stiffnessVectorx[a]); + ux_np1[a] /= mass[a]+0.5*dt*dampingx[a]; + uy_np1[a] = uy_n[a]; + uy_np1[a] *= 2.0*mass[a]; + uy_np1[a] -= (mass[a]-0.5*dt*dampingy[a])*uy_nm1[a]; + uy_np1[a] += dt2*(rhsy[a]-stiffnessVectory[a]); + uy_np1[a] /= mass[a]+0.5*dt*dampingy[a]; + uz_np1[a] = uz_n[a]; + uz_np1[a] *= 2.0*mass[a]; + uz_np1[a] -= (mass[a]-0.5*dt*dampingz[a])*uz_nm1[a]; + uz_np1[a] += dt2*(rhsz[a]-stiffnessVectorz[a]); + uz_np1[a] /= mass[a]+0.5*dt*dampingz[a]; + } + } ); +} - arrayView1d< real32 const > const mass = nodeManager.getField< fields::MassVector >(); - arrayView1d< real32 const > const dampingx = nodeManager.getField< fields::DampingVectorx >(); - arrayView1d< real32 const > const dampingy = nodeManager.getField< fields::DampingVectory >(); - arrayView1d< real32 const > const dampingz = nodeManager.getField< fields::DampingVectorz >(); - arrayView1d< real32 > const stiffnessVectorx = nodeManager.getField< fields::StiffnessVectorx >(); - arrayView1d< real32 > const stiffnessVectory = nodeManager.getField< fields::StiffnessVectory >(); - arrayView1d< real32 > const stiffnessVectorz = nodeManager.getField< fields::StiffnessVectorz >(); +void ElasticWaveEquationSEM::synchronizeUnknowns( real64 const & time_n, + real64 const & dt, + integer const, + DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & ) +{ + NodeManager & nodeManager = mesh.getNodeManager(); + arrayView1d< real32 > const stiffnessVectorx = nodeManager.getField< fields::StiffnessVectorx >(); + arrayView1d< real32 > const stiffnessVectory = nodeManager.getField< fields::StiffnessVectory >(); + arrayView1d< real32 > const stiffnessVectorz = nodeManager.getField< fields::StiffnessVectorz >(); - arrayView1d< real32 > const ux_nm1 = nodeManager.getField< fields::Displacementx_nm1 >(); - arrayView1d< real32 > const uy_nm1 = nodeManager.getField< fields::Displacementy_nm1 >(); - arrayView1d< real32 > const uz_nm1 = nodeManager.getField< fields::Displacementz_nm1 >(); - arrayView1d< real32 > const ux_n = nodeManager.getField< fields::Displacementx_n >(); - arrayView1d< real32 > const uy_n = nodeManager.getField< fields::Displacementy_n >(); - arrayView1d< real32 > const uz_n = nodeManager.getField< fields::Displacementz_n >(); - arrayView1d< real32 > const ux_np1 = nodeManager.getField< fields::Displacementx_np1 >(); - arrayView1d< real32 > const uy_np1 = nodeManager.getField< fields::Displacementy_np1 >(); - arrayView1d< real32 > const uz_np1 = nodeManager.getField< fields::Displacementz_np1 >(); + arrayView1d< real32 > const ux_nm1 = nodeManager.getField< fields::Displacementx_nm1 >(); + arrayView1d< real32 > const uy_nm1 = nodeManager.getField< 
fields::Displacementy_nm1 >(); + arrayView1d< real32 > const uz_nm1 = nodeManager.getField< fields::Displacementz_nm1 >(); + arrayView1d< real32 > const ux_n = nodeManager.getField< fields::Displacementx_n >(); + arrayView1d< real32 > const uy_n = nodeManager.getField< fields::Displacementy_n >(); + arrayView1d< real32 > const uz_n = nodeManager.getField< fields::Displacementz_n >(); + arrayView1d< real32 > const ux_np1 = nodeManager.getField< fields::Displacementx_np1 >(); + arrayView1d< real32 > const uy_np1 = nodeManager.getField< fields::Displacementy_np1 >(); + arrayView1d< real32 > const uz_np1 = nodeManager.getField< fields::Displacementz_np1 >(); - /// get array of indicators: 1 if node on free surface; 0 otherwise - arrayView1d< localIndex const > const freeSurfaceNodeIndicator = nodeManager.getField< fields::FreeSurfaceNodeIndicator >(); + arrayView1d< real32 > const rhsx = nodeManager.getField< fields::ForcingRHSx >(); + arrayView1d< real32 > const rhsy = nodeManager.getField< fields::ForcingRHSy >(); + arrayView1d< real32 > const rhsz = nodeManager.getField< fields::ForcingRHSz >(); - arrayView1d< real32 > const rhsx = nodeManager.getField< fields::ForcingRHSx >(); - arrayView1d< real32 > const rhsy = nodeManager.getField< fields::ForcingRHSy >(); - arrayView1d< real32 > const rhsz = nodeManager.getField< fields::ForcingRHSz >(); + /// synchronize displacement fields + FieldIdentifiers fieldsToBeSync; + fieldsToBeSync.addFields( FieldLocation::Node, { fields::Displacementx_np1::key(), fields::Displacementy_np1::key(), fields::Displacementz_np1::key() } ); - auto kernelFactory = elasticWaveEquationSEMKernels::ExplicitElasticSEMFactory( dt ); + CommunicationTools & syncFields = CommunicationTools::getInstance(); + syncFields.synchronizeFields( fieldsToBeSync, + domain.getMeshBody( 0 ).getMeshLevel( m_discretizationName ), + domain.getNeighbors(), + true ); - finiteElement:: - regionBasedKernelApplication< EXEC_POLICY, - constitutive::NullModel, - CellElementSubRegion >( mesh, - regionNames, - getDiscretizationName(), - "", - kernelFactory ); + // compute the seismic traces since last step. 
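For readers scanning the refactor: the node loop moved into computeUnknowns above is the standard damped central-difference update. With a lumped (diagonal) mass matrix M, damping D, stiffness action K u_n and load f, it solves (M + 0.5*dt*D) u_{n+1} = 2*M*u_n - (M - 0.5*dt*D)*u_{n-1} + dt^2*(f - K u_n), so each node is advanced independently. A single-degree-of-freedom sketch under those assumptions (names are illustrative, not from the patch):

#include <cstdio>

// Sketch only: one scalar degree of freedom advanced with the same damped
// central-difference update that computeUnknowns applies node by node.
static double advance( double uNm1, double uN, double mass, double damping,
                       double stiffnessTimesU, double rhs, double dt )
{
  double const dt2 = dt * dt;
  double uNp1 = 2.0 * mass * uN
              - ( mass - 0.5 * dt * damping ) * uNm1
              + dt2 * ( rhs - stiffnessTimesU );
  return uNp1 / ( mass + 0.5 * dt * damping );
}

int main()
{
  double uNm1 = 0.0, uN = 0.0;
  for( int n = 0; n < 5; ++n )
  {
    double const uNp1 = advance( uNm1, uN, /*mass*/ 1.0, /*damping*/ 0.1,
                                 /*K*u_n*/ 4.0 * uN, /*rhs*/ 1.0, /*dt*/ 0.01 );
    uNm1 = uN;
    uN = uNp1;
  }
  std::printf( "u after 5 steps: %g\n", uN );
  return 0;
}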
+ arrayView2d< real32 > const uXReceivers = m_displacementXNp1AtReceivers.toView(); + arrayView2d< real32 > const uYReceivers = m_displacementYNp1AtReceivers.toView(); + arrayView2d< real32 > const uZReceivers = m_displacementZNp1AtReceivers.toView(); + computeAllSeismoTraces( time_n, dt, ux_np1, ux_n, uXReceivers ); + computeAllSeismoTraces( time_n, dt, uy_np1, uy_n, uYReceivers ); + computeAllSeismoTraces( time_n, dt, uz_np1, uz_n, uZReceivers ); - addSourceToRightHandSide( cycleNumber, rhsx, rhsy, rhsz ); + incrementIndexSeismoTrace( time_n ); +} +void ElasticWaveEquationSEM::prepareNextTimestep( MeshLevel & mesh ) +{ + NodeManager & nodeManager = mesh.getNodeManager(); + arrayView1d< real32 > const ux_nm1 = nodeManager.getField< fields::Displacementx_nm1 >(); + arrayView1d< real32 > const uy_nm1 = nodeManager.getField< fields::Displacementy_nm1 >(); + arrayView1d< real32 > const uz_nm1 = nodeManager.getField< fields::Displacementz_nm1 >(); + arrayView1d< real32 > const ux_n = nodeManager.getField< fields::Displacementx_n >(); + arrayView1d< real32 > const uy_n = nodeManager.getField< fields::Displacementy_n >(); + arrayView1d< real32 > const uz_n = nodeManager.getField< fields::Displacementz_n >(); + arrayView1d< real32 > const ux_np1 = nodeManager.getField< fields::Displacementx_np1 >(); + arrayView1d< real32 > const uy_np1 = nodeManager.getField< fields::Displacementy_np1 >(); + arrayView1d< real32 > const uz_np1 = nodeManager.getField< fields::Displacementz_np1 >(); - real64 const dt2 = dt*dt; - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) - { - if( freeSurfaceNodeIndicator[a] != 1 ) - { - ux_np1[a] = ux_n[a]; - ux_np1[a] *= 2.0*mass[a]; - ux_np1[a] -= (mass[a]-0.5*dt*dampingx[a])*ux_nm1[a]; - ux_np1[a] += dt2*(rhsx[a]-stiffnessVectorx[a]); - ux_np1[a] /= mass[a]+0.5*dt*dampingx[a]; - uy_np1[a] = uy_n[a]; - uy_np1[a] *= 2.0*mass[a]; - uy_np1[a] -= (mass[a]-0.5*dt*dampingy[a])*uy_nm1[a]; - uy_np1[a] += dt2*(rhsy[a]-stiffnessVectory[a]); - uy_np1[a] /= mass[a]+0.5*dt*dampingy[a]; - uz_np1[a] = uz_n[a]; - uz_np1[a] *= 2.0*mass[a]; - uz_np1[a] -= (mass[a]-0.5*dt*dampingz[a])*uz_nm1[a]; - uz_np1[a] += dt2*(rhsz[a]-stiffnessVectorz[a]); - uz_np1[a] /= mass[a]+0.5*dt*dampingz[a]; - } - } ); + arrayView1d< real32 > const stiffnessVectorx = nodeManager.getField< fields::StiffnessVectorx >(); + arrayView1d< real32 > const stiffnessVectory = nodeManager.getField< fields::StiffnessVectory >(); + arrayView1d< real32 > const stiffnessVectorz = nodeManager.getField< fields::StiffnessVectorz >(); - /// synchronize pressure fields - FieldIdentifiers fieldsToBeSync; - fieldsToBeSync.addFields( FieldLocation::Node, { fields::Displacementx_np1::key(), fields::Displacementy_np1::key(), fields::Displacementz_np1::key() } ); + arrayView1d< real32 > const rhsx = nodeManager.getField< fields::ForcingRHSx >(); + arrayView1d< real32 > const rhsy = nodeManager.getField< fields::ForcingRHSy >(); + arrayView1d< real32 > const rhsz = nodeManager.getField< fields::ForcingRHSz >(); - CommunicationTools & syncFields = CommunicationTools::getInstance(); - syncFields.synchronizeFields( fieldsToBeSync, - domain.getMeshBody( 0 ).getMeshLevel( m_discretizationName ), - domain.getNeighbors(), - true ); + SortedArrayView< localIndex const > const solverTargetNodesSet = m_solverTargetNodesSet.toViewConst(); - // compute the seismic traces since last step. 
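prepareNextTimestep iterates over m_solverTargetNodesSet rather than every mesh node; that set is assembled by WaveSolverBase::computeTargetNodeSet further down in this patch by flattening the element-to-node map of the target regions and removing duplicates. A small sketch of the same idea using the standard library in place of LvArray (illustrative only):

#include <algorithm>
#include <cstdio>
#include <vector>

// Sketch only: build the sorted, duplicate-free list of node indices touched
// by a set of elements, mirroring what computeTargetNodeSet does with
// LvArray::sortedArrayManipulation::makeSortedUnique on a scratch array.
static std::vector< int > targetNodeSet( std::vector< std::vector< int > > const & elemsToNodes )
{
  std::vector< int > scratch;
  for( auto const & elem : elemsToNodes )
  {
    scratch.insert( scratch.end(), elem.begin(), elem.end() );
  }
  std::sort( scratch.begin(), scratch.end() );
  scratch.erase( std::unique( scratch.begin(), scratch.end() ), scratch.end() );
  return scratch;
}

int main()
{
  std::vector< std::vector< int > > const elemsToNodes = { { 0, 1, 4, 3 }, { 1, 2, 5, 4 } };
  for( int node : targetNodeSet( elemsToNodes ) )
  {
    std::printf( "%d ", node );   // prints 0 1 2 3 4 5
  }
  std::printf( "\n" );
  return 0;
}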
- arrayView2d< real32 > const uXReceivers = m_displacementXNp1AtReceivers.toView(); - arrayView2d< real32 > const uYReceivers = m_displacementYNp1AtReceivers.toView(); - arrayView2d< real32 > const uZReceivers = m_displacementZNp1AtReceivers.toView(); + forAll< EXEC_POLICY >( solverTargetNodesSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const n ) + { + localIndex const a = solverTargetNodesSet[n]; + ux_nm1[a] = ux_n[a]; + uy_nm1[a] = uy_n[a]; + uz_nm1[a] = uz_n[a]; + ux_n[a] = ux_np1[a]; + uy_n[a] = uy_np1[a]; + uz_n[a] = uz_np1[a]; + + stiffnessVectorx[a] = stiffnessVectory[a] = stiffnessVectorz[a] = 0.0; + rhsx[a] = rhsy[a] = rhsz[a] = 0.0; + } ); - computeAllSeismoTraces( time_n, dt, ux_np1, ux_n, uXReceivers ); - computeAllSeismoTraces( time_n, dt, uy_np1, uy_n, uYReceivers ); - computeAllSeismoTraces( time_n, dt, uz_np1, uz_n, uZReceivers ); +} - incrementIndexSeismoTrace( time_n ); +real64 ElasticWaveEquationSEM::explicitStepInternal( real64 const & time_n, + real64 const & dt, + integer const cycleNumber, + DomainPartition & domain ) +{ + GEOS_MARK_FUNCTION; - forAll< EXEC_POLICY >( nodeManager.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) - { - ux_nm1[a] = ux_n[a]; - uy_nm1[a] = uy_n[a]; - uz_nm1[a] = uz_n[a]; - ux_n[a] = ux_np1[a]; - uy_n[a] = uy_np1[a]; - uz_n[a] = uz_np1[a]; - - stiffnessVectorx[a] = 0.0; - stiffnessVectory[a] = 0.0; - stiffnessVectorz[a] = 0.0; - rhsx[a] = 0.0; - rhsy[a] = 0.0; - rhsz[a] = 0.0; - } ); + GEOS_LOG_RANK_0_IF( dt < epsilonLoc, "Warning! Value for dt: " << dt << "s is smaller than local threshold: " << epsilonLoc ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ) + { + computeUnknowns( time_n, dt, cycleNumber, domain, mesh, regionNames ); + synchronizeUnknowns( time_n, dt, cycleNumber, domain, mesh, regionNames ); + prepareNextTimestep( mesh ); } ); return dt; - } void ElasticWaveEquationSEM::cleanup( real64 const time_n, diff --git a/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.hpp b/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.hpp index 119b40da706..6054a6d63d0 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEM.hpp @@ -51,6 +51,8 @@ class ElasticWaveEquationSEM : public WaveSolverBase ElasticWaveEquationSEM & operator=( ElasticWaveEquationSEM const & ) = delete; ElasticWaveEquationSEM & operator=( ElasticWaveEquationSEM && ) = delete; + /// String used to form the solverName used to register solvers in CoupledSolver + static string coupledSolverAttributePrefix() { return "elastic"; } static string catalogName() { return "ElasticSEM"; } /** @@ -136,6 +138,22 @@ class ElasticWaveEquationSEM : public WaveSolverBase real64 const & dt, integer const cycleNumber, DomainPartition & domain ); + + void computeUnknowns( real64 const & time_n, + real64 const & dt, + integer const cycleNumber, + DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ); + + void synchronizeUnknowns( real64 const & time_n, + real64 const & dt, + integer const cycleNumber, + DomainPartition & domain, + MeshLevel & mesh, + arrayView1d< string const > const & regionNames ); + + void prepareNextTimestep( MeshLevel & mesh ); protected: virtual void postProcessInput() override final; @@ -171,32 +189,15 @@ class ElasticWaveEquationSEM : public WaveSolverBase */ 
virtual void applyPML( real64 const time, DomainPartition & domain ) override; - localIndex getNumNodesPerElem(); - - /// Indices of the nodes (in the right order) for each source point - array2d< localIndex > m_sourceNodeIds; - /// Constant part of the source for the nodes listed in m_sourceNodeIds in x-direction array2d< real64 > m_sourceConstantsx; - /// Constant part of the source for the nodes listed in m_sourceNodeIds in x-direction + /// Constant part of the source for the nodes listed in m_sourceNodeIds in y-direction array2d< real64 > m_sourceConstantsy; - /// Constant part of the source for the nodes listed in m_sourceNodeIds in x-direction + /// Constant part of the source for the nodes listed in m_sourceNodeIds in z-direction array2d< real64 > m_sourceConstantsz; - /// Flag that indicates whether the source is accessible or not to the MPI rank - array1d< localIndex > m_sourceIsAccessible; - - /// Indices of the element nodes (in the right order) for each receiver point - array2d< localIndex > m_receiverNodeIds; - - /// Basis function evaluated at the receiver for the nodes listed in m_receiverNodeIds - array2d< real64 > m_receiverConstants; - - /// Flag that indicates whether the receiver is local or not to the MPI rank - array1d< localIndex > m_receiverIsLocal; - /// Displacement_np1 at the receiver location for each time step for each receiver (x-component) array2d< real32 > m_displacementXNp1AtReceivers; @@ -314,8 +315,8 @@ DECLARE_FIELD( ForcingRHSz, WRITE_AND_READ, "RHS for z-direction" ); -DECLARE_FIELD( MassVector, - "massVector", +DECLARE_FIELD( ElasticMassVector, + "elasticMassVector", array1d< real32 >, 0, NOPLOT, @@ -370,40 +371,40 @@ DECLARE_FIELD( StiffnessVectorz, WRITE_AND_READ, "z-component of stiffness vector." ); -DECLARE_FIELD( MediumVelocityVp, - "mediumVelocityVp", +DECLARE_FIELD( ElasticVelocityVp, + "elasticVelocityVp", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "P-waves speed in the cell" ); -DECLARE_FIELD( MediumVelocityVs, - "mediumVelocityVs", +DECLARE_FIELD( ElasticVelocityVs, + "elasticVelocityVs", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "S-waves speed in the cell" ); -DECLARE_FIELD( MediumDensity, - "mediumDensity", +DECLARE_FIELD( ElasticDensity, + "elasticDensity", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "Medium density of the cell" ); -DECLARE_FIELD( FreeSurfaceFaceIndicator, - "freeSurfaceFaceIndicator", +DECLARE_FIELD( ElasticFreeSurfaceFaceIndicator, + "elasticFreeSurfaceFaceIndicator", array1d< localIndex >, 0, NOPLOT, WRITE_AND_READ, "Free surface indicator, 1 if a face is on free surface 0 otherwise." 
); -DECLARE_FIELD( FreeSurfaceNodeIndicator, - "freeSurfaceNodeIndicator", +DECLARE_FIELD( ElasticFreeSurfaceNodeIndicator, + "elasticFreeSurfaceNodeIndicator", array1d< localIndex >, 0, NOPLOT, diff --git a/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEMKernel.hpp b/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEMKernel.hpp index ebdd56542f4..00970fa61fd 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEMKernel.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/ElasticWaveEquationSEMKernel.hpp @@ -90,12 +90,11 @@ struct PrecomputeSourceAndReceiverKernel R1Tensor const sourceForce, R2SymTensor const sourceMoment ) { + constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; forAll< EXEC_POLICY >( size, [=] GEOS_HOST_DEVICE ( localIndex const k ) { - constexpr localIndex numNodesPerElem = FE_TYPE::numNodes; - real64 const center[3] = { elemCenter[k][0], elemCenter[k][1], elemCenter[k][2] }; @@ -141,15 +140,15 @@ struct PrecomputeSourceAndReceiverKernel coordsOnRefElem ); sourceIsAccessible[isrc] = 1; - real64 N[FE_TYPE::numNodes]; - real64 gradN[FE_TYPE::numNodes][3]; + real64 N[numNodesPerElem]; + real64 gradN[numNodesPerElem][3]; FE_TYPE::calcN( coordsOnRefElem, N ); FE_TYPE::calcGradN( coordsOnRefElem, xLocal, gradN ); R2SymTensor moment = sourceMoment; for( localIndex q=0; q< numNodesPerElem; ++q ) { real64 inc[3] = { 0, 0, 0 }; - sourceNodeIds[isrc][q] = elemsToNodes[k][q]; + sourceNodeIds[isrc][q] = elemsToNodes( k, q ); inc[0] += sourceForce[0] * N[q]; inc[1] += sourceForce[1] * N[q]; inc[2] += sourceForce[2] * N[q]; @@ -169,7 +168,6 @@ struct PrecomputeSourceAndReceiverKernel } } // end loop over all sources - // Step 2: locate the receivers, and precompute the receiver term /// loop over all the receivers that haven't been found yet @@ -196,16 +194,13 @@ struct PrecomputeSourceAndReceiverKernel elemsToNodes[k], nodeCoords, coordsOnRefElem ); - receiverIsLocal[ircv] = 1; - real64 Ntest[numNodesPerElem]; - FE_TYPE::calcN( coordsOnRefElem, Ntest ); for( localIndex a = 0; a < numNodesPerElem; ++a ) { - receiverNodeIds[ircv][a] = elemsToNodes[k][a]; + receiverNodeIds[ircv][a] = elemsToNodes( k, a ); receiverConstants[ircv][a] = Ntest[a]; } } @@ -333,9 +328,9 @@ struct DampingMatrixKernel for( localIndex q = 0; q < numNodesPerFace; ++q ) { real32 const aux = density[e] * m_finiteElement.computeDampingTerm( q, xLocal ); - real32 const localIncrementx = aux * ( velocityVp[e] * abs( nx ) + velocityVs[e] * sqrt( pow( ny, 2 ) + pow( nz, 2 ) ) ); - real32 const localIncrementy = aux * ( velocityVp[e] * abs( ny ) + velocityVs[e] * sqrt( pow( nx, 2 ) + pow( nz, 2 ) ) ); - real32 const localIncrementz = aux * ( velocityVp[e] * abs( nz ) + velocityVs[e] * sqrt( pow( nx, 2 ) + pow( ny, 2 ) ) ); + real32 const localIncrementx = aux * ( velocityVp[e] * LvArray::math::abs( nx ) + velocityVs[e] * LvArray::math::sqrt( pow( ny, 2 ) + pow( nz, 2 ) ) ); + real32 const localIncrementy = aux * ( velocityVp[e] * LvArray::math::abs( ny ) + velocityVs[e] * LvArray::math::sqrt( pow( nx, 2 ) + pow( nz, 2 ) ) ); + real32 const localIncrementz = aux * ( velocityVp[e] * LvArray::math::abs( nz ) + velocityVs[e] * LvArray::math::sqrt( pow( nx, 2 ) + pow( ny, 2 ) ) ); RAJA::atomicAdd< ATOMIC_POLICY >( &dampingx[facesToNodes( f, q )], localIncrementx ); RAJA::atomicAdd< ATOMIC_POLICY >( &dampingy[facesToNodes( f, q )], localIncrementy ); @@ -425,9 +420,9 @@ class ExplicitElasticSEM : public finiteElement::KernelBase< 
SUBREGION_TYPE, m_stiffnessVectorx( nodeManager.getField< fields::StiffnessVectorx >() ), m_stiffnessVectory( nodeManager.getField< fields::StiffnessVectory >() ), m_stiffnessVectorz( nodeManager.getField< fields::StiffnessVectorz >() ), - m_density( elementSubRegion.template getField< fields::MediumDensity >() ), - m_velocityVp( elementSubRegion.template getField< fields::MediumVelocityVp >() ), - m_velocityVs( elementSubRegion.template getField< fields::MediumVelocityVs >() ), + m_density( elementSubRegion.template getField< fields::ElasticDensity >() ), + m_velocityVp( elementSubRegion.template getField< fields::ElasticVelocityVp >() ), + m_velocityVs( elementSubRegion.template getField< fields::ElasticVelocityVs >() ), m_dt( dt ) { GEOS_UNUSED_VAR( edgeManager ); @@ -478,8 +473,8 @@ class ExplicitElasticSEM : public finiteElement::KernelBase< SUBREGION_TYPE, stack.xLocal[ a ][ i ] = m_nodeCoords[ nodeIndex ][ i ]; } } - stack.mu = m_density[k] * m_velocityVs[k] * m_velocityVs[k]; - stack.lambda = m_density[k] *m_velocityVp[k] * m_velocityVp[k] - 2.0*stack.mu; + stack.mu = m_density[k] * pow( m_velocityVs[k], 2 ); + stack.lambda = m_density[k] * pow( m_velocityVp[k], 2 ) - 2.0 * stack.mu; } /** @@ -508,13 +503,13 @@ class ExplicitElasticSEM : public finiteElement::KernelBase< SUBREGION_TYPE, real32 const Ryz_ij = val*(stack.lambda*J[p][1]*J[r][2]+stack.mu*J[p][2]*J[r][1]); real32 const Rzy_ij = val*(stack.mu*J[p][1]*J[r][2]+stack.lambda*J[p][2]*J[r][1]); - real32 const localIncrementx = (Rxx_ij * m_ux_n[m_elemsToNodes[k][j]] + Rxy_ij*m_uy_n[m_elemsToNodes[k][j]] + Rxz_ij*m_uz_n[m_elemsToNodes[k][j]]); - real32 const localIncrementy = (Ryx_ij * m_ux_n[m_elemsToNodes[k][j]] + Ryy_ij*m_uy_n[m_elemsToNodes[k][j]] + Ryz_ij*m_uz_n[m_elemsToNodes[k][j]]); - real32 const localIncrementz = (Rzx_ij * m_ux_n[m_elemsToNodes[k][j]] + Rzy_ij*m_uy_n[m_elemsToNodes[k][j]] + Rzz_ij*m_uz_n[m_elemsToNodes[k][j]]); + real32 const localIncrementx = (Rxx_ij * m_ux_n[m_elemsToNodes( k, j )] + Rxy_ij*m_uy_n[m_elemsToNodes( k, j )] + Rxz_ij*m_uz_n[m_elemsToNodes( k, j )]); + real32 const localIncrementy = (Ryx_ij * m_ux_n[m_elemsToNodes( k, j )] + Ryy_ij*m_uy_n[m_elemsToNodes( k, j )] + Ryz_ij*m_uz_n[m_elemsToNodes( k, j )]); + real32 const localIncrementz = (Rzx_ij * m_ux_n[m_elemsToNodes( k, j )] + Rzy_ij*m_uy_n[m_elemsToNodes( k, j )] + Rzz_ij*m_uz_n[m_elemsToNodes( k, j )]); - RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVectorx[m_elemsToNodes[k][i]], localIncrementx ); - RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVectory[m_elemsToNodes[k][i]], localIncrementy ); - RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVectorz[m_elemsToNodes[k][i]], localIncrementz ); + RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVectorx[m_elemsToNodes( k, i )], localIncrementx ); + RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVectory[m_elemsToNodes( k, i )], localIncrementy ); + RAJA::atomicAdd< parallelDeviceAtomic >( &m_stiffnessVectorz[m_elemsToNodes( k, i )], localIncrementz ); } ); } @@ -532,13 +527,13 @@ class ExplicitElasticSEM : public finiteElement::KernelBase< SUBREGION_TYPE, /// The array containing the nodal displacement array in z direction. arrayView1d< real32 > const m_uz_n; - /// The array containing the product of the stiffness matrix and the nodal pressure. + /// The array containing the product of the stiffness matrix and the nodal displacement. 
arrayView1d< real32 > const m_stiffnessVectorx; - /// The array containing the product of the stiffness matrix and the nodal pressure. + /// The array containing the product of the stiffness matrix and the nodal displacement. arrayView1d< real32 > const m_stiffnessVectory; - /// The array containing the product of the stiffness matrix and the nodal pressure. + /// The array containing the product of the stiffness matrix and the nodal displacement. arrayView1d< real32 > const m_stiffnessVectorz; /// The array containing the density of the medium diff --git a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.cpp b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.cpp index 239426e6dc9..47ef2849df0 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.cpp @@ -201,12 +201,12 @@ void WaveSolverBase::registerDataOnMesh( Group & meshBodies ) arrayView2d< real64 const, nodes::REFERENCE_POSITION_USD > const X = nodeManager.referencePosition().toViewConst(); nodeManager.getField< fields::referencePosition32 >().resizeDimension< 1 >( X.size( 1 ) ); - arrayView2d< wsCoordType, nodes::REFERENCE_POSITION_USD > const X32 = nodeManager.getField< fields::referencePosition32 >(); + arrayView2d< wsCoordType, nodes::REFERENCE_POSITION_USD > const nodeCoords32 = nodeManager.getField< fields::referencePosition32 >(); for( int i = 0; i < X.size( 0 ); i++ ) { for( int j = 0; j < X.size( 1 ); j++ ) { - X32[i][j] = X[i][j]; + nodeCoords32[i][j] = X[i][j]; } } } ); @@ -400,6 +400,24 @@ localIndex WaveSolverBase::getNumNodesPerElem() return numNodesPerElem; } +void WaveSolverBase::computeTargetNodeSet( arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elemsToNodes, + localIndex const subRegionSize, + localIndex const numQuadraturePointsPerElem ) +{ + array1d< localIndex > scratch( subRegionSize * numQuadraturePointsPerElem ); + localIndex i = 0; + for( localIndex e = 0; e < subRegionSize; ++e ) + { + for( localIndex q = 0; q < numQuadraturePointsPerElem; ++q ) + { + scratch[i++] = elemsToNodes( e, q ); + } + } + std::ptrdiff_t const numUniqueValues = LvArray::sortedArrayManipulation::makeSortedUnique( scratch.begin(), scratch.end() ); + + m_solverTargetNodesSet.insert( scratch.begin(), scratch.begin() + numUniqueValues ); +} + void WaveSolverBase::incrementIndexSeismoTrace( real64 const time_n ) { while( (m_dtSeismoTrace * m_indexSeismoTrace) <= (time_n + epsilonLoc) && m_indexSeismoTrace < m_nsamplesSeismoTrace ) diff --git a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.hpp b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.hpp index 820f5e8f58c..cc67eba65de 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBase.hpp @@ -124,6 +124,12 @@ class WaveSolverBase : public SolverBase */ void reinit() override final; + SortedArray< localIndex > const & getSolverNodesSet() { return m_solverTargetNodesSet; } + + void computeTargetNodeSet( arrayView2d< localIndex const, cells::NODE_MAP_USD > const & elemsToNodes, + localIndex const subRegionSize, + localIndex const numQuadraturePointsPerElem ); + protected: virtual void postProcessInput() override; @@ -317,6 +323,9 @@ class WaveSolverBase : public SolverBase /// LIFO to store p_dt2 std::unique_ptr< LifoStorage< real32, localIndex > > m_lifo; + /// A set of target nodes IDs that will be handled by the current 
solver + SortedArray< localIndex > m_solverTargetNodesSet; + struct parametersPML { /// Mininum (x,y,z) coordinates of inner PML boundaries diff --git a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBaseFields.hpp b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBaseFields.hpp index c55fdb8cc2d..333139c21c1 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBaseFields.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverBaseFields.hpp @@ -160,8 +160,8 @@ DECLARE_FIELD( ForcingRHS, WRITE_AND_READ, "RHS" ); -DECLARE_FIELD( MassVector, - "massVector", +DECLARE_FIELD( AcousticMassVector, + "acousticMassVector", array1d< real32 >, 0, NOPLOT, @@ -208,32 +208,32 @@ DECLARE_FIELD( DampingVector_qp, WRITE_AND_READ, "Diagonal of the Damping Matrix for p terms in q equation." ); -DECLARE_FIELD( MediumVelocity, - "mediumVelocity", +DECLARE_FIELD( AcousticVelocity, + "acousticVelocity", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "Medium velocity of the cell" ); -DECLARE_FIELD( MediumDensity, - "mediumDensity", +DECLARE_FIELD( AcousticDensity, + "acousticDensity", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "Medium density of the cell" ); -DECLARE_FIELD( FreeSurfaceFaceIndicator, - "freeSurfaceFaceIndicator", +DECLARE_FIELD( AcousticFreeSurfaceFaceIndicator, + "acousticFreeSurfaceFaceIndicator", array1d< localIndex >, 0, NOPLOT, WRITE_AND_READ, "Free surface indicator, 1 if a face is on free surface 0 otherwise." ); -DECLARE_FIELD( FreeSurfaceNodeIndicator, - "freeSurfaceNodeIndicator", +DECLARE_FIELD( AcousticFreeSurfaceNodeIndicator, + "acousticFreeSurfaceNodeIndicator", array1d< localIndex >, 0, NOPLOT, @@ -272,6 +272,14 @@ DECLARE_FIELD( BottomSurfaceNodeIndicator, WRITE_AND_READ, "Bottom surface indicator, 1 if a face is on the bottom surface 0 otherwise." ); +DECLARE_FIELD( ElasticMassVector, + "elasticMassVector", + array1d< real32 >, + 0, + NOPLOT, + WRITE_AND_READ, + "Diagonal of the Mass Matrix." ); + DECLARE_FIELD( Displacementx_np1, "displacementx_np1", array1d< real32 >, @@ -368,22 +376,46 @@ DECLARE_FIELD( DampingVectorz, WRITE_AND_READ, "Diagonal Damping Matrix in z-direction." ); -DECLARE_FIELD( MediumVelocityVp, - "mediumVelocityVp", +DECLARE_FIELD( ElasticVelocityVp, + "elasticVelocityVp", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "P-waves speed in the cell" ); -DECLARE_FIELD( MediumVelocityVs, - "mediumVelocityVs", +DECLARE_FIELD( ElasticVelocityVs, + "elasticVelocityVs", array1d< real32 >, 0, NOPLOT, WRITE_AND_READ, "S-waves speed in the cell" ); +DECLARE_FIELD( ElasticDensity, + "elasticDensity", + array1d< real32 >, + 0, + NOPLOT, + WRITE_AND_READ, + "Medium density of the cell" ); + +DECLARE_FIELD( ElasticFreeSurfaceFaceIndicator, + "elasticFreeSurfaceFaceIndicator", + array1d< localIndex >, + 0, + NOPLOT, + WRITE_AND_READ, + "Free surface indicator, 1 if a face is on free surface 0 otherwise." ); + +DECLARE_FIELD( ElasticFreeSurfaceNodeIndicator, + "elasticFreeSurfaceNodeIndicator", + array1d< localIndex >, + 0, + NOPLOT, + WRITE_AND_READ, + "Free surface indicator, 1 if a node is on free surface 0 otherwise." 
); + DECLARE_FIELD( Lambda, "lambda", array1d< real32 >, diff --git a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverUtils.hpp b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverUtils.hpp index ca52e3af4c0..794df9fe54e 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverUtils.hpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/WaveSolverUtils.hpp @@ -80,9 +80,12 @@ struct WaveSolverUtils */ static void initTrace( char const * prefix, string const & name, + bool const outputSeismoTrace, localIndex const nReceivers, arrayView1d< localIndex const > const receiverIsLocal ) { + if( !outputSeismoTrace ) return; + string const outputDir = OutputBase::getOutputDirectory(); RAJA::ReduceSum< ReducePolicy< serialPolicy >, localIndex > count( 0 ); @@ -101,7 +104,7 @@ struct WaveSolverUtils } /** - * @brief Convenient helper for 3D vectors calling 3 times the scalar version. + * @brief Convenient helper for 3D vectors calling 3 times the scalar version with only the sampled variable argument changed. */ static void writeSeismoTraceVector( char const * prefix, string const & name, @@ -140,6 +143,7 @@ struct WaveSolverUtils std::ofstream f( fn, std::ios::app ); if( f ) { + GEOS_LOG_RANK( GEOS_FMT( "Append to seismo trace file {}", fn ) ); for( localIndex iSample = 0; iSample < nsamplesSeismoTrace; ++iSample ) { // index - time - value @@ -171,7 +175,7 @@ struct WaveSolverUtils { real64 const time_np1 = time_n + dt; - real32 const a1 = abs( dt ) < epsilonLoc ? 1.0 : (time_np1 - timeSeismo) / dt; + real32 const a1 = LvArray::math::abs( dt ) < epsilonLoc ? 1.0 : (time_np1 - timeSeismo) / dt; real32 const a2 = 1.0 - a1; localIndex const nReceivers = receiverConstants.size( 0 ); @@ -208,7 +212,7 @@ struct WaveSolverUtils arrayView2d< real32 const > const var_n, arrayView2d< real32 > varAtReceivers ) { - real64 const time_np1 = time_n+dt; + real64 const time_np1 = time_n + dt; real32 const a1 = dt < epsilonLoc ? 1.0 : (time_np1 - timeSeismo) / dt; real32 const a2 = 1.0 - a1; diff --git a/src/coreComponents/python/.gitignore b/src/coreComponents/python/.gitignore deleted file mode 100644 index 800e83a4bb7..00000000000 --- a/src/coreComponents/python/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.pyc -*.egg-info -build diff --git a/src/coreComponents/python/CMakeLists.txt b/src/coreComponents/python/CMakeLists.txt deleted file mode 100644 index fd387879b74..00000000000 --- a/src/coreComponents/python/CMakeLists.txt +++ /dev/null @@ -1,78 +0,0 @@ - - -if ( Python3_EXECUTABLE ) - # Select the version of python to target - if( ENABLE_PYGEOSX ) - set( PYTHON_POST_EXECUTABLE ${CMAKE_BINARY_DIR}/lib/PYGEOSX/bin/python CACHE PATH "" FORCE ) - - # Check for the virtualenv package - execute_process( - COMMAND ${Python3_EXECUTABLE} -c "import virtualenv" - RESULT_VARIABLE VIRTUALENV_AVAILABLE - ) - - if (NOT ${VIRTUALENV_AVAILABLE} EQUAL 0) - message(FATAL_ERROR "To build the PYGEOSX interface, the \"virtualenv\" package should be installed in the target python environment. 
Please install it (i.e.: \"${Python3_EXECUTABLE} -m pip install virtualenv\") or use a different python distribution.") - endif() - - else() - set( PYTHON_POST_EXECUTABLE ${Python3_EXECUTABLE} CACHE PATH "" FORCE ) - endif() - - # Build targets - set( GEOSX_PYTHON_TOOLS_BINS - "${CMAKE_BINARY_DIR}/bin/preprocess_xml" - "${CMAKE_BINARY_DIR}/bin/format_xml" - ) - - add_custom_command( OUTPUT ${GEOSX_PYTHON_TOOLS_BINS} - COMMAND bash ${CMAKE_SOURCE_DIR}/../scripts/setupPythonEnvironment.bash -p ${PYTHON_POST_EXECUTABLE} -b ${CMAKE_BINARY_DIR}/bin - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - ) - - if( ENABLE_PYGEOSX ) - add_custom_target( geosx_python_tools - DEPENDS pygeosx ${GEOSX_PYTHON_TOOLS_BINS} ) - else() - add_custom_target( geosx_python_tools - DEPENDS ${GEOSX_PYTHON_TOOLS_BINS} ) - endif() - - add_custom_target( geosx_python_tools_test - COMMAND ${CMAKE_BINARY_DIR}/python/geosx/bin/test_geosx_xml_tools - COMMAND rm -r ${CMAKE_BINARY_DIR}/python/geosx_xml_tools_tests* - WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/python - DEPENDS geosx_python_tools - ) - - add_custom_target( geosx_format_all_xml_files - COMMAND bash ${CMAKE_SOURCE_DIR}/../scripts/formatXMLFiles.bash -g ${CMAKE_BINARY_DIR}/bin/format_xml ${CMAKE_SOURCE_DIR} ${CMAKE_SOURCE_DIR}/../examples - WORKING_DIRECTORY ${CMAKE_BINARY_DIR} - DEPENDS geosx_xml_tools - ) - -else() - message(WARNING "Building the GEOSX python tools requires Python >= 3.7.") - message(STATUS "If you need these, try setting Python3_ROOT_DIR and/or Python3_EXECUTABLE in your host config.") -endif() - - -# Python formatting -if ( ENABLE_YAPF ) - set( python_module_sources ) - file( GLOB_RECURSE python_module_sources "*.py" ) - - # Note: blt throws an error if sources doesn't include a c-file, so include dummy.cpp - blt_add_code_checks( PREFIX python_modules_yapf_style - SOURCES ${python_module_sources} ${CMAKE_SOURCE_DIR}/coreComponents/dummy.cpp - YAPF_CFG_FILE ${PROJECT_SOURCE_DIR}/yapf.cfg ) - - set( python_script_sources ) - file( GLOB_RECURSE python_script_sources "${CMAKE_SOURCE_DIR}/../scripts/*.py" ) - - blt_add_code_checks( PREFIX python_scripts_yapf_style - SOURCES ${python_script_sources} ${CMAKE_SOURCE_DIR}/coreComponents/dummy.cpp - YAPF_CFG_FILE ${PROJECT_SOURCE_DIR}/yapf.cfg ) -endif() - - diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/__init__.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/__init__.py deleted file mode 100644 index b7db25411d0..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Empty diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/check_fractures.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/check_fractures.py deleted file mode 100644 index b2c241b1ae1..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/check_fractures.py +++ /dev/null @@ -1,178 +0,0 @@ -from dataclasses import dataclass -import logging - -from typing import ( - Collection, - FrozenSet, - Iterable, - Sequence, - Set, - Tuple, -) - -from tqdm import tqdm -import numpy - -from vtkmodules.vtkCommonDataModel import ( - vtkUnstructuredGrid, - vtkCell, -) -from vtkmodules.vtkCommonCore import ( - vtkPoints, -) -from vtkmodules.vtkIOXML import ( - vtkXMLMultiBlockDataReader, -) -from vtkmodules.util.numpy_support import ( - vtk_to_numpy, -) -from vtk_utils import ( - vtk_iter, -) - - -@dataclass(frozen=True) -class Options: - tolerance: float - matrix_name: str - fracture_name: str - 
collocated_nodes_field_name: str - - -@dataclass(frozen=True) -class Result: - # First index is the local index of the fracture mesh. - # Second is the local index of the matrix mesh. - # Third is the global index in the matrix mesh. - errors: Sequence[tuple[int, int, int]] - - -def __read_multiblock(vtk_input_file: str, matrix_name: str, fracture_name: str) -> Tuple[vtkUnstructuredGrid, vtkUnstructuredGrid]: - reader = vtkXMLMultiBlockDataReader() - reader.SetFileName(vtk_input_file) - reader.Update() - multi_block = reader.GetOutput() - for b in range(multi_block.GetNumberOfBlocks()): - block_name: str = multi_block.GetMetaData(b).Get(multi_block.NAME()) - if block_name == matrix_name: - matrix: vtkUnstructuredGrid = multi_block.GetBlock(b) - if block_name == fracture_name: - fracture: vtkUnstructuredGrid = multi_block.GetBlock(b) - assert matrix and fracture - return matrix, fracture - - -def format_collocated_nodes(fracture_mesh: vtkUnstructuredGrid) -> Sequence[Iterable[int]]: - """ - Extract the collocated nodes information from the mesh and formats it in a python way. - :param fracture_mesh: The mesh of the fracture (with 2d cells). - :return: An iterable over all the buckets of collocated nodes. - """ - collocated_nodes: numpy.ndarray = vtk_to_numpy(fracture_mesh.GetPointData().GetArray("collocated_nodes")) - if len(collocated_nodes.shape) == 1: - collocated_nodes: numpy.ndarray = collocated_nodes.reshape((collocated_nodes.shape[0], 1)) - generator = (tuple(sorted(bucket[bucket > -1])) for bucket in collocated_nodes) - return tuple(generator) - - -def __check_collocated_nodes_positions(matrix_points: Sequence[Tuple[float, float, float]], - fracture_points: Sequence[Tuple[float, float, float]], - g2l: Sequence[int], - collocated_nodes: Iterable[Iterable[int]]) -> Collection[Tuple[int, Iterable[int], Iterable[Tuple[float, float, float]]]]: - issues = [] - for li, bucket in enumerate(collocated_nodes): - matrix_nodes = (fracture_points[li], ) + tuple(map(lambda gi: matrix_points[g2l[gi]], bucket)) - m = numpy.array(matrix_nodes) - rank: int = numpy.linalg.matrix_rank(m) - if rank > 1: - issues.append((li, bucket, tuple(map(lambda gi: matrix_points[g2l[gi]], bucket)))) - return issues - - -def my_iter(ccc): - car, cdr = ccc[0], ccc[1:] - for i in car: - if cdr: - for j in my_iter(cdr): - yield i, *j - else: - yield (i, ) - - -def __check_neighbors(matrix: vtkUnstructuredGrid, - fracture: vtkUnstructuredGrid, - g2l: Sequence[int], - collocated_nodes: Sequence[Iterable[int]]): - fracture_nodes: Set[int] = set() - for bucket in collocated_nodes: - for gi in bucket: - fracture_nodes.add(g2l[gi]) - # For each face of each cell, - # if all the points of the face are "made" of collocated nodes, - # then this is a fracture face. 
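The fracture-face criterion in the comment above is implemented with Python's subset operator on (frozen)sets; a minimal, self-contained illustration with hypothetical node ids:

```python
# Hypothetical node ids, for illustration only: `a <= b` on sets is the subset
# test, so a face is a fracture face exactly when every one of its point ids
# belongs to the set of collocated (fracture) nodes.
fracture_nodes = {3, 5, 7, 11}
assert frozenset({5, 7, 11}) <= fracture_nodes        # all face nodes are fracture nodes
assert not frozenset({5, 8}) <= fracture_nodes        # node 8 is not a fracture node
```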
- fracture_faces: Set[FrozenSet[int]] = set() - for c in range(matrix.GetNumberOfCells()): - cell: vtkCell = matrix.GetCell(c) - for f in range(cell.GetNumberOfFaces()): - face: vtkCell = cell.GetFace(f) - point_ids = frozenset(vtk_iter(face.GetPointIds())) - if point_ids <= fracture_nodes: - fracture_faces.add(point_ids) - # Finding the cells - for c in tqdm(range(fracture.GetNumberOfCells()), desc="Finding neighbor cell pairs"): - cell: vtkCell = fracture.GetCell(c) - cns: Set[FrozenSet[int]] = set() # subset of collocated_nodes - point_ids = frozenset(vtk_iter(cell.GetPointIds())) - for point_id in point_ids: - bucket = collocated_nodes[point_id] - local_bucket = frozenset(map(g2l.__getitem__, bucket)) - cns.add(local_bucket) - found = 0 - tmp = tuple(map(tuple, cns)) - for node_combinations in my_iter(tmp): - f = frozenset(node_combinations) - if f in fracture_faces: - found += 1 - if found != 2: - logging.warning(f"Something went wrong since we should have found 2 fractures faces (we found {found}) for collocated nodes {cns}.") - - -def __check(vtk_input_file: str, options: Options) -> Result: - matrix, fracture = __read_multiblock(vtk_input_file, options.matrix_name, options.fracture_name) - matrix_points: vtkPoints = matrix.GetPoints() - fracture_points: vtkPoints = fracture.GetPoints() - - collocated_nodes: Sequence[Iterable[int]] = format_collocated_nodes(fracture) - assert matrix.GetPointData().GetGlobalIds() and matrix.GetCellData().GetGlobalIds() and \ - fracture.GetPointData().GetGlobalIds() and fracture.GetCellData().GetGlobalIds() - - point_ids = vtk_to_numpy(matrix.GetPointData().GetGlobalIds()) - g2l = numpy.ones(len(point_ids), dtype=int) * -1 - for loc, glo in enumerate(point_ids): - g2l[glo] = loc - g2l.flags.writeable = False - - issues = __check_collocated_nodes_positions(vtk_to_numpy(matrix.GetPoints().GetData()), - vtk_to_numpy(fracture.GetPoints().GetData()), - g2l, - collocated_nodes) - assert len(issues) == 0 - - __check_neighbors(matrix, fracture, g2l, collocated_nodes) - - errors = [] - for i, duplicates in enumerate(collocated_nodes): - for duplicate in filter(lambda i: i > -1, duplicates): - p0 = matrix_points.GetPoint(g2l[duplicate]) - p1 = fracture_points.GetPoint(i) - if numpy.linalg.norm(numpy.array(p1) - numpy.array(p0)) > options.tolerance: - errors.append((i, g2l[duplicate], duplicate)) - return Result(errors=errors) - - -def check(vtk_input_file: str, options: Options) -> Result: - try: - return __check(vtk_input_file, options) - except BaseException as e: - logging.error(e) - return Result(errors=()) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/collocated_nodes.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/collocated_nodes.py deleted file mode 100644 index 7a5273ec448..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/collocated_nodes.py +++ /dev/null @@ -1,78 +0,0 @@ -from collections import defaultdict -from dataclasses import dataclass -import logging -from typing import ( - Collection, - Iterable, -) -import numpy - -from vtkmodules.vtkCommonCore import ( - reference, - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - vtkIncrementalOctreePointLocator, ) - -from . import vtk_utils - - -@dataclass(frozen=True) -class Options: - tolerance: float - - -@dataclass(frozen=True) -class Result: - nodes_buckets: Iterable[Iterable[int]] # Each bucket contains the duplicated node indices. 
- wrong_support_elements: Collection[int] # Element indices with support node indices appearing more than once. - - -def __check(mesh, options: Options) -> Result: - points = mesh.GetPoints() - - locator = vtkIncrementalOctreePointLocator() - locator.SetTolerance(options.tolerance) - output = vtkPoints() - locator.InitPointInsertion(output, points.GetBounds()) - - # original ids to/from filtered ids. - filtered_to_original = numpy.ones(points.GetNumberOfPoints(), dtype=int) * -1 - - rejected_points = defaultdict(list) - point_id = reference(0) - for i in range(points.GetNumberOfPoints()): - is_inserted = locator.InsertUniquePoint(points.GetPoint(i), point_id) - if not is_inserted: - # If it's not inserted, `point_id` contains the node that was already at that location. - # But in that case, `point_id` is the new numbering in the destination points array. - # It's more useful for the user to get the old index in the original mesh, so he can look for it in his data. - logging.debug( - f"Point {i} at {points.GetPoint(i)} has been rejected, point {filtered_to_original[point_id.get()]} is already inserted." - ) - rejected_points[point_id.get()].append(i) - else: - # If it's inserted, `point_id` contains the new index in the destination array. - # We store this information to be able to connect the source and destination arrays. - # original_to_filtered[i] = point_id.get() - filtered_to_original[point_id.get()] = i - - tmp = [] - for n, ns in rejected_points.items(): - tmp.append((n, *ns)) - - # Checking that the support node indices appear only once per element. - wrong_support_elements = [] - for c in range(mesh.GetNumberOfCells()): - cell = mesh.GetCell(c) - num_points_per_cell = cell.GetNumberOfPoints() - if len({cell.GetPointId(i) for i in range(num_points_per_cell)}) != num_points_per_cell: - wrong_support_elements.append(c) - - return Result(nodes_buckets=tmp, - wrong_support_elements=wrong_support_elements) - - -def check(vtk_input_file: str, options: Options) -> Result: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/element_volumes.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/element_volumes.py deleted file mode 100644 index 4dfd917247a..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/element_volumes.py +++ /dev/null @@ -1,82 +0,0 @@ -import logging -from dataclasses import dataclass -from typing import List, Tuple -import uuid - -from vtkmodules.vtkCommonDataModel import ( - VTK_HEXAHEDRON, - VTK_PYRAMID, - VTK_TETRA, - VTK_WEDGE, -) -from vtkmodules.vtkFiltersVerdict import ( - vtkCellSizeFilter, - vtkMeshQuality, -) -from vtkmodules.util.numpy_support import ( - vtk_to_numpy, -) - - -from . 
import vtk_utils - - -@dataclass(frozen=True) -class Options: - min_volume: float - - -@dataclass(frozen=True) -class Result: - element_volumes: List[Tuple[int, float]] - - -def __check(mesh, options: Options) -> Result: - cs = vtkCellSizeFilter() - - cs.ComputeAreaOff() - cs.ComputeLengthOff() - cs.ComputeSumOff() - cs.ComputeVertexCountOff() - cs.ComputeVolumeOn() - volume_array_name = "__MESH_DOCTOR_VOLUME-" + str(uuid.uuid4()) # Making the name unique - cs.SetVolumeArrayName(volume_array_name) - - cs.SetInputData(mesh) - cs.Update() - - mq = vtkMeshQuality() - SUPPORTED_TYPES = [VTK_HEXAHEDRON, VTK_TETRA] - - mq.SetTetQualityMeasureToVolume() - mq.SetHexQualityMeasureToVolume() - if hasattr(mq, "SetPyramidQualityMeasureToVolume"): # This feature is quite recent - mq.SetPyramidQualityMeasureToVolume() - SUPPORTED_TYPES.append(VTK_PYRAMID) - mq.SetWedgeQualityMeasureToVolume() - SUPPORTED_TYPES.append(VTK_WEDGE) - else: - logging.warning("Your \"pyvtk\" version does not bring pyramid nor wedge support with vtkMeshQuality. Using the fallback solution.") - - mq.SetInputData(mesh) - mq.Update() - - volume = cs.GetOutput().GetCellData().GetArray(volume_array_name) - quality = mq.GetOutput().GetCellData().GetArray("Quality") # Name is imposed by vtk. - - assert volume is not None - assert quality is not None - volume = vtk_to_numpy(volume) - quality = vtk_to_numpy(quality) - small_volumes: List[Tuple[int, float]] = [] - for i, pack in enumerate(zip(volume, quality)): - v, q = pack - vol = q if mesh.GetCellType(i) in SUPPORTED_TYPES else v - if vol < options.min_volume: - small_volumes.append((i, vol)) - return Result(element_volumes=small_volumes) - - -def check(vtk_input_file: str, options: Options) -> Result: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/fix_elements_orderings.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/fix_elements_orderings.py deleted file mode 100644 index 61dd034d4cd..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/fix_elements_orderings.py +++ /dev/null @@ -1,66 +0,0 @@ -from dataclasses import dataclass -import logging -from typing import ( - List, - Dict, - Set, - FrozenSet, -) - -from vtkmodules.vtkCommonCore import ( - vtkIdList, -) - -from . import vtk_utils -from .vtk_utils import ( - to_vtk_id_list, - VtkOutput, -) - - -@dataclass(frozen=True) -class Options: - vtk_output: VtkOutput - cell_type_to_ordering: Dict[int, List[int]] - - -@dataclass(frozen=True) -class Result: - output: str - unchanged_cell_types: FrozenSet[int] - - -def __check(mesh, options: Options) -> Result: - # The vtk cell type is an int and will be the key of the following mapping, - # that will point to the relevant permutation. - cell_type_to_ordering: Dict[int, List[int]] = options.cell_type_to_ordering - unchanged_cell_types: Set[int] = set() # For logging purpose - - # Preparing the output mesh by first keeping the same instance type. - output_mesh = mesh.NewInstance() - output_mesh.CopyStructure(mesh) - output_mesh.CopyAttributes(mesh) - - # `output_mesh` now contains a full copy of the input mesh. - # We'll now modify the support nodes orderings in place if needed. 
- cells = output_mesh.GetCells() - for cell_idx in range(output_mesh.GetNumberOfCells()): - cell_type: int = output_mesh.GetCell(cell_idx).GetCellType() - new_ordering = cell_type_to_ordering.get(cell_type) - if new_ordering: - support_point_ids = vtkIdList() - cells.GetCellAtId(cell_idx, support_point_ids) - new_support_point_ids = [] - for i, v in enumerate(new_ordering): - new_support_point_ids.append(support_point_ids.GetId(new_ordering[i])) - cells.ReplaceCellAtId(cell_idx, to_vtk_id_list(new_support_point_ids)) - else: - unchanged_cell_types.add(cell_type) - is_written_error = vtk_utils.write_mesh(output_mesh, options.vtk_output) - return Result(output=options.vtk_output.output if not is_written_error else "", - unchanged_cell_types=frozenset(unchanged_cell_types)) - - -def check(vtk_input_file: str, options: Options) -> Result: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_cube.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_cube.py deleted file mode 100644 index f8625f50453..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_cube.py +++ /dev/null @@ -1,160 +0,0 @@ -from dataclasses import dataclass -import logging -from typing import Sequence, Iterable - -import numpy - -from vtkmodules.vtkCommonCore import ( - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_HEXAHEDRON, - vtkCellArray, - vtkHexahedron, - vtkRectilinearGrid, - vtkUnstructuredGrid, -) -from vtkmodules.util.numpy_support import ( - numpy_to_vtk, -) - -from . import vtk_utils -from .vtk_utils import ( - VtkOutput, -) - -from .generate_global_ids import __build_global_ids - - -@dataclass(frozen=True) -class Result: - info: str - - -@dataclass(frozen=True) -class FieldInfo: - name: str - dimension: int - support: str - - -@dataclass(frozen=True) -class Options: - vtk_output: VtkOutput - generate_cells_global_ids: bool - generate_points_global_ids: bool - xs: Sequence[float] - ys: Sequence[float] - zs: Sequence[float] - nxs: Sequence[int] - nys: Sequence[int] - nzs: Sequence[int] - fields: Iterable[FieldInfo] - - -@dataclass(frozen=True) -class XYZ: - x: numpy.ndarray - y: numpy.ndarray - z: numpy.ndarray - - -def build_rectilinear_blocks_mesh(xyzs: Iterable[XYZ]) -> vtkUnstructuredGrid: - """ - Builds an unstructured vtk grid from the `xyzs` blocks. Kind of InternalMeshGenerator. - :param xyzs: The blocks. - :return: The unstructured mesh, even if it's topologically structured. - """ - rgs = [] - for xyz in xyzs: - rg = vtkRectilinearGrid() - rg.SetDimensions(len(xyz.x), len(xyz.y), len(xyz.z)) - rg.SetXCoordinates(numpy_to_vtk(xyz.x)) - rg.SetYCoordinates(numpy_to_vtk(xyz.y)) - rg.SetZCoordinates(numpy_to_vtk(xyz.z)) - rgs.append(rg) - - num_points = sum(map(lambda r: r.GetNumberOfPoints(), rgs)) - num_cells = sum(map(lambda r: r.GetNumberOfCells(), rgs)) - - points = vtkPoints() - points.Allocate(num_points) - for rg in rgs: - for i in range(rg.GetNumberOfPoints()): - points.InsertNextPoint(rg.GetPoint(i)) - - cell_types = [VTK_HEXAHEDRON] * num_cells - cells = vtkCellArray() - cells.AllocateExact(num_cells, num_cells * 8) - - m = (0, 1, 3, 2, 4, 5, 7, 6) # VTK_VOXEL and VTK_HEXAHEDRON do not share the same ordering. 
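The permutation `m` above converts the lexicographic corner ordering of `VTK_VOXEL` cells coming from the rectilinear grid into the `VTK_HEXAHEDRON` ordering (bottom face counter-clockwise, then top face); a small sketch with hypothetical point ids:

```python
# Hypothetical global point ids of one voxel, for illustration only.
m = (0, 1, 3, 2, 4, 5, 7, 6)
voxel_point_ids = [10, 11, 12, 13, 14, 15, 16, 17]
hex_point_ids = [voxel_point_ids[m[j]] for j in range(8)]
assert hex_point_ids == [10, 11, 13, 12, 14, 15, 17, 16]
```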
- offset = 0 - for rg in rgs: - for i in range(rg.GetNumberOfCells()): - c = rg.GetCell(i) - new_cell = vtkHexahedron() - for j in range(8): - new_cell.GetPointIds().SetId(j, offset + c.GetPointId(m[j])) - cells.InsertNextCell(new_cell) - offset += rg.GetNumberOfPoints() - - mesh = vtkUnstructuredGrid() - mesh.SetPoints(points) - mesh.SetCells(cell_types, cells) - - return mesh - - -def __add_fields(mesh: vtkUnstructuredGrid, fields: Iterable[FieldInfo]) -> vtkUnstructuredGrid: - for field_info in fields: - if field_info.support == "CELLS": - data = mesh.GetCellData() - n = mesh.GetNumberOfCells() - elif field_info.support == "POINTS": - data = mesh.GetPointData() - n = mesh.GetNumberOfPoints() - array = numpy.ones((n, field_info.dimension), dtype=float) - vtk_array = numpy_to_vtk(array) - vtk_array.SetName(field_info.name) - data.AddArray(vtk_array) - return mesh - - -def __build(options: Options): - def build_coordinates(positions, num_elements): - result = [] - it = zip(zip(positions, positions[1:]), num_elements) - try: - coords, n = next(it) - while True: - start, stop = coords - end_point = False - tmp = numpy.linspace(start=start, stop=stop, num=n+end_point, endpoint=end_point) - coords, n = next(it) - result.append(tmp) - except StopIteration: - end_point = True - tmp = numpy.linspace(start=start, stop=stop, num=n+end_point, endpoint=end_point) - result.append(tmp) - return numpy.concatenate(result) - x = build_coordinates(options.xs, options.nxs) - y = build_coordinates(options.ys, options.nys) - z = build_coordinates(options.zs, options.nzs) - cube = build_rectilinear_blocks_mesh((XYZ(x, y, z),)) - cube = __add_fields(cube, options.fields) - __build_global_ids(cube, options.generate_cells_global_ids, options.generate_points_global_ids) - return cube - - -def __check(options: Options) -> Result: - output_mesh = __build(options) - vtk_utils.write_mesh(output_mesh, options.vtk_output) - return Result(info=f"Mesh was written to {options.vtk_output.output}") - - -def check(vtk_input_file: str, options: Options) -> Result: - try: - return __check(options) - except BaseException as e: - logging.error(e) - return Result(info="Something went wrong.") diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_fractures.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_fractures.py deleted file mode 100644 index 22fbadcb956..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_fractures.py +++ /dev/null @@ -1,482 +0,0 @@ -from collections import defaultdict -from dataclasses import dataclass -import logging -from typing import ( - Collection, - Dict, - FrozenSet, - Iterable, - List, - Mapping, - Optional, - Set, - Sequence, - Tuple, -) -from enum import Enum - -from tqdm import tqdm -import networkx -import numpy - -from vtkmodules.vtkCommonCore import ( - vtkIdList, - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - vtkCell, - vtkCellArray, - vtkPolygon, - vtkUnstructuredGrid, - VTK_POLYGON, - VTK_POLYHEDRON, -) -from vtkmodules.util.numpy_support import ( - vtk_to_numpy, - numpy_to_vtk, -) -from vtkmodules.util.vtkConstants import VTK_ID_TYPE - -from . 
import vtk_utils -from .vtk_utils import ( - vtk_iter, - VtkOutput, - to_vtk_id_list, -) -from .vtk_polyhedron import ( - FaceStream, -) - - -class FracturePolicy(Enum): - FIELD = 0 - INTERNAL_SURFACES = 1 - - -@dataclass(frozen=True) -class Options: - policy: FracturePolicy - field: str - field_values: FrozenSet[int] - vtk_output: VtkOutput - vtk_fracture_output: VtkOutput - - -@dataclass(frozen=True) -class Result: - info: str - - -@dataclass(frozen=True) -class FractureInfo: - node_to_cells: Mapping[int, Iterable[int]] # For each _fracture_ node, gives all the cells that use this node. - face_nodes: Iterable[Collection[int]] # For each fracture face, returns the nodes of this face - - -def build_node_to_cells(mesh: vtkUnstructuredGrid, - face_nodes: Iterable[Iterable[int]]) -> Mapping[int, Iterable[int]]: - node_to_cells: Dict[int, Set[int]] = defaultdict(set) # TODO normally, just a list and not a set should be enough. - - fracture_nodes: Set[int] = set() - for fns in face_nodes: - for n in fns: - fracture_nodes.add(n) - - for cell_id in tqdm(range(mesh.GetNumberOfCells()), desc="Computing the node to cells mapping"): - cell_points: FrozenSet[int] = frozenset(vtk_iter(mesh.GetCell(cell_id).GetPointIds())) - intersection: Iterable[int] = cell_points & fracture_nodes - for node in intersection: - node_to_cells[node].add(cell_id) - - return node_to_cells - - -def __build_fracture_info_from_fields(mesh: vtkUnstructuredGrid, - f: Sequence[int], - field_values: FrozenSet[int]) -> FractureInfo: - cells_to_faces: Dict[int, List[int]] = defaultdict(list) - # For each face of each cell, we search for the unique neighbor cell (if it exists). - # Then, if the 2 values of the two cells match the field requirements, - # we store the cell and its local face index: this is indeed part of the surface that we'll need to be split. - cell: vtkCell - for cell_id in tqdm(range(mesh.GetNumberOfCells()), desc="Computing the cell to faces mapping"): - if f[cell_id] not in field_values: # No need to consider a cell if its field value is not in the target range. - continue - cell = mesh.GetCell(cell_id) - for i in range(cell.GetNumberOfFaces()): - neighbor_cell_ids = vtkIdList() - mesh.GetCellNeighbors(cell_id, cell.GetFace(i).GetPointIds(), neighbor_cell_ids) - assert neighbor_cell_ids.GetNumberOfIds() < 2 - for j in range(neighbor_cell_ids.GetNumberOfIds()): # It's 0 or 1... - neighbor_cell_id = neighbor_cell_ids.GetId(j) - if f[neighbor_cell_id] != f[cell_id] and f[neighbor_cell_id] in field_values: - cells_to_faces[cell_id].append(i) # TODO add this (cell_is, face_id) information to the fracture_info? - face_nodes: List[Collection[int]] = list() - face_nodes_hashes: Set[FrozenSet[int]] = set() # A temporary not to add multiple times the same face. 
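The deduplication mentioned in the comment above hashes each face by the frozenset of its nodes, so the same face reached from two neighbouring cells is stored only once; a minimal sketch with hypothetical node ids:

```python
# Hypothetical faces, for illustration only: the same face listed with two
# different node orderings collapses to a single entry.
face_nodes, face_nodes_hashes = [], set()
for fn in [(3, 4, 8, 7), (7, 8, 4, 3)]:
    fnh = frozenset(fn)
    if fnh not in face_nodes_hashes:
        face_nodes_hashes.add(fnh)
        face_nodes.append(fn)
assert len(face_nodes) == 1
```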
- for cell_id, faces_ids in tqdm(cells_to_faces.items(), desc="Extracting the faces of the fractures"): - cell = mesh.GetCell(cell_id) - for face_id in faces_ids: - fn: Collection[int] = tuple(vtk_iter(cell.GetFace(face_id).GetPointIds())) - fnh = frozenset(fn) - if fnh not in face_nodes_hashes: - face_nodes_hashes.add(fnh) - face_nodes.append(fn) - node_to_cells: Mapping[int, Iterable[int]] = build_node_to_cells(mesh, face_nodes) - - return FractureInfo(node_to_cells=node_to_cells, face_nodes=face_nodes) - - -def __build_fracture_info_from_internal_surfaces(mesh: vtkUnstructuredGrid, - f: Sequence[int], - field_values: FrozenSet[int]) -> FractureInfo: - node_to_cells: Dict[int, List[int]] = {} - face_nodes: List[Collection[int]] = [] - for cell_id in tqdm(range(mesh.GetNumberOfCells()), desc="Computing the face to nodes mapping"): - cell = mesh.GetCell(cell_id) - if cell.GetCellDimension() == 2: - if f[cell_id] in field_values: - nodes = [] - for v in range(cell.GetNumberOfPoints()): - point_id: int = cell.GetPointId(v) - node_to_cells[point_id] = [] - nodes.append(point_id) - face_nodes.append(tuple(nodes)) - - for cell_id in tqdm(range(mesh.GetNumberOfCells()), desc="Computing the node to cells mapping"): - cell = mesh.GetCell(cell_id) - if cell.GetCellDimension() == 3: - for v in range(cell.GetNumberOfPoints()): - if cell.GetPointId(v) in node_to_cells: - node_to_cells[cell.GetPointId(v)].append(cell_id) - - return FractureInfo(node_to_cells=node_to_cells, face_nodes=face_nodes) - - -def build_fracture_info(mesh: vtkUnstructuredGrid, - options: Options) -> FractureInfo: - field = options.field - field_values = options.field_values - cell_data = mesh.GetCellData() - if cell_data.HasArray(field): - f = vtk_to_numpy(cell_data.GetArray(field)) - else: - raise ValueError(f"Cell field {field} does not exist in mesh, nothing done") - - if options.policy == FracturePolicy.FIELD: - return __build_fracture_info_from_fields(mesh, f, field_values) - elif options.policy == FracturePolicy.INTERNAL_SURFACES: - return __build_fracture_info_from_internal_surfaces(mesh, f, field_values) - - -def build_cell_to_cell_graph(mesh: vtkUnstructuredGrid, - fracture: FractureInfo) -> networkx.Graph: - """ - Connects all the cells that touch the fracture by at least one node. - Two cells are connected when they share at least a face which is not a face of the fracture. - :param mesh: The input mesh. - :param fracture: The fracture info. - :return: The graph: each node of this graph is the index of the cell. - There's an edge between two nodes of the graph if the cells share a face. - """ - # Faces are identified by their nodes. But the order of those nodes may vary while referring to the same face. - # Therefore we compute some kinds of hashes of those face to easily detect if a face is part of the fracture. - tmp: List[FrozenSet[int]] = [] - for fn in fracture.face_nodes: - tmp.append(frozenset(fn)) - face_hashes: FrozenSet[FrozenSet[int]] = frozenset(tmp) - - # We extract the list of the cells that touch the fracture by at least one node. - cells: Set[int] = set() - for cell_ids in fracture.node_to_cells.values(): - for cell_id in cell_ids: - cells.add(cell_id) - - # Using the last precomputed containers, we're now building the dict which connects - # every face (hash) of the fracture to the cells that touch the face... 
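As described in the comment above, a face (identified by the frozenset of its node ids) touched by exactly two cells becomes an edge of the cell-to-cell graph; a minimal sketch of that pattern with hypothetical cell and node ids:

```python
# Hypothetical cell and node ids, for illustration only.
from collections import defaultdict
import networkx

face_to_cells = defaultdict(list)
face_to_cells[frozenset({1, 2, 5, 6})].extend((10, 11))   # interior face shared by cells 10 and 11
face_to_cells[frozenset({2, 3, 6, 7})].append(11)         # face touching a single cell

cell_to_cell = networkx.Graph()
cell_to_cell.add_nodes_from((10, 11))
cell_to_cell.add_edges_from(filter(lambda cs: len(cs) == 2, face_to_cells.values()))
assert cell_to_cell.has_edge(10, 11)
```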
- face_to_cells: Dict[FrozenSet[int], List[int]] = defaultdict(list) - for cell_id in tqdm(cells, desc="Computing the cell to cell graph"): - cell: vtkCell = mesh.GetCell(cell_id) - for face_id in range(cell.GetNumberOfFaces()): - face_hash: FrozenSet[int] = frozenset(vtk_iter(cell.GetFace(face_id).GetPointIds())) - if face_hash not in face_hashes: - face_to_cells[face_hash].append(cell_id) - - # ... eventually, when a face touches two cells, this means that those two cells share the same face - # and should be connected in the final cell to cell graph. - cell_to_cell = networkx.Graph() - cell_to_cell.add_nodes_from(cells) - cell_to_cell.add_edges_from(filter(lambda cs: len(cs) == 2, face_to_cells.values())) - - return cell_to_cell - - -def __identify_split(num_points: int, - cell_to_cell: networkx.Graph, - node_to_cells: Mapping[int, Iterable[int]]) -> Mapping[int, Mapping[int, int]]: - """ - For each cell, compute the node indices replacements. - :param num_points: Number of points in the whole mesh (not the fracture). - :param cell_to_cell: The cell to cell graph (connection through common faces). - :param node_to_cells: Maps the nodes of the fracture to the cells relying on this node. - :return: For each cell (first key), returns a mapping from the current index - and the new index that should replace the current index. - Note that the current index and the new index can be identical: no replacement should be done then. - """ - - class NewIndex: - """ - Returns the next available index. - Note that the first time an index is met, the index itself is returned: - we do not want to change an index if we do not have to. - """ - def __init__(self, num_nodes: int): - self.__current_last_index = num_nodes - 1 - self.__seen: Set[int] = set() - - def __call__(self, index: int) -> int: - if index in self.__seen: - self.__current_last_index += 1 - return self.__current_last_index - else: - self.__seen.add(index) - return index - - build_new_index = NewIndex(num_points) - result: Dict[int, Dict[int, int]] = defaultdict(dict) - for node, cells in tqdm(sorted(node_to_cells.items()), # Iteration over `sorted` nodes to have a predictable result for tests. - desc="Identifying the node splits"): - for connected_cells in networkx.connected_components(cell_to_cell.subgraph(cells)): - # Each group of connect cells need around `node` must consider the same `node`. - # Separate groups must have different (duplicated) nodes. - new_index: int = build_new_index(node) - for cell in connected_cells: - result[cell][node] = new_index - return result - - -def __copy_fields(old_mesh: vtkUnstructuredGrid, - new_mesh: vtkUnstructuredGrid, - collocated_nodes: Sequence[int]) -> None: - """ - Copies the fields from the old mesh to the new one. - Point data will be duplicated for collocated nodes. - :param old_mesh: The mesh before the split. - :param new_mesh: The mesh after the split. Will receive the fields in place. - :param collocated_nodes: New index to old index. - :return: None - """ - # Copying the cell data. - # The cells are the same, just their nodes support have changed. - input_cell_data = old_mesh.GetCellData() - for i in range(input_cell_data.GetNumberOfArrays()): - input_array = input_cell_data.GetArray(i) - logging.info(f"Copying cell field \"{input_array.GetName()}\".") - new_mesh.GetCellData().AddArray(input_array) - - # Copying field data. This data is a priori not related to geometry. 
- input_field_data = old_mesh.GetFieldData() - for i in range(input_field_data.GetNumberOfArrays()): - input_array = input_field_data.GetArray(i) - logging.info(f"Copying field data \"{input_array.GetName()}\".") - new_mesh.GetFieldData().AddArray(input_array) - - # Copying the point data. - input_point_data = old_mesh.GetPointData() - for i in range(input_point_data.GetNumberOfArrays()): - input_array = input_point_data.GetArray(i) - logging.info(f"Copying point field \"{input_array.GetName()}\"") - tmp = input_array.NewInstance() - tmp.SetName(input_array.GetName()) - tmp.SetNumberOfComponents(input_array.GetNumberOfComponents()) - tmp.SetNumberOfTuples(new_mesh.GetNumberOfPoints()) - for p in range(tmp.GetNumberOfTuples()): - tmp.SetTuple(p, input_array.GetTuple(collocated_nodes[p])) - new_mesh.GetPointData().AddArray(tmp) - - -def __perform_split(old_mesh: vtkUnstructuredGrid, - cell_to_node_mapping: Mapping[int, Mapping[int, int]]) -> vtkUnstructuredGrid: - """ - Split the main 3d mesh based on the node duplication information contained in @p cell_to_node_mapping - :param old_mesh: The main 3d mesh. - :param cell_to_node_mapping: For each cell, gives the nodes that must be duplicated and their new index. - :return: The main 3d mesh split at the fracture location. - """ - added_points: Set[int] = set() - for node_mapping in cell_to_node_mapping.values(): - for i, o in node_mapping.items(): - if i != o: - added_points.add(o) - num_new_points: int = old_mesh.GetNumberOfPoints() + len(added_points) - - # Creating the new points for the new mesh. - old_points: vtkPoints = old_mesh.GetPoints() - new_points = vtkPoints() - new_points.SetNumberOfPoints(num_new_points) - collocated_nodes = numpy.ones(num_new_points, dtype=int) * -1 - # Copying old points into the new container. - for p in range(old_points.GetNumberOfPoints()): - new_points.SetPoint(p, old_points.GetPoint(p)) - collocated_nodes[p] = p - # Creating the new collocated/duplicated points based on the old points positions. - for node_mapping in cell_to_node_mapping.values(): - for i, o in node_mapping.items(): - if i != o: - new_points.SetPoint(o, old_points.GetPoint(i)) - collocated_nodes[o] = i - collocated_nodes.flags.writeable = False - - # We are creating a new mesh. - # The cells will be the same, except that their nodes may be duplicated or renumbered nodes. - # In vtk, the polyhedron and the standard cells are managed differently. - # Also, it looks like the internal representation is being modified - # (see https://gitlab.kitware.com/vtk/vtk/-/merge_requests/9812) - # so we'll try nothing fancy for the moment. - # Maybe in the future using a `DeepCopy` of the vtkCellArray can be considered? - # The cell point ids could be modified in place then. - new_mesh = old_mesh.NewInstance() - new_mesh.SetPoints(new_points) - new_mesh.Allocate(old_mesh.GetNumberOfCells()) - - for c in tqdm(range(old_mesh.GetNumberOfCells()), desc="Performing the mesh split"): - node_mapping: Mapping[int, int] = cell_to_node_mapping.get(c, {}) - cell: vtkCell = old_mesh.GetCell(c) - cell_type: int = cell.GetCellType() - # For polyhedron, we'll manipulate the face stream directly. 
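In both branches below, the `node_mapping.get(current_point_id, current_point_id)` lookup is what leaves non-duplicated nodes untouched while remapping the split ones; a minimal sketch with hypothetical indices:

```python
# Hypothetical mapping, for illustration only: only nodes 4 and 7 were split.
node_mapping = {4: 12, 7: 13}            # old node index -> duplicated node index
cell_point_ids = [2, 4, 7, 9]
new_point_ids = [node_mapping.get(p, p) for p in cell_point_ids]
assert new_point_ids == [2, 12, 13, 9]
```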
- if cell_type == VTK_POLYHEDRON: - face_stream = vtkIdList() - old_mesh.GetFaceStream(c, face_stream) - new_face_nodes: List[List[int]] = [] - for face_nodes in FaceStream.build_from_vtk_id_list(face_stream).face_nodes: - new_point_ids = [] - for current_point_id in face_nodes: - new_point_id: int = node_mapping.get(current_point_id, current_point_id) - new_point_ids.append(new_point_id) - new_face_nodes.append(new_point_ids) - new_mesh.InsertNextCell(cell_type, to_vtk_id_list(FaceStream(new_face_nodes).dump())) - else: - # For the standard cells, we extract the point ids of the cell directly. - # Then the values will be (potentially) overwritten in place, before being sent back into the cell. - cell_point_ids: vtkIdList = cell.GetPointIds() - for i in range(cell_point_ids.GetNumberOfIds()): - current_point_id: int = cell_point_ids.GetId(i) - new_point_id: int = node_mapping.get(current_point_id, current_point_id) - cell_point_ids.SetId(i, new_point_id) - new_mesh.InsertNextCell(cell_type, cell_point_ids) - - __copy_fields(old_mesh, new_mesh, collocated_nodes) - - return new_mesh - - -def __generate_fracture_mesh(mesh_points: vtkPoints, - fracture_info: FractureInfo, - cell_to_node_mapping: Mapping[int, Mapping[int, int]]) -> vtkUnstructuredGrid: - """ - Generates the mesh of the fracture. - :param mesh_points: The points of the main 3d mesh. - :param fracture_info: The fracture description. - :param cell_to_node_mapping: For each cell, gives the nodes that must be duplicated and their new index. - :return: The fracture mesh. - """ - logging.info("Generating the meshes") - - is_node_duplicated = numpy.zeros(mesh_points.GetNumberOfPoints(), dtype=bool) # defaults to False - for node_mapping in cell_to_node_mapping.values(): - for i, o in node_mapping.items(): - if not is_node_duplicated[i]: - is_node_duplicated[i] = i != o - - # Some elements can have all their nodes not duplicated. - # In this case, it's mandatory not get rid of this element - # because the neighboring 3d elements won't follow. - face_nodes: List[Collection[int]] = [] - discarded_face_nodes: Set[Iterable[int]] = set() - for ns in fracture_info.face_nodes: - if any(map(is_node_duplicated.__getitem__, ns)): - face_nodes.append(ns) - else: - discarded_face_nodes.add(ns) - - if discarded_face_nodes: - # tmp = [] - # for dfns in discarded_face_nodes: - # tmp.append(", ".join(map(str, dfns))) - msg: str = "(" + '), ('.join(map(lambda dfns: ", ".join(map(str, dfns)), discarded_face_nodes)) + ")" - # logging.info(f"The {len(tmp)} faces made of nodes ({'), ('.join(tmp)}) were/was discarded from the fracture mesh because none of their/its nodes were duplicated.") - # print(f"The {len(tmp)} faces made of nodes ({'), ('.join(tmp)}) were/was discarded from the fracture mesh because none of their/its nodes were duplicated.") - print(f"The faces made of nodes [{msg}] were/was discarded from the fracture mesh because none of their/its nodes were duplicated.") - - fracture_nodes_tmp = numpy.ones(mesh_points.GetNumberOfPoints(), dtype=int) * -1 - for ns in face_nodes: - for n in ns: - fracture_nodes_tmp[n] = n - fracture_nodes: Collection[int] = tuple(filter(lambda n: n > -1, fracture_nodes_tmp)) - num_points: int = len(fracture_nodes) - points = vtkPoints() - points.SetNumberOfPoints(num_points) - node_3d_to_node_2d: Dict[int, int] = {} # Building the node mapping, from 3d mesh nodes to 2d fracture nodes. 
- for i, n in enumerate(fracture_nodes): - coords: Tuple[float, float, float] = mesh_points.GetPoint(n) - points.SetPoint(i, coords) - node_3d_to_node_2d[n] = i - - polygons = vtkCellArray() - for ns in face_nodes: - polygon = vtkPolygon() - polygon.GetPointIds().SetNumberOfIds(len(ns)) - for i, n in enumerate(ns): - polygon.GetPointIds().SetId(i, node_3d_to_node_2d[n]) - polygons.InsertNextCell(polygon) - - buckets: Dict[int, Set[int]] = defaultdict(set) - for node_mapping in cell_to_node_mapping.values(): - for i, o in node_mapping.items(): - k: Optional[int] = node_3d_to_node_2d.get(min(i, o)) - if k is not None: - buckets[k].update((i, o)) - - assert set(buckets.keys()) == set(range(num_points)) - max_collocated_nodes: int = max(map(len, buckets.values())) if buckets.values() else 0 - collocated_nodes = numpy.ones((num_points, max_collocated_nodes), dtype=int) * -1 - for i, bucket in buckets.items(): - for j, val in enumerate(bucket): - collocated_nodes[i, j] = val - array = numpy_to_vtk(collocated_nodes, array_type=VTK_ID_TYPE) - array.SetName("collocated_nodes") - - fracture_mesh = vtkUnstructuredGrid() # We could be using vtkPolyData, but it's not supported by GEOS for now. - fracture_mesh.SetPoints(points) - if polygons.GetNumberOfCells() > 0: - fracture_mesh.SetCells([VTK_POLYGON] * polygons.GetNumberOfCells(), polygons) - fracture_mesh.GetPointData().AddArray(array) - return fracture_mesh - - -def __split_mesh_on_fracture(mesh: vtkUnstructuredGrid, - options: Options) -> Tuple[vtkUnstructuredGrid, vtkUnstructuredGrid]: - fracture: FractureInfo = build_fracture_info(mesh, options) - cell_to_cell: networkx.Graph = build_cell_to_cell_graph(mesh, fracture) - cell_to_node_mapping: Mapping[int, Mapping[int, int]] = __identify_split(mesh.GetNumberOfPoints(), - cell_to_cell, - fracture.node_to_cells) - output_mesh: vtkUnstructuredGrid = __perform_split(mesh, cell_to_node_mapping) - fractured_mesh: vtkUnstructuredGrid = __generate_fracture_mesh(mesh.GetPoints(), fracture, cell_to_node_mapping) - return output_mesh, fractured_mesh - - -def __check(mesh, options: Options) -> Result: - output_mesh, fracture_mesh = __split_mesh_on_fracture(mesh, options) - vtk_utils.write_mesh(output_mesh, options.vtk_output) - vtk_utils.write_mesh(fracture_mesh, options.vtk_fracture_output) - # TODO provide statistics about what was actually performed (size of the fracture, number of split nodes...). - return Result(info="OK") - - -def check(vtk_input_file: str, options: Options) -> Result: - try: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) - except BaseException as e: - logging.error(e) - return Result(info="Something went wrong") diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_global_ids.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_global_ids.py deleted file mode 100644 index 80474e22358..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/generate_global_ids.py +++ /dev/null @@ -1,68 +0,0 @@ -from dataclasses import dataclass -import logging - -from vtkmodules.vtkCommonCore import ( - vtkIdTypeArray, ) - -from . 
import vtk_utils -from .vtk_utils import ( - VtkOutput, -) - - -@dataclass(frozen=True) -class Options: - vtk_output: VtkOutput - generate_cells_global_ids: bool - generate_points_global_ids: bool - - -@dataclass(frozen=True) -class Result: - info: str - - -def __build_global_ids(mesh, - generate_cells_global_ids: bool, - generate_points_global_ids: bool) -> None: - """ - Adds the global ids for cells and points in place into the mesh instance. - :param mesh: - :return: None - """ - # Building GLOBAL_IDS for points and cells.g GLOBAL_IDS for points and cells. - # First for points... - if mesh.GetPointData().GetGlobalIds(): - logging.error("Mesh already has globals ids for points; nothing done.") - elif generate_points_global_ids: - point_global_ids = vtkIdTypeArray() - point_global_ids.SetName("GLOBAL_IDS_POINTS") - point_global_ids.Allocate(mesh.GetNumberOfPoints()) - for i in range(mesh.GetNumberOfPoints()): - point_global_ids.InsertNextValue(i) - mesh.GetPointData().SetGlobalIds(point_global_ids) - # ... then for cells. - if mesh.GetCellData().GetGlobalIds(): - logging.error("Mesh already has globals ids for cells; nothing done.") - elif generate_cells_global_ids: - cells_global_ids = vtkIdTypeArray() - cells_global_ids.SetName("GLOBAL_IDS_CELLS") - cells_global_ids.Allocate(mesh.GetNumberOfCells()) - for i in range(mesh.GetNumberOfCells()): - cells_global_ids.InsertNextValue(i) - mesh.GetCellData().SetGlobalIds(cells_global_ids) - - -def __check(mesh, options: Options) -> Result: - __build_global_ids(mesh, options.generate_cells_global_ids, options.generate_points_global_ids) - vtk_utils.write_mesh(mesh, options.vtk_output) - return Result(info=f"Mesh was written to {options.vtk_output.output}") - - -def check(vtk_input_file: str, options: Options) -> Result: - try: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) - except BaseException as e: - logging.error(e) - return Result(info="Something went wrong.") diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/non_conformal.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/non_conformal.py deleted file mode 100644 index 43f26e2391d..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/non_conformal.py +++ /dev/null @@ -1,432 +0,0 @@ -from dataclasses import dataclass -import math -from typing import List, Tuple, Any -import numpy - -from tqdm import tqdm - -from vtkmodules.vtkCommonCore import ( - vtkIdList, - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_POLYHEDRON, - vtkBoundingBox, - vtkCell, - vtkCellArray, - vtkPointSet, - vtkPolyData, - vtkStaticCellLocator, - vtkStaticPointLocator, - vtkUnstructuredGrid, -) -from vtkmodules.vtkCommonTransforms import ( - vtkTransform, -) -from vtkmodules.vtkFiltersCore import ( - vtkPolyDataNormals, -) -from vtkmodules.vtkFiltersGeometry import ( - vtkDataSetSurfaceFilter, -) -from vtkmodules.vtkFiltersModeling import ( - vtkCollisionDetectionFilter, - vtkLinearExtrusionFilter, -) -from vtk import reference as vtk_reference - -from .reorient_mesh import reorient_mesh - -from . import vtk_utils - -from .vtk_polyhedron import ( - vtk_iter, -) - -from . 
import triangle_distance - - -@dataclass(frozen=True) -class Options: - angle_tolerance: float - point_tolerance: float - face_tolerance: float - - -@dataclass(frozen=True) -class Result: - non_conformal_cells: List[Tuple[int, int]] - - -class BoundaryMesh: - """ - A BoundaryMesh is the envelope of the 3d mesh on which we want to perform the simulations. - It is computed by vtk. But we want to be sure that the normals of the envelope are directed outwards. - The `vtkDataSetSurfaceFilter` does not have the same behavior for standard vtk cells (like tets or hexs), - and for polyhedron meshes, for which the result is a bit brittle. - Therefore, we reorient the polyhedron cells ourselves, so we're sure that they point outwards. - And then we compute the boundary meshes for both meshes, given that the computing options are not identical. - """ - def __init__(self, mesh: vtkUnstructuredGrid): - """ - Builds a boundary mesh. - :param mesh: The 3d mesh. - """ - # Building the boundary meshes - boundary_mesh, __normals, self.__original_cells = BoundaryMesh.__build_boundary_mesh(mesh) - cells_to_reorient = filter(lambda c: mesh.GetCell(c).GetCellType() == VTK_POLYHEDRON, - map(self.__original_cells.GetValue, - range(self.__original_cells.GetNumberOfValues()))) - reoriented_mesh = reorient_mesh(mesh, cells_to_reorient) - self.re_boundary_mesh, re_normals, _ = BoundaryMesh.__build_boundary_mesh(reoriented_mesh, consistency=False) - num_cells = boundary_mesh.GetNumberOfCells() - # Precomputing the underlying cell type - self.__is_underlying_cell_type_a_polyhedron = numpy.zeros(num_cells, dtype=bool) - for ic in range(num_cells): - self.__is_underlying_cell_type_a_polyhedron[ic] = mesh.GetCell(self.__original_cells.GetValue(ic)).GetCellType() == VTK_POLYHEDRON - # Precomputing the normals - self.__normals: numpy.ndarray = numpy.empty((num_cells, 3), dtype=numpy.double, order='C') # Do not modify the storage layout - for ic in range(num_cells): - if self.__is_underlying_cell_type_a_polyhedron[ic]: - self.__normals[ic, :] = re_normals.GetTuple3(ic) - else: - self.__normals[ic, :] = __normals.GetTuple3(ic) - @staticmethod - def __build_boundary_mesh(mesh: vtkUnstructuredGrid, consistency=True) -> Tuple[vtkUnstructuredGrid, Any, Any]: - """ - From a 3d mesh, build the envelope meshes. - :param mesh: The input 3d mesh. - :param consistency: The vtk option passed to the `vtkDataSetSurfaceFilter`. - :return: A tuple containing the boundary mesh, the normal vectors array, - an array that maps the id of the boundary element to the id of the 3d cell it touches. 
- """ - f = vtkDataSetSurfaceFilter() - f.PassThroughCellIdsOn() - f.PassThroughPointIdsOff() - f.FastModeOff() - - # Note that we do not need the original points, but we could keep them as well if needed - original_cells_key = "ORIGINAL_CELLS" - f.SetOriginalCellIdsName(original_cells_key) - - boundary_mesh = vtkPolyData() - f.UnstructuredGridExecute(mesh, boundary_mesh) - - n = vtkPolyDataNormals() - n.SetConsistency(consistency) - n.SetAutoOrientNormals(consistency) - n.FlipNormalsOff() - n.ComputeCellNormalsOn() - n.SetInputData(boundary_mesh) - n.Update() - normals = n.GetOutput().GetCellData().GetArray("Normals") - assert normals - assert normals.GetNumberOfComponents() == 3 - assert normals.GetNumberOfTuples() == boundary_mesh.GetNumberOfCells() - original_cells = boundary_mesh.GetCellData().GetArray(original_cells_key) - assert original_cells - return boundary_mesh, normals, original_cells - - def GetNumberOfCells(self) -> int: - """ - The number of cells. - :return: An integer. - """ - return self.re_boundary_mesh.GetNumberOfCells() - - def GetNumberOfPoints(self) -> int: - """ - The number of points. - :return: An integer. - """ - return self.re_boundary_mesh.GetNumberOfPoints() - - def bounds(self, i) -> Tuple[float, float, float, float, float, float]: - """ - The boundrary box of cell `i`. - :param i: The boundary cell index. - :return: The vtk bounding box. - """ - return self.re_boundary_mesh.GetCell(i).GetBounds() - - def normals(self, i) -> numpy.ndarray: - """ - The normal of cell `i`. This normal will be directed outwards - :param i: The boundary cell index. - :return: The normal as a length-3 numpy array. - """ - return self.__normals[i] - - def GetCell(self, i) -> vtkCell: - """ - Cell i of the boundary mesh. This cell will have its normal directed outwards. - :param i: The boundary cell index. - :return: The cell instance. - :warning: This member function relies on the vtkUnstructuredGrid.GetCell member function which is not thread safe. - """ - return self.re_boundary_mesh.GetCell(i) - - def GetPoint(self, i) -> Tuple[float, float, float]: - """ - Point i of the boundary mesh. - :param i: The boundary point index. - :return: A length-3 tuple containing the coordinates of the point. - :warning: This member function relies on the vtkUnstructuredGrid.GetPoint member function which is not thread safe. - """ - return self.re_boundary_mesh.GetPoint(i) - - @property - def original_cells(self): - """ - Returns the 2d boundary cell to the 3d cell index of the original mesh. - :return: A 1d array. - """ - return self.__original_cells - - -def build_poly_data_for_extrusion(i: int, boundary_mesh: BoundaryMesh) -> vtkPolyData: - """ - Creates a vtkPolyData containing the unique cell `i` of the boundary mesh. - This operation is needed to use the vtk extrusion filter. - :param i: The boundary cell index that will eventually be extruded. - :param boundary_mesh: - :return: The created vtkPolyData. 
- """ - cell = boundary_mesh.GetCell(i) - copied_cell = cell.NewInstance() - copied_cell.DeepCopy(cell) - points_ids_mapping = [] - for i in range(copied_cell.GetNumberOfPoints()): - copied_cell.GetPointIds().SetId(i, i) - points_ids_mapping.append(cell.GetPointId(i)) - polygons = vtkCellArray() - polygons.InsertNextCell(copied_cell) - points = vtkPoints() - points.SetNumberOfPoints(len(points_ids_mapping)) - for i, v in enumerate(points_ids_mapping): - points.SetPoint(i, boundary_mesh.GetPoint(v)) - polygon_poly_data = vtkPolyData() - polygon_poly_data.SetPoints(points) - polygon_poly_data.SetPolys(polygons) - return polygon_poly_data - - -def are_points_conformal(point_tolerance: float, cell_i: vtkCell, cell_j: vtkCell) -> bool: - """ - Checks if points of cell `i` matches, one by one, the points of cell `j`. - :param point_tolerance: The point tolerance to consider that two points match. - :param cell_i: The first cell. - :param cell_j: The second cell. - :return: A boolean. - """ - # In this last step, we check that the nodes are (or not) matching each other. - if cell_i.GetNumberOfPoints() != cell_j.GetNumberOfPoints(): - return True - - point_locator = vtkStaticPointLocator() - points = vtkPointSet() - points.SetPoints(cell_i.GetPoints()) - point_locator.SetDataSet(points) - point_locator.BuildLocator() - found_points = set() - for ip in range(cell_j.GetNumberOfPoints()): - p = cell_j.GetPoints().GetPoint(ip) - squared_dist = vtk_reference(0.) # unused - found_point = point_locator.FindClosestPointWithinRadius(point_tolerance, p, squared_dist) - found_points.add(found_point) - return found_points == set(range(cell_i.GetNumberOfPoints())) - - -class Extruder: - """ - Computes and stores all the extrusions of the boundary faces. - The main reason for this class is to be lazy and cache the extrusions. - """ - def __init__(self, boundary_mesh: BoundaryMesh, face_tolerance: float): - self.__extrusions: List[vtkPolyData] = [None, ] * boundary_mesh.GetNumberOfCells() - self.__boundary_mesh = boundary_mesh - self.__face_tolerance = face_tolerance - - def __extrude(self, polygon_poly_data, normal) -> vtkPolyData: - """ - Extrude the polygon data to create a volume that will be used for intersection. - :param polygon_poly_data: The data to extrude - :param normal: The (uniform) direction of the extrusion. - :return: The extrusion. - """ - extruder = vtkLinearExtrusionFilter() - extruder.SetExtrusionTypeToVectorExtrusion() - extruder.SetVector(normal) - extruder.SetScaleFactor(self.__face_tolerance / 2.) - extruder.SetInputData(polygon_poly_data) - extruder.Update() - return extruder.GetOutput() - - def __getitem__(self, i) -> vtkPolyData: - """ - Returns the vtk extrusion for boundary element i. - :param i: The cell index. - :return: The vtk instance. - """ - extrusion = self.__extrusions[i] - if extrusion: - return extrusion - extrusion = self.__extrude(build_poly_data_for_extrusion(i, self.__boundary_mesh), - self.__boundary_mesh.normals(i)) - self.__extrusions[i] = extrusion - return extrusion - - -def are_faces_conformal_using_extrusions(extrusions: Extruder, - i: int, j: int, - boundary_mesh: vtkUnstructuredGrid, - point_tolerance: float) -> bool: - """ - Tests if two boundary faces are conformal, checking for intersection between their normal extruded volumes. - :param extrusions: The extrusions cache. - :param i: The cell index of the first cell. - :param j: The cell index of the second cell. - :param boundary_mesh: The boundary mesh. 
- :param point_tolerance: The point tolerance to consider that two points match. - :return: A boolean. - """ - collision = vtkCollisionDetectionFilter() - collision.SetCollisionModeToFirstContact() - collision.SetInputData(0, extrusions[i]) - collision.SetInputData(1, extrusions[j]) - m_i = vtkTransform() - m_j = vtkTransform() - collision.SetTransform(0, m_i) - collision.SetTransform(1, m_j) - collision.Update() - - if collision.GetNumberOfContacts() == 0: - return True - - # Duplicating data not to risk anything w.r.t. thread safety of the GetCell function. - cell_i = boundary_mesh.GetCell(i) - copied_cell_i = cell_i.NewInstance() - copied_cell_i.DeepCopy(cell_i) - - return are_points_conformal(point_tolerance, copied_cell_i, boundary_mesh.GetCell(j)) - - -def are_faces_conformal_using_distances(i: int, j: int, - boundary_mesh: vtkUnstructuredGrid, - face_tolerance: float, point_tolerance: float) -> bool: - """ - Tests if two boundary faces are conformal, checking the minimal distance between triangulated surfaces. - :param i: The cell index of the first cell. - :param j: The cell index of the second cell. - :param boundary_mesh: The boundary mesh. - :param face_tolerance: The tolerance under which we should consider the two faces "touching" each other. - :param point_tolerance: The point tolerance to consider that two points match. - :return: A boolean. - """ - cp_i = boundary_mesh.GetCell(i).NewInstance() - cp_i.DeepCopy(boundary_mesh.GetCell(i)) - cp_j = boundary_mesh.GetCell(j).NewInstance() - cp_j.DeepCopy(boundary_mesh.GetCell(j)) - - def triangulate(cell): - assert cell.GetCellDimension() == 2 - __points_ids = vtkIdList() - __points = vtkPoints() - cell.Triangulate(0, __points_ids, __points) - __points_ids = tuple(vtk_iter(__points_ids)) - assert len(__points_ids) % 3 == 0 - assert __points.GetNumberOfPoints() % 3 == 0 - return __points_ids, __points - - points_ids_i, points_i = triangulate(cp_i) - points_ids_j, points_j = triangulate(cp_j) - - def build_numpy_triangles(points_ids): - __triangles = [] - for __i in range(0, len(points_ids), 3): - __t = [] - for __pi in points_ids[__i: __i + 3]: - __t.append(boundary_mesh.GetPoint(__pi)) - __triangles.append(numpy.array(__t, dtype=float)) - return __triangles - - triangles_i = build_numpy_triangles(points_ids_i) - triangles_j = build_numpy_triangles(points_ids_j) - - min_dist = numpy.inf - for ti, tj in [(ti, tj) for ti in triangles_i for tj in triangles_j]: - # Note that here, we compute the exact distance to compare with the threshold. - # We could improve by exiting the iterative distance computation as soon as - # we're sure we're smaller than the threshold. No need of the exact solution. - dist, _, _ = triangle_distance.distance_between_two_triangles(ti, tj) - if dist < min_dist: - min_dist = dist - if min_dist < face_tolerance: - break - if min_dist > face_tolerance: - return True - - return are_points_conformal(point_tolerance, cp_i, cp_j) - - -def __check(mesh: vtkUnstructuredGrid, options: Options) -> Result: - """ - Checks if the mesh is "conformal" (i.e. if some of its boundary faces may not be too close to each other without matching nodes). - :param mesh: The vtk mesh - :param options: The check options. - :return: The Result instance. 
- """ - boundary_mesh = BoundaryMesh(mesh) - cos_theta = abs(math.cos(numpy.deg2rad(options.angle_tolerance))) - num_cells = boundary_mesh.GetNumberOfCells() - - # Computing the exact number of cells per node - num_cells_per_node = numpy.zeros(boundary_mesh.GetNumberOfPoints(), dtype=int) - for ic in range(boundary_mesh.GetNumberOfCells()): - c = boundary_mesh.GetCell(ic) - point_ids = c.GetPointIds() - for point_id in vtk_iter(point_ids): - num_cells_per_node[point_id] += 1 - - cell_locator = vtkStaticCellLocator() - cell_locator.Initialize() - cell_locator.SetNumberOfCellsPerNode(num_cells_per_node.max()) - cell_locator.SetDataSet(boundary_mesh.re_boundary_mesh) - cell_locator.BuildLocator() - - # Precomputing the bounding boxes. - # The options are important to directly interact with memory in C++. - bounding_boxes = numpy.empty((boundary_mesh.GetNumberOfCells(), 6), dtype=numpy.double, order="C") - for i in range(boundary_mesh.GetNumberOfCells()): - bb = vtkBoundingBox(boundary_mesh.bounds(i)) - bb.Inflate(2 * options.face_tolerance) - assert bounding_boxes[i, :].data.contiguous # Do not modify the storage layout since vtk deals with raw memory here. - bb.GetBounds(bounding_boxes[i, :]) - - non_conformal_cells = [] - extrusions = Extruder(boundary_mesh, options.face_tolerance) - close_cells = vtkIdList() - # Looping on all the pairs of boundary cells. We'll hopefully discard most of the pairs. - for i in tqdm(range(num_cells), desc="Non conformal elements"): - cell_locator.FindCellsWithinBounds(bounding_boxes[i], close_cells) - for j in vtk_iter(close_cells): - if j < i: - continue - # Discarding pairs that are not facing each others (with a threshold). - normal_i, normal_j = boundary_mesh.normals(i), boundary_mesh.normals(j) - if numpy.dot(normal_i, normal_j) > -cos_theta: # opposite directions only (can be facing or not) - continue - # At this point, back-to-back and face-to-face pairs of elements are considered. - if not are_faces_conformal_using_extrusions(extrusions, i, j, boundary_mesh, options.point_tolerance): - non_conformal_cells.append((i, j)) - # Extracting the original 3d element index (and not the index of the boundary mesh). 
- tmp = [] - for i, j in non_conformal_cells: - tmp.append((boundary_mesh.original_cells.GetValue(i), boundary_mesh.original_cells.GetValue(j))) - - return Result(non_conformal_cells=tmp) - - -def check(vtk_input_file: str, options: Options) -> Result: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/reorient_mesh.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/reorient_mesh.py deleted file mode 100644 index efb664bc06c..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/reorient_mesh.py +++ /dev/null @@ -1,178 +0,0 @@ -import logging -from typing import ( - Dict, - FrozenSet, - Iterator, - List, - Tuple, -) - -import numpy - -from tqdm import tqdm - -import networkx - -from vtkmodules.vtkCommonCore import ( - vtkIdList, - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_POLYHEDRON, - VTK_TRIANGLE, - vtkCellArray, - vtkPolyData, - vtkPolygon, - vtkUnstructuredGrid, - vtkTetra, -) -from vtkmodules.vtkFiltersCore import ( - vtkTriangleFilter, -) -from .vtk_utils import ( - to_vtk_id_list, -) - -from .vtk_polyhedron import ( - FaceStream, - build_face_to_face_connectivity_through_edges, -) - - -def __compute_volume(mesh_points: vtkPoints, face_stream: FaceStream) -> float: - """ - Computes the volume of a polyhedron element (defined by its face_stream). - :param mesh_points: The mesh points, needed to compute the volume. - :param face_stream: The vtk face stream. - :return: The volume of the element. - :note: The faces of the polyhedron are triangulated and the volumes of the tetrahedra - from the barycenter to the triangular bases are summed. - The normal of each face plays critical role, - since the volume of each tetrahedron can be positive or negative. - """ - # Triangulating the envelope of the polyhedron for further volume computation. - polygons = vtkCellArray() - for face_nodes in face_stream.face_nodes: - polygon = vtkPolygon() - polygon.GetPointIds().SetNumberOfIds(len(face_nodes)) - # We use the same global points numbering for the polygons than for the input mesh. - # There will be a lot of points in the poly data that won't be used as a support for the polygons. - # But the algorithm deals with it, and it's actually faster (and easier) to do this - # than to renumber and allocate a new fit-for-purpose set of points just for the polygons. - for i, point_id in enumerate(face_nodes): - polygon.GetPointIds().SetId(i, point_id) - polygons.InsertNextCell(polygon) - polygon_poly_data = vtkPolyData() - polygon_poly_data.SetPoints(mesh_points) - polygon_poly_data.SetPolys(polygons) - - f = vtkTriangleFilter() - f.SetInputData(polygon_poly_data) - f.Update() - triangles = f.GetOutput() - # Computing the barycenter that will be used as the tip of all the tetra which mesh the polyhedron. - # (The basis of all the tetra being the triangles of the envelope). - # We could take any point, not only the barycenter. - # But in order to work with figure of the same magnitude, let's compute the barycenter. - tmp_barycenter = numpy.empty((face_stream.num_support_points, 3), dtype=float) - for i, point_id in enumerate(face_stream.support_point_ids): - tmp_barycenter[i, :] = mesh_points.GetPoint(point_id) - barycenter = tmp_barycenter[:, 0].mean(), tmp_barycenter[:, 1].mean(), tmp_barycenter[:, 2].mean() - # Looping on all the triangles of the envelope of the polyhedron, creating the matching tetra. 
- # Then the volume of all the tetra are added to get the final polyhedron volume. - cell_volume = 0. - for i in range(triangles.GetNumberOfCells()): - triangle = triangles.GetCell(i) - assert triangle.GetCellType() == VTK_TRIANGLE - p = triangle.GetPoints() - cell_volume += vtkTetra.ComputeVolume(barycenter, p.GetPoint(0), p.GetPoint(1), p.GetPoint(2)) - return cell_volume - - -def __select_and_flip_faces(mesh_points: vtkPoints, - colors: Dict[FrozenSet[int], int], - face_stream: FaceStream) -> FaceStream: - """ - Given a polyhedra, given that we were able to paint the faces in two colors, - we now need to select which faces/color to flip such that the volume of the element is positive. - :param mesh_points: The mesh points, needed to compute the volume. - :param colors: Maps the nodes of each connected component (defined as a frozenset) to its color. - :param face_stream: the polyhedron. - :return: The face stream that leads to a positive volume. - """ - # Flipping either color 0 or 1. - color_to_nodes: Dict[int, List[int]] = {0: [], 1: []} - for connected_components_indices, color in colors.items(): - color_to_nodes[color] += connected_components_indices - # This implementation works even if there is one unique color. - # Admittedly, there will be one face stream that won't be flipped. - fs: Tuple[FaceStream, FaceStream] = face_stream.flip_faces(color_to_nodes[0]), face_stream.flip_faces(color_to_nodes[1]) - volumes = __compute_volume(mesh_points, fs[0]), __compute_volume(mesh_points, fs[1]) - # We keep the flipped element for which the volume is largest - # (i.e. positive, since they should be the opposite of each other). - return fs[numpy.argmax(volumes)] - - -def __reorient_element(mesh_points: vtkPoints, face_stream_ids: vtkIdList) -> vtkIdList: - """ - Considers a vtk face stream and flips the appropriate faces to get an element with normals directed outwards. - :param mesh_points: The mesh points, needed to compute the volume. - :param face_stream_ids: The raw vtk face stream, not converted into a more practical python class. - :return: The raw vtk face stream with faces properly flipped. - """ - face_stream = FaceStream.build_from_vtk_id_list(face_stream_ids) - face_graph = build_face_to_face_connectivity_through_edges(face_stream, add_compatibility=True) - # Removing the non-compatible connections to build the non-connected components. - g = networkx.Graph() - g.add_nodes_from(face_graph.nodes) - g.add_edges_from(filter(lambda uvd: uvd[2]["compatible"] == "+", face_graph.edges(data=True))) - connected_components = tuple(networkx.connected_components(g)) - # Squashing all the connected nodes that need to receive the normal direction flip (or not) together. - quotient_graph = networkx.algorithms.quotient_graph(face_graph, connected_components) - # Coloring the new graph lets us know how which cluster of faces need to eventually receive the same flip. - # W.r.t. the nature of our problem (a normal can be directed inwards or outwards), - # two colors should be enough to color the face graph. - # `colors` maps the nodes of each connected component to its color. - colors: Dict[FrozenSet[int], int] = networkx.algorithms.greedy_color(quotient_graph) - assert len(colors) in (1, 2) - # We now compute the face stream which generates outwards normal vectors. 
- flipped_face_stream = __select_and_flip_faces(mesh_points, colors, face_stream) - return to_vtk_id_list(flipped_face_stream.dump()) - - -def reorient_mesh(mesh, cell_indices: Iterator[int]) -> vtkUnstructuredGrid: - """ - Reorient the polyhedron elements such that they all have their normals directed outwards. - :param mesh: The input vtk mesh. - :param cell_indices: We may need to only flip a limited number of polyhedron cells (only on the boundary for example). - :return: The vtk mesh with the desired polyhedron cells directed outwards. - """ - num_cells = mesh.GetNumberOfCells() - # Building an indicator/predicate from the list - needs_to_be_reoriented = numpy.zeros(num_cells, dtype=bool) - for ic in cell_indices: - needs_to_be_reoriented[ic] = True - - output_mesh = mesh.NewInstance() - # I did not manage to call `output_mesh.CopyStructure(mesh)` because I could not modify the polyhedron in place. - # Therefore, I insert the cells one by one... - output_mesh.SetPoints(mesh.GetPoints()) - logging.info("Reorienting the polyhedron cells to enforce normals directed outward.") - with tqdm(total=needs_to_be_reoriented.sum(), desc="Reorienting polyhedra") as progress_bar: # For smoother progress, we only update on reoriented elements. - for ic in range(num_cells): - cell = mesh.GetCell(ic) - cell_type = cell.GetCellType() - if cell_type == VTK_POLYHEDRON: - face_stream_ids = vtkIdList() - mesh.GetFaceStream(ic, face_stream_ids) - if needs_to_be_reoriented[ic]: - new_face_stream_ids = __reorient_element(mesh.GetPoints(), face_stream_ids) - else: - new_face_stream_ids = face_stream_ids - output_mesh.InsertNextCell(VTK_POLYHEDRON, new_face_stream_ids) - else: - output_mesh.InsertNextCell(cell_type, cell.GetPointIds()) - if needs_to_be_reoriented[ic]: - progress_bar.update(1) - assert output_mesh.GetNumberOfCells() == mesh.GetNumberOfCells() - return output_mesh diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/self_intersecting_elements.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/self_intersecting_elements.py deleted file mode 100644 index 0e98d4f5b49..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/self_intersecting_elements.py +++ /dev/null @@ -1,92 +0,0 @@ -from dataclasses import dataclass -import logging -from typing import ( - Collection, - List, -) - -from vtkmodules.vtkFiltersGeneral import ( - vtkCellValidator -) -from vtkmodules.vtkCommonCore import ( - vtkOutputWindow, - vtkFileOutputWindow -) -from vtkmodules.util.numpy_support import ( - vtk_to_numpy, -) - -from . import vtk_utils - - -@dataclass(frozen=True) -class Options: - tolerance: float - - -@dataclass(frozen=True) -class Result: - wrong_number_of_points_elements: Collection[int] - intersecting_edges_elements: Collection[int] - intersecting_faces_elements: Collection[int] - non_contiguous_edges_elements: Collection[int] - non_convex_elements: Collection[int] - faces_are_oriented_incorrectly_elements: Collection[int] - - -def __check(mesh, options: Options) -> Result: - err_out = vtkFileOutputWindow() - err_out.SetFileName("/dev/null") # vtkCellValidator outputs loads for each cell... 
- vtk_std_err_out = vtkOutputWindow() - vtk_std_err_out.SetInstance(err_out) - - valid = 0x0 - wrong_number_of_points = 0x01 - intersecting_edges = 0x02 - intersecting_faces = 0x04 - non_contiguous_edges = 0x08 - non_convex = 0x10 - faces_are_oriented_incorrectly = 0x20 - - wrong_number_of_points_elements: List[int] = [] - intersecting_edges_elements: List[int] = [] - intersecting_faces_elements: List[int] = [] - non_contiguous_edges_elements: List[int] = [] - non_convex_elements: List[int] = [] - faces_are_oriented_incorrectly_elements: List[int] = [] - - f = vtkCellValidator() - f.SetTolerance(options.tolerance) - - f.SetInputData(mesh) - f.Update() - output = f.GetOutput() - - validity = output.GetCellData().GetArray("ValidityState") # Could not change name using the vtk interface. - assert validity is not None - validity = vtk_to_numpy(validity) - for i, v in enumerate(validity): - if not v & valid: - if v & wrong_number_of_points: - wrong_number_of_points_elements.append(i) - if v & intersecting_edges: - intersecting_edges_elements.append(i) - if v & intersecting_faces: - intersecting_faces_elements.append(i) - if v & non_contiguous_edges: - non_contiguous_edges_elements.append(i) - if v & non_convex: - non_convex_elements.append(i) - if v & faces_are_oriented_incorrectly: - faces_are_oriented_incorrectly_elements.append(i) - return Result(wrong_number_of_points_elements=wrong_number_of_points_elements, - intersecting_edges_elements=intersecting_edges_elements, - intersecting_faces_elements=intersecting_faces_elements, - non_contiguous_edges_elements=non_contiguous_edges_elements, - non_convex_elements=non_convex_elements, - faces_are_oriented_incorrectly_elements=faces_are_oriented_incorrectly_elements) - - -def check(vtk_input_file: str, options: Options) -> Result: - mesh = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/supported_elements.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/supported_elements.py deleted file mode 100644 index 84c5fcbaf74..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/supported_elements.py +++ /dev/null @@ -1,163 +0,0 @@ -from dataclasses import dataclass -import logging -import multiprocessing -from typing import ( - Collection, - FrozenSet, - Iterable, - Mapping, - Optional, - Sequence, - Set, -) - -from tqdm import tqdm - -import networkx -import numpy - -from vtkmodules.vtkCommonCore import ( - vtkIdList, -) -from vtkmodules.vtkCommonDataModel import ( - vtkCellTypes, - vtkUnstructuredGrid, - VTK_HEXAGONAL_PRISM, - VTK_HEXAHEDRON, - VTK_PENTAGONAL_PRISM, - VTK_POLYHEDRON, - VTK_PYRAMID, - VTK_TETRA, - VTK_VOXEL, - VTK_WEDGE, -) -from vtkmodules.util.numpy_support import ( - vtk_to_numpy, -) - -from . import vtk_utils -from .vtk_utils import vtk_iter -from .vtk_polyhedron import build_face_to_face_connectivity_through_edges, FaceStream - - -@dataclass(frozen=True) -class Options: - num_proc: int - chunk_size: int - - -@dataclass(frozen=True) -class Result: - unsupported_std_elements_types: FrozenSet[int] # list of unsupported types - unsupported_polyhedron_elements: FrozenSet[int] # list of polyhedron elements that could not be converted to supported std elements - - -MESH: Optional[vtkUnstructuredGrid] = None # for multiprocessing, vtkUnstructuredGrid cannot be pickled. Let's use a global variable instead. 
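
The deleted `supported_elements` check keeps the mesh in a module-level `MESH` global because a `vtkUnstructuredGrid` cannot be pickled and therefore cannot be passed to `multiprocessing` workers as a task argument; only small cell indices travel through the pool. Below is a minimal, hypothetical sketch of that pattern, using a plain Python list as a stand-in for the grid and a `Pool` initializer to install the shared object in each worker (the deleted code appears to rely instead on the global being inherited by fork-started workers, but the idea is the same).

import multiprocessing
from typing import List, Optional

_SHARED_CELLS: Optional[List[List[int]]] = None  # stands in for the unpicklable global MESH


def _init_worker(cells: List[List[int]]) -> None:
    """Runs once in each worker process and stores the shared data in a module-level global."""
    global _SHARED_CELLS
    _SHARED_CELLS = cells


def _num_points(ic: int) -> int:
    """Toy per-cell task reading the worker-local global instead of a pickled mesh."""
    assert _SHARED_CELLS is not None
    return len(_SHARED_CELLS[ic])


if __name__ == "__main__":
    cells = [[0, 1, 2, 3], [1, 2, 4], [2, 3, 4, 5, 6]]  # hypothetical cell connectivities
    with multiprocessing.Pool(processes=2, initializer=_init_worker, initargs=(cells,)) as pool:
        # Only the integer indices are pickled and sent to the workers.
        print(list(pool.imap_unordered(_num_points, range(len(cells)), chunksize=1)))
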
- - -class IsPolyhedronConvertible: - def __init__(self, mesh: vtkUnstructuredGrid): - global MESH # for multiprocessing, vtkUnstructuredGrid cannot be pickled. Let's use a global variable instead. - MESH = mesh - - def build_prism_graph(n: int, name: str) -> networkx.Graph: - """ - Builds the face to face connectivities (through edges) for prism graphs. - :param n: The number of nodes of the basis (i.e. the pentagonal prims gets n = 5) - :param name: A human-readable name for logging purpose. - :return: A graph instance. - """ - tmp = networkx.cycle_graph(n) - for node in range(n): - tmp.add_edge(node, n) - tmp.add_edge(node, n + 1) - tmp.name = name - return tmp - - # Building the reference graphs - tet_graph = networkx.complete_graph(4) - tet_graph.name = "Tetrahedron" - pyr_graph = build_prism_graph(4, "Pyramid") - pyr_graph.remove_node(5) # Removing a node also removes its associated edges. - self.__reference_graphs: Mapping[int, Iterable[networkx.Graph]] = { - 4: (tet_graph,), - 5: (pyr_graph, build_prism_graph(3, "Wedge")), - 6: (build_prism_graph(4, "Hexahedron"),), - 7: (build_prism_graph(5, "Prism5"),), - 8: (build_prism_graph(6, "Prism6"),), - 9: (build_prism_graph(7, "Prism7"),), - 10: (build_prism_graph(8, "Prism8"),), - 11: (build_prism_graph(9, "Prism9"),), - 12: (build_prism_graph(10, "Prism10"),), - 13: (build_prism_graph(11, "Prism11"),), - } - - def __is_polyhedron_supported(self, face_stream) -> str: - """ - Checks if a polyhedron can be converted into a supported cell. - If so, returns the name of the type. If not, the returned name will be empty. - :param face_stream: The polyhedron. - :return: The name of the supported type or an empty string. - """ - cell_graph = build_face_to_face_connectivity_through_edges(face_stream, add_compatibility=True) - for reference_graph in self.__reference_graphs[cell_graph.order()]: - if networkx.is_isomorphic(reference_graph, cell_graph): - return str(reference_graph.name) - return "" - - def __call__(self, ic: int) -> int: - """ - Checks if a vtk polyhedron cell can be converted into a supported GEOSX element. - :param ic: The index element. - :return: -1 if the polyhedron vtk element can be converted into a supported element type. The index otherwise. - """ - global MESH - assert MESH is not None - if MESH.GetCellType(ic) != VTK_POLYHEDRON: - return -1 - pt_ids = vtkIdList() - MESH.GetFaceStream(ic, pt_ids) - face_stream = FaceStream.build_from_vtk_id_list(pt_ids) - converted_type_name = self.__is_polyhedron_supported(face_stream) - if converted_type_name: - logging.debug(f"Polyhedron cell {ic} can be converted into \"{converted_type_name}\"") - return -1 - else: - logging.debug(f"Polyhedron cell {ic} cannot be converted into any supported element.") - return ic - - -def __check(mesh: vtkUnstructuredGrid, options: Options) -> Result: - if hasattr(mesh, "GetDistinctCellTypesArray"): # For more recent versions of vtk. - cell_types = set(vtk_to_numpy(mesh.GetDistinctCellTypesArray())) - else: - cell_types = vtkCellTypes() - mesh.GetCellTypes(cell_types) - cell_types = set(vtk_iter(cell_types)) - supported_cell_types = { - VTK_HEXAGONAL_PRISM, - VTK_HEXAHEDRON, - VTK_PENTAGONAL_PRISM, - VTK_POLYHEDRON, - VTK_PYRAMID, - VTK_TETRA, - VTK_VOXEL, - VTK_WEDGE - } - unsupported_std_elements_types = cell_types - supported_cell_types - - # Dealing with polyhedron elements. 
- num_cells = mesh.GetNumberOfCells() - result = numpy.ones(num_cells, dtype=int) * -1 - with multiprocessing.Pool(processes=options.num_proc) as pool: - generator = pool.imap_unordered(IsPolyhedronConvertible(mesh), range(num_cells), chunksize=options.chunk_size) - for i, val in enumerate(tqdm(generator, total=num_cells, desc="Testing support for elements")): - result[i] = val - unsupported_polyhedron_elements = [i for i in result if i > -1] - return Result(unsupported_std_elements_types=frozenset(unsupported_std_elements_types), - unsupported_polyhedron_elements=frozenset(unsupported_polyhedron_elements)) - - -def check(vtk_input_file: str, options: Options) -> Result: - mesh: vtkUnstructuredGrid = vtk_utils.read_mesh(vtk_input_file) - return __check(mesh, options) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/triangle_distance.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/triangle_distance.py deleted file mode 100644 index ef1f3c98dac..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/triangle_distance.py +++ /dev/null @@ -1,186 +0,0 @@ -import itertools -from math import sqrt -from typing import Tuple, Union - -import numpy -from numpy.linalg import norm - - -def __div_clamp(num: float, den :float) -> float: - """ - Computes the division `num / den`. and clamps the result between 0 and 1. - If `den` is zero, the result of the division is set to 0. - :param num: The numerator. - :param den: The denominator. - :return: The result between 0 and 1. - """ - if den == 0.: - return 0. - tmp: float = num / den - if tmp < 0: - return 0. - elif tmp > 1: - return 1. - else: - return tmp - - -def distance_between_two_segments(x0: numpy.ndarray, d0: numpy.ndarray, - x1: numpy.ndarray, d1: numpy.ndarray) -> Tuple[numpy.ndarray, numpy.ndarray]: - """ - Compute the minimum distance between two segments. - :param x0: First point of segment 0. - :param d0: Director vector such that x0 + d0 is the second point of segment 0. - :param x1: First point of segment 1. - :param d1: Director vector such that x1 + d1 is the second point of segment 1. - :return: A tuple containing the two points closest point for segments 0 and 1 respectively. - """ - # The reference paper is: - # "On fast computation of distance between line segments" by Vladimir J. Lumelsky. - # Information Processing Letters, Vol. 21, number 2, pages 55-61, 08/16/1985. - - # In the reference, the indices start at 1, while in this implementation, they start at 0. - tmp: numpy.ndarray = x1 - x0 - D0: float = numpy.dot(d0, d0) # As such, this is D1 in the reference paper. - D1: float = numpy.dot(d1, d1) - R: float = numpy.dot(d0, d1) - S0: float = numpy.dot(d0, tmp) - S1: float = numpy.dot(d1, tmp) - - # `t0` parameterizes line 0: - # - when t0 = 0 the point is p0. - # - when t0 = 1, the point is p0 + u0, the other side of the segment - # Same for `t1` and line 1. - - # Step 1 of the algorithm considers degenerate cases. - # They'll be considered along the line using `div_clamp`. - - # Step 2: Computing t0 using eq (11). - t0: float = __div_clamp(S0 * D1 - S1 * R, D0 * D1 - R * R) - - # Step 3: compute t1 for point on line 1 closest to point at t0. 
- t1: float = __div_clamp(t0 * R - S1, D1) # Eq (10, right) - sol_1: numpy.ndarray = x1 + t1 * d1 # Eq (3) - t0: float = __div_clamp(t1 * R + S0, D0) # Eq (10, left) - sol_0: numpy.ndarray = x0 + t0 * d0 # Eq (4) - - return sol_0, sol_1 - - -def __compute_nodes_to_triangle_distance(tri_0, edges_0, tri_1) -> Tuple[Union[float, None], Union[numpy.ndarray, None], Union[numpy.ndarray, None], bool]: - """ - Computes the distance from nodes of `tri_1` points onto `tri_0`. - :param tri_0: First triangle. - :param edges_0: The edges of triangle 0. First element being edge [0, 1], etc. - :param tri_1: Second triangle - :return: The distance, the closest point on triangle 0, the closest on triangle 1 - and a boolean indicating of the triangles are disjoint. If nothing was found, - then the first three arguments are None. The boolean being still defined. - """ - are_disjoint: bool = False - tri_0_normal: numpy.ndarray = numpy.cross(edges_0[0], edges_0[1]) - tri_0_normal_norm: float = numpy.dot(tri_0_normal, tri_0_normal) - - # Forget about degenerate cases. - if tri_0_normal_norm > numpy.finfo(float).eps: - # Build projection lengths of `tri_1` points. - tri_1_proj = numpy.empty(3, dtype=float) - for i in range(3): - tri_1_proj[i] = numpy.dot(tri_0[0] - tri_1[i], tri_0_normal) - - # Considering `tri_0` separates the space in 2, - # let's check if `tri_1` is on one side only. - # If so, let's take the closest point. - point: int = -1 - if numpy.all(tri_1_proj > 0): - point = numpy.argmin(tri_1_proj) - elif numpy.all(tri_1_proj < 0): - point = numpy.argmax(tri_1_proj) - - # So if `tri_1` is actually "on one side", - # point `tri_1[point]` is candidate to be the closest point. - if point > -1: - are_disjoint = True - # But we must check that its projection is inside `tri_0`. - if numpy.dot(tri_1[point] - tri_0[0], numpy.cross(tri_0_normal, edges_0[0])) > 0: - if numpy.dot(tri_1[point] - tri_0[1], numpy.cross(tri_0_normal, edges_0[1])) > 0: - if numpy.dot(tri_1[point] - tri_0[2], numpy.cross(tri_0_normal, edges_0[2])) > 0: - # It is! - sol_0 = tri_1[point] - sol_1 = tri_1[point] + (tri_1_proj[point] / tri_0_normal_norm) * tri_0_normal - return norm(sol_1 - sol_0), sol_0, sol_1, are_disjoint - return None, None, None, are_disjoint - - -def distance_between_two_triangles(tri_0: numpy.ndarray, - tri_1: numpy.ndarray) -> Tuple[float, numpy.ndarray, numpy.ndarray]: - """ - Returns the minimum distance between two triangles, and the two points where this minimum occurs. - If the two triangles touch, then distance is exactly 0. - But the two points are dummy and cannot be used as contact points (they are still though). - :param tri_0: The first 3x3 triangle points. - :param tri_1: The second 3x3 triangle points. - :return: The distance and the two points. - """ - # Compute vectors along the 6 sides - edges_0 = numpy.empty((3, 3), dtype=float) - edges_1 = numpy.empty((3, 3), dtype=float) - for i in range(3): - edges_0[i][:] = tri_0[(i + 1) % 3] - tri_0[i] - edges_1[i][:] = tri_1[(i + 1) % 3] - tri_1[i] - - min_sol_0 = numpy.empty(3, dtype=float) - min_sol_1 = numpy.empty(3, dtype=float) - are_disjoint: bool = False - - min_dist = numpy.inf - - # Looping over all the pair of edges. - for i, j in itertools.product(range(3), repeat=2): - # Find the closest points on edges i and j. - sol_0, sol_1 = distance_between_two_segments(tri_0[i], edges_0[i], tri_1[j], edges_1[j]) - # Computing the distance between the two solutions. 
- delta_sol = sol_1 - sol_0 - dist: float = numpy.dot(delta_sol, delta_sol) - # Update minimum if relevant and check if it's the closest pair of points. - if dist <= min_dist: - min_sol_0[:] = sol_0 - min_sol_1[:] = sol_1 - min_dist = dist - - # `tri_0[(i + 2) % 3]` is the points opposite to edges_0[i] where the closest point sol_0 lies. - # Computing those scalar products and checking the signs somehow let us determine - # if the triangles are getting closer to each other when approaching the sol_(0|1) nodes. - # If so, we have a minimum. - a: float = numpy.dot(tri_0[(i + 2) % 3] - sol_0, delta_sol) - b: float = numpy.dot(tri_1[(j + 2) % 3] - sol_1, delta_sol) - if a <= 0 <= b: - return sqrt(dist), sol_0, sol_1 - - if a < 0: - a = 0 - if b > 0: - b = 0 - # `dist - a + b` expands to `numpy.dot(tri_1[(j + 2) % 3] - tri_0[(i + 2) % 3], sol_1 - sol_0)`. - # If the "directions" of the (sol_1 - sol_0) vector and the vector joining the extra points of the triangles - # (i.e. not involved in the current edge check) re the "same", then the triangles do not intersect. - if dist - a + b > 0: - are_disjoint = True - # No edge pair contained the closest points. - # Checking the node/face situation. - distance, sol_0, sol_1, are_disjoint_tmp = __compute_nodes_to_triangle_distance(tri_0, edges_0, tri_1) - if distance: - return distance, sol_0, sol_1 - are_disjoint = are_disjoint or are_disjoint_tmp - - distance, sol_0, sol_1, are_disjoint_tmp = __compute_nodes_to_triangle_distance(tri_1, edges_1, tri_0) - if distance: - return distance, sol_0, sol_1 - are_disjoint = are_disjoint or are_disjoint_tmp - # It's not a node/face situation. - # If the triangles do not overlap, let's return the minimum found during the edges loop. - # (maybe an edge was parallel to the other face, and we could not decide for a unique closest point). - if are_disjoint: - return sqrt(min_dist), min_sol_0, min_sol_1 - else: # Surely overlapping or degenerate triangles. - return 0., numpy.zeros(3, dtype=float), numpy.zeros(3, dtype=float) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/vtk_polyhedron.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/vtk_polyhedron.py deleted file mode 100644 index e246a573bde..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/vtk_polyhedron.py +++ /dev/null @@ -1,212 +0,0 @@ -from collections import defaultdict -from dataclasses import dataclass -from typing import ( - Collection, - Dict, - FrozenSet, - Iterable, - List, - Sequence, - Tuple, -) - -from vtkmodules.vtkCommonCore import ( - vtkIdList, -) - -import networkx - -from .vtk_utils import ( - vtk_iter, -) - - -@dataclass(frozen=True) -class Options: - dummy: float - - -@dataclass(frozen=True) -class Result: - dummy: float - - -def parse_face_stream(ids: vtkIdList) -> Sequence[Sequence[int]]: - """ - Parses the face stream raw information and converts it into a tuple of tuple of integers, - each tuple of integer being the nodes of a face. - :param ids: The raw vtk face stream. - :return: The tuple of tuple of integers. - """ - result = [] - it = vtk_iter(ids) - num_faces = next(it) - try: - while True: - num_nodes = next(it) - tmp = [] - for i in range(num_nodes): - tmp.append(next(it)) - result.append(tuple(tmp)) - except StopIteration: - pass - assert len(result) == num_faces - assert sum(map(len, result)) + len(result) + 1 == ids.GetNumberOfIds() - - return tuple(result) - - -class FaceStream: - """ - Helper class to manipulate the vtk face streams. 
- """ - def __init__(self, data: Sequence[Sequence[int]]): - # self.__data contains the list of faces nodes, like it appears in vtk face streams. - # Except that the additional size information is removed - # in favor of the __len__ of the containers. - self.__data: Sequence[Sequence[int]] = data - - @staticmethod - def build_from_vtk_id_list(ids: vtkIdList): - """ - Builds a FaceStream from the raw vtk face stream. - :param ids: The vtk face stream. - :return: A new FaceStream instance. - """ - return FaceStream(parse_face_stream(ids)) - - @property - def face_nodes(self) -> Iterable[Sequence[int]]: - """ - Iterate on the nodes of all the faces. - :return: An iterator. - """ - return iter(self.__data) - - @property - def num_faces(self) -> int: - """ - Number of faces in the face stream - :return: An integer - """ - return len(self.__data) - - @property - def support_point_ids(self) -> Collection[int]: - """ - The list of all (unique) support points of the face stream, in no specific order. - :return: The set of all the point ids. - """ - tmp: List[int] = [] - for nodes in self.face_nodes: - tmp += nodes - return frozenset(tmp) - - @property - def num_support_points(self) -> int: - """ - The number of unique support nodes of the polyhedron. - :return: An integer. - """ - return len(self.support_point_ids) - - def __getitem__(self, face_index) -> Sequence[int]: - """ - The support point ids for the `face_index` face. - :param face_index: The face index (within the face stream). - :return: A tuple containing all the point ids. - """ - return self.__data[face_index] - - def flip_faces(self, face_indices): - """ - Returns a new FaceStream instance with the face indices defined in face_indices flipped., - :param face_indices: The faces (local) indices to flip. - :return: A newly created instance. - """ - result = [] - for face_index, face_nodes in enumerate(self.__data): - result.append(tuple(reversed(face_nodes)) if face_index in face_indices else face_nodes) - return FaceStream(tuple(result)) - - def dump(self) -> Sequence[int]: - """ - Returns the face stream awaited by vtk, but in a python container. - The content can be used, once converted to a vtkIdList, to define another polyhedron in vtk. - :return: The face stream in a python container. - """ - result = [len(self.__data)] - for face_nodes in self.__data: - result.append(len(face_nodes)) - result += face_nodes - return tuple(result) - - def __repr__(self): - result = [str(len(self.__data))] - for face_nodes in self.__data: - result.append(str(len(face_nodes))) - result.append(", ".join(map(str, face_nodes))) - return ",\n".join(result) - - -def build_face_to_face_connectivity_through_edges(face_stream: FaceStream, add_compatibility=False) -> networkx.Graph: - """ - Given a face stream/polyhedron, builds the connections between the faces. - Those connections happen when two faces share an edge. - :param face_stream: The face stream description of the polyhedron. - :param add_compatibility: Two faces are considered compatible if their normals point in the same direction (inwards or outwards). - If `add_compatibility=True`, we add a `compatible={"-", "+"}` flag on the edges - to indicate that the two connected faces are compatible or not. - If `add_compatibility=False`, non-compatible faces are simply not connected by any edge. - :return: A graph which nodes are actually the faces of the polyhedron. - Two nodes of the graph are connected if they share an edge. 
- """ - edges_to_face_indices: Dict[FrozenSet[int], List[int]] = defaultdict(list) - for face_index, face_nodes in enumerate(face_stream.face_nodes): - # Each edge is defined by two nodes. We do a small trick to loop on consecutive points. - face_indices: Tuple[int, int] - for face_indices in zip(face_nodes, face_nodes[1:] + (face_nodes[0], )): - edges_to_face_indices[frozenset(face_indices)].append(face_index) - # We are doing here some small validations w.r.t. the connections of the faces - # which may only make sense in the context of numerical simulations. - # As such, an error will be thrown in case the polyhedron is not closed. - # So there may be a lack of absolute genericity, and the code may evolve if needed. - for face_indices in edges_to_face_indices.values(): - assert len(face_indices) == 2 - # Computing the graph degree for validation - degrees: Dict[int, int] = defaultdict(int) - for face_indices in edges_to_face_indices.values(): - for face_index in face_indices: - degrees[face_index] += 1 - for face_index, degree in degrees.items(): - assert len(face_stream[face_index]) == degree - # Validation that there is one unique edge connecting two faces. - face_indices_to_edge_index = defaultdict(list) - for edge_index, face_indices in edges_to_face_indices.items(): - face_indices_to_edge_index[frozenset(face_indices)].append(edge_index) - for edge_indices in face_indices_to_edge_index.values(): - assert len(edge_indices) == 1 - # Connecting the faces. Neighbor faces with consistent normals (i.e. facing both inward or outward) - # will be connected. This will allow us to extract connected components with consistent orientations. - # Another step will then reconcile all the components such that all the normals of the cell - # will consistently point outward. - graph = networkx.Graph() - graph.add_nodes_from(range(face_stream.num_faces)) - for edge, face_indices in edges_to_face_indices.items(): - face_index_0, face_index_1 = face_indices - face_nodes_0 = face_stream[face_index_0] + (face_stream[face_index_0][0],) - face_nodes_1 = face_stream[face_index_1] + (face_stream[face_index_1][0],) - node_0, node_1 = edge - order_0 = 1 if face_nodes_0[face_nodes_0.index(node_0) + 1] == node_1 else -1 - order_1 = 1 if face_nodes_1[face_nodes_1.index(node_0) + 1] == node_1 else -1 - # Same order of nodes means that the normals of the faces - # are _not_ both in the same "direction" (inward or outward). 
- if order_0 * order_1 == 1: - if add_compatibility: - graph.add_edge(face_index_0, face_index_1, compatible="-") - else: - if add_compatibility: - graph.add_edge(face_index_0, face_index_1, compatible="+") - else: - graph.add_edge(face_index_0, face_index_1) - return graph diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/vtk_utils.py b/src/coreComponents/python/modules/geosx_mesh_doctor/checks/vtk_utils.py deleted file mode 100644 index 2604609ef0c..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/checks/vtk_utils.py +++ /dev/null @@ -1,143 +0,0 @@ -from dataclasses import dataclass -import os.path -import logging -import sys -from typing import ( - Any, - Iterator, - Optional, -) - -from vtkmodules.vtkCommonCore import ( - vtkIdList, -) -from vtkmodules.vtkCommonDataModel import ( - vtkUnstructuredGrid, -) -from vtkmodules.vtkIOLegacy import ( - vtkUnstructuredGridWriter, - vtkUnstructuredGridReader, -) -from vtkmodules.vtkIOXML import ( - vtkXMLUnstructuredGridReader, - vtkXMLUnstructuredGridWriter, -) - - -@dataclass(frozen=True) -class VtkOutput: - output: str - is_data_mode_binary: bool - - -def to_vtk_id_list(data) -> vtkIdList: - result = vtkIdList() - result.Allocate(len(data)) - for d in data: - result.InsertNextId(d) - return result - - -def vtk_iter(l) -> Iterator[Any]: - """ - Utility function transforming a vtk "container" (e.g. vtkIdList) into an iterable to be used for building built-ins python containers. - :param l: A vtk container. - :return: The iterator. - """ - if hasattr(l, "GetNumberOfIds"): - for i in range(l.GetNumberOfIds()): - yield l.GetId(i) - elif hasattr(l, "GetNumberOfTypes"): - for i in range(l.GetNumberOfTypes()): - yield l.GetCellType(i) - - -def __read_vtk(vtk_input_file: str) -> Optional[vtkUnstructuredGrid]: - reader = vtkUnstructuredGridReader() - logging.info(f"Testing file format \"{vtk_input_file}\" using legacy format reader...") - reader.SetFileName(vtk_input_file) - if reader.IsFileUnstructuredGrid(): - logging.info(f"Reader matches. Reading file \"{vtk_input_file}\" using legacy format reader.") - reader.Update() - return reader.GetOutput() - else: - logging.info("Reader did not match the input file format.") - return None - - -def __read_vtu(vtk_input_file: str) -> Optional[vtkUnstructuredGrid]: - reader = vtkXMLUnstructuredGridReader() - logging.info(f"Testing file format \"{vtk_input_file}\" using XML format reader...") - if reader.CanReadFile(vtk_input_file): - reader.SetFileName(vtk_input_file) - logging.info(f"Reader matches. Reading file \"{vtk_input_file}\" using XML format reader.") - reader.Update() - return reader.GetOutput() - else: - logging.info("Reader did not match the input file format.") - return None - - -def read_mesh(vtk_input_file: str) -> vtkUnstructuredGrid: - """ - Read the vtk file and builds an unstructured grid from it. - :param vtk_input_file: The file name. The extension will be used to guess the file format. - If first guess does not work, eventually all the others reader available will be tested. - :return: A unstructured grid. - """ - file_extension = os.path.splitext(vtk_input_file)[-1] - extension_to_reader = {".vtk": __read_vtk, - ".vtu": __read_vtu} - # Testing first the reader that should match - if file_extension in extension_to_reader: - output_mesh = extension_to_reader.pop(file_extension)(vtk_input_file) - if output_mesh: - return output_mesh - # If it does not match, then test all the others. 
- for reader in extension_to_reader.values(): - output_mesh = reader(vtk_input_file) - if output_mesh: - return output_mesh - # No reader did work. Dying. - logging.critical(f"Could not find the appropriate VTK reader for file \"{vtk_input_file}\". Dying...") - sys.exit(1) - - -def __write_vtk(mesh: vtkUnstructuredGrid, output: str) -> int: - logging.info(f"Writing mesh into file \"{output}\" using legacy format.") - writer = vtkUnstructuredGridWriter() - writer.SetFileName(output) - writer.SetInputData(mesh) - return writer.Write() - - -def __write_vtu(mesh: vtkUnstructuredGrid, output: str, is_data_mode_binary: bool) -> int: - logging.info(f"Writing mesh into file \"{output}\" using XML format.") - writer = vtkXMLUnstructuredGridWriter() - writer.SetFileName(output) - writer.SetInputData(mesh) - writer.SetDataModeToBinary() if is_data_mode_binary else writer.SetDataModeToAscii() - return writer.Write() - - -def write_mesh(mesh: vtkUnstructuredGrid, vtk_output: VtkOutput) -> int: - """ - Writes the mesh to disk. - Nothing will be done if the file already exists. - :param mesh: The unstructured grid to write. - :param vtk_output: Where to write. The file extension will be used to select the VTK file format. - :return: 0 in case of success. - """ - if os.path.exists(vtk_output.output): - logging.error(f"File \"{vtk_output.output}\" already exists, nothing done.") - return 1 - file_extension = os.path.splitext(vtk_output.output)[-1] - if file_extension == ".vtk": - success_code = __write_vtk(mesh, vtk_output.output) - elif file_extension == ".vtu": - success_code = __write_vtu(mesh, vtk_output.output, vtk_output.is_data_mode_binary) - else: - # No writer found did work. Dying. - logging.critical(f"Could not find the appropriate VTK writer for extension \"{file_extension}\". Dying...") - sys.exit(1) - return 0 if success_code else 2 # the Write member function return 1 in case of success, 0 otherwise. 
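
The deleted `vtk_utils` helpers above dispatch on the file extension to choose between the legacy (`.vtk`) and XML (`.vtu`) VTK readers and writers, falling back to the remaining readers when the first guess fails. A minimal sketch of that dispatch, assuming hypothetical input/output paths and omitting the fallback and error handling:

import os.path
from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid
from vtkmodules.vtkIOLegacy import vtkUnstructuredGridReader, vtkUnstructuredGridWriter
from vtkmodules.vtkIOXML import vtkXMLUnstructuredGridReader, vtkXMLUnstructuredGridWriter


def read_unstructured_grid(path: str) -> vtkUnstructuredGrid:
    # Pick the XML reader for .vtu files, the legacy reader otherwise.
    reader = vtkXMLUnstructuredGridReader() if path.endswith(".vtu") else vtkUnstructuredGridReader()
    reader.SetFileName(path)
    reader.Update()
    return reader.GetOutput()


def write_unstructured_grid(mesh: vtkUnstructuredGrid, path: str) -> int:
    # Same extension-based choice for the writer.
    writer = vtkXMLUnstructuredGridWriter() if path.endswith(".vtu") else vtkUnstructuredGridWriter()
    writer.SetFileName(path)
    writer.SetInputData(mesh)
    return writer.Write()  # VTK writers return 1 on success, 0 on failure.


if __name__ == "__main__":
    input_file, output_file = "input_mesh.vtu", "output_mesh.vtu"  # hypothetical paths
    if os.path.exists(input_file):
        grid = read_unstructured_grid(input_file)
        status = write_unstructured_grid(grid, output_file)
        print(f"Read {grid.GetNumberOfCells()} cells; write returned {status}.")
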
diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/mesh_doctor.py b/src/coreComponents/python/modules/geosx_mesh_doctor/mesh_doctor.py deleted file mode 100644 index f28cc7e9747..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/mesh_doctor.py +++ /dev/null @@ -1,35 +0,0 @@ -import sys - -try: - min_python_version = (3, 7) - assert sys.version_info >= min_python_version -except AssertionError as e: - print(f"Please update python to at least version {'.'.join(map(str, min_python_version))}.") - sys.exit(1) - -import logging - -from parsing import CheckHelper -from parsing.cli_parsing import parse_and_set_verbosity -import register - - -def main(): - logging.basicConfig(format='[%(asctime)s][%(levelname)s] %(message)s') - parse_and_set_verbosity(sys.argv) - main_parser, all_checks, all_checks_helpers = register.register() - args = main_parser.parse_args(sys.argv[1:]) - logging.info(f"Checking mesh \"{args.vtk_input_file}\".") - check_options = all_checks_helpers[args.subparsers].convert(vars(args)) - try: - check = all_checks[args.subparsers] - except KeyError as e: - logging.critical(f"Check {args.subparsers} is not a valid check.") - sys.exit(1) - helper: CheckHelper = all_checks_helpers[args.subparsers] - result = check(args.vtk_input_file, check_options) - helper.display_results(check_options, result) - - -if __name__ == '__main__': - main() diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/__init__.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/__init__.py deleted file mode 100644 index 0d06f736cce..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -import argparse -from dataclasses import dataclass -from typing import Callable, Any - - -COLLOCATES_NODES = "collocated_nodes" -ELEMENT_VOLUMES = "element_volumes" -FIX_ELEMENTS_ORDERINGS = "fix_elements_orderings" -GENERATE_CUBE = "generate_cube" -GENERATE_FRACTURES = "generate_fractures" -GENERATE_GLOBAL_IDS = "generate_global_ids" -NON_CONFORMAL = "non_conformal" -SELF_INTERSECTING_ELEMENTS = "self_intersecting_elements" -SUPPORTED_ELEMENTS = "supported_elements" - - -@dataclass(frozen=True) -class CheckHelper: - fill_subparser: Callable[[Any], argparse.ArgumentParser] - convert: Callable[[Any], Any] - display_results: Callable[[Any, Any], None] diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/check_fractures_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/check_fractures_parsing.py deleted file mode 100644 index 3f43bca60a6..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/check_fractures_parsing.py +++ /dev/null @@ -1 +0,0 @@ -# empty: the check is not available yet! \ No newline at end of file diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/cli_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/cli_parsing.py deleted file mode 100644 index a2eb20ed7c9..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/cli_parsing.py +++ /dev/null @@ -1,73 +0,0 @@ -import argparse -import logging -import textwrap -from typing import List - -__VERBOSE_KEY = "verbose" -__QUIET_KEY = "quiet" - -__VERBOSITY_FLAG = "v" -__QUIET_FLAG = "q" - - -def parse_and_set_verbosity(cli_args: List[str]) -> None: - """ - Parse the verbosity flag only. And sets the logger's level accordingly. 
- :param cli_args: The list of arguments (as strings) - :return: None - """ - dummy_verbosity_parser = argparse.ArgumentParser(add_help=False) - dummy_verbosity_parser.add_argument('-' + __VERBOSITY_FLAG, - '--' + __VERBOSE_KEY, - action='count', - default=2, - dest=__VERBOSE_KEY) - dummy_verbosity_parser.add_argument('-' + __QUIET_FLAG, - '--' + __QUIET_KEY, - action='count', - default=0, - dest=__QUIET_KEY) - args = dummy_verbosity_parser.parse_known_args(cli_args[1:])[0] - d = vars(args) - v = d[__VERBOSE_KEY] - d[__QUIET_KEY] - verbosity = logging.CRITICAL - (10 * v) - if verbosity < logging.DEBUG: - verbosity = logging.DEBUG - elif verbosity > logging.CRITICAL: - verbosity = logging.CRITICAL - logging.getLogger().setLevel(verbosity) - logging.info(f"Logger level set to \"{logging.getLevelName(verbosity)}\"") - - -def init_parser() -> argparse.ArgumentParser: - vtk_input_file_key = "vtk_input_file" - - epilog_msg = f"""\ - Note that checks are dynamically loaded. - An option may be missing because of an unloaded module. - Increase verbosity (-{__VERBOSITY_FLAG}, -{__VERBOSITY_FLAG * 2}) to get full information. - """ - formatter = lambda prog: argparse.RawTextHelpFormatter(prog, max_help_position=8) - parser = argparse.ArgumentParser(description='Inspects meshes for GEOSX.', - epilog=textwrap.dedent(epilog_msg), - formatter_class=formatter) - # Nothing will be done with this verbosity/quiet input. - # It's only here for the `--help` message. - # `parse_verbosity` does the real parsing instead. - parser.add_argument('-' + __VERBOSITY_FLAG, - action='count', - default=2, - dest=__VERBOSE_KEY, - help=f"Use -{__VERBOSITY_FLAG} 'INFO', -{__VERBOSITY_FLAG * 2} for 'DEBUG'. Defaults to 'WARNING'.") - parser.add_argument('-' + __QUIET_FLAG, - action='count', - default=0, - dest=__QUIET_KEY, - help=f"Use -{__QUIET_FLAG} to reduce the verbosity of the output.") - parser.add_argument('-i', - '--vtk-input-file', - metavar='VTK_MESH_FILE', - type=str, - required=True, - dest=vtk_input_file_key) - return parser diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/collocated_nodes_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/collocated_nodes_parsing.py deleted file mode 100644 index 421ae95b993..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/collocated_nodes_parsing.py +++ /dev/null @@ -1,49 +0,0 @@ -import logging - -from typing import ( - FrozenSet, - List, -) - -from checks.collocated_nodes import Options, Result - -from . 
import COLLOCATES_NODES - -__TOLERANCE = "tolerance" - - -def convert(parsed_options) -> Options: - return Options(parsed_options[__TOLERANCE]) - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(COLLOCATES_NODES, - help="Checks if nodes are collocated.") - p.add_argument('--' + __TOLERANCE, - type=float, - required=True, - help="[float]: The absolute distance between two nodes for them to be considered collocated.") - - -def display_results(options: Options, result: Result): - all_collocated_nodes: List[int] = [] - for bucket in result.nodes_buckets: - for node in bucket: - all_collocated_nodes.append(node) - all_collocated_nodes: FrozenSet[int] = frozenset(all_collocated_nodes) # Surely useless - if all_collocated_nodes: - logging.error(f"You have {len(all_collocated_nodes)} collocated nodes (tolerance = {options.tolerance}).") - - logging.info("Here are all the buckets of collocated nodes.") - tmp: List[str] = [] - for bucket in result.nodes_buckets: - tmp.append(f"({', '.join(map(str, bucket))})") - logging.info(f"({', '.join(tmp)})") - else: - logging.error(f"You have no collocated node (tolerance = {options.tolerance}).") - - if result.wrong_support_elements: - tmp: str = ", ".join(map(str, result.wrong_support_elements)) - logging.error(f"You have {len(result.wrong_support_elements)} elements with duplicated support nodes.\n" + tmp) - else: - logging.error("You have no element with duplicated support nodes.") diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/element_volumes_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/element_volumes_parsing.py deleted file mode 100644 index 3b196822fbd..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/element_volumes_parsing.py +++ /dev/null @@ -1,34 +0,0 @@ -import logging - -from checks.element_volumes import Options, Result - -from . import ELEMENT_VOLUMES - -__MIN_VOLUME = "min" -__MIN_VOLUME_DEFAULT = 0. - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(ELEMENT_VOLUMES, - help=f"Checks if the volumes of the elements are greater than \"{__MIN_VOLUME}\".") - p.add_argument('--' + __MIN_VOLUME, - type=float, - metavar=__MIN_VOLUME_DEFAULT, - default=__MIN_VOLUME_DEFAULT, - required=True, - help=f"[float]: The minimum acceptable volume. Defaults to {__MIN_VOLUME_DEFAULT}.") - - -def convert(parsed_options) -> Options: - """ - From the parsed cli options, return the converted options for elements volumes check. - :param options_str: Parsed cli options. - :return: Options instance. 
- """ - return Options(min_volume=parsed_options[__MIN_VOLUME]) - - -def display_results(options: Options, result: Result): - logging.error(f"You have {len(result.element_volumes)} elements with volumes smaller than {options.min_volume}.") - if result.element_volumes: - logging.error("The elements indices and their volumes are:\n" + "\n".join(map(str, result.element_volumes))) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/fix_elements_orderings_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/fix_elements_orderings_parsing.py deleted file mode 100644 index c105792560b..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/fix_elements_orderings_parsing.py +++ /dev/null @@ -1,84 +0,0 @@ -import logging -import random - -from vtkmodules.vtkCommonDataModel import ( - VTK_HEXAGONAL_PRISM, - VTK_HEXAHEDRON, - VTK_PENTAGONAL_PRISM, - VTK_PYRAMID, - VTK_TETRA, - VTK_VOXEL, - VTK_WEDGE, -) - -from checks.fix_elements_orderings import Options, Result - -from . import vtk_output_parsing, FIX_ELEMENTS_ORDERINGS - - -__CELL_TYPE_MAPPING = { - "Hexahedron": VTK_HEXAHEDRON, - "Prism5": VTK_PENTAGONAL_PRISM, - "Prism6": VTK_HEXAGONAL_PRISM, - "Pyramid": VTK_PYRAMID, - "Tetrahedron": VTK_TETRA, - "Voxel": VTK_VOXEL, - "Wedge": VTK_WEDGE, -} - -__CELL_TYPE_SUPPORT_SIZE = { - VTK_HEXAHEDRON: 8, - VTK_PENTAGONAL_PRISM: 10, - VTK_HEXAGONAL_PRISM: 12, - VTK_PYRAMID: 5, - VTK_TETRA: 4, - VTK_VOXEL: 8, - VTK_WEDGE: 6, -} - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(FIX_ELEMENTS_ORDERINGS, - help="Reorders the support nodes for the given cell types.") - for key, vtk_key in __CELL_TYPE_MAPPING.items(): - tmp = list(range(__CELL_TYPE_SUPPORT_SIZE[vtk_key])) - random.Random(4).shuffle(tmp) - p.add_argument('--' + key, - type=str, - metavar=",".join(map(str, tmp)), - default=None, - required=False, - help=f"[list of integers]: node permutation for \"{key}\".") - vtk_output_parsing.fill_vtk_output_subparser(p) - - -def convert(parsed_options) -> Options: - """ - From the parsed cli options, return the converted options for self intersecting elements check. - :param options_str: Parsed cli options. - :return: Options instance. - """ - cell_type_to_ordering = {} - for key, vtk_key in __CELL_TYPE_MAPPING.items(): - raw_mapping = parsed_options[key] - if raw_mapping: - tmp = tuple(map(int, raw_mapping.split(","))) - if not set(tmp) == set(range(__CELL_TYPE_SUPPORT_SIZE[vtk_key])): - err_msg = f"Permutation {raw_mapping} for type {key} is not valid." 
- logging.error(err_msg) - raise ValueError(err_msg) - cell_type_to_ordering[vtk_key] = tmp - vtk_output = vtk_output_parsing.convert(parsed_options) - return Options(vtk_output=vtk_output, - cell_type_to_ordering=cell_type_to_ordering) - - -def display_results(options: Options, result: Result): - if result.output: - logging.info(f"New mesh was written to file '{result.output}'") - if result.unchanged_cell_types: - logging.info(f"Those vtk types were not reordered: [{', '.join(map(str, result.unchanged_cell_types))}].") - else: - logging.info("All the cells of the mesh were reordered.") - else: - logging.info("No output file was written.") diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_cube_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_cube_parsing.py deleted file mode 100644 index 41c0e045127..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_cube_parsing.py +++ /dev/null @@ -1,87 +0,0 @@ -import logging - -from checks.generate_cube import Options, Result, FieldInfo - -from . import vtk_output_parsing, generate_global_ids_parsing, GENERATE_CUBE -from .generate_global_ids_parsing import GlobalIdsInfo - - -__X, __Y, __Z, __NX, __NY, __NZ = "x", "y", "z", "nx", "ny", "nz" -__FIELDS = "fields" - - -def convert(parsed_options) -> Options: - def check_discretizations(x, nx, title): - if len(x) != len(nx) + 1: - raise ValueError(f"{title} information (\"{x}\" and \"{nx}\") does not have consistent size.") - check_discretizations(parsed_options[__X], parsed_options[__NX], __X) - check_discretizations(parsed_options[__Y], parsed_options[__NY], __Y) - check_discretizations(parsed_options[__Z], parsed_options[__NZ], __Z) - - def parse_fields(s): - name, support, dim = s.split(":") - if support not in ("CELLS", "POINTS"): - raise ValueError(f"Support {support} for field \"{name}\" must be one of \"CELLS\" or \"POINTS\".") - try: - dim = int(dim) - assert dim > 0 - except ValueError: - raise ValueError(f"Dimension {dim} cannot be converted to an integer.") - except AssertionError: - raise ValueError(f"Dimension {dim} must be a positive integer") - return FieldInfo(name=name, support=support, dimension=dim) - - gids: GlobalIdsInfo = generate_global_ids_parsing.convert_global_ids(parsed_options) - - return Options(vtk_output=vtk_output_parsing.convert(parsed_options), - generate_cells_global_ids=gids.cells, - generate_points_global_ids=gids.points, - xs=parsed_options[__X], - ys=parsed_options[__Y], - zs=parsed_options[__Z], - nxs=parsed_options[__NX], - nys=parsed_options[__NY], - nzs=parsed_options[__NZ], - fields=tuple(map(parse_fields, parsed_options[__FIELDS]))) - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(GENERATE_CUBE, - help="Generate a cube and its fields.") - p.add_argument('--' + __X, - type=lambda s: tuple(map(float, s.split(":"))), - metavar="0:1.5:3", - help="[list of floats]: X coordinates of the points.") - p.add_argument('--' + __Y, - type=lambda s: tuple(map(float, s.split(":"))), - metavar="0:5:10", - help="[list of floats]: Y coordinates of the points.") - p.add_argument('--' + __Z, - type=lambda s: tuple(map(float, s.split(":"))), - metavar="0:1", - help="[list of floats]: Z coordinates of the points.") - p.add_argument('--' + __NX, - type=lambda s: tuple(map(int, s.split(":"))), - metavar="2:2", - help="[list of integers]: Number of elements in the X direction.") - p.add_argument('--' + __NY, - type=lambda s: tuple(map(int, s.split(":"))), - 
metavar="1:1", - help="[list of integers]: Number of elements in the Y direction.") - p.add_argument('--' + __NZ, - type=lambda s: tuple(map(int, s.split(":"))), - metavar="4", - help="[list of integers]: Number of elements in the Z direction.") - p.add_argument('--' + __FIELDS, - type=str, - metavar="name:support:dim", - nargs="+", - required=False, - default=(), - help="Create fields on CELLS or POINTS, with given dimension (typically 1 or 3).") - generate_global_ids_parsing.fill_generate_global_ids_subparser(p) - vtk_output_parsing.fill_vtk_output_subparser(p) - - -def display_results(options: Options, result: Result): - logging.info(result.info) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_fractures_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_fractures_parsing.py deleted file mode 100644 index 47897933908..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_fractures_parsing.py +++ /dev/null @@ -1,66 +0,0 @@ -import logging - -from checks.generate_fractures import Options, Result, FracturePolicy - -from . import vtk_output_parsing, GENERATE_FRACTURES - -__POLICY = "policy" -__FIELD_POLICY = "field" -__INTERNAL_SURFACES_POLICY = "internal_surfaces" -__POLICIES = (__FIELD_POLICY, __INTERNAL_SURFACES_POLICY ) - -__FIELD_NAME = "name" -__FIELD_VALUES = "values" - -__FRACTURE_PREFIX = "fracture" - - -def convert_to_fracture_policy(s: str) -> FracturePolicy: - """ - Converts the user input to the proper enum chosen. - I do not want to use the auto conversion already available to force explicit conversion. - :param s: The user input - :return: The matching enum. - """ - if s == __FIELD_POLICY: - return FracturePolicy.FIELD - elif s == __INTERNAL_SURFACES_POLICY: - return FracturePolicy.INTERNAL_SURFACES - raise ValueError(f"Policy {s} is not valid. Please use one of \"{', '.join(map(str, __POLICIES))}\".") - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(GENERATE_FRACTURES, - help="Splits the mesh to generate the faults and fractures. [EXPERIMENTAL]") - p.add_argument('--' + __POLICY, - type=convert_to_fracture_policy, - metavar=", ".join(__POLICIES), - required=True, - help=f"[string]: The criterion to define the surfaces that will be changed into fracture zones. " - f"Possible values are \"{', '.join(__POLICIES)}\"") - p.add_argument('--' + __FIELD_NAME, - type=str, - help=f"[string]: If the \"{__FIELD_POLICY}\" {__POLICY} is selected, defines which field will be considered to define the fractures. " - f"If the \"{__INTERNAL_SURFACES_POLICY}\" {__POLICY} is selected, defines the name of the attribute will be considered to identify the fractures. ") - p.add_argument('--' + __FIELD_VALUES, - type=str, - help=f"[list of comma separated integers]: If the \"{__FIELD_POLICY}\" {__POLICY} is selected, which changes of the field will be considered as a fracture. 
If the \"{__INTERNAL_SURFACES_POLICY}\" {__POLICY} is selected, list of the fracture attributes.") - vtk_output_parsing.fill_vtk_output_subparser(p) - vtk_output_parsing.fill_vtk_output_subparser(p, prefix=__FRACTURE_PREFIX) - - -def convert(parsed_options) -> Options: - policy = parsed_options[__POLICY] - field = parsed_options[__FIELD_NAME] - field_values = frozenset(map(int, parsed_options[__FIELD_VALUES].split(","))) - vtk_output = vtk_output_parsing.convert(parsed_options) - vtk_fracture_output = vtk_output_parsing.convert(parsed_options, prefix=__FRACTURE_PREFIX) - return Options(policy=policy, - field=field, - field_values=field_values, - vtk_output=vtk_output, - vtk_fracture_output=vtk_fracture_output) - - -def display_results(options: Options, result: Result): - pass diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_global_ids_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_global_ids_parsing.py deleted file mode 100644 index 730599a41cc..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/generate_global_ids_parsing.py +++ /dev/null @@ -1,57 +0,0 @@ -from dataclasses import dataclass -import logging - -from checks.generate_global_ids import Options, Result - -from . import vtk_output_parsing, GENERATE_GLOBAL_IDS - - -__CELLS, __POINTS = "cells", "points" - - -@dataclass(frozen=True) -class GlobalIdsInfo: - cells: bool - points: bool - - -def convert_global_ids(parsed_options) -> GlobalIdsInfo: - return GlobalIdsInfo(cells=parsed_options[__CELLS], - points=parsed_options[__POINTS]) - - -def convert(parsed_options) -> Options: - gids: GlobalIdsInfo = convert_global_ids(parsed_options) - return Options(vtk_output=vtk_output_parsing.convert(parsed_options), - generate_cells_global_ids=gids.cells, - generate_points_global_ids=gids.points) - - -def fill_generate_global_ids_subparser(p): - p.add_argument('--' + __CELLS, - action="store_true", - help=f"[bool]: Generate global ids for cells. Defaults to true.") - p.add_argument('--no-' + __CELLS, - action="store_false", - dest=__CELLS, - help=f"[bool]: Don't generate global ids for cells.") - p.set_defaults(**{__CELLS: True}) - p.add_argument('--' + __POINTS, - action="store_true", - help=f"[bool]: Generate global ids for points. Defaults to true.") - p.add_argument('--no-' + __POINTS, - action="store_false", - dest=__POINTS, - help=f"[bool]: Don't generate global ids for points.") - p.set_defaults(**{__POINTS: True}) - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(GENERATE_GLOBAL_IDS, - help="Adds globals ids for points and cells.") - fill_generate_global_ids_subparser(p) - vtk_output_parsing.fill_vtk_output_subparser(p) - - -def display_results(options: Options, result: Result): - logging.info(result.info) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/non_conformal_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/non_conformal_parsing.py deleted file mode 100644 index 33625f68f01..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/non_conformal_parsing.py +++ /dev/null @@ -1,48 +0,0 @@ -import logging - -from typing import ( - FrozenSet, - List, -) - -from checks.non_conformal import Options, Result - -from . import NON_CONFORMAL - -__ANGLE_TOLERANCE = "angle_tolerance" -__POINT_TOLERANCE = "point_tolerance" -__FACE_TOLERANCE = "face_tolerance" - -__ANGLE_TOLERANCE_DEFAULT = 10. 
- -__ALL_KEYWORDS = {__ANGLE_TOLERANCE, __POINT_TOLERANCE, __FACE_TOLERANCE} - - -def convert(parsed_options) -> Options: - return Options(angle_tolerance=parsed_options[__ANGLE_TOLERANCE], - point_tolerance=parsed_options[__POINT_TOLERANCE], - face_tolerance=parsed_options[__FACE_TOLERANCE]) - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(NON_CONFORMAL, - help="Detects non conformal elements. [EXPERIMENTAL]") - p.add_argument('--' + __ANGLE_TOLERANCE, - type=float, - metavar=__ANGLE_TOLERANCE_DEFAULT, - default=__ANGLE_TOLERANCE_DEFAULT, - help=f"[float]: angle tolerance in degrees. Defaults to {__ANGLE_TOLERANCE_DEFAULT}") - p.add_argument('--' + __POINT_TOLERANCE, - type=float, - help=f"[float]: tolerance for two points to be considered collocated.") - p.add_argument('--' + __FACE_TOLERANCE, - type=float, - help=f"[float]: tolerance for two faces to be considered \"touching\".") - - -def display_results(options: Options, result: Result): - non_conformal_cells: List[int] = [] - for i, j in result.non_conformal_cells: - non_conformal_cells += i, j - non_conformal_cells: FrozenSet[int] = frozenset(non_conformal_cells) - logging.error(f"You have {len(non_conformal_cells)} non conformal cells.\n{', '.join(map(str, sorted(non_conformal_cells)))}") diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/self_intersecting_elements_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/self_intersecting_elements_parsing.py deleted file mode 100644 index 70f5d6a9a37..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/self_intersecting_elements_parsing.py +++ /dev/null @@ -1,36 +0,0 @@ -import logging - -import numpy - -from checks.self_intersecting_elements import Options, Result - -from . import SELF_INTERSECTING_ELEMENTS - -__TOLERANCE = "min" -__TOLERANCE_DEFAULT = numpy.finfo(float).eps - - -def convert(parsed_options) -> Options: - tolerance = parsed_options[__TOLERANCE] - if tolerance == 0: - logging.warning("Having tolerance set to 0 can induce lots of false positive results (adjacent faces may be considered intersecting).") - elif tolerance < 0: - raise ValueError(f"Negative tolerance ({tolerance}) in the {SELF_INTERSECTING_ELEMENTS} check is not allowed.") - return Options(tolerance=tolerance) - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(SELF_INTERSECTING_ELEMENTS, - help="Checks if the faces of the elements are self intersecting.") - p.add_argument('--' + __TOLERANCE, - type=float, - required=False, - metavar=__TOLERANCE_DEFAULT, - default=__TOLERANCE_DEFAULT, - help=f"[float]: The tolerance in the computation. Defaults to your machine precision {__TOLERANCE_DEFAULT}.") - - -def display_results(options: Options, result: Result): - logging.error(f"You have {len(result.intersecting_faces_elements)} elements with self intersecting faces.") - if result.intersecting_faces_elements: - logging.error("The elements indices are:\n" + ", ".join(map(str, result.intersecting_faces_elements))) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/supported_elements_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/supported_elements_parsing.py deleted file mode 100644 index c68905bea6b..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/supported_elements_parsing.py +++ /dev/null @@ -1,49 +0,0 @@ -import logging -import multiprocessing - -from checks.supported_elements import Options, Result - -from . 
import SUPPORTED_ELEMENTS - -__CHUNK_SIZE = "chunck_size" -__NUM_PROC = "nproc" - - -__ALL_KEYWORDS = {__CHUNK_SIZE, __NUM_PROC} - -__CHUNK_SIZE_DEFAULT = 1 -__NUM_PROC_DEFAULT = multiprocessing.cpu_count() - - -def convert(parsed_options) -> Options: - return Options(chunk_size=parsed_options[__CHUNK_SIZE], - num_proc=parsed_options[__NUM_PROC]) - - -def fill_subparser(subparsers) -> None: - p = subparsers.add_parser(SUPPORTED_ELEMENTS, - help="Check that all the elements of the mesh are supported by GEOSX.") - p.add_argument('--' + __CHUNK_SIZE, - type=int, - required=False, - metavar=__CHUNK_SIZE_DEFAULT, - default=__CHUNK_SIZE_DEFAULT, - help=f"[int]: Defaults chunk size for parallel processing to {__CHUNK_SIZE_DEFAULT}") - p.add_argument('--' + __NUM_PROC, - type=int, - required=False, - metavar=__NUM_PROC_DEFAULT, - default=__NUM_PROC_DEFAULT, - help=f"[int]: Number of threads used for parallel processing. Defaults to your CPU count {__NUM_PROC_DEFAULT}.") - - -def display_results(options: Options, result: Result): - if result.unsupported_polyhedron_elements: - logging.error(f"There is/are {len(result.unsupported_polyhedron_elements)} polyhedra that may not be converted to supported elements.") - logging.error(f"The list of the unsupported polyhedra is\n{tuple(sorted(result.unsupported_polyhedron_elements))}.") - else: - logging.info("All the polyhedra (if any) can be converted to supported elements.") - if result.unsupported_std_elements_types: - logging.error(f"There are unsupported vtk standard element types. The list of those vtk types is {tuple(sorted(result.unsupported_std_elements_types))}.") - else: - logging.info("All the standard vtk element types (if any) are supported.") \ No newline at end of file diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/vtk_output_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/vtk_output_parsing.py deleted file mode 100644 index 6e9b7d5d663..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/parsing/vtk_output_parsing.py +++ /dev/null @@ -1,45 +0,0 @@ -import os.path -import logging -import textwrap - -from checks.vtk_utils import VtkOutput - - -__OUTPUT_FILE = "output" -__OUTPUT_BINARY_MODE = "data-mode" -__OUTPUT_BINARY_MODE_VALUES = "binary", "ascii" -__OUTPUT_BINARY_MODE_DEFAULT = __OUTPUT_BINARY_MODE_VALUES[0] - - -def get_vtk_output_help(): - msg = \ - f"""{__OUTPUT_FILE} [string]: The vtk output file destination. - {__OUTPUT_BINARY_MODE} [string]: For ".vtu" output format, the data mode can be {" or ".join(__OUTPUT_BINARY_MODE_VALUES)}. Defaults to {__OUTPUT_BINARY_MODE_DEFAULT}.""" - return textwrap.dedent(msg) - - -def __build_arg(prefix, main): - return "-".join(filter(None, (prefix, main))) - - -def fill_vtk_output_subparser(parser, prefix="") -> None: - parser.add_argument('--' + __build_arg(prefix, __OUTPUT_FILE), - type=str, - required=True, - help=f"[string]: The vtk output file destination.") - parser.add_argument('--' + __build_arg(prefix, __OUTPUT_BINARY_MODE), - type=str, - metavar=", ".join(__OUTPUT_BINARY_MODE_VALUES), - default=__OUTPUT_BINARY_MODE_DEFAULT, - help=f"""[string]: For ".vtu" output format, the data mode can be {" or ".join(__OUTPUT_BINARY_MODE_VALUES)}. 
Defaults to {__OUTPUT_BINARY_MODE_DEFAULT}.""") - - -def convert(parsed_options, prefix="") -> VtkOutput: - output_key = __build_arg(prefix, __OUTPUT_FILE).replace("-", "_") - binary_mode_key = __build_arg(prefix, __OUTPUT_BINARY_MODE).replace("-", "_") - output = parsed_options[output_key] - if parsed_options[binary_mode_key] and os.path.splitext(output)[-1] == ".vtk": - logging.info("VTK data mode will be ignored for legacy file format \"vtk\".") - is_data_mode_binary: bool = parsed_options[binary_mode_key] == __OUTPUT_BINARY_MODE_DEFAULT - return VtkOutput(output=output, - is_data_mode_binary=is_data_mode_binary) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/pyproject.toml b/src/coreComponents/python/modules/geosx_mesh_doctor/pyproject.toml deleted file mode 100644 index 20051350aef..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/pyproject.toml +++ /dev/null @@ -1,15 +0,0 @@ -[tool.pytest.ini_options] -addopts = [ - "--import-mode=importlib", -] -pythonpath = [ - "checks", "parsing", -] - -[tool.mypy] -python_version = "3.11" -warn_return_any = true -warn_unused_configs = true -ignore_missing_imports = true -allow_redefinition = true -plugins = "numpy.typing.mypy_plugin" diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/register.py b/src/coreComponents/python/modules/geosx_mesh_doctor/register.py deleted file mode 100644 index a36001e5fa7..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/register.py +++ /dev/null @@ -1,72 +0,0 @@ -import argparse -import importlib -import logging -from typing import Dict, Callable, Any, Tuple - -import parsing -from parsing import CheckHelper, cli_parsing - - -__HELPERS: Dict[str, Callable[[None], CheckHelper]] = dict() -__CHECKS: Dict[str, Callable[[None], Any]] = dict() - - -def __load_module_check(module_name: str, check_fct="check"): - module = importlib.import_module("checks." + module_name) - return getattr(module, check_fct) - - -def __load_module_check_helper(module_name: str, parsing_fct_suffix="_parsing"): - module = importlib.import_module("parsing." + module_name + parsing_fct_suffix) - return CheckHelper(fill_subparser=module.fill_subparser, - convert=module.convert, - display_results=module.display_results) - - -def __load_checks() -> Dict[str, Callable[[str, Any], Any]]: - """ - Loads all the checks. - This function acts like a protection layer if a module fails to load. - A check that fails to load won't stop the process. - :return: The checks. - """ - loaded_checks: Dict[str, Callable[[str, Any], Any]] = dict() - for check_name, check_provider in __CHECKS.items(): - try: - loaded_checks[check_name] = check_provider() - logging.debug(f"Check \"{check_name}\" is loaded.") - except Exception as e: - logging.warning(f"Could not load module \"{check_name}\": {e}") - return loaded_checks - - -def register() -> Tuple[argparse.ArgumentParser, Dict[str, Callable[[str, Any], Any]], Dict[str, CheckHelper]]: - """ - Register all the parsing checks. Eventually initiate the registration of all the checks too. - :return: The checks and the checks helpers. - """ - parser = cli_parsing.init_parser() - subparsers = parser.add_subparsers(help="Modules", dest="subparsers") - - def closure_trick(cn: str): - __HELPERS[check_name] = lambda: __load_module_check_helper(cn) - __CHECKS[check_name] = lambda: __load_module_check(cn) - # Register the modules to load here. 
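In register.py above, closure_trick exists because the lambdas registered in the loop must capture each check name by value; a bare lambda written directly in the loop body would capture the loop variable itself and resolve it only when called. A standalone illustration of that late-binding behaviour (generic Python, not code taken from the repository):

# Closures capture variables, not the values they held at definition time:
# after the loop finishes, every "naive" lambda sees the last name.
naive = {}
for name in ("collocated_nodes", "element_volumes"):
    naive[name] = lambda: name
assert naive["collocated_nodes"]() == "element_volumes"

# Routing the current value through a function parameter (what closure_trick does)
# freezes it in its own scope, so each lambda keeps its own name.
def make_loader(frozen_name):
    return lambda: frozen_name

fixed = {name: make_loader(name) for name in ("collocated_nodes", "element_volumes")}
assert fixed["collocated_nodes"]() == "collocated_nodes"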
- for check_name in (parsing.COLLOCATES_NODES, - parsing.ELEMENT_VOLUMES, - parsing.FIX_ELEMENTS_ORDERINGS, - parsing.GENERATE_CUBE, - parsing.GENERATE_FRACTURES, - parsing.GENERATE_GLOBAL_IDS, - parsing.NON_CONFORMAL, - parsing.SELF_INTERSECTING_ELEMENTS, - parsing.SUPPORTED_ELEMENTS): - closure_trick(check_name) - loaded_checks: Dict[str, Callable[[str, Any], Any]] = __load_checks() - loaded_checks_helpers: Dict[str, CheckHelper] = dict() - for check_name in loaded_checks.keys(): - h = __HELPERS[check_name]() - h.fill_subparser(subparsers) - loaded_checks_helpers[check_name] = h - logging.debug(f"Parsing for check \"{check_name}\" is loaded.") - return parser, loaded_checks, loaded_checks_helpers diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/requirements.txt b/src/coreComponents/python/modules/geosx_mesh_doctor/requirements.txt deleted file mode 100644 index 4c9b176327d..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -vtk >= 9.1 -networkx >= 2.4 -tqdm -numpy \ No newline at end of file diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/setup.py b/src/coreComponents/python/modules/geosx_mesh_doctor/setup.py deleted file mode 100644 index 1d0b9915f33..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/setup.py +++ /dev/null @@ -1,3 +0,0 @@ -from setuptools import setup, find_packages - -setup(name='mesh_doctor', version='0.0.1', packages=find_packages()) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_cli_parsing.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_cli_parsing.py deleted file mode 100644 index 445b7c92d1c..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_cli_parsing.py +++ /dev/null @@ -1,72 +0,0 @@ -import argparse -from dataclasses import dataclass - -from typing import ( - Iterator, - Sequence, -) - -import pytest - -from checks.vtk_utils import ( - VtkOutput, -) - -from checks.generate_fractures import ( - FracturePolicy, - Options, -) -from parsing.generate_fractures_parsing import ( - convert, - display_results, - fill_subparser, -) - - -@dataclass(frozen=True) -class TestCase: - __test__ = False - cli_args: Sequence[str] - options: Options - exception: bool = False - - -def __generate_generate_fractures_parsing_test_data() -> Iterator[TestCase]: - field: str = "attribute" - main_mesh: str = "output.vtu" - fracture_mesh: str = "fracture.vtu" - - cli_gen: str = f"generate_fractures --policy {{}} --name {field} --values 0,1 --output {main_mesh} --fracture-output {fracture_mesh}" - all_cli_args = cli_gen.format("field").split(), cli_gen.format("internal_surfaces").split(), cli_gen.format("dummy").split() - policies = FracturePolicy.FIELD, FracturePolicy.INTERNAL_SURFACES, FracturePolicy.FIELD - exceptions = False, False, True - for cli_args, policy, exception in zip(all_cli_args, policies, exceptions): - options: Options = Options(policy=policy, field=field, field_values=frozenset((0, 1)), - vtk_output=VtkOutput(output=main_mesh, is_data_mode_binary=True), - vtk_fracture_output=VtkOutput(output=fracture_mesh, is_data_mode_binary=True)) - yield TestCase(cli_args, options, exception) - - -def __f(test_case: TestCase): - parser = argparse.ArgumentParser(description='Testing.') - subparsers = parser.add_subparsers() - fill_subparser(subparsers) - args = parser.parse_args(test_case.cli_args) - options = convert(vars(args)) - assert options.policy == test_case.options.policy - 
assert options.field == test_case.options.field - assert options.field_values == test_case.options.field_values - - -def test_display_results(): - # Dummy test for code coverage only. Shame on me! - display_results(None, None) - - -@pytest.mark.parametrize("test_case", __generate_generate_fractures_parsing_test_data()) -def test(test_case: TestCase): - if test_case.exception: - with pytest.raises(SystemExit): - __f(test_case) - else: - __f(test_case) diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_collocated_nodes.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_collocated_nodes.py deleted file mode 100644 index 6936331d14c..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_collocated_nodes.py +++ /dev/null @@ -1,76 +0,0 @@ -from typing import Iterator, Tuple - -import pytest - -from vtkmodules.vtkCommonCore import ( - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_TETRA, - vtkCellArray, - vtkTetra, - vtkUnstructuredGrid, -) - -from checks.collocated_nodes import Options, __check - - -def get_points() -> Iterator[Tuple[vtkPoints, int]]: - """ - Generates the data for the cases. - One case has two nodes at the exact same position. - The other has two differente nodes - :return: Generator to (vtk points, number of expected duplicated locations) - """ - for p0, p1 in ((0, 0, 0), (1, 1, 1)), ((0, 0, 0), (0, 0, 0)): - points = vtkPoints() - points.SetNumberOfPoints(2) - points.SetPoint(0, p0) - points.SetPoint(1, p1) - num_nodes_bucket = 1 if p0 == p1 else 0 - yield points, num_nodes_bucket - - -@pytest.mark.parametrize("data", get_points()) -def test_simple_collocated_points(data: Tuple[vtkPoints, int]): - points, num_nodes_bucket = data - - mesh = vtkUnstructuredGrid() - mesh.SetPoints(points) - - result = __check(mesh, Options(tolerance=1.e-12)) - - assert len(result.wrong_support_elements) == 0 - assert len(result.nodes_buckets) == num_nodes_bucket - if num_nodes_bucket == 1: - assert len(result.nodes_buckets[0]) == points.GetNumberOfPoints() - - -def test_wrong_support_elements(): - points = vtkPoints() - points.SetNumberOfPoints(4) - points.SetPoint(0, (0, 0, 0)) - points.SetPoint(1, (1, 0, 0)) - points.SetPoint(2, (0, 1, 0)) - points.SetPoint(3, (0, 0, 1)) - - cell_types = [VTK_TETRA] - cells = vtkCellArray() - cells.AllocateExact(1, 4) - - tet = vtkTetra() - tet.GetPointIds().SetId(0, 0) - tet.GetPointIds().SetId(1, 1) - tet.GetPointIds().SetId(2, 2) - tet.GetPointIds().SetId(3, 0) # Intentionally wrong - cells.InsertNextCell(tet) - - mesh = vtkUnstructuredGrid() - mesh.SetPoints(points) - mesh.SetCells(cell_types, cells) - - result = __check(mesh, Options(tolerance=1.e-12)) - - assert len(result.nodes_buckets) == 0 - assert len(result.wrong_support_elements) == 1 - assert result.wrong_support_elements[0] == 0 diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_element_volumes.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_element_volumes.py deleted file mode 100644 index e37c22c60c0..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_element_volumes.py +++ /dev/null @@ -1,48 +0,0 @@ -import numpy - -from vtkmodules.vtkCommonCore import ( - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_TETRA, - vtkCellArray, - vtkTetra, - vtkUnstructuredGrid, -) - -from checks.element_volumes import Options, __check - - -def test_simple_tet(): - # creating a simple tetrahedron - points = vtkPoints() - 
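# The four points below form the unit right tetrahedron, whose volume is 1/6,
# so the assertions further down expect one flagged element for min_volume=1
# and none for min_volume=0.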
points.SetNumberOfPoints(4) - points.SetPoint(0, (0, 0, 0)) - points.SetPoint(1, (1, 0, 0)) - points.SetPoint(2, (0, 1, 0)) - points.SetPoint(3, (0, 0, 1)) - - cell_types = [VTK_TETRA] - cells = vtkCellArray() - cells.AllocateExact(1, 4) - - tet = vtkTetra() - tet.GetPointIds().SetId(0, 0) - tet.GetPointIds().SetId(1, 1) - tet.GetPointIds().SetId(2, 2) - tet.GetPointIds().SetId(3, 3) - cells.InsertNextCell(tet) - - mesh = vtkUnstructuredGrid() - mesh.SetPoints(points) - mesh.SetCells(cell_types, cells) - - result = __check(mesh, Options(min_volume=1.)) - - assert len(result.element_volumes) == 1 - assert result.element_volumes[0][0] == 0 - assert abs(result.element_volumes[0][1] - 1./6.) < 10 * numpy.finfo(float).eps - - result = __check(mesh, Options(min_volume=0.)) - - assert len(result.element_volumes) == 0 diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_cube.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_cube.py deleted file mode 100644 index 4d93abdd280..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_cube.py +++ /dev/null @@ -1,24 +0,0 @@ -from checks.generate_cube import __build, Options, FieldInfo - - -def test_generate_cube(): - options = Options( - vtk_output=None, - generate_cells_global_ids=True, - generate_points_global_ids=False, - xs=(0, 5, 10), - ys=(0, 4, 8), - zs=(0, 1), - nxs=(5, 2), - nys=(1, 1), - nzs=(1,), - fields=( - FieldInfo(name="test", dimension=2, support="CELLS"), - ) - ) - output = __build(options) - assert output.GetNumberOfCells() == 14 - assert output.GetNumberOfPoints() == 48 - assert output.GetCellData().GetArray("test").GetNumberOfComponents() == 2 - assert output.GetCellData().GetGlobalIds() - assert not output.GetPointData().GetGlobalIds() diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_fractures.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_fractures.py deleted file mode 100644 index f197731c1d9..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_fractures.py +++ /dev/null @@ -1,262 +0,0 @@ -from dataclasses import dataclass - -from typing import ( - Tuple, - Iterable, - Iterator, - Sequence, -) - -import numpy - -import pytest - -from vtkmodules.vtkCommonDataModel import ( - vtkUnstructuredGrid, - VTK_HEXAHEDRON, - VTK_POLYHEDRON, - VTK_QUAD, -) -from vtkmodules.util.numpy_support import ( - numpy_to_vtk, -) - -from checks.vtk_utils import ( - to_vtk_id_list, -) - -from checks.check_fractures import format_collocated_nodes -from checks.generate_cube import build_rectilinear_blocks_mesh, XYZ -from checks.generate_fractures import __split_mesh_on_fracture, Options, FracturePolicy - - -@dataclass(frozen=True) -class TestResult: - __test__ = False - main_mesh_num_points: int - main_mesh_num_cells: int - fracture_mesh_num_points: int - fracture_mesh_num_cells: int - - -@dataclass(frozen=True) -class TestCase: - __test__ = False - input_mesh: vtkUnstructuredGrid - options: Options - collocated_nodes: Sequence[Sequence[int]] - result: TestResult - - -def __build_test_case(xs: Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray], - attribute: Iterable[int], - field_values: Iterable[int] = None, - policy: FracturePolicy = FracturePolicy.FIELD): - xyz = XYZ(*xs) - - mesh: vtkUnstructuredGrid = build_rectilinear_blocks_mesh((xyz, )) - - ref = numpy.array(attribute, dtype=int) - if policy == FracturePolicy.FIELD: - assert len(ref) == 
mesh.GetNumberOfCells() - attr = numpy_to_vtk(ref) - attr.SetName("attribute") - mesh.GetCellData().AddArray(attr) - - if field_values is None: - fv = frozenset(attribute) - else: - fv = frozenset(field_values) - - options = Options(policy=policy, - field="attribute", - field_values=fv, - vtk_output=None, - vtk_fracture_output=None) - return mesh, options - - -# Utility class to generate the new indices of the newly created collocated nodes. -class Incrementor: - def __init__(self, start): - self.__val = start - - def next(self, num: int) -> Iterable[int]: - self.__val += num - return range(self.__val - num, self.__val) - - -def __generate_test_data() -> Iterator[TestCase]: - two_nodes = numpy.arange(2, dtype=float) - three_nodes = numpy.arange(3, dtype=float) - four_nodes = numpy.arange(4, dtype=float) - - # Split in 2 - mesh, options = __build_test_case((three_nodes, three_nodes, three_nodes), (0, 1, 0, 1, 0, 1, 0, 1)) - yield TestCase(input_mesh=mesh, options=options, - collocated_nodes=tuple(map(lambda i: (1 + 3 * i, 27 + i), range(9))), - result=TestResult(9 * 4, 8, 9, 4)) - - # Split in 3 - inc = Incrementor(27) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, *inc.next(1)), - (3, *inc.next(1)), - (4, *inc.next(2)), - (7, *inc.next(1)), - (1 + 9, *inc.next(1)), - (3 + 9, *inc.next(1)), - (4 + 9, *inc.next(2)), - (7 + 9, *inc.next(1)), - (1 + 18, *inc.next(1)), - (3 + 18, *inc.next(1)), - (4 + 18, *inc.next(2)), - (7 + 18, *inc.next(1)), - ) - mesh, options = __build_test_case((three_nodes, three_nodes, three_nodes), (0, 1, 2, 1, 0, 1, 2, 1)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(9 * 4 + 6, 8, 12, 6)) - - # Split in 8 - inc = Incrementor(27) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, *inc.next(1)), - (3, *inc.next(1)), - (4, *inc.next(3)), - (5, *inc.next(1)), - (7, *inc.next(1)), - (0 + 9, *inc.next(1)), - (1 + 9, *inc.next(3)), - (2 + 9, *inc.next(1)), - (3 + 9, *inc.next(3)), - (4 + 9, *inc.next(7)), - (5 + 9, *inc.next(3)), - (6 + 9, *inc.next(1)), - (7 + 9, *inc.next(3)), - (8 + 9, *inc.next(1)), - (1 + 18, *inc.next(1)), - (3 + 18, *inc.next(1)), - (4 + 18, *inc.next(3)), - (5 + 18, *inc.next(1)), - (7 + 18, *inc.next(1)), - ) - mesh, options = __build_test_case((three_nodes, three_nodes, three_nodes), range(8)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(8 * 8, 8, 3 * 3 * 3 - 8, 12)) - - # Straight notch - inc = Incrementor(27) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, *inc.next(1)), - (4,), - (1 + 9, *inc.next(1)), - (4 + 9,), - (1 + 18, *inc.next(1)), - (4 + 18,), - ) - mesh, options = __build_test_case((three_nodes, three_nodes, three_nodes), (0, 1, 2, 2, 0, 1, 2, 2), field_values=(0, 1)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(3 * 3 * 3 + 3, 8, 6, 2)) - - # L-shaped notch - inc = Incrementor(27) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, *inc.next(1)), - (4, *inc.next(1)), - (7, *inc.next(1)), - (1 + 9, *inc.next(1)), - (4 + 9,), - (7 + 9,), - (1 + 18, *inc.next(1)), - (4 + 18,), - ) - mesh, options = __build_test_case((three_nodes, three_nodes, three_nodes), (0, 1, 0, 1, 0, 1, 2, 2), field_values=(0, 1)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(3 * 3 * 3 + 5, 8, 8, 3)) - - # 3x1x1 split - inc = Incrementor(2 * 2 * 4) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, 
*inc.next(1)), - (2, *inc.next(1)), - (5, *inc.next(1)), - (6, *inc.next(1)), - (1 + 8, *inc.next(1)), - (2 + 8, *inc.next(1)), - (5 + 8, *inc.next(1)), - (6 + 8, *inc.next(1)), - ) - mesh, options = __build_test_case((four_nodes, two_nodes, two_nodes), (0, 1, 2)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(6 * 4, 3, 2 * 4, 2)) - - # Discarded fracture element if no node duplication. - collocated_nodes: Sequence[Sequence[int]] = () - mesh, options = __build_test_case((three_nodes, four_nodes, four_nodes), [0, ] * 8 + [1, 2] + [0, ] * 8, field_values=(1, 2)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(3 * 4 * 4, 2 * 3 * 3, 0, 0)) - - # Fracture on a corner - inc = Incrementor(3 * 4 * 4) - collocated_nodes: Sequence[Sequence[int]] = ( - (1 + 12,), - (4 + 12,), - (7 + 12,), - (1 + 12 * 2, *inc.next(1)), - (4 + 12 * 2, *inc.next(1)), - (7 + 12 * 2,), - (1 + 12 * 3, *inc.next(1)), - (4 + 12 * 3, *inc.next(1)), - (7 + 12 * 3,), - ) - mesh, options = __build_test_case((three_nodes, four_nodes, four_nodes), [0, ] * 6 + [1, 2, 1, 2, 0, 0, 1, 2, 1, 2, 0, 0], field_values=(1, 2)) - yield TestCase(input_mesh=mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(3 * 4 * 4 + 4, 2 * 3 * 3, 9, 4)) - - # Generate mesh with 2 hexs, one being a standard hex, the other a 42 hex. - inc = Incrementor(3 * 2 * 2) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, *inc.next(1)), - (1 + 3, *inc.next(1)), - (1 + 6, *inc.next(1)), - (1 + 9, *inc.next(1)), - ) - mesh, options = __build_test_case((three_nodes, two_nodes, two_nodes), (0, 1)) - polyhedron_mesh = vtkUnstructuredGrid() - polyhedron_mesh.SetPoints(mesh.GetPoints()) - polyhedron_mesh.Allocate(2) - polyhedron_mesh.InsertNextCell(VTK_HEXAHEDRON, to_vtk_id_list((1, 2, 5, 4, 7, 8, 10, 11))) - poly = to_vtk_id_list([6] + [4, 0, 1, 7, 6] + [4, 1, 4, 10, 7] + [4, 4, 3, 9, 10] + [4, 3, 0, 6, 9] + [4, 6, 7, 10, 9] + [4, 1, 0, 3, 4]) - polyhedron_mesh.InsertNextCell(VTK_POLYHEDRON, poly) - polyhedron_mesh.GetCellData().AddArray(mesh.GetCellData().GetArray("attribute")) - - yield TestCase(input_mesh=polyhedron_mesh, options=options, collocated_nodes=collocated_nodes, - result=TestResult(4 * 4, 2, 4, 1)) - - # Split in 2 using the internal fracture description - inc = Incrementor(3 * 2 * 2) - collocated_nodes: Sequence[Sequence[int]] = ( - (1, *inc.next(1)), - (1 + 3, *inc.next(1)), - (1 + 6, *inc.next(1)), - (1 + 9, *inc.next(1)), - ) - mesh, options = __build_test_case((three_nodes, two_nodes, two_nodes), attribute=(0, 0, 0), field_values=(0,), - policy=FracturePolicy.INTERNAL_SURFACES) - mesh.InsertNextCell(VTK_QUAD, to_vtk_id_list((1, 4, 7, 10))) # Add a fracture on the fly - yield TestCase(input_mesh=mesh, options=options, - collocated_nodes=collocated_nodes, - result=TestResult(4 * 4, 3, 4, 1)) - - -@pytest.mark.parametrize("test_case", __generate_test_data()) -def test_generate_fracture(test_case: TestCase): - main_mesh, fracture_mesh = __split_mesh_on_fracture(test_case.input_mesh, test_case.options) - assert main_mesh.GetNumberOfPoints() == test_case.result.main_mesh_num_points - assert main_mesh.GetNumberOfCells() == test_case.result.main_mesh_num_cells - assert fracture_mesh.GetNumberOfPoints() == test_case.result.fracture_mesh_num_points - assert fracture_mesh.GetNumberOfCells() == test_case.result.fracture_mesh_num_cells - - res = format_collocated_nodes(fracture_mesh) - assert res == 
test_case.collocated_nodes - assert len(res) == test_case.result.fracture_mesh_num_points diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_global_ids.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_global_ids.py deleted file mode 100644 index 5dc7c1bad4a..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_generate_global_ids.py +++ /dev/null @@ -1,32 +0,0 @@ -from vtkmodules.vtkCommonCore import ( - vtkPoints, ) -from vtkmodules.vtkCommonDataModel import ( - VTK_VERTEX, - vtkCellArray, - vtkUnstructuredGrid, - vtkVertex, -) - -from checks.generate_global_ids import __build_global_ids - - -def test_generate_global_ids(): - points = vtkPoints() - points.InsertNextPoint(0, 0, 0) - - vertex = vtkVertex() - vertex.GetPointIds().SetId(0, 0) - - vertices = vtkCellArray() - vertices.InsertNextCell(vertex) - - mesh = vtkUnstructuredGrid() - mesh.SetPoints(points) - mesh.SetCells([VTK_VERTEX], vertices) - - __build_global_ids(mesh, True, True) - - global_cell_ids = mesh.GetCellData().GetGlobalIds() - global_point_ids = mesh.GetPointData().GetGlobalIds() - assert global_cell_ids.GetNumberOfValues() == 1 - assert global_point_ids.GetNumberOfValues() == 1 diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_non_conformal.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_non_conformal.py deleted file mode 100644 index bcf60fe962b..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_non_conformal.py +++ /dev/null @@ -1,67 +0,0 @@ -import numpy - -from checks.non_conformal import Options, __check -from checks.generate_cube import ( - build_rectilinear_blocks_mesh, - XYZ, -) - - -def test_two_close_hexs(): - delta = 1.e-6 - tmp = numpy.arange(2, dtype=float) - xyz0 = XYZ(tmp, tmp, tmp) - xyz1 = XYZ(tmp + 1 + delta, tmp, tmp) - mesh = build_rectilinear_blocks_mesh((xyz0, xyz1)) - - # Close enough, but points tolerance is too strict to consider the faces matching. - options = Options(angle_tolerance=1., point_tolerance=delta / 2, face_tolerance=delta * 2) - results = __check(mesh, options) - assert len(results.non_conformal_cells) == 1 - assert set(results.non_conformal_cells[0]) == {0, 1} - - # Close enough, and points tolerance is loose enough to consider the faces matching. - options = Options(angle_tolerance=1., point_tolerance=delta * 2, face_tolerance=delta * 2) - results = __check(mesh, options) - assert len(results.non_conformal_cells) == 0 - - -def test_two_distant_hexs(): - delta = 1 - tmp = numpy.arange(2, dtype=float) - xyz0 = XYZ(tmp, tmp, tmp) - xyz1 = XYZ(tmp + 1 + delta, tmp, tmp) - mesh = build_rectilinear_blocks_mesh((xyz0, xyz1)) - - options = Options(angle_tolerance=1., point_tolerance=delta / 2., face_tolerance=delta / 2.) 
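# With delta = 1 the two unit-cube blocks are a full cell width apart, so a
# face_tolerance of delta / 2 should not pair their facing faces and the check
# below is expected to report no non-conformal cells.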
- - results = __check(mesh, options) - assert len(results.non_conformal_cells) == 0 - - -def test_two_close_shifted_hexs(): - delta_x, delta_y = 1.e-6, 0.5 - tmp = numpy.arange(2, dtype=float) - xyz0 = XYZ(tmp, tmp, tmp) - xyz1 = XYZ(tmp + 1 + delta_x, tmp + delta_y, tmp + delta_y) - mesh = build_rectilinear_blocks_mesh((xyz0, xyz1)) - - options = Options(angle_tolerance=1., point_tolerance=delta_x * 2, face_tolerance=delta_x * 2) - - results = __check(mesh, options) - assert len(results.non_conformal_cells) == 1 - assert set(results.non_conformal_cells[0]) == {0, 1} - - -def test_big_elem_next_to_small_elem(): - delta = 1.e-6 - tmp = numpy.arange(2, dtype=float) - xyz0 = XYZ(tmp, tmp + 1, tmp + 1) - xyz1 = XYZ(3 * tmp + 1 + delta, 3 * tmp, 3 * tmp) - mesh = build_rectilinear_blocks_mesh((xyz0, xyz1)) - - options = Options(angle_tolerance=1., point_tolerance=delta * 2, face_tolerance=delta * 2) - - results = __check(mesh, options) - assert len(results.non_conformal_cells) == 1 - assert set(results.non_conformal_cells[0]) == {0, 1} diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_reorient_mesh.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_reorient_mesh.py deleted file mode 100644 index 1136bbb7704..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_reorient_mesh.py +++ /dev/null @@ -1,109 +0,0 @@ -from dataclasses import dataclass -from typing import Generator - -import pytest - -from vtkmodules.vtkCommonCore import ( - vtkIdList, - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_POLYHEDRON, - vtkUnstructuredGrid, -) - -import numpy - -from checks.reorient_mesh import reorient_mesh -from checks.vtk_polyhedron import FaceStream -from checks.vtk_utils import ( - to_vtk_id_list, - vtk_iter, -) - - -@dataclass(frozen=True) -class Expected: - mesh: vtkUnstructuredGrid - face_stream: FaceStream - - -def __build_test_meshes() -> Generator[Expected, None, None]: - # Creating the support nodes for the polyhedron. - # It has a C shape and is actually non-convex, non star-shaped. - front_nodes = numpy.array(( - (0, 0, 0), - (3, 0, 0), - (3, 1, 0), - (1, 1, 0), - (1, 2, 0), - (3, 2, 0), - (3, 3, 0), - (0, 3, 0), - ), dtype=float) - front_nodes = numpy.array(front_nodes, dtype=float) - back_nodes = front_nodes - (0., 0., 1.) - - n = len(front_nodes) - - points = vtkPoints() - points.Allocate(2 * n) - for coords in front_nodes: - points.InsertNextPoint(coords) - for coords in back_nodes: - points.InsertNextPoint(coords) - - # Creating the polyhedron with faces all directed outward. - faces = [] - # Creating the side faces - for i in range(n): - faces.append( - (i % n + n, (i + 1) % n + n, (i + 1) % n, i % n) - ) - # Creating the front faces - faces.append(tuple(range(n))) - faces.append(tuple(reversed(range(n, 2 * n)))) - face_stream = FaceStream(faces) - - # Creating multiple meshes, each time with one unique polyhedron, - # but with different "face flip status". - # First case, no face is flipped. - mesh = vtkUnstructuredGrid() - mesh.Allocate(1) - mesh.SetPoints(points) - mesh.InsertNextCell(VTK_POLYHEDRON, to_vtk_id_list( - face_stream.dump() - )) - yield Expected(mesh=mesh, face_stream=face_stream) - - # Here, two faces are flipped. - mesh = vtkUnstructuredGrid() - mesh.Allocate(1) - mesh.SetPoints(points) - mesh.InsertNextCell(VTK_POLYHEDRON, to_vtk_id_list( - face_stream.flip_faces((1, 2)).dump() - )) - yield Expected(mesh=mesh, face_stream=face_stream) - - # Last, all faces are flipped. 
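# Even with every face flipped inward, reorient_mesh is expected to recover the
# reference outward-oriented face stream checked in test_reorient_polyhedron below.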
- mesh = vtkUnstructuredGrid() - mesh.Allocate(1) - mesh.SetPoints(points) - mesh.InsertNextCell(VTK_POLYHEDRON, to_vtk_id_list( - face_stream.flip_faces(range(len(faces))).dump() - )) - yield Expected(mesh=mesh, face_stream=face_stream) - - -@pytest.mark.parametrize("expected", __build_test_meshes()) -def test_reorient_polyhedron(expected: Expected): - output_mesh = reorient_mesh(expected.mesh, range(expected.mesh.GetNumberOfCells())) - assert output_mesh.GetNumberOfCells() == 1 - assert output_mesh.GetCell(0).GetCellType() == VTK_POLYHEDRON - face_stream_ids = vtkIdList() - output_mesh.GetFaceStream(0, face_stream_ids) - # Note that the following makes a raw (but simple) check. - # But one may need to be more precise some day, - # since triangular faces (0, 1, 2) and (1, 2, 0) should be considered as equivalent. - # And the current simpler check does not consider this case. - assert tuple(vtk_iter(face_stream_ids)) == expected.face_stream.dump() diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_self_intersecting_elements.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_self_intersecting_elements.py deleted file mode 100644 index 8993e68bf48..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_self_intersecting_elements.py +++ /dev/null @@ -1,50 +0,0 @@ -from vtkmodules.vtkCommonCore import ( - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_HEXAHEDRON, - vtkCellArray, - vtkHexahedron, - vtkUnstructuredGrid, -) - - -from checks.self_intersecting_elements import Options, __check - - -def test_jumbled_hex(): - # creating a simple hexahedron - points = vtkPoints() - points.SetNumberOfPoints(8) - points.SetPoint(0, (0, 0, 0)) - points.SetPoint(1, (1, 0, 0)) - points.SetPoint(2, (1, 1, 0)) - points.SetPoint(3, (0, 1, 0)) - points.SetPoint(4, (0, 0, 1)) - points.SetPoint(5, (1, 0, 1)) - points.SetPoint(6, (1, 1, 1)) - points.SetPoint(7, (0, 1, 1)) - - cell_types = [VTK_HEXAHEDRON] - cells = vtkCellArray() - cells.AllocateExact(1, 8) - - hex = vtkHexahedron() - hex.GetPointIds().SetId(0, 0) - hex.GetPointIds().SetId(1, 1) - hex.GetPointIds().SetId(2, 3) # Intentionally wrong - hex.GetPointIds().SetId(3, 2) # Intentionally wrong - hex.GetPointIds().SetId(4, 4) - hex.GetPointIds().SetId(5, 5) - hex.GetPointIds().SetId(6, 6) - hex.GetPointIds().SetId(7, 7) - cells.InsertNextCell(hex) - - mesh = vtkUnstructuredGrid() - mesh.SetPoints(points) - mesh.SetCells(cell_types, cells) - - result = __check(mesh, Options(tolerance=0.)) - - assert len(result.intersecting_faces_elements) == 1 - assert result.intersecting_faces_elements[0] == 0 diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_supported_elements.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_supported_elements.py deleted file mode 100644 index 639d9043d8b..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_supported_elements.py +++ /dev/null @@ -1,125 +0,0 @@ -import os -from typing import Tuple - -import pytest - -from vtkmodules.vtkCommonCore import ( - vtkIdList, - vtkPoints, -) -from vtkmodules.vtkCommonDataModel import ( - VTK_POLYHEDRON, - vtkUnstructuredGrid, -) - -from checks.supported_elements import Options, check, __check -from checks.vtk_polyhedron import parse_face_stream, build_face_to_face_connectivity_through_edges, FaceStream -from checks.vtk_utils import ( - to_vtk_id_list, -) - - -@pytest.mark.parametrize("base_name", - ("supportedElements.vtk", 
"supportedElementsAsVTKPolyhedra.vtk")) -def test_supported_elements(base_name) -> None: - """ - Testing that the supported elements are properly detected as supported! - :param base_name: Supported elements are provided as standard elements or polyhedron elements. - """ - directory = os.path.dirname(os.path.realpath(__file__)) - supported_elements_file_name = os.path.join(directory, "../../../../unitTests/meshTests", base_name) - options = Options(chunk_size=1, num_proc=4) - result = check(supported_elements_file_name, options) - assert not result.unsupported_std_elements_types - assert not result.unsupported_polyhedron_elements - - -def make_dodecahedron() -> Tuple[vtkPoints, vtkIdList]: - """ - Returns the points and faces for a dodecahedron. - This code was adapted from an official vtk example. - :return: The tuple of points and faces (as vtk instances). - """ - points = ( - (1.21412, 0, 1.58931), - (0.375185, 1.1547, 1.58931), - (-0.982247, 0.713644, 1.58931), - (-0.982247, -0.713644, 1.58931), - (0.375185, -1.1547, 1.58931), - (1.96449, 0, 0.375185), - (0.607062, 1.86835, 0.375185), - (-1.58931, 1.1547, 0.375185), - (-1.58931, -1.1547, 0.375185), - (0.607062, -1.86835, 0.375185), - (1.58931, 1.1547, -0.375185), - (-0.607062, 1.86835, -0.375185), - (-1.96449, 0, -0.375185), - (-0.607062, -1.86835, -0.375185), - (1.58931, -1.1547, -0.375185), - (0.982247, 0.713644, -1.58931), - (-0.375185, 1.1547, -1.58931), - (-1.21412, 0, -1.58931), - (-0.375185, -1.1547, -1.58931), - (0.982247, -0.713644, -1.58931) - ) - - faces = (12, # number of faces - 5, 0, 1, 2, 3, 4, # number of ids on face, ids - 5, 0, 5, 10, 6, 1, - 5, 1, 6, 11, 7, 2, - 5, 2, 7, 12, 8, 3, - 5, 3, 8, 13, 9, 4, - 5, 4, 9, 14, 5, 0, - 5, 15, 10, 5, 14, 19, - 5, 16, 11, 6, 10, 15, - 5, 17, 12, 7, 11, 16, - 5, 18, 13, 8, 12, 17, - 5, 19, 14, 9, 13, 18, - 5, 19, 18, 17, 16, 15) - - p = vtkPoints() - p.Allocate(len(points)) - for coords in points: - p.InsertNextPoint(coords) - - f = to_vtk_id_list(faces) - - return p, f - - -def test_dodecahedron() -> None: - """ - Tests that a dodecahedron is not supported by GEOSX. 
- """ - points, faces = make_dodecahedron() - mesh = vtkUnstructuredGrid() - mesh.Allocate(1) - mesh.SetPoints(points) - mesh.InsertNextCell(VTK_POLYHEDRON, faces) - - result = __check(mesh, Options(num_proc=1, chunk_size=1)) - assert set(result.unsupported_polyhedron_elements) == {0} - assert not result.unsupported_std_elements_types - - -def test_parse_face_stream() -> None: - _, faces = make_dodecahedron() - result = parse_face_stream(faces) - expected = ( - (0, 1, 2, 3, 4), - (0, 5, 10, 6, 1), - (1, 6, 11, 7, 2), - (2, 7, 12, 8, 3), - (3, 8, 13, 9, 4), - (4, 9, 14, 5, 0), - (15, 10, 5, 14, 19), - (16, 11, 6, 10, 15), - (17, 12, 7, 11, 16), - (18, 13, 8, 12, 17), - (19, 14, 9, 13, 18), - (19, 18, 17, 16, 15) - ) - assert result == expected - face_stream = FaceStream.build_from_vtk_id_list(faces) - assert face_stream.num_faces == 12 - assert face_stream.num_support_points == 20 diff --git a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_triangle_distance.py b/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_triangle_distance.py deleted file mode 100644 index 605169b644f..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_doctor/tests/test_triangle_distance.py +++ /dev/null @@ -1,178 +0,0 @@ -from dataclasses import dataclass - -import numpy -from numpy.linalg import norm -import pytest - -from checks.triangle_distance import distance_between_two_segments, distance_between_two_triangles - - -@dataclass(frozen=True) -class ExpectedSeg: - p0: numpy.array - u0: numpy.array - p1: numpy.array - u1: numpy.array - x: numpy.array - y: numpy.array - - @classmethod - def from_tuples(cls, p0, u0, p1, u1, x, y): - return cls( - numpy.array(p0), - numpy.array(u0), - numpy.array(p1), - numpy.array(u1), - numpy.array(x), - numpy.array(y) - ) - - -def __get_segments_references(): - # Node to node configuration. - yield ExpectedSeg.from_tuples( - p0=(0., 0., 0.), - u0=(1., 0., 0.), - p1=(2., 0., 0.), - u1=(1., 0., 0.), - x=(1., 0., 0.), - y=(2., 0., 0.), - ) - # Node to edge configuration. - yield ExpectedSeg.from_tuples( - p0=(0., 0., 0.), - u0=(1., 0., 0.), - p1=(2., -1., -1.), - u1=(0., 1., 1.), - x=(1., 0., 0.), - y=(2., 0., 0.), - ) - # Edge to edge configuration. - yield ExpectedSeg.from_tuples( - p0=(0., 0., -1.), - u0=(0., 0., 2.), - p1=(1., -1., -1.), - u1=(0., 2., 2.), - x=(0., 0., 0.), - y=(1., 0., 0.), - ) - # Example from "On fast computation of distance between line segments" by Vladimir J. Lumelsky. - # Information Processing Letters, Vol. 21, number 2, pages 55-61, 08/16/1985. - # It's a node to edge configuration. - yield ExpectedSeg.from_tuples( - p0=(0., 0., 0.), - u0=(1., 2., 1.), - p1=(1., 0., 0.), - u1=(1., 1., 0.), - x=(1./6., 2./6., 1./6.), - y=(1., 0., 0.), - ) - # Overlapping edges. - yield ExpectedSeg.from_tuples( - p0=(0., 0., 0.), - u0=(2., 0., 0.), - p1=(1., 0., 0.), - u1=(2., 0., 0.), - x=(0., 0., 0.), - y=(0., 0., 0.), - ) - # Crossing edges. - yield ExpectedSeg.from_tuples( - p0=(0., 0., 0.), - u0=(2., 0., 0.), - p1=(1., -1., 0.), - u1=(0., 2., 0.), - x=(0., 0., 0.), - y=(0., 0., 0.), - ) - - -@pytest.mark.parametrize("expected", __get_segments_references()) -def test_segments(expected: ExpectedSeg): - eps = numpy.finfo(float).eps - x, y = distance_between_two_segments(expected.p0, expected.u0, expected.p1, expected.u1) - if norm(expected.x - expected.y) == 0: - assert norm(x - y) == 0. 
- else: - assert norm(expected.x - x) < eps - assert norm(expected.y - y) < eps - - -@dataclass(frozen=True) -class ExpectedTri: - t0: numpy.array - t1: numpy.array - d: float - p0: numpy.array - p1: numpy.array - - @classmethod - def from_tuples(cls, t0, t1, d, p0, p1): - return cls( - numpy.array(t0), - numpy.array(t1), - float(d), - numpy.array(p0), - numpy.array(p1) - ) - - -def __get_triangles_references(): - # Node to node configuration. - yield ExpectedTri.from_tuples( - t0=((0., 0., 0.), (1., 0., 0.), (0., 1., 1.)), - t1=((2., 0., 0.), (3., 0., 0.), (2., 1., 1.)), - d=1., - p0=(1., 0., 0.), - p1=(2., 0., 0.) - ) - # Node to edge configuration. - yield ExpectedTri.from_tuples( - t0=((0., 0., 0.), (1., 0., 0.), (0., 1., 1.)), - t1=((2., -1., 0.), (3., 0., 0.), (2., 1., 0.)), - d=1., - p0=(1., 0., 0.), - p1=(2., 0., 0.) - ) - # Edge to edge configuration. - yield ExpectedTri.from_tuples( - t0=((0., 0., 0.), (1., 1., 1.), (1., -1., -1.)), - t1=((2., -1., 0.), (2., 1., 0.), (3., 0., 0.)), - d=1., - p0=(1., 0., 0.), - p1=(2., 0., 0.) - ) - # Point to face configuration. - yield ExpectedTri.from_tuples( - t0=((0., 0., 0.), (1., 0., 0.), (0., 1., 1.)), - t1=((2., -1., 0.), (2., 1., -1.), (2, 1., 1.)), - d=1., - p0=(1., 0., 0.), - p1=(2., 0., 0.) - ) - # Same triangles configuration. - yield ExpectedTri.from_tuples( - t0=((0., 0., 0.), (1., 0., 0.), (0., 1., 1.)), - t1=((0., 0., 0.), (1., 0., 0.), (0., 1., 1.)), - d=0., - p0=(0., 0., 0.), - p1=(0., 0., 0.) - ) - # Crossing triangles configuration. - yield ExpectedTri.from_tuples( - t0=((0., 0., 0.), (2., 0., 0.), (2., 0., 1.)), - t1=((1., -1., 0.), (1., 1., 0.), (1., 1., 1.)), - d=0., - p0=(0., 0., 0.), - p1=(0., 0., 0.) - ) - - -@pytest.mark.parametrize("expected", __get_triangles_references()) -def test_triangles(expected: ExpectedTri): - eps = numpy.finfo(float).eps - d, p0, p1 = distance_between_two_triangles(expected.t0, expected.t1) - assert abs(d - expected.d) < eps - if d != 0: - assert norm(p0 - expected.p0) < eps - assert norm(p1 - expected.p1) < eps diff --git a/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/__init__.py b/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/abaqus_converter.py b/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/abaqus_converter.py deleted file mode 100644 index 14f62992d92..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/abaqus_converter.py +++ /dev/null @@ -1,170 +0,0 @@ -import meshio # type: ignore[import] -from meshio._mesh import CellBlock # type: ignore[import] -import numpy as np -import logging - - -def convert_abaqus_to_gmsh(input_mesh: str, output_mesh: str, logger: logging.Logger = None) -> int: - """ - Convert an abaqus mesh to gmsh 2 format, preserving nodeset information. - - If the code encounters any issues with region/element indices, - the conversion will attempt to continue, with errors - indicated by -1 values in the output file. 
- - Args: - input_mesh (str): path of the input abaqus file - output_mesh (str): path of the output gmsh file - logger (logging.Logger): an instance of logging.Logger - - Returns: - int: Number of potential warnings encountered during conversion - """ - # Initialize the logger if it is empty - if not logger: - logging.basicConfig(level=logging.WARNING) - logger = logging.getLogger(__name__) - - # Keep track of the number of warnings - n_warnings = 0 - - # Load the mesh - logger.info('Reading abaqus mesh...') - mesh = meshio.read(input_mesh, file_format="abaqus") - - # Convert the element regions to tags - logger.info('Converting region tags...') - region_list = list(mesh.cell_sets.keys()) - n_regions = len(region_list) - cell_ids = [] - for block_id, block in enumerate(mesh.cells): - cell_ids.append(np.zeros(len(block[1]), dtype=int) - 1) - for region_id, region in enumerate(region_list): - mesh.field_data[region] = [region_id + 1, 3] - cell_ids[block_id][mesh.cell_sets[region][block_id]] = region_id + 1 - - # Check for bad element region conversions - if (-1 in cell_ids[-1]): - logger.warning('Some element regions in block %i did not convert correctly to tags!' % (block_id)) - logger.warning('Note: These will be indicated by a -1 in the output file.') - n_warnings += 1 - - # Add to the meshio datastructure - # Note: the copy here is required, so that later appends - # do not break these dicts - mesh.cell_data['gmsh:physical'] = cell_ids.copy() - mesh.cell_data['gmsh:geometrical'] = cell_ids.copy() - - # Build the face elements - logger.info('Converting nodesets to face elements, tags...') - new_tris, tri_nodeset, tri_region = [], [], [] - new_quads, quad_nodeset, quad_region = [], [], [] - - for nodeset_id, nodeset_name in enumerate(mesh.point_sets): - logger.info(' %s' % (nodeset_name)) - mesh.field_data[nodeset_name] = [nodeset_id + n_regions + 1, 2] - nodeset = mesh.point_sets[nodeset_name] - - # Search by block, then element - for block_id, block in enumerate(mesh.cells): - for element_id, element in enumerate(block[1]): - # Find any matching nodes - matching_nodes = [x for x in element if x in nodeset] - - # Add a new face element if there are enough nodes - n_matching = len(matching_nodes) - if (n_matching >= 3): - # Find the region - region_id = -1 - for region in region_list: - if (element_id in mesh.cell_sets[region][block_id]): - region_id = mesh.field_data[region][block_id] - - # Test to see if the element is a quad or triangle - tag_id = mesh.field_data[nodeset_name][0] - if (n_matching == 3): - new_tris.append(matching_nodes) - tri_nodeset.append(tag_id) - tri_region.append(region_id) - - elif (n_matching == 4): - new_quads.append(matching_nodes) - quad_nodeset.append(tag_id) - quad_region.append(region_id) - - else: - logger.warning(' Discarding an element with an unexpected number of nodes') - logger.warning(' n_nodes=%i, element=%i, set=%s' % (n_matching, element_id, nodeset_name)) - n_warnings += 1 - - # Add new tris - if new_tris: - logger.info(' Adding %i new triangles...' % (len(new_tris))) - if (-1 in tri_region): - logger.warning('Triangles with empty region information found!') - logger.warning('Note: These will be indicated by a -1 in the output file.') - n_warnings += 1 - mesh.cells.append(CellBlock('triangle', np.array(new_tris))) - mesh.cell_data['gmsh:geometrical'].append(np.array(tri_region)) - mesh.cell_data['gmsh:physical'].append(np.array(tri_nodeset)) - - # Add new quads - if new_quads: - logger.info(' Adding %i new quads...' 
% (len(new_quads))) - if (-1 in quad_region): - logger.warning('Quads with empty region information found!') - logger.warning('Note: These will be indicated by a -1 in the output file.') - n_warnings += 1 - mesh.cells.append(CellBlock('quad', np.array(new_quads))) - mesh.cell_data['gmsh:geometrical'].append(np.array(quad_region)) - mesh.cell_data['gmsh:physical'].append(np.array(quad_nodeset)) - - # Write the final mesh - logger.info('Writing gmsh mesh...') - meshio.write(output_mesh, mesh, file_format="gmsh22", binary=False) - logger.info('Done!') - - return (n_warnings > 0) - - -def convert_abaqus_to_vtu(input_mesh: str, output_mesh: str, logger: logging.Logger = None) -> int: - """ - Convert an abaqus mesh to vtu format, preserving nodeset information. - - If the code encounters any issues with region/element indices, the conversion will - attempt to continue, with errors indicated by -1 values in the output file. - - Args: - input_mesh (str): path of the input abaqus file - output_mesh (str): path of the output vtu file - logger (logging.Logger): a logger instance - - Returns: - int: Number of potential warnings encountered during conversion - """ - # Initialize the logger if it is empty - if not logger: - logging.basicConfig(level=logging.WARNING) - logger = logging.getLogger(__name__) - - # Keep track of the number of warnings - n_warnings = 0 - - # Load the mesh - logger.info('Reading abaqus mesh...') - mesh = meshio.read(input_mesh, file_format="abaqus") - - # Converting nodesets to binary masks - for k, nodeset in mesh.point_sets.items(): - mesh.point_data[k] = np.zeros(len(mesh.points), dtype=int) - mesh.point_data[k][nodeset] = 1 - - # Overwrite point sets to suppress conversion warnings - mesh.point_sets = {} - - # Write the final mesh - logger.info('Writing vtu mesh...') - meshio.write(output_mesh, mesh, file_format="vtu") - logger.info('Done!') - - return (n_warnings > 0) diff --git a/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/main.py b/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/main.py deleted file mode 100644 index 1637d07fd8f..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/main.py +++ /dev/null @@ -1,51 +0,0 @@ -import argparse -import logging -import sys - - -def build_abaqus_converter_input_parser() -> argparse.ArgumentParser: - """Build the input argument parser - - Returns: - argparse.ArgumentParser: a parser instance - """ - parser = argparse.ArgumentParser() - parser.add_argument('input', type=str, help='Input abaqus mesh file name') - parser.add_argument('output', type=str, help='Output gmsh/vtu mesh file name') - parser.add_argument('-v', '--verbose', help='Increase verbosity level', action="store_true") - return parser - - -def main() -> None: - """ - Entry point for the abaqus convertor console script - - Args: - input (str): Input abaqus mesh file name - output (str): Output mesh file name - -v/--verbose (flag): Increase verbosity level - """ - from geosx_mesh_tools import abaqus_converter - - # Parse the user arguments - parser = build_abaqus_converter_input_parser() - args = parser.parse_args() - - # Set up a logger - logging.basicConfig(level=logging.WARNING) - logger = logging.getLogger(__name__) - if args.verbose: - logger.setLevel(logging.INFO) - - # Call the converter - err = 0 - if ('.msh' in args.output): - err = abaqus_converter.convert_abaqus_to_gmsh(args.input, args.output, logger) - else: - err = 
abaqus_converter.convert_abaqus_to_vtu(args.input, args.output, logger) - if err: - sys.exit('Warnings detected: check the output file for potential errors!') - - -if __name__ == '__main__': - main() diff --git a/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/py.typed b/src/coreComponents/python/modules/geosx_mesh_tools_package/geosx_mesh_tools/py.typed deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/geosx_mesh_tools_package/pyproject.toml b/src/coreComponents/python/modules/geosx_mesh_tools_package/pyproject.toml deleted file mode 100644 index c2f433afcb5..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_tools_package/pyproject.toml +++ /dev/null @@ -1,8 +0,0 @@ -[build-system] -requires = ["setuptools>=42", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.mypy] -python_version = "3.8" -warn_return_any = true -warn_unused_configs = true diff --git a/src/coreComponents/python/modules/geosx_mesh_tools_package/setup.cfg b/src/coreComponents/python/modules/geosx_mesh_tools_package/setup.cfg deleted file mode 100644 index 06ca8c2c7df..00000000000 --- a/src/coreComponents/python/modules/geosx_mesh_tools_package/setup.cfg +++ /dev/null @@ -1,22 +0,0 @@ -[metadata] -name = geosx_mesh_tools -version = 0.2.0 -description = Tools for managing meshes in GEOSX -author = Christopher Sherman -author_email = sherman27@llnl.gov -license = LGPL-2.1 - -[options] -packages = - geosx_mesh_tools -install_requires = - meshio>=5.3.2 - numpy -python_requires = >=3.6 - -[options.package_data] -geosx_mesh_tools = py.typed - -[options.entry_points] -console_scripts = - convert_abaqus = geosx_mesh_tools.main:main diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/.gitignore b/src/coreComponents/python/modules/geosx_xml_tools_package/.gitignore deleted file mode 100644 index 59d52651e06..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -build -*.egg-info diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/__init__.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/__init__.py deleted file mode 100644 index c51fba5a2c9..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -""" -A python module that enables advanced xml features for GEOSX. 
-""" diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/attribute_coverage.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/attribute_coverage.py deleted file mode 100644 index 2b64df8c540..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/attribute_coverage.py +++ /dev/null @@ -1,187 +0,0 @@ -from lxml import etree as ElementTree # type: ignore[import] -import os -from pathlib import Path -from typing import Any, Iterable, Dict -from geosx_xml_tools import command_line_parsers - -record_type = Dict[str, Dict[str, Any]] - - -def parse_schema_element(root: ElementTree.Element, - node: ElementTree.Element, - xsd: str = '{http://www.w3.org/2001/XMLSchema}', - recursive_types: Iterable[str] = ['PeriodicEvent', 'SoloEvent', 'HaltEvent'], - folders: Iterable[str] = ['src', 'examples']) -> record_type: - """Parse the xml schema at the current level - - Args: - root (lxml.etree.Element): the root schema node - node (lxml.etree.Element): current schema node - xsd (str): the file namespace - recursive_types (list): node tags that allow recursive nesting - folders (list): folders to sort xml attribute usage into - - Returns: - dict: Dictionary of attributes and children for the current node - """ - - element_type = node.get('type') - element_name = node.get('name') - element_def = root.find("%scomplexType[@name='%s']" % (xsd, element_type)) - local_types: record_type = {'attributes': {}, 'children': {}} - - # Parse attributes - for attribute in element_def.findall('%sattribute' % (xsd)): - attribute_name = attribute.get('name') - local_types['attributes'][attribute_name] = {ka: [] for ka in folders} - if ('default' in attribute.attrib): - local_types['attributes'][attribute_name]['default'] = attribute.get('default') - - # Parse children - choice_node = element_def.findall('%schoice' % (xsd)) - if choice_node: - for child in choice_node[0].findall('%selement' % (xsd)): - child_name = child.get('name') - if not ((child_name in recursive_types) and (element_name in recursive_types)): - local_types['children'][child_name] = parse_schema_element(root, child) - - return local_types - - -def parse_schema(fname: str) -> record_type: - """Parse the schema file into the xml attribute usage dict - - Args: - fname (str): schema name - - Returns: - dict: Dictionary of attributes and children for the entire schema - """ - xml_tree = ElementTree.parse(fname) - xml_root = xml_tree.getroot() - problem_node = xml_root.find("{http://www.w3.org/2001/XMLSchema}element") - return {'Problem': parse_schema_element(xml_root, problem_node)} - - -def collect_xml_attributes_level(local_types: record_type, node: ElementTree.Element, folder: str) -> None: - """Collect xml attribute usage at the current level - - Args: - local_types (dict): dictionary containing attribute usage - node (lxml.etree.Element): current xml node - folder (str): the source folder for the current file - """ - for ka in node.attrib.keys(): - local_types['attributes'][ka][folder].append(node.get(ka)) - - for child in node: - if child.tag in local_types['children']: - collect_xml_attributes_level(local_types['children'][child.tag], child, folder) - - -def collect_xml_attributes(xml_types: record_type, fname: str, folder: str) -> None: - """Collect xml attribute usage in a file - - Args: - xml_types (dict): dictionary containing attribute usage - fname (str): name of the target file - folder (str): the source folder for the current file - """ - parser 
= ElementTree.XMLParser(remove_comments=True, remove_blank_text=True) - xml_tree = ElementTree.parse(fname, parser=parser) - xml_root = xml_tree.getroot() - - collect_xml_attributes_level(xml_types['Problem'], xml_root, folder) - - -def write_attribute_usage_xml_level(local_types: record_type, - node: ElementTree.Element, - folders: Iterable[str] = ['src', 'examples']) -> None: - """Write xml attribute usage file at a given level - - Args: - local_types (dict): dict containing attribute usage at the current level - node (lxml.etree.Element): current xml node - """ - - # Write attributes - for ka in local_types['attributes'].keys(): - attribute_node = ElementTree.Element(ka) - node.append(attribute_node) - - if ('default' in local_types['attributes'][ka]): - attribute_node.set('default', local_types['attributes'][ka]['default']) - - unique_values = [] - for f in folders: - sub_values = list(set(local_types['attributes'][ka][f])) - unique_values.extend(sub_values) - attribute_node.set(f, ' | '.join(sub_values)) - - unique_length = len(set(unique_values)) - attribute_node.set('unique_values', str(unique_length)) - - # Write children - for ka in sorted(local_types['children']): - child = ElementTree.Element(ka) - node.append(child) - write_attribute_usage_xml_level(local_types['children'][ka], child) - - -def write_attribute_usage_xml(xml_types: record_type, fname: str) -> None: - """Write xml attribute usage file - - Args: - xml_types (dict): dictionary containing attribute usage by xml type - fname (str): output file name - """ - xml_root = ElementTree.Element('Problem') - xml_tree = ElementTree.ElementTree(xml_root) - - write_attribute_usage_xml_level(xml_types['Problem'], xml_root) - xml_tree.write(fname, pretty_print=True) - - -def process_xml_files(geosx_root: str, output_name: str) -> None: - """Test for xml attribute usage - - Args: - geosx_root (str): GEOSX root directory - output_name (str): output file name - """ - - # Parse the schema - geosx_root = os.path.expanduser(geosx_root) - schema = '%ssrc/coreComponents/schema/schema.xsd' % (geosx_root) - xml_types = parse_schema(schema) - - # Find all xml files, collect their attributes - for folder in ['src', 'examples']: - print(folder) - xml_files = Path(os.path.join(geosx_root, folder)).rglob('*.xml') - for f in xml_files: - print(' %s' % (str(f))) - collect_xml_attributes(xml_types, str(f), folder) - - # Consolidate attributes - write_attribute_usage_xml(xml_types, output_name) - - -def main() -> None: - """Entry point for the xml attribute usage test script - - Args: - -r/--root (str): GEOSX root directory - -o/--output (str): output file name - """ - - # Parse the user arguments - parser = command_line_parsers.build_attribute_coverage_input_parser() - args = parser.parse_args() - - # Parse the xml files - process_xml_files(args.root, args.output) - - -if __name__ == "__main__": - main() diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/command_line_parsers.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/command_line_parsers.py deleted file mode 100644 index 4c07d11f4e5..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/command_line_parsers.py +++ /dev/null @@ -1,88 +0,0 @@ -import argparse -from typing import Tuple, Iterable - - -def build_preprocessor_input_parser() -> argparse.ArgumentParser: - """Build the argument parser - - Returns: - argparse.ArgumentParser: The parser - """ - # Parse the user arguments - parser = 
argparse.ArgumentParser() - parser.add_argument('-i', '--input', type=str, action='append', help='Input file name (multiple allowed)') - parser.add_argument('-c', - '--compiled-name', - type=str, - help='Compiled xml file name (otherwise, it is randomly genrated)', - default='') - parser.add_argument('-s', '--schema', type=str, help='GEOSX schema to use for validation', default='') - parser.add_argument('-v', '--verbose', type=int, help='Verbosity of outputs', default=0) - parser.add_argument('-p', - '--parameters', - nargs='+', - action='append', - help='Parameter overrides (name value, multiple allowed)', - default=[]) - - return parser - - -def parse_xml_preprocessor_arguments() -> Tuple[argparse.Namespace, Iterable[str]]: - """Parse user arguments - - Args: - -i/--input (str): Input file name (multiple allowed) - -c/--compiled-name (str): Compiled xml file name - -s/--schema (str): Path to schema to use for validation - -v/--verbose (int): Verbosity of outputs - -p/--parameters (str): Parameter overrides (name and value, multiple allowed) - - Returns: - list: The remaining unparsed argument strings - """ - parser = build_preprocessor_input_parser() - return parser.parse_known_args() - - -def build_xml_formatter_input_parser() -> argparse.ArgumentParser: - """Build the argument parser - - Returns: - argparse.ArgumentParser: the parser instance - """ - - parser = argparse.ArgumentParser() - parser.add_argument('input', type=str, help='Input file name') - parser.add_argument('-i', '--indent', type=int, help='Indent size', default=2) - parser.add_argument('-s', '--style', type=int, help='Indent style', default=0) - parser.add_argument('-d', '--depth', type=int, help='Block separation depth', default=2) - parser.add_argument('-a', '--alphebitize', type=int, help='Alphebetize attributes', default=0) - parser.add_argument('-c', '--close', type=int, help='Close tag style', default=0) - parser.add_argument('-n', '--namespace', type=int, help='Include namespace', default=0) - return parser - - -def build_attribute_coverage_input_parser() -> argparse.ArgumentParser: - """Build attribute coverage redundancy input parser - - Returns: - argparse.ArgumentParser: parser instance - """ - - parser = argparse.ArgumentParser() - parser.add_argument('-r', '--root', type=str, help='GEOSX root', default='') - parser.add_argument('-o', '--output', type=str, help='Output file name', default='attribute_test.xml') - return parser - - -def build_xml_redundancy_input_parser() -> argparse.ArgumentParser: - """Build xml redundancy input parser - - Returns: - argparse.ArgumentParser: parser instance - """ - - parser = argparse.ArgumentParser() - parser.add_argument('-r', '--root', type=str, help='GEOSX root', default='') - return parser diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/main.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/main.py deleted file mode 100644 index b5110288f93..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/main.py +++ /dev/null @@ -1,163 +0,0 @@ -"""Command line tools for geosx_xml_tools""" - -import sys -import argparse -import os -import time -from geosx_xml_tools import xml_processor, command_line_parsers -from typing import Callable, Any, Union, Tuple, Iterable - - -def check_mpi_rank() -> int: - """Check the MPI rank - - Returns: - int: MPI rank - """ - rank = 0 - mpi_rank_key_options = ['OMPI_COMM_WORLD_RANK', 'PMI_RANK'] - for k in mpi_rank_key_options: - if k in 
os.environ: - rank = int(os.environ[k]) - return rank - - -TFunc = Callable[..., Any] - - -def wait_for_file_write_rank_0(target_file_argument: Union[int, str] = 0, - max_wait_time: float = 100, - max_startup_delay: float = 1) -> Callable[[TFunc], TFunc]: - """Constructor for a function decorator that waits for a target file to be written on rank 0 - - Args: - target_file_argument (int, str): Index or keyword of the filename argument in the decorated function - max_wait_time (float): Maximum amount of time to wait (seconds) - max_startup_delay (float): Maximum delay allowed for thread startup (seconds) - - Returns: - Wrapped function - """ - - def wait_for_file_write_rank_0_inner(writer: TFunc) -> TFunc: - """Intermediate constructor for the function decorator - - Args: - writer (typing.Callable): A function that writes to a file - """ - - def wait_for_file_write_rank_0_decorator(*args, **kwargs) -> Any: - """Apply the writer on rank 0, and wait for completion on other ranks - """ - # Check the target file status - rank = check_mpi_rank() - fname = '' - if isinstance(target_file_argument, int): - fname = args[target_file_argument] - else: - fname = kwargs[target_file_argument] - - target_file_exists = os.path.isfile(fname) - target_file_edit_time = 0.0 - if target_file_exists: - target_file_edit_time = os.path.getmtime(fname) - - # Variations in thread startup times may mean the file has already been processed - # If the last edit was done within the specified time, then allow the thread to proceed - if (abs(target_file_edit_time - time.time()) < max_startup_delay): - target_file_edit_time = 0.0 - - # Go into the target process or wait for the expected file update - if (rank == 0): - return writer(*args, **kwargs) - else: - ta = time.time() - while (time.time() - ta < max_wait_time): - if target_file_exists: - if (os.path.getmtime(fname) > target_file_edit_time): - break - else: - if os.path.isfile(fname): - break - time.sleep(0.1) - - return wait_for_file_write_rank_0_decorator - - return wait_for_file_write_rank_0_inner - - -def preprocess_serial() -> None: - """ - Entry point for the geosx_xml_tools console script - """ - # Process the xml file - args, unknown_args = command_line_parsers.parse_xml_preprocessor_arguments() - - # Attempt to only process the file on rank 0 - # Note: The rank here is determined by inspecting the system environment variables - # While this is not the preferred way of doing so, it avoids mpi environment errors - # If the rank detection fails, then it will preprocess the file on all ranks, which - # sometimes cause a (seemingly harmless) file write conflict. 
- # processor = xml_processor.process - processor = wait_for_file_write_rank_0(target_file_argument='outputFile', max_wait_time=100)(xml_processor.process) - - compiled_name = processor(args.input, - outputFile=args.compiled_name, - schema=args.schema, - verbose=args.verbose, - parameter_override=args.parameters) - if not compiled_name: - if args.compiled_name: - compiled_name = args.compiled_name - else: - raise Exception( - 'When applying the preprocessor in parallel (outside of pygeosx), the --compiled_name argument is required' - ) - - # Note: the return value may be passed to sys.exit, and cause bash to report an error - # return format_geosx_arguments(compiled_name, unknown_args) - print(compiled_name) - - -def preprocess_parallel() -> Iterable[str]: - """ - MPI aware xml preprocesing - """ - # Process the xml file - from mpi4py import MPI # type: ignore[import] - comm = MPI.COMM_WORLD - rank = comm.Get_rank() - - args, unknown_args = command_line_parsers.parse_xml_preprocessor_arguments() - compiled_name = '' - if (rank == 0): - compiled_name = xml_processor.process(args.input, - outputFile=args.compiled_name, - schema=args.schema, - verbose=args.verbose, - parameter_override=args.parameters) - compiled_name = comm.bcast(compiled_name, root=0) - return format_geosx_arguments(compiled_name, unknown_args) - - -def format_geosx_arguments(compiled_name: str, unknown_args: Iterable[str]) -> Iterable[str]: - """Format GEOSX arguments - - Args: - compiled_name (str): Name of the compiled xml file - unknown_args (list): List of unprocessed arguments - - Returns: - list: List of arguments to pass to GEOSX - """ - geosx_args = [sys.argv[0], '-i', compiled_name] - if unknown_args: - geosx_args.extend(unknown_args) - - # Print the output name for use in bash scripts - print(compiled_name) - return geosx_args - - -if __name__ == "__main__": - preprocess_serial() diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/py.typed b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/py.typed deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/regex_tools.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/regex_tools.py deleted file mode 100644 index ded5c1a8783..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/regex_tools.py +++ /dev/null @@ -1,78 +0,0 @@ -"""Tools for managing regular expressions in geosx_xml_tools""" - -import re -from typing import Union, Dict -""" -Define regex patterns used throughout the module: - -Pattern | Example targets | Notes ------------------------------------------------------------------------------------- -parameters | $Parameter, $Parameter$ | Matches entire parameter string -units | 9.81[m**2/s], 1.0 [bbl/day] | Matches entire unit string -units_b | m, bbl, day | Matches unit names -symbolic | `1 + 2.34e5*2` | Matches the entire symbolic string -sanitize | | Removes any residual characters before - | | evaluating symbolic expressions -strip_trailing | 3.0000, 5.150050 | Removes unnecessary float strings -strip_trailing_b| 3.0000e0, 1.23e0 | Removes unnecessary float strings -""" - -patterns: Dict[str, str] = { - 'parameters': r"\$:?([a-zA-Z_0-9]*)\$?", - 'units': r"([0-9]*?\.?[0-9]+(?:[eE][-+]?[0-9]*?)?)\ *?\[([-+.*/()a-zA-Z0-9]*)\]", - 'units_b': r"([a-zA-Z]*)", - 'symbolic': r"\`([-+.*/() 0-9eE]*)\`", - 'sanitize': r"[a-z-[e]A-Z-[E]]", - 
'strip_trailing': r"\.?0+(?=e)", - 'strip_trailing_b': r"e\+00|\+0?|(?<=-)0" -} - -# String formatting for symbolic expressions -symbolic_format = '%1.6e' - - -def SymbolicMathRegexHandler(match: re.Match) -> str: - """Evaluate symbolic expressions that are identified using the regex_tools.patterns['symbolic']. - - Args: - match (re.match): A matching string identified by the regex. - """ - k = match.group(1) - if k: - # Sanitize the input - sanitized = re.sub(patterns['sanitize'], '', k).strip() - value = eval(sanitized, {'__builtins__': None}) - - # Format the string, removing any trailing zeros, decimals, etc. - str_value = re.sub(patterns['strip_trailing'], '', symbolic_format % (value)) - str_value = re.sub(patterns['strip_trailing_b'], '', str_value) - return str_value - else: - return '' - - -class DictRegexHandler(): - """This class is used to substitute matched values with those stored in a dict.""" - - def __init__(self) -> None: - """Initialize the handler with an empty target list. - The key/value pairs of self.target indicate which values - to look for and the values they will replace with. - """ - self.target: Dict[str, str] = {} - - def __call__(self, match: re.Match) -> str: - """Replace the matching strings with their target. - - Args: - match (re.match): A matching string identified by the regex. - """ - - k = match.group(1) - if k: - if (k not in self.target.keys()): - raise Exception('Error: Target (%s) is not defined in the regex handler' % k) - value = self.target[k] - return str(value) - else: - return '' diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/table_generator.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/table_generator.py deleted file mode 100644 index bb4a63c0959..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/table_generator.py +++ /dev/null @@ -1,75 +0,0 @@ -"""Tools for reading/writing GEOSX ascii tables""" - -import numpy as np -from typing import Tuple, Iterable, Dict - - -def write_GEOS_table(axes_values: Iterable[np.ndarray], - properties: Dict[str, np.ndarray], - axes_names: Iterable[str] = ['x', 'y', 'z', 't'], - string_format: str = '%1.5e') -> None: - """Write an GEOS-compatible ascii table. - - Args: - axes_values (list): List of arrays containing the coordinates for each axis of the table. - properties (dict): Dict of arrays with dimensionality/size defined by the axes_values - axes_names (list): Names for each axis (default = ['x', 'y', 'z', 't']) - string_format (str): Format for output values (default = %1.5e) - """ - - # Check to make sure the axes/property files have the correct shape - axes_shape = tuple([len(x) for x in axes_values]) - for k in properties.keys(): - if (np.shape(properties[k]) != axes_shape): - raise Exception("Shape of parameter %s is incompatible with given axes" % (k)) - - # Write axes files - for ka, x in zip(axes_names, axes_values): - np.savetxt('%s.geos' % (ka), x, fmt=string_format, delimiter=',') - - # Write property files - for k in properties.keys(): - tmp = np.reshape(properties[k], (-1), order='F') - np.savetxt('%s.geos' % (k), tmp, fmt=string_format, delimiter=',') - - -def read_GEOS_table(axes_files: Iterable[str], - property_files: Iterable[str]) -> Tuple[Iterable[np.ndarray], Dict[str, np.ndarray]]: - """Read an GEOS-compatible ascii table. - - Args: - axes_files (list): List of the axes file names in order. 
- property_files (list): List of property file names - - Returns: - tuple: List of axis definitions, dict of property values - """ - axes_values = [] - for f in axes_files: - axes_values.append(np.loadtxt('%s.geos' % (f), unpack=True, delimiter=',')) - axes_shape = tuple([len(x) for x in axes_values]) - - # Open property files - properties = {} - for f in property_files: - tmp = np.loadtxt('%s.geos' % (f), unpack=True, delimiter=',') - properties[f] = np.reshape(tmp, axes_shape, order='F') - - return axes_values, properties - - -def write_read_GEOS_table_example() -> None: - """Table read / write example.""" - - # Define table axes - a = np.array([0.0, 1.0]) - b = np.array([0.0, 0.5, 1.0]) - axes_values = [a, b] - - # Generate table values (note: the indexing argument is important) - A, B = np.meshgrid(a, b, indexing='ij') - properties = {'c': A + 2.0 * B} - - # Write, then read tables - write_GEOS_table(axes_values, properties, axes_names=['a', 'b']) - axes_b, properties_b = read_GEOS_table(['a', 'b'], ['c']) diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/__init__.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/generate_test_xml.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/generate_test_xml.py deleted file mode 100644 index 3c1b7714d56..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/generate_test_xml.py +++ /dev/null @@ -1,374 +0,0 @@ -"""Tool for generating test xml files for processing.""" - -from lxml import etree as ElementTree -import os -from geosx_xml_tools import xml_formatter - - -def generate_test_xml_files(root_dir): - """Build example input/output xml files, which can be used to test the parser. - These are derived from a GEOSX integrated test xml. - - @param root_dir The folder to write the example xml files. 
- """ - - # Build segments of an xml file that can be compiled to form a test - # File header/footer - xml_header = """""" - - xml_footer = """""" - - # Parameters - xml_parameters = """ - - - - - """ - - # Includes - xml_includes = """ - - - - -""" - - # Base segments - xml_base_a = """ - - - - - - - - - - - - - - - -""" - - xml_base_b = """ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -""" - - # Field specifications with parameters, symbolic math, and their compiled equivalents - field_string_with_parameters = """ - - - - - - - - - - - - - - - -""" - - field_string_with_symbolic = """ - - - - - - - - - - - - - - - -""" - - field_string_base = """ - - - - - - - - - - - - - - - -""" - - field_string_alt = """ - - - - - - - - - - - - - - - -""" - - # Write the files, and apply pretty_print to targets for easy matches - # No advanced features case - with open('%s/no_advanced_features_input.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_a + xml_base_b + field_string_base + xml_footer) - with open('%s/no_advanced_features_target.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_a + xml_base_b + field_string_base + xml_footer) - xml_formatter.format_file('%s/no_advanced_features_target.xml' % (root_dir)) - - # Parameters case - with open('%s/parameters_input.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_parameters + xml_base_a + xml_base_b + field_string_with_parameters + xml_footer) - with open('%s/parameters_target.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_a + xml_base_b + field_string_base + xml_footer) - xml_formatter.format_file('%s/parameters_target.xml' % (root_dir)) - - # Symbolic + parameters case - with open('%s/symbolic_parameters_input.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_parameters + xml_base_a + xml_base_b + field_string_with_symbolic + xml_footer) - with open('%s/symbolic_parameters_target.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_a + xml_base_b + field_string_alt + xml_footer) - xml_formatter.format_file('%s/symbolic_parameters_target.xml' % (root_dir)) - - # Included case - os.makedirs('%s/included' % (root_dir), exist_ok=True) - with open('%s/included_input.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_includes + xml_footer) - with open('%s/included/included_a.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_a + xml_footer) - with open('%s/included/included_b.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_b + xml_footer) - with open('%s/included/included_c.xml' % (root_dir), 'w') as f: - f.write(xml_header + field_string_base + xml_footer) - with open('%s/included_target.xml' % (root_dir), 'w') as f: - f.write(xml_header + xml_base_a + xml_base_b + field_string_base + xml_footer) - xml_formatter.format_file('%s/included_target.xml' % (root_dir)) diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/test_manager.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/test_manager.py deleted file mode 100644 index 54e60a7051d..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/tests/test_manager.py +++ /dev/null @@ -1,173 +0,0 @@ -import unittest -import re -import os -import filecmp -from geosx_xml_tools import regex_tools, unit_manager, xml_processor -from geosx_xml_tools.tests import generate_test_xml -import argparse -from parameterized import parameterized - -# Create an instance of the unit manager -unitManager = 
unit_manager.UnitManager() - - -# Test the unit manager definitions -class TestUnitManager(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.tol = 1e-6 - - def test_unit_dict(self): - unitManager.buildUnits() - self.assertTrue(bool(unitManager.units)) - - # Scale value tests - @parameterized.expand([['meter', '2', 2.0], ['meter', '1.234', 1.234], ['meter', '1.234e1', 12.34], - ['meter', '1.234E1', 12.34], ['meter', '1.234e+1', 12.34], ['meter', '1.234e-1', 0.1234], - ['mumeter', '1', 1.0e-6], ['micrometer', '1', 1.0e-6], ['kilometer', '1', 1.0e3], - ['ms', '1', 1.0e-3], ['millisecond', '1', 1.0e-3], ['Ms', '1', 1.0e6], ['m/s', '1', 1.0], - ['micrometer/s', '1', 1.0e-6], ['micrometer/ms', '1', 1.0e-3], - ['micrometer/microsecond', '1', 1.0], ['m**2', '1', 1.0], ['km**2', '1', 1.0e6], - ['kilometer**2', '1', 1.0e6], ['(km*mm)', '1', 1.0], ['(km*mm)**2', '1', 1.0], - ['km^2', '1', 1.0e6, True], ['bbl/day', '1', 0.000001840130728333], ['cP', '1', 0.001]]) - def test_units(self, unit, scale, expected_value, expect_fail=False): - try: - val = float(unitManager([scale, unit])) - self.assertTrue((abs(val - expected_value) < self.tol) != expect_fail) - except TypeError: - self.assertTrue(expect_fail) - - -# Test the behavior of the parameter regex -class TestParameterRegex(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.regexHandler = regex_tools.DictRegexHandler() - cls.regexHandler.target['foo'] = '1.23' - cls.regexHandler.target['bar'] = '4.56e7' - - @parameterized.expand([['$:foo*1.234', '1.23*1.234'], ['$:foo*1.234/$:bar', '1.23*1.234/4.56e7'], - ['$:foo*1.234/($:bar*$:foo)', '1.23*1.234/(4.56e7*1.23)'], - ['$foo*1.234/$bar', '1.23*1.234/4.56e7'], ['$foo$*1.234/$bar', '1.23*1.234/4.56e7'], - ['$foo$*1.234/$bar$', '1.23*1.234/4.56e7'], - ['$blah$*1.234/$bar$', '1.23*1.234/4.56e7', True], - ['$foo$*1.234/$bar$', '4.56e7*1.234/4.56e7', True]]) - def test_parameter_regex(self, parameterInput, expectedValue, expect_fail=False): - try: - result = re.sub(regex_tools.patterns['parameters'], self.regexHandler, parameterInput) - self.assertTrue((result == expectedValue) != expect_fail) - except Exception: - self.assertTrue(expect_fail) - - -# Test the behavior of the unit regex -class TestUnitsRegex(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.tol = 1e-6 - - @parameterized.expand([['1.234[m**2/s]', '1.234'], ['1.234 [m**2/s]', '1.234'], ['1.234[m**2/s]*3.4', '1.234*3.4'], - ['1.234[m**2/s] + 5.678[mm/s]', '1.234 + 5.678e-3'], - ['1.234 [m**2/s] + 5.678 [mm/s]', '1.234 + 5.678e-3'], - ['(1.234[m**2/s])*5.678', '(1.234)*5.678']]) - def test_units_regex(self, unitInput, expectedValue, expect_fail=False): - try: - result = re.sub(regex_tools.patterns['units'], unitManager.regexHandler, unitInput) - self.assertTrue((result == expectedValue) != expect_fail) - except Exception: - self.assertTrue(expect_fail) - - -# Test the symbolic math regex -class TestSymbolicRegex(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.tol = 1e-6 - - @parameterized.expand([['`1.234`', '1.234'], ['`1.234*2.0`', '2.468'], ['`10`', '1e1'], ['`10*2`', '2e1'], - ['`1.0/2.0`', '5e-1'], ['`2.0**2`', '4'], ['`1.0 + 2.0**2`', '5'], ['`(1.0 + 2.0)**2`', '9'], - ['`((1.0 + 2.0)**2)**(0.5)`', '3'], ['`(1.2e3)*2`', '2.4e3'], ['`1.2e3*2`', '2.4e3'], - ['`2.0^2`', '4', True], ['`sqrt(4.0)`', '2', True]]) - def test_symbolic_regex(self, symbolicInput, expectedValue, expect_fail=False): - try: - result = re.sub(regex_tools.patterns['symbolic'], 
regex_tools.SymbolicMathRegexHandler, symbolicInput) - self.assertTrue((result == expectedValue) != expect_fail) - except Exception: - self.assertTrue(expect_fail) - - -# Test the complete xml processor -class TestXMLProcessor(unittest.TestCase): - - @classmethod - def setUpClass(cls): - generate_test_xml.generate_test_xml_files('.') - - @parameterized.expand([['no_advanced_features_input.xml', 'no_advanced_features_target.xml'], - ['parameters_input.xml', 'parameters_target.xml'], - ['included_input.xml', 'included_target.xml'], - ['symbolic_parameters_input.xml', 'symbolic_parameters_target.xml']]) - def test_xml_processor(self, input_file, target_file, expect_fail=False): - try: - tmp = xml_processor.process(input_file, - outputFile=input_file + '.processed', - verbose=0, - keep_parameters=False, - keep_includes=False) - self.assertTrue(filecmp.cmp(tmp, target_file) != expect_fail) - except Exception: - self.assertTrue(expect_fail) - - -# Main entry point for the unit tests -def run_unit_tests(test_dir, verbose): - # Create and move to the test directory - pwd = os.getcwd() - os.makedirs(test_dir, exist_ok=True) - os.chdir(test_dir) - - # Unit manager tests - suite = unittest.TestLoader().loadTestsFromTestCase(TestUnitManager) - unittest.TextTestRunner(verbosity=verbose).run(suite) - - # Parameter regex handler tests - suite = unittest.TestLoader().loadTestsFromTestCase(TestParameterRegex) - unittest.TextTestRunner(verbosity=verbose).run(suite) - - # Regex handler tests - suite = unittest.TestLoader().loadTestsFromTestCase(TestUnitsRegex) - unittest.TextTestRunner(verbosity=verbose).run(suite) - - # Symbolic regex handler tests - suite = unittest.TestLoader().loadTestsFromTestCase(TestSymbolicRegex) - unittest.TextTestRunner(verbosity=verbose).run(suite) - - # xml processor tests - suite = unittest.TestLoader().loadTestsFromTestCase(TestXMLProcessor) - unittest.TextTestRunner(verbosity=verbose).run(suite) - - os.chdir(pwd) - - -def main(): - """Entry point for the geosx_xml_tools unit tests - - @arg -o/--output Output directory (default = ./test_results) - """ - - # Parse the user arguments - parser = argparse.ArgumentParser() - parser.add_argument('-t', '--test_dir', type=str, help='Test output directory', default='./test_results') - parser.add_argument('-v', '--verbose', type=int, help='Verbosity level', default=2) - args = parser.parse_args() - - # Process the xml file - run_unit_tests(args.test_dir, args.verbose) - - -if __name__ == "__main__": - main() diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/unit_manager.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/unit_manager.py deleted file mode 100644 index 44360b07939..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/unit_manager.py +++ /dev/null @@ -1,151 +0,0 @@ -"""Tools for managing units in GEOSX""" - -import re -from geosx_xml_tools import regex_tools -from typing import List, Any, Dict, Union - - -class UnitManager(): - """This class is used to manage unit definitions.""" - - def __init__(self) -> None: - """Initialize the class by creating an instance of the dict regex handler, building units.""" - self.units: Dict[str, str] = {} - self.unitMatcher = regex_tools.DictRegexHandler() - self.buildUnits() - - def __call__(self, unitStruct: List[Any]) -> str: - """Evaluate the symbolic expression for matched strings. - - Args: - unitStruct (list): A list containing the variable scale and the unit definition. 
- - Returns: - str: The string with evaluated unit definitions - """ - - # Replace all instances of units in the string with their scale defined in self.units - symbolicUnits = re.sub(regex_tools.patterns['units_b'], self.unitMatcher, unitStruct[1]) - - # Strip out any undesired characters and evaluate - # Note: the only allowed alpha characters are e and E. This could be relaxed to allow - # functions such as sin, cos, etc. - symbolicUnits_sanitized = re.sub(regex_tools.patterns['sanitize'], '', symbolicUnits).strip() - value = float(unitStruct[0]) * eval(symbolicUnits_sanitized, {'__builtins__': None}) - - # Format the string, removing any trailing zeros, decimals, extraneous exponential formats - str_value = re.sub(regex_tools.patterns['strip_trailing'], '', regex_tools.symbolic_format % (value)) - str_value = re.sub(regex_tools.patterns['strip_trailing_b'], '', str_value) - return str_value - - def regexHandler(self, match: re.Match) -> str: - """Split the matched string into a scale and unit definition. - - Args: - match (re.match): The matching string from the regex. - - Returns: - str: The string with evaluated unit definitions - """ - # The first matched group includes the scale of the value (e.g. 1.234) - # The second matches the string inside the unit definition (e.g. m/s**2) - return self.__call__([match.group(1), match.group(2)]) - - def buildUnits(self) -> None: - """Build the unit definitions.""" - - # yapf: disable - # Long, short names for SI prefixes - unit_dict_type = Dict[str, Dict[str, Any]] - - prefixes: unit_dict_type = { - 'giga': {'value': 1e9, 'alt': 'G'}, - 'mega': {'value': 1e6, 'alt': 'M'}, - 'kilo': {'value': 1e3, 'alt': 'k'}, - 'hecto': {'value': 1e2, 'alt': 'H'}, - 'deca': {'value': 1e1, 'alt': 'D'}, - '': {'value': 1.0, 'alt': ''}, - 'deci': {'value': 1e-1, 'alt': 'd'}, - 'centi': {'value': 1e-2, 'alt': 'c'}, - 'milli': {'value': 1e-3, 'alt': 'm'}, - 'micro': {'value': 1e-6, 'alt': 'mu'}, - 'nano': {'value': 1e-9, 'alt': 'n'} - } - - # Base units, and their abbreviations - # Note: setting (usePrefix = True) instructs the manager to expand using SI prefixes - unit_defs: unit_dict_type = { - 'gram': {'value': 1e-3, 'alt': ['g', 'grams'], 'usePrefix': True}, - 'meter': {'value': 1.0, 'alt': ['m', 'meters'], 'usePrefix': True}, - 'second': {'value': 1.0, 'alt': ['s', 'seconds'], 'usePrefix': True}, - 'minute': {'value': 60.0, 'alt': ['min', 'minutes'], 'usePrefix': True}, - 'hour': {'value': 3600.0, 'alt': ['hr', 'hours', 'hrs'], 'usePrefix': True}, - 'day': {'value': 3600.0*24.0, 'alt': ['d', 'dy'], 'usePrefix': True}, - 'year': {'value': 3600.0*24.0*365.25, 'alt': ['yr', 'years'], 'usePrefix': True}, - 'pascal': {'value': 1.0, 'alt': ['Pa'], 'usePrefix': True}, - 'newton': {'value': 1.0, 'alt': ['N'], 'usePrefix': True}, - 'joule': {'value': 1.0, 'alt': ['J'], 'usePrefix': True}, - 'watt': {'value': 1.0, 'alt': ['W'], 'usePrefix': True} - } - - # Imperial units, and their abbreviations - imp_defs: unit_dict_type = { - 'pound': {'value': 0.453592, 'alt': ['lb', 'pounds', 'lbs'], 'usePrefix': True}, - 'poundforce': {'value': 0.453592*9.81, 'alt': ['lbf'], 'usePrefix': True}, - 'stone': {'value': 6.35029, 'alt': ['st'], 'usePrefix': True}, - 'ton': {'value': 907.185, 'alt': ['tons'], 'usePrefix': True}, - 'inch': {'value': 1.0/(3.281*12), 'alt': ['in', 'inches'], 'usePrefix': False}, - 'foot': {'value': 1.0/3.281, 'alt': ['ft', 'feet'], 'usePrefix': True}, - 'yard': {'value': 3.0/3.281, 'alt': ['yd', 'yards'], 'usePrefix': True}, - 'rod': {'value': 16.5/3.281, 
'alt': ['rd', 'rods'], 'usePrefix': True}, - 'mile': {'value': 5280.0/3.281, 'alt': ['mi', 'miles'], 'usePrefix': True}, - 'acre': {'value': 4046.86, 'alt': ['acres'], 'usePrefix': True}, - 'gallon': {'value': 0.00378541, 'alt': ['gal', 'gallons'], 'usePrefix': True}, - 'psi': {'value': 6894.76, 'alt': [], 'usePrefix': True}, - 'psf': {'value': 1853.184, 'alt': [], 'usePrefix': True} - } - - # Other commonly used units: - other_defs: unit_dict_type = { - 'dyne': {'value': 1.0e-5, 'alt': ['dynes'], 'usePrefix': True}, - 'bar': {'value': 1.0e5, 'alt': ['bars'], 'usePrefix': True}, - 'atmosphere': {'value': 101325.0, 'alt': ['atm', 'atmospheres'], 'usePrefix': True}, - 'poise': {'value': 0.1, 'alt': ['P'], 'usePrefix': True}, - 'barrel': {'value': 0.1589873, 'alt': ['bbl', 'barrels'], 'usePrefix': True}, - 'horsepower': {'value': 745.7, 'alt': ['hp', 'horsepowers'], 'usePrefix': True} - } - # yapf: enable - - # Combine the unit dicts - unit_defs.update(imp_defs) - unit_defs.update(other_defs) - - # Use brute-force to generate a list of potential units, rather than trying to parse - # unit strings on the fly. This is still quite fast, and allows us to do simple - # checks for overlapping definitions - - # Expand prefix and alternate names - for p in list(prefixes.keys()): - if prefixes[p]['alt']: - prefixes[prefixes[p]['alt']] = {'value': prefixes[p]['value']} - for u in list(unit_defs.keys()): - for alt in unit_defs[u]['alt']: - unit_defs[alt] = {'value': unit_defs[u]['value'], 'usePrefix': unit_defs[u]['usePrefix']} - - # Combine the results into the final dictionary - for u in unit_defs.keys(): - if (unit_defs[u]['usePrefix']): - for p in prefixes.keys(): - self.units[p + u] = prefixes[p]['value'] * unit_defs[u]['value'] - else: - self.units[u] = unit_defs[u]['value'] - - # Test to make sure that there are no overlapping unit definitions - from collections import Counter - tmp = list(self.units.keys()) - duplicates = [k for k, v in Counter(tmp).items() if v > 1] - if (duplicates): - print(duplicates) - raise Exception('Error: There are overlapping unit definitions in the UnitManager') - - self.unitMatcher.target = self.units diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_formatter.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_formatter.py deleted file mode 100644 index eb745abcbcb..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_formatter.py +++ /dev/null @@ -1,205 +0,0 @@ -import os -from lxml import etree as ElementTree # type: ignore[import] -import re -from typing import List, Any, TextIO -from geosx_xml_tools import command_line_parsers - - -def format_attribute(attribute_indent: str, ka: str, attribute_value: str) -> str: - """Format xml attribute strings - - Args: - attribute_indent (str): Attribute indent string - ka (str): Attribute name - attribute_value (str): Attribute value - - Returns: - str: Formatted attribute value - """ - # Make sure that a space follows commas - attribute_value = re.sub(r",\s*", ", ", attribute_value) - - # Handle external brackets - attribute_value = re.sub(r"{\s*", "{ ", attribute_value) - attribute_value = re.sub(r"\s*}", " }", attribute_value) - - # Consolidate whitespace - attribute_value = re.sub(r"\s+", " ", attribute_value) - - # Identify and split multi-line attributes - if re.match(r"\s*{\s*({[-+.,0-9a-zA-Z\s]*},?\s*)*\s*}", attribute_value): - split_positions: List[Any] = [match.end() for match in 
re.finditer(r"}\s*,", attribute_value)] - newline_indent = '\n%s' % (' ' * (len(attribute_indent) + len(ka) + 4)) - new_values = [] - for a, b in zip([None] + split_positions, split_positions + [None]): - new_values.append(attribute_value[a:b].strip()) - if new_values: - attribute_value = newline_indent.join(new_values) - - return attribute_value - - -def format_xml_level(output: TextIO, - node: ElementTree.Element, - level: int, - indent: str = ' ' * 2, - block_separation_max_depth: int = 2, - modify_attribute_indent: bool = False, - sort_attributes: bool = False, - close_tag_newline: bool = False, - include_namespace: bool = False) -> None: - """Iteratively format the xml file - - Args: - output (file): the output text file handle - node (lxml.etree.Element): the current xml element - level (int): the xml depth - indent (str): the xml indent style - block_separation_max_depth (int): the maximum depth to separate adjacent elements - modify_attribute_indent (bool): option to have flexible attribute indentation - sort_attributes (bool): option to sort attributes alphabetically - close_tag_newline (bool): option to place close tag on a separate line - include_namespace (bool): option to include the xml namespace in the output - """ - - # Handle comments - if node.tag is ElementTree.Comment: - output.write('\n%s' % (indent * level, node.text)) - - else: - # Write opening line - opening_line = '\n%s<%s' % (indent * level, node.tag) - output.write(opening_line) - - # Write attributes - if (len(node.attrib) > 0): - # Choose indentation - attribute_indent = '%s' % (indent * (level + 1)) - if modify_attribute_indent: - attribute_indent = ' ' * (len(opening_line)) - - # Get a copy of the attributes - attribute_dict = {} - if ((level == 0) & include_namespace): - # Handle the optional namespace information at the root level - # Note: preferably, this would point to a schema we host online - attribute_dict['xmlns:xsi'] = 'http://www.w3.org/2001/XMLSchema-instance' - attribute_dict['xsi:noNamespaceSchemaLocation'] = '/usr/gapps/GEOS/schema/schema.xsd' - elif (level > 0): - attribute_dict = node.attrib - - # Sort attribute names - akeys = list(attribute_dict.keys()) - if sort_attributes: - akeys = sorted(akeys) - - # Format attributes - for ka in akeys: - # Avoid formatting mathpresso expressions - if not (node.tag in ["SymbolicFunction", "CompositeFunction"] and ka == "expression"): - attribute_dict[ka] = format_attribute(attribute_indent, ka, attribute_dict[ka]) - - for ii in range(0, len(akeys)): - k = akeys[ii] - if ((ii == 0) & modify_attribute_indent): - output.write(' %s=\"%s\"' % (k, attribute_dict[k])) - else: - output.write('\n%s%s=\"%s\"' % (attribute_indent, k, attribute_dict[k])) - - # Write children - if len(node): - output.write('>') - Nc = len(node) - for ii, child in zip(range(Nc), node): - format_xml_level(output, child, level + 1, indent, block_separation_max_depth, modify_attribute_indent, - sort_attributes, close_tag_newline, include_namespace) - - # Add space between blocks - if ((level < block_separation_max_depth) & (ii < Nc - 1) & (child.tag is not ElementTree.Comment)): - output.write('\n') - - # Write the end tag - output.write('\n%s' % (indent * level, node.tag)) - else: - if close_tag_newline: - output.write('\n%s/>' % (indent * level)) - else: - output.write('/>') - - -def format_file(input_fname: str, - indent_size: int = 2, - indent_style: bool = False, - block_separation_max_depth: int = 2, - alphebitize_attributes: bool = False, - close_style: bool = False, - 
namespace: bool = False) -> None: - """Script to format xml files - - Args: - input_fname (str): Input file name - indent_size (int): Indent size - indent_style (bool): Style of indentation (0=fixed, 1=hanging) - block_separation_max_depth (int): Max depth to separate xml blocks - alphebitize_attributes (bool): Alphebitize attributes - close_style (bool): Style of close tag (0=same line, 1=new line) - namespace (bool): Insert this namespace in the xml description - """ - fname = os.path.expanduser(input_fname) - try: - tree = ElementTree.parse(fname) - root = tree.getroot() - prologue_comments = [tmp.text for tmp in root.itersiblings(preceding=True)] - epilog_comments = [tmp.text for tmp in root.itersiblings()] - - with open(fname, 'w') as f: - f.write('\n') - - for comment in reversed(prologue_comments): - f.write('\n' % (comment)) - - format_xml_level(f, - root, - 0, - indent=' ' * indent_size, - block_separation_max_depth=block_separation_max_depth, - modify_attribute_indent=indent_style, - sort_attributes=alphebitize_attributes, - close_tag_newline=close_style, - include_namespace=namespace) - - for comment in epilog_comments: - f.write('\n' % (comment)) - f.write('\n') - - except ElementTree.ParseError as err: - print('\nCould not load file: %s' % (fname)) - print(err.msg) - raise Exception('\nCheck input file!') - - -def main() -> None: - """Script to format xml files - - Args: - input (str): Input file name - -i/--indent (int): Indent size - -s/--style (int): Indent style - -d/--depth (int): Block separation depth - -a/--alphebitize (int): Alphebitize attributes - -c/--close (int): Close tag style - -n/--namespace (int): Include namespace - """ - parser = command_line_parsers.build_xml_formatter_input_parser() - args = parser.parse_args() - format_file(args.input, - indent_size=args.indent, - indent_style=args.style, - block_separation_max_depth=args.depth, - alphebitize_attributes=args.alphebitize, - close_style=args.close, - namespace=args.namespace) - - -if __name__ == "__main__": - main() diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_processor.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_processor.py deleted file mode 100644 index 757d0257ce4..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_processor.py +++ /dev/null @@ -1,321 +0,0 @@ -"""Tools for processing xml files in GEOSX""" - -from lxml import etree as ElementTree # type: ignore[import] -from lxml.etree import XMLSyntaxError # type: ignore[import] -import re -import os -from geosx_xml_tools import regex_tools, unit_manager -from geosx_xml_tools import xml_formatter -from typing import Iterable, Tuple, List - -# Create an instance of the unit, parameter regex handlers -unitManager = unit_manager.UnitManager() -parameterHandler = regex_tools.DictRegexHandler() - - -def merge_xml_nodes(existingNode: ElementTree.Element, targetNode: ElementTree.Element, level: int) -> None: - """Merge nodes in an included file into the current structure level by level. - - Args: - existingNode (lxml.etree.Element): The current node in the base xml structure. - targetNode (lxml.etree.Element): The node to insert. - level (int): The xml file depth. 
- """ - - # Copy attributes on the current level - for tk in targetNode.attrib.keys(): - existingNode.set(tk, targetNode.get(tk)) - - # Copy target children into the xml structure - currentTag = '' - matchingSubNodes = [] - - for target in targetNode.getchildren(): - insertCurrentLevel = True - - # Check to see if a node with the appropriate type - # exists at this level - if (currentTag != target.tag): - currentTag = target.tag - matchingSubNodes = existingNode.findall(target.tag) - - if (matchingSubNodes): - targetName = target.get('name') - - # Special case for the root Problem node (which may be unnamed) - if (level == 0): - insertCurrentLevel = False - merge_xml_nodes(matchingSubNodes[0], target, level + 1) - - # Handle named xml nodes - elif (targetName and (currentTag not in ['Nodeset'])): - for match in matchingSubNodes: - if (match.get('name') == targetName): - insertCurrentLevel = False - merge_xml_nodes(match, target, level + 1) - - # Insert any unnamed nodes or named nodes that aren't present - # in the current xml structure - if (insertCurrentLevel): - existingNode.insert(-1, target) - - -def merge_included_xml_files(root: ElementTree.Element, fname: str, includeCount: int, maxInclude: int = 100) -> None: - """Recursively merge included files into the current structure. - - Args: - root (lxml.etree.Element): The root node of the base xml structure. - fname (str): The name of the target xml file to merge. - includeCount (int): The current recursion depth. - maxInclude (int): The maximum number of xml files to include (default = 100) - """ - - # Expand the input path - pwd = os.getcwd() - includePath, fname = os.path.split(os.path.abspath(os.path.expanduser(fname))) - os.chdir(includePath) - - # Check to see if the code has fallen into a loop - includeCount += 1 - if (includeCount > maxInclude): - raise Exception('Reached maximum recursive includes... Is there an include loop?') - - # Check to make sure the file exists - if (not os.path.isfile(fname)): - print('Included file does not exist: %s' % (fname)) - raise Exception('Check included file path!') - - # Load target xml - try: - parser = ElementTree.XMLParser(remove_comments=True, remove_blank_text=True) - includeTree = ElementTree.parse(fname, parser) - includeRoot = includeTree.getroot() - except XMLSyntaxError as err: - print('\nCould not load included file: %s' % (fname)) - print(err.msg) - raise Exception('\nCheck included file!') - - # Recursively add the includes: - for includeNode in includeRoot.findall('Included'): - for f in includeNode.findall('File'): - merge_included_xml_files(root, f.get('name'), includeCount) - - # Merge the results into the xml tree - merge_xml_nodes(root, includeRoot, 0) - os.chdir(pwd) - - -def apply_regex_to_node(node: ElementTree.Element) -> None: - """Apply regexes that handle parameters, units, and symbolic math to each - xml attribute in the structure. - - Args: - node (lxml.etree.Element): The target node in the xml structure. 
- """ - - for k in node.attrib.keys(): - value = node.get(k) - - # Parameter format: $Parameter or $:Parameter - ii = 0 - while ('$' in value): - value = re.sub(regex_tools.patterns['parameters'], parameterHandler, value) - ii += 1 - if (ii > 100): - raise Exception('Reached maximum parameter expands (Node=%s, value=%s)' % (node.tag, value)) - - # Unit format: 9.81[m**2/s] or 1.0 [bbl/day] - if ('[' in value): - value = re.sub(regex_tools.patterns['units'], unitManager.regexHandler, value) - - # Symbolic format: `1 + 2.34e5*2 * ...` - ii = 0 - while ('`' in value): - value = re.sub(regex_tools.patterns['symbolic'], regex_tools.SymbolicMathRegexHandler, value) - ii += 1 - if (ii > 100): - raise Exception('Reached maximum symbolic expands (Node=%s, value=%s)' % (node.tag, value)) - - node.set(k, value) - - for subNode in node.getchildren(): - apply_regex_to_node(subNode) - - -def generate_random_name(prefix: str = '', suffix: str = '.xml') -> str: - """If the target name is not specified, generate a random name for the compiled xml - - Args: - prefix (str): The file prefix (default = ''). - suffix (str): The file suffix (default = '.xml') - - Returns: - str: Random file name - """ - from hashlib import md5 - from time import time - from os import getpid - - tmp = str(time()) + str(getpid()) - return '%s%s%s' % (prefix, md5(tmp.encode('utf-8')).hexdigest(), suffix) - - -def process(inputFiles: Iterable[str], - outputFile: str = '', - schema: str = '', - verbose: int = 0, - parameter_override: List[Tuple[str, str]] = [], - keep_parameters: bool = True, - keep_includes: bool = True) -> str: - """Process an xml file - - Args: - inputFiles (list): Input file names. - outputFile (str): Output file name (if not specified, then generate randomly). - schema (str): Schema file name to validate the final xml (if not specified, then do not validate). - verbose (int): Verbosity level. - parameter_override (list): Parameter value overrides - keep_parameters (bool): If True, then keep parameters in the compiled file (default = True) - keep_includes (bool): If True, then keep includes in the compiled file (default = True) - - Returns: - str: Output file name - """ - if verbose: - print('\nReading input xml parameters and parsing symbolic math...') - - # Check the type of inputFiles - if isinstance(inputFiles, str): - inputFiles = [inputFiles] - - # Expand the input path - pwd = os.getcwd() - expanded_files = [os.path.abspath(os.path.expanduser(f)) for f in inputFiles] - single_path, single_input = os.path.split(expanded_files[0]) - os.chdir(single_path) - - # Handle single vs. multiple command line inputs - root = ElementTree.Element("Problem") - tree = ElementTree.ElementTree() - if (len(expanded_files) == 1): - # Load single files directly - try: - parser = ElementTree.XMLParser(remove_comments=True, remove_blank_text=True) - tree = ElementTree.parse(single_input, parser=parser) - root = tree.getroot() - except XMLSyntaxError as err: - print('\nCould not load input file: %s' % (single_input)) - print(err.msg) - raise Exception('\nCheck input file!') - - else: - # For multiple inputs, create a simple xml structure to hold - # the included files. 
These will be saved as comments in the compiled file - root = ElementTree.Element('Problem') - tree = ElementTree.ElementTree(root) - included_node = ElementTree.Element("Included") - root.append(included_node) - for f in expanded_files: - included_file = ElementTree.Element("File") - included_file.set('name', f) - included_node.append(included_file) - - # Add the included files to the xml structure - # Note: doing this first assumes that parameters aren't used in Included block - includeCount = 0 - for includeNode in root.findall('Included'): - for f in includeNode.findall('File'): - merge_included_xml_files(root, f.get('name'), includeCount) # type: ignore[attr-defined] - os.chdir(pwd) - - # Build the parameter map - Pmap = {} - for parameters in root.findall('Parameters'): - for p in parameters.findall('Parameter'): - Pmap[p.get('name')] = p.get('value') - - # Apply any parameter overrides - if len(parameter_override): - # Save overriden values to a new xml element - command_override_node = ElementTree.Element("CommandLineOverride") - root.append(command_override_node) - for ii in range(len(parameter_override)): - pname = parameter_override[ii][0] - pval = ' '.join(parameter_override[ii][1:]) - Pmap[pname] = pval - override_parameter = ElementTree.Element("Parameter") - override_parameter.set('name', pname) - override_parameter.set('value', pval) - command_override_node.append(override_parameter) - - # Add the parameter map to the handler - parameterHandler.target = Pmap - - # Process any parameters, units, and symbolic math in the xml - apply_regex_to_node(root) - - # Comment out or remove the Parameter, Included nodes - for includeNode in root.findall('Included'): - if keep_includes: - root.insert(-1, ElementTree.Comment(ElementTree.tostring(includeNode))) - root.remove(includeNode) - for parameterNode in root.findall('Parameters'): - if keep_parameters: - root.insert(-1, ElementTree.Comment(ElementTree.tostring(parameterNode))) - root.remove(parameterNode) - for overrideNode in root.findall('CommandLineOverride'): - if keep_parameters: - root.insert(-1, ElementTree.Comment(ElementTree.tostring(overrideNode))) - root.remove(overrideNode) - - # Generate a random output name if not specified - if not outputFile: - outputFile = generate_random_name(prefix='prep_') - - # Write the output file - tree.write(outputFile, pretty_print=True) - - # Check for un-matched special characters - with open(outputFile, 'r') as ofile: - for line in ofile: - if any([sc in line for sc in ['$', '[', ']', '`']]): - raise Exception( - 'Found un-matched special characters in the pre-processed input file on line:\n%s\n Check your input xml for errors!' - % (line)) - - # Apply formatting to the file - xml_formatter.format_file(outputFile) - - if verbose: - print('Preprocessed xml file stored in %s' % (outputFile)) - - if schema: - validate_xml(outputFile, schema, verbose) - - return outputFile - - -def validate_xml(fname: str, schema: str, verbose: int) -> None: - """Validate an xml file, and parse the warnings. - - Args: - fname (str): Target xml file name. - schema (str): Schema file name. - verbose (int): Verbosity level. 
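A minimal usage sketch of the preprocessing entry point deleted above, assuming the geosx_xml_tools package is installed; 'base.xml', 'compiled.xml', and the parameter name 'permeability' are placeholders for an actual input deck, output path, and Parameter defined in that deck.

from geosx_xml_tools import xml_processor

# Expand parameters, units, and symbolic math in an input deck,
# overriding one Parameter value from Python, and write the compiled file.
compiled = xml_processor.process(inputFiles=['base.xml'],
                                 outputFile='compiled.xml',
                                 verbose=1,
                                 parameter_override=[('permeability', '1.0e-14')])
print('Compiled input written to %s' % compiled)

The returned path can then be handed to GEOSX directly, since all parameter, unit, and symbolic expressions have been resolved by that point.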
- """ - if verbose: - print('Validating the xml against the schema...') - try: - ofile = ElementTree.parse(fname) - sfile = ElementTree.XMLSchema(ElementTree.parse(os.path.expanduser(schema))) - sfile.assertValid(ofile) - except ElementTree.DocumentInvalid as err: - print(err) - print('\nWarning: input XML contains potentially invalid input parameters:') - print('-' * 20 + '\n') - print(sfile.error_log) - print('\n' + '-' * 20) - print('(Total schema warnings: %i)\n' % (len(sfile.error_log))) - - if verbose: - print('Done!') diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_redundancy_check.py b/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_redundancy_check.py deleted file mode 100644 index 251e1df0ea7..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/geosx_xml_tools/xml_redundancy_check.py +++ /dev/null @@ -1,98 +0,0 @@ -from geosx_xml_tools.attribute_coverage import parse_schema -from geosx_xml_tools.xml_formatter import format_file -from lxml import etree as ElementTree # type: ignore[import] -import os -from pathlib import Path -from geosx_xml_tools import command_line_parsers -from typing import Iterable, Dict, Any - - -def check_redundancy_level(local_schema: Dict[str, Any], - node: ElementTree.Element, - whitelist: Iterable[str] = ['component']) -> int: - """Check xml redundancy at the current level - - Args: - local_schema (dict): Schema definitions - node (lxml.etree.Element): current xml node - whitelist (list): always match nodes containing these attributes - - Returns: - int: Number of required attributes in the node and its children - """ - node_is_required = 0 - for ka in node.attrib.keys(): - if (ka in whitelist): - node_is_required += 1 - elif (ka not in local_schema['attributes']): - node_is_required += 1 - elif ('default' not in local_schema['attributes'][ka]): - node_is_required += 1 - elif (node.get(ka) != local_schema['attributes'][ka]['default']): - node_is_required += 1 - else: - node.attrib.pop(ka) - - for child in node: - # Comments will not appear in the schema - if child.tag in local_schema['children']: - child_is_required = check_redundancy_level(local_schema['children'][child.tag], child) - node_is_required += child_is_required - if not child_is_required: - node.remove(child) - - return node_is_required - - -def check_xml_redundancy(schema: Dict[str, Any], fname: str) -> None: - """Check redundancy in an xml file - - Args: - schema (dict): Schema definitions - fname (str): Name of the target file - """ - xml_tree = ElementTree.parse(fname) - xml_root = xml_tree.getroot() - check_redundancy_level(schema['Problem'], xml_root) - xml_tree.write(fname) - format_file(fname) - - -def process_xml_files(geosx_root: str) -> None: - """Test for xml redundancy - - Args: - geosx_root (str): GEOSX root directory - """ - - # Parse the schema - geosx_root = os.path.expanduser(geosx_root) - schema_fname = '%ssrc/coreComponents/schema/schema.xsd' % (geosx_root) - schema = parse_schema(schema_fname) - - # Find all xml files, collect their attributes - for folder in ['src', 'examples']: - print(folder) - xml_files = Path(os.path.join(geosx_root, folder)).rglob('*.xml') - for f in xml_files: - print(' %s' % (str(f))) - check_xml_redundancy(schema, str(f)) - - -def main() -> None: - """Entry point for the xml attribute usage test script - - Args: - -r/--root (str): GEOSX root directory - """ - - # Parse the user arguments - parser = 
command_line_parsers.build_xml_redundancy_input_parser() - args = parser.parse_args() - - # Parse the xml files - process_xml_files(args.root) - - -if __name__ == "__main__": - main() diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/pyproject.toml b/src/coreComponents/python/modules/geosx_xml_tools_package/pyproject.toml deleted file mode 100644 index c2f433afcb5..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/pyproject.toml +++ /dev/null @@ -1,8 +0,0 @@ -[build-system] -requires = ["setuptools>=42", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.mypy] -python_version = "3.8" -warn_return_any = true -warn_unused_configs = true diff --git a/src/coreComponents/python/modules/geosx_xml_tools_package/setup.cfg b/src/coreComponents/python/modules/geosx_xml_tools_package/setup.cfg deleted file mode 100644 index b369ca6f88b..00000000000 --- a/src/coreComponents/python/modules/geosx_xml_tools_package/setup.cfg +++ /dev/null @@ -1,28 +0,0 @@ -[metadata] -name = geosx_xml_tools -version = 0.6.0 -description = Tools for enabling advanced xml features in GEOSX -author = Christopher Sherman -author_email = sherman27@llnl.gov -license = LGPL-2.1 - -[options] -packages = - geosx_xml_tools - geosx_xml_tools.tests -install_requires = - lxml>=4.5.0 - parameterized - numpy -python_requires = >=3.6 - -[options.package_data] -geosx_xml_tools = py.typed - -[options.entry_points] -console_scripts = - preprocess_xml = geosx_xml_tools.main:preprocess_serial - format_xml = geosx_xml_tools.xml_formatter:main - test_geosx_xml_tools = geosx_xml_tools.tests.test_manager:main - check_xml_attribute_coverage = geosx_xml_tools.attribute_coverage:main - check_xml_redundancy = geosx_xml_tools.xml_redundancy_check:main diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/__init__.py b/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/__init__.py deleted file mode 100644 index 0f724ef67a9..00000000000 --- a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .wrapper import hdf5_wrapper diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/py.typed b/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/py.typed deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/use_example.py b/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/use_example.py deleted file mode 100644 index 5bbdbe3d27a..00000000000 --- a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/use_example.py +++ /dev/null @@ -1,89 +0,0 @@ -import numpy as np -import hdf5_wrapper -from typing import Union, Dict - - -def print_database_iterative(database: hdf5_wrapper.hdf5_wrapper, level: int = 0) -> None: - """ - Print the database targets iteratively by level - - Args: - database (hdf5_wrapper.hdf5_wrapper) the wrapper for the current database - level (int): the depth within the database - """ - # Note: you can also iterate over the hdf5_wrapper object directly - for k in database.keys(): - print('%s%s' % (' ' * level, k)) - - if isinstance(database[k], hdf5_wrapper.hdf5_wrapper): - # This is a group, so continue iterating downward - print_database_iterative(database[k], level + 1) - else: - # This is likely to be an array - print(database[k]) - print() - - -def read_write_hdf5_database_example() -> None: - """ - Simple demonstration of 
hdf5_wrapper - """ - - # ------------------------ - # Generate test data - # ------------------------ - nested_dict_type = Dict[str, Union[np.ndarray, Dict[str, np.ndarray]]] - source_a: nested_dict_type = { - '1D_double_array': np.random.randn(10), - 'string_array': np.array(['a', 'list', 'of', 'strings']), - 'child_a': { - '2D_double_array': np.random.randn(2, 3) - } - } - - source_b: nested_dict_type = { - '1D_integer_array': np.random.randint(0, 100, 5), - 'child_b': { - '3D_double_array': np.random.randn(4, 5, 2) - } - } - - # ------------------------ - # Write databases to file - # ------------------------ - # Write the first piece-by-piece to an hdf5_file - # Note: when you exit the following scope, the database is automatically closed - with hdf5_wrapper.hdf5_wrapper('database_a.hdf5', mode='a') as database_a: - # Assign the two array objects to this level - database_a['1D_double_array'] = source_a['1D_double_array'] - database_a['string_array'] = source_a['string_array'] - - # Create a child group and assign the final array - child_a = database_a['child_a'] - child_a['2D_double_array'] = source_a['child_a']['2D_double_array'] - - # Automatically write the second source to a second database - with hdf5_wrapper.hdf5_wrapper('database_b.hdf5', mode='a') as database_b: - database_b['/'] = source_b - - # Create a third database that links the either two - with hdf5_wrapper.hdf5_wrapper('database_c.hdf5', mode='a') as database_c: - database_c.link('database_a', 'database_a.hdf5') - database_c.link('database_b', 'database_b.hdf5') - - # --------------------------------------- - # Read the databases from the filesystem - # --------------------------------------- - print('Database contents:') - with hdf5_wrapper.hdf5_wrapper('database_c.hdf5') as database_c: - # Iteratively print the database contents - print_database_iterative(database_c, 1) - - # As a final note, you can also access low-level h5py functionality - # by interacting directly with the database target, e.g.: - print('Database attributes:') - print(' ', database_c.target.attrs) - - -if __name__ == "__main__": - read_write_hdf5_database_example() diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/wrapper.py b/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/wrapper.py deleted file mode 100644 index a8a11b89930..00000000000 --- a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/wrapper.py +++ /dev/null @@ -1,206 +0,0 @@ -import h5py # type: ignore[import] -import numpy as np -from numpy.core.defchararray import encode, decode -from typing import Union, Dict, Any, Iterable, Optional, Tuple - -# Note: I would like to replace Any here with str, float, int, np.ndarray, etc. -# However, this heterogeneous pattern causes issues with mypy indexing -hdf5_get_types = Union['hdf5_wrapper', Any] -nested_dict_type = Dict[str, Any] -hdf5_set_types = Union['hdf5_wrapper', nested_dict_type, Any] - - -class hdf5_wrapper(): - """ - A class for reading/writing hdf5 files, which behaves similar to a native dict - """ - - def __init__(self, fname: str = '', target: Optional[h5py.File] = None, mode: str = 'r') -> None: - """ - Initialize the hdf5_wrapper class - - If the fname is supplied (either by a positional or keyword argument), - the wrapper will open a hdf5 database from the filesystem. - The recommended options for the mode flag include 'r' for read-only - and 'a' for read/write access. 
- If write mode is enabled, and the fname does not point - to an existing file, a new database will be created. - - If the target is supplied, then a new instance of the wrapper will - be created using an existing database handle. - - Args: - fname (str): the filename of a new or existing hdf5 database - target (hdf5_wrapper): the handle of an existing hdf5 dataset - mode (str): the read/write behavior of the database (default='r') - """ - self.mode: str = mode - self.target: h5py.File = target - if fname: - self.target = h5py.File(fname, self.mode) - - def __getitem__(self, k: str) -> hdf5_get_types: - """ - Get a target from the database - - If the target is not present in the datastructure and the - database is open in read/write mode, the wrapper will create a - new group and return an hdf5_wrapper. Otherwise it will throw an error - - Args: - k (str): name of target group or array - - Returns: - hdf5_wrapper/np.ndarray: The returned value - """ - if (k not in self.target): - if (self.mode in ['w', 'a']): - self.target.create_group(k) - else: - raise ValueError('Entry does not exist in database: %s' % (k)) - - tmp = self.target[k] - - if isinstance(tmp, h5py._hl.group.Group): - return hdf5_wrapper(target=tmp, mode=self.mode) - elif isinstance(tmp, h5py._hl.dataset.Dataset): - tmp = np.array(tmp) - - # Decode any string types - if (tmp.dtype.kind in ['S', 'U', 'O']): - tmp = decode(tmp) - - # Convert any 0-length arrays to native types - if not tmp.shape: - tmp = tmp[()] - - return tmp - else: - return tmp - - def __setitem__(self, k: str, value: hdf5_set_types): - """ - Write an object to the database if write-mode is enabled - - Args: - k (str): the name of the object - value (dict, np.ndarray, float, int, str): the object to be written - """ - if (self.mode in ['w', 'a']): - if isinstance(value, (dict, hdf5_wrapper)): - # Recursively add groups and their children - if (k not in self.target): - self.target.create_group(k) - new_group = self[k] - for kb, x in value.items(): - new_group[kb] = x - else: - # Delete the old copy if necessary - if (k in self.target): - del (self.target[k]) - - # Add everything else as an ndarray - tmp = np.array(value) - if (tmp.dtype.kind in ['S', 'U', 'O']): - tmp = encode(tmp) - self.target[k] = tmp - else: - raise ValueError( - 'Cannot write to an hdf5 opened in read-only mode! This can be changed by overriding the default mode argument for the wrapper.' 
- ) - - def link(self, k: str, target: str) -> None: - """ - Link an external hdf5 file to this location in the database - - Args: - k (str): the name of the new link in the database - target (str): the path to the external database - """ - self.target[k] = h5py.ExternalLink(target, '/') - - def keys(self) -> Iterable[str]: - """ - Get a list of groups and arrays located at the current level - - Returns: - list: a list of key names pointing to objects at the current level - """ - if isinstance(self.target, h5py._hl.group.Group): - return list(self.target) - else: - raise ValueError('Object not a group!') - - def values(self) -> Iterable[hdf5_get_types]: - """ - Get a list of values located on the current level - """ - return [self[k] for k in self.keys()] - - def items(self) -> Iterable[Tuple[str, hdf5_get_types]]: - return zip(self.keys(), self.values()) - - def __enter__(self): - """ - Entry point for an iterator - """ - return self - - def __exit__(self, type, value, traceback) -> None: - """ - End point for an iterator - """ - self.target.close() - - def __del__(self) -> None: - """ - Closes the database on wrapper deletion - """ - try: - if isinstance(self.target, h5py._hl.files.File): - self.target.close() - except: - pass - - def close(self) -> None: - """ - Closes the database - """ - if isinstance(self.target, h5py._hl.files.File): - self.target.close() - - def get_copy(self) -> nested_dict_type: - """ - Copy the entire database into memory - - Returns: - dict: a dictionary holding the database contents - """ - result: Dict[Union[str, int], Any] = {} - for k in self.keys(): - tmp = self[k] - if isinstance(tmp, hdf5_wrapper): - result[k] = tmp.get_copy() - else: - result[k] = tmp - - return result - - def copy(self) -> nested_dict_type: - """ - Copy the entire database into memory - - Returns: - dict: a dictionary holding the database contents - """ - return self.get_copy() - - def insert(self, x: Union[nested_dict_type, 'hdf5_wrapper']) -> None: - """ - Insert the contents of the target object to the current location - - Args: - x (dict, hdf5_wrapper): the dictionary to insert - """ - for k, v in x.items(): - self[k] = v diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/wrapper_tests.py b/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/wrapper_tests.py deleted file mode 100644 index 7e496ee954e..00000000000 --- a/src/coreComponents/python/modules/hdf5_wrapper_package/hdf5_wrapper/wrapper_tests.py +++ /dev/null @@ -1,124 +0,0 @@ -import unittest -import os -import argparse -import numpy as np -import random -import string -import hdf5_wrapper - - -def random_string(N): - return ''.join(random.choices(string.ascii_uppercase + string.ascii_lowercase + string.digits, k=N)) - - -def build_test_dict(depth=0, max_depth=3): - r = [np.random.randint(2, 20) for x in range(5)] - test = { - 'int': np.random.randint(-1000000, 1000000), - 'float': np.random.random(), - '1d_array': np.random.randn(r[0]), - '3d_array': np.random.randn(r[1], r[2], r[3]), - 'string': random_string(10), - 'string_array': np.array([random_string(x + 10) for x in range(r[4])]) - } - if (depth < max_depth): - test['child_a'] = build_test_dict(depth + 1, max_depth) - test['child_b'] = build_test_dict(depth + 1, max_depth) - test['child_c'] = build_test_dict(depth + 1, max_depth) - - return test - - -# Test the unit manager definitions -class TestHDF5Wrapper(unittest.TestCase): - - @classmethod - def setUpClass(cls): - cls.test_dir = 'wrapper_tests' - 
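A short round-trip sketch of the hdf5_wrapper class shown above, assuming the package (plus h5py and numpy) is available; 'example.hdf5' is a placeholder file name.

import numpy as np
import hdf5_wrapper

# Write a nested dict of arrays, then read it back as plain numpy objects.
source = {'pressure': np.linspace(0.0, 1.0, 5),
          'child': {'saturation': np.zeros((2, 3))}}

with hdf5_wrapper.hdf5_wrapper('example.hdf5', mode='w') as db:
    db.insert(source)                 # recursively writes dicts as groups

with hdf5_wrapper.hdf5_wrapper('example.hdf5') as db:    # read-only by default
    print(list(db.keys()))            # e.g. ['child', 'pressure']
    copy = db.get_copy()              # pull the whole database into memory
    print(copy['child']['saturation'].shape)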
os.makedirs(cls.test_dir, exist_ok=True) - cls.test_dict = build_test_dict() - - def compare_wrapper_dict(self, x, y): - kx = x.keys() - ky = y.keys() - - for k in kx: - if k not in ky: - raise Exception('y key not in x object (%s)' % (k)) - - for k in ky: - if k not in kx: - raise Exception('x key not in y object (%s)' % (k)) - - vx, vy = x[k], y[k] - tx, ty = type(vx), type(vy) - if ((tx != ty) and not (isinstance(vx, (dict, hdf5_wrapper.hdf5_wrapper)) - and isinstance(vy, (dict, hdf5_wrapper.hdf5_wrapper)))): - self.assertTrue(np.issubdtype(tx, ty)) - - if isinstance(vx, (dict, hdf5_wrapper.hdf5_wrapper)): - self.compare_wrapper_dict(vx, vy) - else: - if isinstance(vx, np.ndarray): - self.assertTrue(np.shape(vx) == np.shape(vy)) - self.assertTrue((vx == vy).all()) - else: - self.assertTrue(vx == vy) - - def test_a_insert_write(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_insert.hdf5'), mode='w') - data.insert(self.test_dict) - - def test_b_manual_write(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_manual.hdf5'), mode='w') - for k, v in self.test_dict.items(): - data[k] = v - - def test_c_link_write(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_linked.hdf5'), mode='w') - for k, v in self.test_dict.items(): - if ('child' in k): - child_path = os.path.join(self.test_dir, 'test_%s.hdf5' % (k)) - data_child = hdf5_wrapper.hdf5_wrapper(child_path, mode='w') - data_child.insert(v) - data.link(k, child_path) - else: - data[k] = v - - def test_d_compare_wrapper(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_insert.hdf5')) - self.compare_wrapper_dict(self.test_dict, data) - - def test_e_compare_wrapper_copy(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_insert.hdf5')) - tmp = data.copy() - self.compare_wrapper_dict(self.test_dict, tmp) - - def test_f_compare_wrapper(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_manual.hdf5')) - self.compare_wrapper_dict(self.test_dict, data) - - def test_g_compare_wrapper(self): - data = hdf5_wrapper.hdf5_wrapper(os.path.join(self.test_dir, 'test_linked.hdf5')) - self.compare_wrapper_dict(self.test_dict, data) - - -def main(): - """Entry point for the geosx_xml_tools unit tests - - Args: - -v/--verbose (int): Output verbosity - """ - - # Parse the user arguments - parser = argparse.ArgumentParser() - parser.add_argument('-v', '--verbose', type=int, help='Verbosity level', default=2) - args = parser.parse_args() - - # Unit manager tests - suite = unittest.TestLoader().loadTestsFromTestCase(TestHDF5Wrapper) - unittest.TextTestRunner(verbosity=args.verbose).run(suite) - - -if __name__ == "__main__": - main() diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/pyproject.toml b/src/coreComponents/python/modules/hdf5_wrapper_package/pyproject.toml deleted file mode 100644 index c2f433afcb5..00000000000 --- a/src/coreComponents/python/modules/hdf5_wrapper_package/pyproject.toml +++ /dev/null @@ -1,8 +0,0 @@ -[build-system] -requires = ["setuptools>=42", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.mypy] -python_version = "3.8" -warn_return_any = true -warn_unused_configs = true diff --git a/src/coreComponents/python/modules/hdf5_wrapper_package/setup.cfg b/src/coreComponents/python/modules/hdf5_wrapper_package/setup.cfg deleted file mode 100644 index 13db52e1b82..00000000000 --- a/src/coreComponents/python/modules/hdf5_wrapper_package/setup.cfg +++ /dev/null @@ 
-1,23 +0,0 @@ -[metadata] -name = hdf5_wrapper -version = 0.2.0 -description = Simple wrapper for h5py objects -author = Christopher Sherman -author_email = sherman27@llnl.gov -license = LGPL-2.1 - -[options] -packages = - hdf5_wrapper -install_requires = - h5py>=2.10.0 - numpy>=1.16.2 -python_requires = >=3.6 - -[options.package_data] -hdf5_wrapper = py.typed - -[options.entry_points] -console_scripts = - hdf5_wrapper_tests = hdf5_wrapper.wrapper_tests:main - diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/__init__.py b/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/__init__.py deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/file_io.py b/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/file_io.py deleted file mode 100644 index 12093e8d076..00000000000 --- a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/file_io.py +++ /dev/null @@ -1,86 +0,0 @@ -import os -import numpy as np -from typing import Dict, Iterable, List, Tuple - - -def save_tables(axes: Iterable[np.ndarray], - properties: Dict[str, np.ndarray], - table_root: str = './tables', - axes_names: List[str] = []) -> None: - """ - Saves a set of tables in GEOSX format - - The shape of these arrays should match the length of each axis in the specified order. - The output directory will be created if it does not exist yet. - If axes_names are not supplied, then they will be selected based - on the dimensionality of the grid: 1D=[t]; 3D=[x, y, z]; 4D=[x, y, z, t]. - - Args: - axes (list): A list of numpy ndarrays defining the table axes - properties (dict): A dict of numpy ndarrays defning the table values - table_root (str): The root path for the output directory - axes_names (list): A list of names for each potential axis (optional) - """ - # Check to see if the axes, properties have consistent shapes - axes_size = tuple([len(x) for x in axes]) - axes_dimension = len(axes_size) - for k, p in properties.items(): - property_size = np.shape(p) - if (property_size != axes_size): - print('Property:', k) - print('Grid size:', axes_size) - print('Property size', property_size) - raise Exception('Table dimensions do not match proprerties') - - # Check the axes names - if axes_names: - if (axes_dimension != len(axes_names)): - print('Axes dimensions:', axes_dimension) - print('Number of axis names provided:', len(axes_names)) - raise Exception('The grid dimensions and axes names do not match') - else: - if (axes_dimension == 1): - axes_names = ['t'] - elif (axes_dimension == 3): - axes_names = ['x', 'y', 'z'] - elif (axes_dimension == 4): - axes_names = ['x', 'y', 'z', 't'] - else: - axes_names = ['x%i' % (ii) for ii in range(axes_dimension)] - - # Write the axes - os.makedirs(table_root, exist_ok=True) - for g, a in zip(axes, axes_names): - np.savetxt('%s/%s.csv' % (table_root, a), g, fmt='%1.5f', delimiter=',') - - for k, p in properties.items(): - np.savetxt('%s/%s.csv' % (table_root, k), np.reshape(p, (-1), order='F'), fmt='%1.5e', delimiter=',') - - -def load_tables(axes_names: Iterable[str], - property_names: Iterable[str], - table_root: str = './tables', - extension: str = 'csv') -> Tuple[Iterable[np.ndarray], Dict[str, np.ndarray]]: - """ - Load a set of tables in GEOSX format - - Args: - axes_names (list): Axis file names in the target directory (with no extension) - property_names (list): Property file names in the target directory (with not 
extension) - table_root (str): Root path for the table directory - extension (str): Table file extension (default = 'csv') - - Returns: - tuple: List of axes values, and dictionary of table values - """ - # Load axes - axes = [np.loadtxt('%s/%s.%s' % (table_root, axis, extension), unpack=True) for axis in axes_names] - N = tuple([len(x) for x in axes]) - - # Load properties - properties = { - p: np.reshape(np.loadtxt('%s/%s.%s' % (table_root, p, extension)), N, order='F') - for p in property_names - } - - return axes, properties diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/mesh_interpolation.py b/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/mesh_interpolation.py deleted file mode 100644 index 49b20112044..00000000000 --- a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/mesh_interpolation.py +++ /dev/null @@ -1,110 +0,0 @@ -import numpy as np -from scipy import stats # type: ignore[import] -from typing import Dict, Iterable, List, Tuple, Callable, Union - - -def apply_to_bins(fn: Callable[[Union[float, np.ndarray]], float], - position: np.ndarray, - value: np.ndarray, - bins: np.ndarray, - collapse_edges: bool = True): - """ - Apply a function to values that are located within a series of bins - Note: if a bin is empty, this function will fill a nan value - - Args: - fn (function): Function that takes a single scalar or array input - position (np.ndarray): A 1D list/array describing the location of each sample - value (np.ndarray): A 1D list/array of values at each location - bins (np.ndarray): The bin edges for the position data - collapse_edges (bool): Controls the behavior of edge-data (default=True) - - Returns: - np.ndarray: an array of function results for each bin - """ - # Sort values into bins - Nr = len(bins) + 1 - Ibin = np.digitize(position, bins) - - if collapse_edges: - Nr -= 2 - Ibin -= 1 - Ibin[Ibin == -1] = 0 - Ibin[Ibin == Nr] = Nr - 1 - - # Apply functions to bins - binned_values = np.zeros(Nr) - for ii in range(Nr): - tmp = (Ibin == ii) - if np.sum(tmp): - binned_values[ii] = fn(value[tmp]) - else: - # Empty bin - binned_values[ii] = np.NaN - - return binned_values - - -def extrapolate_nan_values(x, y, slope_scale=0.0): - """ - Fill in any nan values in two 1D arrays by extrapolating - - Args: - x (np.ndarray): 1D list/array of positions - y (np.ndarray): 1D list/array of values - slope_scale (float): value to scale the extrapolation slope (default=0.0) - - Returns: - np.ndarray: The input array with nan values replaced by extrapolated data - """ - Inan = np.isnan(y) - reg = stats.linregress(x[~Inan], y[~Inan]) - y[Inan] = reg[0] * x[Inan] * slope_scale + reg[1] - return y - - -def get_random_realization(x, bins, value, rand_fill=0, rand_scale=0, slope_scale=0): - """ - Get a random realization for a noisy signal with a set of bins - - Args: - x (np.ndarray): 1D list/array of positions - bins (np.ndarray): 1D list/array of bin edges - value (np.ndarray): 1D list/array of values - rand_fill (float): The standard deviation to use where data is not defined (default=0) - rand_scale (float): Value to scale the standard deviation for the realization (default=0) - slope_scale (float): Value to scale the extrapolation slope (default=0.0) - - Returns: - np.ndarray: An array containing the random realization - """ - y_mean = apply_to_bins(np.mean, x, value, bins) - y_std = apply_to_bins(np.std, x, value, bins) - - # Extrapolate to fill the upper/lower bounds - x_mid = bins[:-1] + 0.5 * (bins[1] - 
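A minimal sketch of the table round trip implemented by save_tables/load_tables above; './tables_demo' and the property name 'porosity' are placeholders, and the axis/property values are synthetic.

import numpy as np
from pygeosx_tools import file_io

# Three axes and one property defined on the resulting 3D grid.
x = np.linspace(0.0, 100.0, 11)
y = np.linspace(0.0, 50.0, 6)
z = np.linspace(-10.0, 0.0, 5)
porosity = np.random.uniform(0.1, 0.3, (len(x), len(y), len(z)))

# Axes are written as x.csv, y.csv, z.csv and the property as porosity.csv,
# flattened in Fortran order as in the save_tables implementation above.
file_io.save_tables([x, y, z], {'porosity': porosity}, table_root='./tables_demo')

axes, props = file_io.load_tables(['x', 'y', 'z'], ['porosity'], table_root='./tables_demo')
assert props['porosity'].shape == porosity.shape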
bins[0]) - y_mean = extrapolate_nan_values(x_mid, y_mean, slope_scale) - y_std[np.isnan(y_std)] = rand_fill - - # Add a random perturbation to the target value to match missing high/lows - y_final = y_mean + (rand_scale * y_std * np.random.randn(len(y_mean))) - return y_final - - -def get_realizations(x, bins, targets): - """ - Get random realizations for noisy signals on target bins - - Args: - x (np.ndarray): 1D list/array of positions - bins (np.ndarray): 1D list/array of bin edges - targets (dict): Dict of geosx target keys, inputs to get_random_realization - - Returns: - dict: Dictionary of random realizations - - """ - results = {} - for k, t in targets.items(): - results[k] = get_random_realization(x, bins, **t) - return results diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/py.typed b/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/py.typed deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/well_log.py b/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/well_log.py deleted file mode 100644 index 58453f0f968..00000000000 --- a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/well_log.py +++ /dev/null @@ -1,96 +0,0 @@ -import numpy as np -import re - - -def parse_las(fname, variable_start='~C', body_start='~A'): - """ - Parse an las format log file - - Args: - fname (str): Path to the log file - variable_start (str): A string that indicates the start of variable header information (default = '~CURVE INFORMATION') - body_start (str): a string that indicates the start of the log body (default = '~A') - - Returns: - np.ndarray: a dict containing the values and unit definitions for each variable in the log - """ - results = {} - variable_order = [] - - # The expected format of the varible definition block is: - # name.units code:description - variable_regex = re.compile('\s*([^\.^\s]*)\s*(\.[^ ]*) ([^:]*):(.*)') - - with open(fname) as f: - file_location = 0 - for line in f: - line = line.split('#')[0] - if line: - # Preamble - if (file_location == 0): - if variable_start in line: - file_location += 1 - - # Variable definitions - elif (file_location == 1): - # This is not a comment line - if body_start in line: - file_location += 1 - else: - match = variable_regex.match(line) - if match: - variable_order.append(match[1]) - results[match[1]] = { - 'units': match[2][0:], - 'code': match[3], - 'description': match[4], - 'values': [] - } - else: - # As a fall-back use the full line - variable_order.append(line[:-1]) - results[line[:-1]] = {'units': '', 'code': '', 'description': '', 'values': []} - - # Body - else: - for k, v in zip(variable_order, line.split()): - results[k]['values'].append(float(v)) - - # Convert values to numpy arrays - for k in results: - results[k]['values'] = np.array(results[k]['values']) - - return results - - -def convert_E_nu_to_K_G(E, nu): - """ - Convert young's modulus and poisson's ratio to bulk and shear modulus - - Args: - E (float, np.ndarray): Young's modulus - nu (float, np.ndarray): Poisson's ratio - - Returns: - tuple: bulk modulus, shear modulus with same size as inputs - """ - K = E / (3.0 * (1 - 2.0 * nu)) - G = E / (2.0 * (1 + nu)) - return K, G - - -def estimate_shmin(z, rho, nu): - """ - Estimate the minimum horizontal stress using the poisson's ratio - - Args: - z (float, np.ndarray): Depth - rho (float, np.ndarray): Density - nu (float, np.ndarray): Poisson's 
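A small sketch of apply_to_bins from the module above, using synthetic data; the depth/value arrays and bin edges are hypothetical.

import numpy as np
from pygeosx_tools import mesh_interpolation

# Bin-averaged mean and standard deviation of a noisy depth profile.
depth = np.random.uniform(0.0, 100.0, 500)
value = 0.01 * depth + 0.1 * np.random.randn(500)
bin_edges = np.linspace(0.0, 100.0, 11)

mean_per_bin = mesh_interpolation.apply_to_bins(np.mean, depth, value, bin_edges)
std_per_bin = mesh_interpolation.apply_to_bins(np.std, depth, value, bin_edges)
print(mean_per_bin)    # one value per bin; empty bins come back as NaN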
ratio - - Returns: - float: minimum horizontal stress - """ - k = nu / (1.0 - nu) - sigma_h = k * rho * 9.81 * z - return sigma_h diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/wrapper.py b/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/wrapper.py deleted file mode 100644 index 7c46e742bbf..00000000000 --- a/src/coreComponents/python/modules/pygeosx_tools_package/pygeosx_tools/wrapper.py +++ /dev/null @@ -1,406 +0,0 @@ -import sys -import numpy as np -from mpi4py import MPI -import matplotlib.pyplot as plt -import pylvarray -import pygeosx - -# Get the MPI rank -comm = MPI.COMM_WORLD -rank = comm.Get_rank() - - -def get_wrapper(problem, target_key, write_flag=False): - """ - Get a local copy of a wrapper as a numpy ndarray - - Args: - filename (str): Catalog file name - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - write_flag (bool): Sets write mode (default=False) - - Returns: - np.ndarray: The wrapper as a numpy ndarray - """ - local_values = problem.get_wrapper(target_key).value() - - if hasattr(local_values, "set_access_level"): - # Array types will have the set_access_level method - # These require additional manipulation before use - if write_flag: - local_values.set_access_level(pylvarray.MODIFIABLE, pylvarray.CPU) - else: - local_values.set_access_level(pylvarray.CPU) - - if hasattr(local_values, "to_numpy"): - local_values = local_values.to_numpy() - return local_values - - -def get_wrapper_par(problem, target_key, allgather=False, ghost_key=''): - """ - Get a global copy of a wrapper as a numpy ndarray. - Note: if ghost_key is set, it will try to remove any ghost elements - - Args: - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - allgather (bool): Flag to trigger allgather across ranks (False) - ghost_key (str): Key for the corresponding ghost wrapper (default='') - - Returns: - np.ndarray: The wrapper as a numpy ndarray - """ - if (comm.size == 1): - # This is a serial problem - return get_wrapper(problem, target_key) - - else: - # This is a parallel problem - # Get the local wrapper size, shape - local_values = get_wrapper(problem, target_key) - - # Filter out ghost ranks if requested - if ghost_key: - ghost_values = get_wrapper(problem, ghost_key) - local_values = local_values[ghost_values < -0.5] - - # Find buffer size - N = np.shape(local_values) - M = np.prod(N) - all_M = [] - max_M = 0 - if allgather: - all_M = comm.allgather(M) - max_M = np.amax(all_M) - else: - all_M = comm.gather(M, root=0) - if (rank == 0): - max_M = np.amax(all_M) - max_M = comm.bcast(max_M, root=0) - - # Pack the array into a buffer - send_buff = np.zeros(max_M) - send_buff[:M] = np.reshape(local_values, (-1)) - receive_buff = np.zeros((comm.size, max_M)) - - # Gather the buffers - if allgather: - comm.Allgather([send_buff, MPI.DOUBLE], [receive_buff, MPI.DOUBLE]) - else: - comm.Gather([send_buff, MPI.DOUBLE], [receive_buff, MPI.DOUBLE], root=0) - - # Unpack the buffers - all_values = [] - R = list(N) - R[0] = -1 - if ((rank == 0) | allgather): - # Reshape each rank's contribution - for ii in range(comm.size): - if (all_M[ii] > 0): - tmp = np.reshape(receive_buff[ii, :all_M[ii]], R) - all_values.append(tmp) - - # Concatenate into a single array - all_values = np.concatenate(all_values, axis=0) - return all_values - - -def gather_wrapper(problem, key, ghost_key=''): - """ - Get a global copy of a wrapper as a numpy ndarray on rank 0 - - Args: - 
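A short sketch of the elastic-property helpers defined above, on a hypothetical three-point depth profile in SI units (Pa, kg/m^3, m).

import numpy as np
from pygeosx_tools import well_log

E = np.full(3, 20.0e9)                 # Young's modulus
nu = np.array([0.20, 0.25, 0.30])      # Poisson's ratio
K, G = well_log.convert_E_nu_to_K_G(E, nu)

z = np.array([1000.0, 2000.0, 3000.0])    # depth
rho = np.full(3, 2500.0)                  # density
sigma_h = well_log.estimate_shmin(z, rho, nu)
print(K, G, sigma_h)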
problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - - Returns: - np.ndarray: The wrapper as a numpy ndarray - """ - return get_wrapper_par(problem, key, ghost_key=ghost_key) - - -def allgather_wrapper(problem, key, ghost_key=''): - """ - Get a global copy of a wrapper as a numpy ndarray on all ranks - - Args: - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - - Returns: - np.ndarray: The wrapper as a numpy ndarray - """ - return get_wrapper_par(problem, key, allgather=True, ghost_key=ghost_key) - - -def get_global_value_range(problem, key): - """ - Get the range of a target value across all processes - - Args: - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - - Returns: - tuple: The global min/max of the target - """ - local_values = get_wrapper(problem, key) - - # 1D arrays will return a scalar, ND arrays an array - N = np.shape(local_values) - local_min = 1e100 - local_max = -1e100 - if (len(N) > 1): - local_min = np.zeros(N[1]) + 1e100 - local_max = np.zeros(N[1]) - 1e100 - - # For >1D arrays, keep the last dimension - query_axis = 0 - if (len(N) > 2): - query_axis = tuple([ii for ii in range(0, len(N) - 1)]) - - # Ignore zero-length results - if len(local_values): - local_min = np.amin(local_values, axis=query_axis) - local_max = np.amax(local_values, axis=query_axis) - - # Gather the results onto rank 0 - all_min = comm.gather(local_min, root=0) - all_max = comm.gather(local_max, root=0) - global_min = 1e100 - global_max = -1e100 - if (rank == 0): - global_min = np.amin(np.array(all_min), axis=0) - global_max = np.amax(np.array(all_max), axis=0) - return global_min, global_max - - -def print_global_value_range(problem, key, header, scale=1.0, precision='%1.4f'): - """ - Print the range of a target value across all processes - - Args: - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - header (str): Header to print with the range - scale (float): Multiply the range with this value before printing (default = 1.0) - precision (str): Format for printing the range (default = %1.4f) - - Returns: - tuple: The global min/max of the target - """ - global_min, global_max = get_global_value_range(problem, key) - global_min *= scale - global_max *= scale - - if (rank == 0): - if isinstance(global_min, np.ndarray): - min_str = ', '.join([precision % (x) for x in global_min]) - max_str = ', '.join([precision % (x) for x in global_max]) - print('%s: min=[%s], max=[%s]' % (header, min_str, max_str)) - else: - min_str = precision % (global_min) - max_str = precision % (global_max) - print('%s: min=%s, max=%s' % (header, min_str, max_str)) - - # Return a copy of the min/max in case we want to use them - return global_min, global_max - - -def set_wrapper_to_value(problem, key, value): - """ - Set the value of a wrapper - - Args: - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - value (float): Value to set the wrapper - """ - local_values = get_wrapper(problem, key, write_flag=True) - local_values[...] 
= value - - -def set_wrapper_with_function(problem, target_key, input_keys, fn, target_index=-1): - """ - Set the value of a wrapper using a function - - Args: - problem (pygeosx.Group): GEOSX problem handle - target_key (str): Key for the target wrapper - input_keys (str, list): The input key(s) - fn (function): Vectorized function used to calculate target values - target_index (int): Target index to write the output (default = all) - """ - if isinstance(input_keys, str): - input_keys = [input_keys] - local_target = get_wrapper(problem, target_key, write_flag=True) - local_inputs = [get_wrapper(problem, k) for k in input_keys] - - # Run the function, check the shape of outputs/target - fn_output = fn(*local_inputs) - N = np.shape(local_target) - M = np.shape(fn_output) - - if (target_index < 0): - if (N == M): - # Function output, target shapes are the same - local_target[...] = fn_output - - elif (len(M) == 1): - # Apply the function output across each of the target dims - local_target[...] = np.tile(np.expand_dims(fn_output, axis=1), (1, N[1])) - - else: - raise Exception('Shape of function output %s is not compatible with target %s' % (str(M), str(N))) - elif (len(M) == 1): - if (len(N) == 2): - # 2D target, with 1D output applied to a given index - local_target[:, target_index] = fn_output - - else: - # ND target, with 1D output tiled across intermediate indices - expand_axes = tuple([ii for ii in range(1, len(N) - 1)]) - tile_axes = tuple([1] + [ii for ii in N[1:-1]]) - local_target[..., target_index] = np.tile(np.expand_dims(fn_output, axis=expand_axes), tile_axes) - - else: - raise Exception('Shape of function output %s is not compatible with target %s (target axis=%i)' % - (str(M), str(N), target_index)) - - -def search_datastructure_wrappers_recursive(group, filters, matching_paths, level=0, group_path=[]): - """ - Recursively search the group and its children for wrappers that match the filters - - Args: - problem (pygeosx.Group): GEOSX problem handle - filters (list): a list of strings - matching_paths (list): a list of matching values - """ - for wrapper in group.wrappers(): - wrapper_path = str(wrapper).split()[0] - wrapper_test = group_path + [wrapper_path[wrapper_path.rfind('/') + 1:]] - if all(f in wrapper_test for f in filters): - matching_paths.append('/'.join(wrapper_test)) - - for sub_group in group.groups(): - sub_group_name = str(sub_group).split()[0].split('/')[-1] - search_datastructure_wrappers_recursive(sub_group, - filters, - matching_paths, - level=level + 1, - group_path=group_path + [sub_group_name]) - - -def get_matching_wrapper_path(problem, filters): - """ - Recursively search the group and its children for wrappers that match the filters - A successful match is identified if the wrapper path contains all of the - strings in the filter. 
- For example, if filters=['a', 'b', 'c'], the following could match any of the following: - 'a/b/c', 'c/b/a', 'd/e/c/f/b/a/a' - - Args: - problem (pygeosx.Group): GEOSX problem handle - filters (list): a list of strings - - Returns: - str: Key of the matching wrapper - """ - matching_paths = [] - search_datastructure_wrappers_recursive(problem, filters, matching_paths) - - if (len(matching_paths) == 1): - if (rank == 0): - print('Found matching wrapper: %s' % (matching_paths[0])) - return matching_paths[0] - - else: - if (rank == 0): - print('Error occured while looking for wrappers:') - print('Filters: [%s]' % (', '.join(filters))) - print('Matching wrappers: [%s]' % (', '.join(matching_paths))) - raise Exception('Search resulted in 0 or >1 wrappers mathching filters') - - -def run_queries(problem, records): - """ - Query the current GEOSX datastructure - Note: The expected record request format is as follows. - For now, the only supported query is to find the min/max values of the target - record = {'path/of/wrapper': {'label': 'aperture (m)', # A label to include with plots - 'scale': 1.0, # Value to scale results by - 'history: [], # A list to store values over time - 'fhandle': plt.figure() # A figure handle }} - - Args: - problem (pygeosx.Group): GEOSX problem handle - records (dict): A dict of dicts that specifies the queries to run - """ - for k in records.keys(): - if (k == 'time'): - current_time = get_wrapper(problem, "Events/time") - records[k]['history'].append(current_time * records[k]['scale']) - else: - tmp = print_global_value_range(problem, k, records[k]['label'], scale=records[k]['scale']) - records[k]['history'].append(tmp) - sys.stdout.flush() - - -def plot_history(records, output_root='.', save_figures=True, show_figures=True): - """ - Plot the time-histories for the records structure. 
- Note: If figures are shown, the GEOSX process will be blocked until they are closed - - Args: - records (dict): A dict of dicts containing the queries - output_root (str): Path to save figures (default = './') - save_figures (bool): Flag to indicate whether figures should be saved (default = True) - show_figures (bool): Flag to indicate whether figures should be drawn (default = False) - """ - if (rank == 0): - for k in records.keys(): - if (k != 'time'): - # Set the active figure - fa = plt.figure(records[k]['fhandle'].number) - - # Assemble values to plot - t = np.array(records['time']['history']) - x = np.array(records[k]['history']) - N = np.shape(x) # (time, min/max, dimensions) - - # Add plots - if (len(N) == 2): - # This is a 1D field - plt.gca().cla() - plt.plot(t, x[:, 0], label='min') - plt.plot(t, x[:, 1], label='max') - plt.xlabel(records['time']['label']) - plt.ylabel(records[k]['label']) - - else: - # This is a 2D field - columns = 2 - rows = int(np.ceil(N[2] / float(columns))) - - # Setup axes - if (('axes' not in records[k]) or (len(fa.axes) == 0)): - records[k]['axes'] = [plt.subplot(rows, columns, ii + 1) for ii in range(0, N[2])] - - for ii in range(0, N[2]): - ax = records[k]['axes'][ii] - ax.cla() - ax.plot(t, x[:, 0, ii], label='min') - ax.plot(t, x[:, 1, ii], label='max') - ax.set_xlabel(records['time']['label']) - ax.set_ylabel('%s (dim=%i)' % (records[k]['label'], ii)) - plt.legend(loc=2) - records[k]['fhandle'].tight_layout(pad=1.5) - - if save_figures: - fname = k[k.rfind('/') + 1:] - plt.savefig('%s/%s.png' % (output_root, fname), format='png') - if show_figures: - plt.show() diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/pyproject.toml b/src/coreComponents/python/modules/pygeosx_tools_package/pyproject.toml deleted file mode 100644 index c2f433afcb5..00000000000 --- a/src/coreComponents/python/modules/pygeosx_tools_package/pyproject.toml +++ /dev/null @@ -1,8 +0,0 @@ -[build-system] -requires = ["setuptools>=42", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.mypy] -python_version = "3.8" -warn_return_any = true -warn_unused_configs = true diff --git a/src/coreComponents/python/modules/pygeosx_tools_package/setup.cfg b/src/coreComponents/python/modules/pygeosx_tools_package/setup.cfg deleted file mode 100644 index 4df92e3dcfd..00000000000 --- a/src/coreComponents/python/modules/pygeosx_tools_package/setup.cfg +++ /dev/null @@ -1,20 +0,0 @@ -[metadata] -name = pygeosx_tools -version = 0.1.0 -description = Tools for interacting with pygeosx -author = Christopher Sherman -author_email = sherman27@llnl.gov -license = LGPL-2.1 - -[options] -packages = - pygeosx_tools -install_requires = - matplotlib - numpy - scipy - mpi4py -python_requires = >=3.6 - -[options.package_data] -pygeosx_tools = py.typed diff --git a/src/coreComponents/python/modules/timehistory_package/pyproject.toml b/src/coreComponents/python/modules/timehistory_package/pyproject.toml deleted file mode 100644 index c2f433afcb5..00000000000 --- a/src/coreComponents/python/modules/timehistory_package/pyproject.toml +++ /dev/null @@ -1,8 +0,0 @@ -[build-system] -requires = ["setuptools>=42", "wheel"] -build-backend = "setuptools.build_meta" - -[tool.mypy] -python_version = "3.8" -warn_return_any = true -warn_unused_configs = true diff --git a/src/coreComponents/python/modules/timehistory_package/setup.cfg b/src/coreComponents/python/modules/timehistory_package/setup.cfg deleted file mode 100644 index e6e34647a51..00000000000 --- 
a/src/coreComponents/python/modules/timehistory_package/setup.cfg +++ /dev/null @@ -1,16 +0,0 @@ -[metadata] -name = time_history_plotting -version = 0.1.0 -description = Scripts to plot time-series data from GEOSX time-history output files -author = William Tobin -author_email = tobin6@llnl.gov -license = LGPL-2.1 - -[options] -packages = - plot_time_history -install_requires = - matplotlib - hdf5_wrapper - numpy -python_requires = >=3.6 diff --git a/src/coreComponents/python/modules/timehistory_package/timehistory/__init__.py b/src/coreComponents/python/modules/timehistory_package/timehistory/__init__.py deleted file mode 100644 index b288976bbee..00000000000 --- a/src/coreComponents/python/modules/timehistory_package/timehistory/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .plot_time_history import getHistorySeries diff --git a/src/coreComponents/python/modules/timehistory_package/timehistory/plot_time_history.py b/src/coreComponents/python/modules/timehistory_package/timehistory/plot_time_history.py deleted file mode 100644 index 839288cebf9..00000000000 --- a/src/coreComponents/python/modules/timehistory_package/timehistory/plot_time_history.py +++ /dev/null @@ -1,157 +0,0 @@ -import numpy as np -from hdf5_wrapper import hdf5_wrapper as h5w -import matplotlib as mpl -import matplotlib.pyplot as plt -import os -import sys -import argparse - -import re - - -def isiterable(obj): - try: - it = iter(obj) - except TypeError: - return False - return True - - -def getHistorySeries(database, variable, setname, indices=None, components=None): - """ - Retrieve a series of time history structures suitable for plotting in addition to the specific set index and component for the time series - - Args: - database (hdf5_wrapper.hdf5_wrapper): database to retrieve time history data from - variable (str): the name of the time history variable for which to retrieve time-series data - setname (str): the name of the index set as specified in the geosx input xml for which to query time-series data - indices (int, list): the indices in the named set to query for, if None, defaults to all - components (int, list): the components in the flattened data types to retrieve, defaults to all - - Returns: - list: list of (time, data, idx, comp) timeseries tuples for each time history data component - """ - - set_regex = re.compile(variable + '(.*?)', re.IGNORECASE) - if setname is not None: - set_regex = re.compile(variable + '\s*' + str(setname), re.IGNORECASE) - time_regex = re.compile('Time', re.IGNORECASE) # need to make this per-set, thought that was in already? 
- - set_match = list(filter(set_regex.match, database.keys())) - time_match = list(filter(time_regex.match, database.keys())) - - if len(set_match) == 0: - print(f"Error: can't locate time history data for variable/set described by regex {set_regex.pattern}") - return None - if len(time_match) == 0: - print(f"Error: can't locate time history data for set time variable described by regex {time_regex.pattern}") - return None - - if len(set_match) > 1: - print(f"Warning: variable/set specification matches multiple datasets: {', '.join(set_match)}") - if len(time_match) > 1: - print(f"Warning: set specification matches multiple time datasets: {', '.join(time_match)}") - - set_match = set_match[0] - time_match = time_match[0] - - data_series = database[set_match] - time_series = database[time_match] - - if time_series.shape[0] != data_series.shape[0]: - print( - f"Error: The length of the time-series {time_match} and data-series {set_match} do not match: {time_series.shape} and {data_series.shape} !" - ) - - if indices is not None: - if type(indices) is int: - indices = list(indices) - if isiterable(indices): - oob_idxs = list(filter(lambda idx: not 0 <= idx < data_series.shape[1], indices)) - if len(oob_idxs) > 0: - print(f"Error: The specified indices: ({', '.join(oob_idxs)}) " + "\n\t" + - f" are out of the dataset index range: [0,{data_series.shape[1]})") - indices = list(set(indices) - set(oob_idxs)) - else: - print(f"Error: unsupported indices type: {type(indices)}") - else: - indices = range(data_series.shape[1]) - - if components is not None: - if type(components) is int: - components = list(components) - if isiterable(components): - oob_comps = list(filter(lambda comp: not 0 <= comp < data_series.shape[2], components)) - if len(oob_comps) > 0: - print(f"Error: The specified components: ({', '.join(oob_comps)}) " + "\n\t" + - " is out of the dataset component range: [0,{data_series.shape[1]})") - components = list(set(components) - set(oob_comps)) - else: - print(f"Error: unsupported components type: {type(components)}") - else: - components = range(data_series.shape[2]) - - return [(time_series[:, 0], data_series[:, idx, comp], idx, comp) for idx in indices for comp in components] - - -def commandLinePlotGen(): - parser = argparse.ArgumentParser( - description="A script that parses geosx HDF5 time-history files and produces time-history plots using matplotlib" - ) - parser.add_argument("filename", metavar="history_file", type=str, help="The time history file to parse") - - parser.add_argument("variable", - metavar="variable_name", - type=str, - help="Which time-history variable collected by GEOSX to generate a plot file for.") - - parser.add_argument( - "--sets", - metavar="name", - type=str, - action='append', - default=[None], - nargs="+", - help= - "Which index set of time-history data collected by GEOSX to generate a plot file for, may be specified multiple times with different indices/components for each set." 
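A minimal plotting sketch built on getHistorySeries above, assuming the timehistory and hdf5_wrapper packages are importable; 'pressure_history.hdf5', the variable name 'pressure', and the set name 'source' are placeholders for an actual GEOSX time-history output.

import matplotlib.pyplot as plt
from hdf5_wrapper import hdf5_wrapper
from timehistory import getHistorySeries

with hdf5_wrapper('pressure_history.hdf5', mode='r') as database:
    # One (time, values, index, component) tuple per requested index/component.
    series = getHistorySeries(database, 'pressure', 'source', components=[0])
    if series is not None:
        for time, values, idx, comp in series:
            plt.plot(time, values, label='index %i' % idx)
        plt.xlabel('time (s)')
        plt.ylabel('pressure')
        plt.legend()
        plt.savefig('pressure_history.png')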
- ) - - parser.add_argument("--indices", - metavar="index", - type=int, - default=[], - nargs="+", - help="An optional list of specific indices in the most-recently specified set.") - - parser.add_argument("--components", - metavar="int", - type=int, - default=[], - nargs="+", - help="An optional list of specific variable components") - - args = parser.parse_args() - result = 0 - - if not os.path.isfile(args.filename): - print(f"Error: file '{args.filename}' not found.") - result = -1 - else: - with h5w(args.filename, mode='r') as database: - for setname in args.sets: - ds = getHistorySeries(database, args.variable, setname, args.indices, args.components) - if ds is None: - result = -1 - break - figname = args.variable + ("_" + setname if setname is not None else "") - fig, ax = plt.subplots() - ax.set_title(figname) - for d in ds: - ax.plot(d[0], d[1]) - fig.savefig(figname + "_history.png") - - return result - - -if __name__ == "__main__": - commandLinePlotGen() diff --git a/src/coreComponents/python/visitMacros/visitVTKConversion.py b/src/coreComponents/python/visitMacros/visitVTKConversion.py deleted file mode 100644 index 8a135d18d5b..00000000000 --- a/src/coreComponents/python/visitMacros/visitVTKConversion.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -visitVTKConversion.py - -VisIt macros designed to convert GEOSX vtk format files containing multiple -regions into separate files, which can then be normally read by the tool. -To install the macros, do the following: - 1) Select Controls/Command from the top menu in VisIt - 2) In the newly opened Commands window, select the Macros tab - 3) Copy the following scripts to the panel and click Update Macros - 4) The macro will appear under the name 'Convert pvd' in the macros window - -If the macros window is not visible, select Controls/Macros from the top menu in VisIt. -When you select 'Convert pvd', visit will attempt to convert any pvd files in the current -directory (where the visit command was run). The macro will create a new set of .vtm files -in the vtk output directory (named '[Region]_*.vtm'). - -For multi-region problems, you can load each set of .vtm files individual. -After adding plots to the interface, you may be prompted to 'Correlate databases'. -If you select 'Yes', VisIt will add a new time slider 'Correlation*' to the interface, which can -be used to change the visualization time, while keeping the datasets syncronized. 
- -Note: The VisIt python interpreter does not allow empty lines within functions or multi-line arguments -""" - - -def convert_vtm_file(vtm_file, output_dir, root_path): - from xml.etree import ElementTree - import os - vtm_dir = os.path.split(vtm_file)[0] - vtm_header = os.path.split(vtm_file)[1].split('.')[0] - vtm_tree = ElementTree.parse(vtm_file) - vtm_root = vtm_tree.getroot() - multiblock = vtm_root[0] - for block in multiblock: - for child_block in block: - # Setup a new tree and copy parent elements - visit_root = ElementTree.Element(vtm_root.tag, attrib=vtm_root.attrib) - visit_multiblock = ElementTree.Element(multiblock.tag, attrib=multiblock.attrib) - visit_root.append(visit_multiblock) - visit_block = ElementTree.Element(block.tag, attrib=block.attrib) - visit_multiblock.append(visit_block) - for dataset in child_block: - # Copy the dataset, then correct the relative path - visit_block.append(dataset) - dataset_path = os.path.relpath(os.path.join(root_path, vtm_dir, dataset.get('file')), start=output_dir) - dataset.set('file', dataset_path) - visit_tree = ElementTree.ElementTree(element=visit_root) - visit_tree.write(os.path.join(output_dir, '%s_%s.vtm' % (child_block.get('name'), vtm_header))) - - -def user_macro_convert_pvd_files(): - import glob - import os - from xml.etree import ElementTree - print('Converting pvd files in current directory:') - for file in glob.glob('*.pvd'): - print(' ' + file) - pvd_dir = os.path.split(file)[0] - output_dir = file[:file.rfind('.')] - pvd_tree = ElementTree.parse(file) - pvd_root = pvd_tree.getroot() - collection_root = pvd_root[0] - for dataset in collection_root: - convert_vtm_file(dataset.get('file'), output_dir, pvd_dir) - print('Done!') - - -RegisterMacro("Convert pvd", user_macro_convert_pvd_files) diff --git a/src/coreComponents/schema/CMakeLists.txt b/src/coreComponents/schema/CMakeLists.txt index cebb95e9551..ccee0a436dd 100644 --- a/src/coreComponents/schema/CMakeLists.txt +++ b/src/coreComponents/schema/CMakeLists.txt @@ -24,4 +24,3 @@ blt_add_library( NAME schema target_include_directories( schema PUBLIC ${CMAKE_SOURCE_DIR}/coreComponents) -geosx_add_code_checks(PREFIX schema ) diff --git a/src/coreComponents/schema/docs/BiotPorosity.rst b/src/coreComponents/schema/docs/BiotPorosity.rst index 906b76b732a..2bc5c5f2678 100644 --- a/src/coreComponents/schema/docs/BiotPorosity.rst +++ b/src/coreComponents/schema/docs/BiotPorosity.rst @@ -1,12 +1,12 @@ -================================== ========= ======== =========================================== -Name Type Default Description -================================== ========= ======== =========================================== -defaultReferencePorosity real64 required Default value of the reference porosity -defaultThermalExpansionCoefficient real64 0 Default thermal expansion coefficient -grainBulkModulus real64 required Grain bulk modulus -name groupName required A name is required for any non-unique nodes -================================== ========= ======== =========================================== +======================== ========= ======== =========================================== +Name Type Default Description +======================== ========= ======== =========================================== +defaultPorosityTEC real64 0 Default thermal expansion coefficient +defaultReferencePorosity real64 required Default value of the reference porosity +grainBulkModulus real64 required Grain bulk modulus +name groupName required A name is required for any non-unique nodes 
+======================== ========= ======== =========================================== diff --git a/src/coreComponents/schema/docs/BiotPorosity_other.rst b/src/coreComponents/schema/docs/BiotPorosity_other.rst index 962677d8251..49c9bd3d373 100644 --- a/src/coreComponents/schema/docs/BiotPorosity_other.rst +++ b/src/coreComponents/schema/docs/BiotPorosity_other.rst @@ -1,19 +1,19 @@ -===================================== ============== ==================================================================================================== -Name Type Description -===================================== ============== ==================================================================================================== -averageMeanEffectiveStressIncrement_k real64_array Mean effective stress increment averaged over quadrature points at the previous sequential iteration -biotCoefficient real64_array Biot coefficient -dPorosity_dPressure real64_array2d Derivative of rock porosity with respect to pressure -dPorosity_dTemperature real64_array2d Derivative of rock porosity with respect to temperature -initialPorosity real64_array2d Initial porosity -meanEffectiveStressIncrement_k real64_array2d Mean effective stress increment at quadrature points at the previous sequential iteration -porosity real64_array2d Rock porosity -porosity_n real64_array2d Rock porosity at the previous converged time step -referencePorosity real64_array Reference porosity -solidBulkModulus real64_array Solid bulk modulus -thermalExpansionCoefficient real64_array Thermal expansion coefficient -===================================== ============== ==================================================================================================== +================================= ============== ================================================================================================ +Name Type Description +================================= ============== ================================================================================================ +averageMeanTotalStressIncrement_k real64_array Mean total stress increment averaged over quadrature points at the previous sequential iteration +biotCoefficient real64_array Biot coefficient +dPorosity_dPressure real64_array2d Derivative of rock porosity with respect to pressure +dPorosity_dTemperature real64_array2d Derivative of rock porosity with respect to temperature +initialPorosity real64_array2d Initial porosity +meanTotalStressIncrement_k real64_array2d Mean total stress increment at quadrature points at the previous sequential iteration +porosity real64_array2d Rock porosity +porosity_n real64_array2d Rock porosity at the previous converged time step +referencePorosity real64_array Reference porosity +solidBulkModulus real64_array Solid bulk modulus +thermalExpansionCoefficient real64_array Thermal expansion coefficient +================================= ============== ================================================================================================ diff --git a/src/coreComponents/schema/docs/BrooksCoreyStone2RelativePermeability.rst b/src/coreComponents/schema/docs/BrooksCoreyStone2RelativePermeability.rst new file mode 100644 index 00000000000..c10a2a48b2f --- /dev/null +++ b/src/coreComponents/schema/docs/BrooksCoreyStone2RelativePermeability.rst @@ -0,0 +1,19 @@ + + +======================= ================== ======== 
========================================================================================================================================================== +Name Type Default Description +======================= ================== ======== ========================================================================================================================================================== +gasOilRelPermExponent real64_array {1} | Rel perm power law exponent for the pair (gas phase, oil phase) at residual water saturation + | The expected format is "{ gasExp, oilExp }", in that order +gasOilRelPermMaxValue real64_array {0} | Maximum rel perm value for the pair (gas phase, oil phase) at residual water saturation + | The expected format is "{ gasMax, oilMax }", in that order +name groupName required A name is required for any non-unique nodes +phaseMinVolumeFraction real64_array {0} Minimum volume fraction value for each phase +phaseNames groupNameRef_array required List of fluid phases +waterOilRelPermExponent real64_array {1} | Rel perm power law exponent for the pair (water phase, oil phase) at residual gas saturation + | The expected format is "{ waterExp, oilExp }", in that order +waterOilRelPermMaxValue real64_array {0} | Maximum rel perm value for the pair (water phase, oil phase) at residual gas saturation + | The expected format is "{ waterMax, oilMax }", in that order +======================= ================== ======== ========================================================================================================================================================== + + diff --git a/src/coreComponents/schema/docs/BrooksCoreyStone2RelativePermeability_other.rst b/src/coreComponents/schema/docs/BrooksCoreyStone2RelativePermeability_other.rst new file mode 100644 index 00000000000..e6dd24aeb13 --- /dev/null +++ b/src/coreComponents/schema/docs/BrooksCoreyStone2RelativePermeability_other.rst @@ -0,0 +1,15 @@ + + +=============================== ============== ======================================================================================================================= +Name Type Description +=============================== ============== ======================================================================================================================= +dPhaseRelPerm_dPhaseVolFraction real64_array4d Derivative of phase relative permeability with respect to phase volume fraction +phaseOrder integer_array (no description available) +phaseRelPerm real64_array3d Phase relative permeability +phaseRelPerm_n real64_array3d Phase relative permeability at previous time +phaseTrappedVolFraction real64_array3d Phase trapped volume fraction +phaseTypes integer_array (no description available) +volFracScale real64 Factor used to scale the phase capillary pressure, defined as: one minus the sum of the phase minimum volume fractions. 
+=============================== ============== ======================================================================================================================= + + diff --git a/src/coreComponents/schema/docs/CO2BrineEzrokhiFluid.rst b/src/coreComponents/schema/docs/CO2BrineEzrokhiFluid.rst index 4642cc407cf..8b77d7787d5 100644 --- a/src/coreComponents/schema/docs/CO2BrineEzrokhiFluid.rst +++ b/src/coreComponents/schema/docs/CO2BrineEzrokhiFluid.rst @@ -7,6 +7,7 @@ checkPVTTablesRanges integer 1 Enable (1) or disable (0) an er componentMolarWeight real64_array {0} Component molar weights componentNames string_array {} List of component names flashModelParaFile path required Name of the file defining the parameters of the flash model +logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes phaseNames groupNameRef_array {} List of fluid phases phasePVTParaFiles path_array required Names of the files defining the parameters of the viscosity and density models diff --git a/src/coreComponents/schema/docs/CO2BrineEzrokhiThermalFluid.rst b/src/coreComponents/schema/docs/CO2BrineEzrokhiThermalFluid.rst index 4642cc407cf..8b77d7787d5 100644 --- a/src/coreComponents/schema/docs/CO2BrineEzrokhiThermalFluid.rst +++ b/src/coreComponents/schema/docs/CO2BrineEzrokhiThermalFluid.rst @@ -7,6 +7,7 @@ checkPVTTablesRanges integer 1 Enable (1) or disable (0) an er componentMolarWeight real64_array {0} Component molar weights componentNames string_array {} List of component names flashModelParaFile path required Name of the file defining the parameters of the flash model +logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes phaseNames groupNameRef_array {} List of fluid phases phasePVTParaFiles path_array required Names of the files defining the parameters of the viscosity and density models diff --git a/src/coreComponents/schema/docs/CO2BrinePhillipsFluid.rst b/src/coreComponents/schema/docs/CO2BrinePhillipsFluid.rst index 4642cc407cf..8b77d7787d5 100644 --- a/src/coreComponents/schema/docs/CO2BrinePhillipsFluid.rst +++ b/src/coreComponents/schema/docs/CO2BrinePhillipsFluid.rst @@ -7,6 +7,7 @@ checkPVTTablesRanges integer 1 Enable (1) or disable (0) an er componentMolarWeight real64_array {0} Component molar weights componentNames string_array {} List of component names flashModelParaFile path required Name of the file defining the parameters of the flash model +logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes phaseNames groupNameRef_array {} List of fluid phases phasePVTParaFiles path_array required Names of the files defining the parameters of the viscosity and density models diff --git a/src/coreComponents/schema/docs/CO2BrinePhillipsThermalFluid.rst b/src/coreComponents/schema/docs/CO2BrinePhillipsThermalFluid.rst index 4642cc407cf..8b77d7787d5 100644 --- a/src/coreComponents/schema/docs/CO2BrinePhillipsThermalFluid.rst +++ b/src/coreComponents/schema/docs/CO2BrinePhillipsThermalFluid.rst @@ -7,6 +7,7 @@ checkPVTTablesRanges integer 1 Enable (1) or disable (0) an er componentMolarWeight real64_array {0} Component molar weights componentNames string_array {} List of component names flashModelParaFile path required Name of the file defining the parameters of the flash model +logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes phaseNames groupNameRef_array {} List of fluid phases phasePVTParaFiles path_array required Names of 
the files defining the parameters of the viscosity and density models diff --git a/src/coreComponents/schema/docs/CeramicDamage.rst b/src/coreComponents/schema/docs/CeramicDamage.rst index b79c9c86c20..fa671dd48bc 100644 --- a/src/coreComponents/schema/docs/CeramicDamage.rst +++ b/src/coreComponents/schema/docs/CeramicDamage.rst @@ -1,19 +1,19 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -compressiveStrength real64 required Compressive strength -crackSpeed real64 required Crack speed -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultDensity real64 required Default Material Density -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -maximumStrength real64 required Maximum theoretical strength -name groupName required A name is required for any non-unique nodes -tensileStrength real64 required Tensile strength -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +compressiveStrength real64 required Compressive strength +crackSpeed real64 required Crack speed +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +maximumStrength real64 required Maximum theoretical strength +name groupName required A name is required for any non-unique nodes +tensileStrength real64 required Tensile strength +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseFVM.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseFVM.rst index ae387d086bc..0fb7bf0fb89 100644 --- a/src/coreComponents/schema/docs/CompositionalMultiphaseFVM.rst +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseFVM.rst @@ -25,6 +25,7 @@ scalingType geos_CompositionalMultiphaseFVM_Scalin | * Global | * Local solutionChangeScalingFactor real64 0.5 Damping factor for solution change targets +targetFlowCFL real64 -1 Target CFL condition `CFL condition `_when computing the next timestep. targetPhaseVolFractionChangeInTimeStep real64 0.2 Target (absolute) change in phase volume fraction in a time step targetRegions groupNameRef_array required Allowable regions that the solver may be applied to. Note that this does not indicate that the solver will be applied to these regions, only that allocation will occur such that the solver may be applied to these regions. The decision about what regions this solver will beapplied to rests in the EventManager. 
targetRelativePressureChangeInTimeStep real64 0.2 Target (relative) change in pressure in a time step (expected value between 0 and 1) diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseHybridFVM.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseHybridFVM.rst index 56e166a6161..d2a1bd31852 100644 --- a/src/coreComponents/schema/docs/CompositionalMultiphaseHybridFVM.rst +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseHybridFVM.rst @@ -17,6 +17,7 @@ maxRelativeTemperatureChange real64 0.5 Maximum (r minCompDens real64 1e-10 Minimum allowed global component density name groupName required A name is required for any non-unique nodes solutionChangeScalingFactor real64 0.5 Damping factor for solution change targets +targetFlowCFL real64 -1 Target CFL condition `CFL condition `_when computing the next timestep. targetPhaseVolFractionChangeInTimeStep real64 0.2 Target (absolute) change in phase volume fraction in a time step targetRegions groupNameRef_array required Allowable regions that the solver may be applied to. Note that this does not indicate that the solver will be applied to these regions, only that allocation will occur such that the solver may be applied to these regions. The decision about what regions this solver will beapplied to rests in the EventManager. targetRelativePressureChangeInTimeStep real64 0.2 Target (relative) change in pressure in a time step (expected value between 0 and 1) diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanics.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanics.rst new file mode 100644 index 00000000000..c85eff65ff8 --- /dev/null +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanics.rst @@ -0,0 +1,24 @@ + + +=========================== ==================================== ======== ====================================================================================================================================================================================================================================================================================================================== +Name Type Default Description +=========================== ==================================== ======== ====================================================================================================================================================================================================================================================================================================================== +cflFactor real64 0.5 Factor to apply to the `CFL condition `_ when calculating the maximum allowable time step. Values should be in the interval (0,1] +initialDt real64 1e+99 Initial time-step value required by the solver to the event manager. +isThermal integer 0 Flag indicating whether the problem is thermal or not. Set isThermal="1" to enable the thermal coupling +logLevel integer 0 Log level +name groupName required A name is required for any non-unique nodes +reservoirAndWellsSolverName groupNameRef required Name of the reservoirAndWells solver used by the coupled solver +solidSolverName groupNameRef required Name of the solid solver used by the coupled solver +stabilizationMultiplier real64 1 Constant multiplier of stabilization strength. +stabilizationRegionNames groupNameRef_array {} Regions where stabilization is applied. +stabilizationType geos_stabilization_StabilizationType None | Stabilization type. 
Options are: + | None - Add no stabilization to mass equation, + | Global - Add stabilization to all faces, + | Local - Add stabilization only to interiors of macro elements. +targetRegions groupNameRef_array required Allowable regions that the solver may be applied to. Note that this does not indicate that the solver will be applied to these regions, only that allocation will occur such that the solver may be applied to these regions. The decision about what regions this solver will beapplied to rests in the EventManager. +LinearSolverParameters node unique :ref:`XML_LinearSolverParameters` +NonlinearSolverParameters node unique :ref:`XML_NonlinearSolverParameters` +=========================== ==================================== ======== ====================================================================================================================================================================================================================================================================================================================== + + diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanicsInitialization.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanicsInitialization.rst new file mode 100644 index 00000000000..27129634c0b --- /dev/null +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanicsInitialization.rst @@ -0,0 +1,12 @@ + + +=========================== ============ ======== ========================================================================== +Name Type Default Description +=========================== ============ ======== ========================================================================== +logLevel integer 0 Log level +name groupName required A name is required for any non-unique nodes +performStressInitialization integer required Flag to indicate that the solver is going to perform stress initialization +poromechanicsSolverName groupNameRef required Name of the poromechanics solver +=========================== ============ ======== ========================================================================== + + diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanicsInitialization_other.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanicsInitialization_other.rst new file mode 100644 index 00000000000..adf1c1b8aec --- /dev/null +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanicsInitialization_other.rst @@ -0,0 +1,9 @@ + + +==== ==== ============================ +Name Type Description +==== ==== ============================ + (no documentation available) +==== ==== ============================ + + diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanics_other.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanics_other.rst new file mode 100644 index 00000000000..80b71ab722d --- /dev/null +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseReservoirPoromechanics_other.rst @@ -0,0 +1,15 @@ + + +=========================== ============================================================================================================================================================== 
======================================================================================================================================================================================================================================================================================================================== +Name Type Description +=========================== ============================================================================================================================================================== ======================================================================================================================================================================================================================================================================================================================== +discretization groupNameRef Name of discretization object (defined in the :ref:`NumericalMethodsManager`) to use for this solver. For instance, if this is a Finite Element Solver, the name of a :ref:`FiniteElement` should be specified. If this is a Finite Volume Method, the name of a :ref:`FiniteVolume` discretization should be specified. +maxStableDt real64 Value of the Maximum Stable Timestep for this solver. +meshTargets geos_mapBase< std_pair< string, string >, LvArray_Array< string, 1, camp_int_seq< long, 0l >, int, LvArray_ChaiBuffer >, std_integral_constant< bool, true > > MeshBody/Region combinations that the solver will be applied to. +performStressInitialization integer Flag to indicate that the solver is going to perform stress initialization +LinearSolverParameters node :ref:`DATASTRUCTURE_LinearSolverParameters` +NonlinearSolverParameters node :ref:`DATASTRUCTURE_NonlinearSolverParameters` +SolverStatistics node :ref:`DATASTRUCTURE_SolverStatistics` +=========================== ============================================================================================================================================================== ======================================================================================================================================================================================================================================================================================================================== + + diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseStatistics.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseStatistics.rst index 6be91e9fb68..4a386d1d1c3 100644 --- a/src/coreComponents/schema/docs/CompositionalMultiphaseStatistics.rst +++ b/src/coreComponents/schema/docs/CompositionalMultiphaseStatistics.rst @@ -9,6 +9,7 @@ flowSolverName groupNameRef required Name of the flow solver logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes relpermThreshold real64 1e-06 Flag to decide whether a phase is considered mobile (when the relperm is above the threshold) or immobile (when the relperm is below the threshold) in metric 2 +writeCSV integer 0 Write statistics into a CSV file ======================= ============ ======== =============================================================================================================================================================== diff --git a/src/coreComponents/schema/docs/CompositionalMultiphaseWell.rst b/src/coreComponents/schema/docs/CompositionalMultiphaseWell.rst index 30233ab1445..b072785ff00 100644 --- a/src/coreComponents/schema/docs/CompositionalMultiphaseWell.rst +++ 
b/src/coreComponents/schema/docs/CompositionalMultiphaseWell.rst @@ -13,6 +13,7 @@ maxRelativePressureChange real64 1 Maximum (relative) cha name groupName required A name is required for any non-unique nodes targetRegions groupNameRef_array required Allowable regions that the solver may be applied to. Note that this does not indicate that the solver will be applied to these regions, only that allocation will occur such that the solver may be applied to these regions. The decision about what regions this solver will beapplied to rests in the EventManager. useMass integer 0 Use total mass equation +writeCSV integer 0 Write rates into a CSV file LinearSolverParameters node unique :ref:`XML_LinearSolverParameters` NonlinearSolverParameters node unique :ref:`XML_NonlinearSolverParameters` WellControls node :ref:`XML_WellControls` diff --git a/src/coreComponents/schema/docs/Constitutive.rst b/src/coreComponents/schema/docs/Constitutive.rst index 5f0b249cc69..2f7c4bad4e0 100644 --- a/src/coreComponents/schema/docs/Constitutive.rst +++ b/src/coreComponents/schema/docs/Constitutive.rst @@ -57,7 +57,10 @@ PorousElasticIsotropic node :ref:`XML_PorousElast PorousElasticOrthotropic node :ref:`XML_PorousElasticOrthotropic` PorousElasticTransverseIsotropic node :ref:`XML_PorousElasticTransverseIsotropic` PorousExtendedDruckerPrager node :ref:`XML_PorousExtendedDruckerPrager` -PorousModifiedCamClay node :ref:`XML_PorousModifiedCamClay` +PorousModifiedCamClay node :ref:`XML_PorousModifiedCamClay` +PorousViscoDruckerPrager node :ref:`XML_PorousViscoDruckerPrager` +PorousViscoExtendedDruckerPrager node :ref:`XML_PorousViscoExtendedDruckerPrager` +PorousViscoModifiedCamClay node :ref:`XML_PorousViscoModifiedCamClay` PressurePorosity node :ref:`XML_PressurePorosity` ProppantPermeability node :ref:`XML_ProppantPermeability` ProppantPorosity node :ref:`XML_ProppantPorosity` diff --git a/src/coreComponents/schema/docs/Constitutive_other.rst b/src/coreComponents/schema/docs/Constitutive_other.rst index 3c79a6b6906..06999731356 100644 --- a/src/coreComponents/schema/docs/Constitutive_other.rst +++ b/src/coreComponents/schema/docs/Constitutive_other.rst @@ -57,7 +57,10 @@ PorousElasticIsotropic node :ref:`DATASTRUCTURE_PorousEla PorousElasticOrthotropic node :ref:`DATASTRUCTURE_PorousElasticOrthotropic` PorousElasticTransverseIsotropic node :ref:`DATASTRUCTURE_PorousElasticTransverseIsotropic` PorousExtendedDruckerPrager node :ref:`DATASTRUCTURE_PorousExtendedDruckerPrager` -PorousModifiedCamClay node :ref:`DATASTRUCTURE_PorousModifiedCamClay` +PorousModifiedCamClay node :ref:`DATASTRUCTURE_PorousModifiedCamClay` +PorousViscoDruckerPrager node :ref:`DATASTRUCTURE_PorousViscoDruckerPrager` +PorousViscoExtendedDruckerPrager node :ref:`DATASTRUCTURE_PorousViscoExtendedDruckerPrager` +PorousViscoModifiedCamClay node :ref:`DATASTRUCTURE_PorousViscoModifiedCamClay` PressurePorosity node :ref:`DATASTRUCTURE_PressurePorosity` ProppantPermeability node :ref:`DATASTRUCTURE_ProppantPermeability` ProppantPorosity node :ref:`DATASTRUCTURE_ProppantPorosity` diff --git a/src/coreComponents/schema/docs/DamageElasticIsotropic.rst b/src/coreComponents/schema/docs/DamageElasticIsotropic.rst index e44a8e4181a..19236b59dcf 100644 --- a/src/coreComponents/schema/docs/DamageElasticIsotropic.rst +++ b/src/coreComponents/schema/docs/DamageElasticIsotropic.rst @@ -1,23 +1,23 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description 
-================================== ========= ======== ==================================================================== -compressiveStrength real64 0 Compressive strength from the uniaxial compression test -criticalFractureEnergy real64 required Critical fracture energy -criticalStrainEnergy real64 required Critical stress in a 1d tension test -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultDensity real64 required Default Material Density -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -degradationLowerLimit real64 0 The lower limit of the degradation function -deltaCoefficient real64 -1 Coefficient in the calculation of the external driving force -extDrivingForceFlag integer 0 Whether to have external driving force. Can be 0 or 1 -lengthScale real64 required Length scale l in the phase-field equation -name groupName required A name is required for any non-unique nodes -tensileStrength real64 0 Tensile strength from the uniaxial tension test -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +compressiveStrength real64 0 Compressive strength from the uniaxial compression test +criticalFractureEnergy real64 required Critical fracture energy +criticalStrainEnergy real64 required Critical stress in a 1d tension test +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +degradationLowerLimit real64 0 The lower limit of the degradation function +deltaCoefficient real64 -1 Coefficient in the calculation of the external driving force +extDrivingForceFlag integer 0 Whether to have external driving force. 
Can be 0 or 1 +lengthScale real64 required Length scale l in the phase-field equation +name groupName required A name is required for any non-unique nodes +tensileStrength real64 0 Tensile strength from the uniaxial tension test +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/DamageSpectralElasticIsotropic.rst b/src/coreComponents/schema/docs/DamageSpectralElasticIsotropic.rst index e44a8e4181a..19236b59dcf 100644 --- a/src/coreComponents/schema/docs/DamageSpectralElasticIsotropic.rst +++ b/src/coreComponents/schema/docs/DamageSpectralElasticIsotropic.rst @@ -1,23 +1,23 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -compressiveStrength real64 0 Compressive strength from the uniaxial compression test -criticalFractureEnergy real64 required Critical fracture energy -criticalStrainEnergy real64 required Critical stress in a 1d tension test -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultDensity real64 required Default Material Density -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -degradationLowerLimit real64 0 The lower limit of the degradation function -deltaCoefficient real64 -1 Coefficient in the calculation of the external driving force -extDrivingForceFlag integer 0 Whether to have external driving force. Can be 0 or 1 -lengthScale real64 required Length scale l in the phase-field equation -name groupName required A name is required for any non-unique nodes -tensileStrength real64 0 Tensile strength from the uniaxial tension test -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +compressiveStrength real64 0 Compressive strength from the uniaxial compression test +criticalFractureEnergy real64 required Critical fracture energy +criticalStrainEnergy real64 required Critical stress in a 1d tension test +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +degradationLowerLimit real64 0 The lower limit of the degradation function +deltaCoefficient real64 -1 Coefficient in the calculation of the external driving force +extDrivingForceFlag integer 0 Whether to have external driving force. 
Can be 0 or 1 +lengthScale real64 required Length scale l in the phase-field equation +name groupName required A name is required for any non-unique nodes +tensileStrength real64 0 Tensile strength from the uniaxial tension test +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/DamageVolDevElasticIsotropic.rst b/src/coreComponents/schema/docs/DamageVolDevElasticIsotropic.rst index e44a8e4181a..19236b59dcf 100644 --- a/src/coreComponents/schema/docs/DamageVolDevElasticIsotropic.rst +++ b/src/coreComponents/schema/docs/DamageVolDevElasticIsotropic.rst @@ -1,23 +1,23 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -compressiveStrength real64 0 Compressive strength from the uniaxial compression test -criticalFractureEnergy real64 required Critical fracture energy -criticalStrainEnergy real64 required Critical stress in a 1d tension test -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultDensity real64 required Default Material Density -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -degradationLowerLimit real64 0 The lower limit of the degradation function -deltaCoefficient real64 -1 Coefficient in the calculation of the external driving force -extDrivingForceFlag integer 0 Whether to have external driving force. Can be 0 or 1 -lengthScale real64 required Length scale l in the phase-field equation -name groupName required A name is required for any non-unique nodes -tensileStrength real64 0 Tensile strength from the uniaxial tension test -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +compressiveStrength real64 0 Compressive strength from the uniaxial compression test +criticalFractureEnergy real64 required Critical fracture energy +criticalStrainEnergy real64 required Critical stress in a 1d tension test +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +degradationLowerLimit real64 0 The lower limit of the degradation function +deltaCoefficient real64 -1 Coefficient in the calculation of the external driving force +extDrivingForceFlag integer 0 Whether to have external driving force. 
Can be 0 or 1 +lengthScale real64 required Length scale l in the phase-field equation +name groupName required A name is required for any non-unique nodes +tensileStrength real64 0 Tensile strength from the uniaxial tension test +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/DelftEgg.rst b/src/coreComponents/schema/docs/DelftEgg.rst index 96ec77b6129..a509cdd295e 100644 --- a/src/coreComponents/schema/docs/DelftEgg.rst +++ b/src/coreComponents/schema/docs/DelftEgg.rst @@ -1,20 +1,20 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultCslSlope real64 1 Slope of the critical state line -defaultDensity real64 required Default Material Density -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultPreConsolidationPressure real64 -1.5 Initial preconsolidation pressure -defaultRecompressionIndex real64 0.002 Recompresion Index -defaultShapeParameter real64 1 Shape parameter for the yield surface -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultVirginCompressionIndex real64 0.005 Virgin compression index -defaultYoungModulus real64 -1 Default Young's Modulus -name groupName required A name is required for any non-unique nodes -================================== ========= ======== ==================================================================== +=============================== ========= ======== ==================================================================== +Name Type Default Description +=============================== ========= ======== ==================================================================== +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultCslSlope real64 1 Slope of the critical state line +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultPreConsolidationPressure real64 -1.5 Initial preconsolidation pressure +defaultRecompressionIndex real64 0.002 Recompresion Index +defaultShapeParameter real64 1 Shape parameter for the yield surface +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultVirginCompressionIndex real64 0.005 Virgin compression index +defaultYoungModulus real64 -1 Default Young's Modulus +name groupName required A name is required for any non-unique nodes +=============================== ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/DruckerPrager.rst b/src/coreComponents/schema/docs/DruckerPrager.rst index 92559d3ebc8..3f5d4ef7249 100644 --- a/src/coreComponents/schema/docs/DruckerPrager.rst +++ b/src/coreComponents/schema/docs/DruckerPrager.rst @@ -1,19 +1,19 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== 
==================================================================== -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultCohesion real64 0 Initial cohesion -defaultDensity real64 required Default Material Density -defaultDilationAngle real64 30 Dilation angle (degrees) -defaultFrictionAngle real64 30 Friction angle (degrees) -defaultHardeningRate real64 0 Cohesion hardening/softening rate -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -name groupName required A name is required for any non-unique nodes -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultCohesion real64 0 Initial cohesion +defaultDensity real64 required Default Material Density +defaultDilationAngle real64 30 Dilation angle (degrees) +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultFrictionAngle real64 30 Friction angle (degrees) +defaultHardeningRate real64 0 Cohesion hardening/softening rate +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +name groupName required A name is required for any non-unique nodes +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ElasticIsotropic.rst b/src/coreComponents/schema/docs/ElasticIsotropic.rst index 84c48b891ff..15e42d76f2b 100644 --- a/src/coreComponents/schema/docs/ElasticIsotropic.rst +++ b/src/coreComponents/schema/docs/ElasticIsotropic.rst @@ -1,15 +1,15 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultDensity real64 required Default Material Density -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -name groupName required A name is required for any non-unique nodes -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion 
Coefficient of the Solid Rock Frame +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +name groupName required A name is required for any non-unique nodes +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ElasticIsotropicPressureDependent.rst b/src/coreComponents/schema/docs/ElasticIsotropicPressureDependent.rst index faad5c074ce..96015fd3861 100644 --- a/src/coreComponents/schema/docs/ElasticIsotropicPressureDependent.rst +++ b/src/coreComponents/schema/docs/ElasticIsotropicPressureDependent.rst @@ -1,15 +1,15 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultDensity real64 required Default Material Density -defaultRecompressionIndex real64 0.002 Recompresion Index -defaultRefPressure real64 -1 Reference Pressure -defaultRefStrainVol real64 0 Reference Volumetric Strain -defaultShearModulus real64 -1 Elastic Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -name groupName required A name is required for any non-unique nodes -================================== ========= ======== ==================================================================== +========================= ========= ======== ==================================================================== +Name Type Default Description +========================= ========= ======== ==================================================================== +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultRecompressionIndex real64 0.002 Recompresion Index +defaultRefPressure real64 -1 Reference Pressure +defaultRefStrainVol real64 0 Reference Volumetric Strain +defaultShearModulus real64 -1 Elastic Shear Modulus Parameter +name groupName required A name is required for any non-unique nodes +========================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ElasticOrthotropic.rst b/src/coreComponents/schema/docs/ElasticOrthotropic.rst index 2fc6d92fe1a..f2d55b8530f 100644 --- a/src/coreComponents/schema/docs/ElasticOrthotropic.rst +++ b/src/coreComponents/schema/docs/ElasticOrthotropic.rst @@ -1,29 +1,29 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultC11 real64 -1 Default C11 Component of Voigt Stiffness Tensor -defaultC12 real64 -1 Default C12 Component of Voigt Stiffness Tensor -defaultC13 real64 -1 Default C13 Component of Voigt Stiffness Tensor -defaultC22 real64 -1 Default C22 Component of Voigt Stiffness Tensor -defaultC23 real64 -1 Default C23 Component of Voigt Stiffness Tensor -defaultC33 real64 -1 Default C33 Component of Voigt Stiffness Tensor -defaultC44 real64 -1 Default C44 Component of Voigt Stiffness Tensor -defaultC55 real64 -1 Default 
C55 Component of Voigt Stiffness Tensor -defaultC66 real64 -1 Default C66 Component of Voigt Stiffness Tensor -defaultDensity real64 required Default Material Density -defaultE1 real64 -1 Default Young's Modulus E1 -defaultE2 real64 -1 Default Young's Modulus E2 -defaultE3 real64 -1 Default Young's Modulus E3 -defaultG12 real64 -1 Default Shear Modulus G12 -defaultG13 real64 -1 Default Shear Modulus G13 -defaultG23 real64 -1 Default Shear Modulus G23 -defaultNu12 real64 -1 Default Poission's Ratio Nu12 -defaultNu13 real64 -1 Default Poission's Ratio Nu13 -defaultNu23 real64 -1 Default Poission's Ratio Nu23 -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -name groupName required A name is required for any non-unique nodes -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +defaultC11 real64 -1 Default C11 Component of Voigt Stiffness Tensor +defaultC12 real64 -1 Default C12 Component of Voigt Stiffness Tensor +defaultC13 real64 -1 Default C13 Component of Voigt Stiffness Tensor +defaultC22 real64 -1 Default C22 Component of Voigt Stiffness Tensor +defaultC23 real64 -1 Default C23 Component of Voigt Stiffness Tensor +defaultC33 real64 -1 Default C33 Component of Voigt Stiffness Tensor +defaultC44 real64 -1 Default C44 Component of Voigt Stiffness Tensor +defaultC55 real64 -1 Default C55 Component of Voigt Stiffness Tensor +defaultC66 real64 -1 Default C66 Component of Voigt Stiffness Tensor +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultE1 real64 -1 Default Young's Modulus E1 +defaultE2 real64 -1 Default Young's Modulus E2 +defaultE3 real64 -1 Default Young's Modulus E3 +defaultG12 real64 -1 Default Shear Modulus G12 +defaultG13 real64 -1 Default Shear Modulus G13 +defaultG23 real64 -1 Default Shear Modulus G23 +defaultNu12 real64 -1 Default Poission's Ratio Nu12 +defaultNu13 real64 -1 Default Poission's Ratio Nu13 +defaultNu23 real64 -1 Default Poission's Ratio Nu23 +name groupName required A name is required for any non-unique nodes +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ElasticTransverseIsotropic.rst b/src/coreComponents/schema/docs/ElasticTransverseIsotropic.rst index 8f7b6e25362..e07838a1fd3 100644 --- a/src/coreComponents/schema/docs/ElasticTransverseIsotropic.rst +++ b/src/coreComponents/schema/docs/ElasticTransverseIsotropic.rst @@ -9,10 +9,10 @@ defaultC33 real64 -1 Default Stiffness Paramete defaultC44 real64 -1 Default Stiffness Parameter C44 defaultC66 real64 -1 Default Stiffness Parameter C66 defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame defaultPoissonRatioAxialTransverse real64 -1 Default Axial-Transverse Poisson's Ratio defaultPoissonRatioTransverse real64 -1 Default Transverse Poisson's Ratio defaultShearModulusAxialTransverse real64 -1 Default Axial-Transverse Shear Modulus -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal 
Expansion Coefficient of the Solid Rock Frame defaultYoungModulusAxial real64 -1 Default Axial Young's Modulus defaultYoungModulusTransverse real64 -1 Default Transverse Young's Modulus name groupName required A name is required for any non-unique nodes diff --git a/src/coreComponents/schema/docs/ExtendedDruckerPrager.rst b/src/coreComponents/schema/docs/ExtendedDruckerPrager.rst index 5973577923e..a196499ef95 100644 --- a/src/coreComponents/schema/docs/ExtendedDruckerPrager.rst +++ b/src/coreComponents/schema/docs/ExtendedDruckerPrager.rst @@ -1,20 +1,20 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultCohesion real64 0 Initial cohesion -defaultDensity real64 required Default Material Density -defaultDilationRatio real64 1 Dilation ratio [0,1] (ratio = tan dilationAngle / tan frictionAngle) -defaultHardening real64 0 Hardening parameter (hardening rate is faster for smaller values) -defaultInitialFrictionAngle real64 30 Initial friction angle (degrees) -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultResidualFrictionAngle real64 30 Residual friction angle (degrees) -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -name groupName required A name is required for any non-unique nodes -================================== ========= ======== ==================================================================== +============================ ========= ======== ==================================================================== +Name Type Default Description +============================ ========= ======== ==================================================================== +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultCohesion real64 0 Initial cohesion +defaultDensity real64 required Default Material Density +defaultDilationRatio real64 1 Dilation ratio [0,1] (ratio = tan dilationAngle / tan frictionAngle) +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultHardening real64 0 Hardening parameter (hardening rate is faster for smaller values) +defaultInitialFrictionAngle real64 30 Initial friction angle (degrees) +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultResidualFrictionAngle real64 30 Residual friction angle (degrees) +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +name groupName required A name is required for any non-unique nodes +============================ ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ModifiedCamClay.rst b/src/coreComponents/schema/docs/ModifiedCamClay.rst index 5bc050cd142..1784d9660fb 100644 --- a/src/coreComponents/schema/docs/ModifiedCamClay.rst +++ b/src/coreComponents/schema/docs/ModifiedCamClay.rst @@ -1,18 +1,18 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== 
diff --git a/src/coreComponents/schema/docs/ModifiedCamClay.rst b/src/coreComponents/schema/docs/ModifiedCamClay.rst
index 5bc050cd142..1784d9660fb 100644
--- a/src/coreComponents/schema/docs/ModifiedCamClay.rst
+++ b/src/coreComponents/schema/docs/ModifiedCamClay.rst
@@ -1,18 +1,18 @@
-================================== ========= ======== ====================================================================
-Name Type Default Description
-================================== ========= ======== ====================================================================
-defaultCslSlope real64 1 Slope of the critical state line
-defaultDensity real64 required Default Material Density
-defaultPreConsolidationPressure real64 -1.5 Initial preconsolidation pressure
-defaultRecompressionIndex real64 0.002 Recompresion Index
-defaultRefPressure real64 -1 Reference Pressure
-defaultRefStrainVol real64 0 Reference Volumetric Strain
-defaultShearModulus real64 -1 Elastic Shear Modulus Parameter
-defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame
-defaultVirginCompressionIndex real64 0.005 Virgin compression index
-name groupName required A name is required for any non-unique nodes
-================================== ========= ======== ====================================================================
+=============================== ========= ======== ====================================================================
+Name Type Default Description
+=============================== ========= ======== ====================================================================
+defaultCslSlope real64 1 Slope of the critical state line
+defaultDensity real64 required Default Material Density
+defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame
+defaultPreConsolidationPressure real64 -1.5 Initial preconsolidation pressure
+defaultRecompressionIndex real64 0.002 Recompresion Index
+defaultRefPressure real64 -1 Reference Pressure
+defaultRefStrainVol real64 0 Reference Volumetric Strain
+defaultShearModulus real64 -1 Elastic Shear Modulus Parameter
+defaultVirginCompressionIndex real64 0.005 Virgin compression index
+name groupName required A name is required for any non-unique nodes
+=============================== ========= ======== ====================================================================
diff --git a/src/coreComponents/schema/docs/NonlinearSolverParameters.rst b/src/coreComponents/schema/docs/NonlinearSolverParameters.rst
index 4067911d031..c8511aad1ff 100644
--- a/src/coreComponents/schema/docs/NonlinearSolverParameters.rst
+++ b/src/coreComponents/schema/docs/NonlinearSolverParameters.rst
@@ -25,6 +25,7 @@ minNormalizer real64
 newtonMaxIter integer 5 Maximum number of iterations that are allowed in a Newton loop.
 newtonMinIter integer 1 Minimum number of iterations that are required before exiting the Newton loop.
 newtonTol real64 1e-06 The required tolerance in order to exit the Newton iteration loop.
+nonlinearAccelerationType geos_NonlinearSolverParameters_NonlinearAccelerationType None Nonlinear acceleration type for sequential solver.
 normType geos_solverBaseKernels_NormType Linfinity | Norm used by the flow solver to check nonlinear convergence. Valid options: | * Linfinity | * L2
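The hunk above adds the nonlinearAccelerationType attribute to NonlinearSolverParameters. A minimal sketch of where it would sit in an input deck: the attribute names and the None default are taken from the table above, while the non-default values are placeholders and the accepted acceleration types are defined in the schema rather than listed in this excerpt.

<!-- illustrative only: nested inside a solver block; newtonMaxIter value is a placeholder -->
<NonlinearSolverParameters
  newtonTol="1.0e-6"
  newtonMaxIter="10"
  nonlinearAccelerationType="None"/>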
diff --git a/src/coreComponents/schema/docs/PerfectlyPlastic.rst b/src/coreComponents/schema/docs/PerfectlyPlastic.rst
index a229423a361..019ce1379dc 100644
--- a/src/coreComponents/schema/docs/PerfectlyPlastic.rst
+++ b/src/coreComponents/schema/docs/PerfectlyPlastic.rst
@@ -1,16 +1,16 @@
-================================== ========= ============ ====================================================================
-Name Type Default Description
-================================== ========= ============ ====================================================================
-defaultBulkModulus real64 -1 Default Bulk Modulus Parameter
-defaultDensity real64 required Default Material Density
-defaultPoissonRatio real64 -1 Default Poisson's Ratio
-defaultShearModulus real64 -1 Default Shear Modulus Parameter
-defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame
-defaultYieldStress real64 1.79769e+308 Default yield stress
-defaultYoungModulus real64 -1 Default Young's Modulus
-name groupName required A name is required for any non-unique nodes
-================================== ========= ============ ====================================================================
+======================= ========= ============ ====================================================================
+Name Type Default Description
+======================= ========= ============ ====================================================================
+defaultBulkModulus real64 -1 Default Bulk Modulus Parameter
+defaultDensity real64 required Default Material Density
+defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame
+defaultPoissonRatio real64 -1 Default Poisson's Ratio
+defaultShearModulus real64 -1 Default Shear Modulus Parameter
+defaultYieldStress real64 1.79769e+308 Default yield stress
+defaultYoungModulus real64 -1 Default Young's Modulus
+name groupName required A name is required for any non-unique nodes
+======================= ========= ============ ====================================================================
diff --git a/src/coreComponents/schema/docs/PorousViscoDruckerPrager.rst b/src/coreComponents/schema/docs/PorousViscoDruckerPrager.rst
new file mode 100644
index 00000000000..e15edba5a2f
--- /dev/null
+++ b/src/coreComponents/schema/docs/PorousViscoDruckerPrager.rst
@@ -0,0 +1,13 @@
+
+
+============================ ====== ======== ===========================================
+Name Type Default Description
+============================ ====== ======== ===========================================
+name string required A name is required for any non-unique nodes
+permeabilityModelName string required Name of the permeability model.
+porosityModelName string required Name of the porosity model.
+solidInternalEnergyModelName string Name of the solid internal energy model.
+solidModelName string required Name of the solid model.
+============================ ====== ======== ===========================================
+
+
diff --git a/src/coreComponents/schema/docs/PorousViscoDruckerPrager_other.rst b/src/coreComponents/schema/docs/PorousViscoDruckerPrager_other.rst
new file mode 100644
index 00000000000..adf1c1b8aec
--- /dev/null
+++ b/src/coreComponents/schema/docs/PorousViscoDruckerPrager_other.rst
@@ -0,0 +1,9 @@
+
+
+==== ==== ============================
+Name Type Description
+==== ==== ============================
+ (no documentation available)
+==== ==== ============================
+
+
diff --git a/src/coreComponents/schema/docs/PorousViscoExtendedDruckerPrager.rst b/src/coreComponents/schema/docs/PorousViscoExtendedDruckerPrager.rst
new file mode 100644
index 00000000000..e15edba5a2f
--- /dev/null
+++ b/src/coreComponents/schema/docs/PorousViscoExtendedDruckerPrager.rst
@@ -0,0 +1,13 @@
+
+
+============================ ====== ======== ===========================================
+Name Type Default Description
+============================ ====== ======== ===========================================
+name string required A name is required for any non-unique nodes
+permeabilityModelName string required Name of the permeability model.
+porosityModelName string required Name of the porosity model.
+solidInternalEnergyModelName string Name of the solid internal energy model.
+solidModelName string required Name of the solid model.
+============================ ====== ======== ===========================================
+
+
diff --git a/src/coreComponents/schema/docs/PorousViscoExtendedDruckerPrager_other.rst b/src/coreComponents/schema/docs/PorousViscoExtendedDruckerPrager_other.rst
new file mode 100644
index 00000000000..adf1c1b8aec
--- /dev/null
+++ b/src/coreComponents/schema/docs/PorousViscoExtendedDruckerPrager_other.rst
@@ -0,0 +1,9 @@
+
+
+==== ==== ============================
+Name Type Description
+==== ==== ============================
+ (no documentation available)
+==== ==== ============================
+
+
diff --git a/src/coreComponents/schema/docs/PorousViscoModifiedCamClay.rst b/src/coreComponents/schema/docs/PorousViscoModifiedCamClay.rst
new file mode 100644
index 00000000000..e15edba5a2f
--- /dev/null
+++ b/src/coreComponents/schema/docs/PorousViscoModifiedCamClay.rst
@@ -0,0 +1,13 @@
+
+
+============================ ====== ======== ===========================================
+Name Type Default Description
+============================ ====== ======== ===========================================
+name string required A name is required for any non-unique nodes
+permeabilityModelName string required Name of the permeability model.
+porosityModelName string required Name of the porosity model.
+solidInternalEnergyModelName string Name of the solid internal energy model.
+solidModelName string required Name of the solid model.
+============================ ====== ======== =========================================== + + diff --git a/src/coreComponents/schema/docs/PorousViscoModifiedCamClay_other.rst b/src/coreComponents/schema/docs/PorousViscoModifiedCamClay_other.rst new file mode 100644 index 00000000000..adf1c1b8aec --- /dev/null +++ b/src/coreComponents/schema/docs/PorousViscoModifiedCamClay_other.rst @@ -0,0 +1,9 @@ + + +==== ==== ============================ +Name Type Description +==== ==== ============================ + (no documentation available) +==== ==== ============================ + + diff --git a/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanics.rst b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanics.rst new file mode 100644 index 00000000000..e19bd9b1ac7 --- /dev/null +++ b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanics.rst @@ -0,0 +1,18 @@ + + +=========================== ================== ======== ====================================================================================================================================================================================================================================================================================================================== +Name Type Default Description +=========================== ================== ======== ====================================================================================================================================================================================================================================================================================================================== +cflFactor real64 0.5 Factor to apply to the `CFL condition `_ when calculating the maximum allowable time step. Values should be in the interval (0,1] +initialDt real64 1e+99 Initial time-step value required by the solver to the event manager. +isThermal integer 0 Flag indicating whether the problem is thermal or not. Set isThermal="1" to enable the thermal coupling +logLevel integer 0 Log level +name groupName required A name is required for any non-unique nodes +reservoirAndWellsSolverName groupNameRef required Name of the reservoirAndWells solver used by the coupled solver +solidSolverName groupNameRef required Name of the solid solver used by the coupled solver +targetRegions groupNameRef_array required Allowable regions that the solver may be applied to. Note that this does not indicate that the solver will be applied to these regions, only that allocation will occur such that the solver may be applied to these regions. The decision about what regions this solver will beapplied to rests in the EventManager. 
+LinearSolverParameters node unique :ref:`XML_LinearSolverParameters` +NonlinearSolverParameters node unique :ref:`XML_NonlinearSolverParameters` +=========================== ================== ======== ====================================================================================================================================================================================================================================================================================================================== + + diff --git a/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanicsInitialization.rst b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanicsInitialization.rst new file mode 100644 index 00000000000..27129634c0b --- /dev/null +++ b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanicsInitialization.rst @@ -0,0 +1,12 @@ + + +=========================== ============ ======== ========================================================================== +Name Type Default Description +=========================== ============ ======== ========================================================================== +logLevel integer 0 Log level +name groupName required A name is required for any non-unique nodes +performStressInitialization integer required Flag to indicate that the solver is going to perform stress initialization +poromechanicsSolverName groupNameRef required Name of the poromechanics solver +=========================== ============ ======== ========================================================================== + + diff --git a/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanicsInitialization_other.rst b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanicsInitialization_other.rst new file mode 100644 index 00000000000..adf1c1b8aec --- /dev/null +++ b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanicsInitialization_other.rst @@ -0,0 +1,9 @@ + + +==== ==== ============================ +Name Type Description +==== ==== ============================ + (no documentation available) +==== ==== ============================ + + diff --git a/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanics_other.rst b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanics_other.rst new file mode 100644 index 00000000000..80b71ab722d --- /dev/null +++ b/src/coreComponents/schema/docs/SinglePhaseReservoirPoromechanics_other.rst @@ -0,0 +1,15 @@ + + +=========================== ============================================================================================================================================================== ======================================================================================================================================================================================================================================================================================================================== +Name Type Description +=========================== ============================================================================================================================================================== ======================================================================================================================================================================================================================================================================================================================== +discretization groupNameRef Name of discretization 
object (defined in the :ref:`NumericalMethodsManager`) to use for this solver. For instance, if this is a Finite Element Solver, the name of a :ref:`FiniteElement` should be specified. If this is a Finite Volume Method, the name of a :ref:`FiniteVolume` discretization should be specified. +maxStableDt real64 Value of the Maximum Stable Timestep for this solver. +meshTargets geos_mapBase< std_pair< string, string >, LvArray_Array< string, 1, camp_int_seq< long, 0l >, int, LvArray_ChaiBuffer >, std_integral_constant< bool, true > > MeshBody/Region combinations that the solver will be applied to. +performStressInitialization integer Flag to indicate that the solver is going to perform stress initialization +LinearSolverParameters node :ref:`DATASTRUCTURE_LinearSolverParameters` +NonlinearSolverParameters node :ref:`DATASTRUCTURE_NonlinearSolverParameters` +SolverStatistics node :ref:`DATASTRUCTURE_SolverStatistics` +=========================== ============================================================================================================================================================== ======================================================================================================================================================================================================================================================================================================================== + + diff --git a/src/coreComponents/schema/docs/SinglePhaseStatistics.rst b/src/coreComponents/schema/docs/SinglePhaseStatistics.rst index d3b6b815b17..ff3105519bd 100644 --- a/src/coreComponents/schema/docs/SinglePhaseStatistics.rst +++ b/src/coreComponents/schema/docs/SinglePhaseStatistics.rst @@ -6,6 +6,7 @@ Name Type Default Description flowSolverName groupNameRef required Name of the flow solver logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes +writeCSV integer 0 Write statistics into a CSV file ============== ============ ======== =========================================== diff --git a/src/coreComponents/schema/docs/SinglePhaseWell.rst b/src/coreComponents/schema/docs/SinglePhaseWell.rst index b285015b443..ba2a66c6297 100644 --- a/src/coreComponents/schema/docs/SinglePhaseWell.rst +++ b/src/coreComponents/schema/docs/SinglePhaseWell.rst @@ -8,6 +8,7 @@ initialDt real64 1e+99 Initial time-step value re logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes targetRegions groupNameRef_array required Allowable regions that the solver may be applied to. Note that this does not indicate that the solver will be applied to these regions, only that allocation will occur such that the solver may be applied to these regions. The decision about what regions this solver will beapplied to rests in the EventManager. 
+writeCSV integer 0 Write rates into a CSV file LinearSolverParameters node unique :ref:`XML_LinearSolverParameters` NonlinearSolverParameters node unique :ref:`XML_NonlinearSolverParameters` WellControls node :ref:`XML_WellControls` diff --git a/src/coreComponents/schema/docs/SolidMechanicsStatistics.rst b/src/coreComponents/schema/docs/SolidMechanicsStatistics.rst index c1caaa49863..2395830a112 100644 --- a/src/coreComponents/schema/docs/SolidMechanicsStatistics.rst +++ b/src/coreComponents/schema/docs/SolidMechanicsStatistics.rst @@ -6,6 +6,7 @@ Name Type Default Description logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes solidSolverName groupNameRef required Name of the solid solver +writeCSV integer 0 Write statistics into a CSV file =============== ============ ======== =========================================== diff --git a/src/coreComponents/schema/docs/ThermalCompressibleSinglePhaseFluid.rst b/src/coreComponents/schema/docs/ThermalCompressibleSinglePhaseFluid.rst index 1a465fa0ee0..469050d50fa 100644 --- a/src/coreComponents/schema/docs/ThermalCompressibleSinglePhaseFluid.rst +++ b/src/coreComponents/schema/docs/ThermalCompressibleSinglePhaseFluid.rst @@ -20,13 +20,13 @@ referenceInternalEnergy real64 0.001 Ref referencePressure real64 0 Reference pressure referenceTemperature real64 0 Reference temperature referenceViscosity real64 0.001 Reference fluid viscosity +specificHeatCapacity real64 0 Fluid heat capacity. Unit: J/kg/K thermalExpansionCoeff real64 0 Fluid thermal expansion coefficient. Unit: 1/K viscosibility real64 0 Fluid viscosity exponential coefficient viscosityModelType geos_constitutive_ExponentApproximationType linear | Type of viscosity model. Valid options: | * exponential | * linear | * quadratic -volumetricHeatCapacity real64 0 Fluid volumetric heat capacity. Unit: J/kg/K ======================= =========================================== ======== =================================================================================== diff --git a/src/coreComponents/schema/docs/VTK.rst b/src/coreComponents/schema/docs/VTK.rst index 4ba3e3b3870..3a376546ee3 100644 --- a/src/coreComponents/schema/docs/VTK.rst +++ b/src/coreComponents/schema/docs/VTK.rst @@ -6,6 +6,7 @@ Name Type Default Description childDirectory string Child directory path fieldNames groupNameRef_array {} Names of the fields to output. If this attribute is specified, GEOSX outputs all the fields specified by the user, regardless of their `plotLevel` format geos_vtk_VTKOutputMode binary Output data format. Valid options: ``binary``, ``ascii`` +levelNames string_array {} Names of mesh levels to output. logLevel integer 0 Log level name groupName required A name is required for any non-unique nodes onlyPlotSpecifiedFieldNames integer 0 If this flag is equal to 1, then we only plot the fields listed in `fieldNames`. 
Otherwise, we plot all the fields with the required `plotLevel`, plus the fields listed in `fieldNames` diff --git a/src/coreComponents/schema/docs/VanGenuchtenStone2RelativePermeability.rst b/src/coreComponents/schema/docs/VanGenuchtenStone2RelativePermeability.rst new file mode 100644 index 00000000000..00d74f74608 --- /dev/null +++ b/src/coreComponents/schema/docs/VanGenuchtenStone2RelativePermeability.rst @@ -0,0 +1,19 @@ + + +========================== ================== ======== ================================================================================================================================================================== +Name Type Default Description +========================== ================== ======== ================================================================================================================================================================== +gasOilRelPermExponentInv real64_array {0.5} | Rel perm power law exponent inverse for the pair (gas phase, oil phase) at residual water saturation + | The expected format is "{ gasExp, oilExp }", in that order +gasOilRelPermMaxValue real64_array {0} | Maximum rel perm value for the pair (gas phase, oil phase) at residual water saturation + | The expected format is "{ gasMax, oilMax }", in that order +name groupName required A name is required for any non-unique nodes +phaseMinVolumeFraction real64_array {0} Minimum volume fraction value for each phase +phaseNames groupNameRef_array required List of fluid phases +waterOilRelPermExponentInv real64_array {0.5} | Rel perm power law exponent inverse for the pair (water phase, oil phase) at residual gas saturation + | The expected format is "{ waterExp, oilExp }", in that order +waterOilRelPermMaxValue real64_array {0} | Maximum rel perm value for the pair (water phase, oil phase) at residual gas saturation + | The expected format is "{ waterMax, oilMax }", in that order +========================== ================== ======== ================================================================================================================================================================== + + diff --git a/src/coreComponents/schema/docs/VanGenuchtenStone2RelativePermeability_other.rst b/src/coreComponents/schema/docs/VanGenuchtenStone2RelativePermeability_other.rst new file mode 100644 index 00000000000..e6dd24aeb13 --- /dev/null +++ b/src/coreComponents/schema/docs/VanGenuchtenStone2RelativePermeability_other.rst @@ -0,0 +1,15 @@ + + +=============================== ============== ======================================================================================================================= +Name Type Description +=============================== ============== ======================================================================================================================= +dPhaseRelPerm_dPhaseVolFraction real64_array4d Derivative of phase relative permeability with respect to phase volume fraction +phaseOrder integer_array (no description available) +phaseRelPerm real64_array3d Phase relative permeability +phaseRelPerm_n real64_array3d Phase relative permeability at previous time +phaseTrappedVolFraction real64_array3d Phase trapped volume fraction +phaseTypes integer_array (no description available) +volFracScale real64 Factor used to scale the phase capillary pressure, defined as: one minus the sum of the phase minimum volume fractions. 
+=============================== ============== ======================================================================================================================= + + diff --git a/src/coreComponents/schema/docs/ViscoDruckerPrager.rst b/src/coreComponents/schema/docs/ViscoDruckerPrager.rst index 9ed800b1d7b..0fcfe196f5d 100644 --- a/src/coreComponents/schema/docs/ViscoDruckerPrager.rst +++ b/src/coreComponents/schema/docs/ViscoDruckerPrager.rst @@ -1,20 +1,20 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultCohesion real64 0 Initial cohesion -defaultDensity real64 required Default Material Density -defaultDilationAngle real64 30 Dilation angle (degrees) -defaultFrictionAngle real64 30 Friction angle (degrees) -defaultHardeningRate real64 0 Cohesion hardening/softening rate -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -name groupName required A name is required for any non-unique nodes -relaxationTime real64 required Relaxation time -================================== ========= ======== ==================================================================== +======================= ========= ======== ==================================================================== +Name Type Default Description +======================= ========= ======== ==================================================================== +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultCohesion real64 0 Initial cohesion +defaultDensity real64 required Default Material Density +defaultDilationAngle real64 30 Dilation angle (degrees) +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultFrictionAngle real64 30 Friction angle (degrees) +defaultHardeningRate real64 0 Cohesion hardening/softening rate +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +name groupName required A name is required for any non-unique nodes +relaxationTime real64 required Relaxation time +======================= ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ViscoExtendedDruckerPrager.rst b/src/coreComponents/schema/docs/ViscoExtendedDruckerPrager.rst index 2f3b4d6d43f..6f9cc0dda2b 100644 --- a/src/coreComponents/schema/docs/ViscoExtendedDruckerPrager.rst +++ b/src/coreComponents/schema/docs/ViscoExtendedDruckerPrager.rst @@ -1,21 +1,21 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultBulkModulus real64 -1 Default Bulk Modulus Parameter -defaultCohesion real64 0 Initial cohesion -defaultDensity real64 required Default Material Density -defaultDilationRatio real64 1 Dilation ratio [0,1] (ratio = 
tan dilationAngle / tan frictionAngle) -defaultHardening real64 0 Hardening parameter (hardening rate is faster for smaller values) -defaultInitialFrictionAngle real64 30 Initial friction angle (degrees) -defaultPoissonRatio real64 -1 Default Poisson's Ratio -defaultResidualFrictionAngle real64 30 Residual friction angle (degrees) -defaultShearModulus real64 -1 Default Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultYoungModulus real64 -1 Default Young's Modulus -name groupName required A name is required for any non-unique nodes -relaxationTime real64 required Relaxation time -================================== ========= ======== ==================================================================== +============================ ========= ======== ==================================================================== +Name Type Default Description +============================ ========= ======== ==================================================================== +defaultBulkModulus real64 -1 Default Bulk Modulus Parameter +defaultCohesion real64 0 Initial cohesion +defaultDensity real64 required Default Material Density +defaultDilationRatio real64 1 Dilation ratio [0,1] (ratio = tan dilationAngle / tan frictionAngle) +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultHardening real64 0 Hardening parameter (hardening rate is faster for smaller values) +defaultInitialFrictionAngle real64 30 Initial friction angle (degrees) +defaultPoissonRatio real64 -1 Default Poisson's Ratio +defaultResidualFrictionAngle real64 30 Residual friction angle (degrees) +defaultShearModulus real64 -1 Default Shear Modulus Parameter +defaultYoungModulus real64 -1 Default Young's Modulus +name groupName required A name is required for any non-unique nodes +relaxationTime real64 required Relaxation time +============================ ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/docs/ViscoModifiedCamClay.rst b/src/coreComponents/schema/docs/ViscoModifiedCamClay.rst index b0600507f3b..2649b896438 100644 --- a/src/coreComponents/schema/docs/ViscoModifiedCamClay.rst +++ b/src/coreComponents/schema/docs/ViscoModifiedCamClay.rst @@ -1,19 +1,19 @@ -================================== ========= ======== ==================================================================== -Name Type Default Description -================================== ========= ======== ==================================================================== -defaultCslSlope real64 1 Slope of the critical state line -defaultDensity real64 required Default Material Density -defaultPreConsolidationPressure real64 -1.5 Initial preconsolidation pressure -defaultRecompressionIndex real64 0.002 Recompresion Index -defaultRefPressure real64 -1 Reference Pressure -defaultRefStrainVol real64 0 Reference Volumetric Strain -defaultShearModulus real64 -1 Elastic Shear Modulus Parameter -defaultThermalExpansionCoefficient real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame -defaultVirginCompressionIndex real64 0.005 Virgin compression index -name groupName required A name is required for any non-unique nodes -relaxationTime real64 required Relaxation time -================================== ========= ======== ==================================================================== +=============================== 
========= ======== ==================================================================== +Name Type Default Description +=============================== ========= ======== ==================================================================== +defaultCslSlope real64 1 Slope of the critical state line +defaultDensity real64 required Default Material Density +defaultDrainedLinearTEC real64 0 Default Linear Thermal Expansion Coefficient of the Solid Rock Frame +defaultPreConsolidationPressure real64 -1.5 Initial preconsolidation pressure +defaultRecompressionIndex real64 0.002 Recompresion Index +defaultRefPressure real64 -1 Reference Pressure +defaultRefStrainVol real64 0 Reference Volumetric Strain +defaultShearModulus real64 -1 Elastic Shear Modulus Parameter +defaultVirginCompressionIndex real64 0.005 Virgin compression index +name groupName required A name is required for any non-unique nodes +relaxationTime real64 required Relaxation time +=============================== ========= ======== ==================================================================== diff --git a/src/coreComponents/schema/schema.xsd b/src/coreComponents/schema/schema.xsd index 3498d60b3e9..20d51a57d98 100644 --- a/src/coreComponents/schema/schema.xsd +++ b/src/coreComponents/schema/schema.xsd @@ -729,6 +729,18 @@ + + + + + + + + + + + + @@ -1807,6 +1819,8 @@ the relative residual norm satisfies: + + @@ -1843,6 +1857,11 @@ the relative residual norm satisfies: + + + + + @@ -1981,6 +2000,8 @@ the relative residual norm satisfies: + + @@ -2251,6 +2272,8 @@ the relative residual norm satisfies: + + @@ -2313,6 +2336,8 @@ the relative residual norm satisfies: + + @@ -2412,6 +2437,8 @@ Local - Add stabilization only to interiors of macro elements.--> + + @@ -3121,6 +3148,8 @@ Local - Add stabilization only to interiors of macro elements.--> + + @@ -3365,6 +3394,8 @@ Local - Add stabilization only to interiors of macro elements.--> + + @@ -3475,6 +3506,8 @@ Local - Add stabilization only to interiors of macro elements.--> + + @@ -3495,6 +3528,8 @@ Local - Add stabilization only to interiors of macro elements.--> + + @@ -3582,6 +3617,9 @@ Local - Add stabilization only to interiors of macro elements.--> + + + @@ -3606,10 +3644,10 @@ Local - Add stabilization only to interiors of macro elements.--> + + - - @@ -3722,6 +3760,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -3738,6 +3778,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -3754,6 +3796,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -3770,6 +3814,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -3796,12 +3842,12 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + - - @@ -4033,12 +4079,12 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + - - @@ -4065,12 +4111,12 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + - - @@ -4097,12 +4143,12 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + - - @@ -4157,6 +4203,8 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b + + @@ -4167,8 +4215,6 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b - - @@ -4185,6 +4231,8 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b + + @@ -4193,8 +4241,6 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b - - @@ -4205,12 +4251,12 @@ For instance, if "oil" is before "gas" in 
"phaseNames", the table order should b + + - - @@ -4219,6 +4265,8 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b + + @@ -4227,8 +4275,6 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b - - @@ -4253,6 +4299,8 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b + + @@ -4271,8 +4319,6 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b - - @@ -4289,14 +4335,14 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b + + - - @@ -4321,6 +4367,8 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b + + @@ -4331,8 +4379,6 @@ For instance, if "oil" is before "gas" in "phaseNames", the table order should b - - @@ -4411,6 +4457,8 @@ If you want to do a three-phase simulation, please use instead wettingIntermedia + + @@ -4421,8 +4469,6 @@ If you want to do a three-phase simulation, please use instead wettingIntermedia - - @@ -4495,12 +4541,12 @@ If you want to do a three-phase simulation, please use instead wettingIntermedia + + - - @@ -4596,6 +4642,42 @@ If you want to do a three-phase simulation, please use instead wettingIntermedia + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + @@ -4828,6 +4910,8 @@ To neglect hysteresis on this phase, just use the same table name for the draina + + @@ -4837,8 +4921,6 @@ To neglect hysteresis on this phase, just use the same table name for the draina * linear * quadratic--> - - @@ -4905,6 +4987,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -4913,8 +4997,6 @@ The expected format is "{ waterMax, oilMax }", in that order--> - - @@ -4931,6 +5013,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -4941,8 +5025,6 @@ The expected format is "{ waterMax, oilMax }", in that order--> - - @@ -4955,6 +5037,8 @@ The expected format is "{ waterMax, oilMax }", in that order--> + + @@ -4965,8 +5049,6 @@ The expected format is "{ waterMax, oilMax }", in that order--> - - diff --git a/src/coreComponents/schema/schema.xsd.other b/src/coreComponents/schema/schema.xsd.other index b7344dc01a1..ca7aaa0f57c 100644 --- a/src/coreComponents/schema/schema.xsd.other +++ b/src/coreComponents/schema/schema.xsd.other @@ -1334,6 +1334,9 @@ + + + @@ -1358,8 +1361,8 @@ - - + + @@ -1368,8 +1371,8 @@ - - + + @@ -2300,6 +2303,9 @@ + + + diff --git a/src/coreComponents/unitTests/constitutiveTests/CMakeLists.txt b/src/coreComponents/unitTests/constitutiveTests/CMakeLists.txt index 408bc222462..5ec347f77c6 100644 --- a/src/coreComponents/unitTests/constitutiveTests/CMakeLists.txt +++ b/src/coreComponents/unitTests/constitutiveTests/CMakeLists.txt @@ -19,7 +19,9 @@ set( gtest_triaxial_xmls set( gtest_pvt_xmls testPVT.xml - testPVT_PhaseComposition.xml ) + testPVT_CO2Brine.xml + testPVT_PhaseComposition.xml + ) set( gtest_reactivefluid_xmls testReactiveFluid.xml ) diff --git a/src/coreComponents/unitTests/constitutiveTests/testCO2BrinePVTModels.cpp b/src/coreComponents/unitTests/constitutiveTests/testCO2BrinePVTModels.cpp index 24cee2654a9..723a30af1da 100644 --- a/src/coreComponents/unitTests/constitutiveTests/testCO2BrinePVTModels.cpp +++ b/src/coreComponents/unitTests/constitutiveTests/testCO2BrinePVTModels.cpp @@ -216,7 +216,8 @@ void testNumericalDerivatives( FLASH_WRAPPER const & flashModelWrapper, real64 const temperature, arraySlice1d< real64 const > const & compFraction, real64 const perturbParameter, - real64 const relTol ) + 
real64 const relTol, + real64 const absTol = std::numeric_limits< real64 >::epsilon() ) { using namespace multifluid; using Deriv = multifluid::DerivativeOffset; @@ -312,12 +313,12 @@ void testNumericalDerivatives( FLASH_WRAPPER const & flashModelWrapper, { checkRelativeError( (perturbedPhaseFracAndDeriv.value[j]-phaseFracAndDeriv.value[j])/dC, phaseFracAndDeriv.derivs[j][Deriv::dC+i], - relTol ); + relTol, absTol ); for( integer k = 0; k < numComp; ++k ) { checkRelativeError( (perturbedPhaseCompFracAndDeriv.value[j][k]-phaseCompFracAndDeriv.value[j][k])/dC, phaseCompFracAndDeriv.derivs[j][k][Deriv::dC+i], - relTol ); + relTol, absTol ); } } } @@ -364,7 +365,8 @@ std::unique_ptr< MODEL > makePVTFunction( string const & filename, pvtFunction = std::make_unique< MODEL >( strs[1], strs, componentNames, - componentMolarWeight ); + componentMolarWeight, + true ); // print PVT tables } } GEOS_ERROR_IF( pvtFunction == nullptr, @@ -405,7 +407,8 @@ std::unique_ptr< MODEL > makeFlashModel( string const & filename, strs, phaseNames, componentNames, - componentMolarWeight ); + componentMolarWeight, + true ); // print PVT tables } } GEOS_ERROR_IF( flashModel == nullptr, @@ -874,8 +877,9 @@ TEST_F( CO2SolubilityTest, co2SolubilityValuesAndDeriv ) real64 const deltaComp = 0.2; real64 const eps = sqrt( std::numeric_limits< real64 >::epsilon()); - real64 const relTolPrevImpl = 5e-4; - real64 const relTolDeriv = 5e-5; + real64 constexpr relTolPrevImpl = 5e-4; + real64 constexpr relTolDeriv = 5e-5; + real64 constexpr absTolDeriv = 1.0e-7; real64 const savedGasPhaseFrac[] = { 0.298158785, 0.298183347, 0.2982033821, 0.295950309, 0.2959791448, 0.2960026365, 0.2926988393, 0.292724834, 0.2927459702, 0.499837295, 0.499854799, 0.4998690769, 0.4982634386, 0.4982839883, @@ -899,7 +903,7 @@ TEST_F( CO2SolubilityTest, co2SolubilityValuesAndDeriv ) testValuesAgainstPreviousImplementation( flashModelWrapper, P[iPres], TC[iTemp], comp, savedGasPhaseFrac[counter], savedWaterPhaseGasComp[counter], relTolPrevImpl ); - testNumericalDerivatives( flashModelWrapper, P[iPres], TC[iTemp], comp, eps, relTolDeriv ); + testNumericalDerivatives( flashModelWrapper, P[iPres], TC[iTemp], comp, eps, relTolDeriv, absTolDeriv ); counter++; } } diff --git a/src/coreComponents/unitTests/constitutiveTests/testMultiFluid.cpp b/src/coreComponents/unitTests/constitutiveTests/testMultiFluid.cpp index 3e56c1629b1..2297aaba3f3 100644 --- a/src/coreComponents/unitTests/constitutiveTests/testMultiFluid.cpp +++ b/src/coreComponents/unitTests/constitutiveTests/testMultiFluid.cpp @@ -904,14 +904,14 @@ TEST_F( CO2BrinePhillipsFluidTest, checkAgainstPreviousImplementationMolar ) dynamicCast< CO2BrinePhillipsFluid * >( fluid )->createKernelWrapper(); real64 const savedTotalDens[] = - { 5881.8128183956969224, 5869.522096458530541, 5854.9469601674582009, 9180.9455320478591602, 9157.2045503913905122, 9129.1751063784995495, 15755.475565136142905, 15696.691553847707837, - 15627.990771463533747 }; + { 5881.9010529428224, 5869.6094131788523, 5855.0332090690354, 9181.3523596865525, 9157.6071613646127, 9129.5728206336826, 15757.685798517123, 15698.877814368472, + 15630.149353340639 }; real64 const savedGasPhaseFrac[] = { 0.29413690046142371148, 0.29415754810481165027, 0.29418169867697463449, 0.29194010802017489326, 0.29196434961986583723, 0.29199266189550621142, 0.2890641335638892695, 0.28908718137828937067, 0.28911404840933618843 }; real64 const savedWaterDens[] = - { 53286.457784368176362, 53264.389103437584708, 53237.751306267287873, 53229.257940878436784, 
53207.597127679167897, 53181.436584967217641, 53197.49848403003125, 53176.033397316634364, - 53150.105086882285832 }; + { 53296.719183517576, 53274.578175308554, 53247.856162690216, 53248.577831698305, 53226.801031868054, 53200.505577694363, 53232.959859840405, 53211.345942175059, + 53185.244751356993 }; real64 const savedGasDens[] = { 1876.2436091302606656, 1872.184636376355229, 1867.3711104617746059, 3053.1548401973859654, 3044.5748249030266379, 3034.4507978134674886, 5769.0622621289458039, 5742.8476745352018042, 5712.2837704249559465 }; @@ -970,11 +970,11 @@ TEST_F( CO2BrinePhillipsFluidTest, checkAgainstPreviousImplementationMass ) dynamicCast< CO2BrinePhillipsFluid * >( fluid )->createKernelWrapper(); real64 const savedTotalDens[] = - { 238.33977561940088208, 237.86350488026934613, 237.29874890241927687, 354.01144731214282046, 353.18618684355078585, 352.21120673560858449, 550.02182875764299297, 548.3889751707506548, - 546.47580480217254717 }; + { 238.31504112633914, 237.83897306400553, 237.27445306546298, 353.95258514794097, 353.12773295711992, 352.1532278769692, 549.90502586392017, 548.2725957521294, + 546.35992000222234 }; real64 const savedGasPhaseFrac[] = - { 0.28562868803317220667, 0.28567941665326646028, 0.285738749802139258, 0.28022484140718162404, 0.2802844989853667812, 0.28035417162546172332, 0.2731355646393489045, 0.27319238868618361815, - 0.2732586251114847431 }; + { 0.28566797890570228, 0.28571845092287312, 0.28577748565482669, 0.28029804182709406, 0.28035729907078311, 0.2804265068556816, 0.27326788204506247, 0.27332422114692956, + 0.27338989611171055 }; real64 const savedWaterDens[] = { 970.85108546544745423, 970.4075834766143771, 969.87385780866463847, 974.23383396044232541, 973.78856424100911227, 973.25280170872576946, 979.48333010951580491, 979.04147229150635212, 978.50977403260912979 }; @@ -994,11 +994,11 @@ TEST_F( CO2BrinePhillipsFluidTest, checkAgainstPreviousImplementationMass ) { 1.9042384704865343673e-05, 1.9062615947696152414e-05, 1.9086923154230274463e-05, 2.0061713844617985449e-05, 2.0075955757102255573e-05, 2.0093249989250199265e-05, 2.3889596884008691474e-05, 2.3865756080512667728e-05, 2.3839170076324036522e-05 }; real64 const savedWaterPhaseGasComp[] = - { 0.02005966592318779787, 0.019990461277537684842, 0.019909503061226688919, 0.027365230280837819082, 0.027285226317914228894, 0.027191770514265831832, 0.036759501299346700187, - 0.036684965747010883641, 0.036598063202886929601 }; + { 0.020063528822832473, 0.019994285300494085, 0.019913282008777046, 0.027375162661670061, 0.027295074213708581, 0.02720152052681666, 0.036784005129927563, + 0.036709327088310859, 0.036622259649145526 }; real64 const savedWaterPhaseWaterComp[] = - { 0.9797478006656266114, 0.97981828292617156873, 0.97990072673671935188, 0.97227194517798976037, 0.97235397979974458327, 0.97244979317916002692, 0.9625743441996873484, 0.9626514061444874093, - 0.962741233539301966 }; + { 0.97993647117716742, 0.98000571469950604, 0.98008671799122282, 0.97262483733832983, 0.97270492578629131, 0.97279847947318321, 0.96321599487007259, 0.96329067291168902, + 0.96337774035085455 }; integer counter = 0; for( integer i = 0; i < 3; ++i ) diff --git a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2.txt b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2.txt index 86b4d12b8dc..ee1e6fa8ebe 100644 --- a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2.txt +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2.txt @@ -5,53 +5,53 @@ # columns 5-6 = phase fractions # columns 7-8 
= phase densities # columns 9-10 = phase viscosities -0.0000e+00 1.0000e+06 3.5000e+02 1.5581e+01 1.0000e+00 4.1138e-11 1.5581e+01 1.0033e+03 1.7476e-05 9.9525e-04 -2.0408e-02 2.0000e+06 3.5000e+02 3.2165e+01 1.0000e+00 4.1359e-11 3.2165e+01 1.0050e+03 1.7601e-05 9.9525e-04 -4.0816e-02 3.0000e+06 3.5000e+02 4.9901e+01 1.0000e+00 4.1563e-11 4.9901e+01 1.0066e+03 1.7778e-05 9.9525e-04 -6.1224e-02 4.0000e+06 3.5000e+02 6.8976e+01 1.0000e+00 4.1749e-11 6.8976e+01 1.0081e+03 1.8019e-05 9.9525e-04 -8.1633e-02 5.0000e+06 3.5000e+02 8.9619e+01 1.0000e+00 4.1919e-11 8.9619e+01 1.0096e+03 1.8338e-05 9.9525e-04 -1.0204e-01 6.0000e+06 3.5000e+02 1.1212e+02 1.0000e+00 4.2074e-11 1.1212e+02 1.0109e+03 1.8757e-05 9.9525e-04 -1.2245e-01 7.0000e+06 3.5000e+02 1.3682e+02 1.0000e+00 4.2213e-11 1.3682e+02 1.0122e+03 1.9300e-05 9.9525e-04 -1.4286e-01 8.0000e+06 3.5000e+02 1.6416e+02 1.0000e+00 4.2339e-11 1.6416e+02 1.0134e+03 2.0004e-05 9.9525e-04 -1.6327e-01 9.0000e+06 3.5000e+02 1.9463e+02 1.0000e+00 4.2450e-11 1.9463e+02 1.0145e+03 2.0915e-05 9.9525e-04 -1.8367e-01 1.0000e+07 3.5000e+02 2.2880e+02 1.0000e+00 4.2549e-11 2.2880e+02 1.0156e+03 2.2097e-05 9.9525e-04 -2.0408e-01 1.1000e+07 3.5000e+02 2.6713e+02 1.0000e+00 4.2635e-11 2.6713e+02 1.0166e+03 2.3623e-05 9.9525e-04 -2.2449e-01 1.2000e+07 3.5000e+02 3.0970e+02 1.0000e+00 4.2710e-11 3.0970e+02 1.0175e+03 2.5569e-05 9.9525e-04 -2.4490e-01 1.3000e+07 3.5000e+02 3.5571e+02 1.0000e+00 4.2774e-11 3.5571e+02 1.0184e+03 2.7974e-05 9.9525e-04 -2.6531e-01 1.4000e+07 3.5000e+02 4.0315e+02 1.0000e+00 4.2830e-11 4.0315e+02 1.0193e+03 3.0787e-05 9.9525e-04 -2.8571e-01 1.5000e+07 3.5000e+02 4.4920e+02 1.0000e+00 4.2878e-11 4.4920e+02 1.0201e+03 3.3852e-05 9.9525e-04 -3.0612e-01 1.6000e+07 3.5000e+02 4.9148e+02 1.0000e+00 4.2921e-11 4.9148e+02 1.0209e+03 3.6971e-05 9.9525e-04 -3.2653e-01 1.7000e+07 3.5000e+02 5.2886e+02 1.0000e+00 4.2959e-11 5.2886e+02 1.0216e+03 3.9987e-05 9.9525e-04 -3.4694e-01 1.8000e+07 3.5000e+02 5.6137e+02 1.0000e+00 4.2994e-11 5.6137e+02 1.0224e+03 4.2821e-05 9.9525e-04 -3.6735e-01 1.9000e+07 3.5000e+02 5.8957e+02 1.0000e+00 4.3027e-11 5.8957e+02 1.0231e+03 4.5454e-05 9.9525e-04 -3.8776e-01 2.0000e+07 3.5000e+02 6.1418e+02 1.0000e+00 4.3057e-11 6.1418e+02 1.0238e+03 4.7891e-05 9.9525e-04 -4.0816e-01 2.1000e+07 3.5000e+02 6.3583e+02 1.0000e+00 4.3086e-11 6.3583e+02 1.0245e+03 5.0154e-05 9.9525e-04 -4.2857e-01 2.2000e+07 3.5000e+02 6.5507e+02 1.0000e+00 4.3114e-11 6.5507e+02 1.0252e+03 5.2266e-05 9.9525e-04 -4.4898e-01 2.3000e+07 3.5000e+02 6.7233e+02 1.0000e+00 4.3140e-11 6.7233e+02 1.0259e+03 5.4246e-05 9.9525e-04 -4.6939e-01 2.4000e+07 3.5000e+02 6.8796e+02 1.0000e+00 4.3166e-11 6.8796e+02 1.0266e+03 5.6114e-05 9.9525e-04 -4.8980e-01 2.5000e+07 3.5000e+02 7.0222e+02 1.0000e+00 4.3191e-11 7.0222e+02 1.0273e+03 5.7883e-05 9.9525e-04 -5.1020e-01 2.6000e+07 3.5000e+02 7.1531e+02 1.0000e+00 4.3215e-11 7.1531e+02 1.0280e+03 5.9568e-05 9.9525e-04 -5.3061e-01 2.7000e+07 3.5000e+02 7.2741e+02 1.0000e+00 4.3239e-11 7.2741e+02 1.0287e+03 6.1177e-05 9.9525e-04 -5.5102e-01 2.8000e+07 3.5000e+02 7.3865e+02 1.0000e+00 4.3262e-11 7.3865e+02 1.0294e+03 6.2721e-05 9.9525e-04 -5.7143e-01 2.9000e+07 3.5000e+02 7.4914e+02 1.0000e+00 4.3284e-11 7.4914e+02 1.0300e+03 6.4206e-05 9.9525e-04 -5.9184e-01 3.0000e+07 3.5000e+02 7.5898e+02 1.0000e+00 4.3306e-11 7.5898e+02 1.0307e+03 6.5640e-05 9.9525e-04 -6.1224e-01 3.1000e+07 3.5000e+02 7.6825e+02 1.0000e+00 4.3328e-11 7.6825e+02 1.0314e+03 6.7027e-05 9.9525e-04 -6.3265e-01 3.2000e+07 3.5000e+02 7.7700e+02 1.0000e+00 
4.3349e-11 7.7700e+02 1.0320e+03 6.8372e-05 9.9525e-04 -6.5306e-01 3.3000e+07 3.5000e+02 7.8529e+02 1.0000e+00 4.3370e-11 7.8529e+02 1.0327e+03 6.9679e-05 9.9525e-04 -6.7347e-01 3.4000e+07 3.5000e+02 7.9317e+02 1.0000e+00 4.3391e-11 7.9317e+02 1.0333e+03 7.0953e-05 9.9525e-04 -6.9388e-01 3.5000e+07 3.5000e+02 8.0068e+02 1.0000e+00 4.3411e-11 8.0068e+02 1.0340e+03 7.2195e-05 9.9525e-04 -7.1429e-01 3.6000e+07 3.5000e+02 8.0785e+02 1.0000e+00 4.3431e-11 8.0785e+02 1.0346e+03 7.3409e-05 9.9525e-04 -7.3469e-01 3.7000e+07 3.5000e+02 8.1472e+02 1.0000e+00 4.3450e-11 8.1472e+02 1.0353e+03 7.4597e-05 9.9525e-04 -7.5510e-01 3.8000e+07 3.5000e+02 8.2130e+02 1.0000e+00 4.3470e-11 8.2130e+02 1.0359e+03 7.5761e-05 9.9525e-04 -7.7551e-01 3.9000e+07 3.5000e+02 8.2763e+02 1.0000e+00 4.3489e-11 8.2763e+02 1.0366e+03 7.6904e-05 9.9525e-04 -7.9592e-01 4.0000e+07 3.5000e+02 8.3372e+02 1.0000e+00 4.3508e-11 8.3372e+02 1.0372e+03 7.8025e-05 9.9525e-04 -8.1633e-01 4.1000e+07 3.5000e+02 8.3960e+02 1.0000e+00 4.3527e-11 8.3960e+02 1.0378e+03 7.9128e-05 9.9525e-04 -8.3673e-01 4.2000e+07 3.5000e+02 8.4526e+02 1.0000e+00 4.3545e-11 8.4526e+02 1.0385e+03 8.0214e-05 9.9525e-04 -8.5714e-01 4.3000e+07 3.5000e+02 8.5074e+02 1.0000e+00 4.3564e-11 8.5074e+02 1.0391e+03 8.1283e-05 9.9525e-04 -8.7755e-01 4.4000e+07 3.5000e+02 8.5605e+02 1.0000e+00 4.3582e-11 8.5605e+02 1.0398e+03 8.2337e-05 9.9525e-04 -8.9796e-01 4.5000e+07 3.5000e+02 8.6119e+02 1.0000e+00 4.3600e-11 8.6119e+02 1.0404e+03 8.3376e-05 9.9525e-04 -9.1837e-01 4.6000e+07 3.5000e+02 8.6617e+02 1.0000e+00 4.3618e-11 8.6617e+02 1.0410e+03 8.4403e-05 9.9525e-04 -9.3878e-01 4.7000e+07 3.5000e+02 8.7102e+02 1.0000e+00 4.3636e-11 8.7102e+02 1.0416e+03 8.5416e-05 9.9525e-04 -9.5918e-01 4.8000e+07 3.5000e+02 8.7572e+02 1.0000e+00 4.3653e-11 8.7572e+02 1.0423e+03 8.6418e-05 9.9525e-04 -9.7959e-01 4.9000e+07 3.5000e+02 8.8030e+02 1.0000e+00 4.3670e-11 8.8030e+02 1.0429e+03 8.7409e-05 9.9525e-04 -1.0000e+00 5.0000e+07 3.5000e+02 8.8476e+02 1.0000e+00 4.3688e-11 8.8476e+02 1.0435e+03 8.8389e-05 9.9525e-04 +0.0000e+00 1.0000e+06 3.5000e+02 1.5581e+01 1.0000e+00 0.0000e+00 1.5581e+01 1.0022e+03 1.7476e-05 4.1330e-04 +2.0408e-02 2.0000e+06 3.5000e+02 3.2165e+01 1.0000e+00 0.0000e+00 3.2165e+01 1.0028e+03 1.7601e-05 4.1330e-04 +4.0816e-02 3.0000e+06 3.5000e+02 4.9901e+01 1.0000e+00 0.0000e+00 4.9901e+01 1.0034e+03 1.7778e-05 4.1330e-04 +6.1224e-02 4.0000e+06 3.5000e+02 6.8976e+01 1.0000e+00 0.0000e+00 6.8976e+01 1.0041e+03 1.8019e-05 4.1330e-04 +8.1633e-02 5.0000e+06 3.5000e+02 8.9619e+01 1.0000e+00 0.0000e+00 8.9619e+01 1.0047e+03 1.8338e-05 4.1330e-04 +1.0204e-01 6.0000e+06 3.5000e+02 1.1212e+02 1.0000e+00 0.0000e+00 1.1212e+02 1.0053e+03 1.8757e-05 4.1330e-04 +1.2245e-01 7.0000e+06 3.5000e+02 1.3682e+02 1.0000e+00 0.0000e+00 1.3682e+02 1.0059e+03 1.9300e-05 4.1330e-04 +1.4286e-01 8.0000e+06 3.5000e+02 1.6416e+02 1.0000e+00 0.0000e+00 1.6416e+02 1.0065e+03 2.0004e-05 4.1330e-04 +1.6327e-01 9.0000e+06 3.5000e+02 1.9463e+02 1.0000e+00 0.0000e+00 1.9463e+02 1.0072e+03 2.0915e-05 4.1330e-04 +1.8367e-01 1.0000e+07 3.5000e+02 2.2880e+02 1.0000e+00 0.0000e+00 2.2880e+02 1.0078e+03 2.2097e-05 4.1330e-04 +2.0408e-01 1.1000e+07 3.5000e+02 2.6713e+02 1.0000e+00 0.0000e+00 2.6713e+02 1.0084e+03 2.3623e-05 4.1330e-04 +2.2449e-01 1.2000e+07 3.5000e+02 3.0970e+02 1.0000e+00 0.0000e+00 3.0970e+02 1.0090e+03 2.5569e-05 4.1330e-04 +2.4490e-01 1.3000e+07 3.5000e+02 3.5571e+02 1.0000e+00 0.0000e+00 3.5571e+02 1.0096e+03 2.7974e-05 4.1330e-04 +2.6531e-01 1.4000e+07 3.5000e+02 4.0315e+02 1.0000e+00 
0.0000e+00 4.0315e+02 1.0102e+03 3.0787e-05 4.1330e-04 +2.8571e-01 1.5000e+07 3.5000e+02 4.4920e+02 1.0000e+00 0.0000e+00 4.4920e+02 1.0108e+03 3.3852e-05 4.1330e-04 +3.0612e-01 1.6000e+07 3.5000e+02 4.9148e+02 1.0000e+00 0.0000e+00 4.9148e+02 1.0114e+03 3.6971e-05 4.1330e-04 +3.2653e-01 1.7000e+07 3.5000e+02 5.2886e+02 1.0000e+00 0.0000e+00 5.2886e+02 1.0120e+03 3.9987e-05 4.1330e-04 +3.4694e-01 1.8000e+07 3.5000e+02 5.6137e+02 1.0000e+00 0.0000e+00 5.6137e+02 1.0126e+03 4.2821e-05 4.1330e-04 +3.6735e-01 1.9000e+07 3.5000e+02 5.8957e+02 1.0000e+00 0.0000e+00 5.8957e+02 1.0133e+03 4.5454e-05 4.1330e-04 +3.8776e-01 2.0000e+07 3.5000e+02 6.1418e+02 1.0000e+00 0.0000e+00 6.1418e+02 1.0139e+03 4.7891e-05 4.1330e-04 +4.0816e-01 2.1000e+07 3.5000e+02 6.3583e+02 1.0000e+00 0.0000e+00 6.3583e+02 1.0145e+03 5.0154e-05 4.1330e-04 +4.2857e-01 2.2000e+07 3.5000e+02 6.5507e+02 1.0000e+00 0.0000e+00 6.5507e+02 1.0151e+03 5.2266e-05 4.1330e-04 +4.4898e-01 2.3000e+07 3.5000e+02 6.7233e+02 1.0000e+00 0.0000e+00 6.7233e+02 1.0157e+03 5.4246e-05 4.1330e-04 +4.6939e-01 2.4000e+07 3.5000e+02 6.8796e+02 1.0000e+00 0.0000e+00 6.8796e+02 1.0163e+03 5.6114e-05 4.1330e-04 +4.8980e-01 2.5000e+07 3.5000e+02 7.0222e+02 1.0000e+00 0.0000e+00 7.0222e+02 1.0169e+03 5.7883e-05 4.1330e-04 +5.1020e-01 2.6000e+07 3.5000e+02 7.1531e+02 1.0000e+00 0.0000e+00 7.1531e+02 1.0174e+03 5.9568e-05 4.1330e-04 +5.3061e-01 2.7000e+07 3.5000e+02 7.2741e+02 1.0000e+00 0.0000e+00 7.2741e+02 1.0180e+03 6.1177e-05 4.1330e-04 +5.5102e-01 2.8000e+07 3.5000e+02 7.3865e+02 1.0000e+00 0.0000e+00 7.3865e+02 1.0186e+03 6.2721e-05 4.1330e-04 +5.7143e-01 2.9000e+07 3.5000e+02 7.4914e+02 1.0000e+00 0.0000e+00 7.4914e+02 1.0192e+03 6.4206e-05 4.1330e-04 +5.9184e-01 3.0000e+07 3.5000e+02 7.5898e+02 1.0000e+00 0.0000e+00 7.5898e+02 1.0198e+03 6.5640e-05 4.1330e-04 +6.1224e-01 3.1000e+07 3.5000e+02 7.6825e+02 1.0000e+00 0.0000e+00 7.6825e+02 1.0204e+03 6.7027e-05 4.1330e-04 +6.3265e-01 3.2000e+07 3.5000e+02 7.7700e+02 1.0000e+00 0.0000e+00 7.7700e+02 1.0210e+03 6.8372e-05 4.1330e-04 +6.5306e-01 3.3000e+07 3.5000e+02 7.8529e+02 1.0000e+00 0.0000e+00 7.8529e+02 1.0216e+03 6.9679e-05 4.1330e-04 +6.7347e-01 3.4000e+07 3.5000e+02 7.9317e+02 1.0000e+00 0.0000e+00 7.9317e+02 1.0222e+03 7.0953e-05 4.1330e-04 +6.9388e-01 3.5000e+07 3.5000e+02 8.0068e+02 1.0000e+00 0.0000e+00 8.0068e+02 1.0228e+03 7.2195e-05 4.1330e-04 +7.1429e-01 3.6000e+07 3.5000e+02 8.0785e+02 1.0000e+00 0.0000e+00 8.0785e+02 1.0233e+03 7.3409e-05 4.1330e-04 +7.3469e-01 3.7000e+07 3.5000e+02 8.1472e+02 1.0000e+00 0.0000e+00 8.1472e+02 1.0239e+03 7.4597e-05 4.1330e-04 +7.5510e-01 3.8000e+07 3.5000e+02 8.2130e+02 1.0000e+00 0.0000e+00 8.2130e+02 1.0245e+03 7.5761e-05 4.1330e-04 +7.7551e-01 3.9000e+07 3.5000e+02 8.2763e+02 1.0000e+00 0.0000e+00 8.2763e+02 1.0251e+03 7.6904e-05 4.1330e-04 +7.9592e-01 4.0000e+07 3.5000e+02 8.3372e+02 1.0000e+00 0.0000e+00 8.3372e+02 1.0257e+03 7.8025e-05 4.1330e-04 +8.1633e-01 4.1000e+07 3.5000e+02 8.3960e+02 1.0000e+00 0.0000e+00 8.3960e+02 1.0263e+03 7.9128e-05 4.1330e-04 +8.3673e-01 4.2000e+07 3.5000e+02 8.4526e+02 1.0000e+00 0.0000e+00 8.4526e+02 1.0268e+03 8.0214e-05 4.1330e-04 +8.5714e-01 4.3000e+07 3.5000e+02 8.5074e+02 1.0000e+00 0.0000e+00 8.5074e+02 1.0274e+03 8.1283e-05 4.1330e-04 +8.7755e-01 4.4000e+07 3.5000e+02 8.5605e+02 1.0000e+00 0.0000e+00 8.5605e+02 1.0280e+03 8.2337e-05 4.1330e-04 +8.9796e-01 4.5000e+07 3.5000e+02 8.6119e+02 1.0000e+00 0.0000e+00 8.6119e+02 1.0286e+03 8.3376e-05 4.1330e-04 +9.1837e-01 4.6000e+07 3.5000e+02 8.6617e+02 1.0000e+00 
0.0000e+00 8.6617e+02 1.0292e+03 8.4403e-05 4.1330e-04 +9.3878e-01 4.7000e+07 3.5000e+02 8.7102e+02 1.0000e+00 0.0000e+00 8.7102e+02 1.0297e+03 8.5416e-05 4.1330e-04 +9.5918e-01 4.8000e+07 3.5000e+02 8.7572e+02 1.0000e+00 0.0000e+00 8.7572e+02 1.0303e+03 8.6418e-05 4.1330e-04 +9.7959e-01 4.9000e+07 3.5000e+02 8.8030e+02 1.0000e+00 0.0000e+00 8.8030e+02 1.0309e+03 8.7409e-05 4.1330e-04 +1.0000e+00 5.0000e+07 3.5000e+02 8.8476e+02 1.0000e+00 0.0000e+00 8.8476e+02 1.0315e+03 8.8389e-05 4.1330e-04 diff --git a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine.xml b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine.xml new file mode 100644 index 00000000000..7363117393c --- /dev/null +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine.xml @@ -0,0 +1,118 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrineEzrokhiMixtureA.txt b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrineEzrokhiMixtureA.txt new file mode 100644 index 00000000000..9df0c13cef2 --- /dev/null +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrineEzrokhiMixtureA.txt @@ -0,0 +1,31 @@ +# column 1 = time +# column 2 = pressure +# column 3 = temperature +# column 4 = density +# column 5 = total compressibility +# columns 6-7 = phase fractions +# columns 8-9 = phase densities +# columns 10-11 = phase viscosities +# columns 12-13 = gas phase fractions [co2, water] +# columns 14-15 = water phase fractions [co2, water] +0.0000e+00 1.0000e+06 3.5000e+02 7.0237e+01 2.5631e-11 2.0920e-01 7.9080e-01 1.5581e+01 9.7518e+02 1.7476e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.2780e-03 9.9772e-01 +5.0000e-02 6.4444e+06 3.5000e+02 4.2182e+02 1.3896e-07 1.9034e-01 8.0966e-01 1.2309e+02 9.8215e+02 1.8998e-05 3.6959e-04 1.0000e+00 0.0000e+00 1.1952e-02 9.8805e-01 +1.0000e-01 1.1889e+07 3.5000e+02 7.0391e+02 6.0490e-08 1.7984e-01 8.2016e-01 3.0497e+02 9.8703e+02 2.5353e-05 3.6959e-04 1.0000e+00 0.0000e+00 1.7255e-02 9.8275e-01 +1.5000e-01 1.7333e+07 3.5000e+02 8.6432e+02 1.7707e-08 1.7488e-01 8.2512e-01 5.3970e+02 9.9060e+02 4.0932e-05 3.6959e-04 1.0000e+00 0.0000e+00 1.9739e-02 9.8026e-01 +2.0000e-01 2.2778e+07 3.5000e+02 9.1703e+02 6.6890e-09 1.7189e-01 8.2811e-01 6.6850e+02 9.9372e+02 5.3806e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.1232e-02 9.7877e-01 +2.5000e-01 2.8222e+07 3.5000e+02 9.4163e+02 3.5690e-09 1.6949e-01 8.3051e-01 7.4098e+02 9.9671e+02 6.3051e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.2429e-02 9.7757e-01 +3.0000e-01 3.3667e+07 3.5000e+02 9.5727e+02 2.5032e-09 1.6736e-01 8.3264e-01 7.9054e+02 9.9964e+02 7.0528e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.3484e-02 9.7652e-01 +3.5000e-01 3.9111e+07 3.5000e+02 9.6883e+02 1.8830e-09 1.6542e-01 8.3458e-01 8.2831e+02 1.0025e+03 7.7028e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.4448e-02 9.7555e-01 +4.0000e-01 4.4556e+07 3.5000e+02 9.7812e+02 1.5612e-09 1.6360e-01 8.3640e-01 8.5890e+02 1.0054e+03 8.2914e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.5348e-02 9.7465e-01 +4.5000e-01 5.0000e+07 3.5000e+02 9.8599e+02 3.6882e-10 1.6188e-01 8.3812e-01 8.8476e+02 1.0083e+03 8.8389e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.6197e-02 9.7380e-01 +5.0000e-01 3.0000e+07 3.0400e+02 9.7666e+02 2.5443e-09 1.6783e-01 8.3217e-01 8.0121e+02 1.0218e+03 7.1971e-05 7.8492e-04 1.0000e+00 0.0000e+00 2.3250e-02 9.7675e-01 +5.5000e-01 1.0000e+07 2.5800e+02 6.6446e+02 8.1110e-08 1.8014e-01 8.1986e-01 2.5862e+02 1.0141e+03 2.2796e-05 
1.7914e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.0000e-01 1.0000e+07 2.7156e+02 6.6447e+02 8.1111e-08 1.8014e-01 8.1986e-01 2.5862e+02 1.0142e+03 2.2796e-05 1.7914e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.5000e-01 1.0000e+07 2.8511e+02 6.6429e+02 8.1088e-08 1.8014e-01 8.1986e-01 2.5862e+02 1.0136e+03 2.2796e-05 1.2463e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.0000e-01 1.0000e+07 2.9867e+02 6.6334e+02 8.0969e-08 1.8014e-01 8.1986e-01 2.5862e+02 1.0110e+03 2.2796e-05 8.8041e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.5000e-01 1.0000e+07 3.1222e+02 6.6170e+02 8.0763e-08 1.8014e-01 8.1986e-01 2.5862e+02 1.0063e+03 2.2796e-05 6.6611e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.0000e-01 1.0000e+07 3.2578e+02 6.5950e+02 8.0485e-08 1.8014e-01 8.1986e-01 2.5862e+02 1.0001e+03 2.2796e-05 5.2535e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.5000e-01 1.0000e+07 3.3933e+02 6.5685e+02 8.0151e-08 1.8014e-01 8.1986e-01 2.5862e+02 9.9273e+02 2.2796e-05 4.2735e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +9.0000e-01 1.0000e+07 3.5289e+02 6.0588e+02 7.6868e-08 1.8266e-01 8.1734e-01 2.2287e+02 9.8369e+02 2.2020e-05 3.5529e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +9.5000e-01 1.0000e+07 3.6644e+02 5.8269e+02 7.5133e-08 1.8266e-01 8.1734e-01 2.0825e+02 9.7414e+02 2.1800e-05 3.0346e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +1.0000e+00 1.0000e+07 3.8000e+02 5.7962e+02 7.4722e-08 1.8266e-01 8.1734e-01 2.0825e+02 9.6370e+02 2.1860e-05 2.6310e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 diff --git a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrineEzrokhiMixtureB.txt b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrineEzrokhiMixtureB.txt new file mode 100644 index 00000000000..658585d20f6 --- /dev/null +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrineEzrokhiMixtureB.txt @@ -0,0 +1,31 @@ +# column 1 = time +# column 2 = pressure +# column 3 = temperature +# column 4 = density +# column 5 = total compressibility +# columns 6-7 = phase fractions +# columns 8-9 = phase densities +# columns 10-11 = phase viscosities +# columns 12-13 = gas phase fractions [co2, water] +# columns 14-15 = water phase fractions [co2, water] +0.0000e+00 1.0000e+06 3.5000e+02 3.0153e+01 6.8331e-12 5.0890e-01 4.9110e-01 1.5581e+01 9.7518e+02 1.7476e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.2780e-03 9.9772e-01 +5.0000e-02 6.4444e+06 3.5000e+02 2.1973e+02 1.8071e-07 4.9719e-01 5.0281e-01 1.2309e+02 9.8215e+02 1.8998e-05 3.6959e-04 1.0000e+00 0.0000e+00 1.1952e-02 9.8805e-01 +1.0000e-01 1.1889e+07 3.5000e+02 4.7060e+02 1.0674e-07 4.9067e-01 5.0933e-01 3.0497e+02 9.8703e+02 2.5353e-05 3.6959e-04 1.0000e+00 0.0000e+00 1.7255e-02 9.8275e-01 +1.5000e-01 1.7333e+07 3.5000e+02 7.0387e+02 3.8725e-08 4.8759e-01 5.1241e-01 5.3970e+02 9.9060e+02 4.0932e-05 3.6959e-04 1.0000e+00 0.0000e+00 1.9739e-02 9.8026e-01 +2.0000e-01 2.2778e+07 3.5000e+02 8.0378e+02 1.5410e-08 4.8573e-01 5.1427e-01 6.6850e+02 9.9372e+02 5.3806e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.1232e-02 9.7877e-01 +2.5000e-01 2.8222e+07 3.5000e+02 8.5399e+02 8.1962e-09 4.8424e-01 5.1576e-01 7.4098e+02 9.9671e+02 6.3051e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.2429e-02 9.7757e-01 +3.0000e-01 3.3667e+07 3.5000e+02 8.8642e+02 5.6757e-09 4.8292e-01 5.1708e-01 7.9054e+02 9.9964e+02 7.0528e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.3484e-02 9.7652e-01 +3.5000e-01 3.9111e+07 3.5000e+02 9.1030e+02 4.1615e-09 4.8171e-01 5.1829e-01 8.2831e+02 1.0025e+03 7.7028e-05 
3.6959e-04 1.0000e+00 0.0000e+00 2.4448e-02 9.7555e-01 +4.0000e-01 4.4556e+07 3.5000e+02 9.2924e+02 3.3747e-09 4.8058e-01 5.1942e-01 8.5890e+02 1.0054e+03 8.2914e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.5348e-02 9.7465e-01 +4.5000e-01 5.0000e+07 3.5000e+02 9.4501e+02 2.1952e-10 4.7951e-01 5.2049e-01 8.8476e+02 1.0083e+03 8.8389e-05 3.6959e-04 1.0000e+00 0.0000e+00 2.6197e-02 9.7380e-01 +5.0000e-01 3.0000e+07 3.0400e+02 9.0182e+02 5.7438e-09 4.8321e-01 5.1679e-01 8.0121e+02 1.0218e+03 7.1971e-05 7.8492e-04 1.0000e+00 0.0000e+00 2.3250e-02 9.7675e-01 +5.5000e-01 1.0000e+07 2.5800e+02 4.1666e+02 1.3361e-07 4.9085e-01 5.0915e-01 2.5862e+02 1.0141e+03 2.2796e-05 1.7914e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.0000e-01 1.0000e+07 2.7156e+02 4.1666e+02 1.3361e-07 4.9085e-01 5.0915e-01 2.5862e+02 1.0142e+03 2.2796e-05 1.7914e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.5000e-01 1.0000e+07 2.8511e+02 4.1662e+02 1.3360e-07 4.9085e-01 5.0915e-01 2.5862e+02 1.0136e+03 2.2796e-05 1.2463e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.0000e-01 1.0000e+07 2.9867e+02 4.1638e+02 1.3352e-07 4.9085e-01 5.0915e-01 2.5862e+02 1.0110e+03 2.2796e-05 8.8041e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.5000e-01 1.0000e+07 3.1222e+02 4.1598e+02 1.3339e-07 4.9085e-01 5.0915e-01 2.5862e+02 1.0063e+03 2.2796e-05 6.6611e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.0000e-01 1.0000e+07 3.2578e+02 4.1544e+02 1.3321e-07 4.9085e-01 5.0915e-01 2.5862e+02 1.0001e+03 2.2796e-05 5.2535e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.5000e-01 1.0000e+07 3.3933e+02 4.1479e+02 1.3300e-07 4.9085e-01 5.0915e-01 2.5862e+02 9.9273e+02 2.2796e-05 4.2735e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +9.0000e-01 1.0000e+07 3.5289e+02 3.6691e+02 1.2027e-07 4.9242e-01 5.0758e-01 2.2287e+02 9.8369e+02 2.2020e-05 3.5529e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +9.5000e-01 1.0000e+07 3.6644e+02 3.4654e+02 1.1513e-07 4.9242e-01 5.0758e-01 2.0825e+02 9.7414e+02 2.1800e-05 3.0346e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +1.0000e+00 1.0000e+07 3.8000e+02 3.4587e+02 1.1490e-07 4.9242e-01 5.0758e-01 2.0825e+02 9.6370e+02 2.1860e-05 2.6310e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 diff --git a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrinePhillipsMixtureA.txt b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrinePhillipsMixtureA.txt new file mode 100644 index 00000000000..8906785dbfb --- /dev/null +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrinePhillipsMixtureA.txt @@ -0,0 +1,31 @@ +# column 1 = time +# column 2 = pressure +# column 3 = temperature +# column 4 = density +# column 5 = total compressibility +# columns 6-7 = phase fractions +# columns 8-9 = phase densities +# columns 10-11 = phase viscosities +# columns 12-13 = gas phase fractions [co2, water] +# columns 14-15 = water phase fractions [co2, water] +0.0000e+00 1.0000e+06 3.5000e+02 7.0350e+01 -0.0000e+00 2.0920e-01 7.9080e-01 1.5581e+01 1.0033e+03 1.7476e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.2780e-03 9.9772e-01 +5.0000e-02 6.4444e+06 3.5000e+02 4.2620e+02 1.4060e-07 1.9028e-01 8.0972e-01 1.2309e+02 1.0115e+03 1.8998e-05 4.1330e-04 1.0000e+00 0.0000e+00 1.1952e-02 9.8805e-01 +1.0000e-01 1.1889e+07 3.5000e+02 7.1657e+02 6.1766e-08 1.7971e-01 8.2029e-01 3.0497e+02 1.0174e+03 2.5353e-05 4.1330e-04 1.0000e+00 0.0000e+00 1.7255e-02 9.8275e-01 +1.5000e-01 1.7333e+07 3.5000e+02 8.8389e+02 1.8255e-08 1.7472e-01 8.2528e-01 5.3970e+02 1.0219e+03 4.0932e-05 
4.1330e-04 1.0000e+00 0.0000e+00 1.9739e-02 9.8026e-01 +2.0000e-01 2.2778e+07 3.5000e+02 9.3956e+02 6.9892e-09 1.7171e-01 8.2829e-01 6.6850e+02 1.0258e+03 5.3806e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.1232e-02 9.7877e-01 +2.5000e-01 2.8222e+07 3.5000e+02 9.6584e+02 3.7839e-09 1.6928e-01 8.3072e-01 7.4098e+02 1.0295e+03 6.3051e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.2429e-02 9.7757e-01 +3.0000e-01 3.3667e+07 3.5000e+02 9.8271e+02 2.6836e-09 1.6714e-01 8.3286e-01 7.9054e+02 1.0331e+03 7.0528e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.3484e-02 9.7652e-01 +3.5000e-01 3.9111e+07 3.5000e+02 9.9529e+02 2.0386e-09 1.6518e-01 8.3482e-01 8.2831e+02 1.0366e+03 7.7028e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.4448e-02 9.7555e-01 +4.0000e-01 4.4556e+07 3.5000e+02 1.0055e+03 1.7012e-09 1.6334e-01 8.3666e-01 8.5890e+02 1.0401e+03 8.2914e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.5348e-02 9.7465e-01 +4.5000e-01 5.0000e+07 3.5000e+02 1.0141e+03 -0.0000e+00 1.6160e-01 8.3840e-01 8.8476e+02 1.0435e+03 8.8389e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.6197e-02 9.7380e-01 +5.0000e-01 3.0000e+07 3.0400e+02 9.8820e+02 2.6714e-09 1.6761e-01 8.3239e-01 8.0121e+02 1.0369e+03 7.1971e-05 8.6631e-04 1.0000e+00 0.0000e+00 2.3250e-02 9.7675e-01 +5.5000e-01 1.0000e+07 2.5800e+02 6.6631e+02 8.1373e-08 1.8000e-01 8.2000e-01 2.5862e+02 1.0189e+03 2.2796e-05 1.9511e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.0000e-01 1.0000e+07 2.7156e+02 6.6683e+02 8.1481e-08 1.8000e-01 8.2000e-01 2.5862e+02 1.0204e+03 2.2796e-05 1.9588e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.5000e-01 1.0000e+07 2.8511e+02 6.6723e+02 8.1565e-08 1.8001e-01 8.1999e-01 2.5862e+02 1.0216e+03 2.2796e-05 1.3681e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.0000e-01 1.0000e+07 2.9867e+02 6.6753e+02 8.1627e-08 1.8001e-01 8.1999e-01 2.5862e+02 1.0224e+03 2.2796e-05 9.7022e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.5000e-01 1.0000e+07 3.1222e+02 6.6772e+02 8.1666e-08 1.8001e-01 8.1999e-01 2.5862e+02 1.0230e+03 2.2796e-05 7.3691e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.0000e-01 1.0000e+07 3.2578e+02 6.6781e+02 8.1685e-08 1.8002e-01 8.1998e-01 2.5862e+02 1.0232e+03 2.2796e-05 5.8345e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.5000e-01 1.0000e+07 3.3933e+02 6.6780e+02 8.1682e-08 1.8002e-01 8.1998e-01 2.5862e+02 1.0232e+03 2.2796e-05 4.7645e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +9.0000e-01 1.0000e+07 3.5289e+02 6.1511e+02 7.8253e-08 1.8256e-01 8.1744e-01 2.2287e+02 1.0135e+03 2.2020e-05 3.9764e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +9.5000e-01 1.0000e+07 3.6644e+02 5.9255e+02 7.6620e-08 1.8256e-01 8.1744e-01 2.0825e+02 1.0080e+03 2.1800e-05 3.4093e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +1.0000e+00 1.0000e+07 3.8000e+02 5.9234e+02 7.6569e-08 1.8255e-01 8.1745e-01 2.0825e+02 1.0072e+03 2.1860e-05 2.9672e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 diff --git a/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrinePhillipsMixtureB.txt b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrinePhillipsMixtureB.txt new file mode 100644 index 00000000000..8bb1d224ed8 --- /dev/null +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_CO2Brine_testCo2BrinePhillipsMixtureB.txt @@ -0,0 +1,31 @@ +# column 1 = time +# column 2 = pressure +# column 3 = temperature +# column 4 = density +# column 5 = total compressibility +# columns 6-7 = phase fractions +# columns 8-9 = phase densities +# columns 10-11 = phase viscosities +# columns 12-13 = gas phase fractions [co2, water] 
+# columns 14-15 = water phase fractions [co2, water] +0.0000e+00 1.0000e+06 3.5000e+02 3.0166e+01 -0.0000e+00 5.0890e-01 4.9110e-01 1.5581e+01 1.0033e+03 1.7476e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.2780e-03 9.9772e-01 +5.0000e-02 6.4444e+06 3.5000e+02 2.2049e+02 1.8140e-07 4.9709e-01 5.0291e-01 1.2309e+02 1.0115e+03 1.8998e-05 4.1330e-04 1.0000e+00 0.0000e+00 1.1952e-02 9.8805e-01 +1.0000e-01 1.1889e+07 3.5000e+02 4.7415e+02 1.0761e-07 4.9045e-01 5.0955e-01 3.0497e+02 1.0174e+03 2.5353e-05 4.1330e-04 1.0000e+00 0.0000e+00 1.7255e-02 9.8275e-01 +1.5000e-01 1.7333e+07 3.5000e+02 7.1192e+02 3.9232e-08 4.8731e-01 5.1269e-01 5.3970e+02 1.0219e+03 4.0932e-05 4.1330e-04 1.0000e+00 0.0000e+00 1.9739e-02 9.8026e-01 +2.0000e-01 2.2778e+07 3.5000e+02 8.1448e+02 1.5684e-08 4.8541e-01 5.1459e-01 6.6850e+02 1.0258e+03 5.3806e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.1232e-02 9.7877e-01 +2.5000e-01 2.8222e+07 3.5000e+02 8.6629e+02 8.3806e-09 4.8387e-01 5.1613e-01 7.4098e+02 1.0295e+03 6.3051e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.2429e-02 9.7757e-01 +3.0000e-01 3.3667e+07 3.5000e+02 8.9988e+02 5.8250e-09 4.8252e-01 5.1748e-01 7.9054e+02 1.0331e+03 7.0528e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.3484e-02 9.7652e-01 +3.5000e-01 3.9111e+07 3.5000e+02 9.2471e+02 4.2862e-09 4.8127e-01 5.1873e-01 8.2831e+02 1.0366e+03 7.7028e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.4448e-02 9.7555e-01 +4.0000e-01 4.4556e+07 3.5000e+02 9.4444e+02 3.4851e-09 4.8011e-01 5.1989e-01 8.5890e+02 1.0401e+03 8.2914e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.5348e-02 9.7465e-01 +4.5000e-01 5.0000e+07 3.5000e+02 9.6092e+02 -0.0000e+00 4.7901e-01 5.2099e-01 8.8476e+02 1.0435e+03 8.8389e-05 4.1330e-04 1.0000e+00 0.0000e+00 2.6197e-02 9.7380e-01 +5.0000e-01 3.0000e+07 3.0400e+02 9.0796e+02 5.8367e-09 4.8281e-01 5.1719e-01 8.0121e+02 1.0369e+03 7.1971e-05 8.6631e-04 1.0000e+00 0.0000e+00 2.3250e-02 9.7675e-01 +5.5000e-01 1.0000e+07 2.5800e+02 4.1718e+02 1.3377e-07 4.9062e-01 5.0938e-01 2.5862e+02 1.0189e+03 2.2796e-05 1.9511e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.0000e-01 1.0000e+07 2.7156e+02 4.1731e+02 1.3383e-07 4.9063e-01 5.0937e-01 2.5862e+02 1.0204e+03 2.2796e-05 1.9588e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +6.5000e-01 1.0000e+07 2.8511e+02 4.1740e+02 1.3388e-07 4.9063e-01 5.0937e-01 2.5862e+02 1.0216e+03 2.2796e-05 1.3681e-03 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.0000e-01 1.0000e+07 2.9867e+02 4.1747e+02 1.3391e-07 4.9064e-01 5.0936e-01 2.5862e+02 1.0224e+03 2.2796e-05 9.7022e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +7.5000e-01 1.0000e+07 3.1222e+02 4.1752e+02 1.3393e-07 4.9064e-01 5.0936e-01 2.5862e+02 1.0230e+03 2.2796e-05 7.3691e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.0000e-01 1.0000e+07 3.2578e+02 4.1754e+02 1.3394e-07 4.9064e-01 5.0936e-01 2.5862e+02 1.0232e+03 2.2796e-05 5.8345e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +8.5000e-01 1.0000e+07 3.3933e+02 4.1754e+02 1.3394e-07 4.9064e-01 5.0936e-01 2.5862e+02 1.0232e+03 2.2796e-05 4.7645e-04 1.0000e+00 0.0000e+00 1.7105e-02 9.8290e-01 +9.0000e-01 1.0000e+07 3.5289e+02 3.6904e+02 1.2104e-07 4.9224e-01 5.0776e-01 2.2287e+02 1.0135e+03 2.2020e-05 3.9764e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +9.5000e-01 1.0000e+07 3.6644e+02 3.4874e+02 1.1593e-07 4.9224e-01 5.0776e-01 2.0825e+02 1.0080e+03 2.1800e-05 3.4093e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 +1.0000e+00 1.0000e+07 3.8000e+02 3.4869e+02 1.1591e-07 4.9223e-01 5.0777e-01 2.0825e+02 1.0072e+03 2.1860e-05 2.9672e-04 1.0000e+00 0.0000e+00 1.5834e-02 9.8417e-01 diff --git 
a/src/coreComponents/unitTests/constitutiveTests/testPVT_data/brinePVTEzrokhi.txt b/src/coreComponents/unitTests/constitutiveTests/testPVT_data/brinePVTEzrokhi.txt new file mode 100755 index 00000000000..01dc19cc64f --- /dev/null +++ b/src/coreComponents/unitTests/constitutiveTests/testPVT_data/brinePVTEzrokhi.txt @@ -0,0 +1,2 @@ +DensityFun EzrokhiBrineDensity 0.1033 -2.2991e-5 -2.3658e-6 +ViscosityFun EzrokhiBrineViscosity 0 0 0 diff --git a/src/coreComponents/unitTests/fluidFlowTests/testCompFlowUtils.hpp b/src/coreComponents/unitTests/fluidFlowTests/testCompFlowUtils.hpp index eebfa0a114c..5b336a0481a 100644 --- a/src/coreComponents/unitTests/fluidFlowTests/testCompFlowUtils.hpp +++ b/src/coreComponents/unitTests/fluidFlowTests/testCompFlowUtils.hpp @@ -273,8 +273,8 @@ void testCompositionNumericalDerivatives( CompositionalMultiphaseFVM & solver, compDens[ei][jc] += dRho; } ); - // recompute component fractions - solver.updateComponentFraction( subRegion ); + // recompute global component fractions + solver.updateGlobalComponentFraction( subRegion ); // check values in each cell forAll< serialPolicy >( subRegion.size(), [=] ( localIndex const ei ) diff --git a/src/coreComponents/unitTests/fluidFlowTests/testThermalSinglePhaseFlow.cpp b/src/coreComponents/unitTests/fluidFlowTests/testThermalSinglePhaseFlow.cpp index 04315a4c1fd..05a369aa969 100644 --- a/src/coreComponents/unitTests/fluidFlowTests/testThermalSinglePhaseFlow.cpp +++ b/src/coreComponents/unitTests/fluidFlowTests/testThermalSinglePhaseFlow.cpp @@ -107,7 +107,7 @@ char const * xmlInput = compressibility="5e-10" thermalExpansionCoeff="7e-4" viscosibility="0.0" - volumetricHeatCapacity="4.5e3" /> + specificHeatCapacity="4.5e3" /> diff --git a/src/coreComponents/unitTests/wavePropagationTests/testWavePropagation.cpp b/src/coreComponents/unitTests/wavePropagationTests/testWavePropagation.cpp index 44fcc1dce8a..4edd9e63d6f 100644 --- a/src/coreComponents/unitTests/wavePropagationTests/testWavePropagation.cpp +++ b/src/coreComponents/unitTests/wavePropagationTests/testWavePropagation.cpp @@ -124,14 +124,14 @@ char const * xmlInput = name="cellVelocity" initialCondition="1" objectPath="ElementRegions/Region/cb" - fieldName="mediumVelocity" + fieldName="acousticVelocity" scale="1500" setNames="{ all }"/> diff --git a/src/docs/sphinx/CompleteXMLSchema.rst b/src/docs/sphinx/CompleteXMLSchema.rst index 0ed2e64afe2..ff88d7a1a73 100644 --- a/src/docs/sphinx/CompleteXMLSchema.rst +++ b/src/docs/sphinx/CompleteXMLSchema.rst @@ -864,6 +864,27 @@ Element: PorousModifiedCamClay .. include:: ../../coreComponents/schema/docs/PorousModifiedCamClay.rst +.. _XML_PorousViscoDruckerPrager: + +Element: PorousViscoDruckerPrager +============================== +.. include:: ../../coreComponents/schema/docs/PorousViscoDruckerPrager.rst + + +.. _XML_PorousViscoExtendedDruckerPrager: + +Element: PorousViscoExtendedDruckerPrager +============================== +.. include:: ../../coreComponents/schema/docs/PorousViscoExtendedDruckerPrager.rst + + +.. _XML_PorousViscoModifiedCamClay: + +Element: PorousViscoModifiedCamClay +============================== +.. include:: ../../coreComponents/schema/docs/PorousViscoModifiedCamClay.rst + + .. _XML_PressurePorosity: Element: PressurePorosity @@ -2233,6 +2254,27 @@ Datastructure: PorousModifiedCamClay .. include:: ../../coreComponents/schema/docs/PorousModifiedCamClay_other.rst +.. _DATASTRUCTURE_PorousViscoDruckerPrager: + +Datastructure: PorousViscoDruckerPrager +==================================== +.. 
include:: ../../coreComponents/schema/docs/PorousViscoDruckerPrager_other.rst + + +.. _DATASTRUCTURE_PorousViscoExtendedDruckerPrager: + +Datastructure: PorousViscoExtendedDruckerPrager +==================================== +.. include:: ../../coreComponents/schema/docs/PorousViscoExtendedDruckerPrager_other.rst + + +.. _DATASTRUCTURE_PorousViscoModifiedCamClay: + +Datastructure: PorousViscoModifiedCamClay +==================================== +.. include:: ../../coreComponents/schema/docs/PorousViscoModifiedCamClay_other.rst + + .. _DATASTRUCTURE_PressurePorosity: Datastructure: PressurePorosity diff --git a/src/docs/sphinx/QuickStart.rst b/src/docs/sphinx/QuickStart.rst index 35f709ee90b..111918a8aa8 100644 --- a/src/docs/sphinx/QuickStart.rst +++ b/src/docs/sphinx/QuickStart.rst @@ -89,7 +89,7 @@ The main repository of interest is obviously GEOS itself: `GEOS `_ or the streamlined CMake-based foundation `BTL `_ or the streamlined CMake-based foundation `BLT `_ . These packages are handled as `Git Submodules `_, which provides a transparent way of coordinating multiple code development projects. Most users will never have to worry that these modules are in fact separate projects from GEOS. diff --git a/src/docs/sphinx/advancedExamples/validationStudies/carbonStorage/isothermalHystInjection/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/carbonStorage/isothermalHystInjection/Example.rst index bc15312c9fe..03796a32495 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/carbonStorage/isothermalHystInjection/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/carbonStorage/isothermalHystInjection/Example.rst @@ -37,7 +37,7 @@ presented in `(Class et al., 2009) `__. The setup is illustrated in the figure below. -The mesh can be found in `GEOSXDATA `__ and +The mesh can be found in `GEOSDATA `__ and was provided for the benchmark. It discretizes the widely-used `Johansen` reservoir, which consists in a tilted reservoir with a main fault. The model domain has the following dimensions: 9600 x 8900 x [90-140] m. diff --git a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/Example.rst index 9d2515ba0e5..6e555c74745 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/Example.rst @@ -84,7 +84,7 @@ verbosity levels, target regions, and other solver-specific attributes. Additionally, we need to specify another solver of type, ``EmbeddedSurfaceGenerator``, which is used to discretize the fracture planes. -.. literalinclude:: ../../../../../../../inputFiles/efemFractureMechanics/Sneddon_embeddedFrac_base.xml +.. literalinclude:: ../../../../../../../inputFiles/efemFractureMechanics/Sneddon_embeddedFrac_verification.xml :language: xml :start-after: :end-before: @@ -234,7 +234,7 @@ In this example, a task is specified to output fracture aperture (normal opening - The test case with EmbeddedFractures solver: -.. literalinclude:: ../../../../../../../inputFiles/efemFractureMechanics/Sneddon_embeddedFrac_verification.xml +.. 
literalinclude:: ../../../../../../../inputFiles/efemFractureMechanics/Sneddon_embeddedFrac_base.xml :language: xml :start-after: :end-before: diff --git a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_contactMechanics.hdf5 b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_contactMechanics.hdf5 index 1646430aa8b..5a5aa97bf3c 100644 Binary files a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_contactMechanics.hdf5 and b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_contactMechanics.hdf5 differ diff --git a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_embeddedFrac.hdf5 b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_embeddedFrac.hdf5 index dabc71790ba..6ca11781b7f 100644 Binary files a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_embeddedFrac.hdf5 and b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_embeddedFrac.hdf5 differ diff --git a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_hydroFrac.hdf5 b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_hydroFrac.hdf5 index ae790cd8e0d..37424fc7b84 100644 Binary files a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_hydroFrac.hdf5 and b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/displacementJump_hydroFrac.hdf5 differ diff --git a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/sneddonFigure.py b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/sneddonFigure.py index c16c015da0c..5d3db7e4687 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/sneddonFigure.py +++ b/src/docs/sphinx/advancedExamples/validationStudies/faultMechanics/sneddon/sneddonFigure.py @@ -96,13 +96,13 @@ def main(): loc_HydroFrac = x[0, :, 1] #-------- Extract info from XML - xmlFilePath = "../../../../../../../inputFiles/efemFractureMechanics/Sneddon_embeddedFrac_base.xml" + xmlFilePath = "../../../../../../../inputFiles/efemFractureMechanics/Sneddon_embeddedFrac" - mechanicalParameters = getMechanicalParametersFromXML(xmlFilePath) - appliedPressure = getFracturePressureFromXML(xmlFilePath) + mechanicalParameters = getMechanicalParametersFromXML(xmlFilePath+"_base.xml") + appliedPressure = getFracturePressureFromXML(xmlFilePath+"_base.xml") # Get length of the fracture - length, origin = getFractureLengthFromXML(xmlFilePath) + length, origin = getFractureLengthFromXML(xmlFilePath+"_verification.xml") # Initialize Sneddon's analytical solution sneddonAnalyticalSolution = Sneddon(mechanicalParameters, length, appliedPressure) diff --git a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdToughnessDominated/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdToughnessDominated/Example.rst index 4081cb5f501..ff659fda37c 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdToughnessDominated/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdToughnessDominated/Example.rst @@ -236,7 +236,7 @@ the HDF5 output is postprocessed and temporal evolution of fracture 
characterisc 8 2.446e+05 0.0001277 4.5 10 2.411e+05 0.0001409 5 -Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See :ref:`PythonToolsSetup` for details). +Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See `Python Tools Setup `_ for details). A good agreement between GEOS results and analytical solutions is shown in the comparison below, which is generated using the visualization script: diff --git a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdValidation/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdValidation/Example.rst index 8a4fdf8cfa0..ec4183a473c 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdValidation/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdValidation/Example.rst @@ -245,7 +245,7 @@ By running the query script ``kgdValidationQueries.py``, the HDF5 output is post 0.4 1.183e+07 0 0 0 0.0005662 0.5 1.125e+07 0 0 0 0.0005662 -Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See :ref:`PythonToolsSetup` for details). +Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See `Python Tools Setup `_ for details). The figure below shows simulation results of the fracture extent at the end of the injection, which is generated using the visualization script ``kgdValidationFigure.py``. The temporal evolution of the fracture characteristics (length, aperture and pressure) from the GEOS simulation are extracted and compared with the experimental data gathered at specific locations. As observed, the time history plots of the modelling predictions (green curves) for the pressure at three gage locations, the fracture length, and the fracture aperture at LVDT location correlate well with the experimental data (blue circles). diff --git a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdViscosityDominated/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdViscosityDominated/Example.rst index 34dee2860d6..0d4f26b0df6 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdViscosityDominated/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/kgdViscosityDominated/Example.rst @@ -89,7 +89,7 @@ the HDF5 output is postprocessed and temporal evolution of fracture characterisc 8 7.28e+05 0.000209 3 10 6.512e+05 0.000222 3.5 -Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See :ref:`PythonToolsSetup` for details). +Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See `Python Tools Setup `_ for details). 
A good agreement between GEOS results and analytical solutions is shown in the comparison below, which is generated using the visualization script: diff --git a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracToughnessDominated/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracToughnessDominated/Example.rst index 89bf205a6bc..b453eaee88d 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracToughnessDominated/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracToughnessDominated/Example.rst @@ -258,7 +258,7 @@ the HDF5 output is postprocessed and temporal evolution of fracture characterisc 8 6.07e+05 0.0006163 13.73 10 6.32e+05 0.0006827 14.45 -Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See :ref:`PythonToolsSetup` for details). +Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See `Python Tools Setup `_ for details). Next, the figure below compares the asymptotic solutions (curves) and the GEOS simulation results (markers) for this analysis, which is generated using the visualization script: diff --git a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracViscosityDominated/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracViscosityDominated/Example.rst index 1b394c51d7c..7f0bbfc9b49 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracViscosityDominated/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pennyFracViscosityDominated/Example.rst @@ -263,7 +263,7 @@ the HDF5 output is postprocessed and temporal evolution of fracture characterisc 8 1.005e+06 0.0007918 13.73 10 9.482e+05 0.0008189 15.14 -Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See :ref:`PythonToolsSetup` for details). +Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See `Python Tools Setup `_ for details). Next, GEOS simulation results (markers) and asymptotic solutions (curves) for the case with viscosity-storage dominated assumptions are plotted together in the following figure, which is generated using the visualization script: diff --git a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pknFracViscosityDominated/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pknFracViscosityDominated/Example.rst index b01f651a189..8f4b2b2ef1a 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pknFracViscosityDominated/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/hydraulicFracture/pknFracViscosityDominated/Example.rst @@ -253,7 +253,7 @@ the HDF5 output is postprocessed and temporal evolution of fracture characterisc 8 1.044e+06 0.0008482 12.8 10 1.047e+06 0.0009098 14.8 -Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See :ref:`PythonToolsSetup` for details). +Note: GEOS python tools ``geosx_xml_tools`` should be installed to run the query script (See `Python Tools Setup `_ for details). 
Next, figure below shows the comparisons between the results from GEOS simulations (markers) and the corresponding analytical solutions (curves) for the example with viscosity-storage dominated assumptions, which is generated using the visualization script: diff --git a/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Example.rst index 313f9944f5a..ea1107077f7 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Example.rst @@ -265,7 +265,7 @@ The next figure shows the distribution of vertical displacement (:math:`u_z(x,z, The figure below compares the results from GEOS (marks) and the corresponding analytical solution (lines) for the pore pressure along the x-direction and vertical displacement along the z-direction. GEOS reliably captures the short-term Mandel-Cryer effect and shows excellent agreement with the analytical solution at various times. -.. plot:: docs/sphinx/advancedExamples/validationStudies/faultMechanics/mandel/mandelFigure.py +.. plot:: docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/mandelFigure.py diff --git a/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Mandel_Verification.png b/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Mandel_Verification.png deleted file mode 100644 index d9ffdb71df3..00000000000 Binary files a/src/docs/sphinx/advancedExamples/validationStudies/poromechanics/mandel/Mandel_Verification.png and /dev/null differ diff --git a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/deviatedPoroElasticWellbore/Example2.rst b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/deviatedPoroElasticWellbore/Example2.rst index 0e72064ba11..00bd5351de7 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/deviatedPoroElasticWellbore/Example2.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/deviatedPoroElasticWellbore/Example2.rst @@ -10,7 +10,7 @@ Problem description ------------------------------------------------------------------ -This example deals with the problem of drilling a deviated poro-elastic wellbore. This is an extension of the poroelastic wellbore example :ref:`AdvancedExampleDeviatedPoroElasticWellbore` with the consideration of in-situ stresses and in-situ pore pressure. Both pore pressure and mud pressure are supposed to be nil at the borehole wall following the consideration of `(Abousleiman and Cui, 1998) `__. Also, the in-situ horizontal stresses are anisotropic, i.e. :math:`\sigma_hmax` > :math:`\sigma_hmin`. The wellbore trajectory is deviated from the directions of the in-situ stresses. Analytical solutions of the pore pressure, the radial and hoop stresses in the near wellbore region are given by `(Abousleiman and Cui, 1998) `__. They are hereby used to verify the modeling predictions. +This example deals with the problem of drilling a deviated poro-elastic wellbore. This is an extension of the poroelastic wellbore example :ref:`AdvancedExampleDeviatedPoroElasticWellbore` with the consideration of in-situ stresses and in-situ pore pressure. Both pore pressure and mud pressure are supposed to be nil at the borehole wall following the consideration of `(Abousleiman and Cui, 1998) `__. Also, the in-situ horizontal stresses are anisotropic, i.e. 
:math:`\sigma_{hmax}` > :math:`\sigma_{hmin}`. The wellbore trajectory is deviated from the directions of the in-situ stresses. Analytical solutions of the pore pressure, the radial and hoop stresses in the near wellbore region are given by `(Abousleiman and Cui, 1998) `__. They are hereby used to verify the modeling predictions. **Input file** diff --git a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/Example.rst b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/Example.rst index f99d3d2cb21..84752f872ea 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/Example.rst +++ b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/Example.rst @@ -102,13 +102,6 @@ A good agreement between the GEOS results and analytical results is shown in the .. plot:: docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/pureThermalDiffusionAroundWellbore.py -.. _resultsThermalDiffusionWellboreFig: -.. figure:: radialThermalDiffusionResults.png - :align: center - :width: 500 - :figclass: align-center - - Radial thermal diffusion around a wellbore: a validation against analytical results ------------------------------------------------------------------ To go further diff --git a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/radialThermalDiffusionResults.png b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/radialThermalDiffusionResults.png deleted file mode 100644 index c6226e33579..00000000000 Binary files a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/pureThermalDiffusion/radialThermalDiffusionResults.png and /dev/null differ diff --git a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/thermoPoroElasticWellbore/analyticalResults.py b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/thermoPoroElasticWellbore/analyticalResults.py index bab0ff411e9..c4ae5d7c182 100644 --- a/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/thermoPoroElasticWellbore/analyticalResults.py +++ b/src/docs/sphinx/advancedExamples/validationStudies/wellboreProblems/thermoPoroElasticWellbore/analyticalResults.py @@ -32,7 +32,7 @@ def getDataFromXML(xmlFilePathPrefix): drainedBulkModulusRock = float( tree.find('Constitutive/ElasticIsotropic').get('defaultBulkModulus') ) defaultShearModulus = float( tree.find('Constitutive/ElasticIsotropic').get('defaultShearModulus') ) - defaultThermalExpansionCoefficient = float( tree.find('Constitutive/ElasticIsotropic').get('defaultThermalExpansionCoefficient') ) + defaultDrainedLinearTEC = float( tree.find('Constitutive/ElasticIsotropic').get('defaultDrainedLinearTEC') ) defaultReferencePorosity = float( tree.find('Constitutive/BiotPorosity').get('defaultReferencePorosity') ) grainBulkModulus = float( tree.find('Constitutive/BiotPorosity').get('grainBulkModulus') ) fluidCompressibility = float( tree.find('Constitutive/ThermalCompressibleSinglePhaseFluid').get('compressibility') ) @@ -42,7 +42,7 @@ def getDataFromXML(xmlFilePathPrefix): volumetricHeatCapacity = float( tree.find('Constitutive/SolidInternalEnergy').get('volumetricHeatCapacity') ) permeability = float( extractDataFromXMLList( tree.find('Constitutive/ConstantPermeability').get('permeabilityComponents') )[0] ) - return [ri, Ti, drainedBulkModulusRock, defaultShearModulus, 
defaultThermalExpansionCoefficient, defaultReferencePorosity, grainBulkModulus, fluidCompressibility, fluidViscosity, fluidThermalExpansionCoefficient, permeability, thermalConductivity, volumetricHeatCapacity] + return [ri, Ti, drainedBulkModulusRock, defaultShearModulus, defaultDrainedLinearTEC, defaultReferencePorosity, grainBulkModulus, fluidCompressibility, fluidViscosity, fluidThermalExpansionCoefficient, permeability, thermalConductivity, volumetricHeatCapacity] def analyticalResults(t): xmlFilePathPrefix = "../../../../../../../inputFiles/wellbore/ThermoPoroElasticWellbore" @@ -85,7 +85,7 @@ def analyticalResults(t): Ku = K + M*alpha*alpha S = (3.0*Ku + 4.0*G) /M /(3.0*K+4.0*G) - beta_s = beta_d + beta_s = beta_d # TODO: update for the case porosityTEC != drainedLinearTEC beta_v = porosity*(beta_f - beta_s) beta_e = beta_d*alpha + beta_v diff --git a/src/docs/sphinx/basicExamples/multiphaseFlowWithWells/Example.rst b/src/docs/sphinx/basicExamples/multiphaseFlowWithWells/Example.rst index f0fdd28eece..0c1d7be2a45 100644 --- a/src/docs/sphinx/basicExamples/multiphaseFlowWithWells/Example.rst +++ b/src/docs/sphinx/basicExamples/multiphaseFlowWithWells/Example.rst @@ -29,12 +29,12 @@ This example is based on the XML file located at ../../../../../inputFiles/compositionalMultiphaseWell/benchmarks/Egg/deadOilEgg_benchmark.xml -The mesh file corresponding to the Egg model is stored in the GEOSXDATA repository. -Therefore, you must first download the GEOSXDATA repository in the same folder +The mesh file corresponding to the Egg model is stored in the GEOSDATA repository. +Therefore, you must first download the GEOSDATA repository in the same folder as the GEOS repository to run this test case. .. note:: - `GEOSXDATA `_ is a separate repository in which we store large mesh files in order to keep the main GEOS repository lightweight. + `GEOSDATA `_ is a separate repository in which we store large mesh files in order to keep the main GEOS repository lightweight. The XML file considered here follows the typical structure of the GEOS input files: diff --git a/src/docs/sphinx/buildGuide/BuildProcess.rst b/src/docs/sphinx/buildGuide/BuildProcess.rst index e01da32d298..37a5d66d9d3 100644 --- a/src/docs/sphinx/buildGuide/BuildProcess.rst +++ b/src/docs/sphinx/buildGuide/BuildProcess.rst @@ -73,8 +73,8 @@ Option Default Explanation ``ENABLE_WARNINGS_AS_ERRORS`` ``ON`` Treat all warnings as errors ``ENABLE_PVTPackage`` ``ON`` Enable PVTPackage library (required for compositional flow runs) ``ENABLE_TOTALVIEW_OUTPUT`` ``OFF`` Enables TotalView debugger custom view of GEOS data structures +``ENABLE_COV`` ``OFF`` Enables code coverage ``GEOS_ENABLE_TESTS`` ``ON`` Enables unit testing targets -``GEOSX_ENABLE_FPE`` ``ON`` Enable floating point exception trapping ``GEOSX_LA_INTERFACE`` ``Hypre`` Choiсe of Linear Algebra backend (Hypre/Petsc/Trilinos) ``GEOSX_BUILD_OBJ_LIBS`` ``ON`` Use CMake Object Libraries build ``GEOSX_BUILD_SHARED_LIBS`` ``OFF`` Build ``geosx_core`` as a shared library instead of static diff --git a/src/docs/sphinx/pythonTools/geosx_mesh_tools.rst b/src/docs/sphinx/pythonTools/geosx_mesh_tools.rst deleted file mode 100644 index efb90d4c185..00000000000 --- a/src/docs/sphinx/pythonTools/geosx_mesh_tools.rst +++ /dev/null @@ -1,51 +0,0 @@ - -GEOS Mesh Tools --------------------------- - -The `geosx_mesh_tools` python package includes tools for converting meshes from common formats (abaqus, etc.) to those that can be read by GEOS (gmsh, vtk). 
-See :ref:`PythonToolsSetup` for details on setup instructions, and :ref:`ExternalMeshUsage` for a detailed description of how to use external meshes in GEOS. -The available console scripts for this package and its API are described below. - - -convert_abaqus -^^^^^^^^^^^^^^ - -Compile an xml file with advanced features into a single file that can be read by GEOS. - -.. argparse:: - :module: geosx_mesh_tools.main - :func: build_abaqus_converter_input_parser - :prog: convert_abaqus - - -.. note:: - For vtk format meshes, the user also needs to determine the region ID numbers and names of nodesets to import into GEOS. - The following shows how these could look in an input XML file for a mesh with three regions (*REGIONA*, *REGIONB*, and *REGIONC*) and six nodesets (*xneg*, *xpos*, *yneg*, *ypos*, *zneg*, and *zpos*): - - -.. code-block:: xml - - - - - - - - - - - - -API -^^^ - -.. automodule:: geosx_mesh_tools.abaqus_converter - :members: diff --git a/src/docs/sphinx/pythonTools/geosx_xml_tools.rst b/src/docs/sphinx/pythonTools/geosx_xml_tools.rst deleted file mode 100644 index 92be5ad6df0..00000000000 --- a/src/docs/sphinx/pythonTools/geosx_xml_tools.rst +++ /dev/null @@ -1,82 +0,0 @@ - -.. _XMLToolsPackage: - -GEOS XML Tools --------------------------- - -The `geosx_xml_tools` python package adds a set of advanced features to the GEOS xml format: units, parameters, and symbolic expressions. -See :ref:`PythonToolsSetup` for details on setup instructions, and :ref:`AdvancedXMLFeatures` for a detailed description of the input format. -The available console scripts for this package and its API are described below. - - -convert_abaqus -^^^^^^^^^^^^^^ - -Convert an abaqus format mesh file to gmsh or vtk format. - -.. argparse:: - :module: geosx_xml_tools.command_line_parsers - :func: build_preprocessor_input_parser - :prog: preprocess_xml - - -format_xml -^^^^^^^^^^^^^^ - -Formats an xml file. - -.. argparse:: - :module: geosx_xml_tools.command_line_parsers - :func: build_xml_formatter_input_parser - :prog: format_xml - - -check_xml_attribute_coverage -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Checks xml attribute coverage for files in the GEOS repository. - -.. argparse:: - :module: geosx_xml_tools.command_line_parsers - :func: build_attribute_coverage_input_parser - :prog: check_xml_attribute_coverage - - -check_xml_redundancy -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Checks for redundant attribute definitions in an xml file, such as those that duplicate the default value. - -.. argparse:: - :module: geosx_xml_tools.command_line_parsers - :func: build_xml_redundancy_input_parser - :prog: check_xml_redundancy - - -API -^^^ - -.. automodule:: geosx_xml_tools.main - :members: - -.. automodule:: geosx_xml_tools.xml_processor - :members: - -.. automodule:: geosx_xml_tools.xml_formatter - :members: - -.. automodule:: geosx_xml_tools.unit_manager - :members: - -.. automodule:: geosx_xml_tools.regex_tools - :members: - -.. automodule:: geosx_xml_tools.xml_redundancy_check - :members: - -.. automodule:: geosx_xml_tools.attribute_coverage - :members: - -.. 
automodule:: geosx_xml_tools.table_generator - :members: - diff --git a/src/docs/sphinx/pythonTools/hdf5_wrapper.rst b/src/docs/sphinx/pythonTools/hdf5_wrapper.rst deleted file mode 100644 index 01f1ad9fe4f..00000000000 --- a/src/docs/sphinx/pythonTools/hdf5_wrapper.rst +++ /dev/null @@ -1,60 +0,0 @@ - -HDF5 Wrapper --------------------------- - -The `hdf5_wrapper` python package adds a wrapper to `h5py` that greatly simplifies reading/writing to/from hdf5-format files. - - -Usage -^^^^^^^ - -Once loaded, the contents of a file can be navigated in the same way as a native python dictionary. - -.. code-block:: python - - import hdf5_wrapper - - data = hdf5_wrapper.hdf5_wrapper('data.hdf5') - - test = data['test'] - for k, v in data.items(): - print('key: %s, value: %s' % (k, str(v))) - - -If the user indicates that a file should be opened in write-mode (`w`) or read/write-mode (`a`), then the file can be created or modified. -Note: for these changes to be written to the disk, the wrapper may need to be closed or deleted. - -.. code-block:: python - - import hdf5_wrapper - import numpy as np - - data = hdf5_wrapper.hdf5_wrapper('data.hdf5', mode='w') - data['string'] = 'string' - data['integer'] = 123 - data['array'] = np.random.randn(3, 4, 5) - data['child'] = {'float': 1.234} - - -Existing dictionaries can be placed on the current level: - -.. code-block:: python - - existing_dict = {'some': 'value'} - data.insert(existing_dict) - - -And external hdf5 format files can be linked together: - -.. code-block:: python - - for k in ['child_a', 'child_b']: - data.link(k, '%s.hdf5' % (k)) - - - -API -^^^^^ - -.. automodule:: hdf5_wrapper.wrapper - :members: diff --git a/src/docs/sphinx/pythonTools/mesh_doctor.rst b/src/docs/sphinx/pythonTools/mesh_doctor.rst deleted file mode 100644 index d9de402e15f..00000000000 --- a/src/docs/sphinx/pythonTools/mesh_doctor.rst +++ /dev/null @@ -1,124 +0,0 @@ -``mesh_doctor`` ---------------- - -``mesh_doctor`` is a ``python`` executable that can be used through the command line to perform various checks, validations, and tiny fixes to the ``vtk`` mesh that are meant to be used in ``geos``. -``mesh_doctor`` is organized as a collection of modules with their dedicated sets of options. -The current page will introduce those modules, but the details and all the arguments can be retrieved by using the ``--help`` option for each module. - -Modules -^^^^^^^ - -To list all the modules available through ``mesh_doctor``, you can simply use the ``--help`` option, which will list all available modules as well as a quick summary. - -.. command-output:: python mesh_doctor.py --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -Then, if you are interested in a specific module, you can ask for its documentation using the ``mesh_doctor module_name --help`` pattern. -For example - -.. command-output:: python mesh_doctor.py collocated_nodes --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``mesh_doctor`` loads its module dynamically. -If a module can't be loaded, ``mesh_doctor`` will proceed and try to load other modules. -If you see a message like - -.. code-block:: bash - - [1970-04-14 03:07:15,625][WARNING] Could not load module "collocated_nodes": No module named 'vtkmodules' - -then most likely ``mesh_doctor`` could not load the ``collocated_nodes`` module, because the ``vtk`` python package was not found. -Thereafter, the documentation for module ``collocated_nodes`` will not be displayed. 
-You can solve this issue by installing the dependencies of ``mesh_doctor`` defined in its ``requirements.txt`` file (``python -m pip install -r requirements.txt``). - -Here is a list and brief description of all the modules available. - -``collocated_nodes`` -"""""""""""""""""""" - -Displays the neighboring nodes that are closer to each other than a prescribed threshold. -It is not uncommon to define multiple nodes for the exact same position, which will typically be an issue for ``geos`` and should be fixed. - -.. command-output:: python mesh_doctor.py collocated_nodes --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``element_volumes`` -""""""""""""""""""" - -Computes the volumes of all the cells and displays the ones that are below a prescribed threshold. -Cells with negative volumes will typically be an issue for ``geos`` and should be fixed. - -.. command-output:: python mesh_doctor.py element_volumes --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``fix_elements_orderings`` -"""""""""""""""""""""""""" - -It sometimes happens that an exported mesh does not abide by the ``vtk`` orderings. -The ``fix_elements_orderings`` module can rearrange the nodes of given types of elements. -This can be convenient if you cannot regenerate the mesh. - -.. command-output:: python mesh_doctor.py fix_elements_orderings --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``generate_cube`` -""""""""""""""""" - -This module conveniently generates cubic meshes in ``vtk``. -It can also generate fields with simple values. -This tool can also be useful to generate a trial mesh that will later be refined or customized. - -.. command-output:: python mesh_doctor.py generate_cube --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``generate_fractures`` -"""""""""""""""""""""" - -For a conformal fracture to be defined in a mesh, ``geos`` requires the mesh to be split at the faces where the fracture gets across the mesh. -The ``generate_fractures`` module will split the mesh and generate the multi-block ``vtk`` files. - -.. command-output:: python mesh_doctor.py generate_fractures --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``generate_global_ids`` -""""""""""""""""""""""" - -When running ``geos`` in parallel, `global ids` can be used to refer to data across multiple ranks. -The ``generate_global_ids`` can generate `global ids` for the imported ``vtk`` mesh. - -.. command-output:: python mesh_doctor.py generate_global_ids --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``non_conformal`` -""""""""""""""""" - -This module will detect elements which are close enough (there's a user defined threshold) but which are not in front of each other (another threshold can be defined). -`Close enough` can be defined in terms or proximity of the nodes and faces of the elements. -The angle between two faces can also be precribed. -This module can be a bit time consuming. - -.. command-output:: python mesh_doctor.py non_conformal --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``self_intersecting_elements`` -"""""""""""""""""""""""""""""" - -Some meshes can have cells that auto-intersect. -This module will display the elements that have faces intersecting. - -.. 
command-output:: python mesh_doctor.py self_intersecting_elements --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor - -``supported_elements`` -"""""""""""""""""""""" - -``geos`` supports a specific set of elements. -Let's cite the standard elements like `tetrahedra`, `wedges`, `pyramids` or `hexahedra`. -But also prismes up to 11 faces. -``geos`` also supports the generic ``VTK_POLYHEDRON``/``42`` elements, which are converted on the fly into one of the elements just described. - -The ``supported_elements`` check will validate that no unsupported element is included in the input mesh. -It will also verify that the ``VTK_POLYHEDRON`` cells can effectively get converted into a supported type of element. - -.. command-output:: python mesh_doctor.py supported_elements --help - :cwd: ../../../coreComponents/python/modules/geosx_mesh_doctor diff --git a/src/docs/sphinx/pythonTools/pygeosx_tools.rst b/src/docs/sphinx/pythonTools/pygeosx_tools.rst deleted file mode 100644 index 9da66b74361..00000000000 --- a/src/docs/sphinx/pythonTools/pygeosx_tools.rst +++ /dev/null @@ -1,24 +0,0 @@ - -PyGEOSX Tools --------------------------- - -The `pygeosx_tools` python package adds a variety of tools for working with pygeosx objects. -These include common operations such as setting the value of geosx wrappers with python functions, parallel communication, and file IO. -Examples using these tools can be found here: :ref:`pygeosxExamples`. - - -API -^^^^^ - -.. automodule:: pygeosx_tools.wrapper - :members: - -.. automodule:: pygeosx_tools.file_io - :members: - -.. automodule:: pygeosx_tools.mesh_interpolation - :members: - -.. automodule:: pygeosx_tools.well_log - :members: - diff --git a/src/docs/sphinx/pythonTools/pythonAPI.rst b/src/docs/sphinx/pythonTools/pythonAPI.rst deleted file mode 100644 index b5300e8f607..00000000000 --- a/src/docs/sphinx/pythonTools/pythonAPI.rst +++ /dev/null @@ -1,54 +0,0 @@ - -Python Tools -========================== - - -.. _PythonToolsSetup: - -Python Tools Setup ---------------------------------- - -The preferred method to setup the GEOSX python tools is to run the following command in the build directory: - -.. code-block:: bash - - make geosx_python_tools - - -This will attempt to install the required packages into the python distribution indicated via the `Python3_EXECUTABLE` cmake variable (also used by pygeosx). - -If the user does not have write access for the target python distribution, the installation will attempt to create a new virtual python environment (Note: this requires that the virtualenv package be installed). -If any package dependencies are missing, then the install script will attempt to fetch them from the internet using pip. -After installation, these packages will be available for import within the associated python distribution, and a set of console scripts will be available within the GEOSX build bin directory. - -Alternatively, these packages can be installed manually into a python environment using pip: - -.. code-block:: bash - - cd GEOSX/src/coreComponents/python/modules/geosx_mesh_tools_package - pip install --upgrade . - - cd ../geosx_xml_tools_package - pip install --upgrade . - - # Etc. - - -Packages ------------------------ - - -.. 
toctree:: - :maxdepth: 1 - - hdf5_wrapper - - geosx_mesh_tools - - geosx_xml_tools - - pygeosx_tools - - timehistory - - mesh_doctor diff --git a/src/docs/sphinx/pythonTools/timehistory.rst b/src/docs/sphinx/pythonTools/timehistory.rst deleted file mode 100644 index cf3ac46eb8e..00000000000 --- a/src/docs/sphinx/pythonTools/timehistory.rst +++ /dev/null @@ -1,6 +0,0 @@ - -Time History Tools --------------------------- - -.. automodule:: timehistory.plot_time_history - :members: diff --git a/src/docs/sphinx/requirements.txt b/src/docs/sphinx/requirements.txt index 0fb4f2f3c11..7ddae753d66 100644 --- a/src/docs/sphinx/requirements.txt +++ b/src/docs/sphinx/requirements.txt @@ -5,14 +5,9 @@ h5py mpmath docutils>=0.18 pandas -# using plantuml for diagrams Sphinx>=7.0.0 -# pydata-sphinx-theme sphinx_rtd_theme sphinxcontrib-plantuml sphinx-argparse sphinx-design -# Running CLI programs and capture outputs sphinxcontrib-programoutput>=0.17 -# Installing the mesh_doctor requirements to be able to load all the modules and run the help. --r ../../coreComponents/python/modules/geosx_mesh_doctor/requirements.txt \ No newline at end of file diff --git a/src/docs/sphinx/userGuide/Index.rst b/src/docs/sphinx/userGuide/Index.rst index 46a76ccab80..1e7330a3f13 100644 --- a/src/docs/sphinx/userGuide/Index.rst +++ b/src/docs/sphinx/userGuide/Index.rst @@ -1,3 +1,5 @@ +.. _UserGuide: + ############################################################################### User Guide ############################################################################### diff --git a/src/index.rst b/src/index.rst index 6fe0dd5862e..9dedc1f4d75 100644 --- a/src/index.rst +++ b/src/index.rst @@ -94,6 +94,38 @@ you have suggestions for improving the guides below, please post an issue on our To the Advanced Examples + .. grid-item-card:: + + User Guide + ^^^^^^^^^^^^^^^^^^^ + + Detailed instructions on how to construct input files, configure problems, manage outputs, etc. + + +++ + + .. button-ref:: UserGuide + :expand: + :color: info + :click-parent: + + To the User Guide + + .. grid-item-card:: + + Python Tools + ^^^^^^^^^^^^^^^^^^^ + + Documentation for the python packages distributed alongside GEOS used to manage xml files, condition numerical meshes, read outputs, etc. + + +++ + + .. 
button-link:: https://geosx-geosx.readthedocs-hosted.com/projects/geosx-geospythonpackages/en/latest/ + :expand: + :color: info + :click-parent: + + To the Python Tools Documentation + ******************** Table of Contents @@ -116,8 +148,6 @@ Table of Contents docs/sphinx/Doxygen - docs/sphinx/pythonTools/pythonAPI - docs/sphinx/buildGuide/Index docs/sphinx/CompleteXMLSchema diff --git a/src/main/main.cpp b/src/main/main.cpp index 62c7137e8a8..795228f32e3 100644 --- a/src/main/main.cpp +++ b/src/main/main.cpp @@ -16,6 +16,7 @@ #include "common/DataTypes.hpp" #include "common/Format.hpp" #include "common/TimingMacros.hpp" +#include "common/Units.hpp" #include "mainInterface/initialization.hpp" #include "mainInterface/ProblemManager.hpp" #include "mainInterface/GeosxState.hpp" @@ -60,9 +61,9 @@ int main( int argc, char *argv[] ) std::chrono::system_clock::duration totalTime = endTime - startTime; GEOS_LOG_RANK_0( GEOS_FMT( "Finished at {:%Y-%m-%d %H:%M:%S}", endTime ) ); - GEOS_LOG_RANK_0( GEOS_FMT( "total time {:%H:%M:%S}", totalTime ) ); - GEOS_LOG_RANK_0( GEOS_FMT( "initialization time {:%H:%M:%S}", initTime ) ); - GEOS_LOG_RANK_0( GEOS_FMT( "run time {:%H:%M:%S}", runTime ) ); + GEOS_LOG_RANK_0( GEOS_FMT( "total time {}", units::TimeFormatInfo::fromDuration( totalTime ) ) ); + GEOS_LOG_RANK_0( GEOS_FMT( "initialization time {}", units::TimeFormatInfo::fromDuration( initTime ) ) ); + GEOS_LOG_RANK_0( GEOS_FMT( "run time {}", units::TimeFormatInfo::fromDuration( runTime ) ) ); return 0; }
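The final hunk replaces the ``{:%H:%M:%S}`` duration formatting in ``main.cpp`` with ``units::TimeFormatInfo::fromDuration`` from the newly included ``common/Units.hpp``. That helper itself is not shown in this diff, so the snippet below is only a standalone sketch of the underlying idea (splitting a ``std::chrono`` duration into days, hours, minutes and seconds so that runs longer than 24 hours are reported correctly instead of wrapping). It is not the GEOS implementation, and the helper name ``printDuration`` is made up for illustration.

.. code-block:: cpp

    // Standalone sketch (not the GEOS implementation): break a chrono duration
    // into days/hours/minutes/seconds before printing, so runtimes longer than
    // 24 hours do not wrap the way a plain "{:%H:%M:%S}" format would.
    #include <chrono>
    #include <cstdio>

    void printDuration( char const * label, std::chrono::system_clock::duration d )
    {
      using namespace std::chrono;
      long long const totalSeconds = duration_cast< seconds >( d ).count();
      long long const days    = totalSeconds / 86400;
      long long const hrs     = ( totalSeconds % 86400 ) / 3600;
      long long const minutes = ( totalSeconds % 3600 ) / 60;
      long long const secs    = totalSeconds % 60;
      std::printf( "%s: %lldd %lldh %lldm %llds\n", label, days, hrs, minutes, secs );
    }

    int main()
    {
      using namespace std::chrono;
      printDuration( "total time", hours( 26 ) + minutes( 3 ) + seconds( 12 ) );  // prints "total time: 1d 2h 3m 12s"
      return 0;
    }

With this change the three elapsed-time log lines (total, initialization, run time) stay readable for long simulations, while the wall-clock end time is still formatted with ``{:%Y-%m-%d %H:%M:%S}`` as before.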