From 90f5f4982909cc80243ff9a72f4833bdd682fb68 Mon Sep 17 00:00:00 2001 From: FouadAmin Date: Fri, 19 Dec 2025 20:33:43 +0400 Subject: [PATCH 1/2] test --- test.txt | 0 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 test.txt diff --git a/test.txt b/test.txt new file mode 100644 index 00000000000..e69de29bb2d From cbf59f6579ab9044eb9805fb7f17be87fffec8fc Mon Sep 17 00:00:00 2001 From: FouadAmin Date: Sat, 20 Dec 2025 02:41:46 +0400 Subject: [PATCH 2/2] [WIP] initial ifenn integration steps --- .../physicsSolvers/CMakeLists.txt | 4 + .../physicsSolvers/DLPhysicsSolverBase.cpp | 743 +++++++++ .../physicsSolvers/DLPhysicsSolverBase.hpp | 213 +++ .../physicsSolvers/DLSharedMemoryManager.cpp | 63 + .../physicsSolvers/DLSharedMemoryManager.hpp | 132 ++ .../physicsSolvers/fluidFlow/CMakeLists.txt | 6 + .../fluidFlow/DLFlowSolverBase.cpp | 1119 +++++++++++++ .../fluidFlow/DLFlowSolverBase.hpp | 372 +++++ .../fluidFlow/DLSinglePhaseBase.cpp | 1401 +++++++++++++++++ .../fluidFlow/DLSinglePhaseBase.hpp | 424 +++++ .../fluidFlow/DLSinglePhaseFVM.cpp | 973 ++++++++++++ .../fluidFlow/DLSinglePhaseFVM.hpp | 220 +++ .../multiphysics/CMakeLists.txt | 2 + .../multiphysics/IFENNPoromechanics.cpp | 361 +++++ .../multiphysics/IFENNPoromechanics.hpp | 163 ++ 15 files changed, 6196 insertions(+) create mode 100644 src/coreComponents/physicsSolvers/DLPhysicsSolverBase.cpp create mode 100644 src/coreComponents/physicsSolvers/DLPhysicsSolverBase.hpp create mode 100644 src/coreComponents/physicsSolvers/DLSharedMemoryManager.cpp create mode 100644 src/coreComponents/physicsSolvers/DLSharedMemoryManager.hpp create mode 100644 src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.cpp create mode 100644 src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.hpp create mode 100644 src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.cpp create mode 100644 src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.hpp create mode 100644 src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.cpp create mode 100644 src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.hpp create mode 100644 src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.cpp create mode 100644 src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.hpp diff --git a/src/coreComponents/physicsSolvers/CMakeLists.txt b/src/coreComponents/physicsSolvers/CMakeLists.txt index 246fc46be5f..d127817cf6d 100644 --- a/src/coreComponents/physicsSolvers/CMakeLists.txt +++ b/src/coreComponents/physicsSolvers/CMakeLists.txt @@ -28,6 +28,8 @@ set( physicsSolversBase_headers NonlinearSolverParameters.hpp PhysicsSolverBase.hpp PhysicsSolverBaseKernels.hpp + DLPhysicsSolverBase.hpp + DLSharedMemoryManager.hpp SolverStatistics.hpp FieldStatisticsBase.hpp ) # @@ -37,6 +39,8 @@ set( physicsSolversBase_sources LinearSolverParameters.cpp NonlinearSolverParameters.cpp PhysicsSolverBase.cpp + DLPhysicsSolverBase.cpp + DLSharedMemoryManager.cpp SolverStatistics.cpp ) set( dependencyList ${parallelDeps} fileIO discretizationMethods events linearAlgebra ) diff --git a/src/coreComponents/physicsSolvers/DLPhysicsSolverBase.cpp b/src/coreComponents/physicsSolvers/DLPhysicsSolverBase.cpp new file mode 100644 index 00000000000..8e16f38f79c --- /dev/null +++ b/src/coreComponents/physicsSolvers/DLPhysicsSolverBase.cpp @@ -0,0 +1,743 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright 
(c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + +#include "DLPhysicsSolverBase.hpp" +#include "PhysicsSolverManager.hpp" + +#include "common/MpiWrapper.hpp" +#include "codingUtilities/RTTypes.hpp" +#include "common/format/EnumStrings.hpp" +#include "dataRepository/Group.hpp" +#include "physicsSolvers/LogLevelsInfo.hpp" +#include "common/format/LogPart.hpp" +#include "common/TimingMacros.hpp" +#include "linearAlgebra/solvers/KrylovSolver.hpp" +#include "mesh/DomainPartition.hpp" +#include "math/interpolation/Interpolation.hpp" +#include "common/Timer.hpp" +#include "common/Units.hpp" + +#if defined(GEOS_USE_PYGEOSX) +#include "python/PySolverType.hpp" +#endif + +namespace geos +{ + + using namespace dataRepository; + + DLPhysicsSolverBase::DLPhysicsSolverBase(string const &name, + Group *const parent) + : PhysicsSolverBase(name, parent), + m_sharedMemoryManager("DLSharedMemoryManager", parent) + { + } + + DLPhysicsSolverBase::~DLPhysicsSolverBase() = default; + + void DLPhysicsSolverBase::setupSystem( DomainPartition & domain, + DofManager & dofManager, + CRSMatrix< real64, globalIndex > & localMatrix, + ParallelVector & rhs, + ParallelVector & solution, + bool const setSparsity ) + { + GEOS_MARK_FUNCTION; + //TODO: Consider cancelling initalization of unused vectors/matrices in PhysicsSolverBase + PhysicsSolverBase::setupSystem( domain, + dofManager, + localMatrix, + rhs, + solution, + setSparsity ); + + // initialize Data Vectors needed for DL simulations + m_dofXCoords.setName( this->getName() + "/dofXCoords" ); + m_dofXCoords.create( dofManager.numLocalDofs(), MPI_COMM_GEOS ); + + m_dofYCoords.setName( this->getName() + "/dofYCoords" ); + m_dofYCoords.create( dofManager.numLocalDofs(), MPI_COMM_GEOS ); + + m_dofZCoords.setName( this->getName() + "/dofZCoords" ); + m_dofZCoords.create( dofManager.numLocalDofs(), MPI_COMM_GEOS ); + + m_prevSolution.setName( this->getName() + "/prevSolution" ); + m_prevSolution.create( dofManager.numLocalDofs(), MPI_COMM_GEOS ); + + m_strainTrace.setName( this->getName() + "/strainTrace" ); + m_strainTrace.create( dofManager.numLocalDofs(), MPI_COMM_GEOS ); + + } + + real64 DLPhysicsSolverBase::nonlinearImplicitStep(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain) + { + GEOS_MARK_FUNCTION; + + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLPhysicsSolverBase::nonlinearImplicitStep time:{} dt:{} cycle:{} solver:{} catalogName:{} ", + time_n, dt, cycleNumber, getName(), getCatalogName())); + + // In a DL approach, there is no forseeable need for a dt cut during the course of this step + real64 stepDt = dt; + + integer const maxNumberDtCuts = m_nonlinearSolverParameters.m_maxTimeStepCuts; + real64 const dtCutFactor = m_nonlinearSolverParameters.m_timeStepCutFactor; + + bool const allowNonConverged = m_nonlinearSolverParameters.m_allowNonConverged > 0; + + integer &dtAttempt = m_nonlinearSolverParameters.m_numTimeStepAttempts; + + integer const &maxConfigurationIter = 
m_nonlinearSolverParameters.m_maxNumConfigurationAttempts; + + integer &configurationLoopIter = m_nonlinearSolverParameters.m_numConfigurationAttempts; + + bool isConfigurationLoopConverged = false; + + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLPhysicsSolverBase::nonlinearImplicitStep maxNumberDtCuts:{} dtCutFactor:{} allowNonConverged:{} dtAttempt:{} maxConfigurationIter:{} configurationLoopIter:{} ", + maxNumberDtCuts, dtCutFactor, allowNonConverged, dtAttempt, maxConfigurationIter, configurationLoopIter)); + + // TODO: check if DL methods need/enable a dt cutting strategy + // outer loop attempts to apply full timestep, and managed the cutting of the timestep if + // required. + for( dtAttempt = 0; dtAttempt < maxNumberDtCuts; ++dtAttempt ) + { + // reset the solver state, since we are restarting the time step + if( dtAttempt > 0 ) + { + Timer timer( m_timers.get_inserted( "reset state" ) ); + + resetStateToBeginningOfStep( domain ); + resetConfigurationToBeginningOfStep( domain ); + } + + // it's the simplest configuration that can be attempted whenever Newton's fails as a last resource. + bool attemptedSimplestConfiguration = false; + + // TODO: check if configurationLoop is needed in DL methods, or if a single configuration is sufficient. + // Configuration loop + for( configurationLoopIter = 0; configurationLoopIter < maxConfigurationIter; ++configurationLoopIter ) + { + if( isLogLevelActive< logInfo::NonlinearSolver >( getLogLevel() ) ) + { + outputConfigurationStatistics( domain ); + } + + bool const isNewtonConverged = solveNonlinearSystemUsingDL( time_n, + stepDt, + cycleNumber, + domain ); + + if( isNewtonConverged ) + { + isConfigurationLoopConverged = updateConfiguration( domain, configurationLoopIter ); + + if( isConfigurationLoopConverged ) + { + break; // get out of configuration loop coz everything converged. + } + else + { + // increment the solver statistics for reporting purposes + getIterationStats().incrementConfigIteration(); + GEOS_LOG_LEVEL_RANK_0( logInfo::NonlinearSolver, + "---------- Configuration did not converge. Testing new configuration. ----------" ); + } + } + else if( !attemptedSimplestConfiguration ) + { + resetStateToBeginningOfStep( domain ); + bool const breakLoop = resetConfigurationToDefault( domain ); + attemptedSimplestConfiguration = true; + if( breakLoop ) + { + break; + } + else + { + GEOS_LOG_LEVEL_RANK_0( logInfo::NonlinearSolver, + "---------- Restarting Newton loop using default configuration. ----------" ); + } + } + else + { + break; // get out of configuration loop and cut the time-step if you can. 
+ } + + } // end of configuration loop + + if( isConfigurationLoopConverged ) + { + // get out of outer loop + break; + } + else + { + // TODO: update the logic for dt cutting based on the DL method specifics + // cut timestep, go back to beginning of step and restart the Newton loop + const real64 old_stepDt = stepDt; + stepDt *= dtCutFactor; + m_numTimestepsSinceLastDtCut = 0; + GEOS_LOG_LEVEL_RANK_0(logInfo::TimeStep, GEOS_FMT("\nWarning!\n ----------------------------------- \nTime step cut from {} to {}", old_stepDt, stepDt)); + GEOS_LOG_LEVEL_RANK_0(logInfo::TimeStep, "Ensure that the DL model can handle the new time step appropriately.\n -----------------------------------"); + + // notify the solver statistics counter that this is a time step cut + getIterationStats().updateTimeStepCut(); + getIterationStats().writeIterationStatsToTable(); + } + } // end of outer loop (dt chopping strategy) + + if( !isConfigurationLoopConverged ) + { + GEOS_LOG_RANK_0( "Convergence not achieved." ); + + if( allowNonConverged ) + { + GEOS_LOG_RANK_0( "The accepted solution may be inaccurate." ); + } + else + { + GEOS_ERROR( "Nonconverged solutions not allowed. Terminating..." ); + } + } + + // return the achieved timestep + return stepDt; + } + + bool DLPhysicsSolverBase::solveNonlinearSystemUsingDL(real64 const &time_n, + real64 const &stepDt, + integer const cycleNumber, + DomainPartition &domain) + { + integer const maxNewtonIter = m_nonlinearSolverParameters.m_maxIterNewton; + integer &dtAttempt = m_nonlinearSolverParameters.m_numTimeStepAttempts; + integer &configurationLoopIter = m_nonlinearSolverParameters.m_numConfigurationAttempts; + integer const minNewtonIter = m_nonlinearSolverParameters.m_minIterNewton; + real64 const newtonTol = m_nonlinearSolverParameters.m_newtonTol; + + // keep residual from previous iteration in case we need to do a line search + real64 lastResidual = 1e99; + integer &newtonIter = m_nonlinearSolverParameters.m_numNewtonIterations; + real64 scaleFactor = 1.0; + + bool isNewtonConverged = false; + + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLPhysicsSolverBase::solveNonlinearSystemUsingDL time:{} dt:{} cycle:{} solver:{} catalogName:{} ", + time_n, stepDt, cycleNumber, getName(), getCatalogName())); + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLPhysicsSolverBase::solveNonlinearSystemUsingDL maxNewtonIter:{} dtAttempt:{} configurationLoopIter:{} minNewtonIter:{} newtonTol:{} ", + maxNewtonIter, dtAttempt, configurationLoopIter, minNewtonIter, newtonTol)); + + for (newtonIter = 0; newtonIter < maxNewtonIter; ++newtonIter) + { + + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, + GEOS_FMT(" Attempt: {:2}, ConfigurationIter: {:2}, NewtonIter: {:2}", + dtAttempt, configurationLoopIter, newtonIter)); + + // assemble system matrix and rhs + { + Timer timer( m_timers.get_inserted( "assemble" ) ); + + // We sync the nonlinear convergence history. The coupled solver parameters are the one being + // used. We want to propagate the info to subsolvers. It can be important for solvers that + // have special treatment for specific iterations. 
+ synchronizeNonlinearSolverParameters(); + + // zero out matrix/rhs before assembly + m_localMatrix.zero(); + m_rhs.zero(); + + arrayView1d const localRhs = m_rhs.open(); + + // call assemble to fill the matrix and the rhs + // NOTE: in a DL approach, assembleSystem populates the required inputs for the DL model + assembleSystem(time_n, + stepDt, + domain, + m_dofManager, + m_localMatrix.toViewConstSizes(), + localRhs); + + // apply boundary conditions to system + applyBoundaryConditions(time_n, + stepDt, + domain, + m_dofManager, + m_localMatrix.toViewConstSizes(), + localRhs); + + m_rhs.close(); + + if (m_assemblyCallback) + { + // Make a copy of LA objects and ship off to the callback + array1d localRhsCopy(m_rhs.localSize()); + localRhsCopy.setValues>(m_rhs.values()); + m_assemblyCallback(m_localMatrix, std::move(localRhsCopy)); + } + } + + // TODO: Consider deleting (residual norm calculation and checks) + // calculate residual norm + real64 residualNorm = 0; + { + Timer timer( m_timers.get_inserted( "convergence check" ) ); + + // get residual norm + residualNorm = calculateResidualNorm(time_n, stepDt, domain, m_dofManager, m_rhs.values()); + GEOS_LOG_LEVEL_RANK_0( logInfo::ResidualNorm, + GEOS_FMT( " ( R ) = ( {:4.2e} )", residualNorm ) ); + getConvergenceStats().setResidualValue( "R", residualNorm ); + updateAndWriteConvergenceStep( time_n, stepDt, cycleNumber, newtonIter ); + } + + // TODO: Consider deleting (check for Newton convergence) + // // if the residual norm is less than the Newton tolerance we denote that we have + // // converged and break from the Newton loop immediately. + // if (residualNorm < newtonTol && newtonIter >= minNewtonIter) + // { + // isNewtonConverged = true; + // break; + // } + + // TODO: Consider deleting (check for max allowed residual norm) + // // if the residual norm is above the max allowed residual norm, we break from + // // the Newton loop to avoid crashes due to Newton divergence + // if (residualNorm > m_nonlinearSolverParameters.m_maxAllowedResidualNorm) + // { + // string const maxAllowedResidualNormString = NonlinearSolverParameters::viewKeysStruct::maxAllowedResidualNormString(); + // GEOS_LOG_LEVEL_RANK_0(logInfo::Convergence, + // GEOS_FMT(" The residual norm is above the {} of {}. 
Newton loop terminated.", + // maxAllowedResidualNormString, + // m_nonlinearSolverParameters.m_maxAllowedResidualNorm)); + // isNewtonConverged = false; + // break; + // } + + // TODO: Consider deleting (line search strategy) + // // do line search in case residual has increased + // if (m_nonlinearSolverParameters.m_lineSearchAction != NonlinearSolverParameters::LineSearchAction::None && residualNorm > lastResidual * m_nonlinearSolverParameters.m_lineSearchResidualFactor && newtonIter >= m_nonlinearSolverParameters.m_lineSearchStartingIteration) + // { + // bool lineSearchSuccess = false; + // if (m_nonlinearSolverParameters.m_lineSearchInterpType == NonlinearSolverParameters::LineSearchInterpolationType::Linear) + // { + // residualNorm = lastResidual; + // lineSearchSuccess = lineSearch(time_n, + // stepDt, + // cycleNumber, + // domain, + // m_dofManager, + // m_localMatrix.toViewConstSizes(), + // m_rhs, + // m_solution, + // scaleFactor, + // residualNorm); + // } + // else + // { + // lineSearchSuccess = lineSearchWithParabolicInterpolation(time_n, + // stepDt, + // cycleNumber, + // domain, + // m_dofManager, + // m_localMatrix.toViewConstSizes(), + // m_rhs, + // m_solution, + // scaleFactor, + // lastResidual, + // residualNorm); + // } + + // if (!lineSearchSuccess) + // { + // if (m_nonlinearSolverParameters.m_lineSearchAction == NonlinearSolverParameters::LineSearchAction::Attempt) + // { + // GEOS_LOG_LEVEL_RANK_0(logInfo::LineSearch, + // " Line search failed to produce reduced residual. Accepting iteration."); + // } + // else if (m_nonlinearSolverParameters.m_lineSearchAction == NonlinearSolverParameters::LineSearchAction::Require) + // { + // // if line search failed, then break out of the main Newton loop. Timestep will be cut. + // GEOS_LOG_LEVEL_RANK_0(logInfo::LineSearch, + // " Line search failed to produce reduced residual. Exiting Newton Loop."); + // break; + // } + // } + // } + + // TODO: Applying Linear Solver (to be replaced with DL model calls) + { + Timer timer( m_timers.get_inserted( "linear solver total" ) ); + + // if using adaptive Krylov tolerance scheme, update tolerance. + LinearSolverParameters::Krylov &krylovParams = m_linearSolverParameters.get().krylov; + if (krylovParams.useAdaptiveTol) + { + krylovParams.relTolerance = newtonIter > 0 ? 
eisenstatWalker(residualNorm, lastResidual, krylovParams) : krylovParams.weakestTol; + } + + { + Timer timer_setup( m_timers.get_inserted( "linear solver create" ) ); + + // Compose parallel LA matrix/rhs out of local LA matrix/rhs + // + m_matrix.create(m_localMatrix.toViewConst(), m_dofManager.numLocalDofs(), MPI_COMM_GEOS); + } + + // Output the linear system matrix/rhs for debugging purposes + debugOutputSystem(time_n, cycleNumber, newtonIter, m_matrix, m_rhs); + + // Solve the linear system + solveLinearSystem(m_dofManager, m_matrix, m_rhs, m_solution); + + { + // TODO: add timer for DL implementation + // Timer timer_destroy(m_timers["DL imlementaiton"]); + // Print m_solution size (how many DOFs are being solved) + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, GEOS_FMT("--CustomLog--DLPhysicsSolverBase::solveNonlinearSystemUsingDL - Solution size: {}", m_solution.globalSize())); + shareDLModelInputs(time_n, stepDt, cycleNumber, domain); + readDLModelOutputs(time_n, stepDt, cycleNumber, domain); + } + + // Increment the solver statistics for reporting purposes + getIterationStats().updateNonlinearIteration( m_linearSolverResult.numIterations ); + + // Output the linear system solution for debugging purposes + debugOutputSolution(time_n, cycleNumber, newtonIter, m_solution); + + // TODO: Consider deleting + // // Do not allow non converged linear solver - cut time step + // if (!m_allowNonConvergedLinearSolverSolution && m_linearSolverResult.status == LinearSolverResult::Status::NotConverged) + // { + // return false; + // } + } + + // apply solution and update state + // TODO: check the scale factor logic in DL methods & checkSystemSolution implementation + { + Timer timer( m_timers.get_inserted( "apply solution" ) ); + + // Compute the scaling factor for the Newton update + scaleFactor = scalingForSystemSolution(domain, m_dofManager, m_solution.values()); + + GEOS_LOG_LEVEL_RANK_0(logInfo::Solution, + GEOS_FMT(" {}: Global solution scaling factor = {}", getName(), scaleFactor)); + + // TODO : Consider deleting (check system solution validity) + // if (!checkSystemSolution(domain, m_dofManager, m_solution.values(), scaleFactor)) + // { + // // TODO try chopping (similar to line search) + // GEOS_LOG_RANK_0(GEOS_FMT(" {}: Solution check failed. Newton loop terminated.", getName())); + // break; + // } + + // apply the system solution to the fields/variables + applySystemSolution(m_dofManager, m_solution.values(), scaleFactor, stepDt, domain); + } + + { + Timer timer( m_timers.get_inserted( "update state" ) ); + + // update non-primary variables (constitutive models) + updateState(domain); + } + + lastResidual = residualNorm; + + // Terminate Newton loop if converged + // TODO : consider future scenarios. For now, DL is assumed to always converge within one iteration. 
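+      // A minimal illustrative sketch (an assumption, kept commented out like the blocks above) of how a
+      // residual-based exit could be restored if the one-iteration assumption is relaxed. It reuses
+      // quantities already available in this scope (residualNorm, newtonTol, minNewtonIter):
+      //
+      //   if( residualNorm < newtonTol && newtonIter >= minNewtonIter )
+      //   {
+      //     isNewtonConverged = true;
+      //     break;
+      //   }
+      //   // otherwise keep iterating, up to maxNewtonIter, before reporting non-convergence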
+ { + isNewtonConverged = true; + break; + } + } + + return isNewtonConverged; + } + + void DLPhysicsSolverBase::initializePostInitialConditionsPostSubGroups() + { + + initializeSharedMemories(); + } + + + void DLPhysicsSolverBase::populateDofCoords( DofManager const & /*dofManager*/, + DomainPartition & domain, + string const & elemDofKey ) + { + GEOS_MARK_FUNCTION; + + // TODO : Check implementation of + // SolidMechanicsLagrangianFEM::registerDataOnMesh + // PoromechanicsSolver::registerDataOnMesh + // DLFlowSolverBase::registerDataOnMesh + // DLPhysicsSolverBase::registerDataOnMesh + + // open device-accessible views for writing + arrayView1d< real64 > dofX = m_dofXCoords.open(); + arrayView1d< real64 > dofY = m_dofYCoords.open(); + arrayView1d< real64 > dofZ = m_dofZCoords.open(); + + // initialize with large values so missing entries are noticeable + forAll< parallelHostPolicy >( m_dofXCoords.localSize(), [=] ( localIndex const i ) + { + dofX[i] = 1e99; + dofY[i] = 1e99; + dofZ[i] = 1e99; + } ); + + globalIndex const lowerbound = m_solution.ilower(); + globalIndex const upperbound = m_solution.iupper(); + + // iterate over mesh bodies & element subregions, collect element-centers and element->dof mapping + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + ElementRegionManager & elemManager = mesh.getElemManager(); + + elemManager.forElementSubRegions< CellElementSubRegion >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + // element centers + arrayView2d< real64 const > const elemCenter = subRegion.getElementCenter(); + + // element to dof mapping + arrayView1d< globalIndex const > const & elemDofNumber = + subRegion.getReference< array1d< globalIndex > >( elemDofKey ); + + // for each element, store its center in the corresponding DOF slot + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + globalIndex const eGDof = elemDofNumber[ei]; + // only fill entries owned by this rank + if( eGDof >= lowerbound && eGDof < upperbound ) + { + dofX[eGDof - lowerbound] = elemCenter[ei][0]; + dofY[eGDof - lowerbound] = elemCenter[ei][1]; + dofZ[eGDof - lowerbound] = elemCenter[ei][2]; + } + } ); + } ); + } ); + + m_dofXCoords.close(); + m_dofYCoords.close(); + m_dofZCoords.close(); + + + // // redirect to shared mem stream output + // static std::ofstream logFile_shared_mem("geosx_sharedMem_rank_" + std::to_string(MpiWrapper::commRank(MPI_COMM_GEOS)) + ".log", + // std::ios::out | std::ios::app ); + // std::cout.rdbuf(logFile_shared_mem.rdbuf()); + + // Temporary debug changes in m_dofXCoords, m_dofYCoords, m_dofZCoords + GEOS_LOG_LEVEL(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLSinglePhaseBase::populateDofCoords: Dof X Coords Global Size:{}, Local Size:{} ", m_dofXCoords.globalSize(), m_dofXCoords.localSize())); + m_dofXCoords.print(); + + GEOS_LOG_LEVEL(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLSinglePhaseBase::populateDofCoords: Dof Y Coords Global Size:{}, Local Size:{} ", m_dofYCoords.globalSize(), m_dofYCoords.localSize())); + m_dofYCoords.print(); + + GEOS_LOG_LEVEL(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLSinglePhaseBase::populateDofCoords: Dof Z Coords Global Size:{}, Local Size:{} ", m_dofZCoords.globalSize(), m_dofZCoords.localSize())); + m_dofZCoords.print(); + + + // // redirect to default stream output + // static std::ofstream logFile("geosx_output_rank_" + 
std::to_string(MpiWrapper::commRank(MPI_COMM_GEOS)) + ".log", + // std::ios::out | std::ios::app ); + // std::cout.rdbuf(logFile.rdbuf()); + + } + + + + void DLPhysicsSolverBase::populateDofStrainTrace( DofManager const & /*dofManager*/, + DomainPartition & domain, + string const & elemDofKey ) + { + GEOS_MARK_FUNCTION; + // open device-accessible views for writing + arrayView1d< real64 > dofStrainTrace = m_strainTrace.open(); + // initialize with large values so missing entries are noticeable + forAll< parallelHostPolicy >( m_strainTrace.localSize(), [=] ( localIndex const i ) + { + dofStrainTrace[i] = 1e99; + } ); + + globalIndex const lowerbound = m_solution.ilower(); + globalIndex const upperbound = m_solution.iupper(); + + // iterate over mesh bodies & element subregions + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + ElementRegionManager & elemManager = mesh.getElemManager(); + + elemManager.forElementSubRegions< CellElementSubRegion >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView1d< globalIndex const > const & elemDofNumber = + subRegion.getReference< array1d< globalIndex > >( elemDofKey ); + + fields::solidMechanics::arrayView2dLayoutStrain const strain = + subRegion.getField< fields::solidMechanics::averageStrain >(); + + forAll< parallelHostPolicy >( subRegion.size(), [=] ( localIndex ei ) + { + // TODO: Check strain components indices + real64 const trace = strain[ei][0] + strain[ei][1] + strain[ei][2]; + globalIndex const eGDof = elemDofNumber[ei]; + if( eGDof >= lowerbound && eGDof < upperbound ) + { + dofStrainTrace[eGDof - lowerbound] = trace; + } + } ); + } ); + } ); + + m_strainTrace.close(); + + + // // redirect to shared mem stream output + // static std::ofstream logFile_shared_mem("geosx_sharedMem_rank_" + std::to_string(MpiWrapper::commRank(MPI_COMM_GEOS)) + ".log", + // std::ios::out | std::ios::app ); + // std::cout.rdbuf(logFile_shared_mem.rdbuf()); + + // Temporary debug changes in m_strainTrace + GEOS_LOG_LEVEL(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLSinglePhaseBase::populateDofStrainTrace Dof Strain Trace Global Size:{}, Local Size:{} ", m_strainTrace.globalSize(), m_strainTrace.localSize())); + m_strainTrace.print(); + + // // redirect to default stream output + // static std::ofstream logFile("geosx_output_rank_" + std::to_string(MpiWrapper::commRank(MPI_COMM_GEOS)) + ".log", + // std::ios::out | std::ios::app ); + // std::cout.rdbuf(logFile.rdbuf()); + } + + //TODO: Move to DLPhysicsSolverBase and make it abstract in DLPhysicsSolverBase + void DLPhysicsSolverBase::populateDofPrevSolution( DofManager const & /*dofManager*/, + DomainPartition & domain, + string const & elemDofKey) + { + GEOS_MARK_FUNCTION; + + // Open destination view + arrayView1d< real64 > prevSolView = m_prevSolution.open(); + + // Initialize with a large value for easy spotting of gaps + forAll< parallelHostPolicy >( m_prevSolution.localSize(), [=] ( localIndex const i ) + { + prevSolView[i] = 1e99; + } ); + + // Local ownership bounds for DOF indices + globalIndex const lowerbound = m_solution.ilower(); + globalIndex const upperbound = m_solution.iupper(); + + + // Iterate mesh targets and subregions + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + ElementRegionManager & elemManager = mesh.getElemManager(); + + elemManager.forElementSubRegions< 
CellElementSubRegion >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView1d< globalIndex const > const & elemDofNumber = + subRegion.getReference< array1d< globalIndex > >( elemDofKey ); + + // Previous pressure time level (pressure_n) + // TODO: Double check flow::pressure_n vs flow::pressure + arrayView1d< real64 const > const pres_n = + subRegion.getField< fields::flow::pressure_n >(); + + // Map element scalar to owned DOF slot + forAll< parallelHostPolicy >( subRegion.size(), [=] ( localIndex ei ) + { + globalIndex const eGDof = elemDofNumber[ei]; + if( eGDof >= lowerbound && eGDof < upperbound ) + { + prevSolView[eGDof - lowerbound] = pres_n[ei]; + } + } ); + } ); + } ); + + m_prevSolution.close(); + + // // redirect to shared mem stream output + // static std::ofstream logFile_shared_mem("geosx_sharedMem_rank_" + std::to_string(MpiWrapper::commRank(MPI_COMM_GEOS)) + ".log", + // std::ios::out | std::ios::app ); + // std::cout.rdbuf(logFile_shared_mem.rdbuf()); + + // Temporary debug changes in m_prevSolution + GEOS_LOG_LEVEL(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLSinglePhaseBase::populateDofPrevSolution Dof Prev Solution Global Size:{}, Local Size:{} ", m_prevSolution.globalSize(), m_prevSolution.localSize())); + m_prevSolution.print(); + + // // redirect to default stream output + // static std::ofstream logFile("geosx_output_rank_" + std::to_string(MpiWrapper::commRank(MPI_COMM_GEOS)) + ".log", + // std::ios::out | std::ios::app ); + // std::cout.rdbuf(logFile.rdbuf()); + } + + void DLPhysicsSolverBase::initializeSharedMemories() + { + GEOS_ERROR("DLPhysicsSolverBase::initializeSharedMemories called!. Should be overridden."); + } + + void DLPhysicsSolverBase::shareDLModelInputs(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain) + { + GEOS_UNUSED_VAR(time_n, dt, cycleNumber, domain); + GEOS_ERROR("DLPhysicsSolverBase::shareDLModelInputs called!. Should be overridden."); + } + + void DLPhysicsSolverBase::readDLModelOutputs(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain) + { + GEOS_UNUSED_VAR(time_n, dt, cycleNumber, domain); + GEOS_ERROR("DLPhysicsSolverBase::readDLModelOutputs called!. Should be overridden."); + } + +#if defined(GEOS_USE_PYGEOSX) + PyTypeObject *DLPhysicsSolverBase::getPythonType() const + { + return python::getPySolverType(); + } +#endif + +} // namespace geos diff --git a/src/coreComponents/physicsSolvers/DLPhysicsSolverBase.hpp b/src/coreComponents/physicsSolvers/DLPhysicsSolverBase.hpp new file mode 100644 index 00000000000..783441d7f57 --- /dev/null +++ b/src/coreComponents/physicsSolvers/DLPhysicsSolverBase.hpp @@ -0,0 +1,213 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLPhysicsSolverBase.hpp + */ + +#ifndef GEOS_PHYSICSSOLVERS_DLPHYSICSSOLVERBASE_HPP_ +#define GEOS_PHYSICSSOLVERS_DLPHYSICSSOLVERBASE_HPP_ + +#include "codingUtilities/traits.hpp" +#include "common/DataTypes.hpp" +#include "common/format/LogPart.hpp" +#include "dataRepository/ExecutableGroup.hpp" +#include "dataRepository/RestartFlags.hpp" +#include "linearAlgebra/interfaces/InterfaceTypes.hpp" +#include "linearAlgebra/utilities/LinearSolverResult.hpp" +#include "linearAlgebra/DofManager.hpp" +#include "mesh/MeshBody.hpp" +#include "physicsSolvers/NonlinearSolverParameters.hpp" +#include "physicsSolvers/LinearSolverParameters.hpp" +#include "physicsSolvers/SolverStatistics.hpp" +#include "physicsSolvers/PhysicsSolverBase.hpp" +#include "DLSharedMemoryManager.hpp" +#include "physicsSolvers/solidMechanics/SolidMechanicsFields.hpp" +#include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" + +#include + +namespace geos +{ + + class DomainPartition; + + /** + * @class DLPhysicsSolverBase + * @brief Base class for all DL physics solvers + * + * This class provides the base interface for all DL physics solvers. It provides the basic + * functionality for setting up and solving a system using DL, as well as the interface for + * performing a timestep. + */ + class DLPhysicsSolverBase : public PhysicsSolverBase + { + public: + /** + * @brief Constructor for DLPhysicsSolverBase + * @param name the name of this instantiation of DLPhysicsSolverBase + * @param parent the parent group of this instantiation of DLPhysicsSolverBase + */ + explicit DLPhysicsSolverBase(string const &name, + Group *const parent); + + /** + * @brief Move constructor for DLPhysicsSolverBase + */ + DLPhysicsSolverBase(DLPhysicsSolverBase &&) = default; + + /** + * @brief Destructor for DLPhysicsSolverBase + */ + virtual ~DLPhysicsSolverBase() override; + + /** + * @brief Deleted constructor + */ + DLPhysicsSolverBase() = delete; + + /** + * @brief Deleted copy constructor + */ + DLPhysicsSolverBase(DLPhysicsSolverBase const &) = delete; + + /** + * @brief Deleted copy assignment operator + */ + DLPhysicsSolverBase &operator=(DLPhysicsSolverBase const &) = delete; + + /** + * @brief Deleted move assignment operator + */ + DLPhysicsSolverBase &operator=(DLPhysicsSolverBase &&) = delete; + + /** + * @brief Function for a nonlinear implicit integration step + * @param time_n time at the beginning of the step + * @param dt the perscribed timestep + * @param cycleNumber the current cycle number + * @param domain the domain object + * @return return the timestep that was achieved during the step. + * + * This function implements a nonlinear newton method for implicit DL problems. It requires that the + * other functions in the solver interface are implemented in the derived physics solver. 
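+   * @note A rough sketch of the per-iteration workflow used by solveNonlinearSystemUsingDL()
+   * (all calls below are members of this solver hierarchy):
+   * @code
+   *   assembleSystem( ... );          // populates the inputs required by the DL model
+   *   applyBoundaryConditions( ... );
+   *   shareDLModelInputs( ... );      // writes the model inputs to shared memory
+   *   readDLModelOutputs( ... );      // reads the DL prediction back from shared memory
+   *   applySystemSolution( ... );     // applies the predicted update to the solution fields
+   *   updateState( domain );          // updates constitutive / secondary variables
+   * @endcode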
+ */ + virtual real64 nonlinearImplicitStep(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain) override; + + + + virtual void + setupSystem( DomainPartition & domain, + DofManager & dofManager, + CRSMatrix< real64, globalIndex > & localMatrix, + ParallelVector & rhs, + ParallelVector & solution, + bool const setSparsity = true ) override; + + + protected: + /** + * @brief Solve a nonlinear system using a DL approach + * @param time_n the time at the beginning of the step + * @param dt the desired timestep + * @param cycleNumber the current cycle number + * @param domain the domain partition + * @return true if the nonlinear system was solved, false otherwise + */ + virtual bool solveNonlinearSystemUsingDL(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain); + + virtual void initializePostInitialConditionsPostSubGroups() override; + + + /** + * @brief Populate m_dofXCoords/m_dofYCoords/m_dofZCoords for different dofs. + * + * @param dofManager DofManager holding DOF layout and rank offset + * @param domain DomainPartition used to iterate mesh levels / element subregions + * @param elemDofFieldKey The field key under which element DOF numbers are stored (e.g. "singlePhaseVariables") + */ + virtual void populateDofCoords( DofManager const & dofManager, + DomainPartition & domain, + string const & elemDofFieldKey ); + + /** + * @brief Populate m_strainTrace for different dofs. + * + * @param dofManager DofManager holding DOF layout and rank offset + * @param domain DomainPartition used to iterate mesh levels / element subregions + * @param elemDofFieldKey The field key under which element DOF numbers are stored (e.g. "singlePhaseVariables") + */ + virtual void populateDofStrainTrace( DofManager const & dofManager, + DomainPartition & domain, + string const & elemDofFieldKey ); + + /** + * @brief Populate m_prevSolution for different dofs. + * + * @param dofManager DofManager holding DOF layout and rank offset + * @param domain DomainPartition used to iterate mesh levels / element subregions + * @param elemDofFieldKey The field key under which element DOF numbers are stored (e.g. "singlePhaseVariables") + */ + virtual void populateDofPrevSolution( DofManager const & dofManager, + DomainPartition & domain, + string const & elemDofFieldKey ); + + + /** + * @brief Initialize shared memories needed for DL simulations + * @return void + */ + virtual void initializeSharedMemories(); + + /** + * @brief Share the DL model inputs through shared memory + * @return void + */ + virtual void shareDLModelInputs(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain); + + /** + * @brief Read the DL model outputs through shared memory + * @return void + */ + virtual void readDLModelOutputs(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain); + + DLSharedMemoryManager m_sharedMemoryManager; + + // Data vectors for DL solver + ParallelVector m_dofXCoords; + ParallelVector m_dofYCoords; + ParallelVector m_dofZCoords; + ParallelVector m_prevSolution; + ParallelVector m_strainTrace; //TODO: Consider Removing/Moving to a more suitable class. 
Like IFENNPoroMechanics + + + private: + }; + +} // namespace geos + +#endif /* GEOS_PHYSICSSOLVERS_DLPHYSICSSOLVERBASE_HPP_ */ diff --git a/src/coreComponents/physicsSolvers/DLSharedMemoryManager.cpp b/src/coreComponents/physicsSolvers/DLSharedMemoryManager.cpp new file mode 100644 index 00000000000..7d73c6bea5f --- /dev/null +++ b/src/coreComponents/physicsSolvers/DLSharedMemoryManager.cpp @@ -0,0 +1,63 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + +#include "DLSharedMemoryManager.hpp" +#include "PhysicsSolverManager.hpp" + +#include "physicsSolvers/LogLevelsInfo.hpp" +#include "common/format/LogPart.hpp" +#include "common/TimingMacros.hpp" +#include "linearAlgebra/solvers/KrylovSolver.hpp" +#include "mesh/DomainPartition.hpp" +#include "math/interpolation/Interpolation.hpp" +#include "common/Timer.hpp" +#include "common/Units.hpp" +#include "dataRepository/LogLevelsInfo.hpp" + +#if defined(GEOS_USE_PYGEOSX) +#include "python/PySolverType.hpp" +#endif + +namespace geos +{ + + using namespace dataRepository; + + DLSharedMemoryManager::DLSharedMemoryManager(string const &name, + Group *const parent) + : dataRepository::Group(name, parent) + { + } + + DLSharedMemoryManager::~DLSharedMemoryManager() = default; + + + sem_t* DLSharedMemoryManager::openASemaphore(const std::string &mem_name) + { + // TODO: check access permissions and handle errors + sem_t* ptr = sem_open(mem_name.c_str(), O_CREAT | O_RDWR, 0666, 0); + + return ptr; + } + + +#if defined(GEOS_USE_PYGEOSX) + PyTypeObject *DLSharedMemoryManager::getPythonType() const + { + return python::getPySolverType(); + } +#endif + +} // namespace geos diff --git a/src/coreComponents/physicsSolvers/DLSharedMemoryManager.hpp b/src/coreComponents/physicsSolvers/DLSharedMemoryManager.hpp new file mode 100644 index 00000000000..f4b37539398 --- /dev/null +++ b/src/coreComponents/physicsSolvers/DLSharedMemoryManager.hpp @@ -0,0 +1,132 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLSharedMemoryManager.hpp + */ + +#ifndef GEOS_PHYSICSSOLVERS_DLSHAREDMEMORYMANAGER_HPP_ +#define GEOS_PHYSICSSOLVERS_DLSHAREDMEMORYMANAGER_HPP_ + +#include "codingUtilities/traits.hpp" +#include "common/DataTypes.hpp" +#include "common/format/LogPart.hpp" +#include "dataRepository/ExecutableGroup.hpp" +#include "dataRepository/RestartFlags.hpp" +#include "linearAlgebra/interfaces/InterfaceTypes.hpp" +#include "linearAlgebra/utilities/LinearSolverResult.hpp" +#include "linearAlgebra/DofManager.hpp" +#include "mesh/MeshBody.hpp" +#include "physicsSolvers/NonlinearSolverParameters.hpp" +#include "physicsSolvers/LinearSolverParameters.hpp" +#include "physicsSolvers/SolverStatistics.hpp" +#include "physicsSolvers/PhysicsSolverBase.hpp" + +// Shared memory includes +#include // Provides functions for memory management (e.g., mmap, munmap). +#include // Provides file control options (e.g., open, O_CREAT). +#include // Provides access to the POSIX operating system API (e.g., read, write, close, fork, exec). +#include // Provides functions for string manipulation (e.g., memcpy, memmove, memset, strlen). +#include // Provides POSIX semaphore functionality for inter-process synchronization (e.g., sem_open, sem_wait, sem_post). + +#include + +namespace geos +{ + + /** + * @class DLSharedMemoryManager + * @brief a class for intitalizing and managing shared memory in DL simulations + * + */ + class DLSharedMemoryManager : public dataRepository::Group + { + public: + /** + * @brief Constructor for DLSharedMemoryManager + * @param name the name of this instantiation of DLSharedMemoryManager + * @param parent the parent group of this instantiation of DLSharedMemoryManager + */ + explicit DLSharedMemoryManager(string const &name, + Group *const parent); + + /** + * @brief Move constructor for DLSharedMemoryManager + */ + DLSharedMemoryManager(DLSharedMemoryManager &&) = default; + + /** + * @brief Destructor for DLSharedMemoryManager + */ + virtual ~DLSharedMemoryManager() override; + + /** + * @brief Deleted constructor + */ + DLSharedMemoryManager() = delete; + + /** + * @brief Deleted copy constructor + */ + DLSharedMemoryManager(DLSharedMemoryManager const &) = delete; + + /** + * @brief Deleted copy assignment operator + */ + DLSharedMemoryManager &operator=(DLSharedMemoryManager const &) = delete; + + /** + * @brief Deleted move assignment operator + */ + DLSharedMemoryManager &operator=(DLSharedMemoryManager &&) = delete; + + // Function to initialize a shared memory for a struct + // As a template, the full definition will be in the header file + template + T * initializeASharedMemory(const std::string &mem_name, size_t size) + { + // TODO: check access permissions and handling errors + int shm_fd = shm_open(mem_name.c_str(), O_CREAT | O_RDWR, 0666); + if (shm_fd == -1) + { + std::cerr << "Error opening shared memory object: " << mem_name << std::endl; + return nullptr; + } + + if (ftruncate(shm_fd, size) == -1) + { + std::cerr << "Error in ftruncate for shared memory: " << mem_name << std::endl; + return nullptr; + } + + void *ptr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, shm_fd, 0); + if (ptr == MAP_FAILED) + { + std::cerr << "Error mapping shared memory: " << mem_name << std::endl; + return nullptr; + } + + return static_cast(ptr); + } + + sem_t *openASemaphore(const std::string &mem_name); + + protected: + private: + }; + +} // namespace geos + +#endif 
/* GEOS_PHYSICSSOLVERS_DLSHAREDMEMORYMANAGER_HPP_ */ diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CMakeLists.txt b/src/coreComponents/physicsSolvers/fluidFlow/CMakeLists.txt index 7e6a227ee02..01fbe4e1e21 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CMakeLists.txt +++ b/src/coreComponents/physicsSolvers/fluidFlow/CMakeLists.txt @@ -32,6 +32,9 @@ set( fluidFlowSolvers_headers ReactiveCompositionalMultiphaseOBL.hpp ReactiveCompositionalMultiphaseOBLFields.hpp SourceFluxStatistics.hpp + DLFlowSolverBase.hpp + DLSinglePhaseBase.hpp + DLSinglePhaseFVM.hpp SinglePhaseBase.hpp SinglePhaseBaseFields.hpp SinglePhaseStatistics.hpp @@ -134,6 +137,9 @@ set( fluidFlowSolvers_sources ImmiscibleMultiphaseFlow.cpp ReactiveCompositionalMultiphaseOBL.cpp FlowSolverBase.cpp + DLFlowSolverBase.cpp + DLSinglePhaseBase.cpp + DLSinglePhaseFVM.cpp SinglePhaseBase.cpp SinglePhaseStatistics.cpp SinglePhaseFVM.cpp diff --git a/src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.cpp new file mode 100644 index 00000000000..995fcb6f824 --- /dev/null +++ b/src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.cpp @@ -0,0 +1,1119 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLFlowSolverBase.cpp + */ + +#include "DLFlowSolverBase.hpp" +#include "mainInterface/ProblemManager.hpp" + +#include "constitutive/ConstitutivePassThru.hpp" +#include "constitutive/permeability/PermeabilityFields.hpp" +#include "constitutive/solid/SolidInternalEnergy.hpp" +#include "discretizationMethods/NumericalMethodsManager.hpp" +#include "fieldSpecification/AquiferBoundaryCondition.hpp" +#include "fieldSpecification/EquilibriumInitialCondition.hpp" +#include "fieldSpecification/FieldSpecificationManager.hpp" +#include "fieldSpecification/SourceFluxBoundaryCondition.hpp" +#include "finiteVolume/FiniteVolumeManager.hpp" +#include "finiteVolume/FluxApproximationBase.hpp" +#include "mesh/DomainPartition.hpp" +#include "physicsSolvers/LogLevelsInfo.hpp" +#include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" +#include "physicsSolvers/fluidFlow/kernels/MinPoreVolumeMaxPorosityKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/StencilWeightsUpdateKernel.hpp" + +namespace geos +{ + +using namespace dataRepository; +using namespace constitutive; +using namespace fields; + +template< typename POROUSWRAPPER_TYPE > +void updatePorosityAndPermeabilityFromPressureAndTemperature( POROUSWRAPPER_TYPE porousWrapper, + CellElementSubRegion & subRegion, + arrayView1d< real64 const > const & pressure, + arrayView1d< real64 const > const & temperature ) +{ + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_DEVICE ( localIndex const k ) + { + for( localIndex q = 0; q < porousWrapper.numGauss(); ++q ) + { + porousWrapper.updateStateFromPressureAndTemperature( k, q, + pressure[k], + temperature[k] ); + } + } ); +} + +template< typename POROUSWRAPPER_TYPE > +void updatePorosityAndPermeabilityFixedStress( POROUSWRAPPER_TYPE porousWrapper, + CellElementSubRegion & subRegion, + arrayView1d< real64 const > const & pressure, + arrayView1d< real64 const > const & pressure_k, + arrayView1d< real64 const > const & pressure_n, + arrayView1d< real64 const > const & temperature, + arrayView1d< real64 const > const & temperature_k, + arrayView1d< real64 const > const & temperature_n ) +{ + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_DEVICE ( localIndex const k ) + { + for( localIndex q = 0; q < porousWrapper.numGauss(); ++q ) + { + porousWrapper.updateStateFixedStress( k, q, + pressure[k], + pressure_k[k], + pressure_n[k], + temperature[k], + temperature_k[k], + temperature_n[k] ); + } + } ); +} + +template< typename POROUSWRAPPER_TYPE > +void updatePorosityAndPermeabilityFromPressureAndAperture( POROUSWRAPPER_TYPE porousWrapper, + SurfaceElementSubRegion & subRegion, + arrayView1d< real64 const > const & pressure, + arrayView1d< real64 const > const & oldHydraulicAperture, + arrayView1d< real64 const > const & newHydraulicAperture ) +{ + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_DEVICE ( localIndex const k ) + { + for( localIndex q = 0; q < porousWrapper.numGauss(); ++q ) + { + porousWrapper.updateStateFromPressureAndAperture( k, q, + pressure[k], + oldHydraulicAperture[k], + newHydraulicAperture[k] ); + } + } ); +} + +DLFlowSolverBase::DLFlowSolverBase( string const & name, + Group * const parent ): + DLPhysicsSolverBase( name, parent ), + m_numDofPerCell( 0 ), + m_isThermal( 0 ), + m_keepVariablesConstantDuringInitStep( false ), + m_isFixedStressPoromechanicsUpdate( false ), + m_isJumpStabilized( false ), + 
m_isLaggingFractureStencilWeightsUpdate( 0 ) +{ + this->registerWrapper( viewKeyStruct::isThermalString(), &m_isThermal ). + setApplyDefaultValue( 0 ). + setInputFlag( InputFlags::OPTIONAL ). + setDescription( "Flag indicating whether the problem is thermal or not." ); + + this->registerWrapper( viewKeyStruct::allowNegativePressureString(), &m_allowNegativePressure ). + setApplyDefaultValue( 0 ). // negative pressure is not allowed by default + setInputFlag( InputFlags::OPTIONAL ). + setDescription( "Flag indicating if negative pressure is allowed" ); + + this->registerWrapper( viewKeyStruct::maxAbsolutePresChangeString(), &m_maxAbsolutePresChange ). + setSizedFromParent( 0 ). + setInputFlag( InputFlags::OPTIONAL ). + setApplyDefaultValue( -1.0 ). // disabled by default + setDescription( "Maximum (absolute) pressure change in a Newton iteration" ); + + this->registerWrapper( viewKeyStruct::maxSequentialPresChangeString(), &m_maxSequentialPresChange ). + setSizedFromParent( 0 ). + setInputFlag( InputFlags::OPTIONAL ). + setApplyDefaultValue( 1e5 ). // 0.1 bar = 1e5 Pa + setDescription( "Maximum (absolute) pressure change in a sequential iteration, used for outer loop convergence check" ); + + this->registerWrapper( viewKeyStruct::maxSequentialTempChangeString(), &m_maxSequentialTempChange ). + setSizedFromParent( 0 ). + setInputFlag( InputFlags::OPTIONAL ). + setApplyDefaultValue( 0.1 ). + setDescription( "Maximum (absolute) temperature change in a sequential iteration, used for outer loop convergence check" ); + + // allow the user to select a norm + getNonlinearSolverParameters().getWrapper< physicsSolverBaseKernels::NormType >( NonlinearSolverParameters::viewKeysStruct::normTypeString() ).setInputFlag( InputFlags::OPTIONAL ); +} + +void DLFlowSolverBase::registerDataOnMesh( Group & meshBodies ) +{ + DLPhysicsSolverBase::registerDataOnMesh( meshBodies ); + + forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + + ElementRegionManager & elemManager = mesh.getElemManager(); + + elemManager.forElementSubRegions< ElementSubRegionBase >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + subRegion.registerField< flow::deltaVolume >( getName() ); + subRegion.registerField< flow::gravityCoefficient >( getName() ); + subRegion.registerField< flow::netToGross >( getName() ); + + subRegion.registerField< flow::pressure >( getName() ); + subRegion.registerField< flow::pressure_n >( getName() ); + subRegion.registerField< flow::initialPressure >( getName() ); + subRegion.registerField< flow::deltaPressure >( getName() ); // for reporting/stats purposes + subRegion.registerField< flow::bcPressure >( getName() ); // needed for the application of boundary conditions + if( m_isFixedStressPoromechanicsUpdate ) + { + subRegion.registerField< flow::pressure_k >( getName() ); // needed for the fixed-stress porosity update + } + + subRegion.registerField< flow::temperature >( getName() ); + subRegion.registerField< flow::temperature_n >( getName() ); + subRegion.registerField< flow::initialTemperature >( getName() ); + subRegion.registerField< flow::bcTemperature >( getName() ); // needed for the application of boundary conditions + if( m_isFixedStressPoromechanicsUpdate ) + { + subRegion.registerField< flow::temperature_k >( getName() ); // needed for the fixed-stress porosity update + } + if( m_isThermal ) + { + subRegion.registerField< flow::energy >( getName() ); + subRegion.registerField< 
flow::energy_n >( getName() ); + } + } ); + + elemManager.forElementSubRegionsComplete< SurfaceElementSubRegion >( [&]( localIndex const, + localIndex const, + ElementRegionBase & region, + SurfaceElementSubRegion & subRegion ) + { + SurfaceElementRegion & faceRegion = dynamicCast< SurfaceElementRegion & >( region ); + + subRegion.registerField< flow::aperture0 >( getName() ). + setApplyDefaultValue( faceRegion.getDefaultAperture() ); + + subRegion.registerField< flow::hydraulicAperture >( getName() ). + setApplyDefaultValue( faceRegion.getDefaultAperture() ); + + } ); + + FaceManager & faceManager = mesh.getFaceManager(); + { + faceManager.registerField< flow::facePressure >( getName() ); + faceManager.registerField< flow::gravityCoefficient >( getName() ); + faceManager.registerField< flow::transMultiplier >( getName() ); + } + + } ); + + DomainPartition & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" ); + + // fill stencil targetRegions + NumericalMethodsManager & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager & fvManager = numericalMethodManager.getFiniteVolumeManager(); + + if( fvManager.hasGroup< FluxApproximationBase >( m_discretizationName ) ) + { + + FluxApproximationBase & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + fluxApprox.addFieldName( flow::pressure::key() ); + fluxApprox.setCoeffName( permeability::permeability::key() ); + if( m_isThermal ) + { + fluxApprox.addFieldName( flow::temperature::key() ); + } + } +} + +void DLFlowSolverBase::saveConvergedState( ElementSubRegionBase & subRegion ) const +{ + arrayView1d< real64 const > const pres = subRegion.template getField< flow::pressure >(); + arrayView1d< real64 > const pres_n = subRegion.template getField< flow::pressure_n >(); + pres_n.setValues< parallelDevicePolicy<> >( pres ); + + arrayView1d< real64 const > const temp = subRegion.template getField< flow::temperature >(); + arrayView1d< real64 > const temp_n = subRegion.template getField< flow::temperature_n >(); + temp_n.setValues< parallelDevicePolicy<> >( temp ); + + if( m_isThermal ) + { + arrayView1d< real64 const > const energy = subRegion.template getField< flow::energy >(); + arrayView1d< real64 > const energy_n = subRegion.template getField< flow::energy_n >(); + energy_n.setValues< parallelDevicePolicy<> >( energy ); + } + + if( m_isFixedStressPoromechanicsUpdate ) + { + arrayView1d< real64 > const pres_k = subRegion.template getField< flow::pressure_k >(); + arrayView1d< real64 > const temp_k = subRegion.template getField< flow::temperature_k >(); + pres_k.setValues< parallelDevicePolicy<> >( pres ); + temp_k.setValues< parallelDevicePolicy<> >( temp ); + } +} + +void DLFlowSolverBase::saveSequentialIterationState( DomainPartition & domain ) +{ + GEOS_ASSERT( m_isFixedStressPoromechanicsUpdate ); + + real64 maxPresChange = 0.0; + real64 maxTempChange = 0.0; + forDiscretizationOnMeshTargets ( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions ( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + + arrayView1d< real64 const > const pres = subRegion.getField< flow::pressure >(); + arrayView1d< real64 > const pres_k = subRegion.getField< flow::pressure_k >(); + arrayView1d< real64 const > const temp = subRegion.getField< flow::temperature >(); + arrayView1d< real64 > const temp_k = 
subRegion.getField< flow::temperature_k >(); + + RAJA::ReduceMax< parallelDeviceReduce, real64 > subRegionMaxPresChange( 0.0 ); + RAJA::ReduceMax< parallelDeviceReduce, real64 > subRegionMaxTempChange( 0.0 ); + + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + if( ghostRank[ei] < 0 ) + { + subRegionMaxPresChange.max( LvArray::math::abs( pres[ei] - pres_k[ei] ) ); + pres_k[ei] = pres[ei]; + subRegionMaxTempChange.max( LvArray::math::abs( temp[ei] - temp_k[ei] ) ); + temp_k[ei] = temp[ei]; + } + } ); + + maxPresChange = LvArray::math::max( maxPresChange, subRegionMaxPresChange.get() ); + maxTempChange = LvArray::math::max( maxTempChange, subRegionMaxTempChange.get() ); + } ); + } ); + + // store to be later used in convergence check + m_sequentialPresChange = MpiWrapper::max( maxPresChange ); + m_sequentialTempChange = m_isThermal ? MpiWrapper::max( maxTempChange ) : 0.0; +} + +void DLFlowSolverBase::setConstitutiveNamesCallSuper( ElementSubRegionBase & subRegion ) const +{ + DLPhysicsSolverBase::setConstitutiveNamesCallSuper( subRegion ); + + setConstitutiveName< CoupledSolidBase >( subRegion, viewKeyStruct::solidNamesString(), "coupled solid" ); + + setConstitutiveName< PermeabilityBase >( subRegion, viewKeyStruct::permeabilityNamesString(), "permeability" ); + + if( m_isThermal ) + { + setConstitutiveName< SolidInternalEnergy >( subRegion, viewKeyStruct::solidInternalEnergyNamesString(), "solid internal energy" ); + } +} + +void DLFlowSolverBase::setConstitutiveNames( ElementSubRegionBase & subRegion ) const +{ + GEOS_UNUSED_VAR( subRegion ); +} + +void DLFlowSolverBase::initializePreSubGroups() +{ + DLPhysicsSolverBase::initializePreSubGroups(); + + DomainPartition & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" ); + + // fill stencil targetRegions + NumericalMethodsManager & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager & fvManager = numericalMethodManager.getFiniteVolumeManager(); + + if( fvManager.hasGroup< FluxApproximationBase >( m_discretizationName ) ) + { + FluxApproximationBase & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const & meshBodyName, + MeshLevel &, + string_array const & regionNames ) + { + string_array & stencilTargetRegions = fluxApprox.targetRegions( meshBodyName ); + std::set< string > stencilTargetRegionsSet( stencilTargetRegions.begin(), stencilTargetRegions.end() ); + stencilTargetRegionsSet.insert( regionNames.begin(), regionNames.end() ); + stencilTargetRegions.clear(); + for( auto const & targetRegion: stencilTargetRegionsSet ) + { + stencilTargetRegions.emplace_back( targetRegion ); + } + } ); + } +} + +void DLFlowSolverBase::checkDiscretizationName() const +{ + DomainPartition const & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" ); + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & finiteVolumeManager = numericalMethodManager.getFiniteVolumeManager(); + + if( !finiteVolumeManager.hasGroup< FluxApproximationBase >( m_discretizationName ) ) + { + string_array discretizationMethods; + finiteVolumeManager.forSubGroups< FluxApproximationBase >( [&]( FluxApproximationBase const & fv ) + { + discretizationMethods.push_back( fv.getName() ); + } ); + + if( !discretizationMethods.empty()) + { + GEOS_ERROR( GEOS_FMT( "{}: can not find discretization named '{}' in 
'FiniteVolume'.\nFound discretization : {}", + getDataContext(), m_discretizationName, discretizationMethods, + stringutilities::join( discretizationMethods, ", " ))); + } + else + { + GEOS_ERROR( GEOS_FMT( "{}: can not find discretization named '{}' in 'FiniteVolume'.\n" \ + "No discretization found, check that you have correctly entered a numerical method", + getDataContext(), m_discretizationName )); + } + } +} + +void DLFlowSolverBase::validatePoreVolumes( DomainPartition const & domain ) const +{ + real64 minPoreVolume = LvArray::NumericLimits< real64 >::max; + real64 maxPorosity = -LvArray::NumericLimits< real64 >::max; + globalIndex numElemsBelowPoreVolumeThreshold = 0; + globalIndex numElemsAbovePorosityThreshold = 0; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel const & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions< ElementSubRegionBase >( regionNames, [&]( localIndex const, + ElementSubRegionBase const & subRegion ) + { + + string const & solidName = subRegion.template getReference< string >( viewKeyStruct::solidNamesString() ); + CoupledSolidBase const & porousMaterial = getConstitutiveModel< CoupledSolidBase >( subRegion, solidName ); + + arrayView2d< real64 const > const porosity = porousMaterial.getPorosity(); + arrayView1d< real64 const > const volume = subRegion.getElementVolume(); + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + + real64 minPoreVolumeInSubRegion = 0.0; + real64 maxPorosityInSubRegion = 0.0; + localIndex numElemsBelowPoreVolumeThresholdInSubRegion = 0; + localIndex numElemsAbovePorosityThresholdInSubRegion = 0; + + flowSolverBaseKernels::MinPoreVolumeMaxPorosityKernel:: + computeMinPoreVolumeMaxPorosity( subRegion.size(), + ghostRank, + porosity, + volume, + minPoreVolumeInSubRegion, + maxPorosityInSubRegion, + numElemsBelowPoreVolumeThresholdInSubRegion, + numElemsAbovePorosityThresholdInSubRegion ); + + if( minPoreVolumeInSubRegion < minPoreVolume ) + { + minPoreVolume = minPoreVolumeInSubRegion; + } + if( maxPorosityInSubRegion > maxPorosity ) + { + maxPorosity = maxPorosityInSubRegion; + } + + numElemsBelowPoreVolumeThreshold += numElemsBelowPoreVolumeThresholdInSubRegion; + numElemsAbovePorosityThreshold += numElemsAbovePorosityThresholdInSubRegion; + } ); + } ); + + minPoreVolume = MpiWrapper::min( minPoreVolume ); + maxPorosity = MpiWrapper::max( maxPorosity ); + numElemsBelowPoreVolumeThreshold = MpiWrapper::sum( numElemsBelowPoreVolumeThreshold ); + numElemsAbovePorosityThreshold = MpiWrapper::sum( numElemsAbovePorosityThreshold ); + + GEOS_LOG_RANK_0_IF( numElemsBelowPoreVolumeThreshold > 0, + GEOS_FMT( "\nWarning! The mesh contains {} elements with a pore volume below {} m^3." + "\nThe minimum pore volume is {} m^3." + "\nOur recommendation is to check the validity of mesh and/or increase the porosity in these elements.\n", + numElemsBelowPoreVolumeThreshold, flowSolverBaseKernels::poreVolumeThreshold, minPoreVolume ) ); + GEOS_LOG_RANK_0_IF( numElemsAbovePorosityThreshold > 0, + GEOS_FMT( "\nWarning! The mesh contains {} elements with a porosity above 1." 
+ "\nThe maximum porosity is {}.\n", + numElemsAbovePorosityThreshold, maxPorosity ) ); +} + +void DLFlowSolverBase::initializePostInitialConditionsPreSubGroups() +{ + DLPhysicsSolverBase::initializePostInitialConditionsPreSubGroups(); + + DomainPartition & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + precomputeData( mesh, regionNames ); + + FieldIdentifiers fieldsToBeSync; + fieldsToBeSync.addElementFields( { flow::pressure::key(), flow::temperature::key() }, + regionNames ); + + CommunicationTools::getInstance().synchronizeFields( fieldsToBeSync, mesh, domain.getNeighbors(), false ); + } ); +} + +void DLFlowSolverBase::precomputeData( MeshLevel & mesh, + string_array const & regionNames ) +{ + FaceManager & faceManager = mesh.getFaceManager(); + real64 const gravVector[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3( gravityVector() ); + + mesh.getElemManager().forElementSubRegions< ElementSubRegionBase >( regionNames, [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView2d< real64 const > const elemCenter = subRegion.getElementCenter(); + + arrayView1d< real64 > const gravityCoef = + subRegion.getField< flow::gravityCoefficient >(); + + forAll< parallelHostPolicy >( subRegion.size(), [=] ( localIndex const ei ) + { + gravityCoef[ ei ] = LvArray::tensorOps::AiBi< 3 >( elemCenter[ ei ], gravVector ); + } ); + } ); + + { + arrayView2d< real64 const > const faceCenter = faceManager.faceCenter(); + + arrayView1d< real64 > const gravityCoef = + faceManager.getField< flow::gravityCoefficient >(); + + forAll< parallelHostPolicy >( faceManager.size(), [=] ( localIndex const kf ) + { + gravityCoef[ kf ] = LvArray::tensorOps::AiBi< 3 >( faceCenter[ kf ], gravVector ); + } ); + } +} + +void DLFlowSolverBase::initializeState( DomainPartition & domain ) +{ + // Compute hydrostatic equilibrium in the regions for which corresponding field specification tag has been specified + computeHydrostaticEquilibrium( domain ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + initializePorosityAndPermeability( mesh, regionNames ); + initializeHydraulicAperture( mesh, regionNames ); + + // Initialize primary variables from applied initial conditions + initializeFluidState( mesh, regionNames ); + + // Initialize the rock thermal quantities: conductivity and solid internal energy + // Note: + // - This must be called after updatePorosityAndPermeability and updatePhaseVolumeFraction + // - This step depends on porosity and phaseVolFraction + if( m_isThermal ) + { + initializeThermalState( mesh, regionNames ); + } + + // Save initial pressure and temperature fields + saveInitialPressureAndTemperature( mesh, regionNames ); + } ); + + // report to the user if some pore volumes are very small + // note: this function is here because: 1) porosity has been initialized and 2) NTG has been applied + validatePoreVolumes( domain ); +} + +void DLFlowSolverBase::initializePorosityAndPermeability( MeshLevel & mesh, string_array const & regionNames ) +{ + // Update porosity and permeability + // In addition, to avoid multiplying permeability/porosity bay netToGross in the assembly kernel, we do it once and for all here + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + auto & 
subRegion ) + { + // Apply netToGross to reference porosity and horizontal permeability + CoupledSolidBase const & porousSolid = + getConstitutiveModel< CoupledSolidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::solidNamesString() ) ); + PermeabilityBase const & permeability = + getConstitutiveModel< PermeabilityBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::permeabilityNamesString() ) ); + arrayView1d< real64 const > const netToGross = subRegion.template getField< flow::netToGross >(); + porousSolid.scaleReferencePorosity( netToGross ); + permeability.scaleHorizontalPermeability( netToGross ); + + // in some initializeState versions it uses newPorosity, so let's run updatePorosityAndPermeability to compute something + saveConvergedState( subRegion ); // necessary for a meaningful porosity update in sequential schemes + updatePorosityAndPermeability( subRegion ); + porousSolid.initializeState(); + + // run final update + saveConvergedState( subRegion ); // necessary for a meaningful porosity update in sequential schemes + updatePorosityAndPermeability( subRegion ); + + // Save the computed porosity into the old porosity + // Note: + // - This must be called after updatePorosityAndPermeability + // - This step depends on porosity + porousSolid.saveConvergedState(); + } ); +} + +void DLFlowSolverBase::initializeHydraulicAperture( MeshLevel & mesh, string_array const & regionNames ) +{ + mesh.getElemManager().forElementRegions< SurfaceElementRegion >( regionNames, + [&]( localIndex const, + SurfaceElementRegion & region ) + { + region.forElementSubRegions< SurfaceElementSubRegion >( [&]( SurfaceElementSubRegion & subRegion ) + { subRegion.getWrapper< real64_array >( flow::hydraulicAperture::key()).setApplyDefaultValue( region.getDefaultAperture()); } ); + } ); +} + +void DLFlowSolverBase::saveInitialPressureAndTemperature( MeshLevel & mesh, string_array const & regionNames ) +{ + mesh.getElemManager().forElementSubRegions( regionNames, [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView1d< real64 const > const pres = subRegion.getField< flow::pressure >(); + arrayView1d< real64 > const initPres = subRegion.getField< flow::initialPressure >(); + arrayView1d< real64 const > const temp = subRegion.getField< flow::temperature >(); + arrayView1d< real64 > const initTemp = subRegion.template getField< flow::initialTemperature >(); + initPres.setValues< parallelDevicePolicy<> >( pres ); + initTemp.setValues< parallelDevicePolicy<> >( temp ); + } ); +} + +void DLFlowSolverBase::updatePorosityAndPermeability( CellElementSubRegion & subRegion ) const +{ + GEOS_MARK_FUNCTION; + + arrayView1d< real64 const > const & pressure = subRegion.getField< flow::pressure >(); + arrayView1d< real64 const > const & temperature = subRegion.getField< flow::temperature >(); + + string const & solidName = subRegion.getReference< string >( viewKeyStruct::solidNamesString() ); + CoupledSolidBase & porousSolid = subRegion.template getConstitutiveModel< CoupledSolidBase >( solidName ); + + constitutive::ConstitutivePassThru< CoupledSolidBase >::execute( porousSolid, [=, &subRegion] ( auto & castedPorousSolid ) + { + typename TYPEOFREF( castedPorousSolid ) ::KernelWrapper porousWrapper = castedPorousSolid.createKernelUpdates(); + if( m_isFixedStressPoromechanicsUpdate ) + { + arrayView1d< real64 const > const & pressure_n = subRegion.getField< flow::pressure_n >(); + arrayView1d< real64 const > const & pressure_k = subRegion.getField< 
flow::pressure_k >(); + arrayView1d< real64 const > const & temperature_n = subRegion.getField< flow::temperature_n >(); + arrayView1d< real64 const > const & temperature_k = subRegion.getField< flow::temperature_k >(); + updatePorosityAndPermeabilityFixedStress( porousWrapper, subRegion, pressure, pressure_k, pressure_n, temperature, temperature_k, temperature_n ); + } + else + { + updatePorosityAndPermeabilityFromPressureAndTemperature( porousWrapper, subRegion, pressure, temperature ); + } + } ); +} + +void DLFlowSolverBase::updatePorosityAndPermeability( SurfaceElementSubRegion & subRegion ) const +{ + GEOS_MARK_FUNCTION; + + arrayView1d< real64 const > const & pressure = subRegion.getField< flow::pressure >(); + + arrayView1d< real64 const > const newHydraulicAperture = subRegion.getField< flow::hydraulicAperture >(); + arrayView1d< real64 const > const oldHydraulicAperture = subRegion.getField< flow::aperture0 >(); + + string const & solidName = subRegion.getReference< string >( viewKeyStruct::solidNamesString() ); + CoupledSolidBase & porousSolid = subRegion.getConstitutiveModel< CoupledSolidBase >( solidName ); + + constitutive::ConstitutivePassThru< CompressibleSolidBase >::execute( porousSolid, [=, &subRegion] ( auto & castedPorousSolid ) + { + typename TYPEOFREF( castedPorousSolid ) ::KernelWrapper porousWrapper = castedPorousSolid.createKernelUpdates(); + + updatePorosityAndPermeabilityFromPressureAndAperture( porousWrapper, subRegion, pressure, oldHydraulicAperture, newHydraulicAperture ); + + } ); +} + + +void DLFlowSolverBase::findMinMaxElevationInEquilibriumTarget( DomainPartition & domain, // cannot be const... + stdMap< string, localIndex > const & equilNameToEquilId, + arrayView1d< real64 > const & maxElevation, + arrayView1d< real64 > const & minElevation ) const +{ + array1d< real64 > localMaxElevation( equilNameToEquilId.size() ); + array1d< real64 > localMinElevation( equilNameToEquilId.size() ); + localMaxElevation.setValues< parallelHostPolicy >( -1e99 ); + localMinElevation.setValues< parallelHostPolicy >( 1e99 ); + + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + fsManager.apply< ElementSubRegionBase, + EquilibriumInitialCondition >( 0.0, + domain.getMeshBody( 0 ).getMeshLevel( m_discretizationName ), + EquilibriumInitialCondition::catalogName(), + [&] ( EquilibriumInitialCondition const & fs, + string const &, + SortedArrayView< localIndex const > const & targetSet, + ElementSubRegionBase & subRegion, + string const & ) + { + RAJA::ReduceMax< parallelDeviceReduce, real64 > targetSetMaxElevation( -1e99 ); + RAJA::ReduceMin< parallelDeviceReduce, real64 > targetSetMinElevation( 1e99 ); + + arrayView2d< real64 const > const elemCenter = subRegion.getElementCenter(); + + // TODO: move to FlowSolverBaseKernels to make this function "protected" + forAll< parallelDevicePolicy<> >( targetSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const i ) + { + localIndex const k = targetSet[i]; + targetSetMaxElevation.max( elemCenter[k][2] ); + targetSetMinElevation.min( elemCenter[k][2] ); + } ); + + localIndex const equilIndex = equilNameToEquilId.at( fs.getName() ); + localMaxElevation[equilIndex] = LvArray::math::max( targetSetMaxElevation.get(), localMaxElevation[equilIndex] ); + localMinElevation[equilIndex] = LvArray::math::min( targetSetMinElevation.get(), localMinElevation[equilIndex] ); + + } ); + + MpiWrapper::allReduce( localMaxElevation.toView(), + maxElevation, + MpiWrapper::Reduction::Max, + MPI_COMM_GEOS ); + 
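+  // The reductions in this function (and in computeSourceFluxSizeScalingFactor and sumAquiferFluxes
+  // below) follow a two-stage pattern: each rank first reduces over its owned elements with a RAJA
+  // device reduction, and the per-rank results are then combined across ranks with MpiWrapper.
+  // A minimal sketch of the same pattern, for illustration only, assuming a hypothetical device
+  // view `values` of length `n`:
+  //
+  //   RAJA::ReduceMax< parallelDeviceReduce, real64 > localMax( -LvArray::NumericLimits< real64 >::max );
+  //   forAll< parallelDevicePolicy<> >( n, [=] GEOS_HOST_DEVICE ( localIndex const i )
+  //   {
+  //     localMax.max( values[i] );
+  //   } );
+  //   real64 const globalMax = MpiWrapper::max( localMax.get() );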
+  MpiWrapper::allReduce( localMinElevation.toView(),
+                         minElevation,
+                         MpiWrapper::Reduction::Min,
+                         MPI_COMM_GEOS );
+}
+
+void DLFlowSolverBase::computeSourceFluxSizeScalingFactor( real64 const & time,
+                                                           real64 const & dt,
+                                                           DomainPartition & domain, // cannot be const...
+                                                           stdMap< string, localIndex > const & bcNameToBcId,
+                                                           arrayView1d< globalIndex > const & bcAllSetsSize ) const
+{
+  FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance();
+
+  forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &,
+                                                               MeshLevel & mesh,
+                                                               string_array const & )
+  {
+    fsManager.apply< ElementSubRegionBase,
+                     SourceFluxBoundaryCondition >( time + dt,
+                                                    mesh,
+                                                    SourceFluxBoundaryCondition::catalogName(),
+                                                    [&]( SourceFluxBoundaryCondition const & fs,
+                                                         string const &,
+                                                         SortedArrayView< localIndex const > const & targetSet,
+                                                         ElementSubRegionBase & subRegion,
+                                                         string const & )
+    {
+      arrayView1d< integer const > const ghostRank = subRegion.ghostRank();
+
+      // TODO: move to FlowSolverBaseKernels to make this function "protected"
+      // loop over all the elements of this target set
+      RAJA::ReduceSum< ReducePolicy< parallelDevicePolicy<> >, localIndex > localSetSize( 0 );
+      forAll< parallelDevicePolicy<> >( targetSet.size(),
+                                        [targetSet, ghostRank, localSetSize] GEOS_HOST_DEVICE ( localIndex const k )
+      {
+        localIndex const ei = targetSet[k];
+        if( ghostRank[ei] < 0 )
+        {
+          localSetSize += 1;
+        }
+      } );
+
+      // increment the set size for this source flux boundary condition
+      bcAllSetsSize[bcNameToBcId.at( fs.getName())] += localSetSize.get();
+    } );
+  } );
+
+  // synchronize the set size over all the MPI ranks
+  MpiWrapper::allReduce( bcAllSetsSize,
+                         bcAllSetsSize,
+                         MpiWrapper::Reduction::Sum,
+                         MPI_COMM_GEOS );
+}
+
+void DLFlowSolverBase::initializeSharedMemories()
+{
+  GEOS_MARK_FUNCTION;
+  GEOS_LOG_LEVEL_RANK_0( logInfo::NonlinearSolver,
+                         GEOS_FMT( "Initializing DLFlowSolverBase Shared Memory: name='{}'", m_mem_name_meta_data ) );
+
+  m_shared_metadata = m_sharedMemoryManager.initializeASharedMemory( m_mem_name_meta_data, sizeof( SharedMetadataStruct ) );
+
+  m_sem_python_output_ready = m_sharedMemoryManager.openASemaphore( m_st_semaphore_python_ready );
+  m_sem_cpp_input_ready = m_sharedMemoryManager.openASemaphore( m_st_semaphore_cpp_ready );
+}
+
+void DLFlowSolverBase::shareDLModelInputs( real64 const & time_n,
+                                           real64 const & dt,
+                                           integer const cycleNumber,
+                                           DomainPartition & domain )
+{
+  GEOS_UNUSED_VAR( time_n, dt, cycleNumber, domain );
+
+  // Updating shared memory with (distributed) solution values (for all ranks)
+  arrayView1d< real64 const > m_solution_values = m_solution.values();
+  arrayView1d< real64 const > m_dofXCoords_values = m_dofXCoords.values();
+  arrayView1d< real64 const > m_dofYCoords_values = m_dofYCoords.values();
+  arrayView1d< real64 const > m_dofZCoords_values = m_dofZCoords.values();
+  arrayView1d< real64 const > m_strainTrace_values = m_strainTrace.values();
+  arrayView1d< real64 const > m_prevSolution_values = m_prevSolution.values();
+
+  globalIndex lowerbound = m_solution.ilower();
+  globalIndex upperbound = m_solution.iupper();
+
+  for( globalIndex i = lowerbound; i < upperbound; i++ )
+  {
+    m_shared_metadata->m_shared_solution[i] = m_solution_values[i-lowerbound];
+    m_shared_metadata->m_shared_dofXCoords[i] = m_dofXCoords_values[i-lowerbound];
+    m_shared_metadata->m_shared_dofYCoords[i] = m_dofYCoords_values[i-lowerbound];
+    m_shared_metadata->m_shared_dofZCoords[i] = m_dofZCoords_values[i-lowerbound];
+    m_shared_metadata->m_shared_strainTrace[i] = m_strainTrace_values[i-lowerbound];
+    m_shared_metadata->m_shared_prevSolution[i] = m_prevSolution_values[i-lowerbound];
+  }
+
+  // Updating shared memory with (common) solution values (for rank 0)
+  if( MpiWrapper::commRank( MPI_COMM_GEOS ) == 0 )
+  {
+    std::cout << "Updating Shared Data" << std::endl;
+    m_shared_metadata->m_t_time_n = time_n;
+    m_shared_metadata->m_t_stepDt = dt;
+    m_shared_metadata->m_t_step_no = cycleNumber;
+    // print testing shared data
+    std::cout << "DLFlowSolverBase::shareDLModelInputs " << cycleNumber << " started" << std::endl;
+  }
+
+  // sync with all other ranks
+  MPI_Barrier( MPI_COMM_GEOS );
+
+  if( MpiWrapper::commRank( MPI_COMM_GEOS ) == 0 )
+  {
+    // Signal Python to read the input array (only rank 0 will signal)
+    sem_post( m_sem_cpp_input_ready );
+    std::cout << "Python Notified to start" << std::endl;
+  }
+}
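+
+// The exchange in shareDLModelInputs() / readDLModelOutputs() is a producer/consumer handshake over
+// POSIX shared memory and named semaphores: GEOS fills SharedMetadataStruct and posts the cpp-ready
+// semaphore; the external Python server consumes it, writes its response back into the same segment,
+// and posts the python-ready semaphore that readDLModelOutputs() waits on. A minimal sketch of what
+// the DLSharedMemoryManager calls are assumed to do (the names reuse the defaults declared in
+// DLFlowSolverBase.hpp; the actual implementation lives in DLSharedMemoryManager):
+//
+//   int fd = shm_open( "/mem_name_meta_data", O_CREAT | O_RDWR, 0666 );
+//   ftruncate( fd, sizeof( SharedMetadataStruct ) );
+//   auto * shared = static_cast< SharedMetadataStruct * >(
+//     mmap( nullptr, sizeof( SharedMetadataStruct ), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 ) );
+//   sem_t * cppReady = sem_open( "/sem_name_cpp_ready", O_CREAT, 0666, 0 );
+//   sem_t * pythonReady = sem_open( "/sem_name_python_ready", O_CREAT, 0666, 0 );
+//   // producer (this process): fill *shared, then sem_post( cppReady );
+//   // consumer (Python server): wait on the cpp-ready semaphore, read/write *shared, sem_post( pythonReady );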
+
+void DLFlowSolverBase::readDLModelOutputs( real64 const & time_n,
+                                           real64 const & dt,
+                                           integer const cycleNumber,
+                                           DomainPartition & domain )
+{
+  GEOS_UNUSED_VAR( time_n, dt, cycleNumber, domain );
+
+  // Wait for Python to signal that output array is ready (only rank 0 will wait)
+  if( MpiWrapper::commRank( MPI_COMM_GEOS ) == 0 )
+  {
+    std::cout << "DLFlowSolverBase::readDLModelOutputs " << cycleNumber << " started" << std::endl;
+    sem_wait( m_sem_python_output_ready );
+    std::cout << "Python Finished updating response" << std::endl;
+  }
+
+  // sync with all other ranks
+  MPI_Barrier( MPI_COMM_GEOS );
+
+  // Reading shared memory with (distributed) solution values (for all ranks)
+  arrayView1d< real64 > m_solution_values = m_solution.open();
+
+  globalIndex lowerbound = m_solution.ilower();
+  globalIndex upperbound = m_solution.iupper();
+
+  for( globalIndex i = lowerbound; i < upperbound; i++ )
+  {
+    m_solution_values[i-lowerbound] = m_shared_metadata->m_shared_solution[i];
+  }
+
+  m_solution.close();
+
+  // reading shared memory with (common) solution values (for rank 0)
+  if( MpiWrapper::commRank( MPI_COMM_GEOS ) == 0 )
+  {
+    // print updated shared data
+    std::cout << "m_t_time_n: " << m_shared_metadata->m_t_time_n << std::endl;
+    std::cout << "m_t_stepDt: " << m_shared_metadata->m_t_stepDt << std::endl;
+    std::cout << "m_t_step_no: " << m_shared_metadata->m_t_step_no << std::endl;
+  }
+
+}
+
+void DLFlowSolverBase::saveAquiferConvergedState( real64 const & time,
+                                                  real64 const & dt,
+                                                  DomainPartition & domain )
+{
+  GEOS_MARK_FUNCTION;
+
+  FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance();
+  MeshLevel & mesh = domain.getMeshBody( 0 ).getBaseDiscretization();
+
+  NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager();
+  FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager();
+  FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName );
+  ElementRegionManager const & elemManager = mesh.getElemManager();
+
+  // This step requires three passes:
+  // - First we count the number of individual aquifers
+  // - Second we loop over all the stencil entries to compute the sum of aquifer influxes
+  // - Third we loop over the aquifers to save the sums of each individual aquifer
+
+  // Step 1: count individual aquifers
+
+  stdMap< string, localIndex > aquiferNameToAquiferId;
+  localIndex aquiferCounter = 0;
+
+  fsManager.forSubGroups< AquiferBoundaryCondition >( [&] ( AquiferBoundaryCondition const & bc )
+  {
+    aquiferNameToAquiferId.insert( {bc.getName(), aquiferCounter} );
+    aquiferCounter++;
+  } );
+
+  // Step 2: sum the aquifer fluxes for each individual aquifer
+
+  array1d< real64 > globalSumFluxes( 
aquiferNameToAquiferId.size() ); + array1d< real64 > localSumFluxes( aquiferNameToAquiferId.size() ); + + fsManager.apply< FaceManager, AquiferBoundaryCondition >( time + dt, + mesh, + AquiferBoundaryCondition::catalogName(), + [&] ( AquiferBoundaryCondition const & bc, + string const & setName, + SortedArrayView< localIndex const > const &, + FaceManager &, + string const & ) + { + BoundaryStencil const & stencil = fluxApprox.getStencil< BoundaryStencil >( mesh, setName ); + if( stencil.size() == 0 ) + { + return; + } + + AquiferBoundaryCondition::KernelWrapper aquiferBCWrapper = bc.createKernelWrapper(); + + ElementRegionManager::ElementViewAccessor< arrayView1d< real64 const > > pressure = + elemManager.constructFieldAccessor< flow::pressure >(); + pressure.setName( getName() + "/accessors/" + flow::pressure::key() ); + + ElementRegionManager::ElementViewAccessor< arrayView1d< real64 const > > pressure_n = + elemManager.constructFieldAccessor< flow::pressure_n >(); + pressure_n.setName( getName() + "/accessors/" + flow::pressure_n::key() ); + + ElementRegionManager::ElementViewAccessor< arrayView1d< real64 const > > gravCoef = + elemManager.constructFieldAccessor< flow::gravityCoefficient >(); + gravCoef.setName( getName() + "/accessors/" + flow::gravityCoefficient::key() ); + + real64 const targetSetSumFluxes = sumAquiferFluxes( stencil, + aquiferBCWrapper, + pressure.toNestedViewConst(), + pressure_n.toNestedViewConst(), + gravCoef.toNestedViewConst(), + time, + dt ); + + localIndex const aquiferIndex = aquiferNameToAquiferId.at( bc.getName() ); + localSumFluxes[aquiferIndex] += targetSetSumFluxes; + } ); + + MpiWrapper::allReduce( localSumFluxes, + globalSumFluxes, + MpiWrapper::Reduction::Sum, + MPI_COMM_GEOS ); + + // Step 3: we are ready to save the summed fluxes for each individual aquifer + + fsManager.forSubGroups< AquiferBoundaryCondition >( [&] ( AquiferBoundaryCondition & bc ) + { + localIndex const aquiferIndex = aquiferNameToAquiferId.at( bc.getName() ); + + GEOS_LOG_LEVEL_RANK_0_ON_GROUP( logInfo::BoundaryConditions, + GEOS_FMT( "{} {}: at time {} s, the boundary condition produces a volume of {} m3.", + bc.getCatalogName(), bc.getName(), + time + dt, dt * globalSumFluxes[aquiferIndex] ), + bc ); + bc.saveConvergedState( dt * globalSumFluxes[aquiferIndex] ); + } ); +} + +/** + * @brief Function to sum the aquiferBC fluxes (as later save them) at the end of the time step + * This function is applicable for both single-phase and multiphase flow + */ +real64 +DLFlowSolverBase::sumAquiferFluxes( BoundaryStencil const & stencil, + AquiferBoundaryCondition::KernelWrapper const & aquiferBCWrapper, + ElementViewConst< arrayView1d< real64 const > > const & pres, + ElementViewConst< arrayView1d< real64 const > > const & presOld, + ElementViewConst< arrayView1d< real64 const > > const & gravCoef, + real64 const & timeAtBeginningOfStep, + real64 const & dt ) +{ + using Order = BoundaryStencil::Order; + + BoundaryStencil::IndexContainerViewConstType const & seri = stencil.getElementRegionIndices(); + BoundaryStencil::IndexContainerViewConstType const & sesri = stencil.getElementSubRegionIndices(); + BoundaryStencil::IndexContainerViewConstType const & sefi = stencil.getElementIndices(); + BoundaryStencil::WeightContainerViewConstType const & weight = stencil.getWeights(); + + RAJA::ReduceSum< parallelDeviceReduce, real64 > targetSetSumFluxes( 0.0 ); + + forAll< parallelDevicePolicy<> >( stencil.size(), [=] GEOS_HOST_DEVICE ( localIndex const iconn ) + { + localIndex const er = seri( 
iconn, Order::ELEM ); + localIndex const esr = sesri( iconn, Order::ELEM ); + localIndex const ei = sefi( iconn, Order::ELEM ); + real64 const areaFraction = weight( iconn, Order::ELEM ); + + // compute the aquifer influx rate using the pressure influence function and the aquifer props + real64 dAquiferVolFlux_dPres = 0.0; + real64 const aquiferVolFlux = aquiferBCWrapper.compute( timeAtBeginningOfStep, + dt, + pres[er][esr][ei], + presOld[er][esr][ei], + gravCoef[er][esr][ei], + areaFraction, + dAquiferVolFlux_dPres ); + targetSetSumFluxes += aquiferVolFlux; + } ); + return targetSetSumFluxes.get(); +} + +void DLFlowSolverBase::prepareStencilWeights( DomainPartition & domain ) const +{ + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & ) + { + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( getDiscretizationName() ); + ElementRegionManager::ElementViewAccessor< arrayView1d< real64 const > > hydraulicAperture = + m_isLaggingFractureStencilWeightsUpdate ? + mesh.getElemManager().constructViewAccessor< array1d< real64 >, arrayView1d< real64 const > >( flow::aperture0::key() ) : + mesh.getElemManager().constructViewAccessor< array1d< real64 >, arrayView1d< real64 const > >( flow::hydraulicAperture::key() ); + + fluxApprox.forStencils< SurfaceElementStencil, FaceElementToCellStencil, EmbeddedSurfaceToCellStencil >( mesh, [&]( auto & stencil ) + { + using STENCILWRAPPER_TYPE = typename TYPEOFREF( stencil ) ::KernelWrapper; + + STENCILWRAPPER_TYPE stencilWrapper = stencil.createKernelWrapper(); + + flowSolverBaseKernels::stencilWeightsUpdateKernel< STENCILWRAPPER_TYPE >::prepareStencilWeights( stencilWrapper, hydraulicAperture.toNestedViewConst() ); + } ); + } ); +} + +void DLFlowSolverBase::updateStencilWeights( DomainPartition & domain ) const +{ + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & ) + { + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( getDiscretizationName() ); + ElementRegionManager::ElementViewAccessor< arrayView1d< real64 const > > hydraulicAperture = + mesh.getElemManager().constructViewAccessor< array1d< real64 >, arrayView1d< real64 const > >( flow::hydraulicAperture::key() ); + + fluxApprox.forStencils< SurfaceElementStencil, FaceElementToCellStencil, EmbeddedSurfaceToCellStencil >( mesh, [&]( auto & stencil ) + { + using STENCILWRAPPER_TYPE = typename TYPEOFREF( stencil ) ::KernelWrapper; + + STENCILWRAPPER_TYPE stencilWrapper = stencil.createKernelWrapper(); + + flowSolverBaseKernels::stencilWeightsUpdateKernel< STENCILWRAPPER_TYPE >::updateStencilWeights( stencilWrapper, hydraulicAperture.toNestedViewConst() ); + } ); + } ); +} + +bool DLFlowSolverBase::checkSequentialSolutionIncrements( DomainPartition & GEOS_UNUSED_PARAM( domain ) ) const +{ + + GEOS_LOG_LEVEL_RANK_0( logInfo::Convergence, + GEOS_FMT( " {}: Max pressure change during outer iteration: {} Pa", + getName(), GEOS_FMT( "{:.{}f}", m_sequentialPresChange, 3 ) ) ); + + if( m_isThermal ) + { + GEOS_LOG_LEVEL_RANK_0( logInfo::Convergence, + GEOS_FMT( 
" {}: Max temperature change during outer iteration: {} K", + getName(), GEOS_FMT( "{:.{}f}", m_sequentialTempChange, 3 ) ) ); + } + + return (m_sequentialPresChange < m_maxSequentialPresChange) && (m_sequentialTempChange < m_maxSequentialTempChange); +} + +string DLFlowSolverBase::BCMessage::generateMessage( string_view baseMessage, + string_view fieldName, string_view setName ) +{ + return GEOS_FMT( "{}\nCheck if you have added or applied the appropriate fields to " + "the FieldSpecification component with fieldName=\"{}\" " + "and setNames=\"{}\"\n", baseMessage, fieldName, setName ); +} + +string DLFlowSolverBase::BCMessage::pressureConflict( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Conflicting pressure boundary conditions on set {}/{}/{}", + regionName, subRegionName, setName ), fieldName, setName ); +} + +string DLFlowSolverBase::BCMessage::temperatureConflict( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Conflicting temperature boundary conditions on set {}/{}/{}", + regionName, subRegionName, setName ), fieldName, setName ); +} + +string DLFlowSolverBase::BCMessage::missingPressure( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Pressure boundary condition not prescribed on set {}/{}/{}", + regionName, subRegionName, setName ), fieldName, setName ); +} + +string DLFlowSolverBase::BCMessage::missingTemperature( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Temperature boundary condition not prescribed on set {}/{}/{}", + regionName, subRegionName, setName ), fieldName, setName ); +} + +string DLFlowSolverBase::BCMessage::conflictingComposition( int comp, string_view componentName, + string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Conflicting {} composition (no.{}) for boundary conditions on set {}/{}/{}", + componentName, comp, regionName, subRegionName, setName ), + fieldName, setName ); +} + +string DLFlowSolverBase::BCMessage::invalidComponentIndex( int comp, + string_view fsName, + string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Invalid component index no.{} in boundary condition {}", + comp, fsName ), fieldName, fsName ); +} + +string DLFlowSolverBase::BCMessage::notAppliedOnRegion( int componentIndex, string_view componentName, + string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ) +{ + return generateMessage( GEOS_FMT( "Boundary condition not applied to {} component (no.{})" + "on region {}/{}/{}\n", + componentName, componentIndex, regionName, subRegionName, setName ), + fieldName, setName ); +} + +} // namespace geos diff --git a/src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.hpp b/src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.hpp new file mode 100644 index 00000000000..0fb4b4f4bb0 --- /dev/null +++ b/src/coreComponents/physicsSolvers/fluidFlow/DLFlowSolverBase.hpp @@ -0,0 +1,372 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 
2018-2024 TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019- GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file DLFlowSolverBase.hpp
+ */
+
+#ifndef GEOS_PHYSICSSOLVERS_FINITEVOLUME_DLFLOWSOLVERBASE_HPP_
+#define GEOS_PHYSICSSOLVERS_FINITEVOLUME_DLFLOWSOLVERBASE_HPP_
+
+#include "physicsSolvers/DLPhysicsSolverBase.hpp"
+#include "common/Units.hpp"
+#include "finiteVolume/BoundaryStencil.hpp"
+#include "fieldSpecification/AquiferBoundaryCondition.hpp"
+
+// Shared memory
+#include <sys/mman.h>  // Provides functions for memory management (e.g., mmap, munmap).
+#include <fcntl.h>     // Provides file control options (e.g., open, O_CREAT).
+#include <unistd.h>    // Provides access to the POSIX operating system API (e.g., read, write, close, fork, exec).
+#include <cstring>     // Provides functions for string manipulation (e.g., memcpy, memmove, memset, strlen).
+#include <semaphore.h> // Provides POSIX semaphore functionality for inter-process synchronization (e.g., sem_open, sem_wait, sem_post).
+
+namespace geos
+{
+
+/**
+ * @class DLFlowSolverBase
+ *
+ * Base class for finite volume fluid flow solvers.
+ * Provides some common features
+ */
+class DLFlowSolverBase : public DLPhysicsSolverBase
+{
+  template< typename VIEWTYPE >
+  using ElementViewConst = ElementRegionManager::ElementViewConst< VIEWTYPE >;
+
+public:
+
+  /// String used to form the solverName used to register single-physics solvers in CoupledSolver
+  static string coupledSolverAttributePrefix() { return "flow"; }
+  // Shared Memory
+  struct SharedMetadataStruct
+  {
+    float m_t_time_n;
+    float m_t_stepDt;
+    float m_t_step_no;
+    float m_shared_solution[2200]; // Example array to hold solution response data
+    float m_shared_dofXCoords[2200];
+    float m_shared_dofYCoords[2200];
+    float m_shared_dofZCoords[2200];
+    float m_shared_strainTrace[2200];
+    float m_shared_prevSolution[2200];
+  };
+  // TODO: Shared memory variables and sizes to be user defined or auto calculated according to the python server needs
+  // TODO: The whole logic of shared memory management to be encapsulated, with help of DLSharedMemoryManager class
+  // DLSharedMemoryManager is already a member of DLPhysicsSolverBase (m_sharedMemoryManager)
+  const std::string m_mem_name_meta_data = "/mem_name_meta_data";
+  const std::string m_st_semaphore_python_ready = "/sem_name_python_ready";
+  const std::string m_st_semaphore_cpp_ready = "/sem_name_cpp_ready";
+  SharedMetadataStruct *m_shared_metadata;
+  sem_t *m_sem_python_output_ready;
+  sem_t *m_sem_cpp_input_ready;
+
+  /**
+   * @brief main constructor for Group Objects
+   * @param name the name of this instantiation of Group in the repository
+   * @param parent the parent group of this instantiation of Group
+   */
+  DLFlowSolverBase( const string & name,
+                    Group * const parent );
+
+
+  /// deleted default constructor
+  DLFlowSolverBase() = delete;
+
+  /// deleted copy constructor
+  DLFlowSolverBase( DLFlowSolverBase const & ) = delete;
+
+  /// default move constructor
+  DLFlowSolverBase( DLFlowSolverBase && ) = default;
+
+  /// deleted assignment operator
+  DLFlowSolverBase & operator=( DLFlowSolverBase const & ) = delete;
+
+  /// deleted move operator
+  DLFlowSolverBase & operator=( DLFlowSolverBase && ) = delete;
+
+  virtual void 
registerDataOnMesh( Group & MeshBodies ) override; + + struct viewKeyStruct : DLPhysicsSolverBase::viewKeyStruct + { + // misc inputs + static constexpr char const * isThermalString() { return "isThermal"; } + static constexpr char const * inputTemperatureString() { return "temperature"; } + static constexpr char const * allowNegativePressureString() { return "allowNegativePressure"; } + static constexpr char const * maxAbsolutePresChangeString() { return "maxAbsolutePressureChange"; } + static constexpr char const * maxSequentialPresChangeString() { return "maxSequentialPressureChange"; } + static constexpr char const * maxSequentialTempChangeString() { return "maxSequentialTemperatureChange"; } + + static constexpr char const * fluidNamesString() { return "fluidNames"; } + static constexpr char const * solidNamesString() { return "solidNames"; } + static constexpr char const * permeabilityNamesString() { return "permeabilityNames"; } + static constexpr char const * solidInternalEnergyNamesString() { return "solidInternalEnergyNames"; } + static constexpr char const * thermalConductivityNamesString() { return "thermalConductivityNames"; } + }; + + /** + * @brief Prepare the stencil weights by removing the contribution of the hydraulic aperture before + * the aperture is updated + * @param[in] domain the domain partition + */ + void prepareStencilWeights( DomainPartition & domain ) const; + + /** + * @brief Update the stencil weights by adding the contribution of the hydraulic aperture after + * the aperture is updated + * @param[in] domain the domain partition + */ + void updateStencilWeights( DomainPartition & domain ) const; + + void enableFixedStressPoromechanicsUpdate() { m_isFixedStressPoromechanicsUpdate = true; } + + void enableJumpStabilization() { m_isJumpStabilized = true; } + + void updatePorosityAndPermeability( CellElementSubRegion & subRegion ) const; + + virtual void updatePorosityAndPermeability( SurfaceElementSubRegion & subRegion ) const; + + /** + * @brief Utility function to save the iteration state (useful for sequential simulations) + * @param[in] domain the domain partition + */ + virtual void saveSequentialIterationState( DomainPartition & domain ) override; + + integer & isThermal() { return m_isThermal; } + + /** + * @return The unit in which we evaluate the amount of fluid per element (Mass or Mole). 
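+   * Defaults to units::Unit::Mass in this base class; derived solvers may override.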
+ */ + virtual units::Unit getMassUnit() const { return units::Unit::Mass; } + + /** + * @brief Function to activate the flag allowing negative pressure + */ + void allowNegativePressure() { m_allowNegativePressure = 1; } + + /** + * @brief Utility function to keep the flow variables during a time step (used in poromechanics simulations) + * @param[in] keepVariablesConstantDuringInitStep flag to tell the solver to freeze its primary variables during a time step + * @detail This function is meant to be called by a specific task before/after the initialization step + */ + void setKeepVariablesConstantDuringInitStep( bool const keepVariablesConstantDuringInitStep ) + { m_keepVariablesConstantDuringInitStep = keepVariablesConstantDuringInitStep; } + + virtual bool checkSequentialSolutionIncrements( DomainPartition & domain ) const override; + + void enableLaggingFractureStencilWeightsUpdate(){ m_isLaggingFractureStencilWeightsUpdate = 1; }; + + real64 sumAquiferFluxes( BoundaryStencil const & stencil, + AquiferBoundaryCondition::KernelWrapper const & aquiferBCWrapper, + ElementViewConst< arrayView1d< real64 const > > const & pres, + ElementViewConst< arrayView1d< real64 const > > const & presOld, + ElementViewConst< arrayView1d< real64 const > > const & gravCoef, + real64 const & timeAtBeginningOfStep, + real64 const & dt ); + + /** + * @brief assembles the flux terms for all cells for the hydrofracture case + * @param time_n previous time value + * @param dt time step + * @param domain the physical domain object + * @param dofManager degree-of-freedom manager associated with the linear system + * @param localMatrix the system matrix + * @param localRhs the system right-hand side vector + * @param dR_dAper + */ + virtual void assembleHydrofracFluxTerms( real64 const time_n, + real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs, + CRSMatrixView< real64, localIndex const > const & dR_dAper ) + { + GEOS_UNUSED_VAR ( time_n, dt, domain, dofManager, localMatrix, localRhs, dR_dAper ); + GEOS_ERROR( "Poroelastic fluxes with conforming fractures not yet implemented." ); + } + + void initializeState( DomainPartition & domain ); + + virtual void initializeFluidState( MeshLevel & mesh, string_array const & regionNames ) { GEOS_UNUSED_VAR( mesh, regionNames ); } + + virtual void initializeThermalState( MeshLevel & mesh, string_array const & regionNames ) { GEOS_UNUSED_VAR( mesh, regionNames ); } + + /** + * @brief For each equilibrium initial condition, loop over all the target cells and compute the min/max elevation + * @param[in] domain the domain partition + * @param[in] equilNameToEquilId the map from the name of the initial condition to the initial condition index (used in min/maxElevation) + * @param[out] maxElevation the max elevation for each initial condition + * @param[out] minElevation the min elevation for each initial condition + */ + void findMinMaxElevationInEquilibriumTarget( DomainPartition & domain, // cannot be const... 
+ stdMap< string, localIndex > const & equilNameToEquilId, + arrayView1d< real64 > const & maxElevation, + arrayView1d< real64 > const & minElevation ) const; + + /** + * @brief For each source flux boundary condition, loop over all the target cells and sum the owned cells + * @param[in] time the time at the beginning of the time step + * @param[in] dt the time step size + * @param[in] domain the domain partition + * @param[in] bcNameToBcId the map from the name of the boundary condition to the boundary condition index + * @param[out] bcAllSetsSize the total number of owned cells for each source flux boundary condition + */ + void computeSourceFluxSizeScalingFactor( real64 const & time, + real64 const & dt, + DomainPartition & domain, // cannot be const... + stdMap< string, localIndex > const & bcNameToBcId, + arrayView1d< globalIndex > const & bcAllSetsSize ) const; + + integer numberOfDofsPerCell() const { return m_numDofPerCell; } + + virtual void initializeSharedMemories() override; + + virtual void shareDLModelInputs(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain) override; + + virtual void readDLModelOutputs(real64 const &time_n, + real64 const &dt, + integer const cycleNumber, + DomainPartition &domain) override; + +protected: + + /** + * @brief Increment the cumulative flux from each aquifer + * @param[in] time the time at the beginning of the time step + * @param[in] dt the time step size + * @param[in] domain the domain partition + * + * For now this function is here because it can be used for both single-phase flow and multiphase flow + * This may have to be revisited when aquifer BC is implemented for hybrid FVM + */ + virtual void saveAquiferConvergedState( real64 const & time, + real64 const & dt, + DomainPartition & domain ); + + /** + * @brief Utility function to save the converged state + * @param[in] subRegion the element subRegion + */ + virtual void saveConvergedState( ElementSubRegionBase & subRegion ) const; + + /** + * @brief Helper function to compute/report the elements with small pore volumes + * @param[in] domain the domain partition + */ + virtual void validatePoreVolumes( DomainPartition const & domain ) const; + + virtual void precomputeData( MeshLevel & mesh, + string_array const & regionNames ); + + virtual void initializePreSubGroups() override; + + /** + * @brief Checks the validity of the discretization name for the FiniteVolume method (errors if issues are detected) + */ + void checkDiscretizationName() const; + + virtual void initializePostInitialConditionsPreSubGroups() override; + + virtual void computeHydrostaticEquilibrium( DomainPartition & domain ) { GEOS_UNUSED_VAR( domain ); } + + void initializePorosityAndPermeability( MeshLevel & mesh, string_array const & regionNames ); + + void initializeHydraulicAperture( MeshLevel & mesh, string_array const & regionNames ); + + void saveInitialPressureAndTemperature( MeshLevel & mesh, string_array const & regionNames ); + + virtual void setConstitutiveNamesCallSuper( ElementSubRegionBase & subRegion ) const override; + + /// the number of Degrees of Freedom per cell + integer m_numDofPerCell; + + /// flag to determine whether or not this is a thermal simulation + integer m_isThermal; + + /// the input temperature + real64 m_inputTemperature; + + /// flag to freeze the initial state during initialization in coupled problems + bool m_keepVariablesConstantDuringInitStep; + + /// enable the fixed stress poromechanics update of porosity + bool 
m_isFixedStressPoromechanicsUpdate; + + /// enable pressure jump stabilzation for fixed-stress poromechanics + bool m_isJumpStabilized; + + /// flag if negative pressure is allowed + integer m_allowNegativePressure; + + /// maximum (absolute) pressure change in a Newton iteration + real64 m_maxAbsolutePresChange; + + /// maximum (absolute) pressure change in a sequential iteration + real64 m_sequentialPresChange; + real64 m_maxSequentialPresChange; + + /// maximum (absolute) temperature change in a sequential iteration + real64 m_sequentialTempChange; + real64 m_maxSequentialTempChange; + + /** + * @brief Class used for displaying boundary warning message + */ + class BCMessage + { +public: + static string pressureConflict( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ); + + static string temperatureConflict( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ); + + static string missingPressure( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ); + + static string missingTemperature( string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ); + + static string conflictingComposition( int comp, string_view componentName, + string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ); + + static string invalidComponentIndex( int comp, + string_view fsName, string_view fieldName ); + + static string notAppliedOnRegion( int componentIndex, string_view componentName, + string_view regionName, string_view subRegionName, + string_view setName, string_view fieldName ); +private: + static string generateMessage( string_view baseMessage, + string_view fieldName, string_view setName ); + }; + +private: + virtual void setConstitutiveNames( ElementSubRegionBase & subRegion ) const override; + + // flag to determine whether or not to apply lagging update for the fracture stencil weights + integer m_isLaggingFractureStencilWeightsUpdate; + +}; + + +} + +#endif //GEOS_PHYSICSSOLVERS_FINITEVOLUME_DLFLOWSOLVERBASE_HPP_ diff --git a/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.cpp new file mode 100644 index 00000000000..4f1460615ae --- /dev/null +++ b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.cpp @@ -0,0 +1,1401 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLSinglePhaseBase.cpp + */ + +#include "DLSinglePhaseBase.hpp" + + +#include "common/DataTypes.hpp" +#include "common/TimingMacros.hpp" +#include "constitutive/fluid/singlefluid/SingleFluidBase.hpp" +#include "constitutive/fluid/singlefluid/SingleFluidFields.hpp" +#include "constitutive/fluid/singlefluid/SingleFluidSelector.hpp" +#include "constitutive/solid/SolidInternalEnergy.hpp" +#include "constitutive/thermalConductivity/SinglePhaseThermalConductivitySelector.hpp" +#include "fieldSpecification/AquiferBoundaryCondition.hpp" +#include "fieldSpecification/EquilibriumInitialCondition.hpp" +#include "fieldSpecification/FieldSpecificationManager.hpp" +#include "fieldSpecification/SourceFluxBoundaryCondition.hpp" +#include "physicsSolvers/fluidFlow/SourceFluxStatistics.hpp" +#include "functions/TableFunction.hpp" +#include "mainInterface/ProblemManager.hpp" +#include "mesh/DomainPartition.hpp" +#include "physicsSolvers/LogLevelsInfo.hpp" +#include "physicsSolvers/KernelLaunchSelectors.hpp" +#include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" +#include "physicsSolvers/fluidFlow/SinglePhaseBaseFields.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/MobilityKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/SolutionCheckKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/SolutionScalingKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/StatisticsKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/HydrostaticPressureKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/ThermalHydrostaticPressureKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/FluidUpdateKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/SolidInternalEnergyUpdateKernel.hpp" + + +namespace geos +{ + +using namespace dataRepository; +using namespace constitutive; +using namespace fields; +using namespace singlePhaseBaseKernels; + +DLSinglePhaseBase::DLSinglePhaseBase( const string & name, + Group * const parent ): + DLFlowSolverBase( name, parent ) +{ + this->registerWrapper( viewKeyStruct::inputTemperatureString(), &m_inputTemperature ). + setApplyDefaultValue( 0.0 ). + setInputFlag( InputFlags::OPTIONAL ). + setDescription( "Temperature" ); + + this->getWrapper< integer >( string( viewKeyStruct::isThermalString() ) ). + appendDescription( GEOS_FMT( "\nSourceFluxes application if {} is enabled :\n" + "- negative value (injection): the mass balance equation is modified to considered the additional source term,\n" + "- positive value (production): both the mass balance and the energy balance equations are modified to considered the additional source term.\n" + "For the energy balance equation, the mass flux is multiplied by the enthalpy in the cell from which the fluid is being produced.", + viewKeyStruct::isThermalString() ) ); + + addLogLevel< logInfo::BoundaryConditions >(); +} + + +void DLSinglePhaseBase::registerDataOnMesh( Group & meshBodies ) +{ + DLFlowSolverBase::registerDataOnMesh( meshBodies ); + + m_numDofPerCell = m_isThermal ? 
2 : 1; + + forDiscretizationOnMeshTargets( meshBodies, [&] ( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + + ElementRegionManager & elemManager = mesh.getElemManager(); + + elemManager.forElementSubRegions< ElementSubRegionBase >( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + subRegion.registerField< flow::mobility >( getName() ); + subRegion.registerField< flow::dMobility >( getName()).reference().resizeDimension< 1 >( m_numDofPerCell ); + + subRegion.registerField< flow::mass >( getName() ); + subRegion.registerField< flow::mass_n >( getName() ); + subRegion.registerField< flow::dMass >( getName() ).reference().resizeDimension< 1 >( m_numDofPerCell ); + + if( m_isThermal ) + { + subRegion.registerField< flow::dEnergy >( getName() ).reference().resizeDimension< 1 >( m_numDofPerCell ); + } + } ); + + elemManager.forElementSubRegions< SurfaceElementSubRegion >( regionNames, + [&]( localIndex const, + SurfaceElementSubRegion & subRegion ) + { + subRegion.registerField< flow::massCreated >( getName() ); + } ); + + FaceManager & faceManager = mesh.getFaceManager(); + { + if( m_isThermal ) + { + faceManager.registerField< flow::faceTemperature >( getName() ); + } + } + } ); +} + +void DLSinglePhaseBase::setConstitutiveNames( ElementSubRegionBase & subRegion ) const +{ + setConstitutiveName< SingleFluidBase >( subRegion, viewKeyStruct::fluidNamesString(), "DL fluid" ); + + if( m_isThermal ) + { + setConstitutiveName< SinglePhaseThermalConductivityBase >( subRegion, viewKeyStruct::thermalConductivityNamesString(), "DL thermal conductivity" ); + } +} + +void DLSinglePhaseBase::initializeAquiferBC() const +{ + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + fsManager.forSubGroups< AquiferBoundaryCondition >( [&] ( AquiferBoundaryCondition & bc ) + { + // set the gravity vector (needed later for the potential diff calculations) + bc.setGravityVector( gravityVector() ); + } ); +} + +void DLSinglePhaseBase::validateConstitutiveModels( DomainPartition & domain ) const +{ + GEOS_MARK_FUNCTION; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + string & fluidName = subRegion.getReference< string >( viewKeyStruct::fluidNamesString() ); + SingleFluidBase const & fluid = getConstitutiveModel< SingleFluidBase >( subRegion, fluidName ); + + constitutiveUpdatePassThru( fluid, [&] ( auto & castedFluid ) + { + string const fluidModelName = castedFluid.getCatalogName(); + GEOS_THROW_IF( m_isThermal && (fluidModelName != "ThermalCompressibleSinglePhaseFluid"), + GEOS_FMT( "SingleFluidBase {}: the thermal option is enabled in the solver, but the fluid model {} is not for thermal fluid", + getDataContext(), fluid.getDataContext() ), + InputError, getDataContext(), fluid.getDataContext() ); + GEOS_THROW_IF( !m_isThermal && (fluidModelName == "ThermalCompressibleSinglePhaseFluid"), + GEOS_FMT( "SingleFluidBase {}: the fluid model is for thermal fluid {}, but the solver option is incompatible with the fluid model", + getDataContext(), fluid.getDataContext() ), + InputError, getDataContext(), fluid.getDataContext() ); + } ); + } ); + } ); +} + +void DLSinglePhaseBase::initializePreSubGroups() +{ + DLFlowSolverBase::initializePreSubGroups(); + + DomainPartition & domain = this->getGroupByPath< 
DomainPartition >( "/Problem/domain" ); + + // 1. Validate various models against each other (must have same phases and components) + validateConstitutiveModels( domain ); + + // 2. Set the value of temperature + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + arrayView1d< real64 > const temp = subRegion.getField< flow::temperature >(); + temp.setValues< parallelHostPolicy >( m_inputTemperature ); + } ); + } ); + + // 3. Initialize the aquifer boundary condition + initializeAquiferBC(); +} + +void DLSinglePhaseBase::updateFluidModel( ObjectManagerBase & dataGroup ) const +{ + GEOS_MARK_FUNCTION; + + arrayView1d< real64 const > const pres = dataGroup.getField< flow::pressure >(); + arrayView1d< real64 const > const temp = dataGroup.getField< flow::temperature >(); + + SingleFluidBase & fluid = + getConstitutiveModel< SingleFluidBase >( dataGroup, dataGroup.getReference< string >( viewKeyStruct::fluidNamesString() ) ); + + constitutiveUpdatePassThru( fluid, [&]( auto & castedFluid ) + { + typename TYPEOFREF( castedFluid ) ::KernelWrapper fluidWrapper = castedFluid.createKernelWrapper(); + singlePhaseBaseKernels::FluidUpdateKernel::launch( fluidWrapper, pres, temp ); + } ); +} + +void DLSinglePhaseBase::updateMass( ElementSubRegionBase & subRegion ) const +{ + GEOS_MARK_FUNCTION; + + geos::internal::kernelLaunchSelectorThermalSwitch ( m_isThermal, [&] ( auto ISTHERMAL ) + { + integer constexpr IS_THERMAL = ISTHERMAL(); + updateMass< IS_THERMAL >( subRegion ); + } ); +} + +template< integer IS_THERMAL > +void DLSinglePhaseBase::updateMass( ElementSubRegionBase & subRegion ) const +{ + using DerivOffset = constitutive::singlefluid::DerivativeOffsetC< IS_THERMAL >; + + arrayView1d< real64 > const mass = subRegion.getField< flow::mass >(); + arrayView1d< real64 > const mass_n = subRegion.getField< flow::mass_n >(); + arrayView2d< real64, constitutive::singlefluid::USD_FLUID > const dMass = subRegion.getField< flow::dMass >(); + + //START_SPHINX_INCLUDE_COUPLEDSOLID + CoupledSolidBase const & porousSolid = + getConstitutiveModel< CoupledSolidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::solidNamesString())); + //END_SPHINX_INCLUDE_COUPLEDSOLID + arrayView2d< real64 const > const porosity = porousSolid.getPorosity(); + arrayView2d< real64 const > const dPorosity_dP = porousSolid.getDporosity_dPressure(); + arrayView2d< real64 const > const porosity_n = porousSolid.getPorosity_n(); + + arrayView1d< real64 const > const volume = subRegion.getElementVolume(); + arrayView1d< real64 const > const deltaVolume = subRegion.getField< fields::flow::deltaVolume >(); + + SingleFluidBase & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.getReference< string >( viewKeyStruct::fluidNamesString())); + arrayView2d< real64 const, constitutive::singlefluid::USD_FLUID > const density = fluid.density(); + arrayView2d< real64 const, constitutive::singlefluid::USD_FLUID > const density_n = fluid.density_n(); + arrayView3d< real64 const, constitutive::singlefluid::USD_FLUID_DER > const dDensity = fluid.dDensity(); + + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + real64 const vol = volume[ei] + deltaVolume[ei]; + mass[ei] = porosity[ei][0] * density[ei][0] * vol; + dMass[ei][DerivOffset::dP] = ( 
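+      // product rule on m = phi * rho * V, with V treated as constant w.r.t. pressure here:
+      //   dm/dp = ( dphi/dp * rho + phi * drho/dp ) * V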
dPorosity_dP[ei][0] * density[ei][0] + porosity[ei][0] * dDensity[ei][0][DerivOffset::dP] ) * vol; + if( isZero( mass_n[ei] ) ) // this is a hack for hydrofrac cases + { + mass_n[ei] = porosity_n[ei][0] * volume[ei] * density_n[ei][0]; // initialize newly created element mass + } + } ); + + if constexpr (IS_THERMAL) + { + arrayView2d< real64 const > const dPorosity_dT = porousSolid.getDporosity_dTemperature(); + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + real64 const vol = volume[ei] + deltaVolume[ei]; + dMass[ei][DerivOffset::dT] = ( dPorosity_dT[ei][0] * density[ei][0] + porosity[ei][0] * dDensity[ei][0][DerivOffset::dT] ) * vol; + } ); + } +} + +void DLSinglePhaseBase::updateEnergy( ElementSubRegionBase & subRegion ) const +{ + GEOS_MARK_FUNCTION; + + using DerivOffset = constitutive::singlefluid::DerivativeOffsetC< 1 >; + + arrayView1d< real64 > const energy = subRegion.getField< flow::energy >(); + arrayView2d< real64, constitutive::singlefluid::USD_FLUID > const dEnergy = subRegion.getField< flow::dEnergy >(); + + arrayView1d< real64 const > const volume = subRegion.getElementVolume(); + arrayView1d< real64 const > const deltaVolume = subRegion.getField< flow::deltaVolume >(); + + CoupledSolidBase const & porousSolid = + getConstitutiveModel< CoupledSolidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::solidNamesString() ) ); + arrayView2d< real64 const > const porosity = porousSolid.getPorosity(); + arrayView2d< real64 const > const dPorosity_dP = porousSolid.getDporosity_dPressure(); + arrayView2d< real64 const > const dPorosity_dT = porousSolid.getDporosity_dTemperature(); + arrayView2d< real64 const > const rockInternalEnergy = porousSolid.getInternalEnergy(); + arrayView2d< real64 const > const dRockInternalEnergy_dT = porousSolid.getDinternalEnergy_dTemperature(); + + SingleFluidBase & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.getReference< string >( viewKeyStruct::fluidNamesString() ) ); + arrayView2d< real64 const, constitutive::singlefluid::USD_FLUID > const density = fluid.density(); + arrayView2d< real64 const, constitutive::singlefluid::USD_FLUID > const fluidInternalEnergy = fluid.internalEnergy(); + arrayView3d< real64 const, constitutive::singlefluid::USD_FLUID_DER > const dDensity = fluid.dDensity(); + arrayView3d< real64 const, constitutive::singlefluid::USD_FLUID_DER > const dFluidInternalEnergy = fluid.dInternalEnergy(); + + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + real64 const vol = volume[ei] + deltaVolume[ei]; + energy[ei] = vol * + ( porosity[ei][0] * density[ei][0] * fluidInternalEnergy[ei][0] + + ( 1.0 - porosity[ei][0] ) * rockInternalEnergy[ei][0] ); + dEnergy[ei][DerivOffset::dP] = vol * + ( dPorosity_dP[ei][0] * density[ei][0] * fluidInternalEnergy[ei][0] + + porosity[ei][0] * dDensity[ei][0][DerivOffset::dP] * fluidInternalEnergy[ei][0] + + porosity[ei][0] * density[ei][0] * dFluidInternalEnergy[ei][0][DerivOffset::dP] - + dPorosity_dP[ei][0] * rockInternalEnergy[ei][0] ); + dEnergy[ei][DerivOffset::dT] = vol * + ( dPorosity_dT[ei][0] * density[ei][0] * fluidInternalEnergy[ei][0] + + porosity[ei][0] * dDensity[ei][0][DerivOffset::dT] * fluidInternalEnergy[ei][0] + + porosity[ei][0] * density[ei][0] * dFluidInternalEnergy[ei][0][DerivOffset::dT] - + dPorosity_dT[ei][0] * rockInternalEnergy[ei][0] + + ( 1.0 - porosity[ei][0] ) * dRockInternalEnergy_dT[ei][0] ); + } ); +} + +void 
DLSinglePhaseBase::updateSolidInternalEnergyModel( ObjectManagerBase & dataGroup ) const +{ + arrayView1d< real64 const > const temperature = dataGroup.getField< flow::temperature >(); + + string const & solidInternalEnergyName = dataGroup.getReference< string >( viewKeyStruct::solidInternalEnergyNamesString() ); + SolidInternalEnergy & solidInternalEnergy = getConstitutiveModel< SolidInternalEnergy >( dataGroup, solidInternalEnergyName ); + + SolidInternalEnergy::KernelWrapper solidInternalEnergyWrapper = solidInternalEnergy.createKernelUpdates(); + + thermalSinglePhaseBaseKernels::SolidInternalEnergyUpdateKernel::launch< parallelDevicePolicy<> >( dataGroup.size(), solidInternalEnergyWrapper, temperature ); +} + +void DLSinglePhaseBase::updateThermalConductivity( ElementSubRegionBase & subRegion ) const +{ + string const & thermalConductivityName = subRegion.template getReference< string >( viewKeyStruct::thermalConductivityNamesString() ); + SinglePhaseThermalConductivityBase const & conductivityMaterial = + getConstitutiveModel< SinglePhaseThermalConductivityBase >( subRegion, thermalConductivityName ); + + arrayView1d< real64 const > const & temperature = subRegion.template getField< flow::temperature >(); + conductivityMaterial.updateFromTemperature( temperature ); +} + +real64 DLSinglePhaseBase::updateFluidState( ElementSubRegionBase & subRegion ) const +{ + updateFluidModel( subRegion ); + updateMass( subRegion ); + updateMobility( subRegion ); + return 0.0; +} + +void DLSinglePhaseBase::updateMobility( ObjectManagerBase & dataGroup ) const +{ + GEOS_MARK_FUNCTION; + + // output + arrayView1d< real64 > const mob = dataGroup.getField< flow::mobility >(); + arrayView2d< real64, constitutive::singlefluid::USD_FLUID > const dMobility = dataGroup.getField< flow::dMobility >(); + + // input + SingleFluidBase & fluid = + getConstitutiveModel< SingleFluidBase >( dataGroup, dataGroup.getReference< string >( viewKeyStruct::fluidNamesString() ) ); + + geos::internal::kernelLaunchSelectorThermalSwitch( m_isThermal, [&] ( auto ISTHERMAL ) + { + integer constexpr NUMDOF = ISTHERMAL() + 1; + singlePhaseBaseKernels::MobilityKernel::compute_value_and_derivatives< parallelDevicePolicy<>, NUMDOF >( dataGroup.size(), + fluid.density(), + fluid.dDensity(), + fluid.viscosity(), + fluid.dViscosity(), + mob, + dMobility ); + } ); + +} + +void DLSinglePhaseBase::initializePostInitialConditionsPreSubGroups() +{ + GEOS_MARK_FUNCTION; + + allowNegativePressure(); + + DLFlowSolverBase::initializePostInitialConditionsPreSubGroups(); + + DomainPartition & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" ); + + initializeState( domain ); +} + +void DLSinglePhaseBase::computeHydrostaticEquilibrium( DomainPartition & domain ) +{ + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + real64 const gravVector[3] = LVARRAY_TENSOROPS_INIT_LOCAL_3( gravityVector() ); + + // Step 1: count individual equilibriums (there may be multiple ones) + + stdMap< string, localIndex > equilNameToEquilId; + localIndex equilCounter = 0; + + fsManager.forSubGroups< EquilibriumInitialCondition >( [&] ( EquilibriumInitialCondition const & bc ) + { + // collect all the equil name to idx + equilNameToEquilId.insert( {bc.getName(), equilCounter} ); + equilCounter++; + + // check that the gravity vector is aligned with the z-axis + GEOS_THROW_IF( !isZero( gravVector[0] ) || !isZero( gravVector[1] ), + getCatalogName() << " " << getDataContext() << + ": the gravity vector specified in this 
simulation (" << gravVector[0] << " " << gravVector[1] << " " << gravVector[2] << + ") is not aligned with the z-axis. \n" + "This is incompatible with the " << bc.getCatalogName() << " " << bc.getDataContext() << + "used in this simulation. To proceed, you can either: \n" << + " - Use a gravityVector aligned with the z-axis, such as (0.0,0.0,-9.81)\n" << + " - Remove the hydrostatic equilibrium initial condition from the XML file", + InputError, getDataContext(), bc.getDataContext() ); + + // ensure that the temperature tables are defined for thermal simulations + GEOS_THROW_IF( m_isThermal && bc.getTemperatureVsElevationTableName().empty(), + getCatalogName() << " " << bc.getDataContext() + << ": " << EquilibriumInitialCondition::viewKeyStruct::temperatureVsElevationTableNameString() + << " must be provided for a thermal simulation", + InputError, getDataContext(), bc.getDataContext() ); + + //ensure that compositions are empty + GEOS_THROW_IF( !bc.getComponentFractionVsElevationTableNames().empty(), + getCatalogName() << " " << bc.getDataContext() + << ": " << EquilibriumInitialCondition::viewKeyStruct::componentFractionVsElevationTableNamesString() + << " must not be provided for a single phase simulation.", + InputError, getDataContext(), bc.getDataContext() ); + + GEOS_THROW_IF( !bc.getComponentNames().empty(), + getCatalogName() << " " << bc.getDataContext() + << ": " << EquilibriumInitialCondition::viewKeyStruct::componentNamesString() + << " must not be provided for a single phase simulation.", + InputError, getDataContext(), bc.getDataContext() ); + } ); + + if( equilCounter == 0 ) + { + return; + } + + // Step 2: find the min elevation and the max elevation in the targetSets + array1d< real64 > globalMaxElevation( equilNameToEquilId.size() ); + array1d< real64 > globalMinElevation( equilNameToEquilId.size() ); + findMinMaxElevationInEquilibriumTarget( domain, + equilNameToEquilId, + globalMaxElevation, + globalMinElevation ); + + // Step 3: for each equil, compute a fine table with hydrostatic pressure vs elevation if the region is a target region + // first compute the region filter + std::set< string > regionFilter; + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel &, + string_array const & regionNames ) + { + for( string const & regionName : regionNames ) + { + regionFilter.insert( regionName ); + } + } ); + + // then start the actual table construction + fsManager.apply< ElementSubRegionBase, + EquilibriumInitialCondition >( 0.0, + domain.getMeshBody( 0 ).getBaseDiscretization(), + EquilibriumInitialCondition::catalogName(), + [&] ( EquilibriumInitialCondition const & fs, + string const &, + SortedArrayView< localIndex const > const & targetSet, + ElementSubRegionBase & subRegion, + string const & ) + { + // Step 3.1: retrieve the data necessary to construct the pressure table in this subregion + + integer const maxNumEquilIterations = fs.getMaxNumEquilibrationIterations(); + real64 const equilTolerance = fs.getEquilibrationTolerance(); + real64 const datumElevation = fs.getDatumElevation(); + real64 const datumPressure = fs.getDatumPressure(); + + localIndex const equilIndex = equilNameToEquilId.at( fs.getName() ); + real64 const minElevation = LvArray::math::min( globalMinElevation[equilIndex], datumElevation ); + real64 const maxElevation = LvArray::math::max( globalMaxElevation[equilIndex], datumElevation ); + real64 const elevationIncrement = LvArray::math::min( fs.getElevationIncrement(), maxElevation - minElevation ); + 
localIndex const numPointsInTable = ( elevationIncrement > 0 ) ? std::ceil( (maxElevation - minElevation) / elevationIncrement ) + 1 : 1; + + real64 const eps = 0.1 * (maxElevation - minElevation); // we add a small buffer to only log in the pathological cases + GEOS_LOG_RANK_0_IF( ( (datumElevation > globalMaxElevation[equilIndex]+eps) || (datumElevation < globalMinElevation[equilIndex]-eps) ), + getCatalogName() << " " << getDataContext() << + ": By looking at the elevation of the cell centers in this model, GEOS found that " << + "the min elevation is " << globalMinElevation[equilIndex] << " and the max elevation is " << + globalMaxElevation[equilIndex] << "\nBut, a datum elevation of " << datumElevation << + " was specified in the input file to equilibrate the model.\n " << + "The simulation is going to proceed with this out-of-bound datum elevation," << + " but the initial condition may be inaccurate." ); + + array1d< array1d< real64 > > elevationValues; + array1d< real64 > pressureValues; + elevationValues.resize( 1 ); + elevationValues[0].resize( numPointsInTable ); + pressureValues.resize( numPointsInTable ); + + // Step 3.2: retrieve the fluid model to compute densities + // we end up with the same issue as in applyDirichletBC: there is not a clean way to retrieve the fluid info + + FunctionManager & functionManager = FunctionManager::getInstance(); + TableFunction::KernelWrapper tempTableWrapper; + // Creation of Wrapper in case of TemperatureVsElevationTableName + if( m_isThermal ) + { + string const tempTableName = fs.getTemperatureVsElevationTableName(); + TableFunction const & tempTable = functionManager.getGroup< TableFunction >( tempTableName ); + tempTableWrapper = tempTable.createKernelWrapper(); + } + + // filter out region not in target + Group const & region = subRegion.getParent().getParent(); + auto it = regionFilter.find( region.getName() ); + if( it == regionFilter.end() ) + { + return; // the region is not in target, there is nothing to do + } + + string const & fluidName = subRegion.getReference< string >( viewKeyStruct::fluidNamesString()); + + // filter out the proppant fluid constitutive models + ConstitutiveBase & fluid = getConstitutiveModel( subRegion, fluidName ); + if( !dynamicCast< SingleFluidBase * >( &fluid ) ) + { + return; + } + SingleFluidBase & singleFluid = dynamicCast< SingleFluidBase & >( fluid ); + + // Step 3.3: compute the hydrostatic pressure values + + constitutiveUpdatePassThru( singleFluid, [&] ( auto & castedFluid ) + { + using FluidType = TYPEOFREF( castedFluid ); + typename FluidType::KernelWrapper fluidWrapper = castedFluid.createKernelWrapper(); + + // note: inside this kernel, serialPolicy is used, and elevation/pressure values don't go to the GPU + bool const equilHasConverged = m_isThermal ? 
+ thermalSinglePhaseBaseKernels:: + HydrostaticPressureKernel::launch( numPointsInTable, + maxNumEquilIterations, + equilTolerance, + gravVector, + minElevation, + elevationIncrement, + datumElevation, + datumPressure, + fluidWrapper, + tempTableWrapper, + elevationValues.toNestedView(), + pressureValues.toView() ) : + singlePhaseBaseKernels:: + HydrostaticPressureKernel::launch( numPointsInTable, + maxNumEquilIterations, + equilTolerance, + gravVector, + minElevation, + elevationIncrement, + datumElevation, + datumPressure, + fluidWrapper, + elevationValues.toNestedView(), + pressureValues.toView() ); + + GEOS_THROW_IF( !equilHasConverged, + getCatalogName() << " " << getDataContext() << + ": hydrostatic pressure initialization failed to converge in region " << region.getName() << "!", + std::runtime_error, getDataContext() ); + } ); + + // Step 3.4: create hydrostatic pressure table + + string const tableName = fs.getName() + "_" + subRegion.getName() + "_table"; + TableFunction * const presTable = dynamicCast< TableFunction * >( functionManager.createChild( TableFunction::catalogName(), tableName ) ); + presTable->setTableCoordinates( elevationValues, { units::Distance } ); + presTable->setTableValues( pressureValues, units::Pressure ); + presTable->setInterpolationMethod( TableFunction::InterpolationType::Linear ); + TableFunction::KernelWrapper presTableWrapper = presTable->createKernelWrapper(); + + // Step 4: assign pressure as a function of elevation + // TODO: this last step should probably be delayed to wait for the creation of FaceElements + arrayView2d< real64 const > const elemCenter = subRegion.getElementCenter(); + arrayView1d< real64 > const pres = subRegion.getField< flow::pressure >(); + arrayView1d< real64 > const temp = subRegion.getField< flow::temperature >(); + + + RAJA::ReduceMin< parallelHostReduce, real64 > minPressure( LvArray::NumericLimits< real64 >::max ); + + forAll< parallelHostPolicy >( targetSet.size(), [=] GEOS_HOST_DEVICE ( localIndex const i ) + { + localIndex const k = targetSet[i]; + real64 const elevation = elemCenter[k][2]; + pres[k] = presTableWrapper.compute( &elevation ); + if( m_isThermal ) + { + temp[k] = tempTableWrapper.compute( &elevation ); + } + minPressure.min( pres[k] ); + } ); + + // For single phase flow, just issue a warning, because the simulation can proceed with a negative pressure + GEOS_WARNING_IF( minPressure.get() <= 0.0, + GEOS_FMT( "A negative pressure of {} Pa was found during hydrostatic initialization in region/subRegion {}/{}", + minPressure.get(), region.getName(), subRegion.getName() ) ); + } ); +} + +void DLSinglePhaseBase::initializeFluidState( MeshLevel & mesh, string_array const & regionNames ) +{ + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + SingleFluidBase const & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::fluidNamesString())); + updateFluidState( subRegion ); + + // 2. 
save the initial density (for use in the single-phase poromechanics solver to compute the deltaBodyForce) + fluid.initializeState(); + } ); +} + +void DLSinglePhaseBase::initializeThermalState( MeshLevel & mesh, string_array const & regionNames ) +{ + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + // initialized porosity + CoupledSolidBase const & porousSolid = + getConstitutiveModel< CoupledSolidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::solidNamesString() ) ); + arrayView2d< real64 const > const porosity = porousSolid.getPorosity(); + + string const & thermalConductivityName = subRegion.template getReference< string >( viewKeyStruct::thermalConductivityNamesString()); + SinglePhaseThermalConductivityBase const & conductivityMaterial = + getConstitutiveModel< SinglePhaseThermalConductivityBase >( subRegion, thermalConductivityName ); + conductivityMaterial.initializeRockFluidState( porosity ); + // note that there is nothing to update here because thermal conductivity is explicit for now + + updateSolidInternalEnergyModel( subRegion ); + string const & solidInternalEnergyName = subRegion.template getReference< string >( viewKeyStruct::solidInternalEnergyNamesString()); + SolidInternalEnergy const & solidInternalEnergyMaterial = + getConstitutiveModel< SolidInternalEnergy >( subRegion, solidInternalEnergyName ); + solidInternalEnergyMaterial.saveConvergedState(); + + updateEnergy( subRegion ); + } ); +} + +void DLSinglePhaseBase::implicitStepSetup( real64 const & GEOS_UNUSED_PARAM( time_n ), + real64 const & GEOS_UNUSED_PARAM( dt ), + DomainPartition & domain ) +{ + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + saveConvergedState( subRegion ); + + applyDeltaVolume( subRegion ); + + // This should fix NaN density in newly created fracture elements + updatePorosityAndPermeability( subRegion ); + updateFluidState( subRegion ); + // for thermal simulations, update solid internal energy + if( m_isThermal ) + { + updateSolidInternalEnergyModel( subRegion ); + updateThermalConductivity( subRegion ); + updateEnergy( subRegion ); + } + + } ); + + mesh.getElemManager().forElementSubRegions< SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + SurfaceElementSubRegion & subRegion ) + { + arrayView1d< real64 const > const aper = subRegion.getField< flow::hydraulicAperture >(); + arrayView1d< real64 > const aper0 = subRegion.getField< flow::aperture0 >(); + aper0.setValues< parallelDevicePolicy<> >( aper ); + + // Needed coz faceElems don't exist when initializing. 
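+      // (clarifying note: face elements are created while the simulation runs, so their converged
+      //  solid state has to be captured here, at implicit step setup, rather than once at initialization)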
+ CoupledSolidBase const & porousSolid = + getConstitutiveModel< CoupledSolidBase >( subRegion, subRegion.getReference< string >( viewKeyStruct::solidNamesString() ) ); + porousSolid.saveConvergedState(); + + saveConvergedState( subRegion ); // necessary for a meaningful porosity update in sequential schemes + updatePorosityAndPermeability( subRegion ); + updateFluidState( subRegion ); + + // This call is required by the proppant solver, but should not be here + SingleFluidBase const & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.getReference< string >( viewKeyStruct::fluidNamesString() ) ); + fluid.saveConvergedState(); + + } ); + + } ); +} + +void DLSinglePhaseBase::implicitStepComplete( real64 const & time, + real64 const & dt, + DomainPartition & domain ) +{ + GEOS_MARK_FUNCTION; + + // note: we have to save the aquifer state **before** updating the pressure, + // otherwise the aquifer flux is saved with the wrong pressure time level + saveAquiferConvergedState( time, dt, domain ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + // update deltaPressure + arrayView1d< real64 const > const pres = subRegion.getField< flow::pressure >(); + arrayView1d< real64 const > const initPres = subRegion.getField< flow::initialPressure >(); + arrayView1d< real64 > const deltaPres = subRegion.getField< flow::deltaPressure >(); + singlePhaseBaseKernels::StatisticsKernel:: + saveDeltaPressure( subRegion.size(), pres, initPres, deltaPres ); + + applyDeltaVolume( subRegion ); + + SingleFluidBase const & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::fluidNamesString() ) ); + fluid.saveConvergedState(); + + CoupledSolidBase const & porousSolid = + getConstitutiveModel< CoupledSolidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::solidNamesString() ) ); + if( m_keepVariablesConstantDuringInitStep ) + { + porousSolid.ignoreConvergedState(); // newPorosity <- porosity_n + } + else + { + porousSolid.saveConvergedState(); // porosity_n <- porosity + } + + if( m_isThermal ) + { + arrayView2d< real64 const > const porosity = porousSolid.getPorosity(); + + SinglePhaseThermalConductivityBase const & conductivityMaterial = + getConstitutiveModel< SinglePhaseThermalConductivityBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::thermalConductivityNamesString() ) ); + + conductivityMaterial.saveConvergedRockFluidState( porosity ); + } + + } ); + + mesh.getElemManager().forElementSubRegions< SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + SurfaceElementSubRegion & subRegion ) + { + arrayView1d< integer const > const elemGhostRank = subRegion.ghostRank(); + arrayView1d< real64 const > const volume = subRegion.getElementVolume(); + arrayView1d< real64 > const creationMass = subRegion.getField< flow::massCreated >(); + + SingleFluidBase const & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::fluidNamesString() ) ); + arrayView2d< real64 const, constitutive::singlefluid::USD_FLUID > const density_n = fluid.density_n(); + + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + if( elemGhostRank[ei] < 0 ) + { + if( 
volume[ei] * density_n[ei][0] > 1.1 * creationMass[ei] ) + { + creationMass[ei] *= 0.75; + if( creationMass[ei] < 1.0e-20 ) + { + creationMass[ei] = 0.0; + } + } + } + } ); + } ); + } ); +} + + +void DLSinglePhaseBase::assembleSystem( real64 const GEOS_UNUSED_PARAM( time_n ), + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + GEOS_LOG_LEVEL_RANK_0(logInfo::NonlinearSolver, + GEOS_FMT("--CustomLog--DLSinglePhaseBase::assembleSystem dt:{} ", dt)); + + // populate the target DOF coordinates + string const presDofKey = m_dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); + populateDofCoords( m_dofManager, domain, presDofKey ); + //TODO: Consider Moving populateDofStrainTrace to a more suitable method like IFENNPoromechanics::mapSolutionBetweenSolvers + populateDofStrainTrace( m_dofManager, domain, presDofKey ); + //TODO: Consider Moving populateDofPrevSolution to a more suitable method + populateDofPrevSolution( m_dofManager, domain, presDofKey ); + + //TODO: Consider cancelling unused assembly features + assembleAccumulationTerms( domain, + dofManager, + localMatrix, + localRhs ); + + if( m_isJumpStabilized ) + { + assembleStabilizedFluxTerms( dt, + domain, + dofManager, + localMatrix, + localRhs ); + } + else + { + assembleFluxTerms( dt, + domain, + dofManager, + localMatrix, + localRhs ); + } +} + +void DLSinglePhaseBase::assembleAccumulationTerms( DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, + SurfaceElementSubRegion >( regionNames, + [&]( localIndex const, + auto & subRegion ) + { + accumulationAssemblyLaunch( dofManager, subRegion, localMatrix, localRhs ); + } ); + } ); +} + +void DLSinglePhaseBase::applyBoundaryConditions( real64 time_n, + real64 dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + if( m_keepVariablesConstantDuringInitStep ) + { + // this function is going to force the current flow state to be constant during the time step + // this is used when the poromechanics solver is performing the stress initialization + // TODO: in the future, a dedicated poromechanics kernel should eliminate the flow vars to construct a reduced system + // which will remove the need for this brittle passing aroung of flag + keepVariablesConstantDuringInitStep( time_n, dt, dofManager, domain, localMatrix.toViewConstSizes(), localRhs.toView() ); + } + else + { + applySourceFluxBC( time_n, dt, domain, dofManager, localMatrix, localRhs ); + applyDirichletBC( time_n, dt, domain, dofManager, localMatrix, localRhs ); + applyAquiferBC( time_n, dt, domain, dofManager, localMatrix, localRhs ); + } +} + +namespace +{ + +void applyAndSpecifyFieldValue( real64 const & time_n, + real64 const & dt, + MeshLevel & mesh, + globalIndex const rankOffset, + string const dofKey, + bool const, + integer const idof, + string const fieldKey, + string const boundaryFieldKey, + CRSMatrixView< real64, globalIndex const > const & 
localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + fsManager.apply< ElementSubRegionBase >( time_n + dt, + mesh, + fieldKey, + [&]( FieldSpecificationBase const & fs, + string const &, + SortedArrayView< localIndex const > const & lset, + ElementSubRegionBase & subRegion, + string const & ) + { + // Specify the bc value of the field + fs.applyFieldValue< FieldSpecificationEqual, + parallelDevicePolicy<> >( lset, + time_n + dt, + subRegion, + boundaryFieldKey ); + + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + arrayView1d< globalIndex const > const dofNumber = + subRegion.getReference< array1d< globalIndex > >( dofKey ); + arrayView1d< real64 const > const bcField = + subRegion.getReference< array1d< real64 > >( boundaryFieldKey ); + arrayView1d< real64 const > const field = + subRegion.getReference< array1d< real64 > >( fieldKey ); + + forAll< parallelDevicePolicy<> >( lset.size(), [=] GEOS_HOST_DEVICE ( localIndex const a ) + { + localIndex const ei = lset[a]; + if( ghostRank[ei] >= 0 ) + { + return; + } + + globalIndex const dofIndex = dofNumber[ei]; + localIndex const localRow = dofIndex - rankOffset; + real64 rhsValue; + + // Apply field value to the matrix/rhs + FieldSpecificationEqual::SpecifyFieldValue( dofIndex + idof, + rankOffset, + localMatrix, + rhsValue, + bcField[ei], + field[ei] ); + localRhs[localRow + idof] = rhsValue; + } ); + } ); +} + +} + +void DLSinglePhaseBase::applyDirichletBC( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const +{ + GEOS_MARK_FUNCTION; + + string const dofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + globalIndex const rankOffset = dofManager.rankOffset(); + bool const isFirstNonlinearIteration = ( m_nonlinearSolverParameters.m_numNewtonIterations == 0 ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & ) + { + applyAndSpecifyFieldValue( time_n, dt, mesh, rankOffset, dofKey, isFirstNonlinearIteration, + 0, flow::pressure::key(), flow::bcPressure::key(), + localMatrix, localRhs ); + if( m_isThermal ) + { + applyAndSpecifyFieldValue( time_n, dt, mesh, rankOffset, dofKey, isFirstNonlinearIteration, + 1, flow::temperature::key(), flow::bcTemperature::key(), + localMatrix, localRhs ); + } + } ); +} + +void DLSinglePhaseBase::applySourceFluxBC( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const +{ + GEOS_MARK_FUNCTION; + + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + string const dofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + + // Step 1: count individual source flux boundary conditions + + stdMap< string, localIndex > bcNameToBcId; + localIndex bcCounter = 0; + + fsManager.forSubGroups< SourceFluxBoundaryCondition >( [&] ( SourceFluxBoundaryCondition const & bc ) + { + // collect all the bc names to idx + bcNameToBcId.insert( {bc.getName(), bcCounter} ); + bcCounter++; + } ); + + if( bcCounter == 0 ) + { + return; + } + + // Step 2: count the set size for each source flux (each source flux may have multiple target sets) + + array1d< globalIndex > 
bcAllSetsSize( bcNameToBcId.size() ); + + computeSourceFluxSizeScalingFactor( time_n, + dt, + domain, + bcNameToBcId, + bcAllSetsSize.toView() ); + + // Step 3: we are ready to impose the boundary condition, normalized by the set size + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & ) + { + integer const isThermal = m_isThermal; + + fsManager.apply< ElementSubRegionBase, + SourceFluxBoundaryCondition >( time_n + dt, + mesh, + SourceFluxBoundaryCondition::catalogName(), + [&, isThermal]( SourceFluxBoundaryCondition const & fs, + string const & setName, + SortedArrayView< localIndex const > const & targetSet, + ElementSubRegionBase & subRegion, + string const & ) + { + if( targetSet.size() == 0 ) + { + return; + } + if( !subRegion.hasWrapper( dofKey ) ) + { + GEOS_LOG_LEVEL_BY_RANK_ON_GROUP( logInfo::BoundaryConditions, + GEOS_FMT( "{}: trying to apply {}, but its targetSet named '{}' intersects with non-simulated region named '{}'.", + getDataContext(), SourceFluxBoundaryCondition::catalogName(), setName, subRegion.getName() ), + fs ); + return; + } + + arrayView1d< globalIndex const > const dofNumber = subRegion.getReference< array1d< globalIndex > >( dofKey ); + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + + // Step 3.1: get the values of the source boundary condition that need to be added to the rhs + + array1d< globalIndex > dofArray( targetSet.size() ); + array1d< real64 > rhsContributionArray( targetSet.size() ); + arrayView1d< real64 > rhsContributionArrayView = rhsContributionArray.toView(); + localIndex const rankOffset = dofManager.rankOffset(); + + RAJA::ReduceSum< parallelDeviceReduce, real64 > massProd( 0.0 ); + + // note that the dofArray will not be used after this step (simpler to use dofNumber instead) + fs.computeRhsContribution< FieldSpecificationAdd, + parallelDevicePolicy<> >( targetSet.toViewConst(), + time_n + dt, + dt, + subRegion, + dofNumber, + rankOffset, + localMatrix, + dofArray.toView(), + rhsContributionArrayView, + [] GEOS_HOST_DEVICE ( localIndex const ) + { + return 0.0; + } ); + + // Step 3.2: we are ready to add the right-hand side contributions, taking into account our equation layout + + // get the normalizer + real64 const sizeScalingFactor = bcAllSetsSize[bcNameToBcId.at( fs.getName())]; + + if( isThermal ) + { + using DerivOffset = constitutive::singlefluid::DerivativeOffsetC< 1 >; + SingleFluidBase const & fluid = + getConstitutiveModel< SingleFluidBase >( subRegion, subRegion.template getReference< string >( viewKeyStruct::fluidNamesString() ) ); + + arrayView2d< real64 const, constitutive::singlefluid::USD_FLUID > const enthalpy = fluid.enthalpy(); + arrayView3d< real64 const, constitutive::singlefluid::USD_FLUID_DER > const dEnthalpy = fluid.dEnthalpy(); + forAll< parallelDevicePolicy<> >( targetSet.size(), [sizeScalingFactor, + targetSet, + rankOffset, + ghostRank, + dofNumber, + enthalpy, + dEnthalpy, + rhsContributionArrayView, + localRhs, + localMatrix, + massProd] GEOS_HOST_DEVICE ( localIndex const a ) + { + // we need to filter out ghosts here, because targetSet may contain them + localIndex const ei = targetSet[a]; + if( ghostRank[ei] >= 0 ) + { + return; + } + + // add the value to the mass balance equation + globalIndex const massRowIndex = dofNumber[ei] - rankOffset; + globalIndex const energyRowIndex = massRowIndex + 1; + real64 const rhsValue = rhsContributionArrayView[a] / sizeScalingFactor; // scale the contribution by the 
sizeScalingFactor here! + localRhs[massRowIndex] += rhsValue; + massProd += rhsValue; + //add the value to the energy balance equation if the flux is positive (i.e., it's a producer) + if( rhsContributionArrayView[a] > 0.0 ) + { + globalIndex const pressureDofIndex = dofNumber[ei] - rankOffset; + globalIndex const temperatureDofIndex = pressureDofIndex + 1; + + localRhs[energyRowIndex] += enthalpy[ei][0] * rhsValue; + + globalIndex dofIndices[2]{pressureDofIndex, temperatureDofIndex}; + real64 jacobian[2]{rhsValue * dEnthalpy[ei][0][DerivOffset::dP], rhsValue * dEnthalpy[ei][0][DerivOffset::dT]}; + + localMatrix.template addToRow< serialAtomic >( energyRowIndex, + dofIndices, + jacobian, + 2 ); + } + } ); + } + else + { + forAll< parallelDevicePolicy<> >( targetSet.size(), [sizeScalingFactor, + targetSet, + rankOffset, + ghostRank, + dofNumber, + rhsContributionArrayView, + localRhs, + massProd] GEOS_HOST_DEVICE ( localIndex const a ) + { + // we need to filter out ghosts here, because targetSet may contain them + localIndex const ei = targetSet[a]; + if( ghostRank[ei] >= 0 ) + { + return; + } + + // add the value to the mass balance equation + globalIndex const rowIndex = dofNumber[ei] - rankOffset; + real64 const rhsValue = rhsContributionArrayView[a] / sizeScalingFactor; + localRhs[rowIndex] += rhsValue; + massProd += rhsValue; + } ); + } + + SourceFluxStatsAggregator::forAllFluxStatWrappers( subRegion, fs.getName(), + [&]( SourceFluxStatsAggregator::WrappedStats & wrapper ) + { + // set the new sub-region statistics for this timestep + array1d< real64 > massProdArr{ 1 }; + massProdArr[0] = massProd.get(); + wrapper.gatherTimeStepStats( time_n, dt, massProdArr.toViewConst(), targetSet.size() ); + } ); + } ); + } ); +} + +void DLSinglePhaseBase::keepVariablesConstantDuringInitStep( real64 const time, + real64 const dt, + DofManager const & dofManager, + DomainPartition & domain, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const +{ + GEOS_MARK_FUNCTION; + + GEOS_UNUSED_VAR( time, dt ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel const & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase const & subRegion ) + { + globalIndex const rankOffset = dofManager.rankOffset(); + string const dofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + arrayView1d< globalIndex const > const dofNumber = subRegion.getReference< array1d< globalIndex > >( dofKey ); + + arrayView1d< real64 const > const pres = subRegion.getField< flow::pressure >(); + arrayView1d< real64 const > const temp = subRegion.getField< flow::temperature >(); + + integer const isThermal = m_isThermal; + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + if( ghostRank[ei] >= 0 ) + { + return; + } + + globalIndex const dofIndex = dofNumber[ei]; + localIndex const localRow = dofIndex - rankOffset; + real64 rhsValue; + + // 4.1. Apply pressure value to the matrix/rhs + FieldSpecificationEqual::SpecifyFieldValue( dofIndex, + rankOffset, + localMatrix, + rhsValue, + pres[ei], // freeze the current pressure value + pres[ei] ); + localRhs[localRow] = rhsValue; + + // 4.2. 
Apply temperature value to the matrix/rhs + if( isThermal ) + { + FieldSpecificationEqual::SpecifyFieldValue( dofIndex + 1, + rankOffset, + localMatrix, + rhsValue, + temp[ei], // freeze the current temperature value + temp[ei] ); + localRhs[localRow + 1] = rhsValue; + } + } ); + } ); + } ); +} + + +void DLSinglePhaseBase::updateState( DomainPartition & domain ) +{ + GEOS_MARK_FUNCTION; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + updatePorosityAndPermeability( subRegion ); + updateFluidState( subRegion ); + + if( m_isThermal ) + { + updateSolidInternalEnergyModel( subRegion ); + updateEnergy( subRegion ); + } + } ); + } ); +} + +void DLSinglePhaseBase::resetStateToBeginningOfStep( DomainPartition & domain ) +{ + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions< CellElementSubRegion, SurfaceElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + arrayView1d< real64 > const pres = subRegion.template getField< flow::pressure >(); + arrayView1d< real64 const > const pres_n = subRegion.template getField< flow::pressure_n >(); + pres.setValues< parallelDevicePolicy<> >( pres_n ); + + if( m_isThermal ) + { + arrayView1d< real64 > const temp = subRegion.template getField< flow::temperature >(); + arrayView1d< real64 const > const temp_n = subRegion.template getField< flow::temperature_n >(); + temp.setValues< parallelDevicePolicy<> >( temp_n ); + } + + updatePorosityAndPermeability( subRegion ); + updateFluidState( subRegion ); + + if( m_isThermal ) + { + updateSolidInternalEnergyModel( subRegion ); + updateEnergy( subRegion ); + } + } ); + } ); +} + +real64 DLSinglePhaseBase::scalingForSystemSolution( DomainPartition & domain, + DofManager const & dofManager, + arrayView1d< real64 const > const & localSolution ) +{ + GEOS_MARK_FUNCTION; + + string const dofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + real64 scalingFactor = 1.0; + real64 maxDeltaPres = 0.0; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + globalIndex const rankOffset = dofManager.rankOffset(); + arrayView1d< globalIndex const > const dofNumber = subRegion.getReference< array1d< globalIndex > >( dofKey ); + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + + auto const subRegionData = + singlePhaseBaseKernels::SolutionScalingKernel:: + launch< parallelDevicePolicy<> >( localSolution, rankOffset, dofNumber, ghostRank, m_maxAbsolutePresChange ); + + scalingFactor = std::min( scalingFactor, subRegionData.first ); + maxDeltaPres = std::max( maxDeltaPres, subRegionData.second ); + } ); + } ); + + scalingFactor = MpiWrapper::min( scalingFactor ); + maxDeltaPres = MpiWrapper::max( maxDeltaPres ); + + GEOS_LOG_LEVEL_RANK_0( logInfo::Solution, GEOS_FMT( " {}: Max pressure change = {} Pa (before scaling)", + getName(), fmt::format( "{:.{}f}", maxDeltaPres, 3 ) ) ); + + return scalingFactor; +} + +bool DLSinglePhaseBase::checkSystemSolution( DomainPartition & domain, + 
DofManager const & dofManager, + arrayView1d< real64 const > const & localSolution, + real64 const scalingFactor ) +{ + GEOS_MARK_FUNCTION; + + string const dofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + integer numNegativePressures = 0; + real64 minPressure = 0.0; + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase & subRegion ) + { + globalIndex const rankOffset = dofManager.rankOffset(); + arrayView1d< globalIndex const > const dofNumber = subRegion.getReference< array1d< globalIndex > >( dofKey ); + arrayView1d< integer const > const ghostRank = subRegion.ghostRank(); + arrayView1d< real64 const > const pres = subRegion.getField< flow::pressure >(); + + auto const statistics = + singlePhaseBaseKernels::SolutionCheckKernel:: + launch< parallelDevicePolicy<> >( localSolution, rankOffset, dofNumber, ghostRank, pres, scalingFactor ); + + numNegativePressures += statistics.first; + minPressure = std::min( minPressure, statistics.second ); + } ); + } ); + + numNegativePressures = MpiWrapper::sum( numNegativePressures ); + + if( numNegativePressures > 0 ) + GEOS_LOG_LEVEL_RANK_0( logInfo::Solution, GEOS_FMT( " {}: Number of negative pressure values: {}, minimum value: {} Pa", + getName(), numNegativePressures, fmt::format( "{:.{}f}", minPressure, 3 ) ) ); + + return (m_allowNegativePressure || numNegativePressures == 0) ? 1 : 0; +} + +void DLSinglePhaseBase::saveConvergedState( ElementSubRegionBase & subRegion ) const +{ + DLFlowSolverBase::saveConvergedState( subRegion ); + + arrayView1d< real64 const > const mass = subRegion.template getField< flow::mass >(); + arrayView1d< real64 > const mass_n = subRegion.template getField< flow::mass_n >(); + mass_n.setValues< parallelDevicePolicy<> >( mass ); +} + +void DLSinglePhaseBase::applyDeltaVolume( ElementSubRegionBase & subRegion ) const +{ + arrayView1d< real64 > const dVol = subRegion.template getField< flow::deltaVolume >(); + arrayView1d< real64 > const vol = subRegion.template getReference< array1d< real64 > >( CellElementSubRegion::viewKeyStruct::elementVolumeString()); + forAll< parallelDevicePolicy<> >( subRegion.size(), [=] GEOS_HOST_DEVICE ( localIndex const ei ) + { + vol[ei] += dVol[ei]; + dVol[ei] = 0.0; + } ); +} + +} /* namespace geos */ diff --git a/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.hpp b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.hpp new file mode 100644 index 00000000000..6e3cb03e5f1 --- /dev/null +++ b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseBase.hpp @@ -0,0 +1,424 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLSinglePhaseBase.hpp + */ + +#ifndef GEOS_PHYSICSSOLVERS_FLUIDFLOW_DLSINGLEPHASEBASE_HPP_ +#define GEOS_PHYSICSSOLVERS_FLUIDFLOW_DLSINGLEPHASEBASE_HPP_ + +#include "physicsSolvers/fluidFlow/DLFlowSolverBase.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/AccumulationKernels.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/ThermalAccumulationKernels.hpp" +#include "constitutive/ConstitutiveBase.hpp" +#include "constitutive/fluid/singlefluid/SingleFluidLayouts.hpp" +#include "constitutive/fluid/singlefluid/SingleFluidUtils.hpp" + + +namespace geos +{ + +namespace constitutive +{ + +class ConstitutiveBase; + +} + +/** + * @class DLSinglePhaseBase + * + * base class to perform a single phase finite volume solve. + */ +class DLSinglePhaseBase : public DLFlowSolverBase +{ +public: + using SingleFluidProp = constitutive::SingleFluidVar< real64, 2, constitutive::singlefluid::LAYOUT_FLUID, constitutive::singlefluid::LAYOUT_FLUID_DER >; + + /** + * @brief main constructor for Group Objects + * @param name the name of this instantiation of Group in the repository + * @param parent the parent group of this instantiation of Group + */ + DLSinglePhaseBase( const string & name, + Group * const parent ); + + + /// deleted default constructor + DLSinglePhaseBase() = delete; + + /// deleted copy constructor + DLSinglePhaseBase( DLSinglePhaseBase const & ) = delete; + + /// default move constructor + DLSinglePhaseBase( DLSinglePhaseBase && ) = default; + + /// deleted assignment operator + DLSinglePhaseBase & operator=( DLSinglePhaseBase const & ) = delete; + + /// deleted move operator + DLSinglePhaseBase & operator=( DLSinglePhaseBase && ) = delete; + + /** + * @brief default destructor + */ + virtual ~DLSinglePhaseBase() override + {} + + virtual void registerDataOnMesh( Group & meshBodies ) override; + + /** + * @defgroup Solver Interface Functions + * + * These functions provide the primary interface that is required for derived classes + */ + ///@{ + + virtual void + implicitStepSetup( real64 const & time_n, + real64 const & dt, + DomainPartition & domain ) override; + + virtual void + assembleSystem( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override; + + virtual void + applyBoundaryConditions( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override; + + virtual real64 + scalingForSystemSolution( DomainPartition & domain, + DofManager const & dofManager, + arrayView1d< real64 const > const & localSolution ) override; + + virtual bool + checkSystemSolution( DomainPartition & domain, + DofManager const & dofManager, + arrayView1d< real64 const > const & localSolution, + real64 const scalingFactor ) override; + + virtual void + resetStateToBeginningOfStep( DomainPartition & domain ) override; + + virtual void + implicitStepComplete( real64 const & time, + real64 const & dt, + DomainPartition & domain ) override; + + ///@} + + /** + * @brief assembles the accumulation terms for all cells + * @param time_n previous time value + * @param dt time step + * @param domain the physical domain object + * @param dofManager 
degree-of-freedom manager associated with the linear system + * @param localMatrix the system matrix + * @param localRhs the system right-hand side vector + */ + void assembleAccumulationTerms( DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ); + + /** + * @brief assembles the accumulation terms for all cells of a spcefici subRegion. + * @tparam SUBREGION_TYPE the subRegion type + * @param dofManager degree-of-freedom manager associated with the linear system + * @param subRegion the subRegion + * @param localMatrix the system matrix + * @param localRhs the system right-hand side vector + * + */ + template< typename SUBREGION_TYPE > + void accumulationAssemblyLaunch( DofManager const & dofManager, + SUBREGION_TYPE const & subRegion, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ); + + /** + * @brief assembles the flux terms for all cells + * @param time_n previous time value + * @param dt time step + * @param domain the physical domain object + * @param dofManager degree-of-freedom manager associated with the linear system + * @param localMatrix the system matrix + * @param localRhs the system right-hand side vector + */ + virtual void + assembleFluxTerms( real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) = 0; + + /** + * @brief assembles the flux terms for all cells including jump stabilization + * @param time_n previous time value + * @param dt time step + * @param domain the physical domain object + * @param dofManager degree-of-freedom manager associated with the linear system + * @param localMatrix the system matrix + * @param localRhs the system right-hand side vector + */ + virtual void + assembleStabilizedFluxTerms( real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) = 0; + + /** + * @brief assembles the flux terms for all cells for the poroelastic case + * @param time_n previous time value + * @param dt time step + * @param domain the physical domain object + * @param dofManager degree-of-freedom manager associated with the linear system + * @param localMatrix the system matrix + * @param localRhs the system right-hand side vector + * @param jumpDofKey dofKey of the displacement jump + */ + virtual void + assembleEDFMFluxTerms( real64 const time_n, + real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs, + string const & jumpDofKey ) = 0; + + struct viewKeyStruct : DLFlowSolverBase::viewKeyStruct + { + static constexpr char const * elemDofFieldString() { return "singlePhaseVariables"; } + }; + + /** + * @brief Function to perform the Application of Dirichlet type BC's + * @param time current time + * @param dt time step + * @param dofManager degree-of-freedom manager associated with the linear system + * @param domain the domain + * @param localMatrix local system matrix + * @param localRhs local system right-hand side vector + */ + void + applyDirichletBC( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex 
const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const; + + /** + * @brief Apply source flux boundary conditions to the system + * @param time current time + * @param dt time step + * @param dofManager degree-of-freedom manager associated with the linear system + * @param domain the domain + * @param localMatrix local system matrix + * @param localRhs local system right-hand side vector + */ + void + applySourceFluxBC( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const; + + /** + * @brief Apply aquifer boundary conditions to the system + * @param time current time + * @param dt time step + * @param domain the domain + * @param dofManager degree-of-freedom manager associated with the linear system + * @param localMatrix local system matrix + * @param localRhs local system right-hand side vector + */ + virtual void + applyAquiferBC( real64 const time, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const = 0; + + virtual void + updateState ( DomainPartition & domain ) override final; + + /** + * @brief Function to update all constitutive state and dependent variables + * @param subRegion subregion that contains the fields + */ + real64 + updateFluidState( ElementSubRegionBase & subRegion ) const; + + + /** + * @brief Function to update all constitutive models + * @param dataGroup group that contains the fields + */ + virtual void + updateFluidModel( ObjectManagerBase & dataGroup ) const; + + /** + * @brief Function to update fluid mass + * @param subRegion subregion that contains the fields + */ + void + updateMass( ElementSubRegionBase & subRegion ) const; + + /** + * @brief Template function to update fluid mass + * @param subRegion subregion that contains the fields + */ + template< integer IS_THERMAL > + void updateMass( ElementSubRegionBase & subRegion ) const; + + /** + * @brief Function to update energy + * @param subRegion subregion that contains the fields + */ + void + updateEnergy( ElementSubRegionBase & subRegion ) const; + + /** + * @brief Update all relevant solid internal energy models using current values of temperature + * @param dataGroup the group storing the required fields + */ + void updateSolidInternalEnergyModel( ObjectManagerBase & dataGroup ) const; + + /** + * @brief Update thermal conductivity + * @param subRegion the group storing the required fields + */ + void updateThermalConductivity( ElementSubRegionBase & subRegion ) const; + + /** + * @brief Function to update fluid mobility + * @param dataGroup group that contains the fields + */ + void + updateMobility( ObjectManagerBase & dataGroup ) const; + + virtual void initializePreSubGroups() override; + + virtual void initializePostInitialConditionsPreSubGroups() override; + + virtual void initializeFluidState( MeshLevel & mesh, string_array const & regionNames ) override; + + virtual void initializeThermalState( MeshLevel & mesh, string_array const & regionNames ) override; + + /** + * @brief Compute the hydrostatic equilibrium using the compositions and temperature input tables + */ + virtual void computeHydrostaticEquilibrium( DomainPartition & domain ) override; + + /** + * @brief Update the cell-wise pressure gradient + */ + virtual void updatePressureGradient( DomainPartition & domain 
) + { + GEOS_UNUSED_VAR( domain ); + } + + /** + * @brief Function to fix the initial state during the initialization step in coupled problems + * @param[in] time current time + * @param[in] dt time step + * @param[in] dofManager degree-of-freedom manager associated with the linear system + * @param[in] domain the domain + * @param[in] localMatrix local system matrix + * @param[in] localRhs local system right-hand side vector + * @detail This function is meant to be called when the flag m_keepVariablesConstantDuringInitStep is on + * The main use case is the initialization step in coupled problems during which we solve an elastic problem for a fixed pressure + */ + void keepVariablesConstantDuringInitStep( real64 const time, + real64 const dt, + DofManager const & dofManager, + DomainPartition & domain, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const; + + void applyDeltaVolume( ElementSubRegionBase & subRegion ) const; + +protected: + + /** + * @brief Checks constitutive models for consistency + * @param[in] domain the domain partition + */ + virtual void validateConstitutiveModels( DomainPartition & domain ) const; + + /** + * @brief Initialize the aquifer boundary condition (gravity vector, water phase index) + */ + void initializeAquiferBC() const; + + /** + * @brief Utility function to save the converged state + * @param[in] subRegion the element subRegion + */ + virtual void saveConvergedState( ElementSubRegionBase & subRegion ) const override; + +private: + + virtual void setConstitutiveNames( ElementSubRegionBase & subRegion ) const override; + +}; + +template< typename SUBREGION_TYPE > +void DLSinglePhaseBase::accumulationAssemblyLaunch( DofManager const & dofManager, + SUBREGION_TYPE const & subRegion, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + string const dofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + + if( m_isThermal ) + { + thermalSinglePhaseBaseKernels:: + AccumulationKernelFactory:: + createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + subRegion, + localMatrix, + localRhs ); + } + else + { + singlePhaseBaseKernels:: + AccumulationKernelFactory:: + createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + subRegion, + localMatrix, + localRhs ); + } +} + +} /* namespace geos */ + +#endif //GEOS_PHYSICSSOLVERS_FLUIDFLOW_DLSINGLEPHASEBASE_HPP_ diff --git a/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.cpp b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.cpp new file mode 100644 index 00000000000..adf251700a1 --- /dev/null +++ b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.cpp @@ -0,0 +1,973 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLSinglePhaseFVM.cpp + */ + +#include "DLSinglePhaseFVM.hpp" + +#include "common/TimingMacros.hpp" +#include "constitutive/permeability/PermeabilityFields.hpp" +#include "constitutive/ConstitutivePassThru.hpp" +#include "discretizationMethods/NumericalMethodsManager.hpp" +#include "finiteVolume/BoundaryStencil.hpp" +#include "finiteVolume/FiniteVolumeManager.hpp" +#include "finiteVolume/FluxApproximationBase.hpp" +#include "fieldSpecification/FieldSpecificationManager.hpp" +#include "fieldSpecification/AquiferBoundaryCondition.hpp" +#include "linearAlgebra/multiscale/MultiscalePreconditioner.hpp" +#include "physicsSolvers/LogLevelsInfo.hpp" +#include "physicsSolvers/fluidFlow/FlowSolverBaseFields.hpp" +#include "physicsSolvers/fluidFlow/SinglePhaseBaseFields.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/ResidualNormKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/FluxComputeKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/ThermalFluxComputeKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/DirichletFluxComputeKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/ThermalDirichletFluxComputeKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/StabilizedFluxComputeKernel.hpp" +#include "physicsSolvers/fluidFlow/kernels/singlePhase/AquiferBCKernel.hpp" +// #include "physicsSolvers/fluidFlow/kernels/singlePhase/proppant/ProppantFluxKernels.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/SinglePhasePoromechanicsEmbeddedFractures.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/ThermalSinglePhasePoromechanicsEmbeddedFractures.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/SinglePhasePoromechanicsConformingFractures.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/ThermalSinglePhasePoromechanicsConformingFractures.hpp" + +/** + * @namespace the geos namespace that encapsulates the majority of the code + */ +namespace geos +{ + +using namespace dataRepository; +using namespace constitutive; +using namespace fields; +using namespace singlePhaseBaseKernels; +using namespace singlePhaseFVMKernels; + +template< typename BASE > +DLSinglePhaseFVM< BASE >::DLSinglePhaseFVM( const string & name, + Group * const parent ): + BASE( name, parent ) +{ + LinearSolverParameters & linParams = m_linearSolverParameters.get(); + linParams.multiscale.fieldName = BASE::viewKeyStruct::elemDofFieldString(); + linParams.multiscale.label = "flow"; +} + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::initializePreSubGroups() +{ + BASE::initializePreSubGroups(); + + this->checkDiscretizationName(); + + if( m_isThermal ) + { + // For thermal simulations 2 pdes are considered so we let AMG know. 
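+ // Assumption (descriptive comment): numFunctions = 2 tells the AMG setup that each cell carries two coupled unknowns (pressure and temperature).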
+ LinearSolverParameters & linParams = m_linearSolverParameters.get(); + linParams.amg.numFunctions = 2; + } +} + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::setupDofs( DomainPartition const & domain, + DofManager & dofManager ) const +{ + dofManager.addField( BASE::viewKeyStruct::elemDofFieldString(), + FieldLocation::Elem, + m_numDofPerCell, + BASE::getMeshTargets() ); + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + dofManager.addCoupling( BASE::viewKeyStruct::elemDofFieldString(), fluxApprox ); +} + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::setupSystem( DomainPartition & domain, + DofManager & dofManager, + CRSMatrix< real64, globalIndex > & localMatrix, + ParallelVector & rhs, + ParallelVector & solution, + bool const setSparsity ) +{ + GEOS_MARK_FUNCTION; + BASE::setupSystem( domain, + dofManager, + localMatrix, + rhs, + solution, + setSparsity ); + + if( !m_precond && m_linearSolverParameters.get().solverType != LinearSolverParameters::SolverType::direct ) + { + m_precond = createPreconditioner( domain ); + } +} + +template< typename BASE > +std::unique_ptr< PreconditionerBase< LAInterface > > +DLSinglePhaseFVM< BASE >::createPreconditioner( DomainPartition & domain ) const +{ + LinearSolverParameters const & linParams = m_linearSolverParameters.get(); + switch( linParams.preconditionerType ) + { + case LinearSolverParameters::PreconditionerType::multiscale: + { + return std::make_unique< MultiscalePreconditioner< LAInterface > >( linParams, domain ); + } + default: + { + return PhysicsSolverBase::createPreconditioner( domain ); + } + } +} + +template< typename BASE > +real64 DLSinglePhaseFVM< BASE >::calculateResidualNorm( real64 const & GEOS_UNUSED_PARAM( time_n ), + real64 const & GEOS_UNUSED_PARAM( dt ), + DomainPartition const & domain, + DofManager const & dofManager, + arrayView1d< real64 const > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + integer constexpr numNorm = 2; // mass balance and energy balance + array1d< real64 > localResidualNorm; + array1d< real64 > localResidualNormalizer; + localResidualNorm.resize( numNorm ); + localResidualNormalizer.resize( numNorm ); + + physicsSolverBaseKernels::NormType const normType = BASE::getNonlinearSolverParameters().normType(); + + globalIndex const rankOffset = dofManager.rankOffset(); + string const dofKey = dofManager.getKey( BASE::viewKeyStruct::elemDofFieldString() ); + + this->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel const & mesh, + string_array const & regionNames ) + { + mesh.getElemManager().forElementSubRegions( regionNames, + [&]( localIndex const, + ElementSubRegionBase const & subRegion ) + { + real64 subRegionResidualNorm[numNorm]{}; + real64 subRegionResidualNormalizer[numNorm]{}; + + // step 1: compute the norm in the subRegion + + if( m_isThermal ) + { + singlePhaseBaseKernels:: + ResidualNormKernelFactory:: + createAndLaunch< parallelDevicePolicy<> >( normType, + rankOffset, + dofKey, + localRhs, + subRegion, + m_nonlinearSolverParameters.m_minNormalizer, + subRegionResidualNorm, + subRegionResidualNormalizer ); + } + else + { + real64 subRegionFlowResidualNorm[1]{}; + real64 subRegionFlowResidualNormalizer[1]{}; + singlePhaseBaseKernels:: + ResidualNormKernelFactory:: + createAndLaunch< 
parallelDevicePolicy<> >( normType, + rankOffset, + dofKey, + localRhs, + subRegion, + m_nonlinearSolverParameters.m_minNormalizer, + subRegionFlowResidualNorm, + subRegionFlowResidualNormalizer ); + subRegionResidualNorm[0] = subRegionFlowResidualNorm[0]; + subRegionResidualNormalizer[0] = subRegionFlowResidualNormalizer[0]; + } + + // step 2: first reduction across meshBodies/regions/subRegions + + if( normType == physicsSolverBaseKernels::NormType::Linf ) + { + physicsSolverBaseKernels::LinfResidualNormHelper:: + updateLocalNorm< numNorm >( subRegionResidualNorm, localResidualNorm ); + } + else + { + physicsSolverBaseKernels::L2ResidualNormHelper:: + updateLocalNorm< numNorm >( subRegionResidualNorm, subRegionResidualNormalizer, localResidualNorm, localResidualNormalizer ); + } + } ); + } ); + + // step 3: second reduction across MPI ranks + + real64 residualNorm = 0.0; + if( m_isThermal ) + { + array1d< real64 > globalResidualNorm; + globalResidualNorm.resize( numNorm ); + if( normType == physicsSolverBaseKernels::NormType::Linf ) + { + physicsSolverBaseKernels::LinfResidualNormHelper:: + computeGlobalNorm( localResidualNorm, globalResidualNorm ); + } + else + { + physicsSolverBaseKernels::L2ResidualNormHelper:: + computeGlobalNorm( localResidualNorm, localResidualNormalizer, globalResidualNorm ); + } + residualNorm = sqrt( globalResidualNorm[0] * globalResidualNorm[0] + globalResidualNorm[1] * globalResidualNorm[1] ); + + GEOS_LOG_LEVEL_RANK_0_NLR( logInfo::ResidualNorm, GEOS_FMT( " ( R{} ) = ( {:4.2e} ) ( Renergy ) = ( {:4.2e} )", + DLFlowSolverBase::coupledSolverAttributePrefix(), globalResidualNorm[0], globalResidualNorm[1] )); + BASE::getConvergenceStats().setResidualValue( GEOS_FMT( "R{}", DLFlowSolverBase::coupledSolverAttributePrefix()), globalResidualNorm[0] ); + BASE::getConvergenceStats().setResidualValue( "Renergy", globalResidualNorm[1] ); + } + else + { + + if( normType == physicsSolverBaseKernels::NormType::Linf ) + { + physicsSolverBaseKernels::LinfResidualNormHelper::computeGlobalNorm( localResidualNorm[0], residualNorm ); + } + else + { + physicsSolverBaseKernels::L2ResidualNormHelper::computeGlobalNorm( localResidualNorm[0], localResidualNormalizer[0], residualNorm ); + } + + GEOS_LOG_LEVEL_RANK_0_NLR( logInfo::ResidualNorm, + GEOS_FMT( " ( R{} ) = ( {:4.2e} )", DLFlowSolverBase::coupledSolverAttributePrefix(), residualNorm )); + BASE::getConvergenceStats().setResidualValue( GEOS_FMT( "R{}", DLFlowSolverBase::coupledSolverAttributePrefix()), residualNorm ); + } + return residualNorm; +} + + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::applySystemSolution( DofManager const & dofManager, + arrayView1d< real64 const > const & localSolution, + real64 const scalingFactor, + real64 const dt, + DomainPartition & domain ) +{ + GEOS_UNUSED_VAR( dt ); + if( m_isThermal ) + { + DofManager::CompMask pressureMask( m_numDofPerCell, 0, 1 ); + DofManager::CompMask temperatureMask( m_numDofPerCell, 1, 2 ); + + dofManager.addVectorToField( localSolution, + BASE::viewKeyStruct::elemDofFieldString(), + flow::pressure::key(), + scalingFactor, + pressureMask ); + + dofManager.addVectorToField( localSolution, + BASE::viewKeyStruct::elemDofFieldString(), + flow::temperature::key(), + scalingFactor, + temperatureMask ); + } + else + { + dofManager.addVectorToField( localSolution, + BASE::viewKeyStruct::elemDofFieldString(), + flow::pressure::key(), + scalingFactor ); + } + + this->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, 
+ string_array const & regionNames ) + { + stdVector< string > fields{ flow::pressure::key() }; + + if( m_isThermal ) + { + fields.emplace_back( flow::temperature::key() ); + } + + FieldIdentifiers fieldsToBeSync; + + fieldsToBeSync.addElementFields( fields, regionNames ); + + CommunicationTools::getInstance().synchronizeFields( fieldsToBeSync, mesh, domain.getNeighbors(), true ); + } ); +} + +template<> +void DLSinglePhaseFVM<>::assembleFluxTerms( real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + string const & dofKey = dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel const & mesh, + string_array const & ) + { + fluxApprox.forAllStencils( mesh, [&]( auto & stencil ) + { + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + + if( m_isThermal ) + { + thermalSinglePhaseFVMKernels:: + FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + else + { + singlePhaseFVMKernels:: + FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + + + } ); + } ); + +} + + +template< > +void DLSinglePhaseFVM< DLSinglePhaseBase >::assembleStabilizedFluxTerms( real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + string const & dofKey = dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel const & mesh, + string_array const & ) + { + fluxApprox.forAllStencils( mesh, [&]( auto & stencil ) + { + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + // No thermal support yet + stabilizedSinglePhaseFVMKernels::FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + + } ); + } ); + +} + + + +// template<> +// void DLSinglePhaseFVM< SinglePhaseProppantBase >::assembleFluxTerms( real64 const dt, +// DomainPartition const & domain, +// DofManager const & dofManager, +// CRSMatrixView< real64, globalIndex const > const & localMatrix, +// arrayView1d< real64 > const & localRhs ) +// { +// 
GEOS_MARK_FUNCTION; + +// NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); +// FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); +// FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + +// string const & dofKey = dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); +// forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, +// MeshLevel const & mesh, +// string_array const & ) +// { +// ElementRegionManager const & elemManager = mesh.getElemManager(); + +// ElementRegionManager::ElementViewAccessor< arrayView1d< globalIndex const > > +// elemDofNumber = elemManager.constructArrayViewAccessor< globalIndex, 1 >( dofKey ); +// elemDofNumber.setName( this->getName() + "/accessors/" + dofKey ); + +// fluxApprox.forStencils< SurfaceElementStencil >( mesh, [&]( auto & stencil ) +// { +// typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + +// typename FluxComputeKernelBase::SinglePhaseFlowAccessors flowAccessors( elemManager, getName() ); +// typename FluxComputeKernelBase::SlurryFluidAccessors fluidAccessors( elemManager, getName() ); +// typename FluxComputeKernelBase::ProppantPermeabilityAccessors permAccessors( elemManager, getName() ); + +// singlePhaseProppantFluxKernels::FaceElementFluxKernel::launch( stencilWrapper, +// dt, +// dofManager.rankOffset(), +// elemDofNumber.toNestedViewConst(), +// flowAccessors.get< fields::ghostRank >(), +// flowAccessors.get< flow::pressure >(), +// flowAccessors.get< flow::gravityCoefficient >(), +// fluidAccessors.get< fields::singlefluid::density >(), +// fluidAccessors.get< fields::singlefluid::dDensity >(), +// flowAccessors.get< flow::mobility >(), +// flowAccessors.get< flow::dMobility >(), +// permAccessors.get< permeability::permeability >(), +// permAccessors.get< permeability::dPerm_dPressure >(), +// permAccessors.get< permeability::dPerm_dDispJump >(), +// permAccessors.get< permeability::permeabilityMultiplier >(), +// this->gravityVector(), +// localMatrix, +// localRhs ); +// } ); +// } ); +// } + +// template< > +// void DLSinglePhaseFVM< SinglePhaseProppantBase >::assembleStabilizedFluxTerms( real64 const dt, +// DomainPartition const & domain, +// DofManager const & dofManager, +// CRSMatrixView< real64, globalIndex const > const & localMatrix, +// arrayView1d< real64 > const & localRhs ) +// { +// GEOS_UNUSED_VAR( dt, domain, dofManager, localMatrix, localRhs ); +// GEOS_ERROR( "Stabilized flux not available with this flow solver" ); +// } + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::assembleEDFMFluxTerms( real64 const GEOS_UNUSED_PARAM ( time_n ), + real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs, + string const & jumpDofKey ) +{ + GEOS_MARK_FUNCTION; + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + string const & dofKey = dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); + + this->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel const & mesh, + 
string_array const & ) + { + fluxApprox.forStencils< CellElementStencilTPFA, EmbeddedSurfaceToCellStencil >( mesh, [&]( auto & stencil ) + { + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + if( m_isThermal ) + { + thermalSinglePhaseFVMKernels:: + FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + else + { + singlePhaseFVMKernels:: + FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + } ); + + // Eventually, EmbeddedSurfaceToCellStencil should be moved here to account for the permeability of the fracture in the matrix-frac + // connection + fluxApprox.forStencils< SurfaceElementStencil >( mesh, [&]( auto & stencil ) + { + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + if( m_isThermal ) + { + thermalSinglePhasePoromechanicsEmbeddedFracturesKernels:: + ConnectorBasedAssemblyKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + jumpDofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + else + { + singlePhasePoromechanicsEmbeddedFracturesKernels:: + ConnectorBasedAssemblyKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + jumpDofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + } ); + } ); + +} + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::assembleHydrofracFluxTerms( real64 const GEOS_UNUSED_PARAM ( time_n ), + real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs, + CRSMatrixView< real64, localIndex const > const & dR_dAper ) +{ + GEOS_MARK_FUNCTION; + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + string const & dofKey = dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); + + + this->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel const & mesh, + string_array const & ) + { + fluxApprox.forStencils< CellElementStencilTPFA, FaceElementToCellStencil >( mesh, [&]( auto & stencil ) + { + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + if( m_isThermal ) + { + thermalSinglePhaseFVMKernels:: + FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } + else + { + singlePhaseFVMKernels:: + FluxComputeKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + } 
+ } ); + + fluxApprox.forStencils< SurfaceElementStencil >( mesh, [&]( auto & stencil ) + { + typename TYPEOFREF( stencil ) ::KernelWrapper stencilWrapper = stencil.createKernelWrapper(); + + if( m_isThermal ) + { + thermalSinglePhasePoromechanicsConformingFracturesKernels:: + ConnectorBasedAssemblyKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView(), + dR_dAper ); + } + else + { + singlePhasePoromechanicsConformingFracturesKernels:: + ConnectorBasedAssemblyKernelFactory::createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + mesh.getElemManager(), + stencilWrapper, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView(), + dR_dAper ); + } + } ); + } ); + + +} + +template< typename BASE > +void +DLSinglePhaseFVM< BASE >::applyBoundaryConditions( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + BASE::applyBoundaryConditions( time_n, dt, domain, dofManager, localMatrix, localRhs ); + if( !BASE::m_keepVariablesConstantDuringInitStep ) + { + applyFaceDirichletBC( time_n, dt, dofManager, domain, localMatrix, localRhs ); + } +} + +namespace +{ +char const faceBcLogMessage[] = + "DLSinglePhaseFVM {}: at time {}s, " + "the <{}> boundary condition '{}' is applied to the face set '{}' in '{}'. " + "\nThe total number of target faces (including ghost faces) is {}. " + "\nNote that if this number is equal to zero, the boundary condition will not be applied on this face set."; + +GEOS_MAYBE_UNUSED char const incompleteBCLogmessage[] = "DLSinglePhaseFVM {}: at time {}, one or more Face boundary conditions are not complete. 
" + "Both pressure and temperature must be specified, one is missing."; + +} + + +template< typename BASE > +void DLSinglePhaseFVM< BASE >::applyFaceDirichletBC( real64 const time_n, + real64 const dt, + DofManager const & dofManager, + DomainPartition & domain, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_MARK_FUNCTION; + + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + string const & dofKey = dofManager.getKey( BASE::viewKeyStruct::elemDofFieldString() ); + + this->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & ) + { + FaceManager & faceManager = mesh.getFaceManager(); + ElementRegionManager const & elemManager = mesh.getElemManager(); + + if( m_isThermal ) + { + std::set< string > pressureSets; + std::set< string > temperatureSets; + // Take BCs defined for "pressure" field and apply values to "facePressure" + fsManager.apply< FaceManager >( time_n + dt, + mesh, + flow::pressure::key(), + [&] ( FieldSpecificationBase const & fs, + string const & setName, + SortedArrayView< localIndex const > const & targetSet, + FaceManager & targetGroup, + string const & ) + { + BoundaryStencil const & stencil = fluxApprox.getStencil< BoundaryStencil >( mesh, setName ); + + if( m_nonlinearSolverParameters.m_numNewtonIterations == 0 ) + { + globalIndex const numTargetFaces = MpiWrapper::sum< globalIndex >( stencil.size() ); + GEOS_LOG_LEVEL_RANK_0_ON_GROUP( logInfo::BoundaryConditions, + GEOS_FMT( faceBcLogMessage, + this->getName(), time_n+dt, fs.getCatalogName(), fs.getName(), + setName, targetGroup.getName(), numTargetFaces ), + fs ); + } + + if( stencil.size() == 0 ) + { + return; + } + + pressureSets.insert( setName ); + // first, evaluate BC to get primary field values (pressure) + fs.applyFieldValue< FieldSpecificationEqual, + parallelDevicePolicy<> >( targetSet, + time_n + dt, + targetGroup, + flow::facePressure::key() ); + } ); + + // Take BCs defined for "temperature" field and apply values to "faceTemperature" + fsManager.apply< FaceManager >( time_n + dt, + mesh, + flow::temperature::key(), + [&] ( FieldSpecificationBase const & fs, + string const & setName, + SortedArrayView< localIndex const > const & targetSet, + FaceManager & targetGroup, + string const & ) + { + BoundaryStencil const & stencil = fluxApprox.getStencil< BoundaryStencil >( mesh, setName ); + + if( m_nonlinearSolverParameters.m_numNewtonIterations == 0 ) + { + globalIndex const numTargetFaces = MpiWrapper::sum< globalIndex >( stencil.size() ); + GEOS_LOG_LEVEL_RANK_0_ON_GROUP( logInfo::BoundaryConditions, GEOS_FMT( faceBcLogMessage, + this->getName(), time_n+dt, fs.getCatalogName(), fs.getName(), + setName, targetGroup.getName(), numTargetFaces ), + fs ); + } + + if( stencil.size() == 0 ) + { + return; + } + + temperatureSets.insert( setName ); + // Specify the bc value of the field + fs.applyFieldValue< FieldSpecificationEqual, + parallelDevicePolicy<> >( targetSet, + time_n + dt, + targetGroup, + flow::faceTemperature::key() ); + + } ); + + GEOS_ERROR_IF( pressureSets != temperatureSets, GEOS_FMT( incompleteBCLogmessage, this->getName(), time_n + dt ) ); 
+ + // Take BCs defined for "temperature" field and apply values to "faceTemperature" + for( auto const & setName : temperatureSets ) + { + BoundaryStencil const & stencil = fluxApprox.getStencil< BoundaryStencil >( mesh, setName ); + BoundaryStencilWrapper const stencilWrapper = stencil.createKernelWrapper(); + + // TODO: currently we just use model from the first cell in this stencil + // since it's not clear how to create fluid kernel wrappers for arbitrary models. + // Can we just use cell properties for an approximate flux computation? + // Then we can forget about capturing the fluid model. + localIndex const er = stencil.getElementRegionIndices()( 0, 0 ); + localIndex const esr = stencil.getElementSubRegionIndices()( 0, 0 ); + ElementSubRegionBase & subRegion = mesh.getElemManager().getRegion( er ).getSubRegion( esr ); + string const & fluidName = subRegion.getReference< string >( BASE::viewKeyStruct::fluidNamesString() ); + SingleFluidBase & fluidBase = subRegion.getConstitutiveModel< SingleFluidBase >( fluidName ); + + thermalSinglePhaseFVMKernels:: + DirichletFluxComputeKernelFactory:: + createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + faceManager, + elemManager, + stencilWrapper, + fluidBase, + dt, + localMatrix, + localRhs ); + } + } + else + { + // Take BCs defined for "pressure" field and apply values to "facePressure" + fsManager.apply< FaceManager >( time_n + dt, + mesh, + flow::pressure::key(), + [&] ( FieldSpecificationBase const & fs, + string const & setName, + SortedArrayView< localIndex const > const & targetSet, + FaceManager & targetGroup, + string const & ) + { + BoundaryStencil const & stencil = fluxApprox.getStencil< BoundaryStencil >( mesh, setName ); + + if( m_nonlinearSolverParameters.m_numNewtonIterations == 0 ) + { + globalIndex const numTargetFaces = MpiWrapper::sum< globalIndex >( stencil.size() ); + GEOS_LOG_LEVEL_RANK_0_ON_GROUP( logInfo::BoundaryConditions, + GEOS_FMT( faceBcLogMessage, + this->getName(), time_n+dt, fs.getCatalogName(), fs.getName(), + setName, targetGroup.getName(), numTargetFaces ), + fs ); + } + + if( stencil.size() == 0 ) + { + return; + } + + // first, evaluate BC to get primary field values (pressure) + fs.applyFieldValue< FieldSpecificationEqual, + parallelDevicePolicy<> >( targetSet, + time_n + dt, + targetGroup, + flow::facePressure::key() ); + + + // TODO: currently we just use model from the first cell in this stencil + // since it's not clear how to create fluid kernel wrappers for arbitrary models. + // Can we just use cell properties for an approximate flux computation? + // Then we can forget about capturing the fluid model. 
+ localIndex const er = stencil.getElementRegionIndices()( 0, 0 ); + localIndex const esr = stencil.getElementSubRegionIndices()( 0, 0 ); + ElementSubRegionBase & subRegion = mesh.getElemManager().getRegion( er ).getSubRegion( esr ); + string const & fluidName = subRegion.getReference< string >( BASE::viewKeyStruct::fluidNamesString() ); + SingleFluidBase & fluidBase = subRegion.getConstitutiveModel< SingleFluidBase >( fluidName ); + + BoundaryStencilWrapper const stencilWrapper = stencil.createKernelWrapper(); + + singlePhaseFVMKernels:: + DirichletFluxComputeKernelFactory:: + createAndLaunch< parallelDevicePolicy<> >( dofManager.rankOffset(), + dofKey, + this->getName(), + faceManager, + elemManager, + stencilWrapper, + fluidBase, + dt, + localMatrix, + localRhs ); + + } ); + } + } ); +} + +// template<> +// void DLSinglePhaseFVM< SinglePhaseProppantBase >::applyAquiferBC( real64 const GEOS_UNUSED_PARAM( time ), +// real64 const GEOS_UNUSED_PARAM( dt ), +// DomainPartition & GEOS_UNUSED_PARAM( domain ), +// DofManager const & GEOS_UNUSED_PARAM( dofManager ), +// CRSMatrixView< real64, globalIndex const > const & GEOS_UNUSED_PARAM( localMatrix ), +// arrayView1d< real64 > const & GEOS_UNUSED_PARAM( localRhs ) ) const +// { +// // Aquifer does not make sense for proppant flow in fractures +// } + +template<> +void DLSinglePhaseFVM<>::applyAquiferBC( real64 const time, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const +{ + GEOS_MARK_FUNCTION; + + FieldSpecificationManager & fsManager = FieldSpecificationManager::getInstance(); + + NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); + FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); + FluxApproximationBase const & fluxApprox = fvManager.getFluxApproximation( m_discretizationName ); + + string const & elemDofKey = dofManager.getKey( viewKeyStruct::elemDofFieldString() ); + + forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & ) + { + ElementRegionManager const & elemManager = mesh.getElemManager(); + ElementRegionManager::ElementViewAccessor< arrayView1d< globalIndex const > > elemDofNumber = + elemManager.constructArrayViewAccessor< globalIndex, 1 >( elemDofKey ); + elemDofNumber.setName( this->getName() + "/accessors/" + elemDofKey ); + + typename FluxComputeKernelBase::SinglePhaseFlowAccessors flowAccessors( elemManager, this->getName() ); + typename FluxComputeKernelBase::SinglePhaseFluidAccessors fluidAccessors( elemManager, this->getName() ); + + fsManager.apply< FaceManager, + AquiferBoundaryCondition >( time + dt, + mesh, + AquiferBoundaryCondition::catalogName(), + [&] ( AquiferBoundaryCondition const & bc, + string const & setName, + SortedArrayView< localIndex const > const &, + FaceManager &, + string const & ) + { + BoundaryStencil const & stencil = fluxApprox.getStencil< BoundaryStencil >( mesh, setName ); + if( stencil.size() == 0 ) + { + return; + } + + AquiferBoundaryCondition::KernelWrapper aquiferBCWrapper = bc.createKernelWrapper(); + real64 const & aquiferDens = bc.getWaterPhaseDensity(); + + singlePhaseFVMKernels::AquiferBCKernel::launch( stencil, + dofManager.rankOffset(), + elemDofNumber.toNestedViewConst(), + flowAccessors.get< fields::ghostRank >(), + aquiferBCWrapper, + aquiferDens, + flowAccessors.get< flow::pressure >(), + 
flowAccessors.get< flow::pressure_n >(), + flowAccessors.get< flow::gravityCoefficient >(), + fluidAccessors.get< fields::singlefluid::density >(), + fluidAccessors.get< fields::singlefluid::dDensity >(), + time, + dt, + localMatrix.toViewConstSizes(), + localRhs.toView() ); + + } ); + } ); + +} + +namespace +{ +// typedef DLSinglePhaseFVM< SinglePhaseProppantBase > SinglePhaseFVMProppant; +// REGISTER_CATALOG_ENTRY( PhysicsSolverBase, SinglePhaseFVMProppant, string const &, Group * const ) +typedef DLSinglePhaseFVM<> DLSinglePhaseFVM; +REGISTER_CATALOG_ENTRY( PhysicsSolverBase, DLSinglePhaseFVM, string const &, Group * const ) +} +} /* namespace geos */ diff --git a/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.hpp b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.hpp new file mode 100644 index 00000000000..db6d96e196c --- /dev/null +++ b/src/coreComponents/physicsSolvers/fluidFlow/DLSinglePhaseFVM.hpp @@ -0,0 +1,220 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file DLSinglePhaseFVM.hpp + */ + +#ifndef GEOS_PHYSICSSOLVERS_FLUIDFLOW_DLSINGLEPHASEFVM_HPP_ +#define GEOS_PHYSICSSOLVERS_FLUIDFLOW_DLSINGLEPHASEFVM_HPP_ + +#include "physicsSolvers/fluidFlow/DLSinglePhaseBase.hpp" +// #include "physicsSolvers/fluidFlow/SinglePhaseBase.hpp" +// #include "physicsSolvers/fluidFlow/SinglePhaseProppantBase.hpp" + +namespace geos +{ + + +/** + * @class DLSinglePhaseFVM + * + * class to perform a single phase finite volume solve + * using only cell-centered variables + * works with both TPFA and MPFA + */ +template< typename BASE = DLSinglePhaseBase > +class DLSinglePhaseFVM : public BASE +{ +public: + + // Aliasing public/protected members/methods of PhysicsSolverBase so we don't + // have to use this->member etc. + using BASE::forDiscretizationOnMeshTargets; + using BASE::m_discretizationName; + using BASE::m_linearSolverParameters; + using BASE::m_nonlinearSolverParameters; + using BASE::m_precond; + + // Aliasing public/protected members/methods of DLFlowSolverBase so we don't + // have to use this->member etc. 
+ using BASE::m_numDofPerCell; + using BASE::m_isThermal; + + /** + * @brief main constructor for Group Objects + * @param name the name of this instantiation of Group in the repository + * @param parent the parent group of this instantiation of Group + */ + DLSinglePhaseFVM( const string & name, + dataRepository::Group * const parent ); + + + /// deleted default constructor + DLSinglePhaseFVM() = delete; + + /// deleted copy constructor + DLSinglePhaseFVM( DLSinglePhaseFVM const & ) = delete; + + /// default move constructor + DLSinglePhaseFVM( DLSinglePhaseFVM && ) = default; + + /// deleted assignment operator + DLSinglePhaseFVM & operator=( DLSinglePhaseFVM const & ) = delete; + + /// deleted move operator + DLSinglePhaseFVM & operator=( DLSinglePhaseFVM && ) = delete; + + /** + * @brief default destructor + */ + virtual ~DLSinglePhaseFVM() override = default; + + /** + * @brief name of the node manager in the object catalog + * @return string that contains the catalog name to generate a new NodeManager object through the object catalog. + */ + static string catalogName() + { + if constexpr ( std::is_same_v< BASE, DLSinglePhaseBase > ) + { + return "DLSinglePhaseFVM"; + } + // else if constexpr ( std::is_same_v< BASE, SinglePhaseProppantBase > ) + // { + // return "SinglePhaseProppantFVM"; + // } + else + { + return BASE::catalogName(); + } + } + + /** + * @copydoc PhysicsSolverBase::getCatalogName() + */ + string getCatalogName() const override { return catalogName(); } + + /** + * @defgroup Solver Interface Function + * + * This function provides the primary interface that is required for derived classes + */ + /**@{*/ + + virtual void + setupDofs( DomainPartition const & domain, + DofManager & dofManager ) const override; + + virtual void + setupSystem( DomainPartition & domain, + DofManager & dofManager, + CRSMatrix< real64, globalIndex > & localMatrix, + ParallelVector & rhs, + ParallelVector & solution, + bool const setSparsity = true ) override; + + virtual std::unique_ptr< PreconditionerBase< LAInterface > > + createPreconditioner( DomainPartition & domain ) const override; + + virtual void + applyBoundaryConditions( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override; + + virtual real64 + calculateResidualNorm( real64 const & time_n, + real64 const & dt, + DomainPartition const & domain, + DofManager const & dofManager, + arrayView1d< real64 const > const & localRhs ) override; + + virtual void + applySystemSolution( DofManager const & dofManager, + arrayView1d< real64 const > const & localSolution, + real64 const scalingFactor, + real64 const dt, + DomainPartition & domain ) override; + virtual void + assembleFluxTerms( real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override; + + virtual void + assembleStabilizedFluxTerms( real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override; + virtual void + assembleEDFMFluxTerms( real64 const time_n, + real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & 
localRhs, + string const & jumpDofKey ) override final; + + virtual void + assembleHydrofracFluxTerms( real64 const time_n, + real64 const dt, + DomainPartition const & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs, + CRSMatrixView< real64, localIndex const > const & dR_dAper ) override final; + + /**@}*/ + + virtual void + applyAquiferBC( real64 const time, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) const override; + + virtual void initializePreSubGroups() override; + +private: + + /** + * @brief Function to perform the application of Dirichlet BCs on faces + * @param time_n current time + * @param dt time step + * @param faceSet degree-of-freedom manager associated with the linear system + * @param domain the domain + * @param matrix the system matrix + * @param rhs the system right-hand side vector + */ + void applyFaceDirichletBC( real64 const time_n, + real64 const dt, + DofManager const & faceSet, + DomainPartition & domain, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ); + + // no data needed here, see DLSinglePhaseBase + +}; + +} /* namespace geos */ + +#endif //GEOS_PHYSICSSOLVERS_FLUIDFLOW_DLSINGLEPHASEFVM_HPP_ diff --git a/src/coreComponents/physicsSolvers/multiphysics/CMakeLists.txt b/src/coreComponents/physicsSolvers/multiphysics/CMakeLists.txt index 2d3a5e031eb..684504f946e 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/CMakeLists.txt +++ b/src/coreComponents/physicsSolvers/multiphysics/CMakeLists.txt @@ -57,6 +57,7 @@ set( multiPhysicsSolvers_headers poromechanicsKernels/ThermalSinglePhasePoromechanicsEFEM_impl.hpp poromechanicsKernels/ThermalSinglePhasePoromechanicsConformingFractures.hpp poromechanicsKernels/ThermalSinglePhasePoromechanicsEmbeddedFractures.hpp + IFENNPoromechanics.hpp SinglePhasePoromechanics.hpp SinglePhasePoromechanicsEmbeddedFractures.hpp SinglePhasePoromechanicsConformingFractures.hpp @@ -76,6 +77,7 @@ set( multiPhysicsSolvers_sources PhaseFieldPoromechanicsSolver.cpp PoromechanicsInitialization.cpp SinglePhasePoromechanics.cpp + IFENNPoromechanics.cpp SinglePhasePoromechanicsEmbeddedFractures.cpp SinglePhasePoromechanicsConformingFractures.cpp SinglePhasePoromechanicsConformingFracturesALM.cpp diff --git a/src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.cpp b/src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.cpp new file mode 100644 index 00000000000..f955ee37382 --- /dev/null +++ b/src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.cpp @@ -0,0 +1,361 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. 
+ * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file IFENNPoromechanics.cpp + */ + +#define GEOS_DISPATCH_VEM /// enables VEM in FiniteElementDispatch + +#include "IFENNPoromechanics.hpp" + +#include "constitutive/fluid/singlefluid/SingleFluidBase.hpp" +#include "linearAlgebra/multiscale/MultiscalePreconditioner.hpp" +#include "linearAlgebra/solvers/BlockPreconditioner.hpp" +#include "linearAlgebra/solvers/SeparateComponentPreconditioner.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/SinglePhasePoromechanicsDamage.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/SinglePhasePoromechanics.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/ThermalSinglePhasePoromechanics.hpp" +#include "physicsSolvers/solidMechanics/SolidMechanicsFields.hpp" +#include "physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.hpp" +#include "physicsSolvers/solidMechanics/kernels/ImplicitSmallStrainQuasiStatic.hpp" +#include "physicsSolvers/solidMechanics/contact/SolidMechanicsLagrangeContact.hpp" +#include "physicsSolvers/solidMechanics/contact/SolidMechanicsAugmentedLagrangianContact.hpp" +#include "physicsSolvers/solidMechanics/contact/SolidMechanicsEmbeddedFractures.hpp" +#include "physicsSolvers/fluidFlow/SinglePhaseHybridFVM.hpp" + +#include "physicsSolvers/multiphysics/poromechanicsKernels/PoromechanicsKernelsDispatchTypeList.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/ThermoPoromechanicsKernelsDispatchTypeList.hpp" +#include "physicsSolvers/multiphysics/poromechanicsKernels/PoromechanicsDamageKernelsDispatchTypeList.hpp" +#include "physicsSolvers/solidMechanics/kernels/SolidMechanicsKernelsDispatchTypeList.hpp" + +namespace geos +{ + +using namespace constitutive; +using namespace dataRepository; +using namespace fields; + +template< typename FLOW_SOLVER, typename MECHANICS_SOLVER > +IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER >::IFENNPoromechanics( const string & name, + Group * const parent ) + : Base( name, parent ), + m_damageFlag() +{ + this->registerWrapper( viewKeyStruct::damageFlagString(), &m_damageFlag ). + setApplyDefaultValue( 0 ). + setInputFlag( InputFlags::OPTIONAL ). 
+ setDescription( "The flag to indicate whether a damage solid model is used" ); + + LinearSolverParameters & linearSolverParameters = this->m_linearSolverParameters.get(); + linearSolverParameters.dofsPerNode = 3; +} + +template< typename FLOW_SOLVER, typename MECHANICS_SOLVER > +void IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER >::setupSystem( DomainPartition & domain, + DofManager & dofManager, + CRSMatrix< real64, globalIndex > & localMatrix, + ParallelVector & rhs, + ParallelVector & solution, + bool const setSparsity ) +{ + if( this->m_precond ) + { + this->m_precond->clear(); + } + + // setup monolithic coupled system + PhysicsSolverBase::setupSystem( domain, dofManager, localMatrix, rhs, solution, setSparsity ); + + if( !this->m_precond && this->m_linearSolverParameters.get().solverType != LinearSolverParameters::SolverType::direct ) + { + this->m_precond = createPreconditioner( domain ); + } +} + +template< typename FLOW_SOLVER, typename MECHANICS_SOLVER > +void IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER >::initializePostInitialConditionsPreSubGroups() +{ + Base::initializePostInitialConditionsPreSubGroups(); + + // Populate sub-block solver parameters for block preconditioner + LinearSolverParameters & linParams = this->m_linearSolverParameters.get(); + linParams.block.resize( 2 ); + linParams.block.subParams[0] = &this->solidMechanicsSolver()->getLinearSolverParameters(); + linParams.block.subParams[1] = &this->flowSolver()->getLinearSolverParameters(); +} + +template<> +void IFENNPoromechanics<>::setMGRStrategy() +{ + LinearSolverParameters & linearSolverParameters = this->m_linearSolverParameters.get(); + + if( linearSolverParameters.preconditionerType != LinearSolverParameters::PreconditionerType::mgr ) + return; + + linearSolverParameters.mgr.separateComponents = true; + linearSolverParameters.dofsPerNode = 3; + + if( dynamic_cast< SinglePhaseHybridFVM * >( this->flowSolver() ) ) + { + if( this->m_isThermal ) + { + GEOS_ERROR( GEOS_FMT( "{}: MGR strategy is not implemented for thermal {}/{}", + this->getName(), this->getCatalogName(), this->flowSolver()->getCatalogName() )); + } + else + { + linearSolverParameters.mgr.strategy = LinearSolverParameters::MGR::StrategyType::hybridSinglePhasePoromechanics; + } + } + else + { + if( this->m_isThermal ) + { + linearSolverParameters.mgr.strategy = LinearSolverParameters::MGR::StrategyType::thermalSinglePhasePoromechanics; + } + else + { + linearSolverParameters.mgr.strategy = LinearSolverParameters::MGR::StrategyType::singlePhasePoromechanics; + } + } + GEOS_LOG_LEVEL_RANK_0( logInfo::LinearSolver, + GEOS_FMT( "{}: MGR strategy set to {}", getName(), + EnumStrings< LinearSolverParameters::MGR::StrategyType >::toString( linearSolverParameters.mgr.strategy ))); +} + +// template<> +// void IFENNPoromechanics< SinglePhaseReservoirAndWells<>, SolidMechanicsLagrangianFEM >::setMGRStrategy() +// { +// // same as SinglePhaseReservoirAndWells< IFENNPoromechanics<> >::setMGRStrategy + +// LinearSolverParameters & linearSolverParameters = m_linearSolverParameters.get(); + +// if( linearSolverParameters.preconditionerType != LinearSolverParameters::PreconditionerType::mgr ) +// return; + +// linearSolverParameters.mgr.separateComponents = true; +// linearSolverParameters.dofsPerNode = 3; + +// if( dynamic_cast< SinglePhaseHybridFVM * >( this->flowSolver() ) ) +// { +// GEOS_ERROR( GEOS_FMT( "{}: MGR strategy is not implemented for poromechanics {}/{}", +// this->getName(), this->getCatalogName(), 
this->flowSolver()->getCatalogName())); +// } +// else +// { +// linearSolverParameters.mgr.strategy = LinearSolverParameters::MGR::StrategyType::singlePhasePoromechanicsReservoirFVM; +// } +// GEOS_LOG_LEVEL_RANK_0( logInfo::LinearSolverConfiguration, +// GEOS_FMT( "{}: MGR strategy set to {}", this->getName(), +// EnumStrings< LinearSolverParameters::MGR::StrategyType >::toString( linearSolverParameters.mgr.strategy ))); +// } + +// template<> +// void SinglePhasePoromechanics< SinglePhaseReservoirAndWells<>, SolidMechanicsLagrangianFEM >::assembleSystem( real64 const time_n, +// real64 const dt, +// DomainPartition & domain, +// DofManager const & dofManager, +// CRSMatrixView< real64, globalIndex const > const & localMatrix, +// arrayView1d< real64 > const & localRhs ) +// { +// GEOS_MARK_FUNCTION; + +// Base::assembleSystem( time_n, dt, domain, dofManager, localMatrix, localRhs ); + +// // TODO: Check removal of the whole method +// // // assemble well contributions +// // flowSolver()->wellSolver()->assembleSystem( time_n, dt, domain, dofManager, localMatrix, localRhs ); +// // flowSolver()->assembleCouplingTerms( time_n, dt, domain, dofManager, localMatrix, localRhs ); +// } + +template< typename FLOW_SOLVER, typename MECHANICS_SOLVER > +void IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER >::assembleElementBasedTerms( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) +{ + GEOS_UNUSED_VAR( time_n ); + GEOS_UNUSED_VAR( dt ); + + real64 poromechanicsMaxForce = 0.0; + real64 mechanicsMaxForce = 0.0; + + // step 1: apply the full poromechanics coupling on the target regions on the poromechanics solver + + set< string > poromechanicsRegionNames; + + this->template forDiscretizationOnMeshTargets<>( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + poromechanicsRegionNames.insert( regionNames.begin(), regionNames.end() ); + + string const flowDofKey = dofManager.getKey( DLSinglePhaseBase::viewKeyStruct::elemDofFieldString() ); + + if( m_damageFlag ) + { + poromechanicsMaxForce = + this->template assemblyLaunch< PoromechanicsDamageKernelsDispatchTypeList, + poromechanicsDamageKernels::SinglePhasePoromechanicsDamageKernelFactory >( mesh, + dofManager, + regionNames, + viewKeyStruct::porousMaterialNamesString(), + localMatrix, + localRhs, + dt, + flowDofKey, + this->m_performStressInitialization, + FlowSolverBase::viewKeyStruct::fluidNamesString() ); + } + else if( this->m_isThermal ) + { + poromechanicsMaxForce = + this->template assemblyLaunch< ThermoPoromechanicsKernelsDispatchTypeList, + thermalPoromechanicsKernels::ThermalSinglePhasePoromechanicsKernelFactory >( mesh, + dofManager, + regionNames, + viewKeyStruct::porousMaterialNamesString(), + localMatrix, + localRhs, + dt, + flowDofKey, + this->m_performStressInitialization, + FlowSolverBase::viewKeyStruct::fluidNamesString() ); + } + else + { + poromechanicsMaxForce = + this->template assemblyLaunch< PoromechanicsKernelsDispatchTypeList, + poromechanicsKernels::SinglePhasePoromechanicsKernelFactory >( mesh, + dofManager, + regionNames, + viewKeyStruct::porousMaterialNamesString(), + localMatrix, + localRhs, + dt, + flowDofKey, + this->m_performStressInitialization, + FlowSolverBase::viewKeyStruct::fluidNamesString() ); + } + } ); + + // step 2: apply mechanics solver on its target regions not included in the 
poromechanics solver target regions + + this->solidMechanicsSolver()->forDiscretizationOnMeshTargets( domain.getMeshBodies(), [&] ( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + // collect the target region of the mechanics solver not included in the poromechanics target regions + string_array filteredRegionNames; + filteredRegionNames.reserve( regionNames.size() ); + for( string const & regionName : regionNames ) + { + // if the mechanics target region is not included in the poromechanics target region, save the string + if( poromechanicsRegionNames.count( regionName ) == 0 ) + { + filteredRegionNames.emplace_back( regionName ); + } + } + + // if the array is empty, the mechanics and poromechanics solver target regions overlap perfectly, there is nothing to do + if( filteredRegionNames.empty() ) + { + return; + } + + mechanicsMaxForce = + this->template assemblyLaunch< SolidMechanicsKernelsDispatchTypeList, + solidMechanicsLagrangianFEMKernels::QuasiStaticFactory >( mesh, + dofManager, + filteredRegionNames, + SolidMechanicsLagrangianFEM::viewKeyStruct::solidMaterialNamesString(), + localMatrix, + localRhs, + dt ); + } ); + + this->solidMechanicsSolver()->applyContactConstraint( dofManager, domain, localMatrix, localRhs ); + this->solidMechanicsSolver()->getMaxForce() = LvArray::math::max( mechanicsMaxForce, poromechanicsMaxForce ); +} + +template< typename FLOW_SOLVER, typename MECHANICS_SOLVER > +std::unique_ptr< PreconditionerBase< LAInterface > > +IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER >::createPreconditioner( DomainPartition & domain ) const +{ + LinearSolverParameters const & linParams = this->m_linearSolverParameters.get(); + switch( linParams.preconditionerType ) + { + case LinearSolverParameters::PreconditionerType::block: + { + auto precond = std::make_unique< BlockPreconditioner< LAInterface > >( linParams.block ); + + precond->setupBlock( linParams.block.order[toUnderlying( Base::SolverType::SolidMechanics )], + { { solidMechanics::totalDisplacement::key(), { 3, true } } }, + this->solidMechanicsSolver()->createPreconditioner( domain ) ); + precond->setupBlock( linParams.block.order[toUnderlying( Base::SolverType::Flow )], + { { DLSinglePhaseBase::viewKeyStruct::elemDofFieldString(), { 1, true } } }, + this->flowSolver()->createPreconditioner( domain ) ); + + return precond; + } + case LinearSolverParameters::PreconditionerType::multiscale: + { + return std::make_unique< MultiscalePreconditioner< LAInterface > >( linParams, domain ); + } + default: + { + return PhysicsSolverBase::createPreconditioner( domain ); + } + } +} + +template< typename FLOW_SOLVER, typename MECHANICS_SOLVER > +void IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER >::updateBulkDensity( ElementSubRegionBase & subRegion ) +{ + // get the fluid model (to access fluid density) + string const & fluidName = subRegion.getReference< string >( FlowSolverBase::viewKeyStruct::fluidNamesString() ); + SingleFluidBase const & fluid = this->template getConstitutiveModel< SingleFluidBase >( subRegion, fluidName ); + + // get the solid model (to access porosity and solid density) + string const & solidName = subRegion.getReference< string >( viewKeyStruct::porousMaterialNamesString() ); + CoupledSolidBase const & solid = this->template getConstitutiveModel< CoupledSolidBase >( subRegion, solidName ); + + // update the bulk density + poromechanicsKernels:: + SinglePhaseBulkDensityKernelFactory:: + createAndLaunch< parallelDevicePolicy<> >( fluid, + solid, + subRegion ); +} + 
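+// Explicit instantiations for the flow/mechanics solver combinations currently exercised by the IFENN integration (the reservoir-and-wells variants below remain commented out).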
+template class IFENNPoromechanics<>; +template class IFENNPoromechanics< DLSinglePhaseBase, SolidMechanicsLagrangeContact >; +template class IFENNPoromechanics< DLSinglePhaseBase, SolidMechanicsEmbeddedFractures >; +// template class IFENNPoromechanics< SinglePhaseReservoirAndWells<> >; +// template class IFENNPoromechanics< SinglePhaseReservoirAndWells<>, SolidMechanicsLagrangeContact >; +//template class IFENNPoromechanics< SinglePhaseReservoirAndWells<>, SolidMechanicsEmbeddedFractures >; + +namespace +{ +// typedef IFENNPoromechanics< SinglePhaseReservoirAndWells<> > IFENNReservoirPoromechanics; +// REGISTER_CATALOG_ENTRY( PhysicsSolverBase, IFENNReservoirPoromechanics, string const &, Group * const ) +typedef IFENNPoromechanics<> IFENNPoromechanicsDefault; +REGISTER_CATALOG_ENTRY( PhysicsSolverBase, IFENNPoromechanicsDefault, string const &, Group * const ) +} + +} /* namespace geos */ diff --git a/src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.hpp b/src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.hpp new file mode 100644 index 00000000000..1347a620fc4 --- /dev/null +++ b/src/coreComponents/physicsSolvers/multiphysics/IFENNPoromechanics.hpp @@ -0,0 +1,163 @@ +/* + * ------------------------------------------------------------------------------------------------------------ + * SPDX-License-Identifier: LGPL-2.1-only + * + * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC + * Copyright (c) 2018-2024 TotalEnergies + * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University + * Copyright (c) 2023-2024 Chevron + * Copyright (c) 2019- GEOS/GEOSX Contributors + * All rights reserved + * + * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details. + * ------------------------------------------------------------------------------------------------------------ + */ + +/** + * @file IFENNPoromechanics.hpp + */ + +#ifndef GEOS_PHYSICSSOLVERS_MULTIPHYSICS_IFENNPOROMECHANICS_HPP_ +#define GEOS_PHYSICSSOLVERS_MULTIPHYSICS_IFENNPOROMECHANICS_HPP_ + +#include "physicsSolvers/multiphysics/PoromechanicsSolver.hpp" +#include "physicsSolvers/fluidFlow/DLSinglePhaseBase.hpp" +// #include "physicsSolvers/multiphysics/SinglePhaseReservoirAndWells.hpp" + +namespace geos +{ + +template< typename FLOW_SOLVER = DLSinglePhaseBase, typename MECHANICS_SOLVER = SolidMechanicsLagrangianFEM > +class IFENNPoromechanics : public PoromechanicsSolver< FLOW_SOLVER, MECHANICS_SOLVER > +{ +public: + + using Base = PoromechanicsSolver< FLOW_SOLVER, MECHANICS_SOLVER >; + using Base::m_solvers; + using Base::m_dofManager; + using Base::m_localMatrix; + using Base::m_rhs; + using Base::m_solution; + using Base::m_stabilizationType; + using Base::m_stabilizationRegionNames; + using Base::m_stabilizationMultiplier; + using Base::updateBulkDensity; + + /** + * @brief main constructor for IFENNPoromechanics objects + * @param name the name of this instantiation of IFENNPoromechanics in the repository + * @param parent the parent group of this instantiation of IFENNPoromechanics + */ + IFENNPoromechanics( const string & name, + dataRepository::Group * const parent ); + + /** + * @brief name of the node manager in the object catalog + * @return string that contains the catalog name to generate a new IFENNPoromechanics object through the object catalog. 
+ */ + static string catalogName() + { + if constexpr ( std::is_same_v< FLOW_SOLVER, DLSinglePhaseBase > ) // special case + { + return "IFENNPoromechanics"; + } + else // default + { + return FLOW_SOLVER::catalogName() + "IFENNPoromechanics"; + } + } + + /** + * @copydoc PhysicsSolverBase::getCatalogName() + */ + string getCatalogName() const override { return catalogName(); } + + /** + * @defgroup Solver Interface Functions + * + * These functions provide the primary interface that is required for derived classes + */ + /**@{*/ + + virtual void setupSystem( DomainPartition & domain, + DofManager & dofManager, + CRSMatrix< real64, globalIndex > & localMatrix, + ParallelVector & rhs, + ParallelVector & solution, + bool const setSparsity = true ) override; + + virtual std::unique_ptr< PreconditionerBase< LAInterface > > + createPreconditioner( DomainPartition & domain ) const override; + + virtual void assembleSystem( real64 const time, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override + { Base::assembleSystem( time, dt, domain, dofManager, localMatrix, localRhs ); } + + virtual void assembleElementBasedTerms( real64 const time_n, + real64 const dt, + DomainPartition & domain, + DofManager const & dofManager, + CRSMatrixView< real64, globalIndex const > const & localMatrix, + arrayView1d< real64 > const & localRhs ) override; + + /**@}*/ + + struct viewKeyStruct : Base::viewKeyStruct + { + /// Flag to indicate if it is the phase-field formulation + constexpr static char const * damageFlagString() { return "damageFlag"; } + }; + +protected: + + virtual void initializePostInitialConditionsPreSubGroups() override; + + virtual void setMGRStrategy() override + { + if( this->m_linearSolverParameters.get().preconditionerType == LinearSolverParameters::PreconditionerType::mgr ) + GEOS_ERROR( GEOS_FMT( "{}: MGR strategy is not implemented for {}", this->getName(), this->getCatalogName())); + } + + virtual void mapSolutionBetweenSolvers( DomainPartition & domain, integer const solverType ) override + { + GEOS_MARK_FUNCTION; + + Base::mapSolutionBetweenSolvers( domain, solverType ); + + /// After the solid mechanics solver + if( solverType == static_cast< integer >( Base::SolverType::SolidMechanics ) + && !this->m_performStressInitialization ) // do not update during poromechanics initialization + { + this->template forDiscretizationOnMeshTargets<>( domain.getMeshBodies(), [&]( string const &, + MeshLevel & mesh, + string_array const & regionNames ) + { + + mesh.getElemManager().forElementSubRegions< CellElementSubRegion >( regionNames, [&]( localIndex const, + auto & subRegion ) + { + // update mass after porosity change due to mechanics solve + this->flowSolver()->updateMass( subRegion ); + } ); + } ); + } + } + + /** + * @brief Helper function to recompute the bulk density + * @param[in] subRegion the element subRegion + */ + virtual void updateBulkDensity( ElementSubRegionBase & subRegion ) override; + + virtual string getFlowDofKey() const override { return DLSinglePhaseBase::viewKeyStruct::elemDofFieldString(); } + + integer m_damageFlag; +}; + +} /* namespace geos */ + +#endif /* GEOS_PHYSICSSOLVERS_MULTIPHYSICS_IFENNPOROMECHANICS_HPP_ */
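
Note on the templated catalog-name pattern used above: both DLSinglePhaseFVM< BASE > and IFENNPoromechanics< FLOW_SOLVER, MECHANICS_SOLVER > select their catalog name at compile time with if constexpr, so the default instantiation registers under its own name while any other instantiation derives its name from the base solver. The following is only an illustrative, self-contained C++17 sketch of that pattern; the class names (DefaultFlowBase, OtherFlowBase, TemplatedSolver) are hypothetical, it is not part of the patch, and it does not use any GEOS API.

// Illustrative sketch only: mirrors the if-constexpr catalogName() pattern.
#include <iostream>
#include <string>
#include <type_traits>

struct DefaultFlowBase
{
  static std::string catalogName() { return "DefaultFlowBase"; }
};

struct OtherFlowBase
{
  static std::string catalogName() { return "OtherFlowBase"; }
};

template< typename BASE = DefaultFlowBase >
struct TemplatedSolver
{
  // The default instantiation registers under its own dedicated name;
  // any other base derives a decorated name from the base solver.
  static std::string catalogName()
  {
    if constexpr( std::is_same_v< BASE, DefaultFlowBase > )
    {
      return "TemplatedSolver";
    }
    else
    {
      return BASE::catalogName() + "TemplatedSolver";
    }
  }
};

int main()
{
  std::cout << TemplatedSolver<>::catalogName() << "\n";                // prints "TemplatedSolver"
  std::cout << TemplatedSolver< OtherFlowBase >::catalogName() << "\n"; // prints "OtherFlowBaseTemplatedSolver"
  return 0;
}

Built with -std=c++17, the two lines print "TemplatedSolver" and "OtherFlowBaseTemplatedSolver", analogous to how the default DLSinglePhaseFVM<> and IFENNPoromechanics<> instantiations register under their own names in the patch.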