diff --git a/.github/workflows/schedule.yml b/.github/workflows/schedule.yml deleted file mode 100644 index cbec279a5..000000000 --- a/.github/workflows/schedule.yml +++ /dev/null @@ -1,21 +0,0 @@ -name: Weekly tests - -on: - schedule: - # Scheduled for 0330 UTC on Monday mornings to detect bitrot - - cron: '30 3 * * 1' - -jobs: - test_main: - uses: ./.github/workflows/test.yml - with: - source_ref: main - firedrake_docker_version: dev-release - secrets: inherit - - test_future: - uses: ./.github/workflows/test.yml - with: - source_ref: future - firedrake_docker_version: dev-main - secrets: inherit diff --git a/.github/workflows/schedule_future.yml b/.github/workflows/schedule_future.yml new file mode 100644 index 000000000..e3a464526 --- /dev/null +++ b/.github/workflows/schedule_future.yml @@ -0,0 +1,13 @@ +name: Weekly future tests + +on: + schedule: + # Scheduled for 0430 UTC on Monday mornings to detect bitrot + - cron: '30 4 * * 1' +jobs: + test_future: + uses: ./.github/workflows/test.yml + with: + source_ref: future + firedrake_docker_version: dev-main + secrets: inherit diff --git a/.github/workflows/schedule_main.yml b/.github/workflows/schedule_main.yml new file mode 100644 index 000000000..dba9d6401 --- /dev/null +++ b/.github/workflows/schedule_main.yml @@ -0,0 +1,13 @@ +name: Weekly main tests + +on: + schedule: + # Scheduled for 0130 UTC on Monday mornings to detect bitrot + - cron: '30 1 * * 1' +jobs: + test_main: + uses: ./.github/workflows/test.yml + with: + source_ref: main + firedrake_docker_version: dev-release + secrets: inherit diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 988f06b4b..ac416b8a0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -87,6 +87,8 @@ jobs: pip install --no-binary netCDF4 --no-build-isolation netCDF4 - name: Run tests (nprocs = 1) + # Run even if earlier tests failed + if: success() || steps.install-two.conclusion == 'success' run: | . 
venv-gusto/bin/activate : # Use pytest-xdist here so we can have a single collated output (not possible @@ -95,7 +97,6 @@ jobs: timeout-minutes: 60 - name: Run tests (nprocs = 2) - # Run even if earlier tests failed if: success() || steps.install-two.conclusion == 'success' run: | . venv-gusto/bin/activate @@ -120,7 +121,7 @@ jobs: uses: actions/upload-artifact@v4 if: success() || steps.install-two.conclusion == 'success' with: - name: pytest-logs + name: pytest-logs-${{ inputs.firedrake_docker_version }} path: pytest_*.log retention-days: 5 @@ -128,7 +129,7 @@ jobs: uses: actions/upload-artifact@v4 if: success() || steps.install-two.conclusion == 'success' with: - name: gusto-logs + name: gusto-logs-${{ inputs.firedrake_docker_version }} path: gusto*.log retention-days: 5 diff --git a/README.md b/README.md index 5b0ff8334..d272e3028 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,19 @@ Gusto can produce output in two formats: - VTU files, which can be viewed with the [Paraview](https://www.paraview.org/) software - netCDF files, which has data that can be plotted using standard python packages such as matplotlib. We suggest using the [tomplot](https://github.com/tommbendall/tomplot) Python library, which contains several routines to simplify the plotting of Gusto output. +## Working Practices + +Gusto's default development happens on the `main` branch, which is fixed to the latest release of Firedrake. +We also maintain a `future` branch which builds from the head of Firedrake's `main` branch. +Contributions to Gusto's `main` branch must pass our Continuous Integration tests. 
+ +The relationship between `future` and `main` is as follows: +- whenever there is a new release of Firedrake, `future` should be merged into `main` *and* `main` should be merged into `future` so that the two are in-sync +- otherwise, `future` should not be merged into `main` +- we *may* choose to merge `main` into `future` out-of-cycle of Firedrake releases, which might be desirable after we fix bugs or make significant API changes + +We also use some tests with "KGOs" (Known Good Output), which are used to detect changes in science. These can be regenerated using the `integration-tests/data/update_kgos.py` script. + ## Website For more information, please see our [website](https://www.firedrakeproject.org/gusto/), and please do get in touch via the Gusto channel on the Firedrake project [Slack workspace](https://firedrakeproject.slack.com/). diff --git a/examples/boussinesq/skamarock_klemp_compressible.py b/examples/boussinesq/skamarock_klemp_compressible.py index 5fb9ce075..28508fa37 100644 --- a/examples/boussinesq/skamarock_klemp_compressible.py +++ b/examples/boussinesq/skamarock_klemp_compressible.py @@ -15,8 +15,7 @@ from gusto import ( Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, SUPGOptions, Divergence, Perturbation, CourantNumber, - BoussinesqParameters, BoussinesqEquations, BoussinesqSolver, - boussinesq_hydrostatic_balance + BoussinesqParameters, BoussinesqEquations, boussinesq_hydrostatic_balance ) skamarock_klemp_compressible_bouss_defaults = { @@ -90,13 +89,9 @@ def skamarock_klemp_compressible_bouss( DGUpwind(eqns, "b", ibp=b_opts.ibp) ] - # Linear solver - linear_solver = BoussinesqSolver(eqns) - # Time stepper stepper = SemiImplicitQuasiNewton( - eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver + eqns, io, transported_fields, transport_methods ) # ------------------------------------------------------------------------ # diff --git 
a/examples/boussinesq/skamarock_klemp_incompressible.py b/examples/boussinesq/skamarock_klemp_incompressible.py index 8aa3d7265..c9f2bc1c5 100644 --- a/examples/boussinesq/skamarock_klemp_incompressible.py +++ b/examples/boussinesq/skamarock_klemp_incompressible.py @@ -15,8 +15,7 @@ from gusto import ( Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, SUPGOptions, Divergence, Perturbation, CourantNumber, - BoussinesqParameters, BoussinesqEquations, BoussinesqSolver, - boussinesq_hydrostatic_balance + BoussinesqParameters, BoussinesqEquations, boussinesq_hydrostatic_balance ) skamarock_klemp_incompressible_bouss_defaults = { @@ -87,13 +86,9 @@ def skamarock_klemp_incompressible_bouss( DGUpwind(eqns, "b", ibp=b_opts.ibp) ] - # Linear solver - linear_solver = BoussinesqSolver(eqns) - - # Time stepper + # Timestepper - pressure p is diagnostic stepper = SemiImplicitQuasiNewton( - eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver + eqns, io, transported_fields, transport_methods, solver_prognostics=["u", "b"] ) # ------------------------------------------------------------------------ # diff --git a/examples/compressible_euler/dcmip_3_1_gravity_wave.py b/examples/compressible_euler/dcmip_3_1_gravity_wave.py index 5953eeed9..12381a3c4 100644 --- a/examples/compressible_euler/dcmip_3_1_gravity_wave.py +++ b/examples/compressible_euler/dcmip_3_1_gravity_wave.py @@ -15,7 +15,7 @@ from gusto import ( Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, SUPGOptions, lonlatr_from_xyz, CompressibleParameters, - CompressibleEulerEquations, CompressibleSolver, ZonalComponent, + CompressibleEulerEquations, ZonalComponent, compressible_hydrostatic_balance, Perturbation, GeneralCubedSphereMesh, SubcyclingOptions ) @@ -105,13 +105,9 @@ def dcmip_3_1_gravity_wave( DGUpwind(eqns, field) for field in ["u", "rho", "theta"] ] - # Linear solver - linear_solver = CompressibleSolver(eqns) - 
# Time stepper stepper = SemiImplicitQuasiNewton( - eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver + eqns, io, transported_fields, transport_methods ) # ------------------------------------------------------------------------ # diff --git a/examples/compressible_euler/dry_bryan_fritsch.py b/examples/compressible_euler/dry_bryan_fritsch.py index 81e2312b8..bd3222ca7 100644 --- a/examples/compressible_euler/dry_bryan_fritsch.py +++ b/examples/compressible_euler/dry_bryan_fritsch.py @@ -15,8 +15,7 @@ from gusto import ( Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, RecoverySpaces, BoundaryMethod, Perturbation, CompressibleParameters, - CompressibleEulerEquations, CompressibleSolver, - compressible_hydrostatic_balance + CompressibleEulerEquations, compressible_hydrostatic_balance ) dry_bryan_fritsch_defaults = { @@ -81,8 +80,7 @@ def dry_bryan_fritsch( io = IO(domain, output, diagnostic_fields=diagnostic_fields) # Transport schemes -- set up options for using recovery wrapper - boundary_methods = {'DG': BoundaryMethod.taylor, - 'HDiv': BoundaryMethod.taylor} + boundary_methods = {'DG': BoundaryMethod.taylor} recovery_spaces = RecoverySpaces( domain, boundary_methods, use_vector_spaces=True @@ -102,13 +100,10 @@ def dry_bryan_fritsch( DGUpwind(eqns, field) for field in ["u", "rho", "theta"] ] - # Linear solver - linear_solver = CompressibleSolver(eqns) - # Time stepper stepper = SemiImplicitQuasiNewton( eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver + tau_values={'rho': 1.0, 'theta': 1.0} ) # ------------------------------------------------------------------------ # diff --git a/examples/compressible_euler/schaer_mountain.py b/examples/compressible_euler/schaer_mountain.py index 409fddb60..73df3419e 100644 --- a/examples/compressible_euler/schaer_mountain.py +++ b/examples/compressible_euler/schaer_mountain.py @@ -15,7 +15,7 @@ SpatialCoordinate, exp, pi, cos, Function, Mesh, 
Constant ) from gusto import ( - Domain, CompressibleParameters, CompressibleSolver, logger, + Domain, CompressibleParameters, logger, OutputParameters, IO, SSPRK3, DGUpwind, SemiImplicitQuasiNewton, compressible_hydrostatic_balance, SpongeLayerParameters, Exner, ZComponent, Perturbation, SUPGOptions, TrapeziumRule, MaxKernel, MinKernel, @@ -97,11 +97,11 @@ def schaer_mountain( # Equation parameters = CompressibleParameters(mesh, g=g, cp=cp) - sponge = SpongeLayerParameters( + sponge_params = SpongeLayerParameters( mesh, H=domain_height, z_level=domain_height-sponge_depth, mubar=mu_dt/dt ) eqns = CompressibleEulerEquations( - domain, parameters, sponge_options=sponge, u_transport_option=u_eqn_type + domain, parameters, sponge_options=sponge_params, u_transport_option=u_eqn_type ) # I/O @@ -134,14 +134,11 @@ def schaer_mountain( DGUpwind(eqns, "theta", ibp=theta_opts.ibp) ] - # Linear solver - tau_values = {'rho': 1.0, 'theta': 1.0} - linear_solver = CompressibleSolver(eqns, alpha, tau_values=tau_values) - # Time stepper + tau_values = {'rho': 1.0, 'theta': 1.0} stepper = SemiImplicitQuasiNewton( eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, alpha=alpha, spinup_steps=spinup_steps + alpha=alpha, tau_values=tau_values, spinup_steps=spinup_steps ) # ------------------------------------------------------------------------ # diff --git a/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py b/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py index d4e2fd3c1..ac7bfd40d 100644 --- a/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py +++ b/examples/compressible_euler/skamarock_klemp_nonhydrostatic.py @@ -13,21 +13,19 @@ """ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter -from petsc4py import PETSc -PETSc.Sys.popErrorHandler() import itertools from firedrake import ( as_vector, SpatialCoordinate, PeriodicIntervalMesh, ExtrudedMesh, exp, sin, - Function, pi, COMM_WORLD, sqrt + PETSc, 
Function, pi, COMM_WORLD, sqrt ) import numpy as np from gusto import ( - Domain, IO, OutputParameters, TRBDF2QuasiNewton, SemiImplicitQuasiNewton, SSPRK3, - DGUpwind, logger, SUPGOptions, Perturbation, CompressibleParameters, + Domain, IO, OutputParameters, TRBDF2QuasiNewton, SemiImplicitQuasiNewton, + DGUpwind, logger, EmbeddedDGOptions, Perturbation, CompressibleParameters, CompressibleEulerEquations, HydrostaticCompressibleEulerEquations, - compressible_hydrostatic_balance, RungeKuttaFormulation, CompressibleSolver, - hydrostatic_parameters, SubcyclingOptions, + compressible_hydrostatic_balance, RungeKuttaFormulation, SubcyclingOptions, SSPRK3 ) +PETSc.Sys.popErrorHandler() skamarock_klemp_nonhydrostatic_defaults = { 'ncolumns': 150, @@ -70,6 +68,11 @@ def skamarock_klemp_nonhydrostatic( element_order = 1 + if timestepper == 'TR-BDF2': + gamma = (1-sqrt(2)/2) + else: + alpha = 0.5 + # ------------------------------------------------------------------------ # # Set up model objects # ------------------------------------------------------------------------ # @@ -94,6 +97,11 @@ def skamarock_klemp_nonhydrostatic( # Adjust default directory name if hydrostatic and dirname == skamarock_klemp_nonhydrostatic_defaults['dirname']: dirname = f'hyd_switch_{dirname}' + if timestepper == 'TR-BDF2' and ( + dirname == skamarock_klemp_nonhydrostatic_defaults['dirname'] + or dirname == f'hyd_switch_{skamarock_klemp_nonhydrostatic_defaults["dirname"]}' + ): + dirname = f'{dirname}_trbdf2' # Dumping point data using legacy PointDataOutput is not supported in parallel if COMM_WORLD.size == 1: @@ -116,11 +124,10 @@ def skamarock_klemp_nonhydrostatic( io = IO(domain, output, diagnostic_fields=diagnostic_fields) # Transport schemes - theta_opts = SUPGOptions() - if timestepper == 'SIQN': - subcycling_options = SubcyclingOptions(subcycle_by_courant=0.25) - else: - subcycling_options = None + # We include subcycling here for test coverage, + # it is not necessary to subcycle for this 
test case! + subcycling_options = SubcyclingOptions(subcycle_by_courant=0.25) + theta_opts = EmbeddedDGOptions() transported_fields = [ SSPRK3(domain, "u", subcycling_options=subcycling_options), SSPRK3( @@ -135,38 +142,26 @@ def skamarock_klemp_nonhydrostatic( transport_methods = [ DGUpwind(eqns, "u"), DGUpwind(eqns, "rho", advective_then_flux=True), - DGUpwind(eqns, "theta", ibp=theta_opts.ibp) + DGUpwind(eqns, "theta") ] # Linear solver - if hydrostatic: - if timestepper == 'TR-BDF2': - raise ValueError('Hydrostatic equations not implmented for TR-BDF2') - - linear_solver = CompressibleSolver( - eqns, solver_parameters=hydrostatic_parameters, - overwrite_solver_parameters=True - ) - else: - linear_solver = CompressibleSolver(eqns) + # The use of advective-then-flux formulation and 2x2 Quasi-Newton iterations + # requires tau values to take implicit values for rho and theta + tau_values = {'rho': 1.0, 'theta': 1.0} + if hydrostatic and timestepper == 'TR-BDF2': + raise ValueError('Hydrostatic equations not implmented for TR-BDF2') # Time stepper if timestepper == 'TR-BDF2': - gamma = (1-sqrt(2)/2) - gamma2 = (1 - 2*float(gamma))/(2 - 2*float(gamma)) - tr_solver = CompressibleSolver(eqns, alpha=gamma) - bdf_solver = CompressibleSolver(eqns, alpha=gamma2) stepper = TRBDF2QuasiNewton( eqns, io, transported_fields, transport_methods, - gamma=gamma, - tr_solver=tr_solver, - bdf_solver=bdf_solver + gamma=gamma, tau_values_tr=tau_values, tau_values_bdf=tau_values ) elif timestepper == 'SIQN': stepper = SemiImplicitQuasiNewton( - eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver + eqns, io, transported_fields, transport_methods, alpha=alpha, tau_values=tau_values ) # ------------------------------------------------------------------------ # # Initial conditions @@ -269,7 +264,7 @@ def skamarock_klemp_nonhydrostatic( default=skamarock_klemp_nonhydrostatic_defaults['hydrostatic'] ) parser.add_argument( - 'timestepper', + '--timestepper', 
help='Which time stepper to use, takes SIQN or TR-BDF2', type=str, choices=['SIQN', 'TR-BDF2'], diff --git a/examples/compressible_euler/straka_bubble.py b/examples/compressible_euler/straka_bubble.py index a710a1067..0c2a9c96a 100644 --- a/examples/compressible_euler/straka_bubble.py +++ b/examples/compressible_euler/straka_bubble.py @@ -16,7 +16,7 @@ Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, SUPGOptions, CourantNumber, Perturbation, DiffusionParameters, InteriorPenaltyDiffusion, BackwardEuler, - CompressibleParameters, CompressibleEulerEquations, CompressibleSolver, + CompressibleParameters, CompressibleEulerEquations, compressible_hydrostatic_balance ) @@ -101,9 +101,6 @@ def straka_bubble( DGUpwind(eqns, "theta", ibp=theta_opts.ibp) ] - # Linear solver - linear_solver = CompressibleSolver(eqns) - # Diffusion schemes diffusion_schemes = [ BackwardEuler(domain, "u"), @@ -118,7 +115,7 @@ def straka_bubble( stepper = SemiImplicitQuasiNewton( eqns, io, transported_fields, spatial_methods=transport_methods+diffusion_methods, - linear_solver=linear_solver, diffusion_schemes=diffusion_schemes + diffusion_schemes=diffusion_schemes ) # ------------------------------------------------------------------------ # diff --git a/examples/compressible_euler/unsaturated_bubble.py b/examples/compressible_euler/unsaturated_bubble.py index 9a56cfe95..47ba48de3 100644 --- a/examples/compressible_euler/unsaturated_bubble.py +++ b/examples/compressible_euler/unsaturated_bubble.py @@ -26,7 +26,7 @@ Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, Perturbation, RecoverySpaces, BoundaryMethod, Recoverer, Fallout, Coalescence, SaturationAdjustment, EvaporationOfRain, thermodynamics, - CompressibleParameters, CompressibleEulerEquations, CompressibleSolver, + CompressibleParameters, CompressibleEulerEquations, unsaturated_hydrostatic_balance, WaterVapour, CloudWater, Rain, RelativeHumidity, ForwardEuler, MixedFSLimiter, 
ZeroLimiter ) @@ -102,8 +102,7 @@ def unsaturated_bubble( io = IO(domain, output, diagnostic_fields=diagnostic_fields) # Transport schemes -- specify options for using recovery wrapper - boundary_methods = {'DG': BoundaryMethod.taylor, - 'HDiv': BoundaryMethod.taylor} + boundary_methods = {'DG': BoundaryMethod.taylor} recovery_spaces = RecoverySpaces(domain, boundary_method=boundary_methods, use_vector_spaces=True) @@ -128,9 +127,6 @@ def unsaturated_bubble( ["u", "rho", "theta", "water_vapour", "cloud_water", "rain"] ] - # Linear solver - linear_solver = CompressibleSolver(eqns) - # Physics schemes Vt = domain.spaces('theta') rainfall_method = DGUpwind(eqns, 'rain', outflow=True) @@ -148,7 +144,7 @@ def unsaturated_bubble( # Time stepper stepper = SemiImplicitQuasiNewton( eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, physics_schemes=physics_schemes + final_physics_schemes=physics_schemes, tau_values={'rho': 1.0, 'theta': 1.0} ) # ------------------------------------------------------------------------ # diff --git a/examples/shallow_water/linear_thermal_galewsky_jet.py b/examples/shallow_water/linear_thermal_galewsky_jet.py index 74865e622..c777735f9 100644 --- a/examples/shallow_water/linear_thermal_galewsky_jet.py +++ b/examples/shallow_water/linear_thermal_galewsky_jet.py @@ -15,7 +15,7 @@ Domain, IO, OutputParameters, SemiImplicitQuasiNewton, DefaultTransport, DGUpwind, ForwardEuler, ShallowWaterParameters, NumericalIntegral, LinearThermalShallowWaterEquations, GeneralIcosahedralSphereMesh, - ZonalComponent, ThermalSWSolver, lonlatr_from_xyz, xyz_vector_from_lonlatr, + ZonalComponent, lonlatr_from_xyz, xyz_vector_from_lonlatr, RelativeVorticity, MeridionalComponent ) @@ -81,13 +81,8 @@ def linear_thermal_galewsky_jet( transport_schemes = [ForwardEuler(domain, "D")] transport_methods = [DefaultTransport(eqns, "D"), DGUpwind(eqns, "b")] - # Linear solver - linear_solver = ThermalSWSolver(eqns) - - # Time stepper stepper = 
SemiImplicitQuasiNewton( - eqns, io, transport_schemes, transport_methods, - linear_solver=linear_solver + eqns, io, transport_schemes, transport_methods ) # ------------------------------------------------------------------------ # diff --git a/examples/shallow_water/moist_convective_williamson_2.py b/examples/shallow_water/moist_convective_williamson_2.py index cc677d8fe..de5958e7d 100644 --- a/examples/shallow_water/moist_convective_williamson_2.py +++ b/examples/shallow_water/moist_convective_williamson_2.py @@ -13,13 +13,15 @@ """ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter -from firedrake import SpatialCoordinate, sin, cos, exp, Function +from firedrake import ( + SpatialCoordinate, sin, cos, exp, Function +) from gusto import ( Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, ShallowWaterParameters, ShallowWaterEquations, ZonalComponent, MeridionalComponent, SteadyStateError, lonlatr_from_xyz, - DG1Limiter, InstantRain, MoistConvectiveSWSolver, ForwardEuler, - RelativeVorticity, SWSaturationAdjustment, WaterVapour, CloudWater, Rain, + DG1Limiter, InstantRain, ForwardEuler, RelativeVorticity, + SWSaturationAdjustment, WaterVapour, CloudWater, Rain, GeneralIcosahedralSphereMesh, xyz_vector_from_lonlatr ) @@ -125,8 +127,6 @@ def sat_func(x_in): SSPRK3(domain, "rain", limiter=limiter) ] - linear_solver = MoistConvectiveSWSolver(eqns) - # Physics schemes sat_adj = SWSaturationAdjustment( eqns, sat_func, time_varying_saturation=True, @@ -143,9 +143,10 @@ def sat_func(x_in): ] stepper = SemiImplicitQuasiNewton( - eqns, io, transport_schemes=transported_fields, - spatial_methods=transport_methods, linear_solver=linear_solver, - physics_schemes=physics_schemes + eqns, io, + transport_schemes=transported_fields, + spatial_methods=transport_methods, + final_physics_schemes=physics_schemes ) # ------------------------------------------------------------------------ # diff --git 
a/examples/shallow_water/moist_thermal_equivb_gw.py b/examples/shallow_water/moist_thermal_equivb_gw.py deleted file mode 100644 index 43890ad8e..000000000 --- a/examples/shallow_water/moist_thermal_equivb_gw.py +++ /dev/null @@ -1,229 +0,0 @@ -""" -A gravity wave on the sphere, solved with the moist thermal shallow water -equations. The initial conditions are saturated and cloudy everywhere. -""" - -from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter -from firedrake import ( - SpatialCoordinate, pi, sqrt, min_value, cos, Constant, Function, exp, sin -) -from gusto import ( - Domain, IO, OutputParameters, DGUpwind, ShallowWaterParameters, - ThermalShallowWaterEquations, lonlatr_from_xyz, MeridionalComponent, - GeneralIcosahedralSphereMesh, SubcyclingOptions, ZonalComponent, - PartitionedCloud, RungeKuttaFormulation, SSPRK3, ThermalSWSolver, - SemiImplicitQuasiNewton, xyz_vector_from_lonlatr -) - -moist_thermal_gw_defaults = { - 'ncells_per_edge': 12, # number of cells per icosahedron edge - 'dt': 900.0, # 15 minutes - 'tmax': 5.*24.*60.*60., # 5 days - 'dumpfreq': 48, # dump twice per day - 'dirname': 'moist_thermal_equivb_gw' -} - - -def moist_thermal_gw( - ncells_per_edge=moist_thermal_gw_defaults['ncells_per_edge'], - dt=moist_thermal_gw_defaults['dt'], - tmax=moist_thermal_gw_defaults['tmax'], - dumpfreq=moist_thermal_gw_defaults['dumpfreq'], - dirname=moist_thermal_gw_defaults['dirname'] -): - - # ------------------------------------------------------------------------ # - # Parameters for test case - # ------------------------------------------------------------------------ # - - radius = 6371220. # planetary radius (m) - mean_depth = 5960. # reference depth (m) - q0 = 0.0115 # saturation curve coefficient (kg/kg) - beta2 = 9.80616*10 # thermal feedback coefficient (m/s^2) - nu = 1.5 # dimensionless parameter in saturation curve - R0 = pi/9. # radius of perturbation (rad) - lamda_c = -pi/2. 
# longitudinal centre of perturbation (rad) - phi_c = pi/6. # latitudinal centre of perturbation (rad) - phi_0 = 3.0e4 # scale factor for poleward buoyancy gradient - epsilon = 1/300 # linear air expansion coeff (1/K) - u_max = 20. # max amplitude of the zonal wind (m/s) - - # ------------------------------------------------------------------------ # - # Set up model objects - # ------------------------------------------------------------------------ # - - # Domain - mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) - degree = 1 - domain = Domain(mesh, dt, "BDM", degree) - xyz = SpatialCoordinate(mesh) - - # Equation parameters - parameters = ShallowWaterParameters(mesh, H=mean_depth, q0=q0, - nu=nu, beta2=beta2) - - # Equation - eqns = ThermalShallowWaterEquations( - domain, parameters, equivalent_buoyancy=True - ) - - # IO - output = OutputParameters( - dirname=dirname, dumpfreq=dumpfreq, dump_nc=True, dump_vtus=False, - dumplist=['b_e', 'D'] - ) - diagnostic_fields = [ - ZonalComponent('u'), MeridionalComponent('u'), PartitionedCloud(eqns) - ] - io = IO(domain, output, diagnostic_fields=diagnostic_fields) - - transport_methods = [ - DGUpwind(eqns, field_name) for field_name in eqns.field_names - ] - - linear_solver = ThermalSWSolver(eqns) - - # ------------------------------------------------------------------------ # - # Timestepper - # ------------------------------------------------------------------------ # - - subcycling_opts = SubcyclingOptions(subcycle_by_courant=0.25) - transported_fields = [ - SSPRK3(domain, "u", subcycling_options=subcycling_opts), - SSPRK3( - domain, "D", subcycling_options=subcycling_opts, - rk_formulation=RungeKuttaFormulation.linear - ), - SSPRK3(domain, "b_e", subcycling_options=subcycling_opts), - SSPRK3(domain, "q_t", subcycling_options=subcycling_opts), - ] - stepper = SemiImplicitQuasiNewton( - eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, - ) - - # 
------------------------------------------------------------------------ # - # Initial conditions - # ------------------------------------------------------------------------ # - - u0 = stepper.fields("u") - D0 = stepper.fields("D") - b0 = stepper.fields("b_e") - qt0 = stepper.fields("q_t") - - lamda, phi, _ = lonlatr_from_xyz(xyz[0], xyz[1], xyz[2]) - - # Velocity -- a solid body rotation - uexpr = xyz_vector_from_lonlatr(u_max*cos(phi), 0, 0, xyz) - - # Buoyancy -- dependent on latitude - g = parameters.g - Omega = parameters.Omega - w = Omega*radius*u_max + (u_max**2)/2 - sigma = w/10 - theta_0 = epsilon*phi_0**2 - numerator = ( - theta_0 + sigma*((cos(phi))**2) * ( - (w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma) - ) - ) - denominator = ( - phi_0**2 + (w + sigma)**2 - * (sin(phi))**4 - 2*phi_0*(w + sigma)*(sin(phi))**2 - ) - theta = numerator / denominator - b_guess = parameters.g * (1 - theta) - - # Depth -- in balance with the contribution of a perturbation - Dexpr = mean_depth - (1/g)*(w + sigma)*((sin(phi))**2) - - # Perturbation - lsq = (lamda - lamda_c)**2 - thsq = (phi - phi_c)**2 - rsq = min_value(R0**2, lsq+thsq) - r = sqrt(rsq) - pert = 2000 * (1 - r/R0) - Dexpr += pert - - # Actual initial buoyancy is specified through equivalent buoyancy - q_t = 0.03 - b_init = Function(b0.function_space()).interpolate(b_guess) - b_e_init = Function(b0.function_space()).interpolate(b_init - beta2*q_t) - q_v_init = Function(qt0.function_space()).interpolate(q_t) - - # Iterate to obtain equivalent buoyancy and saturation water vapour - n_iterations = 10 - - for _ in range(n_iterations): - q_sat_expr = q0*mean_depth/Dexpr * exp(nu*(1-b_e_init/g)) - dq_sat_dq_v_expr = nu*beta2/g*q_sat_expr - q_v_init.interpolate(q_v_init - (q_sat_expr - q_v_init)/(dq_sat_dq_v_expr - 1.0)) - b_e_init.interpolate(b_init - beta2*q_v_init) - - # Water vapour set to saturation amount - vexpr = q0*mean_depth/Dexpr * exp(nu*(1-b_e_init/g)) - - # Back out the initial buoyancy using b_e and 
q_v - bexpr = b_e_init + beta2*vexpr - - u0.project(uexpr) - D0.interpolate(Dexpr) - b0.interpolate(bexpr) - qt0.interpolate(Constant(0.03)) - - # Set reference profiles - Dbar = Function(D0.function_space()).assign(mean_depth) - bbar = Function(b0.function_space()).interpolate(bexpr) - stepper.set_reference_profiles([('D', Dbar), ('b_e', bbar)]) - - # ----------------------------------------------------------------- # - # Run - # ----------------------------------------------------------------- # - - stepper.run(t=0, tmax=tmax) - - -# ---------------------------------------------------------------------------- # -# MAIN -# ---------------------------------------------------------------------------- # - - -if __name__ == "__main__": - - parser = ArgumentParser( - description=__doc__, - formatter_class=ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--ncells_per_edge', - help="The number of cells per edge of icosahedron", - type=int, - default=moist_thermal_gw_defaults['ncells_per_edge'] - ) - parser.add_argument( - '--dt', - help="The time step in seconds.", - type=float, - default=moist_thermal_gw_defaults['dt'] - ) - parser.add_argument( - "--tmax", - help="The end time for the simulation in seconds.", - type=float, - default=moist_thermal_gw_defaults['tmax'] - ) - parser.add_argument( - '--dumpfreq', - help="The frequency at which to dump field output.", - type=int, - default=moist_thermal_gw_defaults['dumpfreq'] - ) - parser.add_argument( - '--dirname', - help="The name of the directory to write to.", - type=str, - default=moist_thermal_gw_defaults['dirname'] - ) - args, unknown = parser.parse_known_args() - - moist_thermal_gw(**vars(args)) diff --git a/examples/shallow_water/moist_thermal_gw.py b/examples/shallow_water/moist_thermal_gw.py new file mode 100644 index 000000000..93b664ec3 --- /dev/null +++ b/examples/shallow_water/moist_thermal_gw.py @@ -0,0 +1,352 @@ +""" +A gravity wave on the sphere, solved with the moist thermal shallow water 
+equations. The initial conditions are saturated and cloudy everywhere. + +This example is implemented in two versions: +- The first uses the equivalent buoyancy formulation and uses a hybridised + linear solver. +- The second uses the standard buoyancy formulation and a monolithic linear + solver that includes moist physics. +""" + +from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter +from firedrake import ( + SpatialCoordinate, pi, sqrt, min_value, cos, Constant, Function, exp, sin, + dx, split +) +from gusto import ( + Domain, IO, OutputParameters, DGUpwind, ShallowWaterParameters, + ThermalShallowWaterEquations, lonlatr_from_xyz, SubcyclingOptions, + RungeKuttaFormulation, SSPRK3, MeridionalComponent, physics_label, + SemiImplicitQuasiNewton, ForwardEuler, WaterVapour, CloudWater, + xyz_vector_from_lonlatr, SWSaturationAdjustment, ZonalComponent, + GeneralIcosahedralSphereMesh, PartitionedCloud, prognostic, subject, + monolithic_solver_parameters +) + +moist_thermal_gw_defaults = { + 'ncells_per_edge': 16, # number of cells per icosahedron edge + 'dt': 900.0, # 15 minutes + 'tmax': 5.*24.*60.*60., # 5 days + 'dumpfreq': 96, # dump once per day + 'dirname': 'moist_thermal_gw', + 'equivb': False +} + + +def moist_thermal_gw( + ncells_per_edge=moist_thermal_gw_defaults['ncells_per_edge'], + dt=moist_thermal_gw_defaults['dt'], + tmax=moist_thermal_gw_defaults['tmax'], + dumpfreq=moist_thermal_gw_defaults['dumpfreq'], + dirname=moist_thermal_gw_defaults['dirname'], + equivb=moist_thermal_gw_defaults['equivb'] +): + + # ------------------------------------------------------------------------ # + # Parameters for test case + # ------------------------------------------------------------------------ # + + radius = 6371220. # planetary radius (m) + q0 = 0.0115 # saturation curve coefficient (kg/kg) + beta2 = 9.80616*10 # thermal feedback coefficient (m/s^2) + nu = 1.5 # dimensionless parameter in saturation curve + R0 = pi/9. 
# radius of perturbation (rad) + lamda_c = -pi/2. # longitudinal centre of perturbation (rad) + phi_c = pi/6. # latitudinal centre of perturbation (rad) + phi_0 = 3.0e4 # scale factor for poleward buoyancy gradient + epsilon = 1/300 # linear air expansion coeff (1/K) + u_max = 20. # max amplitude of the zonal wind (m/s) + g = 9.80616 # acceleration due to gravity (m/s^2) + mean_depth = phi_0/g # reference depth (m) + physics_beta = 0.5 # semi-implicit factor for physics term + + # ------------------------------------------------------------------------ # + # Set up model objects + # ------------------------------------------------------------------------ # + + # Domain + mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) + degree = 1 + domain = Domain(mesh, dt, "BDM", degree) + xyz = SpatialCoordinate(mesh) + + # Equation parameters + parameters = ShallowWaterParameters( + mesh, H=mean_depth, q0=q0, nu=nu, beta2=beta2 + ) + + # Equation + tracers = None if equivb else [WaterVapour(space='DG'), CloudWater(space='DG')] + eqns = ThermalShallowWaterEquations( + domain, parameters, active_tracers=tracers, equivalent_buoyancy=equivb + ) + + # Need to add physics term to equation when using evaluating it in solver + if not equivb: + def sat_func(x_in): + D = x_in.subfunctions[1] + b = x_in.subfunctions[2] + qv = x_in.subfunctions[3] + b_e = b - beta2*qv + sat = q0*mean_depth/D * exp(nu*(1-b_e/g)) + return sat + + # Extract fields from equations + _, Dbar, _, _, _ = split(eqns.X_ref) + _, D, b, qv, _ = split(eqns.X) + _, _, lamda, tau1, tau2 = eqns.tests + + P_expr = qv - sat_func(eqns.X_ref)*(-D/Dbar - b*nu/g + qv*nu*beta2/g) + phys_b_form = physics_beta * lamda * beta2 * P_expr * dx + phys_qv_form = physics_beta * tau1 * P_expr * dx + phys_qc_form = -physics_beta * tau2 * P_expr * dx + eqns.residual += ( + subject(prognostic(physics_label(phys_b_form), 'b'), eqns.X) + ) + eqns.residual += ( + subject(prognostic(physics_label(phys_qv_form), 
'water_vapour'), eqns.X) + ) + eqns.residual += ( + subject(prognostic(physics_label(phys_qc_form), 'cloud_water'), eqns.X) + ) + + # Add linearisation of these terms + eqns.residual = eqns.generate_linear_terms( + eqns.residual, lambda t: t.has_label(physics_label) + ) + + # IO + if dirname == moist_thermal_gw_defaults['dirname'] and equivb: + dirname += '_equivb' + + if equivb: + dumplist = ['b_e', 'D', 'q_t'] + diagnostic_fields = [ + ZonalComponent('u'), MeridionalComponent('u'), + PartitionedCloud(eqns) + ] + else: + diagnostic_fields = [MeridionalComponent('u'), ZonalComponent('u')] + dumplist = ['b', 'water_vapour', 'cloud_water', 'D'] + + output = OutputParameters( + dirname=dirname, dumpfreq=dumpfreq, dump_nc=True, dump_vtus=False, + dumplist=dumplist + ) + io = IO(domain, output, diagnostic_fields=diagnostic_fields) + + # Transport + transport_methods = [ + DGUpwind(eqns, field_name) for field_name in eqns.field_names + ] + subcycling_opts = SubcyclingOptions(subcycle_by_courant=0.25) + transported_fields = [ + SSPRK3(domain, "u", subcycling_options=subcycling_opts), + SSPRK3( + domain, "D", subcycling_options=subcycling_opts, + rk_formulation=RungeKuttaFormulation.linear + ) + ] + if equivb: + transported_fields += [ + SSPRK3(domain, "b_e", subcycling_options=subcycling_opts), + SSPRK3(domain, "q_t", subcycling_options=subcycling_opts), + ] + else: + transported_fields += [ + SSPRK3(domain, "b", subcycling_options=subcycling_opts), + SSPRK3(domain, "water_vapour", subcycling_options=subcycling_opts), + SSPRK3(domain, "cloud_water", subcycling_options=subcycling_opts) + ] + + if equivb: + tau_values = {'D': 1.0, 'b': 1.0} + solver_parameters = None + solver_prognostics = ['u', 'D', 'b_e'] + else: + tau_values = {'D': 1.0, 'b': 1.0, 'water_vapour': 1.0, 'cloud_water': 1.0} + solver_parameters = monolithic_solver_parameters() + solver_prognostics = eqns.field_names + + if equivb: + physics_schemes = None + else: + + # Physics schemes + sat_adj = 
SWSaturationAdjustment( + eqns, sat_func, time_varying_saturation=True, + parameters=parameters, thermal_feedback=True, beta2=beta2 + ) + + physics_schemes = [(sat_adj, ForwardEuler(domain))] + + # ------------------------------------------------------------------------ # + # Timestepper + # ------------------------------------------------------------------------ # + + stepper = SemiImplicitQuasiNewton( + eqns, io, transported_fields, transport_methods, + physics_beta=physics_beta, tau_values=tau_values, + inner_physics_schemes=physics_schemes, + num_outer=2, num_inner=2, solver_prognostics=solver_prognostics, + linear_solver_parameters=solver_parameters, reference_update_freq=10800. + ) + + # ------------------------------------------------------------------------ # + # Initial conditions + # ------------------------------------------------------------------------ # + + u0 = stepper.fields("u") + D0 = stepper.fields("D") + + if equivb: + b0 = stepper.fields("b_e") + qt0 = stepper.fields("q_t") + else: + b0 = stepper.fields("b") + v0 = stepper.fields("water_vapour") + c0 = stepper.fields("cloud_water") + + lamda, phi, _ = lonlatr_from_xyz(xyz[0], xyz[1], xyz[2]) + + # Velocity -- a solid body rotation + uexpr = xyz_vector_from_lonlatr(u_max*cos(phi), 0, 0, xyz) + + # Buoyancy -- dependent on latitude + g = parameters.g + w = parameters.Omega*radius*u_max + (u_max**2)/2 + sigma = w/10 + theta_0 = epsilon*phi_0**2 + numerator = ( + theta_0 + sigma*((cos(phi))**2) * ( + (w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma) + ) + ) + denominator = ( + phi_0**2 + (w + sigma)**2 + * (sin(phi))**4 - 2*phi_0*(w + sigma)*(sin(phi))**2 + ) + theta = numerator / denominator + + # Depth -- in balance before the addition of a perturbation + Dbar_expr = mean_depth - (1/g)*(w + sigma)*((sin(phi))**2) + + # Perturbation + lsq = (lamda - lamda_c)**2 + thsq = (phi - phi_c)**2 + rsq = min_value(R0**2, lsq+thsq) + r = sqrt(rsq) + Dpert = 2000 * (1 - r/R0) + Dexpr = Dbar_expr + Dpert + + # 
Actual initial buoyancy is specified through equivalent buoyancy + q_t = 0.03 # Large enough to prevent cloud ever going negative + bexpr = parameters.g * (1 - theta) # Find balanced b from theta + b_init = Function(b0.function_space()).interpolate(bexpr) + b_e_init = Function(b0.function_space()).interpolate(b_init - beta2*q_t) + q_v_init = Function(b0.function_space()).interpolate(q_t) + + # Iterate to obtain equivalent buoyancy and saturation water vapour + # Saturation curve depends on b_e, which depends on saturation curve + # Use Newton-Raphson method to find an appropriate solution + n_iterations = 10 + for _ in range(n_iterations): + q_sat_expr = q0*mean_depth/Dexpr * exp(nu*(1-b_e_init/g)) + dq_sat_dq_v_expr = nu*beta2/g*q_sat_expr + q_v_init.interpolate(q_v_init - (q_sat_expr - q_v_init)/(dq_sat_dq_v_expr - 1.0)) + b_e_init.interpolate(b_init - beta2*q_v_init) + + # Water vapour set to saturation amount + vexpr = q0*mean_depth/Dexpr * exp(nu*(1-b_e_init/g)) + + if equivb: + # bexpr is passed to the buoyancy variable, which in this case is b_e + bexpr = b_e_init + # else: + # the buoyancy variable is plain old b, so we already know the i.c. 
+ # NB: to directly compare with equivalent buoyancy case, we might want to + # match the buoyancy fields numerically, so at this point we would set + # bexpr = b_e_init + beta2*vexpr for the non-equivalent case + + # Cloud is the rest of total liquid that isn't vapour + cexpr = Constant(q_t) - vexpr + + u0.project(uexpr) + D0.interpolate(Dexpr) + b0.interpolate(bexpr) + + if equivb: + qt0.interpolate(Constant(q_t)) + else: + v0.interpolate(vexpr) + c0.interpolate(cexpr) + + # Set reference profiles to initial state + Dbar = Function(D0.function_space()).interpolate(Dexpr) + bbar = Function(b0.function_space()).interpolate(bexpr) + if equivb: + stepper.set_reference_profiles([('D', Dbar), ('b_e', bbar)]) + else: + stepper.set_reference_profiles([ + ('D', Dbar), ('b', bbar), ('water_vapour', v0), + ('cloud_water', c0) + ]) + + # ----------------------------------------------------------------- # + # Run + # ----------------------------------------------------------------- # + + stepper.run(t=0, tmax=tmax) + + +# ---------------------------------------------------------------------------- # +# MAIN +# ---------------------------------------------------------------------------- # + + +if __name__ == "__main__": + + parser = ArgumentParser( + description=__doc__, + formatter_class=ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--ncells_per_edge', + help="The number of cells per edge of icosahedron", + type=int, + default=moist_thermal_gw_defaults['ncells_per_edge'] + ) + parser.add_argument( + '--dt', + help="The time step in seconds.", + type=float, + default=moist_thermal_gw_defaults['dt'] + ) + parser.add_argument( + "--tmax", + help="The end time for the simulation in seconds.", + type=float, + default=moist_thermal_gw_defaults['tmax'] + ) + parser.add_argument( + '--dumpfreq', + help="The frequency at which to dump field output.", + type=int, + default=moist_thermal_gw_defaults['dumpfreq'] + ) + parser.add_argument( + '--dirname', + help="The name of 
the directory to write to.", + type=str, + default=moist_thermal_gw_defaults['dirname'] + ) + parser.add_argument( + '--equivb', + help="Use equivalent buoyancy formulation.", + action='store_true', + default=moist_thermal_gw_defaults['equivb'] + ) + args, unknown = parser.parse_known_args() + + moist_thermal_gw(**vars(args)) diff --git a/examples/shallow_water/test_shallow_water_examples.py b/examples/shallow_water/test_shallow_water_examples.py index 5e9cfa379..ce684011d 100644 --- a/examples/shallow_water/test_shallow_water_examples.py +++ b/examples/shallow_water/test_shallow_water_examples.py @@ -165,18 +165,37 @@ def test_linear_thermal_galewsky_jet_parallel(): test_linear_thermal_galewsky_jet() -def test_moist_thermal_eqiuvb_gw(): - from moist_thermal_equivb_gw import moist_thermal_gw +def test_moist_thermal_gw_equivb(): + from moist_thermal_gw import moist_thermal_gw + test_name = 'moist_thermal_gw_equivb' + moist_thermal_gw( + ncells_per_edge=4, + dt=1800., + tmax=18000., + dumpfreq=10, + dirname=make_dirname(test_name), + equivb=True + ) + + +@pytest.mark.parallel(nprocs=4) +def moist_thermal_gw_equivb_parallel(): + test_moist_thermal_gw_equivb() + + +def test_moist_thermal_gw(): + from moist_thermal_gw import moist_thermal_gw test_name = 'moist_thermal_gw' moist_thermal_gw( ncells_per_edge=4, dt=1800., tmax=18000., dumpfreq=10, - dirname=make_dirname(test_name) + dirname=make_dirname(test_name), + equivb=False ) @pytest.mark.parallel(nprocs=4) -def moist_thermal_eqiuvb_gw_parallel(): - test_moist_thermal_eqiuvb_gw() +def moist_thermal_gw_parallel(): + test_moist_thermal_gw() diff --git a/examples/shallow_water/thermal_williamson_2.py b/examples/shallow_water/thermal_williamson_2.py index c10fbca18..ba2dd48fa 100644 --- a/examples/shallow_water/thermal_williamson_2.py +++ b/examples/shallow_water/thermal_williamson_2.py @@ -17,9 +17,8 @@ Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, ShallowWaterParameters, 
ThermalShallowWaterEquations, RelativeVorticity, PotentialVorticity, SteadyStateError, - ZonalComponent, MeridionalComponent, ThermalSWSolver, - xyz_vector_from_lonlatr, lonlatr_from_xyz, GeneralIcosahedralSphereMesh, - SubcyclingOptions + ZonalComponent, MeridionalComponent, xyz_vector_from_lonlatr, + lonlatr_from_xyz, GeneralIcosahedralSphereMesh, SubcyclingOptions ) thermal_williamson_2_defaults = { @@ -99,13 +98,9 @@ def thermal_williamson_2( DGUpwind(eqns, "b") ] - # Linear solver - linear_solver = ThermalSWSolver(eqns) - # Time stepper stepper = SemiImplicitQuasiNewton( - eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver + eqns, io, transported_fields, transport_methods ) # ------------------------------------------------------------------------ # diff --git a/examples/shallow_water/williamson_2.py b/examples/shallow_water/williamson_2.py index 22fa372b1..20eb8ea44 100644 --- a/examples/shallow_water/williamson_2.py +++ b/examples/shallow_water/williamson_2.py @@ -8,14 +8,14 @@ """ from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter -from firedrake import SpatialCoordinate, sin, cos, pi, Function +from firedrake import SpatialCoordinate, sin, cos, pi, Function, Constant from gusto import ( Domain, IO, OutputParameters, SemiImplicitQuasiNewton, SSPRK3, DGUpwind, TrapeziumRule, ShallowWaterParameters, ShallowWaterEquations, RelativeVorticity, PotentialVorticity, SteadyStateError, ShallowWaterKineticEnergy, ShallowWaterPotentialEnergy, - ShallowWaterPotentialEnstrophy, rotated_lonlatr_coords, - ZonalComponent, MeridionalComponent, rotated_lonlatr_vectors, + ShallowWaterPotentialEnstrophy, lonlatr_from_xyz, + ZonalComponent, MeridionalComponent, xyz_vector_from_lonlatr, GeneralIcosahedralSphereMesh, SubcyclingOptions ) @@ -42,7 +42,6 @@ def williamson_2( radius = 6371220. # planetary radius (m) mean_depth = 5960. 
# reference depth (m) - rotate_pole_to = (0.0, pi/3) # location of North pole of mesh u_max = 2*pi*radius/(12*24*60*60) # Max amplitude of the zonal wind (m/s) # ------------------------------------------------------------------------ # @@ -58,14 +57,16 @@ def williamson_2( # Domain mesh = GeneralIcosahedralSphereMesh(radius, ncells_per_edge, degree=2) - domain = Domain(mesh, dt, 'BDM', element_order, rotated_pole=rotate_pole_to) + domain = Domain(mesh, dt, 'BDM', element_order) xyz = SpatialCoordinate(mesh) # Equation parameters = ShallowWaterParameters(mesh, H=mean_depth) Omega = parameters.Omega - _, lat, _ = rotated_lonlatr_coords(xyz, rotate_pole_to) - e_lon, _, _ = rotated_lonlatr_vectors(xyz, rotate_pole_to) + _, lat, _ = lonlatr_from_xyz(xyz[0], xyz[1], xyz[2]) + e_lon = xyz_vector_from_lonlatr( + Constant(1.0), Constant(0.0), Constant(0.0), xyz + ) eqns = ShallowWaterEquations( domain, parameters, u_transport_option=u_eqn_type) @@ -81,8 +82,8 @@ def williamson_2( ShallowWaterPotentialEnergy(parameters), ShallowWaterPotentialEnstrophy(), SteadyStateError('u'), SteadyStateError('D'), - MeridionalComponent('u', rotate_pole_to), - ZonalComponent('u', rotate_pole_to) + MeridionalComponent('u'), + ZonalComponent('u') ] io = IO(domain, output, diagnostic_fields=diagnostic_fields) diff --git a/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png b/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png index 4107d44ec..cd31c2ff0 100644 Binary files a/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png and b/figures/boussinesq/skamarock_klemp_compressible_bouss_final.png differ diff --git a/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png b/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png index ef961cc01..4e9e127b7 100644 Binary files a/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png and b/figures/boussinesq/skamarock_klemp_compressible_bouss_initial.png differ diff --git 
a/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png b/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png index 622266ca1..4a5a8f429 100644 Binary files a/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png and b/figures/boussinesq/skamarock_klemp_incompressible_bouss_final.png differ diff --git a/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png b/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png index ef961cc01..4e9e127b7 100644 Binary files a/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png and b/figures/boussinesq/skamarock_klemp_incompressible_bouss_initial.png differ diff --git a/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png b/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png index d9864b820..4df2c2281 100644 Binary files a/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png and b/figures/compressible_euler/dcmip_3_1_gravity_wave_final.png differ diff --git a/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png b/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png index f6febb2e8..ebfe8a094 100644 Binary files a/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png and b/figures/compressible_euler/dcmip_3_1_gravity_wave_initial.png differ diff --git a/figures/compressible_euler/dry_bryan_fritsch.png b/figures/compressible_euler/dry_bryan_fritsch.png index b763f4275..fddef3ce6 100644 Binary files a/figures/compressible_euler/dry_bryan_fritsch.png and b/figures/compressible_euler/dry_bryan_fritsch.png differ diff --git a/figures/compressible_euler/schaer_mountain_final.png b/figures/compressible_euler/schaer_mountain_final.png index a5289cca3..d888650e8 100644 Binary files a/figures/compressible_euler/schaer_mountain_final.png and b/figures/compressible_euler/schaer_mountain_final.png differ diff --git a/figures/compressible_euler/schaer_mountain_initial.png 
b/figures/compressible_euler/schaer_mountain_initial.png index 5e78c469a..ad9eacb87 100644 Binary files a/figures/compressible_euler/schaer_mountain_initial.png and b/figures/compressible_euler/schaer_mountain_initial.png differ diff --git a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png index e76a82582..51047f851 100644 Binary files a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png and b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_final.png differ diff --git a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png index 94c1eb09f..7e814ffa5 100644 Binary files a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png and b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_initial.png differ diff --git a/figures/compressible_euler/skamarock_klemp_nonhydrostatic_trbdf2_final.png b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_trbdf2_final.png new file mode 100644 index 000000000..4a15f14c4 Binary files /dev/null and b/figures/compressible_euler/skamarock_klemp_nonhydrostatic_trbdf2_final.png differ diff --git a/figures/compressible_euler/straka_bubble.png b/figures/compressible_euler/straka_bubble.png index 395bb9105..5b47c5f83 100644 Binary files a/figures/compressible_euler/straka_bubble.png and b/figures/compressible_euler/straka_bubble.png differ diff --git a/figures/compressible_euler/unsaturated_bubble_final.png b/figures/compressible_euler/unsaturated_bubble_final.png index 4f32dc099..af7b49ba8 100644 Binary files a/figures/compressible_euler/unsaturated_bubble_final.png and b/figures/compressible_euler/unsaturated_bubble_final.png differ diff --git a/figures/compressible_euler/unsaturated_bubble_initial.png b/figures/compressible_euler/unsaturated_bubble_initial.png index 993a75dfb..6fe78547e 100644 Binary files 
a/figures/compressible_euler/unsaturated_bubble_initial.png and b/figures/compressible_euler/unsaturated_bubble_initial.png differ diff --git a/figures/shallow_water/moist_convective_williamson_2_final.png b/figures/shallow_water/moist_convective_williamson_2_final.png index c85b82415..67d8249cd 100644 Binary files a/figures/shallow_water/moist_convective_williamson_2_final.png and b/figures/shallow_water/moist_convective_williamson_2_final.png differ diff --git a/figures/shallow_water/moist_convective_williamson_2_initial.png b/figures/shallow_water/moist_convective_williamson_2_initial.png index 07e171d25..1bde91d83 100644 Binary files a/figures/shallow_water/moist_convective_williamson_2_initial.png and b/figures/shallow_water/moist_convective_williamson_2_initial.png differ diff --git a/figures/shallow_water/moist_thermal_equivb_gw_final.png b/figures/shallow_water/moist_thermal_equivb_gw_final.png deleted file mode 100644 index e5aaa2330..000000000 Binary files a/figures/shallow_water/moist_thermal_equivb_gw_final.png and /dev/null differ diff --git a/figures/shallow_water/moist_thermal_equivb_gw_initial.png b/figures/shallow_water/moist_thermal_equivb_gw_initial.png deleted file mode 100644 index 94468de03..000000000 Binary files a/figures/shallow_water/moist_thermal_equivb_gw_initial.png and /dev/null differ diff --git a/figures/shallow_water/moist_thermal_gw_equivb_final.png b/figures/shallow_water/moist_thermal_gw_equivb_final.png new file mode 100644 index 000000000..af70310bc Binary files /dev/null and b/figures/shallow_water/moist_thermal_gw_equivb_final.png differ diff --git a/figures/shallow_water/moist_thermal_gw_equivb_initial.png b/figures/shallow_water/moist_thermal_gw_equivb_initial.png new file mode 100644 index 000000000..ebe622cd0 Binary files /dev/null and b/figures/shallow_water/moist_thermal_gw_equivb_initial.png differ diff --git a/figures/shallow_water/moist_thermal_gw_final.png b/figures/shallow_water/moist_thermal_gw_final.png new file 
mode 100644 index 000000000..a99e19672 Binary files /dev/null and b/figures/shallow_water/moist_thermal_gw_final.png differ diff --git a/figures/shallow_water/moist_thermal_gw_initial.png b/figures/shallow_water/moist_thermal_gw_initial.png new file mode 100644 index 000000000..ebe622cd0 Binary files /dev/null and b/figures/shallow_water/moist_thermal_gw_initial.png differ diff --git a/figures/shallow_water/moist_thermal_williamson_5_final.png b/figures/shallow_water/moist_thermal_williamson_5_final.png index cc63024af..137d2e2a6 100644 Binary files a/figures/shallow_water/moist_thermal_williamson_5_final.png and b/figures/shallow_water/moist_thermal_williamson_5_final.png differ diff --git a/figures/shallow_water/moist_thermal_williamson_5_initial.png b/figures/shallow_water/moist_thermal_williamson_5_initial.png index 3d9c8486f..c8501f806 100644 Binary files a/figures/shallow_water/moist_thermal_williamson_5_initial.png and b/figures/shallow_water/moist_thermal_williamson_5_initial.png differ diff --git a/figures/shallow_water/thermal_williamson_2_final.png b/figures/shallow_water/thermal_williamson_2_final.png index 705a1f436..24f2739f6 100644 Binary files a/figures/shallow_water/thermal_williamson_2_final.png and b/figures/shallow_water/thermal_williamson_2_final.png differ diff --git a/figures/shallow_water/thermal_williamson_2_initial.png b/figures/shallow_water/thermal_williamson_2_initial.png index 0f53c1da5..78014a7ea 100644 Binary files a/figures/shallow_water/thermal_williamson_2_initial.png and b/figures/shallow_water/thermal_williamson_2_initial.png differ diff --git a/figures/shallow_water/williamson_2_final.png b/figures/shallow_water/williamson_2_final.png index 77fbdc8b3..096687689 100644 Binary files a/figures/shallow_water/williamson_2_final.png and b/figures/shallow_water/williamson_2_final.png differ diff --git a/figures/shallow_water/williamson_2_initial.png b/figures/shallow_water/williamson_2_initial.png index dee716023..886cbf42d 100644 
Binary files a/figures/shallow_water/williamson_2_initial.png and b/figures/shallow_water/williamson_2_initial.png differ diff --git a/figures/shallow_water/williamson_5_final.png b/figures/shallow_water/williamson_5_final.png index a0ea4f7d8..ca4b22b3c 100644 Binary files a/figures/shallow_water/williamson_5_final.png and b/figures/shallow_water/williamson_5_final.png differ diff --git a/figures/shallow_water/williamson_5_initial.png b/figures/shallow_water/williamson_5_initial.png index ae18f58f6..7d82319b7 100644 Binary files a/figures/shallow_water/williamson_5_initial.png and b/figures/shallow_water/williamson_5_initial.png differ diff --git a/gusto/core/coordinates.py b/gusto/core/coordinates.py index a2eedd488..3b475b3ed 100644 --- a/gusto/core/coordinates.py +++ b/gusto/core/coordinates.py @@ -3,7 +3,7 @@ Coordinate fields are stored in specified VectorFunctionSpaces. """ -from gusto.core.coord_transforms import lonlatr_from_xyz, rotated_lonlatr_coords +from gusto.core.coord_transforms import lonlatr_from_xyz from gusto.core.logging import logger from firedrake import SpatialCoordinate, Function import numpy as np @@ -14,18 +14,13 @@ class Coordinates(object): """ An object for holding and setting up coordinate fields. """ - def __init__(self, mesh, on_sphere=False, rotated_pole=None, radius=None): + def __init__(self, mesh, on_sphere=False, radius=None): """ Args: mesh (:class:`Mesh`): the model's domain object. on_sphere (bool, optional): whether the domain is on the surface of a sphere. If False, the domain is assumed to be Cartesian. Defaults to False. - rotated_pole (tuple, optional): a tuple of floats (lon, lat) of the - location to use as the north pole in a spherical coordinate - system. These are expressed in the original coordinate system. - The longitude and latitude must be expressed in radians. - Defaults to None. This is unused for non-spherical domains. radius (float, optional): the radius of a spherical domain. Defaults to None. 
This is unused for non-spherical domains. """ @@ -38,10 +33,7 @@ def __init__(self, mesh, on_sphere=False, rotated_pole=None, radius=None): if on_sphere: xyz = SpatialCoordinate(mesh) - if rotated_pole is not None: - lon, lat, r = rotated_lonlatr_coords(xyz, rotated_pole) - else: - lon, lat, r = lonlatr_from_xyz(xyz[0], xyz[1], xyz[2]) + lon, lat, r = lonlatr_from_xyz(xyz[0], xyz[1], xyz[2]) if mesh.extruded: self.coords = (lon, lat, r-radius) diff --git a/gusto/core/domain.py b/gusto/core/domain.py index 2caedfab3..0bbb8480d 100644 --- a/gusto/core/domain.py +++ b/gusto/core/domain.py @@ -28,7 +28,7 @@ class Domain(object): """ def __init__(self, mesh, dt, family, degree=None, horizontal_degree=None, vertical_degree=None, - rotated_pole=None, max_quad_degree=None): + max_quad_degree=None): """ Args: mesh (:class:`Mesh`): the model's mesh. @@ -44,11 +44,6 @@ def __init__(self, mesh, dt, family, degree=None, horizontal part of the DG space. Defaults to None. vertical_degree (int, optional): the element degree used for the vertical part of the DG space. Defaults to None. - rotated_pole (tuple, optional): a tuple of floats (lon, lat) of the - location to use as the north pole in a spherical coordinate - system. These are expressed in the original coordinate system. - The longitude and latitude must be expressed in radians. - Defaults to None. This is unused for non-spherical domains. max_quad_degree (int, optional): the maximum quadrature degree to use in certain non-linear terms (e.g. when using an expression for the Exner pressure). 
Defaults to None, in which case this @@ -156,8 +151,7 @@ def __init__(self, mesh, dt, family, degree=None, # Set up coordinates # -------------------------------------------------------------------- # - self.coords = Coordinates(mesh, on_sphere=self.on_sphere, - rotated_pole=rotated_pole, radius=radius) + self.coords = Coordinates(mesh, on_sphere=self.on_sphere, radius=radius) # Set up DG1 equispaced space, used for making metadata _ = self.spaces('DG1_equispaced') self.coords.register_space(self, 'DG1_equispaced') diff --git a/gusto/core/io.py b/gusto/core/io.py index ebba47815..42e45300a 100644 --- a/gusto/core/io.py +++ b/gusto/core/io.py @@ -57,7 +57,7 @@ def pick_up_mesh(output, mesh_name, comm=COMM_WORLD): else: dumpdir = path.join("results", output.dirname) chkfile = path.join(dumpdir, "chkpt.h5") - with CheckpointFile(chkfile, 'r', comm=comm) as chk: + with CheckpointFile(chkfile, 'r', comm) as chk: mesh = chk.load_mesh(mesh_name) if dumpdir: @@ -665,7 +665,7 @@ def pick_up_from_checkpoint(self, state_fields, comm=COMM_WORLD): step = chk.read_attribute("/", "step") else: - with CheckpointFile(chkfile, 'r', comm) as chk: + with CheckpointFile(chkfile, 'r', self.domain.mesh.comm) as chk: mesh = self.domain.mesh # Recover compulsory fields from the checkpoint for field_name in self.to_pick_up: @@ -773,7 +773,7 @@ def dump(self, state_fields, time_data): chkpt_mode = 'a' else: chkpt_mode = 'w' - with CheckpointFile(self.chkpt_path, chkpt_mode) as chk: + with CheckpointFile(self.chkpt_path, chkpt_mode, self.mesh.comm) as chk: chk.save_mesh(self.domain.mesh) for field_name in self.to_pick_up: if output.multichkpt: @@ -826,8 +826,17 @@ def create_nc_dump(self, filename, space_names): # we instead save string metadata as char arrays of fixed length. 
if isinstance(output_value, str): nc_field_file.createVariable(metadata_key, 'S1', ('dim_one', 'dim_string')) - output_char_array = np.array([output_value], dtype='S256') - nc_field_file[metadata_key][:] = stringtochar(output_char_array) + try: + # For more recent versions of netCDF need to specify encoding + nc_field_file[metadata_key]._Encoding = 'UTF-8' # tell netCDF4 this char var is UTF-8 text + N = nc_field_file.dimensions['dim_string'].size + max_chars = max(1, N // 4) # max number of characters that can be stored + output_char_array = np.array([output_value], dtype=f"U{max_chars}") + nc_field_file[metadata_key][:] = stringtochar(output_char_array, encoding='utf-8') + except ValueError: + # For older versions of netCDF + output_char_array = np.array([output_value], dtype='S256') + nc_field_file[metadata_key][:] = stringtochar(output_char_array) else: nc_field_file.createVariable(metadata_key, type(output_value), ('dim_one',)) nc_field_file[metadata_key][0] = output_value @@ -981,7 +990,7 @@ def make_nc_dataset(filename, access, comm): """ try: - nc_field_file = Dataset(filename, access, parallel=True) + nc_field_file = Dataset(filename, access, parallel=True, comm=comm) nc_supports_parallel = True except ValueError: # parallel netCDF not available, use the serial version instead diff --git a/gusto/core/kernels.py b/gusto/core/kernels.py index 6b78691dc..ddb046232 100644 --- a/gusto/core/kernels.py +++ b/gusto/core/kernels.py @@ -10,7 +10,7 @@ """ from firedrake import dx -from firedrake.parloops import par_loop, READ, WRITE, MIN, MAX, op2 +from firedrake.parloops import par_loop, READ, WRITE, INC, MIN, MAX, op2 import numpy as np @@ -114,6 +114,55 @@ def apply(self, field, field_in): "field_in": (field_in, READ)}) +class MeanMixingRatioWeights(): + """ + Finds the lambda values for blending a mixing ratio and its + mean DG0 field in the MeanLimiter. + + The minimum value in each cell is identified. 
+ If the value is negative, then a lamda weight is computed + that will ensure non-negativity in the limiting step. + """ + + def __init__(self, V_DG1): + """ + Args: + V (:class:`FunctionSpace`): The space of the field for the mean + mixing ratio, which should be DG0. + """ + + shapes = {'nDOFs_DG1': V_DG1.finat_element.space_dimension()} + domain = "{{[i]: 0 <= i < {nDOFs_DG1}}}".format(**shapes) + + instrs = (""" + min_value = 0.0 + + for i + min_value = fmin(min_value, mX_field[i]) + end + + if min_value < 0.0 + lamda[0] = fmax(lamda[0],-min_value/(mean_field[0] - min_value)) + end + + """) + + self._kernel = (domain, instrs) + + def apply(self, lamda, mX_field, mean_field): + """ + Performs the par loop. + + Args: + w (:class:`Function`): the field in which to store the weights. This + lives in the continuous target space. + """ + par_loop(self._kernel, dx, + {"lamda": (lamda, INC), + "mX_field": (mX_field, READ), + "mean_field": (mean_field, READ)}) + + class MinKernel(): """Finds the minimum DoF value of a field.""" diff --git a/gusto/core/logging.py b/gusto/core/logging.py index 552dac2af..8f0946480 100644 --- a/gusto/core/logging.py +++ b/gusto/core/logging.py @@ -5,8 +5,10 @@ created internally. The primary means of configuration is via environment variables, the -same way that the standard Python root logger is. See the -:mod:`logging` page for details. +same way that the standard Python root logger is, e.g.: +`export GUSTO_LOG_LEVEL=DEBUG` + +See the :mod:`logging` page for details. Set ``GUSTO_LOG_LEVEL`` to any of ``DEBUG``, ``INFO``, ``WARNING``, ``ERROR`` or ``CRITICAL`` (from most verbose to least verbose). 
@@ -65,9 +67,9 @@ def capture_exceptions(exception_type, exception_value, traceback, logger=logger sys.excepthook = capture_exceptions # Set the log level based on environment variables -log_level = os.environ.get("GUSTO_LOG_LEVEL", WARNING) -logfile_level = os.environ.get("GUSTO_FILE_LOG_LEVEL", DEBUG) -logconsole_level = os.environ.get("GUSTO_CONSOLE_LOG_LEVEL", INFO) +log_level = os.environ.get("GUSTO_LOG_LEVEL", INFO) +logfile_level = os.environ.get("GUSTO_FILE_LOG_LEVEL", log_level) +logconsole_level = os.environ.get("GUSTO_CONSOLE_LOG_LEVEL", log_level) log_level_list = [log_level, logfile_level, logconsole_level] log_levels = [ logging.getLevelName(x) if isinstance(x, str) @@ -198,6 +200,7 @@ def update_logfile_location(new_path, comm): new_fh.name = "gusto-file-log" logger.addHandler(new_fh) logger.debug("Re-opening logger") + elif len(fh) > 1: raise LoggingError( "More than one log handler with name `gusto-temp-file-log`\n" diff --git a/gusto/diagnostics/diagnostics.py b/gusto/diagnostics/diagnostics.py index 4f0085bd4..121c24466 100644 --- a/gusto/diagnostics/diagnostics.py +++ b/gusto/diagnostics/diagnostics.py @@ -446,9 +446,7 @@ def setup(self, domain, state_fields): self.cell_flux_form = 2*avg(un*test)*dS_calc + un*test*ds_calc # Final Courant number expression - cell_flux = self.cell_flux.riesz_representation( - 'l2', solver_options={'function_space': V} - ) + cell_flux = Function(V, val=self.cell_flux.dat) self.expr = cell_flux * domain.dt / cell_volume super().setup(domain, state_fields) @@ -653,14 +651,10 @@ def setup(self, domain, state_fields): class SphericalComponent(VectorComponent): """Base diagnostic for computing spherical-polar components of fields.""" - def __init__(self, name, rotated_pole=None, space=None, method='interpolate'): + def __init__(self, name, space=None, method='interpolate'): """ Args: name (str): name of the field to compute the component of. 
- rotated_pole (tuple of floats, optional): a tuple of floats - (lon, lat) of the new pole, in the original coordinate system. - The longitude and latitude must be expressed in radians. - Defaults to None, corresponding to a pole of (0, pi/2). space (:class:`FunctionSpace`, optional): the function space to evaluate the diagnostic field in. Defaults to None, in which case the default space is the domain's DG space. @@ -668,7 +662,6 @@ def __init__(self, name, rotated_pole=None, space=None, method='interpolate'): for this diagnostic. Valid options are 'interpolate', 'project', 'assign' and 'solve'. Defaults to 'interpolate'. """ - self.rotated_pole = (0.0, pi/2) if rotated_pole is None else rotated_pole super().__init__(name=name, space=space, method=method) def _check_args(self, domain, field): @@ -707,7 +700,8 @@ def setup(self, domain, state_fields): f = state_fields(self.fname) self._check_args(domain, f) xyz = SpatialCoordinate(domain.mesh) - _, e_lat, _ = rotated_lonlatr_vectors(xyz, self.rotated_pole) + pole = (0.0, pi/2) + _, e_lat, _ = rotated_lonlatr_vectors(xyz, pole) super().setup(domain, state_fields, e_lat) @@ -729,7 +723,8 @@ def setup(self, domain, state_fields): f = state_fields(self.fname) self._check_args(domain, f) xyz = SpatialCoordinate(domain.mesh) - e_lon, _, _ = rotated_lonlatr_vectors(xyz, self.rotated_pole) + pole = (0.0, pi/2) + e_lon, _, _ = rotated_lonlatr_vectors(xyz, pole) super().setup(domain, state_fields, e_lon) @@ -751,7 +746,8 @@ def setup(self, domain, state_fields): f = state_fields(self.fname) self._check_args(domain, f) xyz = SpatialCoordinate(domain.mesh) - _, _, e_r = rotated_lonlatr_vectors(xyz, self.rotated_pole) + pole = (0.0, pi/2) + _, _, e_r = rotated_lonlatr_vectors(xyz, pole) super().setup(domain, state_fields, e_r) diff --git a/gusto/equations/boussinesq_equations.py b/gusto/equations/boussinesq_equations.py index d39eea40c..357d92f0e 100644 --- a/gusto/equations/boussinesq_equations.py +++ 
b/gusto/equations/boussinesq_equations.py @@ -1,6 +1,7 @@ """Defines the Boussinesq equations.""" -from firedrake import inner, dx, div, cross, split, as_vector +from firedrake import ( + inner, dx, div, cross, split, as_vector, TestFunctions, TrialFunctions, dot, grad, DirichletBC) from firedrake.fml import subject, all_terms from gusto.core.labels import ( prognostic, linearisation, @@ -217,6 +218,60 @@ def __init__(self, domain, parameters, # Add linearisations to equations self.residual = self.generate_linear_terms(residual, self.linearisation_map) + def schur_complement_form(self, alpha=0.5, tau_values=None): + domain = self.domain + dt = domain.dt + tau_values = tau_values or {} + # Set relaxation parameters. If an alternative has not been given, set + # to semi-implicit off-centering factor + beta_u = dt*tau_values.get("u", alpha) + beta_p = dt*tau_values.get("p", alpha) + beta_b = dt*tau_values.get("b", alpha) + Vu = domain.spaces("HDiv") + Vp = domain.spaces("DG") + + # Build the reduced function space for u,p + M = Vu*Vp + w, phi = TestFunctions(M) + u, p = TrialFunctions(M) + + # Get background fields + bbar = split(self.X_ref)[2] + + # Analytical (approximate) elimination of theta + k = self.domain.k # Upward pointing unit vector + b = -dot(k, u)*dot(k, grad(bbar))*beta_b + + # vertical projection + def V(u): + return k*inner(u, k) + + seqn = ( + inner(w, u)*dx + - beta_u*div(w)*p*dx + - beta_u*inner(w, k)*b*dx + ) + + if self.compressible: + cs = self.parameters.cs + seqn += phi * p * dx + beta_p * phi * cs**2 * div(u) * dx + else: + seqn += phi * div(u) * dx + + if hasattr(self, "mu"): + seqn += dt*self.mu*inner(w, k)*inner(u, k)*dx + + if self.parameters.Omega is not None: + Omega = as_vector((0, 0, self.parameters.Omega)) + seqn += inner(w, cross(2*Omega, u))*dx + + # Boundary conditions (assumes extruded mesh) + # BCs are declared for the plain velocity space. 
As we need them in + # a mixed problem, we replicate the BCs but for subspace of M + sbcs = [DirichletBC(M.sub(0), bc.function_arg, bc.sub_domain) for bc in self.bcs['u']] + + return seqn, sbcs + class LinearBoussinesqEquations(BoussinesqEquations): """ diff --git a/gusto/equations/common_forms.py b/gusto/equations/common_forms.py index b8caf2e5c..360866d8f 100644 --- a/gusto/equations/common_forms.py +++ b/gusto/equations/common_forms.py @@ -367,7 +367,7 @@ def split_continuity_form(equation): u_trial = TrialFunctions(W)[u_idx] qbar = split(equation.X_ref)[idx] # Add linearisation to adv_term - linear_adv_term = linear_advection_form(test, qbar, u_trial) + linear_adv_term = linear_advection_form(test, qbar, u_trial, qbar, uadv) adv_term = linearisation(adv_term, linear_adv_term) # Add linearisation to div_term linear_div_term = transporting_velocity(qbar*test*div(u_trial)*dx, u_trial) @@ -446,7 +446,7 @@ def split_linear_advection_form(test, qbar, ubar, ubar_full): :class:`LabelledForm`: a labelled transport form. 
""" - L = test*dot(ubar, grad(qbar))*dx + L = inner(test, dot(ubar, grad(qbar)))*dx form = transporting_velocity(L, ubar_full) return transport(form, TransportEquationType.advective) diff --git a/gusto/equations/compressible_euler_equations.py b/gusto/equations/compressible_euler_equations.py index a6157c30a..d433f1764 100644 --- a/gusto/equations/compressible_euler_equations.py +++ b/gusto/equations/compressible_euler_equations.py @@ -1,8 +1,8 @@ """Defines variants of the compressible Euler equations.""" from firedrake import ( - sin, pi, inner, dx, div, cross, FunctionSpace, FacetNormal, jump, avg, dS_v, - conditional, SpatialCoordinate, split, Constant, as_vector + sin, pi, inner, dx, div, cross, FunctionSpace, FacetNormal, jump, avg, + conditional, SpatialCoordinate, split, Constant, as_vector, dS_v ) from firedrake.fml import subject, replace_subject, all_terms from gusto.core.labels import ( @@ -34,6 +34,8 @@ class CompressibleEulerEquations(PrognosticEquationSet): pressure. """ + name = "CompressibleEulerEquations" + def __init__(self, domain, parameters, sponge_options=None, extra_terms=None, space_names=None, linearisation_map=all_terms, diff --git a/gusto/equations/prognostic_equations.py b/gusto/equations/prognostic_equations.py index 1f6035b71..61e305e63 100644 --- a/gusto/equations/prognostic_equations.py +++ b/gusto/equations/prognostic_equations.py @@ -175,6 +175,10 @@ def generate_mass_terms(self): return mass_form + def update_reference_profiles(self): + """Update the reference profiles used in the equation set. 
Default is to do nothing.""" + pass + # ======================================================================== # # Linearisation Routines # ======================================================================== # diff --git a/gusto/equations/shallow_water_equations.py b/gusto/equations/shallow_water_equations.py index a939f91d0..e075f25f2 100644 --- a/gusto/equations/shallow_water_equations.py +++ b/gusto/equations/shallow_water_equations.py @@ -1,8 +1,9 @@ """Classes for defining variants of the shallow-water equations.""" -from firedrake import (inner, dx, div, FunctionSpace, FacetNormal, jump, avg, +from firedrake import (inner, dx, div, FunctionSpace, FacetNormal, jump, avg, DirichletBC, dS, split, conditional, exp, sqrt, Function, - SpatialCoordinate, Constant) + SpatialCoordinate, Constant, TrialFunctions, TestFunctions, dot, grad, + interpolate, assemble) from firedrake.fml import subject, drop, all_terms from gusto.core.labels import ( linearisation, pressure_gradient, coriolis, prognostic @@ -17,6 +18,7 @@ linear_vector_invariant_form, linear_circulation_form ) from gusto.equations.prognostic_equations import PrognosticEquationSet +from gusto.core.logging import logger __all__ = ["ShallowWaterEquations", "LinearShallowWaterEquations", @@ -434,11 +436,7 @@ def _setup_residual(self, u_transport_option): + jump(bbar*w, n) * avg(D_trial) * dS + 0.5 * jump(Dbar*w, n) * avg(b_trial) * dS - beta2 * D_trial * div(qvbar*w)*dx - - 0.5 * beta2 * qvbar * div(Dbar*w) * dx + beta2 * jump(qvbar*w, n) * avg(D_trial) * dS - + 0.5 * beta2 * jump(Dbar*w, n) * avg(qvbar) * dS - - 0.5 * bbar * div(Dbar*w) * dx - + 0.5 * jump(Dbar*w, n) * avg(bbar) * dS - 0.5 * bbar * div(D_trial*w) * dx + 0.5 * jump(D_trial*w, n) * avg(bbar) * dS - beta2 * 0.5 * qvbar * div(D_trial*w) * dx @@ -463,8 +461,6 @@ def _setup_residual(self, u_transport_option): + jump(bbar*w, n) * avg(D_trial) * dS - 0.5 * b_trial * div(Dbar*w) * dx + 0.5 * jump(Dbar*w, n) * avg(b_trial) * dS - - 0.5 * bbar * 
div(Dbar*w) * dx - + 0.5 * jump(Dbar*w, n) * avg(bbar) * dS - 0.5 * bbar * div(D_trial*w) * dx + 0.5 * jump(D_trial*w, n) * avg(bbar) * dS, 'u'), self.X)) @@ -535,6 +531,74 @@ def compute_saturation(self, X): sat_expr = q0*H/(D+topog) * exp(nu*(1-b/g)) return sat_expr + def schur_complement_form(self, alpha=0.5, tau_values=None): + # Approximate elimination of b + domain = self.domain + dt = domain.dt + tau_values = tau_values or {} + beta_u = dt*tau_values.get("u", alpha) + beta_d = dt*tau_values.get("D", alpha) + beta_b = dt*tau_values.get("b", alpha) + + n = FacetNormal(domain.mesh) + Vu = domain.spaces("HDiv") + VD = domain.spaces("DG") + M_ = Vu*VD + u_, D_ = TrialFunctions(M_) + w_, phi_ = TestFunctions(M_) + + _, Dref, bref = self.X_ref.subfunctions[0:3] + b_ = -dot(u_, grad(bref))*beta_b + + if self.equivalent_buoyancy: + # compute q_v using q_sat to partition q_t into q_v and q_c + self.q_sat_func = Function(VD) + self.qvbar = Function(VD) + qtbar = split(self.X_ref)[3] + + # set up interpolators that use the X_ref values for D and b_e + self.q_sat_expr_interpolate = interpolate( + self.compute_saturation(self.X_ref), VD) + self.q_v_interpolate = interpolate( + conditional(qtbar < self.q_sat_func, qtbar, self.q_sat_func), + VD) + + # bbar was be_bar and here we correct to become bbar + bref += self.parameters.beta2 * self.qvbar + + seqn = ( + inner(w_, (u_)) * dx + - beta_u * (D_) * div(w_*bref) * dx + + beta_u * jump(w_*bref, n) * avg(D_) * dS + - beta_u * 0.5 * Dref * b_ * div(w_) * dx + - beta_u * 0.5 * bref * div(w_*(D_)) * dx + + beta_u * 0.5 * jump((D_)*w_, n) * avg(bref) * dS + + inner(phi_, (D_)) * dx + + beta_d * phi_ * div(Dref*u_) * dx + ) + + if 'coriolis' in self.prescribed_fields._field_names: + f = self.prescribed_fields('coriolis') + seqn += beta_u * f * inner(w_, domain.perp(u_)) * dx + + # Boundary conditions + sbcs = [DirichletBC(M_.sub(0), bc.function_arg, bc.sub_domain) for bc in self.bcs['u']] + + return seqn, sbcs + + def 
update_reference_profiles(self): + "Update reference profiles for equivalent buoyancy formulation." + if self.equivalent_buoyancy: + self.q_sat_func.assign(assemble(self.q_sat_expr_interpolate)) + self.qvbar.assign(assemble(self.q_v_interpolate)) + + bbar = split(self.X_ref)[2] + b = self.X.subfunctions[2] + bbar_func = Function(b.function_space()).interpolate(bbar) + if bbar_func.dat.data.max() == 0 and bbar_func.dat.data.min() == 0: + logger.warning("The reference profile for b in the linear solver is zero. To set a non-zero profile add b to the set_reference_profiles argument.") + logger.info("Updated equivalent buoyancy reference profile.") + class LinearThermalShallowWaterEquations(ThermalShallowWaterEquations): u""" diff --git a/gusto/physics/chemistry.py b/gusto/physics/chemistry.py index 79dde5ef0..2093f71cf 100644 --- a/gusto/physics/chemistry.py +++ b/gusto/physics/chemistry.py @@ -1,8 +1,9 @@ """Objects describe chemical conversion and reaction processes.""" -from firedrake import dx, split, Function +from firedrake import dx, split, Function, sqrt, exp, Constant, conditional, max_value, min_value, assemble +from firedrake.__future__ import interpolate from firedrake.fml import subject -from gusto.core.labels import prognostic +from gusto.core.labels import prognostic, source_label from gusto.core.logging import logger from gusto.physics.physics_parametrisation import PhysicsParametrisation @@ -13,7 +14,7 @@ class TerminatorToy(PhysicsParametrisation): """ Setup the Terminator Toy chemistry interaction as specified in 'The terminator toy chemistry test ...' - Lauritzen et. al. (2014). + Lauritzen et. al. (2015). 
The coupled equations for the two species are given by: @@ -24,7 +25,8 @@ class TerminatorToy(PhysicsParametrisation): """ def __init__(self, equation, k1=1, k2=1, - species1_name='X', species2_name='X2'): + species1_name='X_tracer', species2_name='X2_tracer', + analytical_formulation=False): """ Args: equation (:class: 'PrognosticEquationSet'): the model's equation @@ -33,14 +35,21 @@ def __init__(self, equation, k1=1, k2=1, k2(float, optional): Reaction rate for species 2 (X2). Defaults to a constant 1 over the domain. species1_name(str, optional): Name of the first interacting species. - Defaults to 'X'. + Defaults to 'X_tracer'. species2_name(str, optional): Name of the second interacting - species. Defaults to 'X2'. + species. Defaults to 'X2_tracer'. + analytical_formulation (bool, optional): whether the scheme is already + put into a Backwards Euler formulation (which allows this scheme + to actually be used with a Forwards Euler or other explicit time + discretisation). Otherwise, this is formulated more generally + and can be used with any time stepper. Defaults to False. """ label_name = 'terminator_toy' super().__init__(equation, label_name, parameters=None) + self.analytical_formulation = analytical_formulation + if species1_name not in equation.field_names: raise ValueError(f"Field {species1_name} does not exist in the equation set") if species2_name not in equation.field_names: @@ -57,20 +66,57 @@ def __init__(self, equation, k1=1, k2=1, species1 = split(self.Xq)[self.species1_idx] species2 = split(self.Xq)[self.species2_idx] - test_1 = equation.tests[self.species1_idx] - test_2 = equation.tests[self.species2_idx] + test1 = equation.tests[self.species1_idx] + test2 = equation.tests[self.species2_idx] + + W = equation.function_space + V_idxs = [self.species1_idx, self.species2_idx] + + self.dt = Constant(0.0) + + if analytical_formulation: + # This uses the analytical formulation given in + # Appendix D of Lauritzen et al. 
(2015) + # This must be used with forward Euler. + X_T_0 = 4.e-6 + r = k1/(4.0*k2) + d = sqrt(r**2 + 2.0*X_T_0*r) + e = exp(-4.0*k2*d*self.dt) + + e1 = conditional(abs(d*k2*self.dt) > 1.e-16, (1.0-e)/(d*self.dt), 4.0*k2) + + source_expr = -e1 * (species1 - d + r) * (species1 + d + r) / (1.0 + e + self.dt * e1 * (species1 + r)) + + # Ensure the increments don't create a negative mixing ratio + source_expr = conditional(source_expr < 0.0, + max_value(source_expr, -species1/self.dt), + min_value(source_expr, 2.0*species2/self.dt)) + + source1_expr = source_expr + source2_expr = -source_expr/2.0 - Kx = k1*species2 - k2*(species1**2) + self.source = Function(W) + self.source_expr = [split(self.source)[V_idx] for V_idx in V_idxs] + self.source_int = [self.source.subfunctions[V_idx] for V_idx in V_idxs] - source1_expr = test_1 * 2*Kx * dx - source2_expr = test_2 * -Kx * dx + self.source_interpolate = [interpolate(source1_expr, self.source_int[0].function_space()), + interpolate(source2_expr, self.source_int[1].function_space())] - equation.residual -= self.label(subject(prognostic(source1_expr, 'X'), self.Xq), self.evaluate) - equation.residual -= self.label(subject(prognostic(source2_expr, 'X2'), self.Xq), self.evaluate) + equation.residual -= source_label(self.label(subject(prognostic(test1 * self.source_expr[0] * dx, species1_name), equation.X), self.evaluate)) + equation.residual -= source_label(self.label(subject(prognostic(test2 * self.source_expr[1] * dx, species2_name), equation.X), self.evaluate)) + + else: + Kx = k1*species2 - k2*(species1**2) + + source1_expr = test1 * 2*Kx * dx + source2_expr = test2 * -Kx * dx + + equation.residual -= self.label(subject(prognostic(source1_expr, species1_name), self.Xq), self.evaluate) + equation.residual -= self.label(subject(prognostic(source2_expr, species2_name), self.Xq), self.evaluate) def evaluate(self, x_in, dt, x_out=None): """ - Evaluates the source/sink for the coalescence process. 
+ Evaluates the Terminator Toy interaction chemistry. Args: x_in (:class:`Function`): the (mixed) field to be evolved. @@ -79,6 +125,18 @@ def evaluate(self, x_in, dt, x_out=None): field to be outputed. """ + self.dt.assign(dt) + + if self.analytical_formulation: + self.Xq.assign(x_in) + + # Evaluate the source + for interpolator, src in zip(self.source_interpolate, self.source_int): + src.assign(assemble(interpolator)) + + if x_out is not None: + x_out.assign(self.source) + logger.info(f'Evaluating physics parametrisation {self.label.label}') pass diff --git a/gusto/recovery/recovery.py b/gusto/recovery/recovery.py index 2ce7b865e..b37e19c8f 100644 --- a/gusto/recovery/recovery.py +++ b/gusto/recovery/recovery.py @@ -10,11 +10,10 @@ import ufl from firedrake import (BrokenElement, Constant, DirichletBC, FiniteElement, - Function, FunctionSpace, Projector, + Function, FunctionSpace, Projector, dot, SpatialCoordinate, TensorProductElement, VectorFunctionSpace, as_vector, function, interval, - VectorElement, assemble) -from firedrake.__future__ import interpolate + VectorElement) from gusto.recovery import Averager from .recovery_kernels import (BoundaryRecoveryExtruded, BoundaryRecoveryHCurl, BoundaryGaussianElimination) @@ -145,14 +144,13 @@ def __init__(self, x_inout, method=BoundaryMethod.extruded, eff_coords=None): V_broken = FunctionSpace(mesh, BrokenElement(V_inout.ufl_element())) self.x_DG1_wrong = Function(V_broken) self.x_DG1_correct = Function(V_broken) - self.interpolate = interpolate(self.x_inout, V_broken) self.averager = Averager(self.x_DG1_correct, self.x_inout) self.kernel = BoundaryGaussianElimination(V_broken) def apply(self): """Applies the boundary recovery process.""" if self.method == BoundaryMethod.taylor: - self.x_DG1_wrong.assign(assemble(self.interpolate)) + self.x_DG1_wrong.interpolate(self.x_inout) self.kernel.apply(self.x_DG1_wrong, self.x_DG1_correct, self.act_coords, self.eff_coords, self.num_ext) self.averager.project() @@ -196,6 
+194,7 @@ def __init__(self, x_in, x_out, method='interpolate', boundary_method=None): if not isinstance(x_in, (ufl.core.expr.Expr, function.Function)): raise ValueError("Can only recover UFL expression or Functions not '%s'" % type(x_in)) + self.x_in = x_in self.x_out = x_out V_out = x_out.function_space() mesh = V_out.mesh() @@ -217,17 +216,15 @@ def __init__(self, x_in, x_out, method='interpolate', boundary_method=None): # -------------------------------------------------------------------- # # Set up interpolation / projection # -------------------------------------------------------------------- # - x_brok = Function(V_brok) + self.x_brok = Function(V_brok) self.method = method - if method == 'interpolate': - self.broken_op = lambda: assemble(interpolate(x_in, V_brok), tensor=x_brok) - elif method == 'project': - self.broken_op = Projector(x_in, x_brok) - else: + if method == 'project': + self.broken_op = Projector(x_in, self.x_brok) + elif method != 'interpolate': raise ValueError(f'Valid methods are "interpolate" or "project", not {method}') - self.averager = Averager(x_brok, self.x_out) + self.averager = Averager(self.x_brok, self.x_out) # -------------------------------------------------------------------- # # Set up boundary recovery @@ -264,40 +261,48 @@ def __init__(self, x_in, x_out, method='interpolate', boundary_method=None): CG1 = FunctionSpace(mesh, "CG", 1) # now, break the problem down into components - x_out_scalars = [] + self.x_out_scalars = [] self.boundary_recoverers = [] self.interpolate_to_scalars = [] self.extra_averagers = [] + self.unit_vec = [] # the boundary recoverer needs to be done on a scalar fields # so need to extract component and restore it after the boundary recovery is done for i in range(V_out.value_size): - x_out_scalars.append(Function(CG1)) - self.interpolate_to_scalars.append( - lambda i=i: assemble(interpolate(self.x_out[i], CG1), tensor=x_out_scalars[i]) - ) - 
self.boundary_recoverers.append(BoundaryRecoverer(x_out_scalars[i], + self.unit_vec.append(as_vector( + [Constant(1.0) if j == i else Constant(0.0) for j in range(V_out.value_size)] + )) + self.x_out_scalars.append(Function(CG1)) + self.boundary_recoverers.append(BoundaryRecoverer(self.x_out_scalars[i], method=BoundaryMethod.taylor, eff_coords=eff_coords[i])) - self.interpolate_to_vector = interpolate(as_vector(x_out_scalars), V_out) + self.x_out_expr = self.x_out_scalars[0]*self.unit_vec[0] + for i in range(1, V_out.value_size): + self.x_out_expr += self.x_out_scalars[i]*self.unit_vec[i] def project(self): """Perform the whole recovery step.""" # Initial averaging step - self.broken_op.project() if self.method == 'project' else self.broken_op() + if self.method == 'project': + self.broken_op.project() + else: + self.x_brok.interpolate(self.x_in) self.averager.project() # Boundary recovery if self.boundary_method is not None: # For vector elements, treat each component separately if self.vector_function_space: - for (interpolate_to_scalar, boundary_recoverer) \ - in zip(self.interpolate_to_scalars, self.boundary_recoverers): - interpolate_to_scalar() + # TODO: something about interpolating to scalars and then back + # is making the lowest-order bubble tests assymetric! 
+ for i, boundary_recoverer in enumerate(self.boundary_recoverers): + self.x_out_scalars[i].interpolate(dot(self.unit_vec[i], self.x_out)) + # Correct at boundaries boundary_recoverer.apply() # Combine the components to obtain the vector field - self.x_out.assign(assemble(self.interpolate_to_vector)) + self.x_out.interpolate(as_vector(self.x_out_expr)) else: # Extrapolate at boundaries self.boundary_recoverer.apply() diff --git a/gusto/solvers/__init__.py b/gusto/solvers/__init__.py index fc4c8a7d2..2f3b728ff 100644 --- a/gusto/solvers/__init__.py +++ b/gusto/solvers/__init__.py @@ -1,3 +1,3 @@ from gusto.solvers.parameters import * # noqa -from gusto.solvers.linear_solvers import * # noqa from gusto.solvers.preconditioners import * # noqa +from gusto.solvers.solver_presets import * # noqa diff --git a/gusto/solvers/linear_solvers.py b/gusto/solvers/linear_solvers.py deleted file mode 100644 index 8149b045f..000000000 --- a/gusto/solvers/linear_solvers.py +++ /dev/null @@ -1,972 +0,0 @@ -""" -This module provides abstract linear solver objects. - -The linear solvers provided here are used for solving linear problems on mixed -finite element spaces. 
-""" - -from firedrake import ( - split, LinearVariationalProblem, Constant, LinearVariationalSolver, - TestFunctions, TrialFunctions, TestFunction, TrialFunction, lhs, - rhs, FacetNormal, div, dx, jump, avg, dS, dS_v, dS_h, ds_v, ds_t, ds_b, - ds_tb, inner, action, dot, grad, Function, VectorSpaceBasis, cross, - BrokenElement, FunctionSpace, MixedFunctionSpace, DirichletBC, as_vector, - assemble, conditional -) -from firedrake.fml import Term, drop -from firedrake.petsc import flatten_parameters -from firedrake.__future__ import interpolate -from pyop2.profiling import timed_function, timed_region - -from gusto.equations.active_tracers import TracerVariableType -from gusto.core.logging import ( - logger, DEBUG, logging_ksp_monitor_true_residual, - attach_custom_monitor -) -from gusto.core.labels import linearisation, time_derivative, hydrostatic -from gusto.equations import thermodynamics -from gusto.recovery.recovery_kernels import AverageWeightings, AverageKernel -from abc import ABCMeta, abstractmethod, abstractproperty - - -__all__ = ["BoussinesqSolver", "LinearTimesteppingSolver", "CompressibleSolver", - "ThermalSWSolver", "MoistConvectiveSWSolver"] - - -class TimesteppingSolver(object, metaclass=ABCMeta): - """Base class for timestepping linear solvers for Gusto.""" - - def __init__(self, equations, alpha=0.5, tau_values=None, - solver_parameters=None, overwrite_solver_parameters=False): - """ - Args: - equations (:class:`PrognosticEquation`): the model's equation. - alpha (float, optional): the semi-implicit off-centring factor. - Defaults to 0.5. A value of 1 is fully-implicit. - tau_values (dict, optional): contains the semi-implicit relaxation - parameters. Defaults to None, in which case the value of alpha is used. - solver_parameters (dict, optional): contains the options to be - passed to the underlying :class:`LinearVariationalSolver`. - Defaults to None. 
- overwrite_solver_parameters (bool, optional): if True use only the - `solver_parameters` that have been passed in. If False then - update the default parameters with the `solver_parameters` - passed in. Defaults to False. - """ - self.equations = equations - self.dt = equations.domain.dt - self.alpha = Constant(alpha) - self.tau_values = tau_values if tau_values is not None else {} - - if solver_parameters is not None: - if not overwrite_solver_parameters: - p = flatten_parameters(self.solver_parameters) - p.update(flatten_parameters(solver_parameters)) - solver_parameters = p - self.solver_parameters = solver_parameters - - if logger.isEnabledFor(DEBUG): - self.solver_parameters["ksp_monitor_true_residual"] = None - - # setup the solver - self._setup_solver() - - @staticmethod - def log_ksp_residuals(ksp): - if logger.isEnabledFor(DEBUG): - ksp.setMonitor(logging_ksp_monitor_true_residual) - - @abstractproperty - def solver_parameters(self): - """Solver parameters for this solver""" - pass - - @abstractmethod - def _setup_solver(self): - pass - - @abstractmethod - def update_reference_profiles(self): - """ - Update the solver when the reference profiles have changed. - - This typically includes forcing any Jacobians that depend on - the reference profiles to be reassembled, and recalculating - any values derived from the reference values. - """ - pass - - @abstractmethod - def solve(self): - pass - - -class CompressibleSolver(TimesteppingSolver): - """ - Timestepping linear solver object for the compressible Euler equations. - - This solves a linear problem for the compressible Euler equations in - theta-exner formulation with prognostic variables u (velocity), rho - (density) and theta (potential temperature). It follows the following - strategy: - - (1) Analytically eliminate theta (introduces error near topography) - - (2a) Formulate the resulting mixed system for u and rho using a - hybridized mixed method. 
This breaks continuity in the - linear perturbations of u, and introduces a new unknown on the - mesh interfaces approximating the average of the Exner pressure - perturbations. These trace unknowns also act as Lagrange - multipliers enforcing normal continuity of the "broken" u variable. - - (2b) Statically condense the block-sparse system into a single system - for the Lagrange multipliers. This is the only globally coupled - system requiring a linear solver. - - (2c) Using the computed trace variables, we locally recover the - broken velocity and density perturbations. This is accomplished - in two stages: - (i): Recover rho locally using the multipliers. - (ii): Recover "broken" u locally using rho and the multipliers. - - (2d) Project the "broken" velocity field into the HDiv-conforming - space using local averaging. - - (3) Reconstruct theta - """ - - solver_parameters = {'mat_type': 'matfree', - 'ksp_type': 'preonly', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.SCPC', - 'pc_sc_eliminate_fields': '0, 1', - # The reduced operator is not symmetric - 'condensed_field': {'ksp_type': 'fgmres', - 'ksp_rtol': 1.0e-8, - 'ksp_atol': 1.0e-8, - 'ksp_max_it': 100, - 'pc_type': 'gamg', - 'pc_gamg_sym_graph': None, - 'mg_levels': {'ksp_type': 'gmres', - 'ksp_max_it': 5, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}}} - - def __init__(self, equations, alpha=0.5, tau_values=None, - solver_parameters=None, overwrite_solver_parameters=False): - """ - Args: - equations (:class:`PrognosticEquation`): the model's equation. - alpha (float, optional): the semi-implicit off-centring factor. - Defaults to 0.5. A value of 1 is fully-implicit. - tau_values (dict, optional): contains the semi-implicit relaxation - parameters. Defaults to None, in which case the value of alpha is used. - solver_parameters (dict, optional): contains the options to be - passed to the underlying :class:`LinearVariationalSolver`. - Defaults to None. 
- overwrite_solver_parameters (bool, optional): if True use only the - `solver_parameters` that have been passed in. If False then - update the default parameters with the `solver_parameters` - passed in. Defaults to False. - """ - self.equations = equations - self.quadrature_degree = equations.domain.max_quad_degree - - super().__init__(equations, alpha, tau_values, solver_parameters, - overwrite_solver_parameters) - - @timed_function("Gusto:SolverSetup") - def _setup_solver(self): - - equations = self.equations - cp = equations.parameters.cp - dt = self.dt - # Set relaxation parameters. If an alternative has not been given, set - # to semi-implicit off-centering factor - beta_u = dt*self.tau_values.get("u", self.alpha) - beta_t = dt*self.tau_values.get("theta", self.alpha) - beta_r = dt*self.tau_values.get("rho", self.alpha) - - Vu = equations.domain.spaces("HDiv") - Vu_broken = FunctionSpace(equations.domain.mesh, BrokenElement(Vu.ufl_element())) - Vtheta = equations.domain.spaces("theta") - Vrho = equations.domain.spaces("DG") - - h_deg = Vrho.ufl_element().degree()[0] - v_deg = Vrho.ufl_element().degree()[1] - Vtrace = FunctionSpace(equations.domain.mesh, "HDiv Trace", degree=(h_deg, v_deg)) - - # Split up the rhs vector (symbolically) - self.xrhs = Function(self.equations.function_space) - u_in, rho_in, theta_in = split(self.xrhs)[0:3] - - # Build the function space for "broken" u, rho, and pressure trace - M = MixedFunctionSpace((Vu_broken, Vrho, Vtrace)) - w, phi, dl = TestFunctions(M) - u, rho, l0 = TrialFunctions(M) - - n = FacetNormal(equations.domain.mesh) - - # Get background fields - _, rhobar, thetabar = split(equations.X_ref)[0:3] - exnerbar = thermodynamics.exner_pressure(equations.parameters, rhobar, thetabar) - exnerbar_rho = thermodynamics.dexner_drho(equations.parameters, rhobar, thetabar) - exnerbar_theta = thermodynamics.dexner_dtheta(equations.parameters, rhobar, thetabar) - - # Analytical (approximate) elimination of theta - k = 
equations.domain.k # Upward pointing unit vector - theta = -dot(k, u)*dot(k, grad(thetabar))*beta_t + theta_in - - # Only include theta' (rather than exner') in the vertical - # component of the gradient - - # The exner prime term (here, bars are for mean and no bars are - # for linear perturbations) - exner = exnerbar_theta*theta + exnerbar_rho*rho - - # Vertical projection - def V(u): - return k*inner(u, k) - - # hydrostatic projection - h_project = lambda u: u - k*inner(u, k) - - # Specify degree for some terms as estimated degree is too large - dx_qp = dx(degree=(equations.domain.max_quad_degree)) - dS_v_qp = dS_v(degree=(equations.domain.max_quad_degree)) - dS_h_qp = dS_h(degree=(equations.domain.max_quad_degree)) - ds_v_qp = ds_v(degree=(equations.domain.max_quad_degree)) - ds_tb_qp = (ds_t(degree=(equations.domain.max_quad_degree)) - + ds_b(degree=(equations.domain.max_quad_degree))) - - # Add effect of density of water upon theta, using moisture reference profiles - # TODO: Explore if this is the right thing to do for the linear problem - if equations.active_tracers is not None: - mr_t = Constant(0.0)*thetabar - for tracer in equations.active_tracers: - if tracer.chemical == 'H2O': - if tracer.variable_type == TracerVariableType.mixing_ratio: - idx = equations.field_names.index(tracer.name) - mr_bar = split(equations.X_ref)[idx] - mr_t += mr_bar - else: - raise NotImplementedError('Only mixing ratio tracers are implemented') - - theta_w = theta / (1 + mr_t) - thetabar_w = thetabar / (1 + mr_t) - else: - theta_w = theta - thetabar_w = thetabar - - _l0 = TrialFunction(Vtrace) - _dl = TestFunction(Vtrace) - a_tr = _dl('+')*_l0('+')*(dS_v_qp + dS_h_qp) + _dl*_l0*ds_v_qp + _dl*_l0*ds_tb_qp - - def L_tr(f): - return _dl('+')*avg(f)*(dS_v_qp + dS_h_qp) + _dl*f*ds_v_qp + _dl*f*ds_tb_qp - - cg_ilu_parameters = {'ksp_type': 'cg', - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'} - - # Project field averages into functions on the trace space - rhobar_avg = 
Function(Vtrace) - exnerbar_avg = Function(Vtrace) - - rho_avg_prb = LinearVariationalProblem(a_tr, L_tr(rhobar), rhobar_avg, - constant_jacobian=True) - exner_avg_prb = LinearVariationalProblem(a_tr, L_tr(exnerbar), exnerbar_avg, - constant_jacobian=True) - - self.rho_avg_solver = LinearVariationalSolver(rho_avg_prb, - solver_parameters=cg_ilu_parameters, - options_prefix='rhobar_avg_solver') - self.exner_avg_solver = LinearVariationalSolver(exner_avg_prb, - solver_parameters=cg_ilu_parameters, - options_prefix='exnerbar_avg_solver') - - # "broken" u, rho, and trace system - # NOTE: no ds_v integrals since equations are defined on - # a periodic (or sphere) base mesh. - if any([t.has_label(hydrostatic) for t in self.equations.residual]): - u_mass = inner(w, (h_project(u) - u_in))*dx - else: - u_mass = inner(w, (u - u_in))*dx - - eqn = ( - # momentum equation - u_mass - - beta_u*cp*div(theta_w*V(w))*exnerbar*dx_qp - # following does nothing but is preserved in the comments - # to remind us why (because V(w) is purely vertical). - # + beta*cp*jump(theta_w*V(w), n=n)*exnerbar_avg('+')*dS_v_qp - + beta_u*cp*jump(theta_w*V(w), n=n)*exnerbar_avg('+')*dS_h_qp - + beta_u*cp*dot(theta_w*V(w), n)*exnerbar_avg*ds_tb_qp - - beta_u*cp*div(thetabar_w*w)*exner*dx_qp - # trace terms appearing after integrating momentum equation - + beta_u*cp*jump(thetabar_w*w, n=n)*l0('+')*(dS_v_qp + dS_h_qp) - + beta_u*cp*dot(thetabar_w*w, n)*l0*(ds_tb_qp + ds_v_qp) - # mass continuity equation - + (phi*(rho - rho_in) - beta_r*inner(grad(phi), u)*rhobar)*dx - + beta_r*jump(phi*u, n=n)*rhobar_avg('+')*(dS_v + dS_h) - # term added because u.n=0 is enforced weakly via the traces - + beta_r*phi*dot(u, n)*rhobar_avg*(ds_tb + ds_v) - # constraint equation to enforce continuity of the velocity - # through the interior facets and weakly impose the no-slip - # condition - + dl('+')*jump(u, n=n)*(dS_v + dS_h) - + dl*dot(u, n)*(ds_t + ds_b + ds_v) - ) - # TODO: can we get this term using FML? 
- # contribution of the sponge term - if hasattr(self.equations, "mu"): - eqn += dt*self.equations.mu*inner(w, k)*inner(u, k)*dx_qp - - if equations.parameters.Omega is not None: - Omega = as_vector([0, 0, equations.parameters.Omega]) - eqn += beta_u*inner(w, cross(2*Omega, u))*dx - - aeqn = lhs(eqn) - Leqn = rhs(eqn) - - # Function for the hybridized solutions - self.urhol0 = Function(M) - - hybridized_prb = LinearVariationalProblem(aeqn, Leqn, self.urhol0, - constant_jacobian=True) - hybridized_solver = LinearVariationalSolver(hybridized_prb, - solver_parameters=self.solver_parameters, - options_prefix='ImplicitSolver') - self.hybridized_solver = hybridized_solver - - # Project broken u into the HDiv space using facet averaging. - # Weight function counting the dofs of the HDiv element: - self._weight = Function(Vu) - weight_kernel = AverageWeightings(Vu) - weight_kernel.apply(self._weight) - - # Averaging kernel - self._average_kernel = AverageKernel(Vu) - - # HDiv-conforming velocity - self.u_hdiv = Function(Vu) - - # Reconstruction of theta - theta = TrialFunction(Vtheta) - gamma = TestFunction(Vtheta) - - self.theta = Function(Vtheta) - theta_eqn = gamma*(theta - theta_in - + dot(k, self.u_hdiv)*dot(k, grad(thetabar))*beta_t)*dx - - theta_problem = LinearVariationalProblem(lhs(theta_eqn), rhs(theta_eqn), self.theta, - constant_jacobian=True) - self.theta_solver = LinearVariationalSolver(theta_problem, - solver_parameters=cg_ilu_parameters, - options_prefix='thetabacksubstitution') - - # Store boundary conditions for the div-conforming velocity to apply - # post-solve - self.bcs = self.equations.bcs['u'] - - # Log residuals on hybridized solver - self.log_ksp_residuals(self.hybridized_solver.snes.ksp) - # Log residuals on the trace system too - python_context = self.hybridized_solver.snes.ksp.pc.getPythonContext() - attach_custom_monitor(python_context, logging_ksp_monitor_true_residual) - - @timed_function("Gusto:UpdateReferenceProfiles") - def 
update_reference_profiles(self): - with timed_region("Gusto:HybridProjectRhobar"): - logger.info('Compressible linear solver: rho average solve') - self.rho_avg_solver.solve() - - with timed_region("Gusto:HybridProjectExnerbar"): - logger.info('Compressible linear solver: Exner average solve') - self.exner_avg_solver.solve() - - # Because the left hand side of the hybridised problem depends - # on the reference profile, the Jacobian matrix should change - # when the reference profiles are updated. This call will tell - # the hybridized_solver to reassemble the Jacobian next time - # `solve` is called. - self.hybridized_solver.invalidate_jacobian() - - @timed_function("Gusto:LinearSolve") - def solve(self, xrhs, dy): - """ - Solve the linear problem. - - Args: - xrhs (:class:`Function`): the right-hand side field in the - appropriate :class:`MixedFunctionSpace`. - dy (:class:`Function`): the resulting field in the appropriate - :class:`MixedFunctionSpace`. - """ - self.xrhs.assign(xrhs) - - # Solve the hybridized system - logger.info('Compressible linear solver: hybridized solve') - self.hybridized_solver.solve() - - broken_u, rho1, _ = self.urhol0.subfunctions - u1 = self.u_hdiv - - # Project broken_u into the HDiv space - u1.assign(0.0) - - with timed_region("Gusto:HybridProjectHDiv"): - logger.info('Compressible linear solver: restore continuity') - self._average_kernel.apply(u1, self._weight, broken_u) - - # Reapply bcs to ensure they are satisfied - for bc in self.bcs: - bc.apply(u1) - - # Copy back into u and rho cpts of dy - u, rho, theta = dy.subfunctions[0:3] - u.assign(u1) - rho.assign(rho1) - - # Reconstruct theta - with timed_region("Gusto:ThetaRecon"): - logger.info('Compressible linear solver: theta solve') - self.theta_solver.solve() - - # Copy into theta cpt of dy - theta.assign(self.theta) - - -class BoussinesqSolver(TimesteppingSolver): - """ - Linear solver object for the Boussinesq equations. 
- - This solves a linear problem for the Boussinesq equations - with prognostic variables u (velocity), p (pressure) and b (buoyancy). It - follows the following strategy: - - This solver follows the following strategy: - (1) Analytically eliminate b (introduces error near topography) - (2) Solve resulting system for (u,p) using a hybrid-mixed method - (3) Reconstruct b - """ - - solver_parameters = { - 'ksp_type': 'preonly', - 'mat_type': 'matfree', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.HybridizationPC', - 'hybridization': {'ksp_type': 'cg', - 'pc_type': 'gamg', - 'ksp_rtol': 1e-8, - 'mg_levels': {'ksp_type': 'chebyshev', - 'ksp_max_it': 2, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}} - } - - @timed_function("Gusto:SolverSetup") - def _setup_solver(self): - equation = self.equations # just cutting down line length a bit - - dt = self.dt - # Set relaxation parameters. If an alternative has not been given, set - # to semi-implicit off-centering factor - beta_u = dt*self.tau_values.get("u", self.alpha) - beta_p = dt*self.tau_values.get("p", self.alpha) - beta_b = dt*self.tau_values.get("b", self.alpha) - Vu = equation.domain.spaces("HDiv") - Vb = equation.domain.spaces("theta") - Vp = equation.domain.spaces("DG") - - # Split up the rhs vector (symbolically) - self.xrhs = Function(self.equations.function_space) - u_in, p_in, b_in = split(self.xrhs) - - # Build the reduced function space for u,p - M = MixedFunctionSpace((Vu, Vp)) - w, phi = TestFunctions(M) - u, p = TrialFunctions(M) - - # Get background fields - bbar = split(equation.X_ref)[2] - - # Analytical (approximate) elimination of theta - k = equation.domain.k # Upward pointing unit vector - b = -dot(k, u)*dot(k, grad(bbar))*beta_b + b_in - - # vertical projection - def V(u): - return k*inner(u, k) - - eqn = ( - inner(w, (u - u_in))*dx - - beta_u*div(w)*p*dx - - beta_u*inner(w, k)*b*dx - ) - - if equation.compressible: - cs = equation.parameters.cs - eqn += phi * (p - p_in) * dx + beta_p * 
phi * cs**2 * div(u) * dx - else: - eqn += beta_p * phi * div(u) * dx - - if hasattr(self.equations, "mu"): - eqn += dt*self.equations.mu*inner(w, k)*inner(u, k)*dx - - if equation.parameters.Omega is not None: - Omega = as_vector((0, 0, equation.parameter.Omega)) - eqn += beta_u*inner(w, cross(2*Omega, u))*dx - - aeqn = lhs(eqn) - Leqn = rhs(eqn) - - # Place to put result of u p solver - self.up = Function(M) - - # Boundary conditions (assumes extruded mesh) - # BCs are declared for the plain velocity space. As we need them in - # a mixed problem, we replicate the BCs but for subspace of M - bcs = [DirichletBC(M.sub(0), bc.function_arg, bc.sub_domain) for bc in self.equations.bcs['u']] - - # Solver for u, p - up_problem = LinearVariationalProblem(aeqn, Leqn, self.up, bcs=bcs, - constant_jacobian=True) - - # Provide callback for the nullspace of the trace system - def trace_nullsp(T): - return VectorSpaceBasis(constant=True) - - appctx = {"trace_nullspace": trace_nullsp} - self.up_solver = LinearVariationalSolver(up_problem, - solver_parameters=self.solver_parameters, - appctx=appctx) - - # Reconstruction of b - b = TrialFunction(Vb) - gamma = TestFunction(Vb) - - u, p = self.up.subfunctions - self.b = Function(Vb) - - b_eqn = gamma*(b - b_in - + dot(k, u)*dot(k, grad(bbar))*beta_b)*dx - - b_problem = LinearVariationalProblem(lhs(b_eqn), - rhs(b_eqn), - self.b, - constant_jacobian=True) - self.b_solver = LinearVariationalSolver(b_problem) - - # Log residuals on hybridized solver - self.log_ksp_residuals(self.up_solver.snes.ksp) - - @timed_function("Gusto:UpdateReferenceProfiles") - def update_reference_profiles(self): - self.up_solver.invalidate_jacobian() - - @timed_function("Gusto:LinearSolve") - def solve(self, xrhs, dy): - """ - Solve the linear problem. - - Args: - xrhs (:class:`Function`): the right-hand side field in the - appropriate :class:`MixedFunctionSpace`. - dy (:class:`Function`): the resulting field in the appropriate - :class:`MixedFunctionSpace`. 
- """ - self.xrhs.assign(xrhs) - - with timed_region("Gusto:VelocityPressureSolve"): - logger.info('Boussinesq linear solver: mixed solve') - self.up_solver.solve() - - u1, p1 = self.up.subfunctions - u, p, b = dy.subfunctions - u.assign(u1) - p.assign(p1) - - with timed_region("Gusto:BuoyancyRecon"): - logger.info('Boussinesq linear solver: buoyancy reconstruction') - self.b_solver.solve() - - b.assign(self.b) - - -class ThermalSWSolver(TimesteppingSolver): - """Linear solver object for the thermal shallow water equations. - - This solves a linear problem for the thermal shallow water - equations with prognostic variables u (velocity), D (depth) and - either b (buoyancy) or b_e (equivalent buoyancy). It follows the - following strategy: - - (1) Eliminate b - (2) Solve the resulting system for (u, D) using a hybrid-mixed method - (3) Reconstruct b - - """ - - solver_parameters = { - 'ksp_type': 'preonly', - 'mat_type': 'matfree', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.HybridizationPC', - 'hybridization': {'ksp_type': 'cg', - 'pc_type': 'gamg', - 'ksp_rtol': 1e-8, - 'mg_levels': {'ksp_type': 'chebyshev', - 'ksp_max_it': 2, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}} - } - - @timed_function("Gusto:SolverSetup") - def _setup_solver(self): - equation = self.equations # just cutting down line length a bit - equivalent_buoyancy = equation.equivalent_buoyancy - - dt = self.dt - beta_u = dt*self.tau_values.get("u", self.alpha) - beta_d = dt*self.tau_values.get("D", self.alpha) - beta_b = dt*self.tau_values.get("b", self.alpha) - Vu = equation.domain.spaces("HDiv") - VD = equation.domain.spaces("DG") - Vb = equation.domain.spaces("DG") - - # Check that the third field is buoyancy - if not equation.field_names[2] == 'b' and not (equation.field_names[2] == 'b_e' and equivalent_buoyancy): - raise NotImplementedError("Field 'b' or 'b_e' must exist to use the thermal linear solver in the SIQN scheme") - - # Split up the rhs vector - self.xrhs = 
Function(equation.function_space) - u_in, D_in, b_in = split(self.xrhs)[0:3] - - # Build the reduced function space for u, D - M = MixedFunctionSpace((Vu, VD)) - w, phi = TestFunctions(M) - u, D = TrialFunctions(M) - - # Get background buoyancy and depth - Dbar, bbar = split(equation.X_ref)[1:3] - - # Approximate elimination of b - b = -dot(u, grad(bbar))*beta_b + b_in - - if equivalent_buoyancy: - # compute q_v using q_sat to partition q_t into q_v and q_c - self.q_sat_func = Function(VD) - self.qvbar = Function(VD) - qtbar = split(equation.X_ref)[3] - - # set up interpolators that use the X_ref values for D and b_e - self.q_sat_expr_interpolate = interpolate( - equation.compute_saturation(equation.X_ref), VD) - self.q_v_interpolate = interpolate( - conditional(qtbar < self.q_sat_func, qtbar, self.q_sat_func), - VD) - - # bbar was be_bar and here we correct to become bbar - bbar += equation.parameters.beta2 * self.qvbar - - n = FacetNormal(equation.domain.mesh) - - eqn = ( - inner(w, (u - u_in)) * dx - - beta_u * (D - Dbar) * div(w*bbar) * dx - + beta_u * jump(w*bbar, n) * avg(D-Dbar) * dS - - beta_u * 0.5 * Dbar * bbar * div(w) * dx - - beta_u * 0.5 * Dbar * b * div(w) * dx - - beta_u * 0.5 * bbar * div(w*(D-Dbar)) * dx - + beta_u * 0.5 * jump((D-Dbar)*w, n) * avg(bbar) * dS - + inner(phi, (D - D_in)) * dx - + beta_d * phi * div(Dbar*u) * dx - ) - - if 'coriolis' in equation.prescribed_fields._field_names: - f = equation.prescribed_fields('coriolis') - eqn += beta_u * f * inner(w, equation.domain.perp(u)) * dx - - aeqn = lhs(eqn) - Leqn = rhs(eqn) - - # Place to put results of (u,D) solver - self.uD = Function(M) - - # Boundary conditions - bcs = [DirichletBC(M.sub(0), bc.function_arg, bc.sub_domain) for bc in self.equations.bcs['u']] - - # Solver for u, D - uD_problem = LinearVariationalProblem(aeqn, Leqn, self.uD, bcs=bcs, - constant_jacobian=True) - - # Provide callback for the nullspace of the trace system - def trace_nullsp(T): - return 
VectorSpaceBasis(constant=True) - - appctx = {"trace_nullspace": trace_nullsp} - self.uD_solver = LinearVariationalSolver(uD_problem, - solver_parameters=self.solver_parameters, - appctx=appctx) - # Reconstruction of b - b = TrialFunction(Vb) - gamma = TestFunction(Vb) - - u, D = self.uD.subfunctions - self.b = Function(Vb) - - b_eqn = gamma*(b - b_in + inner(u, grad(bbar))*beta_b) * dx - - b_problem = LinearVariationalProblem(lhs(b_eqn), - rhs(b_eqn), - self.b, - constant_jacobian=True) - self.b_solver = LinearVariationalSolver(b_problem) - - # Log residuals on hybridized solver - self.log_ksp_residuals(self.uD_solver.snes.ksp) - - @timed_function("Gusto:UpdateReferenceProfiles") - def update_reference_profiles(self): - if self.equations.equivalent_buoyancy: - self.q_sat_func.assign(assemble(self.q_sat_expr_interpolate)) - self.qvbar.assign(assemble(self.q_v_interpolate)) - - @timed_function("Gusto:LinearSolve") - def solve(self, xrhs, dy): - """ - Solve the linear problem. - - Args: - xrhs (:class:`Function`): the right-hand side field in the - appropriate :class:`MixedFunctionSpace`. - dy (:class:`Function`): the resulting field in the appropriate - :class:`MixedFunctionSpace`. - """ - self.xrhs.assign(xrhs) - - # Check that the b reference profile has been set - bbar = split(self.equations.X_ref)[2] - b = dy.subfunctions[2] - bbar_func = Function(b.function_space()).interpolate(bbar) - if bbar_func.dat.data.max() == 0 and bbar_func.dat.data.min() == 0: - logger.warning("The reference profile for b in the linear solver is zero. 
To set a non-zero profile add b to the set_reference_profiles argument.") - - with timed_region("Gusto:VelocityDepthSolve"): - logger.info('Thermal linear solver: mixed solve') - self.uD_solver.solve() - - u1, D1 = self.uD.subfunctions - u = dy.subfunctions[0] - D = dy.subfunctions[1] - b = dy.subfunctions[2] - u.assign(u1) - D.assign(D1) - - with timed_region("Gusto:BuoyancyRecon"): - logger.info('Thermal linear solver: buoyancy reconstruction') - self.b_solver.solve() - - b.assign(self.b) - - -class LinearTimesteppingSolver(object): - """ - A general object for solving mixed finite element linear problems. - - This linear solver object is general and is designed for use with different - equation sets, including with the non-linear shallow-water equations. It - forms the linear problem from the equations using FML. The linear system is - solved using a hybridised-mixed method. - """ - - solver_parameters = { - 'ksp_type': 'preonly', - 'mat_type': 'matfree', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.HybridizationPC', - 'hybridization': {'ksp_type': 'cg', - 'pc_type': 'gamg', - 'ksp_rtol': 1e-8, - 'mg_levels': {'ksp_type': 'chebyshev', - 'ksp_max_it': 2, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}} - } - - def __init__(self, equation, alpha, reference_dependent=True): - """ - Args: - equation (:class:`PrognosticEquation`): the model's equation object. - alpha (float): the semi-implicit off-centring factor. A value of 1 - is fully-implicit. - reference_dependent: this indicates that the solver Jacobian should - be rebuilt if the reference profiles have been updated. 
- """ - self.reference_dependent = reference_dependent - - residual = equation.residual.label_map( - lambda t: t.has_label(linearisation), - lambda t: Term(t.get(linearisation).form, t.labels), - drop) - - self.alpha = Constant(alpha) - dt = equation.domain.dt - W = equation.function_space - beta = dt*self.alpha - - # Split up the rhs vector (symbolically) - self.xrhs = Function(W) - - aeqn = residual.label_map( - lambda t: (t.has_label(time_derivative) and t.has_label(linearisation)), - map_if_false=lambda t: beta*t) - Leqn = residual.label_map( - lambda t: (t.has_label(time_derivative) and t.has_label(linearisation)), - map_if_false=drop) - - # Place to put result of solver - self.dy = Function(W) - - # Solver - bcs = [DirichletBC(W.sub(0), bc.function_arg, bc.sub_domain) for bc in equation.bcs['u']] - problem = LinearVariationalProblem(aeqn.form, - action(Leqn.form, self.xrhs), - self.dy, bcs=bcs, - constant_jacobian=True) - - self.solver = LinearVariationalSolver(problem, - solver_parameters=self.solver_parameters, - options_prefix='linear_solver') - - @timed_function("Gusto:UpdateReferenceProfiles") - def update_reference_profiles(self): - if self.reference_dependent: - self.solver.invalidate_jacobian() - - @timed_function("Gusto:LinearSolve") - def solve(self, xrhs, dy): - """ - Solve the linear problem. - - Args: - xrhs (:class:`Function`): the right-hand side field in the - appropriate :class:`MixedFunctionSpace`. - dy (:class:`Function`): the resulting field in the appropriate - :class:`MixedFunctionSpace`. - """ - self.xrhs.assign(xrhs) - self.solver.solve() - dy.assign(self.dy) - - -class MoistConvectiveSWSolver(TimesteppingSolver): - """ - Linear solver for the moist convective shallow water equations. - - This solves a linear problem for the shallow water equations with prognostic - variables u (velocity) and D (depth). The linear system is solved using a - hybridised-mixed method. 
- """ - - solver_parameters = { - 'ksp_type': 'preonly', - 'mat_type': 'matfree', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.HybridizationPC', - 'hybridization': {'ksp_type': 'cg', - 'pc_type': 'gamg', - 'ksp_rtol': 1e-8, - 'mg_levels': {'ksp_type': 'chebyshev', - 'ksp_max_it': 2, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}} - } - - @timed_function("Gusto:SolverSetup") - def _setup_solver(self): - equation = self.equations # just cutting down line length a bit - dt = self.dt - beta_u = dt*self.tau_values.get("u", self.alpha) - beta_d = dt*self.tau_values.get("D", self.alpha) - Vu = equation.domain.spaces("HDiv") - VD = equation.domain.spaces("DG") - - # Split up the rhs vector - self.xrhs = Function(self.equations.function_space) - u_in = split(self.xrhs)[0] - D_in = split(self.xrhs)[1] - - # Build the reduced function space for u, D - M = MixedFunctionSpace((Vu, VD)) - w, phi = TestFunctions(M) - u, D = TrialFunctions(M) - - # Get background depth - Dbar = split(equation.X_ref)[1] - - g = equation.parameters.g - - eqn = ( - inner(w, (u - u_in)) * dx - - beta_u * (D - Dbar) * div(w*g) * dx - + inner(phi, (D - D_in)) * dx - + beta_d * phi * div(Dbar*u) * dx - ) - - if 'coriolis' in equation.prescribed_fields._field_names: - f = equation.prescribed_fields('coriolis') - eqn += beta_u * f * inner(w, equation.domain.perp(u)) * dx - - aeqn = lhs(eqn) - Leqn = rhs(eqn) - - # Place to put results of (u,D) solver - self.uD = Function(M) - - # Boundary conditions - bcs = [DirichletBC(M.sub(0), bc.function_arg, bc.sub_domain) for bc in self.equations.bcs['u']] - - # Solver for u, D - uD_problem = LinearVariationalProblem(aeqn, Leqn, self.uD, bcs=bcs, - constant_jacobian=True) - - # Provide callback for the nullspace of the trace system - def trace_nullsp(T): - return VectorSpaceBasis(constant=True) - - appctx = {"trace_nullspace": trace_nullsp} - self.uD_solver = LinearVariationalSolver(uD_problem, - solver_parameters=self.solver_parameters, - appctx=appctx) - 
- # Log residuals on hybridized solver - self.log_ksp_residuals(self.uD_solver.snes.ksp) - - @timed_function("Gusto:UpdateReferenceProfiles") - def update_reference_profiles(self): - self.uD_solver.invalidate_jacobian() - - @timed_function("Gusto:LinearSolve") - def solve(self, xrhs, dy): - """ - Solve the linear problem. - - Args: - xrhs (:class:`Function`): the right-hand side field in the - appropriate :class:`MixedFunctionSpace`. - dy (:class:`Function`): the resulting field in the appropriate - :class:`MixedFunctionSpace`. - """ - self.xrhs.assign(xrhs) - - with timed_region("Gusto:VelocityDepthSolve"): - logger.info('Moist convective linear solver: mixed solve') - self.uD_solver.solve() - - u1, D1 = self.uD.subfunctions - u = dy.subfunctions[0] - D = dy.subfunctions[1] - u.assign(u1) - D.assign(D1) diff --git a/gusto/solvers/preconditioners.py b/gusto/solvers/preconditioners.py index 39397774e..8ff02a026 100644 --- a/gusto/solvers/preconditioners.py +++ b/gusto/solvers/preconditioners.py @@ -1,17 +1,29 @@ """A module containing specialised preconditioners for Gusto applications.""" from firedrake import (dot, jump, dS_h, ds_b, ds_t, ds, - FacetNormal, Tensor, AssembledVector) + FacetNormal, Tensor, AssembledVector, + AuxiliaryOperatorPC, PETSc) from firedrake.preconditioners import PCBase from firedrake.matrix_free.operators import ImplicitMatrixContext -from firedrake.petsc import PETSc from gusto.recovery.recovery_kernels import AverageKernel, AverageWeightings +from gusto.core.logging import logger, DEBUG, logging_ksp_monitor_true_residual from pyop2.profiling import timed_region, timed_function from functools import partial -__all__ = ["VerticalHybridizationPC"] +__all__ = ["VerticalHybridizationPC", "AuxiliaryPC", "CompressibleHybridisedSCPC"] + + +class AuxiliaryPC(AuxiliaryOperatorPC): + """ + A preconditioner that allows the passing of an auxiliary form to be solved by + a chosen PETSc preconditioner. 
This is commonly used to pass Schur complement forms + to eliminate a variable in a mixed system. + """ + def form(self, pc, test, trial): + a, bcs = self.get_appctx(pc)['auxform'] + return (a(test, trial), bcs) class VerticalHybridizationPC(PCBase): @@ -405,3 +417,399 @@ def view(self, pc, viewer=None): viewer.pushASCIITab() viewer.printfASCII("Project the broken hdiv solution into the HDiv space.\n") viewer.popASCIITab() + + +class CompressibleHybridisedSCPC(PCBase): + """ + A bespoke hybridised preconditioner for the compressible Euler equations. + + This solves a linear problem for the compressible Euler equations in + theta-exner formulation with prognostic variables u (velocity), rho + (density) and theta (potential temperature). It follows the following + strategy: + + (1) Analytically eliminate theta (introduces error near topography) + + (2a) Formulate the resulting mixed system for u and rho using a + hybridized mixed method. This breaks continuity in the + linear perturbations of u, and introduces a new unknown on the + mesh interfaces approximating the average of the Exner pressure + perturbations. These trace unknowns also act as Lagrange + multipliers enforcing normal continuity of the "broken" u variable. + + (2b) Statically condense the block-sparse system into a single system + for the Lagrange multipliers. This is the only globally coupled + system requiring a linear solver. + + (2c) Using the computed trace variables, we locally recover the + broken velocity and density perturbations. This is accomplished + in two stages: + (i): Recover rho locally using the multipliers. + (ii): Recover "broken" u locally using rho and the multipliers. + + (2d) Project the "broken" velocity field into the HDiv-conforming + space using local averaging. 
+ + (3) Reconstruct theta + + This method requires special handling as Firedrake's existing hybridization + preconditioner assumes that the pressure gradient term contains variables + from the mixed system being solved for. In our case, we first eliminate pressure, + then eliminate theta analytically, violating this assumption. + """ + + _prefix = "compressible_hybrid_scpc" + + scpc_parameters = { + 'mat_type': 'matfree', + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.SCPC', + 'pc_sc_eliminate_fields': '0, 1', + # The reduced operator is not symmetric + 'condensed_field': { + 'ksp_type': 'fgmres', + 'ksp_rtol': 1.0e-8, + 'ksp_atol': 1.0e-8, + 'ksp_max_it': 100, + 'pc_type': 'gamg', + 'pc_gamg_sym_graph': None, + 'mg_levels': { + 'ksp_type': 'gmres', + 'ksp_max_it': 5, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + } + } + } + + cg_ilu_parameters = { + 'ksp_type': 'cg', + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + } + + def initialize(self, pc): + """ + Set up problem and solver + """ + from firedrake import ( + split, LinearVariationalProblem, LinearVariationalSolver, + TestFunctions, TrialFunctions, TestFunction, TrialFunction, lhs, + rhs, FacetNormal, div, dx, jump, avg, dS_v, dS_h, ds_v, ds_t, ds_b, + ds_tb, inner, dot, grad, Function, cross, + BrokenElement, FunctionSpace, MixedFunctionSpace, as_vector, + Cofunction, Constant + ) + from gusto.equations.active_tracers import TracerVariableType + from gusto.core.labels import hydrostatic + if pc.getType() != "python": + raise ValueError("Expecting PC type python") + + self._process_context(pc) + + self.hybridised_scpc_parameters = self.scpc_parameters + self.rho_avg_solver_parameters = self.cg_ilu_parameters + self.theta_solver_parameters = self.cg_ilu_parameters + self.exner_avg_solver_parameters = self.cg_ilu_parameters + + if logger.isEnabledFor(DEBUG): + self.hybridised_scpc_parameters['ksp_monitor_true_residual'] = None + 
self.hybridised_scpc_parameters['ksp_converged_reason'] = None + + # Equations and parameters + equations = self.equations + dt = self.dt + cp = equations.parameters.cp + + # Set relaxation parameters. If an alternative has not been given, set + # to semi-implicit off-centering factor + beta_u = dt*self.tau_values.get("u", self.alpha) + beta_t = dt*self.tau_values.get("theta", self.alpha) + beta_r = dt*self.tau_values.get("rho", self.alpha) + + # Get function spaces + self.Vu = self.equations.domain.spaces("HDiv") + self.Vrho = self.equations.domain.spaces("DG") + self.Vtheta = self.equations.domain.spaces("theta") + + # Build trace and broken spaces + self.Vu_broken = FunctionSpace(self.mesh, BrokenElement(self.Vu.ufl_element())) + h_deg = self.Vrho.ufl_element().degree()[0] + v_deg = self.Vrho.ufl_element().degree()[1] + self.Vtrace = FunctionSpace(self.mesh, "HDiv Trace", degree=(h_deg, v_deg)) + + # Mixed Function Spaces + self.W = equations.function_space + self.W_hyb = MixedFunctionSpace((self.Vu_broken, self.Vrho, self.Vtrace)) + + # Define Functions and Cofunctions for rhs and solution vectors + self.xstar = Cofunction(self.W.dual()) + self.xrhs = Function(self.W) + self.y = Function(self.W) + + self.y_hybrid = Function(self.W_hyb) + + # Split input functions + u_in, rho_in, theta_in = self.xrhs.subfunctions[0:3] + + # Get bcs + self.bcs = self.equations.bcs['u'] + + # Set up hybridized solver for (u, rho, l) system + w, phi, dl = TestFunctions(self.W_hyb) + u, rho, l0 = TrialFunctions(self.W_hyb) + n = FacetNormal(self.mesh) + + # Get background fields + _, rhobar, thetabar = split(self.equations.X_ref)[0:3] + kappa = equations.parameters.kappa + R_d = equations.parameters.R_d + p_0 = equations.parameters.p_0 + exnerbar = (rhobar * R_d * thetabar / p_0) ** (kappa / (1 - kappa)) + exnerbar_rho = (kappa / (1 - kappa)) * (rhobar * R_d * thetabar / p_0) ** (kappa / (1 - kappa)) / rhobar + exnerbar_theta = (kappa / (1 - kappa)) * (rhobar * R_d * thetabar / p_0) 
** (kappa / (1 - kappa)) / thetabar + + # Analytical (approximate) elimination of theta + k = equations.domain.k # Upward pointing unit vector + theta = -dot(k, u)*dot(k, grad(thetabar))*beta_t + theta_in + + # Only include theta' (rather than exner') in the vertical + # component of the gradient + + # The exner prime term (here, bars are for mean and no bars are + # for linear perturbations) + exner = exnerbar_theta*theta + exnerbar_rho*rho + + # Vertical projection + def V(u): + return k*inner(u, k) + + # hydrostatic projection + h_project = lambda u: u - k*inner(u, k) + + # Specify degree for some terms as estimated degree is too large + dx_qp = dx(degree=(equations.domain.max_quad_degree)) + dS_v_qp = dS_v(degree=(equations.domain.max_quad_degree)) + dS_h_qp = dS_h(degree=(equations.domain.max_quad_degree)) + ds_v_qp = ds_v(degree=(equations.domain.max_quad_degree)) + ds_tb_qp = (ds_t(degree=(equations.domain.max_quad_degree)) + + ds_b(degree=(equations.domain.max_quad_degree))) + + # Add effect of density of water upon theta, using moisture reference profiles + # TODO: Explore if this is the right thing to do for the linear problem + if equations.active_tracers is not None: + mr_t = Constant(0.0)*thetabar + for tracer in equations.active_tracers: + if tracer.chemical == 'H2O': + if tracer.variable_type == TracerVariableType.mixing_ratio: + idx = equations.field_names.index(tracer.name) + mr_bar = split(equations.X_ref)[idx] + mr_t += mr_bar + else: + raise NotImplementedError('Only mixing ratio tracers are implemented') + + theta_w = theta / (1 + mr_t) + thetabar_w = thetabar / (1 + mr_t) + else: + theta_w = theta + thetabar_w = thetabar + + _l0 = TrialFunction(self.Vtrace) + _dl = TestFunction(self.Vtrace) + a_tr = _dl('+')*_l0('+')*(dS_v_qp + dS_h_qp) + _dl*_l0*ds_v_qp + _dl*_l0*ds_tb_qp + + def L_tr(f): + return _dl('+')*avg(f)*(dS_v_qp + dS_h_qp) + _dl*f*ds_v_qp + _dl*f*ds_tb_qp + + # Project field averages into functions on the trace space + rhobar_avg = 
Function(self.Vtrace) + exnerbar_avg = Function(self.Vtrace) + + rho_avg_prb = LinearVariationalProblem(a_tr, L_tr(rhobar), rhobar_avg, + constant_jacobian=True) + exner_avg_prb = LinearVariationalProblem(a_tr, L_tr(exnerbar), exnerbar_avg, + constant_jacobian=True) + + self.rho_avg_solver = LinearVariationalSolver( + rho_avg_prb, solver_parameters=self.rho_avg_solver_parameters, + options_prefix=pc.getOptionsPrefix()+'rhobar_avg_solver' + ) + self.exner_avg_solver = LinearVariationalSolver( + exner_avg_prb, solver_parameters=self.exner_avg_solver_parameters, + options_prefix=pc.getOptionsPrefix()+'exnerbar_avg_solver' + ) + + # "broken" u, rho, and trace system + # NOTE: no ds_v integrals since equations are defined on + # a periodic (or sphere) base mesh. + if any([t.has_label(hydrostatic) for t in equations.residual]): + u_mass = inner(w, (h_project(u) - u_in))*dx + else: + u_mass = inner(w, (u - u_in))*dx + + eqn = ( + # momentum equation + u_mass + - beta_u*cp*div(theta_w*V(w))*exnerbar*dx_qp + # following does nothing but is preserved in the comments + # to remind us why (because V(w) is purely vertical). 
+ # + beta*cp*jump(theta_w*V(w), n=n)*exnerbar_avg('+')*dS_v_qp + + beta_u*cp*jump(theta_w*V(w), n=n)*exnerbar_avg('+')*dS_h_qp + + beta_u*cp*dot(theta_w*V(w), n)*exnerbar_avg*ds_tb_qp + - beta_u*cp*div(thetabar_w*w)*exner*dx_qp + # trace terms appearing after integrating momentum equation + + beta_u*cp*jump(thetabar_w*w, n=n)*l0('+')*(dS_v_qp + dS_h_qp) + + beta_u*cp*dot(thetabar_w*w, n)*l0*(ds_tb_qp + ds_v_qp) + # mass continuity equation + + (phi*(rho - rho_in) - beta_r*inner(grad(phi), u)*rhobar)*dx + + beta_r*jump(phi*u, n=n)*rhobar_avg('+')*(dS_v + dS_h) + # term added because u.n=0 is enforced weakly via the traces + + beta_r*phi*dot(u, n)*rhobar_avg*(ds_tb + ds_v) + # constraint equation to enforce continuity of the velocity + # through the interior facets and weakly impose the no-slip + # condition + + dl('+')*jump(u, n=n)*(dS_v + dS_h) + + dl*dot(u, n)*(ds_t + ds_b + ds_v) + ) + + # TODO: can we get this term using FML? + # contribution of the sponge term + if hasattr(self.equations, "mu"): + eqn += dt*self.equations.mu*inner(w, k)*inner(u, k)*dx_qp + + if equations.parameters.Omega is not None: + Omega = as_vector([0, 0, equations.parameters.Omega]) + eqn += beta_u*inner(w, cross(2*Omega, u))*dx + + aeqn = lhs(eqn) + Leqn = rhs(eqn) + + appctx = {'slateschur_form': aeqn} + + hybridized_prb = LinearVariationalProblem( + aeqn, Leqn, self.y_hybrid, constant_jacobian=True + ) + self.hybridized_solver = LinearVariationalSolver( + hybridized_prb, solver_parameters=self.hybridised_scpc_parameters, + options_prefix=pc.getOptionsPrefix()+self._prefix, appctx=appctx + ) + + if logger.isEnabledFor(DEBUG): + self.hybridized_solver.snes.ksp.setMonitor( + logging_ksp_monitor_true_residual + ) + + # Project broken u into the HDiv space using facet averaging. 
+ # Weight function counting the dofs of the HDiv element: + self._weight = Function(self.Vu) + weight_kernel = AverageWeightings(self.Vu) + weight_kernel.apply(self._weight) + + # Averaging kernel + self._average_kernel = AverageKernel(self.Vu) + + # HDiv-conforming velocity + self.u_hdiv = Function(self.Vu) + + # Reconstruction of theta + theta = TrialFunction(self.Vtheta) + gamma = TestFunction(self.Vtheta) + + self.theta = Function(self.Vtheta) + theta_eqn = gamma*(theta - theta_in + + dot(k, self.u_hdiv)*dot(k, grad(thetabar))*beta_t)*dx + + theta_problem = LinearVariationalProblem( + lhs(theta_eqn), rhs(theta_eqn), self.theta, constant_jacobian=True + ) + + self.theta_solver = LinearVariationalSolver( + theta_problem, solver_parameters=self.theta_solver_parameters, + options_prefix=pc.getOptionsPrefix()+'thetabacksubstitution' + ) + + if logger.isEnabledFor(DEBUG): + self.theta_solver.snes.ksp.setMonitor( + logging_ksp_monitor_true_residual + ) + + # Project reference profiles at initialisation + self.rho_avg_solver.solve() + self.exner_avg_solver.solve() + self.hybridized_solver.invalidate_jacobian() + + def apply(self, pc, x, y): + """ + Apply the preconditioner to x, putting the result in y. + + Args: + pc (:class:`PETSc.PC`): the preconditioner object. + x (:class:`PETSc.Vec`): the vector to apply the preconditioner to. + y (:class:`PETSc.Vec`): the vector to store the result. 
+ """ + + # transfer x -> self.xstar + with self.xstar.dat.vec_wo as xv: + x.copy(xv) + + self.xrhs.assign(self.xstar.riesz_representation()) + # Solve hybridized system + self.hybridized_solver.solve() + + # Recover broken u and rho + u_broken, rho, l = self.y_hybrid.subfunctions + self.u_hdiv.assign(0) + self._average_kernel.apply(self.u_hdiv, self._weight, u_broken) + for bc in self.bcs: + bc.zero(self.u_hdiv) + + # Transfer data to non-hybrid space + self.y.subfunctions[0].assign(self.u_hdiv) + self.y.subfunctions[1].assign(rho) + + # Recover theta + self.theta.assign(0) + self.theta_solver.solve() + self.y.subfunctions[2].assign(self.theta) + + with self.y.dat.vec_ro as vout: + # copy into PETSc output vector + vout.copy(y) + + def update(self, pc): + PETSc.Sys.Print("[PC.update] Updating hybridized PC") + + if hasattr(self, "rho_avg_solver"): + self.rho_avg_solver.solve() + self.exner_avg_solver.solve() + self.hybridized_solver.invalidate_jacobian() + + def _process_context(self, pc): + appctx = self.get_appctx(pc) + self.appctx = appctx + self.prefix = pc.getOptionsPrefix() + self._prefix + + self.equations = appctx.get('equations') + self.mesh = self.equations.domain.mesh + + self.alpha = appctx.get('alpha') + tau_values = appctx.get('tau_values') + self.tau_values = tau_values if tau_values is not None else {} + + self.dt = self.equations.domain.dt + + def applyTranspose(self, pc, x, y): + """ + Apply the transpose of the preconditioner. + + Args: + pc (:class:`PETSc.PC`): the preconditioner object. + x (:class:`PETSc.Vec`): the vector to apply the preconditioner to. + y (:class:`PETSc.Vec`): the vector to put the result into. + + Raises: + NotImplementedError: this method is currently not implemented. 
+ """ + + raise NotImplementedError("The transpose application of the PC is not implemented.") diff --git a/gusto/solvers/solver_presets.py b/gusto/solvers/solver_presets.py new file mode 100644 index 000000000..de2e8c5b8 --- /dev/null +++ b/gusto/solvers/solver_presets.py @@ -0,0 +1,397 @@ +""" +This module provides PETSc dictionaries for linear problems. + +The solvers provided here are used for solving linear problems on mixed +finite element spaces. +""" + +from gusto.equations import ( + CompressibleEulerEquations, BoussinesqEquations, ShallowWaterEquations, + ShallowWaterEquations_1d, ThermalShallowWaterEquations +) +from gusto.core.logging import logger, DEBUG +from firedrake import VectorSpaceBasis + +__all__ = ["hybridised_solver_parameters", "monolithic_solver_parameters"] + + +def hybridised_solver_parameters(equation, solver_prognostics, alpha=0.5, tau_values=None): + """ + Returns PETSc solver settings for hybridised solver for mixed finite + element problems. + + Parameters + ---------- + equation (:class:`PrognosticEquation`): the model's equation. + solver_prognostics : list + A list of the names of prognostic variables to include in the solver. + alpha : float, optional + The implicitness parameter for the time discretisation. Default is 0.5. + tau_values : dict, optional + A dictionary of stabilization parameters for the hybridization. + Default is None. + Returns + ------- + settings : dict + A dictionary containing the PETSc solver settings. + appctx : dict + A dictionary containing the application context for the solver. + """ + + # Callback function for the nullspace of the trace system + def trace_nullsp(T): + return VectorSpaceBasis(constant=True) + + # Prepare some generic variables to help with logic below ------------------ + + # Create list of scalar variables that are not solved for by the linear + # solver -- i.e. the list of variables excluding the "solver prognostics". 
+ # For example, these could be moisture variables + tracers = ",".join( + str(idx) for idx, name in enumerate(equation.field_names) + if name not in solver_prognostics + ) + num_tracers = len(equation.field_names) - len(solver_prognostics) + is_shallow_water = isinstance(equation, ( + ShallowWaterEquations, ShallowWaterEquations_1d + )) + + # ------------------------------------------------------------------------ # + # We chose our solver settings based on the equation being solved... + + # ======================================================================== # + # Compressible Euler + # ======================================================================== # + + if isinstance(equation, CompressibleEulerEquations): + # Compressible Euler equations - (u, rho, theta) system. We use a + # bespoke hybridization preconditioner for this system owing to the + # nonlinear pressure gradient term. + settings = { + 'ksp_monitor': None, + 'ksp_type': 'preonly', + 'mat_type': 'matfree', + 'pc_type': 'python', + 'pc_python_type': 'gusto.CompressibleHybridisedSCPC', + } + + # We pass the implicit weighting parameter (alpha) and tau_values to the + # preconditioner, and the equation to construct the hybridized system. + # Provide callback for nullspace of the trace system with trace_nullsp + appctx = { + 'equations': equation, + 'alpha': alpha, + 'trace_nullspace': trace_nullsp + } + if tau_values is not None: + appctx['tau_values'] = tau_values + + # ======================================================================== # + # Boussinesq + # ======================================================================== # + + elif isinstance(equation, BoussinesqEquations): + # Boussinesq equations - (u, p, theta) system. We use a fieldsplit + # preconditioner to first eliminate theta via Schur complement, then + # hybridize the (u, p) system. 
+ settings = { + 'ksp_error_if_not_converged': None, + 'mat_type': 'matfree', + 'ksp_type': 'preonly', + 'pc_type': 'fieldsplit', + 'pc_fieldsplit_type': 'schur', + 'pc_fieldsplit_schur_fact_type': 'full', + 'pc_fieldsplit_1_fields': '0,1', # (u, p) + 'pc_fieldsplit_0_fields': '2', # eliminate theta + 'fieldsplit_theta': { # theta solve, simple block jacobi with ilu on blocks + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.AssembledPC', + 'assembled_pc_type': 'bjacobi', + 'assembled_sub_pc_type': 'ilu', + }, + 'fieldsplit_1': { # (u, p) solve, hybridized + 'ksp_monitor': None, + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'gusto.AuxiliaryPC', + 'aux': { + 'mat_type': 'matfree', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.HybridizationPC', # Uses Firedrake's + 'hybridization': { # hybridization PC + 'ksp_type': 'cg', + 'pc_type': 'gamg', # AMG for trace system + 'ksp_rtol': 1e-8, + 'mg_levels': { + 'ksp_type': 'chebyshev', + 'ksp_max_it': 2, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + }, + 'mg_coarse': { + 'ksp_type': 'preonly', + 'pc_type': 'lu', + 'pc_factor_mat_solver_type': 'mumps', + }, + }, + }, + }, + } + # To eliminate theta, we pass the Schur complement form to the + # AuxiliaryPC. This Schur complement form is defined in the + # BoussinesqEquations class. 
+ # Provide callback for nullspace of the trace system with trace_nullsp + appctx = { + 'auxform': equation.schur_complement_form(alpha=alpha), + 'trace_nullspace': trace_nullsp, + } + + # ======================================================================== # + # Moist Thermal Shallow Water + # ======================================================================== # + + elif isinstance(equation, ThermalShallowWaterEquations) and num_tracers > 0: + # (u, D, b, tracers) system - moist thermal shallow water + settings = { + 'ksp_error_if_not_converged': None, + 'mat_type': 'matfree', + 'ksp_type': 'preonly', + 'pc_type': 'fieldsplit', + 'pc_fieldsplit_type': 'additive', # additive fieldsplit to separate tracers + 'pc_fieldsplit_0_fields': '0,1,2', # (u, D, b/b_e) system + 'pc_fieldsplit_1_fields': tracers, # do nothing for scalar field + 'fieldsplit_0': { # (u, D, b/b_e) solve with Schur complement elimination of b/b_e + 'pc_type': 'fieldsplit', + 'pc_fieldsplit_type': 'schur', + 'pc_fieldsplit_schur_fact_type': 'full', + 'pc_fieldsplit_1_fields': '0,1', + 'pc_fieldsplit_0_fields': '2', # eliminate b/b_e + 'fieldsplit_L2': { # b/b_e solve, simple block jacobi with ilu on blocks + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.AssembledPC', + 'assembled_pc_type': 'bjacobi', + 'assembled_sub_pc_type': 'ilu', + }, + 'fieldsplit_1': { # (u, D) solve, hybridized + 'ksp_monitor': None, + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'gusto.AuxiliaryPC', + 'aux': { + 'mat_type': 'matfree', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.HybridizationPC', # Uses Firedrake's hybridization PC + 'hybridization': { + 'ksp_type': 'cg', + 'pc_type': 'gamg', # AMG for trace system + 'ksp_rtol': 1e-8, + 'mg_levels': { + 'ksp_type': 'chebyshev', + 'ksp_max_it': 2, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + }, + 'mg_coarse': { + 'ksp_type': 'preonly', + 'pc_type': 'lu', + 'pc_factor_mat_solver_type': 'mumps', + }, + }, 
+ }, + }, + }, + 'fieldsplit_L2': { # do nothing for scalar field - the rhs is 0 + 'ksp_type': 'preonly', + 'pc_type': 'none', + }, + } + # To eliminate b/b_e, we pass the Schur complement form to the + # AuxiliaryPC. This Schur complement form is defined in the + # ThermalShallowWaterEquations class. + # Provide callback for nullspace of the trace system with trace_nullsp + appctx = { + 'auxform': equation.schur_complement_form(alpha=alpha), + 'trace_nullspace': trace_nullsp, + } + + # ======================================================================== # + # Thermal Shallow Water (Dry) + # ======================================================================== # + + elif isinstance(equation, ThermalShallowWaterEquations): + # (u, D, b) system - thermal shallow water + settings = { + 'ksp_error_if_not_converged': None, + 'mat_type': 'matfree', + 'ksp_type': 'preonly', + 'pc_type': 'fieldsplit', + 'pc_fieldsplit_type': 'schur', + 'pc_fieldsplit_schur_fact_type': 'full', + 'pc_fieldsplit_1_fields': '0,1', # (u, D) system + 'pc_fieldsplit_0_fields': '2', # eliminate of b by Schur complement + 'fieldsplit_L2': { # b solve, simple block jacobi with ilu on blocks + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.AssembledPC', + 'assembled_pc_type': 'bjacobi', + 'assembled_sub_pc_type': 'ilu', + }, + 'fieldsplit_1': { # (u, D) solve, hybridized + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'gusto.AuxiliaryPC', + 'aux': { + 'mat_type': 'matfree', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.HybridizationPC', # Uses Firedrake's + 'hybridization': { # hybridization PC + 'ksp_type': 'cg', + 'pc_type': 'gamg', # AMG for trace system + 'ksp_rtol': 1e-8, + 'mg_levels': { + 'ksp_type': 'chebyshev', + 'ksp_max_it': 2, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + }, + 'mg_coarse': { + 'ksp_type': 'preonly', + 'pc_type': 'lu', + 'pc_factor_mat_solver_type': 'mumps', + }, + }, + }, + }, + } + # To eliminate b we 
pass the Schur complement form to the AuxiliaryPC. + # This Schur complement form is defined in the + # ThermalShallowWaterEquations class. + # Provide callback for nullspace of the trace system with trace_nullsp + appctx = { + 'auxform': equation.schur_complement_form(alpha=alpha), + 'trace_nullspace': trace_nullsp, + } + + # ======================================================================== # + # Moist Shallow Water (non-thermal) + # ======================================================================== # + + elif is_shallow_water and num_tracers > 0: + # (u, D, tracers) system - moist convective shallow water + settings = { + 'mat_type': 'matfree', + 'ksp_type': 'preonly', + "pc_type": "fieldsplit", + "pc_fieldsplit_type": "additive", # additive fieldsplit is used + # to separate tracers + "pc_fieldsplit_0_fields": "0,1", # (u, D) system to be hybridized + "pc_fieldsplit_1_fields": tracers, # do nothing for scalar fields + "fieldsplit_0": { + 'ksp_type': 'preonly', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.HybridizationPC', # Uses Firedrake's + 'hybridization': { # hybridization PC + 'ksp_type': 'cg', + 'pc_type': 'gamg', # AMG for trace system + 'ksp_rtol': 1e-8, + 'mg_levels': { + 'ksp_type': 'chebyshev', + 'ksp_max_it': 2, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + } + } + }, + "fieldsplit_1": { # do nothing for scalar fields - the rhs is 0 + 'ksp_type': 'preonly', + 'pc_type': 'none', + }, + } + + # Provide callback for the nullspace of trace system with trace_nullsp + appctx = {'trace_nullspace': trace_nullsp} + + # ======================================================================== # + # Shallow Water (standard) + # ======================================================================== # + + elif is_shallow_water and num_tracers == 0: + # (u, D) system - dry shallow water + # No elimination via Schur complement, just hybridization of the + # full system. 
+ settings = { + 'ksp_type': 'preonly', + 'mat_type': 'matfree', + 'pc_type': 'python', + 'pc_python_type': 'firedrake.HybridizationPC', # Uses Firedrake's + 'hybridization': { # hybridization PC + 'ksp_type': 'cg', + 'pc_type': 'gamg', # AMG for trace system + 'ksp_rtol': 1e-8, + 'mg_levels': { + 'ksp_type': 'chebyshev', + 'ksp_max_it': 2, + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu' + } + } + } + # Provide callback for the nullspace of the trace system with trace_nullsp. + appctx = { + 'trace_nullspace': trace_nullsp, + } + + else: + raise NotImplementedError( + "Hybridised solver presets not implemented for equation " + f"type {type(equation)}" + ) + + # ======================================================================== # + # Generic settings + # ======================================================================== # + + if logger.isEnabledFor(DEBUG): + settings["ksp_monitor_true_residual"] = None + # TODO: the following output is not picked up by the Gusto logger + fieldsplit_keys = [f'fieldsplit_{i}' for i in ['0', '1', 'L2', 'theta']] + for key in fieldsplit_keys: + if key in settings: + settings[key]['ksp_monitor_true_residual'] = None + + return settings, appctx + + +def monolithic_solver_parameters(): + """ + Returns PETSc solver settings for monolithic solver for mixed finite + element problems. + + Returns + ------- + settings : dict + A dictionary containing the PETSc solver settings. 
+ """ + settings = { + "snes_type": "ksponly", + "mat_type": "matfree", + "ksp_type": "gmres", + "ksp_converged_reason": None, + "ksp_atol": 1e-5, + "ksp_rtol": 1e-5, + "ksp_max_it": 400, + "pc_type": "python", + "pc_python_type": "firedrake.AssembledPC", + "assembled_pc_star_sub_sub_pc_type": "lu", + "assembled_pc_type": "python", + "assembled_pc_python_type": "firedrake.ASMStarPC", + "assembled_pc_star_construct_dim": 0, + "assembled_pc_star_sub_sub_pc_factor_mat_ordering_type": "rcm", + "assembled_pc_star_sub_sub_pc_factor_reuse_ordering": None, + "assembled_pc_star_sub_sub_pc_factor_reuse_fill": None, + "assembled_pc_star_sub_sub_pc_factor_fill": 1.2 + } + + return settings diff --git a/gusto/spatial_methods/augmentation.py b/gusto/spatial_methods/augmentation.py index 0a893394d..179cd14dc 100644 --- a/gusto/spatial_methods/augmentation.py +++ b/gusto/spatial_methods/augmentation.py @@ -11,11 +11,17 @@ transpose, nabla_grad, outer, dS, dS_h, dS_v, sign, jump, div, Constant, sqrt, cross, curl, FunctionSpace, assemble, DirichletBC ) -from firedrake.fml import subject +from firedrake.fml import ( + subject, all_terms, replace_subject, replace_test_function, + drop, Term, LabelledForm +) from gusto import ( time_derivative, transport, transporting_velocity, TransportEquationType, - logger + logger, prognostic, mass_weighted ) +from gusto.spatial_methods.limiters import MeanLimiter +from gusto.core.conservative_projection import ConservativeProjector +import numpy as np class Augmentation(object, metaclass=ABCMeta): @@ -49,6 +55,13 @@ def update(self, x_in_mixed): pass + def limit(self, x_in_mixed): + """ + Apply any special limiting as part of the augmentation + """ + + pass + class VorticityTransport(Augmentation): """ @@ -73,6 +86,8 @@ def __init__( self, domain, eqns, transpose_commutator=True, supg=False ): + self.name = 'vorticity' + V_vel = domain.spaces('HDiv') V_vort = domain.spaces('H1') @@ -238,3 +253,308 @@ def update(self, x_in_mixed): 
logger.debug('Vorticity solve') self.solver.solve() self.x_in.subfunctions[1].assign(self.Z_in) + + +class MeanMixingRatio(Augmentation): + """ + This augments a transport problem involving a mixing ratio to + include a mean mixing ratio field. This enables posivity to be + ensured during conservative transport. + + Args: + domain (:class:`Domain`): The domain object. + eqns (:class:`PrognosticEquationSet`): The overarching equation set. + mX_names (:class: list): A list of mixing ratios that + require augmented mean mixing ratios. + """ + + def __init__( + self, domain, eqns, mX_names + ): + + self.name = 'mean_mixing_ratio' + self.mX_names = mX_names + self.mX_num = len(mX_names) + + # Store information about original equation set + self.field_names = [] + for i in np.arange(len(eqns.field_names)): + self.field_names.append(eqns.field_names[i]) + + self.eqn_orig = eqns + self.domain = domain + exist_spaces = eqns.spaces + self.idx_orig = len(exist_spaces) + + DG0 = FunctionSpace(domain.mesh, "DG", 0) + DG1 = FunctionSpace(domain.mesh, "DG", 1) + + # Set up fields and names for each mixing ratio + self.mean_names = [] + self.mean_idxs = [] + self.mX_idxs = [] + mX_spaces = [] + mean_spaces = [] + self.rho_idxs = [] + + for i in range(self.mX_num): + mX_name = mX_names[i] + self.mean_names.append('mean_'+mX_name) + self.field_names.append(self.mean_names[-1]) + mean_spaces.append(DG0) + exist_spaces.append(DG0) + + self.mean_idxs.append(self.idx_orig + i) + + # Extract the mixing ratio in question: + mX_idx = eqns.field_names.index(mX_name) + mX_spaces.append(eqns.spaces[mX_idx]) + self.mX_idxs.append(mX_idx) + + # Determine if this is a conservatively transported tracer. + # If so, extract the corresponding density name, if not + # set this to None. 
+ for tracer in eqns.active_tracers: + if tracer.name == mX_name: + if tracer.density_name is not None: + self.rho_idxs.append(eqns.field_names.index(tracer.density_name)) + else: + self.rho_idxs.append('None') + + # Define a limiter using the mean mixing ratios + self.limiters = MeanLimiter(mX_spaces) + + # Projector for computing the mean mixing ratios + self.DG1_field = Function(DG1) + self.rho_field = Function(DG1) + self.DG0_field = Function(DG0) + self.compute_mean_mX = ConservativeProjector(self.rho_field, self.rho_field, self.DG1_field, self.DG0_field) + + # Create the new mixed function space + self.fs = MixedFunctionSpace(exist_spaces) + + self.X = Function(self.fs) + self.tests = TestFunctions(self.fs) + self.x_in = Function(self.fs) + self.x_out = Function(self.fs) + + self.bcs = None + + def setup_residual(self, equation): + """ + Create a new residual for the augmented equation set, + using the larger mixed function space that includes the mean + mixing ratios, and new residual terms for the mean mixing ratio + equations + + Args: + equation (:class:`PrognosticEquationSet`): The overarching equation set. + Note, this does not include the mean mixing ratios. + """ + + # Copy the existing residual + new_residual = equation.residual + + # Replace test and trial functions of original residual with + # those from the new (larger) mixed function space. + # The indices of the original fields + # are the same in the new mixed space. + for idx in range(self.idx_orig): + new_residual = new_residual.label_map( + all_terms, + replace_subject(self.X, old_idx=idx, new_idx=idx) + ) + new_residual = new_residual.label_map( + all_terms, + replace_test_function(self.tests, old_idx=idx, new_idx=idx) + ) + + # Loop over each mean mixing ratio, + # copy the residual terms relating to the original mixing ratio, + # update the test and trial functions, then add to the new residual. 
+ for i in range(self.mX_num): + mean_residual = new_residual.label_map( + lambda t: t.get(prognostic) == self.mX_names[i], + map_if_false=drop + ) + + # Replace all instances of original mixing ratios with + # the mean versions + for j in range(self.mX_num): + mean_residual = mean_residual.label_map( + all_terms, + replace_subject(self.X, old_idx=self.mX_idxs[j], new_idx=self.mean_idxs[j]) + ) + + mean_residual = mean_residual.label_map( + all_terms, + replace_test_function(self.tests, old_idx=self.mX_idxs[i], new_idx=self.mean_idxs[i]) + ) + + # Update the name to be that of the mean mixing ratio + mean_residual = mean_residual.label_map( + all_terms, + lambda t: prognostic.update_value(t, self.mean_names[i]) + ) + + # Append to the new residual + new_residual += mean_residual + + self.residual = subject(new_residual, self.X) + + # For any mass_weighted (conservative transport) terms, + # replace the subject and test functions for + # the mass-weighted form, and update the label to + # point to the new form. 
+ for term in self.residual: + if term.has_label(mass_weighted): + field = term.get(prognostic) + mass_term = term.get(mass_weighted) + + # Extract the previous labels for the + # mass-weighted term + if term.has_label(transport): + old_mass_weighted_labels = mass_term.labels + + # Transport terms are Terms not LabelledForms, + # so this change this to use the label_map + mass_term = LabelledForm(mass_term) + else: + old_mass_weighted_labels = mass_term.terms[0].labels + + if field in self.mX_names: + list_idx = self.mX_names.index(field) + mX_idx = self.mX_idxs[list_idx] + rho_idx = self.rho_idxs[list_idx] + + # Replace mixing ratio + mass_term_new = mass_term.label_map( + all_terms, + replace_subject(self.X, old_idx=mX_idx, new_idx=mX_idx) + ) + + # Replace original density + mass_term_new = mass_term_new.label_map( + all_terms, + replace_subject(self.X, old_idx=rho_idx, new_idx=rho_idx) + ) + + # Replace test function + mass_term_new = mass_term_new.label_map( + all_terms, + replace_test_function(self.tests, old_idx=mX_idx, new_idx=mX_idx) + ) + + elif field in self.mean_names: + list_idx = self.mean_names.index(field) + mX_idx = self.mX_idxs[list_idx] + mean_idx = self.mean_idxs[list_idx] + rho_idx = self.rho_idxs[list_idx] + + # Replace mixing ratio + mass_term_new = mass_term.label_map( + all_terms, + replace_subject(self.X, old_idx=mX_idx, new_idx=mean_idx) + ) + + # Replace density + mass_term_new = mass_term_new.label_map( + all_terms, + replace_subject(self.X, old_idx=rho_idx, new_idx=rho_idx) + ) + + # Replace test function + mass_term_new = mass_term_new.label_map( + all_terms, + replace_test_function(self.tests, old_idx=mX_idx, new_idx=mean_idx) + ) + + # Create a new mass-weighted term, which has the correct labels + mass_term_new = Term(mass_term_new.form, old_mass_weighted_labels) + mass_term_new = subject(mass_term_new, self.X) + + # Make a new term, that links to the new mass-weighted term + new_term = Term(term.form, term.labels) + new_term = 
mass_weighted.update_value(new_term, mass_term_new) + + # Put this new term back in the residual: + self.residual = self.residual.label_map( + lambda t: t == term, + map_if_true=lambda t: new_term + ) + + self.residual = subject(self.residual, self.X) + + def pre_apply(self, x_in): + """ + Sets the original fields, i.e. not the mean fields + + Args: + x_in (:class:`Function`): The input fields + """ + + for idx in range(self.idx_orig): + self.x_in.subfunctions[idx].assign(x_in.subfunctions[idx]) + + def post_apply(self, x_out): + """ + Sets the output fields, i.e. not the mean fields + + Args: + x_out (:class:`Function`): The output fields + """ + + for idx in range(self.idx_orig): + x_out.subfunctions[idx].assign(self.x_out.subfunctions[idx]) + + def update(self, x_in_mixed): + """ + Compute the mean mixing ratio field by conservative projection, + where both the target and source density are in the higher-order + space. + + Args: + x_in_mixed (:class:`Function`): The mixed function, containing + mean fields to update. + """ + + # Update the density field for the mean mixing ratios + self.rho_field.assign(x_in_mixed.subfunctions[0]) + + # Update the mean mixing ratios: + for i in range(self.mX_num): + # Extract the reference density: + self.rho_field.assign(x_in_mixed.subfunctions[self.rho_idxs[i]]) + + # Compute the mean mixing ratio with conservative projection + self.DG1_field.assign(x_in_mixed.subfunctions[self.mX_idxs[i]]) + self.compute_mean_mX.project() + + # Clip any minuscule negative values in the mean mixing ratio + self.limiters._clip_means_kernel.apply(self.DG0_field, self.DG0_field) + + x_in_mixed.subfunctions[self.mean_idxs[i]].assign(self.DG0_field) + + def limit(self, x_in_mixed): + """ + Limit the mixing ratios using a blended limiter with + the mean mixing ratios + + Args: + x_in_mixed (:class:`Function`): The mixed function, containing + mixing ratio fields to limit. 
+ """ + + # Ensure non-negativity by applying the blended limiter + mX_pre = [] + means = [] + + for i in range(self.mX_num): + mX_pre.append(x_in_mixed.subfunctions[self.mX_idxs[i]]) + means.append(x_in_mixed.subfunctions[self.mean_idxs[i]]) + + self.limiters.apply(mX_pre, means) + + for i in range(self.mX_num): + self.limiters._clip_DG1_field.apply(mX_pre[i], mX_pre[i]) + x_in_mixed.subfunctions[self.mX_idxs[i]].assign(mX_pre[i]) diff --git a/gusto/spatial_methods/limiters.py b/gusto/spatial_methods/limiters.py index ea1539ae2..dc30f6c6c 100644 --- a/gusto/spatial_methods/limiters.py +++ b/gusto/spatial_methods/limiters.py @@ -6,14 +6,14 @@ """ from firedrake import (BrokenElement, Function, FunctionSpace, interval, - FiniteElement, TensorProductElement) + FiniteElement, TensorProductElement, Constant) from firedrake.slope_limiter.vertex_based_limiter import VertexBasedLimiter -from gusto.core.kernels import LimitMidpoints, ClipZero +from gusto.core.kernels import LimitMidpoints, ClipZero, MeanMixingRatioWeights import numpy as np __all__ = ["DG1Limiter", "ThetaLimiter", "NoLimiter", "ZeroLimiter", - "MixedFSLimiter"] + "MixedFSLimiter", "MeanLimiter"] class DG1Limiter(object): @@ -261,3 +261,91 @@ def apply(self, fields): for field, sublimiter in self.sublimiters.items(): field = fields.subfunctions[self.field_idxs[field]] sublimiter.apply(field) + + +class MeanLimiter(object): + """ + A mass-preserving limiter for mixing ratios that ensures non-negativity + by blending the mixing ratio with its associated mean field. + The blending factor is given by the DG0 function lamda. The same lamda + is used when there are multiple fields, for mass conservation. + """ + + def __init__(self, spaces): + """ + Args: + spaces: The function spaces for the DG1 mixing ratios + Raises: + ValueError: If the space is not appropriate for the limiter, i.e DG1 + """ + + # The Mean Limiter is currently set up for mixing ratios in DG1. 
+ for space in spaces: + degree = space.ufl_element().degree() + if (space.ufl_element().sobolev_space.name != 'L2' + or ((type(degree) is tuple and np.any([deg != 1 for deg in degree])) + and degree != 1)): + raise NotImplementedError('MeanLimiter only implemented for mixing' + + 'ratios in the DG1 space') + + self.space = spaces[0] + mesh = self.space.mesh() + + # Create equispaced DG1 space needed for limiting + if space.extruded: + cell = mesh._base_mesh.ufl_cell().cellname() + DG1_hori_elt = FiniteElement("DG", cell, 1, variant="equispaced") + DG1_vert_elt = FiniteElement("DG", interval, 1, variant="equispaced") + DG1_element = TensorProductElement(DG1_hori_elt, DG1_vert_elt) + else: + cell = mesh.ufl_cell().cellname() + DG1_element = FiniteElement("DG", cell, 1, variant="equispaced") + + DG1_equispaced = FunctionSpace(mesh, DG1_element) + DG0 = FunctionSpace(mesh, 'DG', 0) + DG1 = FunctionSpace(mesh, 'DG', 1) + + self.lamda = Function(DG0) + self.mX_field = Function(DG1_equispaced) + self.mean_field = Function(DG0) + self.mX_new = Function(DG1_equispaced) + + self._lamda_kernel = MeanMixingRatioWeights(DG1_equispaced) + + # Also construct kernels to clip any very small negatives + # that arise from numerical error. These are used in the + # mean mixing ratio augmentation limit routine. + self._clip_DG1_field = ClipZero(DG1) + self._clip_means_kernel = ClipZero(DG0) + + def apply(self, mX_fields, mean_fields): + """ + Compute the limiter weights, lambda, and use these + to combine the DG1 mixing ratio and DG0 mean field + to ensure non-negativity. + + Args: + mX_fields (:class:`Function`): the DG1 mixing ratios to limit. + mean_fields (:class:`Function`): the DG0 mean field associated with + each mX_field. 
+ """ + + # Remove weights from previous applications + self.lamda.interpolate(Constant(0.0)) + + for i in range(len(mX_fields)): + # Interpolate fields from DG1 to DG1 equispaced + self.mX_field.interpolate(mX_fields[i]) + self.mean_field.interpolate(mean_fields[i]) + + # Update the weights based on any negative values + self._lamda_kernel.apply(self.lamda, self.mX_field, self.mean_field) + + # Perform blended limiting, with all mixing ratios using + # the same lambda field to ensure conservation. + for i in range(len(mX_fields)): + self.mX_field.interpolate(mX_fields[i]) + self.mean_field.interpolate(mean_fields[i]) + + self.mX_new.interpolate((Constant(1.0) - self.lamda)*self.mX_field + self.lamda*self.mean_field) + mX_fields[i].interpolate(self.mX_new) diff --git a/gusto/time_discretisation/__init__.py b/gusto/time_discretisation/__init__.py index 20b428449..e26959ad2 100644 --- a/gusto/time_discretisation/__init__.py +++ b/gusto/time_discretisation/__init__.py @@ -4,4 +4,5 @@ from gusto.time_discretisation.imex_runge_kutta import * # noqa from gusto.time_discretisation.multi_level_schemes import * # noqa from gusto.time_discretisation.wrappers import * # noqa -from gusto.time_discretisation.sdc import * # noqa \ No newline at end of file +from gusto.time_discretisation.deferred_correction import * # noqa +from gusto.time_discretisation.parallel_dc import * # noqa \ No newline at end of file diff --git a/gusto/time_discretisation/sdc.py b/gusto/time_discretisation/deferred_correction.py similarity index 51% rename from gusto/time_discretisation/sdc.py rename to gusto/time_discretisation/deferred_correction.py index 7c419cb21..cd692e2bc 100644 --- a/gusto/time_discretisation/sdc.py +++ b/gusto/time_discretisation/deferred_correction.py @@ -1,46 +1,76 @@ -u""" -Objects for discretising time derivatives using Spectral Deferred Correction -Methods. +""" +Objects for discretising time derivatives using Deferred Correction (DC) +Methods. 
This includes Spectral Deferred Correction (SDC) and Serial Revisionist +Integral Deferred Correction (RIDC) methods. -SDC objects discretise ∂y/∂t = F(y), for variable y, time t and -operator F. +These methods discretise ∂y/∂t = F(y), for variable y, time t, and operator F. -Written in Picard integral form this equation is -y(t) = y_n + int[t_n,t] F(y(s)) ds +In Picard integral form, this equation is: +y(t) = y_n + ∫[t_n, t] F(y(s)) ds -Using some quadrature rule, we can evaluate y on a temporal quadrature node as -y_m = y_n + sum[j=1,M] q_mj*F(y_j) -where q_mj can be found by integrating Lagrange polynomials. This is similar to -how Runge-Kutta methods are formed. +================================================================================ +Spectral Deferred Correction (SDC) Formulation: +================================================================================ -In matrix form this equation is: -(I - dt*Q*F)(y)=y_n +SDC methods integrate the function F(y) over the interval [t_n, t_n+1] using +quadrature. Evaluating y on temporal quadrature nodes gives: +y_m = y_n + Σ[j=1,M] q_mj * F(y_j) +where q_mj are derived from integrating Lagrange polynomials, similar to how +Runge-Kutta methods are constructed. -Computing y by Picard iteration through k we get: -y^(k+1)=y^k + (y_n - (I - dt*Q*F)(y^k)) +In matrix form: +(I - dt * Q * F)(y) = y_n -Finally, to get our SDC method we precondition this system, using some approximation -of Q Q_delta: -(I - dt*Q_delta*F)(y^(k+1)) = y_n + dt*(Q - Q_delta)F(y^k) +Using Picard iteration: +y^(k+1) = y^k + (y_n - (I - dt * Q * F)(y^k)) -The zero-to-node (Z2N) formulation is then: -y_m^(k+1) = y_n + sum(j=1,M) q'_mj*(F(y_j^(k+1)) - F(y_j^k)) - + sum(j=1,M) q_mj*F(y_(m-1)^k) -for entires q_mj in Q and q'_mj in Q_delta. 
+Preconditioning this system with an approximation Q_delta gives: +(I - dt * Q_delta * F)(y^(k+1)) = y_n + dt * (Q - Q_delta) * F(y^k) -Node-wise from previous quadrature node (N2N formulation), the implicit SDC calculation is: -y_m^(k+1) = y_(m-1)^(k+1) + dtau_m*(F(y_(m)^(k+1)) - F(y_(m)^k)) - + sum(j=1,M) s_mj*F(y_(m-1)^k) -where s_mj = q_mj - q_(m-1)j for entires q_ik in Q. +Two formulations are commonly used: +1. Zero-to-node (Z2N): + y_m^(k+1) = y_n + Σ[j=1,M] q'_mj * (F(y_j^(k+1)) - F(y_j^k)) + + Σ[j=1,M] q_mj * F(y_(j)^k) + where q_mj are entries in Q and q'_mj are entries in Q_delta. +2. Node-to-node (N2N): + y_m^(k+1) = y_(m-1)^(k+1) + dtau_m * (F(y_(m)^(k+1)) - F(y_(m)^k)) + + Σ[j=1,M] s_mj * F(y_(j)^k) + where s_mj = q_mj - q_(m-1)j for entries q_ik in Q. -Key choices in our SDC method are: -- Choice of quadrature node type (e.g. gauss-lobatto) +Key choices in SDC: +- Quadrature node type (e.g., Gauss-Lobatto) - Number of quadrature nodes -- Number of iterations - each iteration increases the order of accuracy up to - the order of the underlying quadrature -- Choice of Q_delta (e.g. Forward Euler, Backward Euler, LU-trick) -- How to get initial solution on quadrature nodes +- Number of iterations (each iteration increases accuracy up to the quadrature order) +- Choice of Q_delta (e.g., Forward Euler, Backward Euler, LU-trick) +- Initial solution on quadrature nodes + +================================================================================ +Revisionist Integral Deferred Correction (RIDC) Formulation: +================================================================================ + +RIDC methods are similar to SDC but use equidistant nodes and a different +formulation for the error equation. The process involves: +1. Using a low-order method (predictor) to compute an initial solution: + y_m^(0) = y_(m-1)^(0) + dt * F(y_(m)^(0)) + +2. 
Performing K correction steps: + y_m^(k+1) = y_(m-1)^(k+1) + dt * (F(y_(m)^(k+1)) - F(y_(m)^k)) + + Σ[j=1,M] s_mj * F(y_(j)^k) +We solve on N equispaced nodes on the interval [0, T] divided into J intervals, +each further divided into M subintervals: + + 0 * * * * * | * * * * * | * * * * * | * * * * * | * * * * * T + | J intervals, each with M subintervals | + +Here, M >> K, and M must be at least K * (K+1) / 2 for the reduced stencil RIDC method. +dt = T / N, N = J * M. +Each correction sweep increases accuracy up to the quadrature order. + +Key choices in RIDC: +- Number of subintervals J +- Number of quadrature nodes M + 1 +- Number of correction iterations K """ from abc import ABCMeta @@ -54,10 +84,9 @@ from firedrake.utils import cached_property from gusto.time_discretisation.time_discretisation import wrapper_apply from gusto.core.labels import (time_derivative, implicit, explicit, source_label) - from qmat import genQCoeffs, genQDeltaCoeffs -__all__ = ["SDC"] +__all__ = ["SDC", "RIDC"] class SDC(object, metaclass=ABCMeta): @@ -66,7 +95,7 @@ class SDC(object, metaclass=ABCMeta): def __init__(self, base_scheme, domain, M, maxk, quad_type, node_type, qdelta_imp, qdelta_exp, formulation="N2N", field_name=None, linear_solver_parameters=None, nonlinear_solver_parameters=None, final_update=True, - limiter=None, options=None, initial_guess="base"): + limiter=None, initial_guess="base"): """ Initialise SDC object Args: @@ -96,10 +125,6 @@ def __init__(self, base_scheme, domain, M, maxk, quad_type, node_type, qdelta_im quadrature value. Defaults to True limiter (:class:`Limiter` object, optional): a limiter to apply to the evolving field to enforce monotonicity. Defaults to None. - options (:class:`AdvectionOptions`, optional): an object containing - options to either be passed to the spatial discretisation, or - to control the "wrapper" methods, such as Embedded DG or a - recovery method. Defaults to None. 
initial_guess (str, optional): Initial guess to be base timestepper, or copy """ # Check the configuration options @@ -108,6 +133,7 @@ def __init__(self, base_scheme, domain, M, maxk, quad_type, node_type, qdelta_im # Initialise parameters self.base = base_scheme + self.base.dt = domain.dt self.field_name = field_name self.domain = domain self.dt_coarse = domain.dt @@ -125,11 +151,6 @@ def __init__(self, base_scheme, domain, M, maxk, quad_type, node_type, qdelta_im quadType=quad_type, form=formulation) - # Rescale to be over [0,dt] rather than [0,1] - self.nodes = float(self.dt_coarse)*self.nodes - self.dtau = np.diff(np.append(0, self.nodes)) - self.Q = float(self.dt_coarse)*self.Q - self.Qfin = float(self.dt_coarse)*self.weights self.qdelta_imp_type = qdelta_imp self.formulation = formulation self.node_type = node_type @@ -137,10 +158,18 @@ def __init__(self, base_scheme, domain, M, maxk, quad_type, node_type, qdelta_im # Get Q_delta matrices self.Qdelta_imp = genQDeltaCoeffs(qdelta_imp, form=formulation, - nodes=self.nodes, Q=self.Q, nNodes=M, nodeType=node_type, quadType=quad_type) + nodes=self.nodes, Q=self.Q, nNodes=M, nodeType=node_type, quadType=quad_type, k=1) self.Qdelta_exp = genQDeltaCoeffs(qdelta_exp, form=formulation, nodes=self.nodes, Q=self.Q, nNodes=M, nodeType=node_type, quadType=quad_type) + # Rescale to be over [0,dt] rather than [0,1] + self.nodes = float(self.dt_coarse)*self.nodes + self.dtau = np.diff(np.append(0, self.nodes)) + self.Q = float(self.dt_coarse)*self.Q + self.Qfin = float(self.dt_coarse)*self.weights + self.Qdelta_imp = float(self.dt_coarse)*self.Qdelta_imp + self.Qdelta_exp = float(self.dt_coarse)*self.Qdelta_exp + # Set default linear and nonlinear solver options if none passed in if linear_solver_parameters is None: self.linear_solver_parameters = {'snes_type': 'ksponly', @@ -206,7 +235,7 @@ def setup(self, equation, apply_bcs=True, *active_labels): self.quad = [Function(W) for _ in range(self.M)] self.source_Uk = 
[Function(W) for _ in range(self.M+1)] self.source_Ukp1 = [Function(W) for _ in range(self.M+1)] - self.U_SDC = Function(W) + self.U_DC = Function(W) self.U_start = Function(W) self.Un = Function(W) self.Q_ = Function(W) @@ -283,7 +312,7 @@ def res(self, m): lambda t: t.has_label(time_derivative), map_if_false=drop) residual = mass_form.label_map(all_terms, - map_if_true=replace_subject(self.U_SDC, old_idx=self.idx)) + map_if_true=replace_subject(self.U_DC, old_idx=self.idx)) residual -= mass_form.label_map(all_terms, map_if_true=replace_subject(self.U_start, old_idx=self.idx)) # Loop through nodes up to m-1 and calcualte @@ -349,7 +378,7 @@ def res(self, m): # Qdelta_imp[m,m]*(F(y_(m)^(k+1)) - F(y_(m)^k)) r_imp_kp1 = self.residual.label_map( lambda t: t.has_label(implicit), - map_if_true=replace_subject(self.U_SDC, old_idx=self.idx), + map_if_true=replace_subject(self.U_DC, old_idx=self.idx), map_if_false=drop) r_imp_kp1 = r_imp_kp1.label_map( all_terms, @@ -379,7 +408,7 @@ def solvers(self): solvers = [] for m in range(self.M): # setup solver using residual defined in derived class - problem = NonlinearVariationalProblem(self.res(m), self.U_SDC, bcs=self.bcs) + problem = NonlinearVariationalProblem(self.res(m), self.U_DC, bcs=self.bcs) solver_name = self.field_name+self.__class__.__name__ + "%s" % (m) solvers.append(NonlinearVariationalSolver(problem, solver_parameters=self.nonlinear_solver_parameters, options_prefix=solver_name)) return solvers @@ -429,7 +458,7 @@ def apply(self, x_out, x_in): if self.qdelta_imp_type == "MIN-SR-FLEX": # Recompute Implicit Q_delta matrix for each iteration k - self.Qdelta_imp = genQDeltaCoeffs( + self.Qdelta_imp = float(self.dt_coarse)*genQDeltaCoeffs( self.qdelta_imp_type, form=self.formulation, nodes=self.nodes, @@ -463,7 +492,7 @@ def apply(self, x_out, x_in): if (self.formulation == "N2N"): self.U_start.assign(self.Unodes1[m-1]) self.solver = solver_list[m-1] - self.U_SDC.assign(self.Unodes[m]) + 
self.U_DC.assign(self.Unodes[m]) # Compute # for N2N: @@ -474,7 +503,7 @@ def apply(self, x_out, x_in): # y_m^(k+1) = y^n + sum(j=1,m) Qdelta_imp[m,j]*(F(y_(m)^(k+1)) - F(y_(m)^k)) # + sum(j=1,M) Q_delta_exp[m,j]*(S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) self.solver.solve() - self.Unodes1[m].assign(self.U_SDC) + self.Unodes1[m].assign(self.U_DC) # Evaluate source terms for evaluate in self.evaluate_source: @@ -509,3 +538,375 @@ def apply(self, x_out, x_in): x_out.assign(self.Unodes[-1]) else: x_out.assign(self.Unodes[-1]) + + +class RIDC(object, metaclass=ABCMeta): + """Class for Revisionist Integral Deferred Correction schemes.""" + + def __init__(self, base_scheme, domain, M, K, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, + limiter=None, reduced=True): + """ + Initialise RIDC object + Args: + base_scheme (:class:`TimeDiscretisation`): Base time stepping scheme to get first guess of solution on + quadrature nodes. + domain (:class:`Domain`): the model's domain object, containing the + mesh and the compatible function spaces. + M (int): Number of subintervals + K (int): Max number of correction iterations + field_name (str, optional): name of the field to be evolved. + Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. + limiter (:class:`Limiter` object, optional): a limiter to apply to + the evolving field to enforce monotonicity. Defaults to None. + reduced (bool, optional): whether to use reduced stencils for RIDC. Defaults to True. 
+ """ + self.base = base_scheme + self.field_name = field_name + self.domain = domain + self.dt_coarse = domain.dt + self.limiter = limiter + self.augmentation = self.base.augmentation + self.wrapper = self.base.wrapper + self.K = K + self.M = M + self.reduced = reduced + self.dt = Constant(float(self.dt_coarse)/(self.M)) + + if reduced: + self.Q = [] + for l in range(1, self.K + 1): + _, _, Q = genQCoeffs( + "Collocation", + nNodes=l + 1, + nodeType="EQUID", + quadType="LOBATTO", + form="N2N" + ) + Q = l * float(self.dt) * Q + self.Q.append(Q) + else: + # Get integration weights + _, _, self.Q = genQCoeffs( + "Collocation", + nNodes=self.K + 1, + nodeType="EQUID", + quadType="LOBATTO", + form="N2N" + ) + self.Q = self.K * float(self.dt) * self.Q + + # Set default linear and nonlinear solver options if none passed in + if linear_solver_parameters is None: + self.linear_solver_parameters = {'snes_type': 'ksponly', + 'ksp_type': 'cg', + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu'} + else: + self.linear_solver_parameters = linear_solver_parameters + + if nonlinear_solver_parameters is None: + self.nonlinear_solver_parameters = {'snes_type': 'newtonls', + 'ksp_type': 'gmres', + 'pc_type': 'bjacobi', + 'sub_pc_type': 'ilu'} + else: + self.nonlinear_solver_parameters = nonlinear_solver_parameters + + def setup(self, equation, apply_bcs=True, *active_labels): + """ + Set up the RIDC time discretisation based on the equation. + + Args: + equation (:class:`PrognosticEquation`): the model's equation. + apply_bcs (bool, optional): whether to apply the equation's boundary + conditions. Defaults to True. + *active_labels (:class:`Label`): labels indicating which terms of + the equation to include. 
+ """ + # Inherit from base time discretisation + self.base.setup(equation, apply_bcs, *active_labels) + self.equation = self.base.equation + self.residual = self.base.residual + self.evaluate_source = self.base.evaluate_source + + for t in self.residual: + # Check all terms are labeled implicit or explicit + if ((not t.has_label(implicit)) and (not t.has_label(explicit)) + and (not t.has_label(time_derivative)) and (not t.has_label(source_label))): + raise NotImplementedError("Non time-derivative or source terms must be labeled as implicit or explicit") + + # Set up bcs + self.bcs = self.base.bcs + + # Set up RIDC variables + if self.field_name is not None and hasattr(equation, "field_names"): + self.idx = equation.field_names.index(self.field_name) + W = equation.spaces[self.idx] + else: + self.field_name = equation.field_name + W = equation.function_space + self.idx = None + self.W = W + self.Unodes = [Function(W) for _ in range(self.M+1)] + self.Unodes1 = [Function(W) for _ in range(self.M+1)] + self.fUnodes = [Function(W) for _ in range(self.M+1)] + self.quad = [Function(W) for _ in range(self.M+1)] + self.source_Uk = [Function(W) for _ in range(self.M+1)] + self.source_Ukp1 = [Function(W) for _ in range(self.M+1)] + self.U_DC = Function(W) + self.U_start = Function(W) + self.Un = Function(W) + self.Q_ = Function(W) + self.quad_final = Function(W) + self.U_fin = Function(W) + self.Urhs = Function(W) + self.Uin = Function(W) + self.source_in = Function(W) + self.source_Ukp1_m = Function(W) + self.source_Uk_m = Function(W) + self.Uk_mp1 = Function(W) + self.Uk_m = Function(W) + self.Ukp1_m = Function(W) + + @property + def nlevels(self): + return 1 + + def compute_quad(self, Q, fUnodes, m): + """ + Computes integration of F(y) on quadrature nodes + """ + quad = Function(self.W) + quad.assign(0.) 
+ for k in range(0, np.shape(Q)[1]): + quad += float(Q[m, k])*fUnodes[k] + return quad + + def compute_quad_final(self, Q, fUnodes, m): + """ + Computes final integration of F(y) on quadrature nodes + """ + quad = Function(self.W) + quad.assign(0.) + if self.reduced: + l = np.shape(Q)[0] - 1 + else: + l = self.K + for k in range(0, l+1): + quad += float(Q[-1, k])*fUnodes[m - l + k] + return quad + + @property + def res_rhs(self): + """Set up the residual for the calculation of F(y).""" + a = self.residual.label_map(lambda t: t.has_label(time_derivative), + replace_subject(self.Urhs, old_idx=self.idx), + drop) + # F(y) + L = self.residual.label_map(lambda t: any(t.has_label(time_derivative, source_label)), + drop, + replace_subject(self.Uin, old_idx=self.idx)) + L_source = self.residual.label_map(lambda t: t.has_label(source_label), + replace_subject(self.source_in, old_idx=self.idx), + drop) + residual_rhs = a - (L + L_source) + return residual_rhs.form + + @property + def res(self): + """Set up the discretisation's residual.""" + # Add time derivative terms y^(k+1)_m - y_n + mass_form = self.residual.label_map( + lambda t: t.has_label(time_derivative), + map_if_false=drop) + residual = mass_form.label_map(all_terms, + map_if_true=replace_subject(self.U_DC, old_idx=self.idx)) + residual -= mass_form.label_map(all_terms, + map_if_true=replace_subject(self.U_start, old_idx=self.idx)) + + # Calculate source terms + r_source_kp1 = self.residual.label_map( + lambda t: t.has_label(source_label), + map_if_true=replace_subject(self.source_Ukp1_m, old_idx=self.idx), + map_if_false=drop) + r_source_kp1 = r_source_kp1.label_map( + all_terms, + lambda t: Constant(self.dt)*t) + residual += r_source_kp1 + + r_source_k = self.residual.label_map( + lambda t: t.has_label(source_label), + map_if_true=replace_subject(self.source_Uk_m, old_idx=self.idx), + map_if_false=drop) + r_source_k = r_source_k.label_map( + all_terms, + map_if_true=lambda t: Constant(self.dt)*t) + residual -= 
r_source_k + + # Add on final implicit terms + # dt*(F(y_(m)^(k+1)) - F(y_(m)^k)) + r_imp_kp1 = self.residual.label_map( + lambda t: t.has_label(implicit), + map_if_true=replace_subject(self.U_DC, old_idx=self.idx), + map_if_false=drop) + r_imp_kp1 = r_imp_kp1.label_map( + all_terms, + lambda t: Constant(self.dt)*t) + residual += r_imp_kp1 + r_imp_k = self.residual.label_map( + lambda t: t.has_label(implicit), + map_if_true=replace_subject(self.Uk_mp1, old_idx=self.idx), + map_if_false=drop) + r_imp_k = r_imp_k.label_map( + all_terms, + lambda t: Constant(self.dt)*t) + residual -= r_imp_k + + r_exp_kp1 = self.residual.label_map( + lambda t: t.has_label(explicit), + map_if_true=replace_subject(self.Ukp1_m, old_idx=self.idx), + map_if_false=drop) + r_exp_kp1 = r_exp_kp1.label_map( + all_terms, + lambda t: Constant(self.dt)*t) + residual += r_exp_kp1 + r_exp_k = self.residual.label_map( + lambda t: t.has_label(explicit), + map_if_true=replace_subject(self.Uk_m, old_idx=self.idx), + map_if_false=drop) + r_exp_k = r_exp_k.label_map( + all_terms, + lambda t: Constant(self.dt)*t) + residual -= r_exp_k + + # Add on sum(j=1,M) s_mj*F(y_m^k), where s_mj = q_mj-q_m-1j + # and s1j = q1j. 
+ Q = self.residual.label_map(lambda t: t.has_label(time_derivative), + replace_subject(self.Q_, old_idx=self.idx), + drop) + residual += Q + return residual.form + + @cached_property + def solver(self): + """Set up the problem and the solver for the nonlinear solve.""" + # setup solver using residual defined in derived class + problem = NonlinearVariationalProblem(self.res, self.U_DC, bcs=self.bcs) + solver_name = self.field_name+self.__class__.__name__ + solver = NonlinearVariationalSolver(problem, solver_parameters=self.nonlinear_solver_parameters, options_prefix=solver_name) + return solver + + @cached_property + def solver_rhs(self): + """Set up the problem and the solver for mass matrix inversion.""" + # setup linear solver using rhs residual defined in derived class + prob_rhs = NonlinearVariationalProblem(self.res_rhs, self.Urhs, bcs=self.bcs) + solver_name = self.field_name+self.__class__.__name__+"_rhs" + return NonlinearVariationalSolver(prob_rhs, solver_parameters=self.linear_solver_parameters, + options_prefix=solver_name) + + @wrapper_apply + def apply(self, x_out, x_in): + self.Un.assign(x_in) + + # Compute initial guess on quadrature nodes with low-order + # base timestepper + self.Unodes[0].assign(self.Un) + self.M1 = self.K + + for m in range(self.M): + self.base.dt = float(self.dt) + self.base.apply(self.Unodes[m+1], self.Unodes[m]) + + for m in range(self.M+1): + for evaluate in self.evaluate_source: + evaluate(self.Unodes[m], self.base.dt, x_out=self.source_Uk[m]) + + # Iterate through correction sweeps + for k in range(1, self.K+1): + # Compute: sum(j=1,M) (s_mj*F(y_m^k) + s_mj*S(y_m^k)) + for m in range(self.M+1): + self.Uin.assign(self.Unodes[m]) + # Include source terms + for evaluate in self.evaluate_source: + evaluate(self.Uin, self.base.dt, x_out=self.source_in) + self.solver_rhs.solve() + self.fUnodes[m].assign(self.Urhs) + + # Loop through quadrature nodes and solve + self.Unodes1[0].assign(self.Unodes[0]) + for evaluate in 
self.evaluate_source: + evaluate(self.Unodes[0], self.base.dt, x_out=self.source_Uk[0]) + if self.reduced: + self.M1 = k + for m in range(0, self.M1): + # Set integration matrix + if self.reduced: + self.Q_.assign(self.compute_quad(self.Q[k-1], self.fUnodes, m+1)) + else: + self.Q_.assign(self.compute_quad(self.Q, self.fUnodes, m+1)) + + # Set initial guess for solver, and pick correct solver + self.U_start.assign(self.Unodes1[m]) + self.Ukp1_m.assign(self.Unodes1[m]) + self.Uk_mp1.assign(self.Unodes[m+1]) + self.Uk_m.assign(self.Unodes[m]) + self.source_Ukp1_m.assign(self.source_Ukp1[m]) + self.source_Uk_m.assign(self.source_Uk[m]) + self.U_DC.assign(self.Unodes[m+1]) + + # Compute: + # y_m^(k+1) = y_(m-1)^(k+1) + dt*(F(y_(m)^(k+1)) - F(y_(m)^k) + # + S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) + # + sum(j=1,M) s_mj*(F+S)(y^k) + self.solver.solve() + self.Unodes1[m+1].assign(self.U_DC) + + # Evaluate source terms + for evaluate in self.evaluate_source: + evaluate(self.Unodes1[m+1], self.base.dt, x_out=self.source_Ukp1[m+1]) + + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.Unodes1[m+1]) + for m in range(self.M1, self.M): + # Set integration matrix + if self.reduced: + self.Q_.assign(self.compute_quad_final(self.Q[k-1], self.fUnodes, m+1)) + else: + self.Q_.assign(self.compute_quad_final(self.Q, self.fUnodes, m+1)) + + # Set initial guess for solver, and pick correct solver + self.U_start.assign(self.Unodes1[m]) + self.Ukp1_m.assign(self.Unodes1[m]) + self.Uk_mp1.assign(self.Unodes[m+1]) + self.Uk_m.assign(self.Unodes[m]) + self.source_Ukp1_m.assign(self.source_Ukp1[m]) + self.source_Uk_m.assign(self.source_Uk[m]) + self.U_DC.assign(self.Unodes[m+1]) + + # Compute: + # y_m^(k+1) = y_(m-1)^(k+1) + dt*(F(y_(m)^(k+1)) - F(y_(m)^k) + # + S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) + # + sum(j=1,M) s_mj*(F+S)(y^k) + self.solver.solve() + self.Unodes1[m+1].assign(self.U_DC) + + # Evaluate source terms + for evaluate in self.evaluate_source: + 
evaluate(self.Unodes1[m+1], self.base.dt, x_out=self.source_Ukp1[m+1]) + + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.Unodes1[m+1]) + + for m in range(self.M+1): + self.Unodes[m].assign(self.Unodes1[m]) + self.source_Uk[m].assign(self.source_Ukp1[m]) + + x_out.assign(self.Unodes[-1]) diff --git a/gusto/time_discretisation/explicit_runge_kutta.py b/gusto/time_discretisation/explicit_runge_kutta.py index 187339df8..09f8689e6 100644 --- a/gusto/time_discretisation/explicit_runge_kutta.py +++ b/gusto/time_discretisation/explicit_runge_kutta.py @@ -282,7 +282,7 @@ def res(self): ) # Set up all-but-last RHS - if self.idx is not None: + if self.idx is not None and self.wrapper is None: # If original function is in mixed function space, then ensure # correct test function in the all-but-last form r_all_but_last = self.residual.label_map( @@ -321,7 +321,6 @@ def solve_stage(self, x0, stage): if self.rk_formulation == RungeKuttaFormulation.increment: self.x1.assign(x0) - for i in range(stage): self.x1.assign(self.x1 + self.dt*self.butcher_matrix[stage-1, i]*self.k[i]) for evaluate in self.evaluate_source: @@ -340,8 +339,6 @@ def solve_stage(self, x0, stage): self.x1.assign(x0) for i in range(self.nStages): self.x1.assign(self.x1 + self.dt*self.butcher_matrix[stage, i]*self.k[i]) - self.x1.assign(self.x1) - if self.limiter is not None: self.limiter.apply(self.x1) @@ -352,7 +349,9 @@ def solve_stage(self, x0, stage): # Use previous stage value as a first guess (otherwise may not converge) self.field_i[stage+1].assign(self.field_i[stage]) - if self.limiter is not None: + if hasattr(self.augmentation, 'limit') and callable(self.augmentation.limit): + self.augmentation.limit(self.field_i[stage]) + elif self.limiter is not None: self.limiter.apply(self.field_i[stage]) for evaluate in self.evaluate_source: @@ -363,7 +362,9 @@ def solve_stage(self, x0, stage): if (stage == self.nStages - 1): self.x1.assign(self.field_i[stage+1]) - if 
self.limiter is not None: + if hasattr(self.augmentation, 'limit') and callable(self.augmentation.limit): + self.augmentation.limit(self.x1) + elif self.limiter is not None: self.limiter.apply(self.x1) elif self.rk_formulation == RungeKuttaFormulation.linear: diff --git a/gusto/time_discretisation/imex_runge_kutta.py b/gusto/time_discretisation/imex_runge_kutta.py index ba3911fd4..82e2a5ab9 100644 --- a/gusto/time_discretisation/imex_runge_kutta.py +++ b/gusto/time_discretisation/imex_runge_kutta.py @@ -93,6 +93,15 @@ def __init__(self, domain, butcher_imp, butcher_exp, field_name=None, self.butcher_exp = butcher_exp self.nStages = int(np.shape(self.butcher_imp)[1]) + # Some butcher tableaus have zero first stage, if so, we don't need to do an + # initial solve and can copy across x_in to x_s[0] + self.zero_first_stage = True + self.solver_start_stage = 1 + for value in self.butcher_imp[0]: + if value != 0.0: + self.zero_first_stage = False + self.solver_start_stage = 0 + # Set default linear and nonlinear solver options if none passed in if linear_solver_parameters is None: self.linear_solver_parameters = {'snes_type': 'ksponly', @@ -231,7 +240,7 @@ def final_res(self): def solvers(self): """Set up a list of solvers for each problem at a stage.""" solvers = [] - for stage in range(self.nStages): + for stage in range(self.solver_start_stage, self.nStages): # setup solver using residual defined in derived class problem = NonlinearVariationalProblem(self.res(stage), self.x_out, bcs=self.bcs) solver_name = self.field_name+self.__class__.__name__ + "%s" % (stage) @@ -251,15 +260,16 @@ def apply(self, x_out, x_in): self.x1.assign(x_in) self.x_out.assign(x_in) solver_list = self.solvers + self.xs[0].assign(x_in) + + for stage in range(self.solver_start_stage, self.nStages): - for stage in range(self.nStages): - self.solver = solver_list[stage] + self.solver = solver_list[stage-self.solver_start_stage] # Set initial solver guess - if (stage > 0): - 
self.x_out.assign(self.xs[stage-1]) - # Evaluate source terms - for evaluate in self.evaluate_source: - evaluate(self.xs[stage-1], self.dt, x_out=self.source[stage-1]) + self.x_out.assign(self.xs[stage-1]) + # Evaluate source terms + for evaluate in self.evaluate_source: + evaluate(self.xs[stage-1], self.dt, x_out=self.source[stage-1]) self.solver.solve() # Apply limiter diff --git a/gusto/time_discretisation/parallel_dc.py b/gusto/time_discretisation/parallel_dc.py new file mode 100644 index 000000000..0dfd52ef6 --- /dev/null +++ b/gusto/time_discretisation/parallel_dc.py @@ -0,0 +1,404 @@ +""" +Objects for discretising time derivatives using time-parallel Deferred Correction +Methods. + +This module inherits from the serial SDC and RIDC classes, and implements the +parallelisation of the SDC and RIDC methods using MPI. + +SDC parallelises across the quadrature nodes by using diagonal QDelta matrices, +while RIDC parallelises across the correction iterations by using a reduced stencil +and pipelining. +""" + +from firedrake import ( + Function +) +from gusto.time_discretisation.time_discretisation import wrapper_apply +from qmat import genQDeltaCoeffs +from gusto.time_discretisation.deferred_correction import SDC, RIDC +from gusto.core.logging import logger + +__all__ = ["Parallel_RIDC", "Parallel_SDC"] + + +class Parallel_RIDC(RIDC): + """Class for Parallel Revisionist Integral Deferred Correction schemes.""" + + def __init__(self, base_scheme, domain, M, K, J, output_freq, flush_freq=None, field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, + limiter=None, communicator=None): + """ + Initialise RIDC object + Args: + base_scheme (:class:`TimeDiscretisation`): Base time stepping scheme to get first guess of solution on + quadrature nodes. + domain (:class:`Domain`): the model's domain object, containing the + mesh and the compatible function spaces. 
+ M (int): Number of subintervals + K (int): Max number of correction iterations + J (int): Number of intervals + output_freq (int): Frequency at which output is done + flush_freq (int): Frequency at which to flush the pipeline + field_name (str, optional): name of the field to be evolved. + Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. + limiter (:class:`Limiter` object, optional): a limiter to apply to + the evolving field to enforce monotonicity. Defaults to None. + communicator (MPI communicator, optional): communicator for parallel execution. Defaults to None. + """ + + super(Parallel_RIDC, self).__init__(base_scheme, domain, M, K, field_name, + linear_solver_parameters, nonlinear_solver_parameters, + limiter, reduced=True) + self.comm = communicator + self.TAG_EXCHANGE_FIELD = 11  # Tag for sending nodal fields (Firedrake Functions) + self.TAG_EXCHANGE_SOURCE = self.TAG_EXCHANGE_FIELD + J  # Tag for sending nodal source fields (Firedrake Functions) + self.TAG_FLUSH_PIPE = self.TAG_EXCHANGE_SOURCE + J  # Tag for flushing pipe and restarting + self.TAG_FINAL_OUT = self.TAG_FLUSH_PIPE + J  # Tag for the final broadcast and output + self.TAG_END_INTERVAL = self.TAG_FINAL_OUT + J  # Tag for telling the rank above you that you have ended interval j + + if flush_freq is None: + self.flush_freq = 1 + else: + self.flush_freq = flush_freq + + self.J = J + self.step = 1 + self.output_freq = output_freq + + if self.flush_freq == 0 or (self.flush_freq != 0 and self.output_freq % self.flush_freq != 0): + logger.warn("Output on all parallel in time ranks will not be the same until end of run!") + + # Checks for parallel RIDC + if self.comm is None: + raise ValueError("No communicator provided. 
Please provide a valid MPI communicator.") + if self.comm.ensemble_comm.size != self.K + 1: + raise ValueError("Number of ranks must be equal to K+1 for Parallel RIDC.") + if self.M < self.K*(self.K+1)//2: + raise ValueError("Number of subintervals M must be at least K*(K+1)/2 for Parallel RIDC.") + + def setup(self, equation, apply_bcs=True, *active_labels): + """ + Set up the RIDC time discretisation based on the equation. + + Args: + equation (:class:`PrognosticEquation`): the model's equation. + apply_bcs (bool, optional): whether to apply the equation's boundary + conditions. Defaults to True. + *active_labels (:class:`Label`): labels indicating which terms of + the equation to include. + """ + super(Parallel_RIDC, self).setup(equation, apply_bcs, *active_labels) + + self.Uk_mp1 = Function(self.W) + self.Uk_m = Function(self.W) + self.Ukp1_m = Function(self.W) + + @wrapper_apply + def apply(self, x_out, x_in): + # Set up variables on this rank + x_out.assign(x_in) + self.kval = self.comm.ensemble_comm.rank + self.Un.assign(x_in) + self.Unodes[0].assign(self.Un) + # Loop through quadrature nodes and solve + self.Unodes1[0].assign(self.Unodes[0]) + for evaluate in self.evaluate_source: + evaluate(self.Unodes[0], self.base.dt, x_out=self.source_Uk[0]) + self.Uin.assign(self.Unodes[0]) + self.solver_rhs.solve() + self.fUnodes[0].assign(self.Urhs) + + # On first communicator, we do the predictor step + if (self.comm.ensemble_comm.rank == 0): + # Base timestepper + for m in range(self.M): + self.base.dt = float(self.dt) + self.base.apply(self.Unodes[m+1], self.Unodes[m]) + for evaluate in self.evaluate_source: + evaluate(self.Unodes[m+1], self.base.dt, x_out=self.source_Uk[m+1]) + + # Send base guess to k+1 correction + self.comm.send(self.Unodes[m+1], dest=self.kval+1, tag=self.TAG_EXCHANGE_FIELD + self.step) + self.comm.send(self.source_Uk[m+1], dest=self.kval+1, tag=self.TAG_EXCHANGE_SOURCE + self.step) + else: + for m in range(1, self.kval + 1): + # Receive 
and evaluate the stencil of guesses we need to correct + self.comm.recv(self.Unodes[m], source=self.kval-1, tag=self.TAG_EXCHANGE_FIELD + self.step) + self.comm.recv(self.source_Uk[m], source=self.kval-1, tag=self.TAG_EXCHANGE_SOURCE + self.step) + self.Uin.assign(self.Unodes[m]) + for evaluate in self.evaluate_source: + evaluate(self.Uin, self.base.dt, x_out=self.source_in) + self.solver_rhs.solve() + self.fUnodes[m].assign(self.Urhs) + for m in range(0, self.kval): + # Set S matrix + self.Q_.assign(self.compute_quad(self.Q[self.kval-1], self.fUnodes, m+1)) + + # Set initial guess for solver, and pick correct solver + self.U_start.assign(self.Unodes1[m]) + self.Ukp1_m.assign(self.Unodes1[m]) + self.Uk_mp1.assign(self.Unodes[m+1]) + self.Uk_m.assign(self.Unodes[m]) + self.source_Ukp1_m.assign(self.source_Ukp1[m]) + self.source_Uk_m.assign(self.source_Uk[m]) + self.U_DC.assign(self.Unodes[m+1]) + + # Compute + # y_m^(k+1) = y_(m-1)^(k+1) + dt*(F(y_(m)^(k+1)) - F(y_(m)^k) + # + S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) + # + sum(j=1,M) s_mj*(F+S)(y_j^k) + self.solver.solve() + self.Unodes1[m+1].assign(self.U_DC) + + # Evaluate source terms + for evaluate in self.evaluate_source: + evaluate(self.Unodes1[m+1], self.base.dt, x_out=self.source_Ukp1[m+1]) + + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.Unodes1[m+1]) + # Send our updated value to next communicator + if self.kval < self.K: + self.comm.send(self.Unodes1[m+1], dest=self.kval+1, tag=self.TAG_EXCHANGE_FIELD + self.step) + self.comm.send(self.source_Ukp1[m+1], dest=self.kval+1, tag=self.TAG_EXCHANGE_SOURCE + self.step) + + for m in range(self.kval, self.M): + # Receive the guess we need to correct and evaluate the rhs + self.comm.recv(self.Unodes[m+1], source=self.kval-1, tag=self.TAG_EXCHANGE_FIELD + self.step) + self.comm.recv(self.source_Uk[m+1], source=self.kval-1, tag=self.TAG_EXCHANGE_SOURCE + self.step) + self.Uin.assign(self.Unodes[m+1]) + for evaluate in 
self.evaluate_source: + evaluate(self.Uin, self.base.dt, x_out=self.source_in) + self.solver_rhs.solve() + self.fUnodes[m+1].assign(self.Urhs) + + # Set S matrix + self.Q_.assign(self.compute_quad_final(self.Q[self.kval-1], self.fUnodes, m+1)) + + # Set initial guess for solver, and pick correct solver + self.U_start.assign(self.Unodes1[m]) + self.Ukp1_m.assign(self.Unodes1[m]) + self.Uk_mp1.assign(self.Unodes[m+1]) + self.Uk_m.assign(self.Unodes[m]) + self.source_Ukp1_m.assign(self.source_Ukp1[m]) + self.source_Uk_m.assign(self.source_Uk[m]) + self.U_DC.assign(self.Unodes[m+1]) + + # y_m^(k+1) = y_(m-1)^(k+1) + dt*(F(y_(m)^(k+1)) - F(y_(m)^k) + # + S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) + # + sum(j=1,M) s_mj*(F+S)(y^k) + self.solver.solve() + self.Unodes1[m+1].assign(self.U_DC) + + # Evaluate source terms + for evaluate in self.evaluate_source: + evaluate(self.Unodes1[m+1], self.base.dt, x_out=self.source_Ukp1[m+1]) + + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.Unodes1[m+1]) + + # Send our updated value to next communicator + if self.kval < self.K: + self.comm.send(self.Unodes1[m+1], dest=self.kval+1, tag=self.TAG_EXCHANGE_FIELD + self.step) + self.comm.send(self.source_Ukp1[m+1], dest=self.kval+1, tag=self.TAG_EXCHANGE_SOURCE + self.step) + + for m in range(self.M+1): + self.Unodes[m].assign(self.Unodes1[m]) + self.source_Uk[m].assign(self.source_Ukp1[m]) + + if (self.flush_freq > 0 and self.step % self.flush_freq == 0) or self.step == self.J: + # Flush the pipe to ensure all ranks have the same data + if (self.kval == self.K): + x_out.assign(self.Unodes[-1]) + for i in range(self.K): + self.comm.send(x_out, dest=i, tag=self.TAG_FLUSH_PIPE + self.step) + else: + self.comm.recv(x_out, source=self.K, tag=self.TAG_FLUSH_PIPE + self.step) + else: + x_out.assign(self.Unodes[-1]) + + self.step += 1 + + +class Parallel_SDC(SDC): + """Class for Spectral Deferred Correction schemes.""" + + def __init__(self, base_scheme, domain, M, 
maxk, quad_type, node_type, qdelta_imp, qdelta_exp, + field_name=None, + linear_solver_parameters=None, nonlinear_solver_parameters=None, final_update=True, + limiter=None, options=None, initial_guess="base", communicator=None): + """ + Initialise SDC object + Args: + base_scheme (:class:`TimeDiscretisation`): Base time stepping scheme to get first guess of solution on + quadrature nodes. + domain (:class:`Domain`): the model's domain object, containing the + mesh and the compatible function spaces. + M (int): Number of quadrature nodes to compute spectral integration over + maxk (int): Max number of correction interations + quad_type (str): Type of quadrature to be used. Options are + GAUSS, RADAU-LEFT, RADAU-RIGHT and LOBATTO + node_type (str): Node type to be used. Options are + EQUID, LEGENDRE, CHEBY-1, CHEBY-2, CHEBY-3 and CHEBY-4 + qdelta_imp (str): Implicit Qdelta matrix to be used. Options are + BE, LU, TRAP, EXACT, PIC, OPT, WEIRD, MIN-SR-NS, MIN-SR-S + qdelta_exp (str): Explicit Qdelta matrix to be used. Options are + FE, EXACT, PIC + field_name (str, optional): name of the field to be evolved. + Defaults to None. + linear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying linear solver. Defaults to None. + nonlinear_solver_parameters (dict, optional): dictionary of parameters to + pass to the underlying nonlinear solver. Defaults to None. + final_update (bool, optional): Whether to compute final update, or just take last + quadrature value. Defaults to True + limiter (:class:`Limiter` object, optional): a limiter to apply to + the evolving field to enforce monotonicity. Defaults to None. + initial_guess (str, optional): Initial guess to be base timestepper, or copy + communicator (MPI communicator, optional): communicator for parallel execution. Defaults to None. 
+ """ + super().__init__(base_scheme, domain, M, maxk, quad_type, node_type, qdelta_imp, qdelta_exp, + formulation="Z2N", field_name=field_name, + linear_solver_parameters=linear_solver_parameters, nonlinear_solver_parameters=nonlinear_solver_parameters, + final_update=final_update, + limiter=limiter, initial_guess=initial_guess) + self.comm = communicator + + # Checks for parallel SDC + if self.comm is None: + raise ValueError("No communicator provided. Please provide a valid MPI communicator.") + if self.comm.ensemble_comm.size != self.M: + raise ValueError("Number of ranks must be equal to the number of nodes M for Parallel SDC.") + + def compute_quad(self): + """ + Computes integration of F(y) on quadrature nodes + """ + x = Function(self.W) + for j in range(self.M): + x.assign(float(self.Q[j, self.comm.ensemble_comm.rank])*self.fUnodes[self.comm.ensemble_comm.rank]) + self.comm.reduce(x, self.quad[j], root=j) + + def compute_quad_final(self): + """ + Computes final integration of F(y) on quadrature nodes + """ + x = Function(self.W) + x.assign(float(self.Qfin[self.comm.ensemble_comm.rank])*self.fUnodes[self.comm.ensemble_comm.rank]) + self.comm.allreduce(x, self.quad_final) + + @wrapper_apply + def apply(self, x_out, x_in): + self.Un.assign(x_in) + self.U_start.assign(self.Un) + solver_list = self.solvers + + # Compute initial guess on quadrature nodes with low-order + # base timestepper + self.Unodes[0].assign(self.Un) + if (self.base_flag): + for m in range(self.M): + self.base.dt = float(self.dtau[m]) + self.base.apply(self.Unodes[m+1], self.Unodes[m]) + else: + for m in range(self.M): + self.Unodes[m+1].assign(self.Un) + for m in range(self.M+1): + for evaluate in self.evaluate_source: + evaluate(self.Unodes[m], self.base.dt, x_out=self.source_Uk[m]) + + # Iterate through correction sweeps + k = 0 + while k < self.maxk: + k += 1 + + if self.qdelta_imp_type == "MIN-SR-FLEX": + # Recompute Implicit Q_delta matrix for each iteration k + self.Qdelta_imp = 
float(self.dt_coarse)*genQDeltaCoeffs( + self.qdelta_imp_type, + form=self.formulation, + nodes=self.nodes, + Q=self.Q, + nNodes=self.M, + nodeType=self.node_type, + quadType=self.quad_type, + k=k + ) + + # Compute for N2N: sum(j=1,M) (s_mj*F(y_m^k) + s_mj*S(y_m^k)) + # for Z2N: sum(j=1,M) (q_mj*F(y_m^k) + q_mj*S(y_m^k)) + self.Uin.assign(self.Unodes[self.comm.ensemble_comm.rank+1]) + # Include source terms + for evaluate in self.evaluate_source: + evaluate(self.Uin, self.base.dt, x_out=self.source_in) + self.solver_rhs.solve() + self.fUnodes[self.comm.ensemble_comm.rank].assign(self.Urhs) + + self.compute_quad() + + # Loop through quadrature nodes and solve + self.Unodes1[0].assign(self.Unodes[0]) + for evaluate in self.evaluate_source: + evaluate(self.Unodes[0], self.base.dt, x_out=self.source_Uk[0]) + + # Set Q or S matrix + self.Q_.assign(self.quad[self.comm.ensemble_comm.rank]) + + # Set initial guess for solver, and pick correct solver + self.solver = solver_list[self.comm.ensemble_comm.rank] + self.U_DC.assign(self.Unodes[self.comm.ensemble_comm.rank+1]) + + # Compute + # for N2N: + # y_m^(k+1) = y_(m-1)^(k+1) + dtau_m*(F(y_(m)^(k+1)) - F(y_(m)^k) + # + S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) + # + sum(j=1,M) s_mj*(F+S)(y^k) + # for Z2N: + # y_m^(k+1) = y^n + sum(j=1,m) Qdelta_imp[m,j]*(F(y_(m)^(k+1)) - F(y_(m)^k)) + # + sum(j=1,M) Q_delta_exp[m,j]*(S(y_(m-1)^(k+1)) - S(y_(m-1)^k)) + self.solver.solve() + self.Unodes1[self.comm.ensemble_comm.rank+1].assign(self.U_DC) + + # Evaluate source terms + for evaluate in self.evaluate_source: + evaluate(self.Unodes1[self.comm.ensemble_comm.rank+1], self.base.dt, x_out=self.source_Ukp1[self.comm.ensemble_comm.rank+1]) + + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.Unodes1[self.comm.ensemble_comm.rank+1]) + + self.Unodes[self.comm.ensemble_comm.rank+1].assign(self.Unodes1[self.comm.ensemble_comm.rank+1]) + 
self.source_Uk[self.comm.ensemble_comm.rank+1].assign(self.source_Ukp1[self.comm.ensemble_comm.rank+1]) + + if self.maxk > 0: + # Compute value at dt rather than final quadrature node tau_M + if self.final_update: + self.Uin.assign(self.Unodes1[self.comm.ensemble_comm.rank+1]) + self.source_in.assign(self.source_Ukp1[self.comm.ensemble_comm.rank+1]) + self.solver_rhs.solve() + self.fUnodes[self.comm.ensemble_comm.rank].assign(self.Urhs) + self.compute_quad_final() + # Compute y_(n+1) = y_n + sum(j=1,M) q_j*F(y_j) + if self.comm.ensemble_comm.rank == self.M-1: + self.U_fin.assign(self.Unodes[-1]) + self.comm.bcast(self.U_fin, self.M-1) + self.solver_fin.solve() + # Apply limiter if required + if self.limiter is not None: + self.limiter.apply(self.U_fin) + x_out.assign(self.U_fin) + else: + # Take value at final quadrature node dtau_M + if self.comm.ensemble_comm.rank == self.M-1: + x_out.assign(self.Unodes[-1]) + self.comm.bcast(x_out, self.M-1) + else: + # Take value at final quadrature node dtau_M + if self.comm.ensemble_comm.rank == self.M-1: + x_out.assign(self.Unodes[-1]) + self.comm.bcast(x_out, self.M-1) diff --git a/gusto/time_discretisation/time_discretisation.py b/gusto/time_discretisation/time_discretisation.py index 7d215e4f3..6e0a7e05e 100644 --- a/gusto/time_discretisation/time_discretisation.py +++ b/gusto/time_discretisation/time_discretisation.py @@ -17,12 +17,15 @@ from firedrake.utils import cached_property from gusto.core.configuration import EmbeddedDGOptions, RecoveryOptions -from gusto.core.labels import (time_derivative, prognostic, physics_label, - mass_weighted, nonlinear_time_derivative) +from gusto.core.labels import ( + time_derivative, prognostic, physics_label, mass_weighted, + nonlinear_time_derivative, all_but_last +) from gusto.core.logging import logger, DEBUG, logging_ksp_monitor_true_residual from gusto.time_discretisation.wrappers import * from gusto.solvers import mass_parameters + __all__ = ["TimeDiscretisation", 
"ExplicitTimeDiscretisation", "BackwardEuler", "ThetaMethod", "TrapeziumRule", "TR_BDF2"] @@ -230,8 +233,12 @@ def setup(self, equation, apply_bcs=True, *active_labels): # Check if there are any mass-weighted terms: if len(self.residual.label_map(lambda t: t.has_label(mass_weighted), map_if_false=drop)) > 0: - for field in equation.field_names: + if hasattr(self.augmentation, 'field_names'): + field_names = self.augmentation.field_names + else: + field_names = equation.field_names + for field in field_names: # Check if the mass term for this prognostic is mass-weighted if len(self.residual.label_map(( lambda t: t.get(prognostic) == field @@ -260,6 +267,7 @@ def setup(self, equation, apply_bcs=True, *active_labels): self.residual = self.residual.label_map( lambda t: t.get(prognostic) == field and t.has_label(mass_weighted), map_if_true=lambda t: t.get(mass_weighted)) + # -------------------------------------------------------------------- # # Set up Wrappers # -------------------------------------------------------------------- # @@ -327,6 +335,21 @@ def setup(self, equation, apply_bcs=True, *active_labels): all_terms, map_if_true=replace_test_function(new_test)) + # TODO: roll this out to other wrappers + # Replace test function in any all_but_last label + def replace_test_all_but_last(t): + old_abl_form = t.get(all_but_last) + new_abl_form = old_abl_form.label_map( + all_terms, + replace_test_function(new_test, old_idx=self.idx) + ) + return all_but_last(t, new_abl_form) + + self.residual = self.residual.label_map( + lambda t: t.has_label(all_but_last), + map_if_true=replace_test_all_but_last + ) + self.residual = self.wrapper.label_terms(self.residual) if self.solver_parameters is None: self.solver_parameters = self.wrapper.solver_parameters diff --git a/gusto/timestepping/semi_implicit_quasi_newton.py b/gusto/timestepping/semi_implicit_quasi_newton.py index c3c1af152..2de865fe8 100644 --- a/gusto/timestepping/semi_implicit_quasi_newton.py +++ 
b/gusto/timestepping/semi_implicit_quasi_newton.py @@ -9,7 +9,6 @@ ) from firedrake.fml import drop, replace_subject, Term from firedrake.__future__ import interpolate -from firedrake.petsc import flatten_parameters from pyop2.profiling import timed_stage, timed_function from gusto.core import TimeLevelFields, StateFields from gusto.core.labels import ( @@ -20,9 +19,11 @@ from gusto.core.logging import logger, DEBUG, logging_ksp_monitor_true_residual from gusto.time_discretisation.time_discretisation import ExplicitTimeDiscretisation from gusto.timestepping.timestepper import BaseTimestepper +from gusto.solvers.solver_presets import hybridised_solver_parameters +from gusto.equations.compressible_euler_equations import CompressibleEulerEquations -__all__ = ["SemiImplicitQuasiNewton", "Forcing"] +__all__ = ["SemiImplicitQuasiNewton", "Forcing", "QuasiNewtonLinearSolver"] class SemiImplicitQuasiNewton(BaseTimestepper): @@ -36,15 +37,17 @@ class SemiImplicitQuasiNewton(BaseTimestepper): """ def __init__(self, equation_set, io, transport_schemes, spatial_methods, - auxiliary_equations_and_schemes=None, linear_solver=None, - diffusion_schemes=None, physics_schemes=None, + auxiliary_equations_and_schemes=None, + diffusion_schemes=None, inner_physics_schemes=None, + final_physics_schemes=None, slow_physics_schemes=None, fast_physics_schemes=None, alpha=0.5, tau_values=None, off_centred_u=False, + physics_beta=0.5, num_outer=2, num_inner=2, accelerator=True, predictor=None, reference_update_freq=None, spinup_steps=0, solver_prognostics=None, linear_solver_parameters=None, - overwrite_linear_solver_parameters=False): + appctx=None): """ Args: equation_set (:class:`PrognosticEquationSet`): the prognostic @@ -58,12 +61,16 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, auxiliary_equations_and_schemes: iterable of ``(equation, scheme)`` pairs indicating any additional equations to be solved and the scheme to use to solve them. Defaults to None. 
- linear_solver (:class:`TimesteppingSolver`, optional): the object - to use for the linear solve. Defaults to None. diffusion_schemes (iter, optional): iterable of pairs of the form ``(field_name, scheme)`` indicating the fields to diffuse, and the :class:`~.TimeDiscretisation` to use. Defaults to None. - physics_schemes: (list, optional): a list of tuples of the form + inner_physics_schemes: (list, optional): a list of tuples of the form + (:class:`PhysicsParametrisation`, :class:`TimeDiscretisation`), + pairing physics parametrisations and timestepping schemes to use + for each. Timestepping schemes for physics must be explicit. + These schemes are all evaluated with forcing, both explicitly and + implicitly. Defaults to None. + final_physics_schemes: (list, optional): a list of tuples of the form (:class:`PhysicsParametrisation`, :class:`TimeDiscretisation`), pairing physics parametrisations and timestepping schemes to use for each. Timestepping schemes for physics must be explicit. @@ -86,6 +93,9 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, off_centred_u (bool, optional): option to offcentre the transporting velocity. Defaults to False, in which case transporting velocity is centred. If True offcentring uses value of alpha. + physics_beta (float, optional): the semi-implicit off-centering + parameter for physics schemes. A value of 1 corresponds to fully + implicit, while 0 corresponds to fully explicit. Defaults to 0.5. num_outer (int, optional): number of outer iterations in the semi- implicit algorithm. The outer loop includes transport and any fast physics schemes. Defaults to 2. Note that default used by @@ -124,10 +134,9 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, linear_solver_parameters (dict, optional): contains the options to be passed to the underlying :class:`LinearVariationalSolver`. Defaults to None. 
- overwrite_solver_parameters (bool, optional): if True use only the - `solver_parameters` that have been passed in. If False then - update the default parameters with the `solver_parameters` - passed in. Defaults to False. + appctx (dict, optional): a dictionary of application context for the + underlying :class:`LinearVariationalSolver`. + Defaults to None. """ # Basic parts of the SIQN structure ------------------------------------ @@ -152,6 +161,7 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, W = equation_set.function_space self.xrhs = Function(W) self.xrhs_phys = Function(W) + self.xrhs_inner_phys = Function(W) self.dy = Function(W) # Determine prognostics for solver ------------------------------------- @@ -191,15 +201,24 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, # BCs, Forcing and Linear Solver --------------------------------------- self.bcs = equation_set.bcs self.forcing = Forcing(equation_set, self.implicit_terms, self.alpha) - if linear_solver is None: - self.linear_solver = SIQNLinearSolver( - equation_set, solver_prognostics, self.implicit_terms, - self.alpha, tau_values=tau_values, - solver_parameters=linear_solver_parameters, - overwrite_solver_parameters=overwrite_linear_solver_parameters - ) + + if linear_solver_parameters is None: + self.linear_solver_parameters, self.appctx = \ + hybridised_solver_parameters( + equation_set, + solver_prognostics, + alpha=self.alpha, + tau_values=tau_values + ) else: - self.linear_solver = linear_solver + self.linear_solver_parameters = linear_solver_parameters + self.appctx = appctx + self.linear_solver = QuasiNewtonLinearSolver( + equation_set, solver_prognostics, self.implicit_terms, + self.alpha, tau_values=tau_values, + solver_parameters=self.linear_solver_parameters, + appctx=self.appctx + ) # Options relating to reference profiles and spin-up ------------------- self._alpha_original = float(alpha) # float so as to not upset adjoint @@ -247,8 
+266,13 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, f'No transport method found for variable {scheme.field_name}' # Physics schemes ------------------------------------------------------ - if physics_schemes is not None: - self.final_physics_schemes = physics_schemes + self.physics_beta = physics_beta + if inner_physics_schemes is not None: + self.inner_physics_schemes = inner_physics_schemes + else: + self.inner_physics_schemes = [] + if final_physics_schemes is not None: + self.final_physics_schemes = final_physics_schemes else: self.final_physics_schemes = [] if slow_physics_schemes is not None: @@ -261,7 +285,8 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, self.fast_physics_schemes = [] self.all_physics_schemes = (self.slow_physics_schemes + self.fast_physics_schemes - + self.final_physics_schemes) + + self.final_physics_schemes + + self.inner_physics_schemes) for parametrisation, scheme in self.all_physics_schemes: assert scheme.nlevels == 1, \ @@ -342,9 +367,11 @@ def setup_fields(self): self.x = TimeLevelFields(self.equation, 1) if self.simult is True: # If there is any simultaneous transport, add an extra 'simult' field: - self.x.add_fields(self.equation, levels=("star", "p", "simult", "after_slow", "after_fast")) + self.x.add_fields(self.equation, levels=("star", "p", "simult", "after_slow", "after_fast", + "implicit_phys", "explicit_phys")) else: - self.x.add_fields(self.equation, levels=("star", "p", "after_slow", "after_fast")) + self.x.add_fields(self.equation, levels=("star", "p", "after_slow", "after_fast", + "implicit_phys", "explicit_phys")) for aux_eqn, _ in self.auxiliary_equations_and_schemes: self.x.add_fields(aux_eqn) # Prescribed fields for auxiliary eqns should come from prognostics of @@ -379,8 +406,8 @@ def copy_active_tracers(self, x_in, x_out): time derivative term for that tracer has a linearisation. 
Args: - x_in: The input set of fields - x_out: The output set of fields + x_in: The input set of fields + x_out: The output set of fields """ for name in self.non_solver_prognostics: @@ -483,6 +510,8 @@ def timestep(self): x_after_fast = self.x.after_fast xrhs = self.xrhs xrhs_phys = self.xrhs_phys + x_explicit_phys = self.x.explicit_phys + x_implicit_phys = self.x.implicit_phys dy = self.dy # Update reference profiles -------------------------------------------- @@ -509,6 +538,19 @@ def timestep(self): # Put explicit forcing into xstar self.forcing.apply(x_after_slow, xn, xstar(self.field_name), "explicit") + # Explicit physics + if abs(self.physics_beta - 1.0) > 0.0: + for _, scheme in self.inner_physics_schemes: + logger.info("Semi-implicit Quasi-Newton: Explicit physics") + # Evaluate explict physics on xn + scheme.apply(x_explicit_phys(scheme.field_name), xn(scheme.field_name)) + # Compute increment from physics scheme + x_phys_inc = (x_explicit_phys(self.field_name) - xn(self.field_name)) + # Add (1-beta)*dt*P[xn] to xstar + xstar(self.field_name).assign( + xstar(self.field_name) + (1 - self.physics_beta)*x_phys_inc + ) + # set xp here so that variables that are not transported have # the correct values xp(self.field_name).assign(xstar(self.field_name)) @@ -544,6 +586,13 @@ def timestep(self): # Zero implicit forcing to accelerate solver convergence self.forcing.zero_non_wind_terms(self.equation, xnp1, xrhs, self.equation.field_names) + # Implicit physics + if abs(self.physics_beta) > 0.0: + for _, scheme in self.inner_physics_schemes: + logger.info(f'Semi-implicit Quasi Newton: Implicit physics {(outer, inner)}') + scheme.apply(x_implicit_phys(self.field_name), xnp1(self.field_name)) + xrhs += self.physics_beta*(x_implicit_phys(self.field_name) - xnp1(self.field_name)) + xrhs -= xnp1(self.field_name) # Linear solve ------------------------------------------------- @@ -761,7 +810,7 @@ def zero_non_wind_terms(self, equation, x_in, x_out, field_names): 
x_out.subfunctions[field_index].assign(x_in(field_name)) -class SIQNLinearSolver(object): +class QuasiNewtonLinearSolver(object): """ Sets up the linear solver for the Semi-Implicit Quasi-Newton timestepper. @@ -770,26 +819,12 @@ class SIQNLinearSolver(object): Quasi-Newton problem. """ - solver_parameters = { - 'ksp_type': 'preonly', - 'mat_type': 'matfree', - 'pc_type': 'python', - 'pc_python_type': 'firedrake.HybridizationPC', - 'hybridization': {'ksp_type': 'cg', - 'pc_type': 'gamg', - 'ksp_rtol': 1e-8, - 'mg_levels': {'ksp_type': 'chebyshev', - 'ksp_max_it': 2, - 'pc_type': 'bjacobi', - 'sub_pc_type': 'ilu'}} - } - def __init__(self, equation, solver_prognostics, implicit_terms, - alpha, tau_values=None, solver_parameters=None, - overwrite_solver_parameters=False, reference_dependent=True): + alpha, solver_parameters, tau_values=None, + reference_dependent=True, appctx=None): """ Args: - equations (:class:`PrognosticEquation`): the model's equation. + equation (:class:`PrognosticEquation`): the model's equation. solver_prognostics (list): a list of prognostic variable names to include in the linear solver. implicit_terms (list): a list of labels for terms that are always @@ -808,18 +843,19 @@ def __init__(self, equation, solver_prognostics, implicit_terms, passed in. Defaults to False. reference_dependent: this indicates that the solver Jacobian should be rebuilt if the reference profiles have been updated. + appctx: appctx for the underlying :class:`LinearVariationalSolver`. 
""" - # Update or set solver parameters -------------------------------------- - if solver_parameters is not None: - if not overwrite_solver_parameters: - p = flatten_parameters(self.solver_parameters) - p.update(flatten_parameters(solver_parameters)) - solver_parameters = p - self.solver_parameters = solver_parameters + # Set solver parameters -------------------------------------- + self.solver_parameters = solver_parameters - if logger.isEnabledFor(DEBUG): - self.solver_parameters["ksp_monitor_true_residual"] = None + self.equation = equation + self.solver_prognostics = solver_prognostics + + if appctx is not None: + self.appctx = appctx + else: + self.appctx = {} dt = equation.domain.dt self.reference_dependent = reference_dependent @@ -883,23 +919,47 @@ def __init__(self, equation, solver_prognostics, implicit_terms, ] problem = LinearVariationalProblem( aeqn.form, action(Leqn.form, self.xrhs), self.dy, bcs=bcs, - constant_jacobian=True) + constant_jacobian=True + ) self.solver = LinearVariationalSolver( - problem, solver_parameters=self.solver_parameters, + problem, appctx=self.appctx, + solver_parameters=self.solver_parameters, options_prefix='linear_solver' ) - @staticmethod - def log_ksp_residuals(ksp): if logger.isEnabledFor(DEBUG): - ksp.setMonitor(logging_ksp_monitor_true_residual) + self.solver.snes.ksp.setMonitor(logging_ksp_monitor_true_residual) @timed_function("Gusto:UpdateReferenceProfiles") def update_reference_profiles(self): if self.reference_dependent: + self.equation.update_reference_profiles() self.solver.invalidate_jacobian() + # TODO: Issue #686 is to address this reference profile update bug (pythonPC update not called) + # this line forces it to update for now + pc = self.solver.snes.getKSP().getPC() + if (isinstance(self.equation, CompressibleEulerEquations) and pc.getType() == "python"): + pc.getPythonContext().update(pc) + + def zero_non_prognostics(self, equation, xrhs, field_names, prognostic_names): + """ + Zero rhs term F(x) for 
non-prognostics. + + Args: + equation (:class:`PrognosticEquationSet`): the prognostic + equation set to be solved + xrhs (:class:`FieldCreator`): the field to be incremented. + field_names (str): list of fields names for prognostic fields + """ + for field_name in field_names: + + if field_name not in prognostic_names: + logger.debug(f'Semi-Implicit Quasi Newton: Zeroing xrhs for {field_name}') + field_index = equation.field_names.index(field_name) + xrhs.subfunctions[field_index].assign(0.0) + @timed_function("Gusto:LinearSolve") def solve(self, xrhs, dy): """ @@ -911,6 +971,13 @@ def solve(self, xrhs, dy): dy (:class:`Function`): the resulting increment field in the appropriate :class:`MixedFunctionSpace`. """ + self.xrhs.assign(xrhs) + self.zero_non_prognostics(self.equation, self.xrhs, + self.equation.field_names, + self.solver_prognostics) + self.dy.assign(0.0) + self.solver.solve() + dy.assign(self.dy) diff --git a/gusto/timestepping/split_timestepper.py b/gusto/timestepping/split_timestepper.py index 2006c73ac..1ce2ad3fd 100644 --- a/gusto/timestepping/split_timestepper.py +++ b/gusto/timestepping/split_timestepper.py @@ -213,6 +213,16 @@ def transporting_velocity(self): def setup_scheme(self): self.setup_equation(self.equation) + + if self.scheme.augmentation is not None: + if hasattr(self.scheme.augmentation, 'setup_residual') and callable(self.scheme.augmentation.setup_residual): + # In the augmentation residual, + dynamics = Label('dynamics') + self.scheme.augmentation.residual = self.scheme.augmentation.residual.label_map( + lambda t: not any(t.has_label(time_derivative, physics_label)), + map_if_true=lambda t: dynamics(t) + ) + # Go through and label all non-physics terms with a "dynamics" label dynamics = Label('dynamics') self.equation.label_terms(lambda t: not any(t.has_label(time_derivative, physics_label)), dynamics) @@ -297,11 +307,24 @@ def transporting_velocity(self): def setup_scheme(self): self.setup_equation(self.equation) + + if 
self.scheme.augmentation is not None: + if hasattr(self.scheme.augmentation, 'setup_residual') and callable(self.scheme.augmentation.setup_residual): + # In the augmentation residual, + # go through and label all non-physics terms with a "dynamics" label + dynamics = Label('dynamics') + self.scheme.augmentation.residual = self.scheme.augmentation.residual.label_map( + lambda t: not any(t.has_label(time_derivative, physics_label)), + map_if_true=lambda t: dynamics(t) + ) + # Go through and label all non-physics terms with a "dynamics" label dynamics = Label('dynamics') self.equation.label_terms(lambda t: not any(t.has_label(time_derivative, physics_label)), dynamics) + apply_bcs = True self.scheme.setup(self.equation, apply_bcs, dynamics) + self.setup_transporting_velocity(self.scheme) if self.io.output.log_courant: self.scheme.courant_max = self.io.courant_max diff --git a/gusto/timestepping/timestepper.py b/gusto/timestepping/timestepper.py index d7c80391b..a16950749 100644 --- a/gusto/timestepping/timestepper.py +++ b/gusto/timestepping/timestepper.py @@ -111,6 +111,13 @@ def setup_equation(self, equation): for method in self.spatial_methods: method.replace_form(equation) + # If we have an augmentation with a separate residual to set up, + # do so now that we have replaced the transport forms. 
+ if hasattr(self, 'scheme'): + if self.scheme.augmentation is not None: + if hasattr(self.scheme.augmentation, 'setup_residual') and callable(self.scheme.augmentation.setup_residual): + self.scheme.augmentation.setup_residual(self.equation) + def setup_transporting_velocity(self, scheme): """ Set up the time discretisation by replacing the transporting velocity @@ -351,6 +358,8 @@ def setup_scheme(self): self.setup_equation(self.equation) self.scheme.setup(self.equation) self.setup_transporting_velocity(self.scheme) + if hasattr(self.scheme, 'base'): + self.setup_transporting_velocity(self.scheme.base) if self.io.output.log_courant: self.scheme.courant_max = self.io.courant_max diff --git a/gusto/timestepping/tr_bdf2_quasi_newton.py b/gusto/timestepping/tr_bdf2_quasi_newton.py index 3e418746a..6ec11551d 100644 --- a/gusto/timestepping/tr_bdf2_quasi_newton.py +++ b/gusto/timestepping/tr_bdf2_quasi_newton.py @@ -10,11 +10,11 @@ from pyop2.profiling import timed_stage from gusto.core import TimeLevelFields, StateFields from gusto.core.labels import (transport, diffusion, sponge, incompressible) -from gusto.solvers import LinearTimesteppingSolver from gusto.core.logging import logger from gusto.time_discretisation.time_discretisation import ExplicitTimeDiscretisation from gusto.timestepping.timestepper import BaseTimestepper -from gusto.timestepping.semi_implicit_quasi_newton import Forcing +from gusto.timestepping.semi_implicit_quasi_newton import Forcing, QuasiNewtonLinearSolver +from gusto.solvers.solver_presets import hybridised_solver_parameters __all__ = ["TRBDF2QuasiNewton"] @@ -38,9 +38,16 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, slow_physics_schemes=None, fast_physics_schemes=None, gamma=(1-sqrt(2)/2), + tau_values_tr=None, + tau_values_bdf=None, num_outer_tr=2, num_inner_tr=2, num_outer_bdf=2, num_inner_bdf=2, reference_update_freq=None, + solver_prognostics=None, + tr_solver_parameters=None, + tr_appctx=None, + 
bdf_solver_parameters=None, + bdf_appctx=None ): """ Args: @@ -77,6 +84,14 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, timestep between 0 and 0.5. A value of 0.5 corresponds to a SIQN scheme with alpha = 0.5. Defaults to 1 - sqrt(2)/2, which makes the scheme L-stable. + tau_values_tr (dict, optional): a dictionary with keys the names of + prognostic variables and values the tau values to use for each + variable. Defaults to None, in which case the value of gamma + is used. + tau_values_bdf (dict, optional): a dictionary with keys the names of + prognostic variables and values the tau values to use for each + variable. Defaults to None, in which case the value of gamma2 + is used. num_outer_tr (int, optional): number of outer iterations in the trapeziodal step. The outer loop includes transport and any fast physics schemes. Defaults to 2. @@ -98,6 +113,24 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, time step. Setting it to None turns off the update, and reference profiles will remain at their initial values. Defaults to None. + solver_prognostics (list, optional): a list of the names of + prognostic variables to include in the solver. Defaults to None, + in which case all prognostics that aren't active tracers are + included in the solver. + tr_solver_parameters (dict, optional): contains the options to + be passed to the underlying :class:`LinearVariationalSolver` for the + trapezoidal step. + Defaults to None. + tr_appctx (dict, optional): a dictionary of application context for the + underlying :class:`LinearVariationalSolver` for the trapezoidal step. + Defaults to None. + bdf_solver_parameters (dict, optional): contains the options to + be passed to the underlying :class:`LinearVariationalSolver` for the + BDF2 step. + Defaults to None. + bdf_appctx (dict, optional): a dictionary of application context for the + underlying :class:`LinearVariationalSolver` for the BDF2 step. + Defaults to None. 
""" self.num_outer_tr = num_outer_tr @@ -111,6 +144,7 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, self.gamma = Function(R, val=float(gamma)) self.gamma2 = Function(R, val=((1 - 2*float(gamma))/(2 - 2*float(gamma)))) self.gamma3 = Function(R, val=((1-float(self.gamma2))/(2*float(gamma)))) + self.implicit_terms = [incompressible, sponge] # Options relating to reference profiles self.reference_update_freq = reference_update_freq @@ -121,6 +155,40 @@ def __init__(self, equation_set, io, transport_schemes, spatial_methods, self.implicit_terms = [incompressible, sponge] self.spatial_methods = spatial_methods + # Determine prognostics for solver ------------------------------------- + self.non_solver_prognostics = [] + if solver_prognostics is not None: + assert type(solver_prognostics) is list, ( + "solver_prognostics should be a list of prognostic variable " + + f"names, and not {type(solver_prognostics)}" + ) + for prognostic_name in solver_prognostics: + assert prognostic_name in equation_set.field_names, ( + f"Prognostic variable {prognostic_name} not found in " + + "equation set field names" + ) + for field_name in equation_set.field_names: + if field_name not in solver_prognostics: + self.non_solver_prognostics.append(field_name) + else: + # Remove any active tracers by default + solver_prognostics = [] + if equation_set.active_tracers is not None: + self.non_solver_prognostics = [ + tracer.name for tracer in equation_set.active_tracers + ] + for field_name in equation_set.field_names: + if field_name not in self.non_solver_prognostics: + solver_prognostics.append(field_name) + else: + # No active tracers so prognostics are field names + solver_prognostics = equation_set.field_names.copy() + + logger.info( + 'TR-BDF2 Quasi-Newton: Using solver prognostics ' + + f'{solver_prognostics}' + ) + if physics_schemes is not None: self.final_physics_schemes = physics_schemes else: @@ -189,12 +257,44 @@ def __init__(self, equation_set, io, 
transport_schemes, spatial_methods, self.dy = Function(W) if tr_solver is None: - self.tr_solver = LinearTimesteppingSolver(equation_set, self.gamma) + if tr_solver_parameters is None: + self.tr_solver_parameters, self.tr_appctx = \ + hybridised_solver_parameters( + equation_set, + solver_prognostics, + alpha=self.gamma, + tau_values=tau_values_tr + ) + else: + self.tr_solver_parameters = tr_solver_parameters + self.tr_appctx = tr_appctx + self.tr_solver = QuasiNewtonLinearSolver( + equation_set, solver_prognostics, self.implicit_terms, + self.gamma, tau_values=tau_values_tr, + solver_parameters=self.tr_solver_parameters, + appctx=self.tr_appctx + ) else: self.tr_solver = tr_solver if bdf_solver is None: - self.bdf_solver = LinearTimesteppingSolver(equation_set, self.gamma2) + if bdf_solver_parameters is None: + self.bdf_solver_parameters, self.bdf_appctx = \ + hybridised_solver_parameters( + equation_set, + solver_prognostics, + alpha=self.gamma2, + tau_values=tau_values_bdf + ) + else: + self.bdf_solver_parameters = bdf_solver_parameters + self.bdf_appctx = bdf_appctx + self.bdf_solver = QuasiNewtonLinearSolver( + equation_set, solver_prognostics, self.implicit_terms, + self.gamma2, tau_values=tau_values_bdf, + solver_parameters=self.bdf_solver_parameters, + appctx=self.bdf_appctx + ) else: self.bdf_solver = bdf_solver @@ -289,7 +389,8 @@ def update_reference_profiles(self): if float(self.t) + self.reference_update_freq > self.last_ref_update_time: self.equation.X_ref.assign(self.x.n(self.field_name)) self.last_ref_update_time = float(self.t) - self.linear_solver.update_reference_profiles() + self.tr_solver.update_reference_profiles() + self.bdf_solver.update_reference_profiles() elif self.to_update_ref_profile: self.tr_solver.update_reference_profiles() diff --git a/integration-tests/balance/test_boussinesq_balance.py b/integration-tests/balance/test_boussinesq_balance.py index 21e55d9e4..53a0b35c4 100644 --- 
a/integration-tests/balance/test_boussinesq_balance.py +++ b/integration-tests/balance/test_boussinesq_balance.py @@ -46,13 +46,9 @@ def setup_balance(dirname): DGUpwind(eqns, 'p'), DGUpwind(eqns, 'b')] - # Set up linear solver - linear_solver = BoussinesqSolver(eqns) - # build time stepper stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) + transport_methods) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/balance/test_compressible_balance.py b/integration-tests/balance/test_compressible_balance.py index 29edcfa8d..544471a20 100644 --- a/integration-tests/balance/test_compressible_balance.py +++ b/integration-tests/balance/test_compressible_balance.py @@ -46,13 +46,9 @@ def setup_balance(dirname): DGUpwind(eqns, 'rho'), DGUpwind(eqns, 'theta')] - # Set up linear solver - linear_solver = CompressibleSolver(eqns) - # build time stepper stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, - transport_methods, - linear_solver=linear_solver) + transport_methods) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/balance/test_saturated_balance.py b/integration-tests/balance/test_saturated_balance.py index 42124bcb6..a5f2b63e6 100644 --- a/integration-tests/balance/test_saturated_balance.py +++ b/integration-tests/balance/test_saturated_balance.py @@ -90,17 +90,13 @@ def setup_saturated(dirname, recovered): DGUpwind(eqns, 'water_vapour'), DGUpwind(eqns, 'cloud_water')] - # Linear solver - linear_solver = CompressibleSolver(eqns) - # Physics schemes physics_schemes = [(SaturationAdjustment(eqns), ForwardEuler(domain))] # Time stepper stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, - physics_schemes=physics_schemes) + final_physics_schemes=physics_schemes) # 
------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/balance/test_unsaturated_balance.py b/integration-tests/balance/test_unsaturated_balance.py index d3a6463f8..5bae4b188 100644 --- a/integration-tests/balance/test_unsaturated_balance.py +++ b/integration-tests/balance/test_unsaturated_balance.py @@ -85,17 +85,13 @@ def setup_unsaturated(dirname, recovered): DGUpwind(eqns, 'water_vapour'), DGUpwind(eqns, 'cloud_water')] - # Linear solver - linear_solver = CompressibleSolver(eqns) - # Set up physics physics_schemes = [(SaturationAdjustment(eqns), ForwardEuler(domain))] # Time stepper stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, - physics_schemes=physics_schemes) + final_physics_schemes=physics_schemes) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/conftest.py b/integration-tests/conftest.py index 02b1addfc..7fce74377 100644 --- a/integration-tests/conftest.py +++ b/integration-tests/conftest.py @@ -17,9 +17,13 @@ def tracer_sphere(tmpdir, degree, small_dt): radius = 1 - mesh = IcosahedralSphereMesh(radius=radius, - refinement_level=3, - degree=1) + + dirname = str(tmpdir) + mesh = IcosahedralSphereMesh( + radius=radius, + refinement_level=3, + degree=1 + ) x = SpatialCoordinate(mesh) # Parameters chosen so that dt != 1 @@ -30,7 +34,7 @@ def tracer_sphere(tmpdir, degree, small_dt): else: dt = pi/3. * 0.02 - output = OutputParameters(dirname=str(tmpdir), dumpfreq=15) + output = OutputParameters(dirname=dirname, dumpfreq=15) domain = Domain(mesh, dt, family="BDM", degree=degree) io = IO(domain, output) @@ -49,6 +53,8 @@ def tracer_sphere(tmpdir, degree, small_dt): def tracer_slice(tmpdir, degree, small_dt): n = 30 if degree == 0 else 15 + + dirname = str(tmpdir) m = PeriodicIntervalMesh(n, 1.) 
mesh = ExtrudedMesh(m, layers=n, layer_height=1./n) @@ -61,7 +67,7 @@ def tracer_slice(tmpdir, degree, small_dt): else: dt = 0.01 tmax = 0.75 - output = OutputParameters(dirname=str(tmpdir), dumpfreq=25) + output = OutputParameters(dirname=dirname, dumpfreq=25) domain = Domain(mesh, dt, family="CG", degree=degree) io = IO(domain, output) @@ -89,10 +95,11 @@ def tracer_blob_slice(tmpdir, degree, small_dt): else: dt = 0.01 L = 10. + dirname = str(tmpdir) m = PeriodicIntervalMesh(10, L) mesh = ExtrudedMesh(m, layers=10, layer_height=1.) - output = OutputParameters(dirname=str(tmpdir), dumpfreq=25) + output = OutputParameters(dirname=dirname, dumpfreq=25) domain = Domain(mesh, dt, family="CG", degree=degree) io = IO(domain, output) diff --git a/integration-tests/data/boussinesq_compressible_chkpt.h5 b/integration-tests/data/boussinesq_compressible_chkpt.h5 index f6bb26545..0ade4b1bc 100644 Binary files a/integration-tests/data/boussinesq_compressible_chkpt.h5 and b/integration-tests/data/boussinesq_compressible_chkpt.h5 differ diff --git a/integration-tests/data/boussinesq_incompressible_chkpt.h5 b/integration-tests/data/boussinesq_incompressible_chkpt.h5 index a1a77ff05..b751ef143 100644 Binary files a/integration-tests/data/boussinesq_incompressible_chkpt.h5 and b/integration-tests/data/boussinesq_incompressible_chkpt.h5 differ diff --git a/integration-tests/data/simult_SIQN_order0_chkpt.h5 b/integration-tests/data/simult_SIQN_order0_chkpt.h5 index ed6c84568..2403a1c7c 100644 Binary files a/integration-tests/data/simult_SIQN_order0_chkpt.h5 and b/integration-tests/data/simult_SIQN_order0_chkpt.h5 differ diff --git a/integration-tests/data/simult_SIQN_order1_chkpt.h5 b/integration-tests/data/simult_SIQN_order1_chkpt.h5 index ddab0c1e1..b7a77761f 100644 Binary files a/integration-tests/data/simult_SIQN_order1_chkpt.h5 and b/integration-tests/data/simult_SIQN_order1_chkpt.h5 differ diff --git a/integration-tests/data/sw_fplane_chkpt.h5 
b/integration-tests/data/sw_fplane_chkpt.h5 index 3eb6f4dc8..55a7970db 100644 Binary files a/integration-tests/data/sw_fplane_chkpt.h5 and b/integration-tests/data/sw_fplane_chkpt.h5 differ diff --git a/integration-tests/equations/test_boussinesq.py b/integration-tests/equations/test_boussinesq.py index ed1a4f552..7deb40604 100644 --- a/integration-tests/equations/test_boussinesq.py +++ b/integration-tests/equations/test_boussinesq.py @@ -52,19 +52,17 @@ def run_boussinesq(tmpdir, compressible): transport_methods = [DGUpwind(eqn, "u"), DGUpwind(eqn, "p"), DGUpwind(eqn, "b", ibp=b_opts.ibp)] + solver_prognostics = ['u', 'p', 'b'] else: transported_fields = [TrapeziumRule(domain, "u"), SSPRK3(domain, "b", options=b_opts)] transport_methods = [DGUpwind(eqn, "u"), DGUpwind(eqn, "b", ibp=b_opts.ibp)] - - # Linear solver - linear_solver = BoussinesqSolver(eqn) + solver_prognostics = ['u', 'b'] # Time stepper stepper = SemiImplicitQuasiNewton(eqn, io, transported_fields, - transport_methods, - linear_solver=linear_solver) + transport_methods, solver_prognostics=solver_prognostics) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/equations/test_dry_compressible.py b/integration-tests/equations/test_dry_compressible.py index f1d24c014..aca22f198 100644 --- a/integration-tests/equations/test_dry_compressible.py +++ b/integration-tests/equations/test_dry_compressible.py @@ -46,13 +46,9 @@ def run_dry_compressible(tmpdir): DGUpwind(eqn, 'rho'), DGUpwind(eqn, 'theta')] - # Linear solver - linear_solver = CompressibleSolver(eqn) - # Time stepper stepper = SemiImplicitQuasiNewton(eqn, io, transported_fields, transport_methods, - linear_solver=linear_solver, num_outer=4, num_inner=1) # ------------------------------------------------------------------------ # diff --git a/integration-tests/equations/test_moist_compressible.py b/integration-tests/equations/test_moist_compressible.py index 
6b8d937f7..5a9fa39cc 100644 --- a/integration-tests/equations/test_moist_compressible.py +++ b/integration-tests/equations/test_moist_compressible.py @@ -47,13 +47,9 @@ def run_moist_compressible(tmpdir): DGUpwind(eqn, "rho"), DGUpwind(eqn, "theta")] - # Linear solver - linear_solver = CompressibleSolver(eqn) - # Time stepper stepper = SemiImplicitQuasiNewton(eqn, io, transported_fields, transport_methods, - linear_solver=linear_solver, num_outer=4, num_inner=1) # ------------------------------------------------------------------------ # diff --git a/integration-tests/model/test_TR-BDF2.py b/integration-tests/model/test_TR-BDF2.py index 7183e8fdc..167ad55d4 100644 --- a/integration-tests/model/test_TR-BDF2.py +++ b/integration-tests/model/test_TR-BDF2.py @@ -87,18 +87,11 @@ def run_TR_BDF2(tmpdir, order): ["u", "rho", "theta", "water_vapour", "cloud_water"] ] - # Linear solvers + # Timestepper gamma = (1-sqrt(2)/2) - gamma2 = (1 - 2*float(gamma))/(2 - 2*float(gamma)) - - tr_solver = CompressibleSolver(eqns, alpha=gamma) - bdf_solver = CompressibleSolver(eqns, alpha=gamma2) - stepper = TRBDF2QuasiNewton( eqns, io, transported_fields, transport_methods, - gamma=gamma, - tr_solver=tr_solver, - bdf_solver=bdf_solver + gamma=gamma ) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/integration-tests/model/test_checkpointing.py b/integration-tests/model/test_checkpointing.py index 09bb5ff16..301a0b953 100644 --- a/integration-tests/model/test_checkpointing.py +++ b/integration-tests/model/test_checkpointing.py @@ -34,13 +34,9 @@ def set_up_model_objects(mesh, dt, output, stepper_type, ref_update_freq): SSPRK3(domain, "rho"), SSPRK3(domain, "theta")] - # Set up linear solver - linear_solver = CompressibleSolver(eqns) - # build time stepper stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, reference_update_freq=ref_update_freq) elif stepper_type == 
'multi_level': diff --git a/integration-tests/model/test_sdc.py b/integration-tests/model/test_deferred_correction.py similarity index 68% rename from integration-tests/model/test_sdc.py rename to integration-tests/model/test_deferred_correction.py index fd1dba068..7db9051f1 100644 --- a/integration-tests/model/test_sdc.py +++ b/integration-tests/model/test_deferred_correction.py @@ -1,11 +1,14 @@ """ -This runs a simple transport test on the sphere using the SDC time discretisations to +This runs a simple transport test on the sphere using the DC time discretisations to test whether the errors are within tolerance. The test is run for the following schemes: - IMEX_SDC_Le(1,1) - IMEX SDC with 1 quadrature node of Gauss type (2nd order scheme) - IMEX_SDC_R(2,2) - IMEX SDC with 2 qaudrature nodes of Radau type (3rd order scheme) using LU decomposition for the implicit update - BE_SDC_Lo(3,3) - Implicit SDC with 3 quadrature nodes of Lobatto type (4th order scheme). - FE_SDC_Le(3,5) - Explicit SDC with 3 quadrature nodes of Gauss type (6th order scheme). +- IMEX_RIDC_R(3) - IMEX RIDC with 4 quadrature nodes of equidistant type, reduced stencils (3rd order scheme). +- BE_RIDC(4) - Implicit RIDC with 3 quadrature nodes of equidistant type, full stencils (4th order scheme). +- FE_RIDC(4) - Explicit RIDC with 3 quadrature nodes of equidistant type, full stencils (4th order scheme). 
""" from firedrake import norm @@ -19,8 +22,9 @@ def run(timestepper, tmax, f_end): @pytest.mark.parametrize( - "scheme", ["IMEX_SDC_Le(1,1)", "IMEX_SDC_R(2,2)", "BE_SDC_Lo(3,3)", "FE_SDC_Le(3,5)"]) -def test_sdc(tmpdir, scheme, tracer_setup): + "scheme", ["IMEX_SDC_Le(1,1)", "IMEX_SDC_R(2,2)", "BE_SDC_Lo(3,3)", "FE_SDC_Le(3,5)", "IMEX_RIDC_R(3)", + "BE_RIDC(4)", "FE_RIDC(4)"]) +def test_dc(tmpdir, scheme, tracer_setup): geometry = "sphere" setup = tracer_setup(tmpdir, geometry) domain = setup.domain @@ -66,11 +70,33 @@ def test_sdc(tmpdir, scheme, tracer_setup): elif scheme == "FE_SDC_Le(3,5)": quad_type = "GAUSS" M = 3 - k = 4 + k = 5 eqn.label_terms(lambda t: not t.has_label(time_derivative), explicit) base_scheme = ForwardEuler(domain) scheme = SDC(base_scheme, domain, M, k, quad_type, node_type, qdelta_imp, qdelta_exp, final_update=True, initial_guess="base") + elif scheme == "IMEX_RIDC_R(3)": + k = 2 + M = k*(k+1)//2 + 1 + eqn = ContinuityEquation(domain, V, "f") + # Split continuity term + eqn = split_continuity_form(eqn) + eqn.label_terms(lambda t: not any(t.has_label(time_derivative, transport)), implicit) + eqn.label_terms(lambda t: t.has_label(transport), explicit) + base_scheme = IMEX_Euler(domain) + scheme = RIDC(base_scheme, domain, M, k, reduced=True) + elif scheme == "BE_RIDC(4)": + k = 3 + M = 3 + eqn.label_terms(lambda t: not t.has_label(time_derivative), implicit) + base_scheme = BackwardEuler(domain) + scheme = RIDC(base_scheme, domain, M, k, reduced=False) + elif scheme == "FE_RIDC(4)": + M = 3 + k = 3 + eqn.label_terms(lambda t: not t.has_label(time_derivative), explicit) + base_scheme = ForwardEuler(domain) + scheme = RIDC(base_scheme, domain, M, k, reduced=False) transport_method = DGUpwind(eqn, 'f') diff --git a/integration-tests/model/test_inner_loop_physics.py b/integration-tests/model/test_inner_loop_physics.py new file mode 100644 index 000000000..8404b4dac --- /dev/null +++ b/integration-tests/model/test_inner_loop_physics.py @@ -0,0 
+1,259 @@ +""" +Testing inner loop physics and the MoistThermalSWSolver. +Based on the moist thermal gravity wave test. Takes 3 timesteps with inner +loop physics and compares this to the solution using standard, end-of-timestep +physics and the ThermalSWSolver. +""" + +from gusto import * +from firedrake import (IcosahedralSphereMesh, SpatialCoordinate, pi, sqrt, + min_value, cos, sin, Constant, exp, Function, dx) +import numpy as np + + +def set_up_model_objects(mesh, dt, q0, beta2, nu, physics_type, output): + + # ----------------------------------------------------------------- # + # Test case parameters + # ----------------------------------------------------------------- # + + physics_beta = 0.5 + H = 5960. + + # moist shallow water parameters + gamma_v = 1 + + # Domain + domain = Domain(mesh, dt, "BDM", degree=1) + + # Equation parameters + parameters = ShallowWaterParameters(mesh, H=H, nu=nu, + beta2=beta2, q0=q0) + + # Equation + tracers = [WaterVapour(space='DG'), CloudWater(space='DG')] + if physics_type == "final": + eqns = ThermalShallowWaterEquations(domain, parameters, + active_tracers=tracers) + elif physics_type == "inner_loop": + eqns = ThermalShallowWaterEquations(domain, parameters, + active_tracers=tracers) + + _, Dbar, bbar, qvbar, _ = eqns.X_ref.subfunctions[::] + _, D, b, qv, _ = eqns.X.subfunctions[::] + _, _, lamda, tau1, tau2 = eqns.tests[::] + + # Get parameters from equation + q0 = eqns.parameters.q0 + nu = eqns.parameters.nu + beta2 = eqns.parameters.beta2 + g = eqns.parameters.g + H = eqns.parameters.H + b_ebar = bbar - beta2*qvbar + sat_expr = q0*H/(Dbar) * exp(nu*(1 - b_ebar/g)) + P_expr = qv - sat_expr*(-D/Dbar - b*nu/g + qv*nu*beta2/g) + phys_b_form = physics_beta * lamda * beta2 * P_expr * dx + phys_qv_form = physics_beta * tau1 * P_expr * dx + phys_qc_form = -physics_beta * tau2 * P_expr * dx + eqns.residual += ( + subject(prognostic(physics_label(phys_b_form), 'b'), eqns.X) + ) + eqns.residual += ( + 
subject(prognostic(physics_label(phys_qv_form), 'water_vapour'), eqns.X) + ) + eqns.residual += ( + subject(prognostic(physics_label(phys_qc_form), 'cloud_water'), eqns.X) + ) + + io = IO(domain, output) + + # Limiters + DG1limiter = DG1Limiter(domain.spaces('DG')) + zerolimiter = ZeroLimiter(domain.spaces('DG')) + + physics_sublimiters = {'water_vapour': zerolimiter, + 'cloud_water': zerolimiter} + + physics_limiter = MixedFSLimiter(eqns, physics_sublimiters) + + transport_methods = [DGUpwind(eqns, field_name) for field_name in eqns.field_names] + + transported_fields = [TrapeziumRule(domain, "u"), + SSPRK3(domain, "D"), + SSPRK3(domain, "b", limiter=DG1limiter), + SSPRK3(domain, "water_vapour", limiter=DG1limiter), + SSPRK3(domain, "cloud_water", limiter=DG1limiter)] + + # Physics + def phys_sat_func(x_in): + D = x_in.subfunctions[1] + b = x_in.subfunctions[2] + q_v = x_in.subfunctions[3] + b_e = Function(b.function_space()).interpolate(b - beta2*q_v) + return (q0*H/D) * exp(nu*(1 - b_e/parameters.g)) + + # Physics schemes + sat_adj = SWSaturationAdjustment(eqns, phys_sat_func, + time_varying_saturation=True, + parameters=parameters, + thermal_feedback=True, + beta2=beta2, gamma_v=gamma_v) + + physics_schemes = [(sat_adj, ForwardEuler(domain, limiter=physics_limiter))] + + # build timestepper + if physics_type == "final": + stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, + transport_methods, + final_physics_schemes=physics_schemes, + num_outer=2, num_inner=2) + elif physics_type == "inner_loop": + solver_parameters = monolithic_solver_parameters() + stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, + transport_methods, + inner_physics_schemes=physics_schemes, + num_outer=2, num_inner=2, + solver_prognostics=eqns.field_names, + linear_solver_parameters=solver_parameters) + + return stepper, eqns + + +def initialise_fields(eqns, stepper): + + params = eqns.parameters + u_max = 20. + R = 6371220. 
+ + # perturbation parameters + R0 = pi/9. + R0sq = R0**2 + lamda_c = -pi/2. + phi_c = pi/6. + # parameters for initial buoyancy + phi_0 = 3e4 + epsilon = 1/300 + theta_0 = epsilon*phi_0**2 + + # Perturbation + x = SpatialCoordinate(eqns.domain.mesh) + lamda, phi, _ = lonlatr_from_xyz(x[0], x[1], x[2]) + lsq = (lamda - lamda_c)**2 + thsq = (phi - phi_c)**2 + rsq = min_value(R0sq, lsq+thsq) + r = sqrt(rsq) + pert = 2000.0 * (1 - r/R0) + + # saturation function (depending on b_e) + def sat_func(D, b_e): + return (params.q0*params.H/D) * exp(params.nu*(1 - b_e/params.g)) + + u0 = stepper.fields("u") + D0 = stepper.fields("D") + b0 = stepper.fields("b") + v0 = stepper.fields("water_vapour") + c0 = stepper.fields("cloud_water") + + # velocity + uexpr = xyz_vector_from_lonlatr(u_max*cos(phi), 0, 0, x) + + # buoyancy + Omega = params.Omega + g = params.g + w = Omega*R*u_max + (u_max**2)/2 + sigma = w/10 + numerator = theta_0 + sigma*((cos(phi))**2) * ((w + sigma)*(cos(phi))**2 + 2*(phi_0 - w - sigma)) + denominator = phi_0**2 + (w + sigma)**2*(sin(phi))**4 - 2*phi_0*(w + sigma)*(sin(phi))**2 + theta = numerator/denominator + b_guess = params.g * (1 - theta) + + # depth + Dexpr = params.H - (1/g)*(w + sigma)*((sin(phi))**2) + pert + + # iterate to find initial b_e_expr, from which the vapour and saturation + # function are recovered + q_t = 0.03 + + def iterate(): + n_iterations = 10 + D_init = Function(D0.function_space()).interpolate(Dexpr) + b_init = Function(b0.function_space()).interpolate(b_guess) + b_e_init = Function(b0.function_space()).interpolate(b_init - params.beta2*q_t) + q_v_init = Function(v0.function_space()).interpolate(q_t) + for i in range(n_iterations): + q_sat_expr = sat_func(D_init, b_e_init) + dq_sat_dq_v_expr = params.nu*params.beta2/g*q_sat_expr + q_v_init.interpolate(q_v_init - (q_sat_expr - q_v_init)/(dq_sat_dq_v_expr - 1.0)) + b_e_init.interpolate(b_init - params.beta2*q_v_init) + return b_e_init + + b_e = iterate() + + initial_sat = 
sat_func(Dexpr, b_e) + + vexpr = initial_sat + + # back out the initial buoyancy using b_e and q_v + bexpr = b_e + params.beta2*vexpr + + # cloud is the rest of q_t that isn't vapour + cexpr = Constant(q_t) - vexpr + + u0.project(uexpr) + D0.interpolate(Dexpr) + b0.interpolate(bexpr) + v0.interpolate(vexpr) + c0.interpolate(cexpr) + + # Set reference profiles + Dbar = Function(D0.function_space()).assign(params.H) + bbar = Function(b0.function_space()).interpolate(bexpr) + vbar = Function(v0.function_space()).interpolate(vexpr) + cbar = Function(c0.function_space()).interpolate(cexpr) + stepper.set_reference_profiles([('D', Dbar), ('b', bbar), + ('water_vapour', vbar), ('cloud_water', cbar)]) + + +def test_inner_loop_physics(tmpdir): + + # Parameters + q0 = 0.0115 + beta2 = 9.80616*10 + nu = 1.5 + + # Set up mesh + R = 6371220. + ref = 3 + mesh = IcosahedralSphereMesh(radius=R, refinement_level=ref, degree=2) + + # Set up timestepping parameters + dt = 600 + + dirname_1 = str(tmpdir)+'/standard_physics' + dirname_2 = str(tmpdir)+'/inner_loop_physics' + + # Set up equations and timesteppers + output1 = OutputParameters(dirname=dirname_1, + dump_vtus=True, + checkpoint=True) + + output2 = OutputParameters(dirname=dirname_2, + dump_vtus=True, + checkpoint=True) + + stepper1, eqns1 = set_up_model_objects(mesh, dt, q0, beta2, nu, physics_type="final", output=output1) + stepper2, eqns2 = set_up_model_objects(mesh, dt, q0, beta2, nu, physics_type="inner_loop", output=output2) + + # Initialise + initialise_fields(eqns1, stepper1) + initialise_fields(eqns2, stepper2) + + # Run + stepper1.run(t=0.0, tmax=3*dt) + stepper2.run(t=0.0, tmax=3*dt) + + for field_name in ['u', 'D', 'b', 'water_vapour', 'cloud_water']: + diff_array = stepper1.fields(field_name).dat.data - stepper2.fields(field_name).dat.data + error = np.linalg.norm(diff_array) / np.linalg.norm(stepper1.fields(field_name).dat.data) + assert error < 0.0015, \ + f'Field {field_name} is not the same when using inner 
loop physics as final physics' diff --git a/integration-tests/model/test_mean_mixing_ratio.py b/integration-tests/model/test_mean_mixing_ratio.py new file mode 100644 index 000000000..842913439 --- /dev/null +++ b/integration-tests/model/test_mean_mixing_ratio.py @@ -0,0 +1,156 @@ +""" +Tests the mean mixing ratio augmentation, which is used for +non-negativity limiting in a conservative transport scheme. +A few timesteps are taken in the terminator toy test, with +non-negativity and mass conservation checked. + +""" + +from gusto import * +from firedrake import ( + exp, cos, sin, SpatialCoordinate, + pi, max_value, assemble, dx +) + + +def setup_mean_mixing_ratio(dirname): + dt = 450. + tau = 12.*24.*60.*60. # time period of reversible wind, in s + radius = 6371220. # radius of the sphere, in m + theta_cr = pi/9. # central latitude of first reaction rate, in rad + lamda_cr = -pi/3. # central longitude of first reaction rate, in rad + theta_c1 = 0. # central latitude of first chemical blob, in rad + theta_c2 = 0. # central latitude of second chemical blob, in rad + lamda_c1 = -pi/4. # central longitude of first chemical blob, in rad + lamda_c2 = pi/4. 
# central longitude of second chemical blob, in rad + rho_b = 1 # Base dry density + g_max = 0.5 # Maximum amplitude of Gaussian density perturbations + b0 = 5 # Controls the width of the chemical blobs + + mesh = GeneralCubedSphereMesh(radius, 12, degree=2) + xyz = SpatialCoordinate(mesh) + + # Only use order 1 elements + domain = Domain(mesh, dt, 'RTCF', 1) + + # get lat lon coordinates + lamda, theta, _ = lonlatr_from_xyz(xyz[0], xyz[1], xyz[2]) + + # Define co-located tracers of the dry density and the two species + rho_d = ActiveTracer(name='rho_d', space='DG', + variable_type=TracerVariableType.density, + transport_eqn=TransportEquationType.conservative) + + X_tracer = ActiveTracer(name='X_tracer', space='DG', + variable_type=TracerVariableType.mixing_ratio, + transport_eqn=TransportEquationType.tracer_conservative, + density_name='rho_d') + + X2_tracer = ActiveTracer(name='X2_tracer', space='DG', + variable_type=TracerVariableType.mixing_ratio, + transport_eqn=TransportEquationType.tracer_conservative, + density_name='rho_d') + + # Define the mixing ratios first to test that the tracers will be + # automatically re-ordered such that the density field + # is indexed before the mixing ratio. + tracers = [X_tracer, X2_tracer, rho_d] + + # Equation + V = domain.spaces("HDiv") + eqn = CoupledTransportEquation(domain, active_tracers=tracers, Vu=V) + + output = OutputParameters(dirname=dirname) + io = IO(domain, output) + + k1 = max_value(0, sin(theta)*sin(theta_cr) + cos(theta)*cos(theta_cr)*cos(lamda-lamda_cr)) + k2 = 1 + + mixed_phys_limiter = MixedFSLimiter( + eqn, + {'rho_d': ZeroLimiter(domain.spaces('DG')), + 'X_tracer': ZeroLimiter(domain.spaces('DG')), + 'X2_tracer': ZeroLimiter(domain.spaces('DG'))} + ) + + # Using the analytical forcing from Appendix D of Lauritzen et. al. 
+ physics_schemes = [(TerminatorToy(eqn, k1=k1, k2=k2, species1_name='X_tracer', + species2_name='X2_tracer', analytical_formulation=True), + ForwardEuler(domain, limiter=mixed_phys_limiter))] + + X, Y, Z = xyz + X1, Y1, Z1 = xyz_from_lonlatr(lamda_c1, theta_c1, radius) + X2, Y2, Z2 = xyz_from_lonlatr(lamda_c2, theta_c2, radius) + + g1 = g_max*exp(-(b0/(radius**2))*((X-X1)**2 + (Y-Y1)**2 + (Z-Z1)**2)) + g2 = g_max*exp(-(b0/(radius**2))*((X-X2)**2 + (Y-Y2)**2 + (Z-Z2)**2)) + + rho_expr = rho_b + g1 + g2 + + X_T_0 = 4e-6 + r = k1/(4*k2) + D_val = sqrt(r**2 + 2*X_T_0*r) + + # Initial condition for each species + X_0 = D_val - r + X2_0 = 0.5*(X_T_0 - D_val + r) + + def u_t(t): + k = 10*radius/tau + + u_zonal = ( + k*(sin(lamda - 2*pi*t/tau)**2)*sin(2*theta)*cos(pi*t/tau) + + ((2*pi*radius)/tau)*cos(theta) + ) + u_merid = k*sin(2*(lamda - 2*pi*t/tau))*cos(theta)*cos(pi*t/tau) + + return xyz_vector_from_lonlatr(u_zonal, u_merid, Constant(0.0), xyz) + + augmentation = MeanMixingRatio(domain, eqn, ['X_tracer', 'X2_tracer']) + transport_scheme = SSPRK3(domain, augmentation=augmentation, rk_formulation=RungeKuttaFormulation.predictor) + transport_method = [DGUpwind(eqn, 'rho_d'), DGUpwind(eqn, 'X_tracer'), DGUpwind(eqn, 'X2_tracer')] + + time_varying_velocity = True + stepper = SplitPrescribedTransport(eqn, transport_scheme, io, + time_varying_velocity, + spatial_methods=transport_method, + physics_schemes=physics_schemes) + + stepper.setup_prescribed_expr(u_t) + + # Initial conditions + stepper.fields("rho_d").interpolate(rho_expr) + stepper.fields("X_tracer").interpolate(X_0) + stepper.fields("X2_tracer").interpolate(X2_0) + + X_sum = assemble(stepper.fields("rho_d")*stepper.fields("X_tracer")*dx) + X2_sum = assemble(stepper.fields("rho_d")*stepper.fields("X2_tracer")*dx) + XT_init = X_sum + 2*X2_sum + + return stepper, XT_init + + +def test_mean_mixing_ratio(tmpdir): + + # Setup and run + dirname = str(tmpdir) + + stepper, XT_init = setup_mean_mixing_ratio(dirname) + + # 
Run for four timesteps + stepper.run(t=0, tmax=1800.) + rho_d = stepper.fields("rho_d") + X_tracer = stepper.fields("X_tracer") + X2_tracer = stepper.fields("X2_tracer") + + X_sum = assemble(rho_d*X_tracer*dx) + X2_sum = assemble(rho_d*X2_tracer*dx) + XT_sum = X_sum + 2*X2_sum + td_err = np.abs(XT_init - XT_sum)/XT_init + + # Check that all the fields are non-negative + assert all(X_tracer.dat.data >= 0.0) and all(X2_tracer.dat.data >= 0.0), \ + "mean mixing ratio field has not ensured non-negativity" + + # Confirm mass conservation to a certain tolerance + assert td_err < 1e-14, "mean mixing ratio field has not ensured mass conservation" diff --git a/integration-tests/model/test_nc_outputting.py b/integration-tests/model/test_nc_outputting.py index 55d63264d..8905de324 100644 --- a/integration-tests/model/test_nc_outputting.py +++ b/integration-tests/model/test_nc_outputting.py @@ -12,9 +12,10 @@ ZComponent, MeridionalComponent, ZonalComponent, RadialComponent, DGUpwind) from mpi4py import MPI -from netCDF4 import Dataset, chartostring +from netCDF4 import Dataset import pytest from pytest_mpi import parallel_assert +import numpy as np def make_dirname(test_name, suffix=""): @@ -169,6 +170,17 @@ def assertion(): return output_data[metadata_key][0] - output_value < 1e-14 else: def assertion(): - return str(chartostring(output_data[metadata_key][0])) == output_value + var = output_data[metadata_key] + row = var[0, :] if getattr(var, 'ndim', None) == 2 else var[:] + arr = np.array(row) + + if arr.dtype.kind == 'S': # bytes-like chars + decoded = b''.join(arr.tolist()).decode('utf-8').rstrip('\x00') + elif arr.dtype.kind in ('U', 'O'): # already text + decoded = ''.join(arr.tolist()).rstrip('\x00') + else: # fallback + decoded = b''.join(arr.view('S1').tolist()).decode('utf-8').rstrip('\x00') + + return decoded == output_value parallel_assert(assertion, participating=output_data is not None, msg=error_message) diff --git a/integration-tests/model/test_parallel_dc.py 
b/integration-tests/model/test_parallel_dc.py new file mode 100644 index 000000000..f1b3fe585 --- /dev/null +++ b/integration-tests/model/test_parallel_dc.py @@ -0,0 +1,111 @@ +""" +This runs a simple transport test on the sphere using the parallel DC time discretisations to +test whether the errors are within tolerance. The test is run for the following schemes: +- IMEX_SDC(2,2) - IMEX SDC with 2 quadrature nodes of Radau type and 2 correction sweeps (2nd order scheme) +- IMEX_RIDC(2,1) - IMEX RIDC with 3 quadrature nodes of equidistant type, 1 correction sweep, reduced stencils (2nd order scheme). + Has a pipeline flush frequency of 1 (every timestep). +- IMEX_RIDC(2,5) - IMEX RIDC with 3 quadrature nodes of equidistant type, 1 correction sweep, reduced stencils (2nd order scheme). + Has a pipeline flush frequency of 5 (every 5 timesteps). +""" + +from firedrake import (norm, Ensemble, COMM_WORLD, SpatialCoordinate, + as_vector, pi, exp, IcosahedralSphereMesh) + +from gusto import * +import pytest +from pytest_mpi.parallel_assert import parallel_assert + + +def run(timestepper, tmax, f_end): + timestepper.run(0, tmax) + print(norm(timestepper.fields("f") - f_end) / norm(f_end)) + return norm(timestepper.fields("f") - f_end) / norm(f_end) + + +@pytest.mark.parallel(nprocs=[2]) +@pytest.mark.parametrize( + "scheme", ["IMEX_RIDC(2,1)", "IMEX_RIDC(2,5)", "IMEX_SDC(2,2)"]) +def test_parallel_dc(tmpdir, scheme): + + if scheme == "IMEX_SDC(2,2)": + M = 2 + k = 2 + ensemble = Ensemble(COMM_WORLD, COMM_WORLD.size//(M)) + elif scheme == "IMEX_RIDC(2,1)" or scheme == "IMEX_RIDC(2,5)": + k = 1 + ensemble = Ensemble(COMM_WORLD, COMM_WORLD.size//(k+1)) + + # Get the tracer setup + radius = 1 + dirname = str(tmpdir) + mesh = IcosahedralSphereMesh( + radius=radius, + refinement_level=3, + degree=1, + comm=ensemble.comm + ) + x = SpatialCoordinate(mesh) + + # Parameters chosen so that dt != 1 + # Gaussian is translated from (lon=pi/2, lat=0) to (lon=0, lat=0) + # to demonstrate
that transport is working correctly + + dt = pi/3. * 0.02 + dumpfreq = 15 + output = OutputParameters(dirname=dirname, dump_vtus=False, dump_nc=True, dumpfreq=dumpfreq) + domain = Domain(mesh, dt, family="BDM", degree=1) + io = IO(domain, output) + + umax = 1.0 + uexpr = as_vector([- umax * x[1] / radius, umax * x[0] / radius, 0.0]) + + tmax = pi/2 + f_init = exp(-x[2]**2 - x[0]**2) + f_end = exp(-x[2]**2 - x[1]**2) + + tol = 0.05 + + domain = domain + V = domain.spaces("DG") + eqn = ContinuityEquation(domain, V, "f") + + if scheme == "IMEX_SDC(2,2)": + eqn.label_terms(lambda t: not t.has_label(time_derivative), implicit) + + quad_type = "RADAU-RIGHT" + node_type = "LEGENDRE" + qdelta_imp = "MIN-SR-FLEX" + qdelta_exp = "MIN-SR-NS" + base_scheme = IMEX_Euler(domain) + time_scheme = Parallel_SDC(base_scheme, domain, M, k, quad_type, node_type, qdelta_imp, + qdelta_exp, final_update=True, initial_guess="base", communicator=ensemble) + elif scheme == "IMEX_RIDC(2,1)": + eqn = split_continuity_form(eqn) + eqn.label_terms(lambda t: not any(t.has_label(time_derivative, transport)), implicit) + eqn.label_terms(lambda t: t.has_label(transport), explicit) + + M = 5 + J = int(tmax/dt) + base_scheme = IMEX_Euler(domain) + time_scheme = Parallel_RIDC(base_scheme, domain, M, k, J, output_freq=dumpfreq, flush_freq=1, communicator=ensemble) + elif scheme == "IMEX_RIDC(2,5)": + eqn = split_continuity_form(eqn) + eqn.label_terms(lambda t: not any(t.has_label(time_derivative, transport)), implicit) + eqn.label_terms(lambda t: t.has_label(transport), explicit) + + M = 5 + J = int(tmax/dt) + base_scheme = IMEX_Euler(domain) + time_scheme = Parallel_RIDC(base_scheme, domain, M, k, J, output_freq=dumpfreq, flush_freq=5, communicator=ensemble) + + transport_method = DGUpwind(eqn, 'f') + + time_varying_velocity = False + timestepper = PrescribedTransport( + eqn, time_scheme, io, time_varying_velocity, transport_method + ) + + timestepper.fields("f").interpolate(f_init) + 
timestepper.fields("u").project(uexpr) + error = run(timestepper, tmax, f_end) + parallel_assert(error < tol, f"Error too large, Error: {error}, tol: {tol}") diff --git a/integration-tests/model/test_simultaneous_SIQN.py b/integration-tests/model/test_simultaneous_SIQN.py index ddc471997..1afb1467f 100644 --- a/integration-tests/model/test_simultaneous_SIQN.py +++ b/integration-tests/model/test_simultaneous_SIQN.py @@ -125,16 +125,13 @@ def run_simult_SIQN(tmpdir, order): ["u", "rho", "theta", "water_vapour", "cloud_water"] ] - # Linear solver - linear_solver = CompressibleSolver(eqns) - # Physics schemes (condensation/evaporation) physics_schemes = [(SaturationAdjustment(eqns), ForwardEuler(domain))] # Time stepper stepper = SemiImplicitQuasiNewton( eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, physics_schemes=physics_schemes + final_physics_schemes=physics_schemes ) # ------------------------------------------------------------------------ # diff --git a/integration-tests/physics/test_terminator_toy.py b/integration-tests/physics/test_terminator_toy.py index 2ff6e5445..ff10404af 100644 --- a/integration-tests/physics/test_terminator_toy.py +++ b/integration-tests/physics/test_terminator_toy.py @@ -68,6 +68,18 @@ def run_terminator_toy(dirname, physics_coupling): eqn, transport_scheme, io, time_varying_velocity, spatial_methods=transport_method, physics_schemes=physics_schemes ) + elif physics_coupling == "analytic": + physics_schemes = [(TerminatorToy(eqn, k1=k1, k2=k2, species1_name='Y', + species2_name='Y2', analytical_formulation=True), + ForwardEuler(domain))] + + transport_scheme = SSPRK3(domain) + time_varying_velocity = True + stepper = SplitPrescribedTransport( + eqn, transport_scheme, io, time_varying_velocity, + spatial_methods=transport_method, physics_schemes=physics_schemes + ) + else: physics_parametrisation = [TerminatorToy(eqn, k1=k1, k2=k2, species1_name='Y', species2_name='Y2')] @@ -108,7 +120,7 @@ def u_t(t): 
return stepper, Y_steady, Y2_steady -@pytest.mark.parametrize("physics_coupling", ["split", "nonsplit"]) +@pytest.mark.parametrize("physics_coupling", ["split", "nonsplit", "analytic"]) def test_terminator_toy_setup(tmpdir, physics_coupling): dirname = str(tmpdir) stepper, Y_steady, Y2_steady = run_terminator_toy(dirname, physics_coupling) diff --git a/integration-tests/transport/test_advective_then_flux.py b/integration-tests/transport/test_advective_then_flux.py index d45bf5d7a..2d4216bd2 100644 --- a/integration-tests/transport/test_advective_then_flux.py +++ b/integration-tests/transport/test_advective_then_flux.py @@ -7,7 +7,7 @@ from gusto import * from firedrake import ( PeriodicRectangleMesh, cos, sin, SpatialCoordinate, - assemble, dx, pi, as_vector, errornorm, Function, div + assemble, dx, pi, as_vector, errornorm, Function, div, norm ) import pytest @@ -101,11 +101,11 @@ def test_advective_then_flux(tmpdir, desirable_property): rho = stepper.fields("rho") # Check for divergence-linearity/constancy - assert errornorm(rho, rho_true) < 1e-11, \ + assert errornorm(rho, rho_true) / norm(rho_true) < 1e-14, \ "advective-then-flux form is not yielding the correct answer" # Check for conservation mass_initial = assemble(rho_true*dx) mass_final = assemble(rho*dx) - assert abs(mass_final - mass_initial) < 1e-14, \ + assert abs(mass_final - mass_initial) / mass_initial < 1e-14, \ "advective-then-flux form is not conservative" diff --git a/integration-tests/transport/test_zero_limiter.py b/integration-tests/transport/test_zero_limiter.py index 16691657b..bcf5282d0 100644 --- a/integration-tests/transport/test_zero_limiter.py +++ b/integration-tests/transport/test_zero_limiter.py @@ -78,8 +78,6 @@ def gamma_v(x_in): transport_methods = [DGUpwind(eqns, field_name) for field_name in eqns.field_names] - linear_solver = ThermalSWSolver(eqns) - zerolimiter = ZeroLimiter(domain.spaces('DG')) DG1limiter = DG1Limiter(domain.spaces('DG')) @@ -111,8 +109,7 @@ def gamma_v(x_in): 
] stepper = SemiImplicitQuasiNewton(eqns, io, transported_fields, transport_methods, - linear_solver=linear_solver, - physics_schemes=physics_schemes) + final_physics_schemes=physics_schemes) # ------------------------------------------------------------------------ # # Initial conditions diff --git a/plotting/shallow_water/plot_moist_thermal_gw.py b/plotting/shallow_water/plot_moist_thermal_gw.py new file mode 100644 index 000000000..607349437 --- /dev/null +++ b/plotting/shallow_water/plot_moist_thermal_gw.py @@ -0,0 +1,202 @@ +""" +Plots the moist thermal gravity wave test case. +""" +from os.path import abspath, dirname +import matplotlib.pyplot as plt +import numpy as np +from netCDF4 import Dataset +from tomplot import ( + set_tomplot_style, tomplot_cmap, plot_contoured_field, + add_colorbar_ax, plot_field_quivers, tomplot_field_title, + extract_gusto_coords, extract_gusto_field, regrid_horizontal_slice +) + +# ---------------------------------------------------------------------------- # +# Directory for results and plots +# ---------------------------------------------------------------------------- # +# When copying this example these paths need editing, which will usually involve +# removing the abspath part to set directory paths relative to this file +results_file_name = f'{abspath(dirname(__file__))}/../../results/moist_thermal_gw/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/shallow_water/moist_thermal_gw' +beta2 = 9.80616*10 + +# ---------------------------------------------------------------------------- # +# Initial plot details +# ---------------------------------------------------------------------------- # +init_field_names = ['u', 'D', 'cloud_water', 'b_e'] +init_colour_schemes = ['Oranges', 'YlGnBu', 'cividis', 'PuRd_r'] +init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)', + r'$q_{cl}$ (kg kg$^{-1})$', r'$b_e$ (m s$^{-2}$)'] +init_contours_to_remove = [None, None, None, None] +init_contours = [np.linspace(0.0, 
20.0, 9), + np.linspace(1800.0, 4800.0, 15), + np.linspace(0.002, 0.02, 10), + np.linspace(7.0, 8.5, 16)] + +# ---------------------------------------------------------------------------- # +# Final plot details +# ---------------------------------------------------------------------------- # +final_field_names = ['D', 'cloud_water', 'b_e'] +final_colour_schemes = ['YlGnBu', 'cividis', 'PuRd_r'] +final_field_labels = [r'$D$ (m)', r'$q_{cl}$ (kg kg$^{-1}$)', r'$b_e$ (m s$^{-2}$)'] +final_contours_to_remove = [None, None, None] +final_contours = [ + np.linspace(1800.0, 4800.0, 15), + np.linspace(0.002, 0.02, 10), + np.linspace(7.0, 8.5, 16) +] + +# ---------------------------------------------------------------------------- # +# General options +# ---------------------------------------------------------------------------- # +contour_method = 'tricontour' +xlims = [-180, 180] +ylims = [-90, 90] +minmax_format = { + 'cloud_water': '.1e', + 'b_e': '.2f', + 'u': '.1f', + 'D': '.0f' +} + +# Things that are likely the same for all plots -------------------------------- +set_tomplot_style() +data_file = Dataset(results_file_name, 'r') + +# ---------------------------------------------------------------------------- # +# INITIAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(2, 2, figsize=(16, 12), sharex='all', sharey='all') +time_idx = 0 + +for i, (ax, field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + axarray.flatten(), init_field_names, init_colour_schemes, + init_field_labels, init_contours_to_remove, init_contours)): + + # Data extraction ---------------------------------------------------------- + if field_name == 'u': + zonal_data = extract_gusto_field(data_file, 'u_zonal', time_idx=time_idx) + meridional_data = extract_gusto_field(data_file, 'u_meridional', time_idx=time_idx) + field_data = np.sqrt(zonal_data**2 + meridional_data**2) + coords_X, 
coords_Y = extract_gusto_coords(data_file, 'u_zonal') + + elif field_name == 'b_e' and 'b_e' not in data_file.variables.keys(): + b_data = extract_gusto_field(data_file, 'b', time_idx=time_idx) + qv_data = extract_gusto_field(data_file, 'water_vapour', time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, 'b') + field_data = b_data - beta2*qv_data + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + time = data_file['time'][time_idx] / (24.*60.*60.) + + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title( + ax, None, minmax=True, field_data=field_data, + minmax_format=minmax_format[field_name] + ) + + # Add quivers -------------------------------------------------------------- + if field_name == 'u': + # Need to re-grid to lat-lon grid to get sensible looking quivers + lon_1d = np.linspace(-180.0, 180.0, 91) + lat_1d = np.linspace(-90.0, 90.0, 81) + lon_2d, lat_2d = np.meshgrid(lon_1d, lat_1d, indexing='ij') + regrid_zonal_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, zonal_data, + periodic_fix='sphere' + ) + regrid_meridional_data = regrid_horizontal_slice( + lon_2d, lat_2d, coords_X, coords_Y, meridional_data, + periodic_fix='sphere' + ) + plot_field_quivers( + ax, lon_2d, lat_2d, regrid_zonal_data, regrid_meridional_data, + spatial_filter_step=6, magnitude_filter=1.0, + ) + + # Labels ------------------------------------------------------------------- + if i in [0, 2]: + ax.set_ylabel(r'$\vartheta$ (deg)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + 
ax.set_yticklabels(ylims) + + if i in [2, 3]: + ax.set_xlabel(r'$\lambda$ (deg)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +fig.subplots_adjust(wspace=0.25) +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_initial.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() + +# ---------------------------------------------------------------------------- # +# FINAL PLOTTING +# ---------------------------------------------------------------------------- # +fig, axarray = plt.subplots(1, 3, figsize=(21, 6), sharex='all', sharey='all') +time_idx = -1 + +for i, (ax, field_name, colour_scheme, field_label, contour_to_remove, contours) in \ + enumerate(zip( + axarray, final_field_names, final_colour_schemes, + final_field_labels, final_contours_to_remove, final_contours)): + + # Data extraction ---------------------------------------------------------- + if field_name == 'b_e' and 'b_e' not in data_file.variables.keys(): + b_data = extract_gusto_field(data_file, 'b', time_idx=time_idx) + qv_data = extract_gusto_field(data_file, 'water_vapour', time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, 'b') + field_data = b_data - beta2*qv_data + + else: + field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) + coords_X, coords_Y = extract_gusto_coords(data_file, field_name) + + time = data_file['time'][time_idx] / (24.*60.*60.) 
+ + # Plot data ---------------------------------------------------------------- + cmap, lines = tomplot_cmap(contours, colour_scheme, remove_contour=contour_to_remove) + cf, _ = plot_contoured_field( + ax, coords_X, coords_Y, field_data, contour_method, contours, + cmap=cmap, line_contours=lines + ) + + add_colorbar_ax(ax, cf, field_label, location='bottom', cbar_labelpad=-10) + tomplot_field_title( + ax, None, minmax=True, field_data=field_data, + minmax_format=minmax_format[field_name] + ) + + # Labels ------------------------------------------------------------------- + if i == 0: + ax.set_ylabel(r'$\vartheta$ (deg)', labelpad=-20) + ax.set_ylim(ylims) + ax.set_yticks(ylims) + ax.set_yticklabels(ylims) + + ax.set_xlabel(r'$\lambda$ (deg)', labelpad=-10) + ax.set_xlim(xlims) + ax.set_xticks(xlims) + ax.set_xticklabels(xlims) + +# Save figure ------------------------------------------------------------------ +plt.suptitle(f't = {time:.1f} days') +plot_name = f'{plot_stem}_final.png' +print(f'Saving figure to {plot_name}') +fig.savefig(plot_name, bbox_inches='tight') +plt.close() diff --git a/plotting/shallow_water/plot_moist_thermal_equivb_gravity_wave.py b/plotting/shallow_water/plot_moist_thermal_gw_equivb.py similarity index 90% rename from plotting/shallow_water/plot_moist_thermal_equivb_gravity_wave.py rename to plotting/shallow_water/plot_moist_thermal_gw_equivb.py index b771cc89b..cb65e1866 100644 --- a/plotting/shallow_water/plot_moist_thermal_equivb_gravity_wave.py +++ b/plotting/shallow_water/plot_moist_thermal_gw_equivb.py @@ -16,10 +16,8 @@ # ---------------------------------------------------------------------------- # # When copying this example these paths need editing, which will usually involve # removing the abspath part to set directory paths relative to this file - -results_file_name = f'{abspath(dirname(__file__))}/../../results/moist_thermal_equivb_gw/field_output.nc' -plot_stem = 
f'{abspath(dirname(__file__))}/../../figures/shallow_water/moist_thermal_equivb_gw' - +results_file_name = f'{abspath(dirname(__file__))}/../../results/moist_thermal_gw_equivb/field_output.nc' +plot_stem = f'{abspath(dirname(__file__))}/../../figures/shallow_water/moist_thermal_gw_equivb' beta2 = 9.80616*10 # ---------------------------------------------------------------------------- # @@ -30,20 +28,25 @@ init_field_labels = [r'$|u|$ (m s$^{-1}$)', r'$D$ (m)', r'$q_{cl}$ (kg kg$^{-1})$', r'$b_e$ (m s$^{-2}$)'] init_contours_to_remove = [None, None, None, None] -init_contours = [np.linspace(0.0, 20.0, 9), - np.linspace(4800.0, 8000.0, 19), - np.linspace(0.01, 0.02, 11), - np.linspace(9, 10, 15)] +init_contours = [ + np.linspace(0.0, 20.0, 9), + np.linspace(1800.0, 4800.0, 15), + np.linspace(0.002, 0.02, 10), + np.linspace(7.0, 8.5, 16) +] # ---------------------------------------------------------------------------- # # Final plot details # ---------------------------------------------------------------------------- # -final_field_names = ['PartitionedCloud', 'b_e'] -final_colour_schemes = ['cividis', 'PuRd_r'] -final_field_labels = [r'$q_{cl}$ (kg kg$^{-1}$)', r'$b_e$ (m s$^{-2}$)'] -final_contours_to_remove = [None, None] -final_contours = [np.linspace(0.01, 0.02, 11), - np.linspace(9, 10, 15)] +final_field_names = ['D', 'PartitionedCloud', 'b_e'] +final_colour_schemes = ['YlGnBu', 'cividis', 'PuRd_r'] +final_field_labels = [r'$D$ (m)', r'$q_{cl}$ (kg kg$^{-1}$)', r'$b_e$ (m s$^{-2}$)'] +final_contours_to_remove = [None, None, None] +final_contours = [ + np.linspace(1800.0, 4800.0, 15), + np.linspace(0.002, 0.02, 10), + np.linspace(7.0, 8.5, 16) +] # ---------------------------------------------------------------------------- # # General options @@ -141,7 +144,7 @@ # ---------------------------------------------------------------------------- # # FINAL PLOTTING # ---------------------------------------------------------------------------- # -fig, axarray = 
plt.subplots(1, 2, figsize=(16, 8), sharex='all', sharey='all') +fig, axarray = plt.subplots(1, 3, figsize=(21, 6), sharex='all', sharey='all') time_idx = -1 for i, (ax, field_name, colour_scheme, field_label, contour_to_remove, contours) in \ @@ -149,6 +152,7 @@ axarray, final_field_names, final_colour_schemes, final_field_labels, final_contours_to_remove, final_contours)): + # Data extraction ---------------------------------------------------------- field_data = extract_gusto_field(data_file, field_name, time_idx=time_idx) coords_X, coords_Y = extract_gusto_coords(data_file, field_name) diff --git a/unit-tests/recovery_tests/test_recovery_2d_vert_slice.py b/unit-tests/recovery_tests/test_recovery_2d_vert_slice.py index db59d0485..372c399b9 100644 --- a/unit-tests/recovery_tests/test_recovery_2d_vert_slice.py +++ b/unit-tests/recovery_tests/test_recovery_2d_vert_slice.py @@ -82,7 +82,7 @@ def test_vertical_slice_recovery(geometry, mesh, expr): # our actual theta and rho and v rho_CG1_true = Function(CG1).interpolate(expr) theta_CG1_true = Function(CG1).interpolate(expr) - v_CG1_true = Function(vec_CG1).interpolate(as_vector([expr, expr])) + v_CG1_true = Function(vec_CG1).interpolate(as_vector([expr, -3.0*expr])) rho_Vt_true = Function(Vt).interpolate(expr) # make the initial fields by projecting expressions into the lowest order spaces @@ -90,7 +90,7 @@ def test_vertical_slice_recovery(geometry, mesh, expr): rho_CG1 = Function(CG1) theta_Vt = Function(Vt).interpolate(expr) theta_CG1 = Function(CG1) - v_Vu = Function(Vu).project(as_vector([expr, expr])) + v_Vu = Function(Vu).project(as_vector([expr, -3.0*expr])) v_CG1 = Function(vec_CG1) rho_Vt = Function(Vt)