From 05337ea4e9f9e01339dce8149d2ac033c04d90c5 Mon Sep 17 00:00:00 2001
From: Jesse Lentz <42011922+J-Lentz@users.noreply.github.com>
Date: Thu, 13 Jul 2023 13:16:11 -0400
Subject: [PATCH 01/11] refactor: `monin_obukhov_stable_mix` calls from
`stable_mix_1d` (#1268)
---
monin_obukhov/monin_obukhov.F90 | 53 ++++++++++++++++-----------------
1 file changed, 26 insertions(+), 27 deletions(-)
diff --git a/monin_obukhov/monin_obukhov.F90 b/monin_obukhov/monin_obukhov.F90
index 883e4cbe34..ac8a89075f 100644
--- a/monin_obukhov/monin_obukhov.F90
+++ b/monin_obukhov/monin_obukhov.F90
@@ -274,16 +274,18 @@ subroutine stable_mix_3d(rich, mix)
real, intent(in) , dimension(:,:,:) :: rich
real, intent(out), dimension(:,:,:) :: mix
+integer :: n2 !< Size of dimension 2 of mix and rich
+integer :: n3 !< Size of dimension 3 of mix and rich
+integer :: i, j !< Loop indices
-integer :: n, ier
-
-if(.not.module_is_initialized) call error_mesg('stable_mix_3d in monin_obukhov_mod', &
- 'monin_obukhov_init has not been called', FATAL)
-
-n = size(rich,1)*size(rich,2)*size(rich,3)
-call monin_obukhov_stable_mix(stable_option, rich_crit, zeta_trans, &
- & n, rich, mix, ier)
+n2 = size(mix, 2)
+n3 = size(mix, 3)
+do j=1, n3
+ do i=1, n2
+ call stable_mix(rich(:, i, j), mix(:, i, j))
+ enddo
+enddo
end subroutine stable_mix_3d
@@ -943,16 +945,15 @@ subroutine stable_mix_2d(rich, mix)
real, intent(in) , dimension(:,:) :: rich
real, intent(out), dimension(:,:) :: mix
+integer :: n2 !< Size of dimension 2 of mix and rich
+integer :: i !< Loop index
-real, dimension(size(rich,1),size(rich,2),1) :: rich_3d, mix_3d
-
-rich_3d(:,:,1) = rich
+n2 = size(mix, 2)
-call stable_mix_3d(rich_3d, mix_3d)
-
-mix = mix_3d(:,:,1)
+do i=1, n2
+ call stable_mix(rich(:, i), mix(:, i))
+enddo
-return
end subroutine stable_mix_2d
@@ -962,16 +963,17 @@ subroutine stable_mix_1d(rich, mix)
real, intent(in) , dimension(:) :: rich
real, intent(out), dimension(:) :: mix
+integer :: n !< Size of mix and rich
+integer :: ierr !< Error code set by monin_obukhov_stable_mix
-real, dimension(size(rich),1,1) :: rich_3d, mix_3d
-
-rich_3d(:,1,1) = rich
+if (.not.module_is_initialized) call error_mesg('stable_mix in monin_obukhov_mod', &
+ 'monin_obukhov_init has not been called', FATAL)
-call stable_mix_3d(rich_3d, mix_3d)
+n = size(mix)
-mix = mix_3d(:,1,1)
+call monin_obukhov_stable_mix(stable_option, rich_crit, zeta_trans, &
+ & n, rich, mix, ierr)
-return
end subroutine stable_mix_1d
!=======================================================================
@@ -981,15 +983,12 @@ subroutine stable_mix_0d(rich, mix)
real, intent(in) :: rich
real, intent(out) :: mix
-real, dimension(1,1,1) :: rich_3d, mix_3d
-
-rich_3d(1,1,1) = rich
+real, dimension(1) :: mix_1d !< Representation of mix as a dimension(1) array
-call stable_mix_3d(rich_3d, mix_3d)
+call stable_mix([rich], mix_1d)
-mix = mix_3d(1,1,1)
+mix = mix_1d(1)
-return
end subroutine stable_mix_0d
!=======================================================================
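For reference, a minimal self-contained sketch of the dispatch pattern this refactor adopts: each higher-rank wrapper loops over its extra dimensions and delegates to a single 1-d kernel, so the underlying library routine is called from exactly one place. The names kernel_1d and wrap_2d below are illustrative stand-ins, not FMS API.

    program rank_dispatch_sketch
      implicit none
      real :: rich2(3,2), mix2(3,2)
      rich2 = 0.5
      call wrap_2d(rich2, mix2)        ! 2-d wrapper delegates to the 1-d kernel
      print *, mix2(1,1)
    contains
      subroutine kernel_1d(rich, mix)  ! stand-in for the real 1-d kernel
        real, intent(in)  :: rich(:)
        real, intent(out) :: mix(:)
        mix = max(0.0, 1.0 - rich)     ! placeholder formula
      end subroutine kernel_1d
      subroutine wrap_2d(rich, mix)    ! loops over dimension 2, as stable_mix_2d does
        real, intent(in)  :: rich(:,:)
        real, intent(out) :: mix(:,:)
        integer :: i
        do i = 1, size(mix, 2)
          call kernel_1d(rich(:, i), mix(:, i))
        end do
      end subroutine wrap_2d
    end program rank_dispatch_sketch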
From 0463dd122cb53418848a9798e12fa6641d8c2483 Mon Sep 17 00:00:00 2001
From: Jesse Lentz <42011922+J-Lentz@users.noreply.github.com>
Date: Thu, 13 Jul 2023 13:30:34 -0400
Subject: [PATCH 02/11] fix: out-of-bounds memory access in axis_utils2 (#1157)
---
axis_utils/include/axis_utils2.inc | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/axis_utils/include/axis_utils2.inc b/axis_utils/include/axis_utils2.inc
index 21deca9fb4..3acd69b28c 100644
--- a/axis_utils/include/axis_utils2.inc
+++ b/axis_utils/include/axis_utils2.inc
@@ -213,7 +213,7 @@
endif
lon_strt = lon(1)
- do i=2,len+1
+ do i=2,len
lon(i) = lon_in_range(lon(i),lon_strt)
lon_strt = lon(i)
enddo
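A minimal sketch of the off-by-one fixed above, with a simplified local stand-in for lon_in_range (the real routine lives in axis_utils2): looping to n+1 would read and write one element past the end of lon.

    program loop_bounds_sketch
      implicit none
      integer, parameter :: n = 5
      real :: lon(n), lon_strt
      integer :: i
      lon = [350., 355., 0., 5., 10.]
      lon_strt = lon(1)
      do i = 2, n    ! an upper bound of n+1 would step past lon(n)
        lon(i) = lon_in_range(lon(i), lon_strt)
        lon_strt = lon(i)
      end do
      print *, lon
    contains
      real function lon_in_range(lon_val, l_strt)  ! simplified stand-in
        real, intent(in) :: lon_val, l_strt
        lon_in_range = lon_val
        if (lon_in_range < l_strt) lon_in_range = lon_in_range + 360.
      end function lon_in_range
    end program loop_bounds_sketch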
From 8eb24f8bcdd65d1e061bff5578b928013e375de2 Mon Sep 17 00:00:00 2001
From: Jesse Lentz <42011922+J-Lentz@users.noreply.github.com>
Date: Thu, 13 Jul 2023 13:44:53 -0400
Subject: [PATCH 03/11] fix: maximize system stacksize limit in fms_init
(#1233)
---
fms/Makefile.am | 1 +
fms/fms.F90 | 9 +++++++++
fms/fms_stacksize.c | 33 +++++++++++++++++++++++++++++++++
3 files changed, 43 insertions(+)
create mode 100644 fms/fms_stacksize.c
diff --git a/fms/Makefile.am b/fms/Makefile.am
index 8f8c58525b..ca8b107941 100644
--- a/fms/Makefile.am
+++ b/fms/Makefile.am
@@ -32,6 +32,7 @@ noinst_LTLIBRARIES = libfms.la
# Each convenience library depends on its source.
libfms_la_SOURCES = \
fms.F90 \
+ fms_stacksize.c \
include/fms.inc \
include/fms_r4.fh \
include/fms_r8.fh \
diff --git a/fms/fms.F90 b/fms/fms.F90
index 3ec8052148..2ac9393b48 100644
--- a/fms/fms.F90
+++ b/fms/fms.F90
@@ -336,6 +336,11 @@ subroutine fms_init (localcomm, alt_input_nml_path)
use fms_io_mod, only: fms_io_version
#endif
+ interface
+ subroutine maximize_system_stacksize_limit() bind(C)
+ end subroutine
+ end interface
+
integer, intent(in), optional :: localcomm
character(len=*), intent(in), optional :: alt_input_nml_path
integer :: ierr, io
@@ -344,6 +349,10 @@ subroutine fms_init (localcomm, alt_input_nml_path)
if (module_is_initialized) return ! return silently if already called
module_is_initialized = .true.
+
+!---- Raise the system stack size limit to its maximum permissible value ----
+ call maximize_system_stacksize_limit
+
!---- initialize mpp routines ----
if(present(localcomm)) then
if(present(alt_input_nml_path)) then
diff --git a/fms/fms_stacksize.c b/fms/fms_stacksize.c
new file mode 100644
index 0000000000..7631656475
--- /dev/null
+++ b/fms/fms_stacksize.c
@@ -0,0 +1,33 @@
+/***********************************************************************
+ * GNU Lesser General Public License
+ *
+ * This file is part of the GFDL Flexible Modeling System (FMS).
+ *
+ * FMS is free software: you can redistribute it and/or modify it under
+ * the terms of the GNU Lesser General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or (at
+ * your option) any later version.
+ *
+ * FMS is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+ **********************************************************************/
+
+#include <sys/resource.h>
+
+/*
+ * Set the stack size limit to its maximum permissible value
+ */
+
+void maximize_system_stacksize_limit()
+{
+ struct rlimit stacksize;
+
+ getrlimit(RLIMIT_STACK, &stacksize);
+ stacksize.rlim_cur = stacksize.rlim_max;
+ setrlimit(RLIMIT_STACK, &stacksize);
+}
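For reference, a self-contained sketch of the Fortran side of this interop: because the C routine takes no arguments and returns nothing, a bare bind(C) interface block (as added to fms.F90 above) is all that is needed. Link against fms_stacksize.c, or any C file providing the symbol.

    program stacksize_sketch
      implicit none
      interface
        subroutine maximize_system_stacksize_limit() bind(C)
        end subroutine maximize_system_stacksize_limit
      end interface
      call maximize_system_stacksize_limit()   ! raises the soft limit to the hard limit
      print *, 'stack soft limit raised to its hard limit'
    end program stacksize_sketch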
From a2471c86e7b0c4596428f36a31de8ce13727db1a Mon Sep 17 00:00:00 2001
From: Ryan Mulhall <35538242+rem1776@users.noreply.github.com>
Date: Thu, 13 Jul 2023 14:14:55 -0400
Subject: [PATCH 04/11] test: remove stack limit checks in test scripts (#1280)
---
test_fms/fms2_io/test_fms2_io.sh | 10 ----------
test_fms/mpp/test_mpp_chksum.sh | 5 -----
test_fms/test-lib.sh.in | 5 -----
3 files changed, 20 deletions(-)
diff --git a/test_fms/fms2_io/test_fms2_io.sh b/test_fms/fms2_io/test_fms2_io.sh
index 8a604e6655..5e0bd31c0e 100755
--- a/test_fms/fms2_io/test_fms2_io.sh
+++ b/test_fms/fms2_io/test_fms2_io.sh
@@ -31,16 +31,6 @@
# Create and enter output directory
output_dir
-# use smaller arrays if system stack size is limited
-if [ $STACK_LIMITED ]; then
- cat <<_EOF > input.nml
-&test_fms2_io_nml
- nx = 32
- ny = 32
- nz = 10
-/
-_EOF
-fi
touch input.nml
# run the tests
diff --git a/test_fms/mpp/test_mpp_chksum.sh b/test_fms/mpp/test_mpp_chksum.sh
index 03d252794b..bea691aa5f 100755
--- a/test_fms/mpp/test_mpp_chksum.sh
+++ b/test_fms/mpp/test_mpp_chksum.sh
@@ -29,11 +29,6 @@
echo "&test_mpp_chksum_nml" > input.nml
echo "test_num = 1" >> input.nml
-# replaces defaults with smaller sizes if stack size is limited
-if [ $STACK_LIMITED ]; then
- echo "nx = 64" >> input.nml
- echo "ny = 64" >> input.nml
-fi
echo "/" >> input.nml
test_expect_success "mpp_chksum simple functionality" '
diff --git a/test_fms/test-lib.sh.in b/test_fms/test-lib.sh.in
index a2cfe8ebf8..b983b48d84 100644
--- a/test_fms/test-lib.sh.in
+++ b/test_fms/test-lib.sh.in
@@ -33,11 +33,6 @@ TEST_NAME="$(basename "$0" .sh)"
TEST_NUMBER="${TEST_NAME%%-*}"
TEST_NUMBER="${TEST_NUMBER#t}"
-# if using intel with a limited stack size, sets to run smaller tests
-if [ "$($FC --version | grep ifort)" -a "$(ulimit -s)" != "unlimited" 2> /dev/null ]; then
- STACK_LIMITED=1
-fi
-
exec 7>&2
# For now, write all output
#if test -n "$VERBOSE"
From 6693a4fdd67e490864c55b05c2ec5bf699341c45 Mon Sep 17 00:00:00 2001
From: Ryan Mulhall <35538242+rem1776@users.noreply.github.com>
Date: Thu, 13 Jul 2023 14:37:47 -0400
Subject: [PATCH 05/11] fix: mpp global arrays test fixes (#1174)
---
test_fms/mpp/test_global_arrays.F90 | 491 +++++++++++++---------------
test_fms/mpp/test_global_arrays.sh | 22 +-
test_fms/mpp/test_mpp_domains.F90 | 117 -------
3 files changed, 246 insertions(+), 384 deletions(-)
diff --git a/test_fms/mpp/test_global_arrays.F90 b/test_fms/mpp/test_global_arrays.F90
index ce2b125cb4..4f27b0c666 100644
--- a/test_fms/mpp/test_global_arrays.F90
+++ b/test_fms/mpp/test_global_arrays.F90
@@ -34,21 +34,24 @@ program test_global_arrays
use mpp_domains_mod, only: mpp_global_min, mpp_get_data_domain,mpp_get_compute_domain
use mpp_domains_mod, only: mpp_domains_exit, mpp_update_domains
use mpp_domains_mod, only: mpp_get_domain_shift, mpp_global_sum
+ use mpp_domains_mod, only: CYCLIC_GLOBAL_DOMAIN, NORTH, EAST, CENTER, CORNER, BITWISE_EXACT_SUM
+ use mpp_mod, only: MPP_CLOCK_SYNC, MPP_CLOCK_DETAILED, mpp_clock_id, mpp_clock_begin, mpp_clock_end
+ use fms_mod, only: check_nml_error, input_nml_file
implicit none
integer, parameter :: length=64
- integer :: id, pe, npes, root, i, j, icount, jcount
- integer(i4_kind) :: maxI4, minI4, ierr, sumI4, sumI4_5d
- integer(i8_kind) :: maxI8, minI8, sumI8, sumI8_5d
- integer(i4_kind), allocatable :: dataI4(:,:), dataI4_5d(:,:,:,:,:), dataI4_shuf(:,:)
- integer(i8_kind), allocatable :: dataI8(:,:), dataI8_5d(:,:,:,:,:), dataI8_shuf(:,:)
- real(r4_kind), allocatable :: dataR4(:,:), dataR4_5d(:,:,:,:,:), dataR4_shuf(:,:)
- real(r8_kind), allocatable :: dataR8(:,:), dataR8_5d(:,:,:,:,:), dataR8_shuf(:,:)
+ integer :: id, pe, npes, root, i, j, icount, jcount, io
+ integer(i4_kind) :: maxI4, minI4, ierr, sumI4, sumI4_5d, sumI4_shuf
+ integer(i8_kind) :: maxI8, minI8, sumI8, sumI8_5d, sumI8_shuf
+ integer(i4_kind), allocatable :: dataI4(:,:), dataI4_shuf(:,:), recv_data_i4(:,:)
+ integer(i8_kind), allocatable :: dataI8(:,:), dataI8_shuf(:,:), recv_data_i8(:,:)
+ real(r4_kind), allocatable :: dataR4(:,:), dataR4_shuf(:,:), recv_data_r4(:,:)
+ real(r8_kind), allocatable :: dataR8(:,:), dataR8_shuf(:,:), recv_data_r8(:,:)
real, allocatable :: rands(:)
type(domain2D) :: domain
- real(r8_kind) :: rcoef, maxR8, minR8, sumR8, sumR8_5d
- real(r4_kind) :: maxR4, minR4, sumR4, sumR4_5d
+ real(r8_kind) :: rcoef, maxR8, minR8, sumR8, sumR8_shuf
+ real(r4_kind) :: maxR4, minR4, sumR4, sumR4_shuf
integer :: isc, iec, jsc, jec
integer :: isd, ied, jsd, jed
character(len=32) :: strTmp1, strTmp2
@@ -56,22 +59,60 @@ program test_global_arrays
integer(i8_kind), parameter :: randmaxI8 = 4096
real(r8_kind), parameter :: tol4 = 1e-4, tol8 = 1e-6!> tolerance for real comparisons
- call mpp_init(mpp_init_test_init_true_only)
+ ! namelist variables - just logicals to enable individual tests
+ ! simple just does normal max/min + sums across a domain
+ ! full does max/min+sums with halos and symmetry
+ logical :: test_simple= .false. , test_full = .false.
+ namelist / test_global_arrays_nml / test_simple, test_full
+
+ call mpp_init()
+
call mpp_domains_init()
- call mpp_set_stack_size(3145746)
- call mpp_domains_set_stack_size(3145746)
+ !call mpp_set_stack_size(3145746)
+ call mpp_domains_set_stack_size(4000000)
+
+ read(input_nml_file, nml=test_global_arrays_nml, iostat=io)
+ ierr = check_nml_error(io, 'test_global_arrays_nml')
pe = mpp_pe()
npes = mpp_npes()
call mpp_set_root_pe(0)
root = mpp_root_pe()
+ if( test_simple) then
+ call test_mpp_global_simple()
+ deallocate(dataI4, dataI8, dataR4, dataR8, rands)
+ deallocate(dataR4_shuf, dataR8_shuf,dataI4_shuf, dataI8_shuf)
+ else if(test_full) then
+ call test_global_reduce( 'Simple')
+ call test_global_reduce( 'Simple symmetry center')
+ call test_global_reduce( 'Simple symmetry corner')
+ call test_global_reduce( 'Simple symmetry east')
+ call test_global_reduce( 'Simple symmetry north')
+ call test_global_reduce( 'Cyclic symmetry center')
+ call test_global_reduce( 'Cyclic symmetry corner')
+ call test_global_reduce( 'Cyclic symmetry east')
+ call test_global_reduce( 'Cyclic symmetry north')
+ else
+ call mpp_error(FATAL, "test_global_arrays: either test_simple or test_full must be true in input.nml")
+ endif
+ call mpp_sync()
+
+ call mpp_domains_exit()
+ call MPI_FINALIZE(ierr)
+
+ contains
+
+subroutine test_mpp_global_simple()
+
!> define domains and allocate
- call mpp_define_domains( (/1,length,1,length/), (/4,2/), domain, xhalo=0)
+ call mpp_define_domains( (/1,length,1,length/), (/1,8/), domain, xhalo=0)
call mpp_get_compute_domain(domain, jsc, jec, isc, iec)
call mpp_get_data_domain(domain, jsd, jed, isd, ied)
allocate(dataI4(jsd:jed, isd:ied),dataI8(jsd:jed, isd:ied), rands(length*length))
allocate(dataR4(jsd:jed, isd:ied), dataR8(jsd:jed, isd:ied))
allocate(dataR4_shuf(jsd:jed, isd:ied), dataR8_shuf(jsd:jed, isd:ied))
allocate(dataI4_shuf(jsd:jed, isd:ied), dataI8_shuf(jsd:jed, isd:ied))
+ allocate(recv_data_r4(jsd:jed, isd:ied), recv_data_r8(jsd:jed, isd:ied))
+ allocate(recv_data_i4(jsd:jed, isd:ied), recv_data_i8(jsd:jed, isd:ied))
dataI4 = 0; dataI8 = 0; dataR4 = 0.0; dataR8 = 0.0
dataR8_shuf=0.0; dataR4_shuf=0.0;dataI8_shuf=0; dataI4_shuf=0
@@ -166,97 +207,92 @@ program test_global_arrays
NEW_LINE('a')//"Sum: "// strTmp1 )
endif
- !> shuffle real data ordering and copy into array with 5 ranks
- dataR4_shuf = dataR4
- dataR8_shuf = dataR8
- call shuffleDataR4(dataR4_shuf)
- call shuffleDataR8(dataR8_shuf)
- allocate(dataR4_5d(jsd:jed, isd:ied, 1, 1, 1), dataR8_5d(jsd:jed,isd:ied, 1, 1, 1))
-
- dataR4_5d = 0.0
- dataR8_5d = 0.0
-
- do i=isc,iec
- do j=jsc,jec
- dataR4_5d(j, i, 1, 1, 1) = dataR4_shuf(j, i)
- dataR8_5d(j, i, 1, 1, 1) = dataR8_shuf(j, i)
- end do
- end do
+ !> moves the data onto different PEs and checks that the sum still matches
+ dataR4_shuf = dataR4 ; dataR8_shuf = dataR8
+ dataI4_shuf = dataI4 ; dataI8_shuf = dataI8
+ !! swap data with neighboring pe
+ if(modulo(pe, 2) .eq. 0) then
+ print *, pe, pe+1, SUM(dataR8_shuf)
+ call mpp_send(dataR4_shuf, SIZE(dataR4_shuf), pe+1)
+ call mpp_recv(recv_data_r4, SIZE(dataR4_shuf), pe+1)
+ call mpp_sync()
+ call mpp_send(dataR8_shuf, SIZE(dataR8_shuf), pe+1)
+ call mpp_recv(recv_data_r8, SIZE(dataR8_shuf), pe+1)
+ call mpp_sync()
+ call mpp_send(dataI4_shuf, SIZE(dataI4_shuf), pe+1)
+ call mpp_recv(recv_data_I4, SIZE(dataI4_shuf), pe+1)
+ call mpp_sync()
+ call mpp_send(dataI8_shuf, SIZE(dataI8_shuf), pe+1)
+ call mpp_recv(recv_data_I8, SIZE(dataI8_shuf), pe+1)
+ else
+ print *, pe, pe-1, SUM(dataR8_shuf)
+ call mpp_recv(recv_data_r4, SIZE(dataR4_shuf), pe-1)
+ call mpp_send(dataR4_shuf, SIZE(dataR4_shuf), pe-1)
+ call mpp_sync()
+ call mpp_recv(recv_data_r8, SIZE(dataR8_shuf), pe-1)
+ call mpp_send(dataR8_shuf, SIZE(dataR8_shuf), pe-1)
+ call mpp_sync()
+ call mpp_send(dataI4_shuf, SIZE(dataI4_shuf), pe-1)
+ call mpp_recv(recv_data_I4, SIZE(dataI4_shuf), pe-1)
+ call mpp_sync()
+ call mpp_send(dataI8_shuf, SIZE(dataI8_shuf), pe-1)
+ call mpp_recv(recv_data_I8, SIZE(dataI8_shuf), pe-1)
+ endif
call mpp_sync()
+ dataR4_shuf = recv_data_r4
+ dataR8_shuf = recv_data_r8
- call mpp_error(NOTE, "----------Testing 32-bit real mpp_global_sum with 5 ranks and reordering----------")
- call mpp_update_domains(dataR4_5d, domain)
- sumR4_5d = mpp_global_sum(domain, dataR4_5d)
+ call mpp_error(NOTE, "----------Testing 32-bit real mpp_global_sum with reordering----------")
+ call mpp_update_domains(dataR4_shuf, domain)
+ sumR4_shuf = mpp_global_sum(domain, dataR4_shuf)
! check that shuffled array results are approximately the same as the original array
- if(abs(sumR4-sumR4_5d) .gt. 1E-4 ) then
+ if(abs(sumR4-sumR4_shuf) .gt. 1E-4 ) then
strTmp1 = ""; strTmp2=""
- write(strTmp1,*) sumR4_5d
+ write(strTmp1,*) sumR4_shuf
write(strTmp2,*) sumR4
call mpp_error(FATAL,"test_global_arrays: invalid 32-bit real answer after reordering"// &
NEW_LINE('a')//"Sum: "// strTmp1// " ne "//strTmp2)
endif
- call mpp_error(NOTE, "----------Testing 64-bit real mpp_global_sum with 5 ranks and reordering----------")
- call mpp_update_domains(dataR8_5d, domain)
- sumR8_5d = mpp_global_sum(domain, dataR8_5d)
+ call mpp_sync()
+ call mpp_error(NOTE, "----------Testing 64-bit real mpp_global_sum with reordering----------")
+ call mpp_update_domains(dataR8_shuf, domain)
+ sumR8_shuf = mpp_global_sum(domain, dataR8_shuf)
! check that shuffled array results are approximately the same as the original array
- if(abs(sumR8-sumR8_5d) .gt. 1E-7) then
+ if(abs(sumR8-sumR8_shuf) .gt. 1E-7) then
strTmp1 = ""; strTmp2=""
- write(strTmp1,*) sumR8_5d
+ write(strTmp1,*) sumR8_shuf
write(strTmp2,*) sumR8
call mpp_error(FATAL,"test_global_arrays: invalid 64-bit real answer after reordering"// &
NEW_LINE('a')//"Sum: "// strTmp1// " ne "//strTmp2)
endif
- !> shuffle integer data ordering and copy into array with 5 ranks
- dataI4_shuf = dataI4
- dataI8_shuf = dataI8
- call shuffleDataI4(dataI4_shuf)
- call shuffleDataI8(dataI8_shuf)
- allocate(dataI4_5d(jsd:jed, isd:ied, 1, 1, 1), dataI8_5d(jsd:jed,isd:ied, 1, 1, 1))
-
- dataI4_5d = 0
- dataI8_5d = 0
- do i=isc,iec
- do j=jsc,jec
- dataI4_5d(j, i, 1, 1, 1) = dataI4_shuf(j, i)
- dataI8_5d(j, i, 1, 1, 1) = dataI8_shuf(j, i)
- end do
- end do
- call mpp_sync()
-
- call mpp_error(NOTE, "----------Testing 32-bit integer mpp_global_sum with 5 ranks and reordering----------")
- call mpp_update_domains(dataI4_5d, domain)
- sumI4_5d = mpp_global_sum(domain, dataI4_5d)
+ call mpp_error(NOTE, "----------Testing 32-bit integer mpp_global_sum with reordering----------")
+ call mpp_update_domains(dataI4_shuf, domain)
+ sumI4_shuf = mpp_global_sum(domain, dataI4_shuf)
! check that shuffled array results are approximately the same as the original array
- if(sumI4 .ne. sumI4_5d) then
+ if(sumI4 .ne. sumI4_shuf) then
strTmp1 = ""; strTmp2=""
- write(strTmp1,*) sumI4_5d
+ write(strTmp1,*) sumI4_shuf
write(strTmp2,*) sumI4
call mpp_error(FATAL,"test_global_arrays: invalid 32-bit integer answer after reordering"// &
NEW_LINE('a')//"Sum: "// strTmp1// " ne "//strTmp2)
endif
- call mpp_error(NOTE, "----------Testing 64-bit integer mpp_global_sum with 5 ranks and reordering----------")
- call mpp_update_domains(dataI8_5d, domain)
- sumI8_5d = mpp_global_sum(domain, dataI8_5d)
+ call mpp_error(NOTE, "----------Testing 64-bit integer mpp_global_sum with reordering----------")
+ call mpp_update_domains(dataI8_shuf, domain)
+ sumI8_shuf = mpp_global_sum(domain, dataI8_shuf)
! check that shuffled array results are approximately the same as the original array
- if(sumI8 .ne. sumI8_5d) then
+ if(sumI8 .ne. sumI8_shuf) then
strTmp1 = ""; strTmp2=""
- write(strTmp1,*) sumI8_5d
+ write(strTmp1,*) sumI8_shuf
write(strTmp2,*) sumI8
call mpp_error(FATAL,"test_global_arrays: invalid 64-bit integer answer after reordering"// &
NEW_LINE('a')//"Sum: "// strTmp1// " ne "//strTmp2)
endif
-
- deallocate(dataI4, dataI8, dataR4, dataR8, rands, dataI4_5d, dataI8_5d, dataR4_5d, dataR8_5d)
- deallocate(dataR4_shuf, dataR8_shuf,dataI4_shuf, dataI8_shuf)
- call mpp_domains_exit()
- call MPI_FINALIZE(ierr)
-
- contains
+end subroutine test_mpp_global_simple
!> true if all pes return the same result and have a lower/higher local max/min
function checkResultInt4(res)
@@ -368,7 +404,6 @@ function checkSumReal4(gsum)
real(r4_kind),intent(in) :: gsum
real(r4_kind),allocatable :: recv(:) !> pe's local sum at 1, global sum at 2
real(r4_kind) :: nsum
- integer :: i
allocate(recv(2))
! root receives and sums local sums from each pe
@@ -402,7 +437,6 @@ function checkSumReal8(gsum)
real(r8_kind),intent(in) :: gsum
real(r8_kind),allocatable :: recv(:) !> pe's local sum at 1, global sum at 2
real(r8_kind) :: nsum
- integer :: i
allocate(recv(2))
! root receives and sums local sums from each pe
@@ -436,7 +470,6 @@ function checkSumInt4(gsum)
integer(i4_kind),intent(in) :: gsum
integer(i4_kind),allocatable :: recv(:) !> pe's local sum at 1, global sum at 2
integer(i4_kind) :: nsum
- integer :: i
allocate(recv(2))
! root receives and sums local sums from each pe
@@ -470,7 +503,6 @@ function checkSumInt8(gsum)
integer(i8_kind),intent(in) :: gsum
integer(i8_kind),allocatable :: recv(:) !> pe's local sum at 1, global sum at 2
integer(i8_kind) :: nsum
- integer :: i
allocate(recv(2))
! root receives and sums local sums from each pe
@@ -497,192 +529,123 @@ function checkSumInt8(gsum)
deallocate(recv)
end function checkSumInt8
-!> aggregates data on root and randomizes ordering, then sends partitions back to pes
-subroutine shuffleDataI4(dataI4)
- integer(i4_kind), intent(INOUT) :: dataI4(:,:)
- integer(i4_kind), allocatable :: trans(:,:), shuffled(:),tmp
- integer :: rind
-
- allocate(trans(SIZE(dataI4,1), SIZE(dataI4,2)))
- allocate(shuffled(1:length*length))
-
- if( pe.eq.root) then
- !> get array partitions and aggregate into 1d
- shuffled(1:SIZE(dataI4)) = RESHAPE(dataI4, (/SIZE(dataI4)/))
- do i=1, npes-1
- call mpp_recv(trans, SIZE(dataI4) , i)
- shuffled( SIZE(trans)*i+1 : SIZE(trans)*(i+1)) = RESHAPE(trans, (/SIZE(trans)/))
- end do
-
- !> shuffle order
- do i=1, length*length
- rind = (rands(i) * length * length)
- if( rind .eq. 0) then
- rind = 1
- endif
- tmp = shuffled(i)
- shuffled(i) = shuffled(rind)
- shuffled(rind) = tmp
- end do
- trans = 0
-
- !> send back to pes
- do i=0, npes-1
- trans = RESHAPE(shuffled(SIZE(trans)*i + 1:SIZE(trans)*(i+1)), &
- (/SIZE(trans,1), SIZE(trans,2) /) )
- if(i.ne.root) then
- call mpp_send(trans, SIZE(trans), i)
- else
- dataI4 = trans
- endif
- end do
- else
- call mpp_send(dataI4, SIZE(dataI4), root)
- call mpp_recv(trans, SIZE(dataI4), root)
- dataI4 = trans
- endif
- deallocate(trans, shuffled)
-end subroutine shuffleDataI4
-
-!> aggregates data on root and randomizes ordering, then sends partitions back to pes
-subroutine shuffleDataI8(dataI8)
- integer(i8_kind), intent(INOUT) :: dataI8(:,:)
- integer(i8_kind), allocatable :: trans(:,:), shuffled(:), tmp
- integer :: rind
-
- allocate(trans(SIZE(dataI8,1), SIZE(dataI8,2)))
- allocate(shuffled(1:length*length))
-
- if( pe.eq.root) then
- !> get array partitions and aggregate into 1d
- shuffled(1:SIZE(dataI8)) = RESHAPE(dataI8, (/SIZE(dataI8)/))
- do i=1, npes-1
- call mpp_recv(trans, SIZE(dataI8) , i)
- shuffled( SIZE(trans)*i+1 : SIZE(trans)*(i+1)) = RESHAPE(trans, (/SIZE(trans)/))
- end do
-
- !> shuffle order
- do i=1, length*length
- rind = (rands(i) * length * length)
- if( rind .eq. 0) then
- rind = 1
- endif
- tmp = shuffled(i)
- shuffled(i) = shuffled(rind)
- shuffled(rind) = tmp
- end do
- trans = 0
-
- !> send back to pes
- do i=0, npes-1
- trans = RESHAPE(shuffled(SIZE(trans)*i + 1:SIZE(trans)*(i+1)), &
- (/SIZE(trans,1), SIZE(trans,2) /) )
- if(i.ne.root) then
- call mpp_send(trans, SIZE(trans), i)
- else
- dataI8 = trans
- endif
- end do
- else
- call mpp_send(dataI8, SIZE(dataI8), root)
- call mpp_recv(trans, SIZE(dataI8), root)
- dataI8 = trans
- endif
- deallocate(trans, shuffled)
-end subroutine shuffleDataI8
-
-!> aggregates 32-bit real data on root and randomizes ordering, then sends partitions back to pes
-subroutine shuffleDataR4(dataR4)
- real(r4_kind), intent(INOUT) :: dataR4(:,:)
- real(r4_kind), allocatable :: trans(:,:), shuffled(:), tmp
- integer :: rind
-
- allocate(trans(SIZE(dataR4,1), SIZE(dataR4,2)))
- allocate(shuffled(1:length*length))
-
- if( pe.eq.root) then
- !> get array partitions and aggregate into 1d
- shuffled(1:SIZE(dataR4)) = RESHAPE(dataR4, (/SIZE(dataR4)/))
- do i=1, npes-1
- call mpp_recv(trans, SIZE(dataR4) , i)
- shuffled( SIZE(trans)*i+1 : SIZE(trans)*(i+1)) = RESHAPE(trans, (/SIZE(trans)/))
- end do
-
- !> shuffle order
- do i=1, length*length
- rind = (rands(i) * length * length)
- if( rind .eq. 0) then
- rind = 1
- endif
- tmp = shuffled(i)
- shuffled(i) = shuffled(rind)
- shuffled(rind) = tmp
- end do
- trans = 0
-
- !> send back to pes
- do i=0, npes-1
- trans = RESHAPE(shuffled(SIZE(trans)*i + 1:SIZE(trans)*(i+1)), &
- (/SIZE(trans,1), SIZE(trans,2) /) )
- if(i.ne.root) then
- call mpp_send(trans, SIZE(trans), i)
- else
- dataR4 = trans
- endif
- end do
- else
- call mpp_send(dataR4, SIZE(dataR4), root)
- call mpp_recv(trans, SIZE(dataR4), root)
- dataR4 = trans
- endif
- deallocate(trans, shuffled)
-end subroutine shuffleDataR4
-
-!> aggregates 64-bit real data on root and randomizes ordering, then sends partitions back to pes
-subroutine shuffleDataR8(dataR8)
- real(r8_kind), intent(INOUT) :: dataR8(:,:)
- real(r8_kind), allocatable :: trans(:,:), shuffled(:), tmp
- integer :: rind
-
- allocate(trans(SIZE(dataR8,1), SIZE(dataR8,2)))
- allocate(shuffled(1:length*length))
-
- if( pe.eq.root) then
- !> get array partitions and aggregate into 1d
- shuffled(1:SIZE(dataR8)) = RESHAPE(dataR8, (/SIZE(dataR8)/))
- do i=1, npes-1
- call mpp_recv(trans, SIZE(dataR8) , i)
- shuffled( SIZE(trans)*i+1 : SIZE(trans)*(i+1)) = RESHAPE(trans, (/SIZE(trans)/))
- end do
-
- !> shuffle order
- do i=1, length*length
- rind = (rands(i) * length * length)
- if( rind .eq. 0) then
- rind = 1
- endif
- tmp = shuffled(i)
- shuffled(i) = shuffled(rind)
- shuffled(rind) = tmp
- end do
- trans = 0
-
- !> send back to pes
- do i=0, npes-1
- trans = RESHAPE(shuffled(SIZE(trans)*i + 1:SIZE(trans)*(i+1)), &
- (/SIZE(trans,1), SIZE(trans,2) /) )
- if(i.ne.root) then
- call mpp_send(trans, SIZE(trans), i)
- else
- dataR8 = trans
- endif
- end do
- else
- call mpp_send(dataR8, SIZE(dataR8), root)
- call mpp_recv(trans, SIZE(dataR8), root)
- dataR8 = trans
- endif
- deallocate(trans, shuffled)
-end subroutine shuffleDataR8
+ !--- test mpp_global_sum, mpp_global_min and mpp_global_max
+ subroutine test_global_reduce (type)
+ character(len=*), intent(in) :: type
+ real :: lsum, gsum, lmax, gmax, lmin, gmin
+ integer :: ni, nj, ishift, jshift, position, k
+ integer :: is, ie, js, je !, isd, ied, jsd, jed
+ integer :: nx=128, ny=128, nz=40, stackmax=4000000
+ integer :: layout(2)
+ integer :: whalo = 2, ehalo = 2, shalo = 2, nhalo = 2
+ real, allocatable, dimension(:,:,:) :: global1, x
+ real, allocatable, dimension(:,:) :: global2D
+ !--- set up domain
+ call mpp_define_layout( (/1,nx,1,ny/), npes, layout )
+ select case(type)
+ case( 'Simple' )
+ call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, &
+ shalo=shalo, nhalo=nhalo, name=type )
+ case( 'Simple symmetry center', 'Simple symmetry corner', 'Simple symmetry east', 'Simple symmetry north' )
+ call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, &
+ shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. )
+ case( 'Cyclic symmetry center', 'Cyclic symmetry corner', 'Cyclic symmetry east', 'Cyclic symmetry north' )
+ call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo,&
+ name=type, symmetry = .true., xflags=CYCLIC_GLOBAL_DOMAIN, &
+ & yflags=CYCLIC_GLOBAL_DOMAIN )
+ case default
+ call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' )
+ end select
+ call mpp_get_compute_domain( domain, is, ie, js, je )
+ call mpp_get_data_domain ( domain, isd, ied, jsd, jed )
+
+ !--- determine if an extra point is needed
+ ishift = 0; jshift = 0; position = CENTER
+ select case(type)
+ case ('Simple symmetry corner', 'Cyclic symmetry corner')
+ ishift = 1; jshift = 1; position = CORNER
+ case ('Simple symmetry east', 'Cyclic symmetry east' )
+ ishift = 1; jshift = 0; position = EAST
+ case ('Simple symmetry north', 'Cyclic symmetry north')
+ ishift = 0; jshift = 1; position = NORTH
+ end select
+
+ ie = ie+ishift; je = je+jshift
+ ied = ied+ishift; jed = jed+jshift
+ ni = nx+ishift; nj = ny+jshift
+ allocate(global1(1-whalo:ni+ehalo, 1-shalo:nj+nhalo, nz))
+ global1 = 0.0
+ do k = 1,nz
+ do j = 1,nj
+ do i = 1,ni
+ global1(i,j,k) = k + i*1e-3 + j*1e-6
+ end do
+ end do
+ enddo
+
+ !--- NOTE: even though the domain is cyclic, no need to apply cyclic condition on the global data
+
+ allocate( x (isd:ied,jsd:jed,nz) )
+ allocate( global2D(ni,nj))
+
+ x(:,:,:) = global1(isd:ied,jsd:jed,:)
+ do j = 1, nj
+ do i = 1, ni
+ global2D(i,j) = sum(global1(i,j,:))
+ enddo
+ enddo
+ !test mpp_global_sum
+
+ if(type(1:6) == 'Simple') then
+ gsum = sum( global2D(1:ni,1:nj) )
+ else
+ gsum = sum( global2D(1:nx, 1:ny) )
+ endif
+ id = mpp_clock_id( type//' sum', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
+ call mpp_clock_begin(id)
+ lsum = mpp_global_sum( domain, x, position = position )
+ call mpp_clock_end (id)
+ if( pe.EQ.mpp_root_pe() )print '(a,2es15.8,a,es12.4)', type//' Fast sum=', lsum, gsum
+
+ !test exact mpp_global_sum
+ id = mpp_clock_id( type//' exact sum', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
+ call mpp_clock_begin(id)
+ lsum = mpp_global_sum( domain, x, BITWISE_EXACT_SUM, position = position )
+ call mpp_clock_end (id)
+ !--- The following check will fail on altix in normal mode, but it is ok
+ !--- in debugging mode. It is ok on irix.
+ call compare_data_scalar(lsum, gsum, FATAL, type//' mpp_global_exact_sum')
+
+ !test mpp_global_min
+ gmin = minval(global1(1:ni, 1:nj, :))
+ id = mpp_clock_id( type//' min', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
+ call mpp_clock_begin(id)
+ lmin = mpp_global_min( domain, x, position = position )
+ call mpp_clock_end (id)
+ call compare_data_scalar(lmin, gmin, FATAL, type//' mpp_global_min')
+
+ !test mpp_global_max
+ gmax = maxval(global1(1:ni, 1:nj, :))
+ id = mpp_clock_id( type//' max', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
+ call mpp_clock_begin(id)
+ lmax = mpp_global_max( domain, x, position = position )
+ call mpp_clock_end (id)
+ call compare_data_scalar(lmax, gmax, FATAL, type//' mpp_global_max' )
+
+ deallocate(global1, x)
+
+ end subroutine test_global_reduce
+
+ subroutine compare_data_scalar( a, b, action, string )
+ real, intent(in) :: a, b
+ integer, intent(in) :: action
+ character(len=*), intent(in) :: string
+ if( a .EQ. b)then
+ if( pe.EQ.mpp_root_pe() )call mpp_error( NOTE, trim(string)//': data comparison is OK.' )
+ else
+ call mpp_error( action, trim(string)//': data comparison is not OK.' )
+ end if
+
+ end subroutine compare_data_scalar
end program test_global_arrays
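A minimal sketch of the even/odd pairwise swap used above, written against raw MPI rather than the mpp wrappers so it is self-contained (an assumption, not the test's actual calls): even ranks exchange with rank+1, odd ranks with rank-1, and the global sum is invariant under the exchange, which is what the test asserts. Assumes an even number of ranks, as the test does.

    program pairwise_swap_sketch
      use mpi
      implicit none
      integer :: ierr, pe, partner
      integer :: stat(MPI_STATUS_SIZE)
      real(8) :: mine(4), theirs(4), lsum, gsum_before, gsum_after
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, pe, ierr)
      mine = real(pe, 8) + [1.d0, 2.d0, 3.d0, 4.d0]
      lsum = sum(mine)
      call MPI_Allreduce(lsum, gsum_before, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)
      partner = merge(pe+1, pe-1, modulo(pe, 2) == 0)   ! even<->odd pairing
      call MPI_Sendrecv(mine,   4, MPI_DOUBLE_PRECISION, partner, 0, &
                        theirs, 4, MPI_DOUBLE_PRECISION, partner, 0, &
                        MPI_COMM_WORLD, stat, ierr)
      lsum = sum(theirs)
      call MPI_Allreduce(lsum, gsum_after, 1, MPI_DOUBLE_PRECISION, MPI_SUM, MPI_COMM_WORLD, ierr)
      if (pe == 0) print *, 'global sum before/after swap:', gsum_before, gsum_after
      call MPI_Finalize(ierr)
    end program pairwise_swap_sketch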
diff --git a/test_fms/mpp/test_global_arrays.sh b/test_fms/mpp/test_global_arrays.sh
index 596d1ecb0a..18390415e5 100755
--- a/test_fms/mpp/test_global_arrays.sh
+++ b/test_fms/mpp/test_global_arrays.sh
@@ -27,10 +27,26 @@
# Set common test settings.
. ../test-lib.sh
-# ensure input.nml file present
-touch input.nml
+cat <<_EOF > input.nml
+&test_global_arrays_nml
+ test_simple = .true.
+ test_full = .false.
+/
+_EOF
-test_expect_success "global array functions with mixed precision" '
+test_expect_success "mpp_global_sum/max/min with simple domain" '
mpirun -n 8 ./test_global_arrays
'
+
+cat <<_EOF > input.nml
+&test_global_arrays_nml
+ test_simple = .false.
+ test_full = .true.
+/
+_EOF
+
+test_expect_success "mpp_global_sum/max/min with symmetry and halos" '
+ mpirun -n 6 ./test_global_arrays
+'
+
test_done
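For reference, a standalone sketch of the namelist-driven test selection wired up above. FMS itself reads from the internal input_nml_file buffer and validates with check_nml_error; this sketch reads input.nml directly with plain Fortran I/O instead.

    program nml_select_sketch
      implicit none
      logical :: test_simple = .false., test_full = .false.
      integer :: io, unit
      namelist /test_global_arrays_nml/ test_simple, test_full
      open(newunit=unit, file='input.nml', status='old', action='read')
      read(unit, nml=test_global_arrays_nml, iostat=io)
      close(unit)
      if (io /= 0) stop 'error reading test_global_arrays_nml'
      if (test_simple) then
        print *, 'would run the simple max/min/sum tests'
      else if (test_full) then
        print *, 'would run the halo/symmetry reduction tests'
      else
        stop 'either test_simple or test_full must be true'
      end if
    end program nml_select_sketch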
diff --git a/test_fms/mpp/test_mpp_domains.F90 b/test_fms/mpp/test_mpp_domains.F90
index ab9ba1a447..1ae1d904da 100644
--- a/test_fms/mpp/test_mpp_domains.F90
+++ b/test_fms/mpp/test_mpp_domains.F90
@@ -250,17 +250,6 @@ program test_mpp_domains
call test_uniform_mosaic('Cubic-Grid') ! 6 tiles.
call test_nonuniform_mosaic('Five-Tile')
- if(.not. wide_halo) then
- call test_global_reduce( 'Simple')
- call test_global_reduce( 'Simple symmetry center')
- call test_global_reduce( 'Simple symmetry corner')
- call test_global_reduce( 'Simple symmetry east')
- call test_global_reduce( 'Simple symmetry north')
- call test_global_reduce( 'Cyclic symmetry center')
- call test_global_reduce( 'Cyclic symmetry corner')
- call test_global_reduce( 'Cyclic symmetry east')
- call test_global_reduce( 'Cyclic symmetry north')
- endif
call test_redistribute( 'Complete pelist' )
call test_redistribute( 'Overlap pelist' )
@@ -6057,112 +6046,6 @@ subroutine test_cyclic_offset( type )
end subroutine test_cyclic_offset
- !--- test mpp_global_sum, mpp_global_min and mpp_global_max
- subroutine test_global_reduce (type)
- character(len=*), intent(in) :: type
- real :: lsum, gsum, lmax, gmax, lmin, gmin
- integer :: ni, nj, ishift, jshift, position
- integer :: is, ie, js, je, isd, ied, jsd, jed
-
- type(domain2D) :: domain
- real, allocatable, dimension(:,:,:) :: global1, x
- real, allocatable, dimension(:,:) :: global2D
- !--- set up domain
- call mpp_define_layout( (/1,nx,1,ny/), npes, layout )
- select case(type)
- case( 'Simple' )
- call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, &
- shalo=shalo, nhalo=nhalo, name=type )
- case( 'Simple symmetry center', 'Simple symmetry corner', 'Simple symmetry east', 'Simple symmetry north' )
- call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, &
- shalo=shalo, nhalo=nhalo, name=type, symmetry = .true. )
- case( 'Cyclic symmetry center', 'Cyclic symmetry corner', 'Cyclic symmetry east', 'Cyclic symmetry north' )
- call mpp_define_domains( (/1,nx,1,ny/), layout, domain, whalo=whalo, ehalo=ehalo, shalo=shalo, nhalo=nhalo,&
- name=type, symmetry = .true., xflags=CYCLIC_GLOBAL_DOMAIN, &
- & yflags=CYCLIC_GLOBAL_DOMAIN )
- case default
- call mpp_error( FATAL, 'TEST_MPP_DOMAINS: no such test: '//type//' in test_global_field' )
- end select
- call mpp_get_compute_domain( domain, is, ie, js, je )
- call mpp_get_data_domain ( domain, isd, ied, jsd, jed )
-
- !--- determine if an extra point is needed
- ishift = 0; jshift = 0; position = CENTER
- select case(type)
- case ('Simple symmetry corner', 'Cyclic symmetry corner')
- ishift = 1; jshift = 1; position = CORNER
- case ('Simple symmetry east', 'Cyclic symmetry east' )
- ishift = 1; jshift = 0; position = EAST
- case ('Simple symmetry north', 'Cyclic symmetry north')
- ishift = 0; jshift = 1; position = NORTH
- end select
-
- ie = ie+ishift; je = je+jshift
- ied = ied+ishift; jed = jed+jshift
- ni = nx+ishift; nj = ny+jshift
- allocate(global1(1-whalo:ni+ehalo, 1-shalo:nj+nhalo, nz))
- global1 = 0.0
- do k = 1,nz
- do j = 1,nj
- do i = 1,ni
- global1(i,j,k) = k + i*1e-3 + j*1e-6
- end do
- end do
- enddo
-
- !--- NOTE: even though the domain is cyclic, no need to apply cyclic condition on the global data
-
- allocate( x (isd:ied,jsd:jed,nz) )
- allocate( global2D(ni,nj))
-
- x(:,:,:) = global1(isd:ied,jsd:jed,:)
- do j = 1, nj
- do i = 1, ni
- global2D(i,j) = sum(global1(i,j,:))
- enddo
- enddo
- !test mpp_global_sum
-
- if(type(1:6) == 'Simple') then
- gsum = sum( global2D(1:ni,1:nj) )
- else
- gsum = sum( global2D(1:nx, 1:ny) )
- endif
- id = mpp_clock_id( type//' sum', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
- call mpp_clock_begin(id)
- lsum = mpp_global_sum( domain, x, position = position )
- call mpp_clock_end (id)
- if( pe.EQ.mpp_root_pe() )print '(a,2es15.8,a,es12.4)', type//' Fast sum=', lsum, gsum
-
- !test exact mpp_global_sum
- id = mpp_clock_id( type//' exact sum', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
- call mpp_clock_begin(id)
- lsum = mpp_global_sum( domain, x, BITWISE_EXACT_SUM, position = position )
- call mpp_clock_end (id)
- !--- The following check will fail on altix in normal mode, but it is ok
- !--- in debugging mode. It is ok on irix.
- call compare_data_scalar(lsum, gsum, FATAL, type//' mpp_global_exact_sum')
-
- !test mpp_global_min
- gmin = minval(global1(1:ni, 1:nj, :))
- id = mpp_clock_id( type//' min', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
- call mpp_clock_begin(id)
- lmin = mpp_global_min( domain, x, position = position )
- call mpp_clock_end (id)
- call compare_data_scalar(lmin, gmin, FATAL, type//' mpp_global_min')
-
- !test mpp_global_max
- gmax = maxval(global1(1:ni, 1:nj, :))
- id = mpp_clock_id( type//' max', flags=MPP_CLOCK_SYNC+MPP_CLOCK_DETAILED )
- call mpp_clock_begin(id)
- lmax = mpp_global_max( domain, x, position = position )
- call mpp_clock_end (id)
- call compare_data_scalar(lmax, gmax, FATAL, type//' mpp_global_max' )
-
- deallocate(global1, x)
-
- end subroutine test_global_reduce
-
subroutine test_parallel_2D ( )
integer :: npes, layout(2), i, j, k,is, ie, js, je, isd, ied, jsd, jed
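A serial sketch of why these tests compare sums against tolerances and exercise a separate BITWISE_EXACT_SUM path: floating-point addition is not associative, so a distributed "fast" sum can differ from a serial reference in the last bits.

    program sum_order_sketch
      implicit none
      real(4) :: a(6), s_fwd, s_rev
      integer :: i
      a = [1.0e8, 1.0, -1.0e8, 3.0, 0.25, -3.0]
      s_fwd = 0.0
      do i = 1, 6
        s_fwd = s_fwd + a(i)   ! left-to-right accumulation
      end do
      s_rev = 0.0
      do i = 6, 1, -1
        s_rev = s_rev + a(i)   ! right-to-left accumulation
      end do
      print *, 'forward =', s_fwd, ' reverse =', s_rev   ! typically differ
    end program sum_order_sketch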
From e6225adaef3008355746a3a07fc0675b22132325 Mon Sep 17 00:00:00 2001
From: JONG KIM
Date: Thu, 13 Jul 2023 14:53:04 -0400
Subject: [PATCH 06/11] feat: update mpp_do_update_ad.fh to resolve JEDI
requirement (#1225)
---
mpp/include/mpp_do_global_field_ad.fh | 4 +-
mpp/include/mpp_do_updateV_ad.fh | 2 +-
mpp/include/mpp_do_update_ad.fh | 173 ++++++++++++++++++-------
mpp/include/mpp_get_boundary_ad.fh | 2 +-
mpp/include/mpp_global_field_ad.fh | 4 +-
mpp/include/mpp_sum_mpi_ad.fh | 2 +-
mpp/include/mpp_sum_nocomm_ad.fh | 2 +-
mpp/include/mpp_update_domains2D_ad.fh | 16 +--
8 files changed, 143 insertions(+), 62 deletions(-)
diff --git a/mpp/include/mpp_do_global_field_ad.fh b/mpp/include/mpp_do_global_field_ad.fh
index 5c72b5adbf..d32e6aa4b8 100644
--- a/mpp/include/mpp_do_global_field_ad.fh
+++ b/mpp/include/mpp_do_global_field_ad.fh
@@ -22,8 +22,8 @@
!> @addtogroup mpp_domains_mod
!> @{
- !> Gets a global field from a local field
- !! local field may be on compute OR data domain
+ !> Gets a local ad field from a global field
+ !! global field may be on compute OR data domain
subroutine MPP_DO_GLOBAL_FIELD_3D_AD_( domain, local, global, tile, ishift, jshift, flags, default_data)
type(domain2D), intent(in) :: domain
MPP_TYPE_, intent(inout) :: local(:,:,:)
diff --git a/mpp/include/mpp_do_updateV_ad.fh b/mpp/include/mpp_do_updateV_ad.fh
index d6cce14abf..8d230f501c 100644
--- a/mpp/include/mpp_do_updateV_ad.fh
+++ b/mpp/include/mpp_do_updateV_ad.fh
@@ -21,7 +21,7 @@
!***********************************************************************
!> @addtogroup mpp_domains_mod
!> @{
- !> Updates data domain of 3D field whose computational domains have been computed
+ !> Updates data domain of 3D ad field whose computational domains have been computed
subroutine MPP_DO_UPDATE_AD_3D_V_(f_addrsx,f_addrsy, domain, update_x, update_y, &
d_type, ke, gridtype, flags)
integer(i8_kind), intent(in) :: f_addrsx(:,:), f_addrsy(:,:)
diff --git a/mpp/include/mpp_do_update_ad.fh b/mpp/include/mpp_do_update_ad.fh
index 7afbe8317d..7e7382dcb8 100644
--- a/mpp/include/mpp_do_update_ad.fh
+++ b/mpp/include/mpp_do_update_ad.fh
@@ -1,6 +1,4 @@
! -*-f90-*-
-
-
!***********************************************************************
!* GNU Lesser General Public License
!*
@@ -21,8 +19,12 @@
!***********************************************************************
!> @addtogroup mpp_domains_mod
!> @{
-
- !> Updates data domain of 3D field whose computational domains have been computed
+ !> Updates data domain of 3D ad field whose computational domains have been computed
+ !! @brief Applies linear adjoint operation to 3D field based on duality of MPP_DO_UPDATE_3D_
+ !! @note Adjoint duality exists between MPI_SEND and MPI_RECV.
+ !! However, a checkpoint is needed for the forward buffer information.
+ !! ref: B.N. Cheng, "A Duality between Forward and Adjoint MPI Communication Routines",
+ !! Computational Methods in Science and Technology, Special Issue 2006, 23-24
subroutine MPP_DO_UPDATE_AD_3D_( f_addrs, domain, update, d_type, ke, flags)
integer(i8_kind), intent(in) :: f_addrs(:,:)
type(domain2D), intent(in) :: domain
@@ -35,6 +37,7 @@
pointer(ptr_field, field)
integer :: update_flags
type(overlap_type), pointer :: overPtr => NULL()
+ character(len=8) :: text
!equate to mpp_domains_stack
MPP_TYPE_ :: buffer(size(mpp_domains_stack(:)))
@@ -43,13 +46,16 @@
!receive domains saved here for unpacking
!for non-blocking version, could be recomputed
- integer, allocatable :: msg1(:), msg2(:)
+ integer, allocatable :: msg1(:), msg2(:), msg3(:)
logical :: send(8), recv(8), update_edge_only
- integer :: to_pe, from_pe, pos, msgsize, msgsize_send
+ integer :: to_pe, from_pe, pos, msgsize
integer :: n, l_size, l, m, i, j, k
integer :: is, ie, js, je, tMe, dir
integer :: buffer_recv_size, nlist, outunit
-
+ integer :: send_start_pos !>Send buffer start location
+ !!This serves as ad recv buffer start location
+ integer :: send_msgsize(MAXLIST) !>Send buffer msg size storage
+ !!This should be checkpointed for reverse ad communication
outunit = stdout()
ptr = LOC(mpp_domains_stack)
@@ -80,9 +86,10 @@
if(debug_message_passing) then
nlist = size(domain%list(:))
- allocate(msg1(0:nlist-1), msg2(0:nlist-1) )
+ allocate(msg1(0:nlist-1), msg2(0:nlist-1), msg3(0:nlist-1) )
msg1 = 0
msg2 = 0
+ msg3 = 0
do m = 1, update%nrecv
overPtr => update%recv(m)
msgsize = 0
@@ -96,7 +103,6 @@
end do
from_pe = update%recv(m)%pe
l = from_pe-mpp_root_pe()
- call mpp_recv( msg1(l), glen=1, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_1 )
msg2(l) = msgsize
enddo
@@ -111,9 +117,13 @@
msgsize = msgsize + (ie-is+1)*(je-js+1)
end if
end do
- call mpp_send( msgsize, plen=1, to_pe=overPtr%pe, tag=COMM_TAG_1 )
+ l = overPtr%pe - mpp_root_pe()
+ msg3(l) = msgsize
enddo
- call mpp_sync_self(check=EVENT_RECV)
+ ! mpp_sync_self would be preferable, but mpp_alltoall is kept
+ ! to exactly follow the duality of mpp_do_update.fh;
+ ! note that all-to-all may have scaling issues on very large systems
+ call mpp_alltoall(msg3, 1, msg1, 1)
do m = 0, nlist-1
if(msg1(m) .NE. msg2(m)) then
@@ -122,14 +132,16 @@
call mpp_error(FATAL, "mpp_do_update: mismatch on send and recv size")
endif
enddo
- call mpp_sync_self()
write(outunit,*)"NOTE from mpp_do_update: message sizes are matched between send and recv for domain " &
//trim(domain%name)
- deallocate(msg1, msg2)
+ deallocate(msg1, msg2, msg3)
endif
- !recv
+ ! Duality of the ad code requires checkpoint info (buffer recv size, send position, and msgsize)
+ ! from the forward recv portion of mpp_do_update.fh
+ ! (see the Cheng reference cited in the header above)
buffer_pos = 0
+
do m = 1, update%nrecv
overPtr => update%recv(m)
if( overPtr%count == 0 )cycle
@@ -137,38 +149,24 @@
do n = 1, overPtr%count
dir = overPtr%dir(n)
if(recv(dir)) then
- tMe = overPtr%tileMe(n)
is = overPtr%is(n); ie = overPtr%ie(n)
js = overPtr%js(n); je = overPtr%je(n)
msgsize = msgsize + (ie-is+1)*(je-js+1)
- msgsize_send = (ie-is+1)*(je-js+1)*ke*l_size
- pos = buffer_pos + msgsize_send
- do l=1,l_size ! loop over number of fields
- ptr_field = f_addrs(l, tMe)
- do k = ke,1,-1
- do j = je, js, -1
- do i = ie, is, -1
- buffer(pos) = field(i,j,k)
- field(i,j,k) = 0.
- pos = pos - 1
- end do
- end do
- end do
- end do
end if
end do
msgsize = msgsize*ke*l_size
if( msgsize.GT.0 )then
- to_pe = overPtr%pe
- call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=to_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
- end do ! end do m = 1, update%nrecv
+ end do
buffer_recv_size = buffer_pos
+ send_start_pos = buffer_pos
- ! send
+ ! checkpoint send_msgsize
+ buffer_pos = buffer_recv_size
do m = 1, update%nsend
+ send_msgsize(m) = 0
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
@@ -179,19 +177,99 @@
enddo
if( msgsize.GT.0 )then
msgsize = msgsize*ke*l_size
- msgsize_send = msgsize
+ end if
+
+ do n = 1, overPtr%count
+ dir = overPtr%dir(n)
+ if( send(dir) ) then
+ tMe = overPtr%tileMe(n)
+ is = overPtr%is(n); ie = overPtr%ie(n)
+ js = overPtr%js(n); je = overPtr%je(n)
+ pos = pos + (ie-is+1)*(je-js+1)*ke*l_size
+ endif
+ end do
+
+ send_msgsize(m) = pos-buffer_pos
+ buffer_pos = pos
+ end do
+
+ ! bufferize for backward communication
+ ! using pack procedures of recv in mpp_do_update.fh
+ buffer_pos = buffer_recv_size
+ do m = update%nrecv, 1, -1
+ overPtr => update%recv(m)
+ if( overPtr%count == 0 )cycle
+ pos = buffer_pos
+ do n = overPtr%count, 1, -1
+ dir = overPtr%dir(n)
+ if( recv(dir) ) then
+ tMe = overPtr%tileMe(n)
+ is = overPtr%is(n); ie = overPtr%ie(n)
+ js = overPtr%js(n); je = overPtr%je(n)
+ msgsize = (ie-is+1)*(je-js+1)*ke*l_size
+ pos = buffer_pos - msgsize
+ buffer_pos = pos
+ do l=1,l_size ! loop over number of fields
+ ptr_field = f_addrs(l, tMe)
+ do k = 1,ke
+ do j = js, je
+ do i = is, ie
+ pos = pos + 1
+ buffer(pos) = field(i,j,k)
+ end do
+ end do
+ end do
+ end do
+ endif
+ end do
+ end do
+
+ ! for duality, mpp_send of mpp_do_update.fh becomes mpp_recv in adjoint
+ buffer_pos = send_start_pos
+ do m = 1, update%nsend
+ msgsize = send_msgsize(m)
+ if(msgsize == 0) cycle
+ to_pe = update%send(m)%pe
+ call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=to_pe, block=.FALSE., tag=COMM_TAG_2 )
+ buffer_pos = buffer_pos + msgsize
+ end do
+
+ ! for duality, mpp_recv of mpp_do_update.fh becomes mpp_send in adjoint
+ buffer_pos = 0
+ do m = 1, update%nrecv
+ overPtr => update%recv(m)
+ if( overPtr%count == 0 )cycle
+ msgsize = 0
+ do n = 1, overPtr%count
+ dir = overPtr%dir(n)
+ if(recv(dir)) then
+ is = overPtr%is(n); ie = overPtr%ie(n)
+ js = overPtr%js(n); je = overPtr%je(n)
+ msgsize = msgsize + (ie-is+1)*(je-js+1)
+ end if
+ end do
+
+ msgsize = msgsize*ke*l_size
+ if( msgsize.GT.0 )then
from_pe = overPtr%pe
- call mpp_recv( buffer(buffer_pos+1), glen=msgsize, from_pe=from_pe, block=.FALSE., tag=COMM_TAG_2 )
+ mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, (buffer_pos+msgsize) )
+ if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
+ write( text,'(i8)' )mpp_domains_stack_hwm
+ call mpp_error( FATAL, 'MPP_DO_UPDATE: mpp_domains_stack overflow, '// &
+ 'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.' )
+ end if
+ call mpp_send( buffer(buffer_pos+1), plen=msgsize, to_pe=from_pe, tag=COMM_TAG_2 )
buffer_pos = buffer_pos + msgsize
end if
- end do ! end do ist = 0,nlist-1
+ end do
call mpp_sync_self(check=EVENT_RECV)
+ ! unpack and linear adjoint operation
+ ! in reverse order of pack process of mpp_do_update.fh
buffer_pos = buffer_recv_size
-
- ! send
do m = 1, update%nsend
+ send_msgsize(m) = 0
overPtr => update%send(m)
if( overPtr%count == 0 )cycle
pos = buffer_pos
@@ -201,7 +279,13 @@
if( send(dir) ) msgsize = msgsize + overPtr%msgsize(n)
enddo
if( msgsize.GT.0 )then
- buffer_pos = pos
+ msgsize = msgsize*ke*l_size
+ mpp_domains_stack_hwm = max( mpp_domains_stack_hwm, pos+msgsize )
+ if( mpp_domains_stack_hwm.GT.mpp_domains_stack_size )then
+ write( text,'(i8)' )mpp_domains_stack_hwm
+ call mpp_error( FATAL, 'MPP_START_UPDATE_DOMAINS: mpp_domains_stack overflow, ' // &
+ 'call mpp_domains_set_stack_size('//trim(text)//') from all PEs.')
+ end if
end if
do n = 1, overPtr%count
@@ -259,15 +343,12 @@
end do
end do
end do
- end select
+ end select
endif
end do ! do n = 1, overPtr%count
-
- msgsize = pos - buffer_pos
- if( msgsize.GT.0 )then
- buffer_pos = pos
- end if
- end do ! end do ist = 0,nlist-1
+ send_msgsize(m) = pos-buffer_pos
+ buffer_pos = pos
+ end do
call mpp_sync_self()
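For reference, a minimal two-rank sketch of the duality this patch exploits, again in raw MPI rather than the mpp wrappers for self-containment: the adjoint of a forward send is a receive followed by accumulation, and the adjoint of a forward receive is a send of the adjoint values. Names and sizes are illustrative only.

    program adjoint_duality_sketch
      use mpi
      implicit none
      integer :: ierr, pe
      integer :: stat(MPI_STATUS_SIZE)
      real(8) :: interior(3), halo(3), ad_interior(3), ad_halo(3)
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD, pe, ierr)
      if (pe == 0) then
        interior = [1.d0, 2.d0, 3.d0]
        ! forward: send an interior strip (the mpp_send in mpp_do_update.fh)
        call MPI_Send(interior, 3, MPI_DOUBLE_PRECISION, 1, 0, MPI_COMM_WORLD, ierr)
        ! adjoint: the dual operation is a receive plus accumulate
        ad_interior = 0.d0
        call MPI_Recv(ad_halo, 3, MPI_DOUBLE_PRECISION, 1, 1, MPI_COMM_WORLD, stat, ierr)
        ad_interior = ad_interior + ad_halo
        print *, 'rank 0 accumulated adjoint:', ad_interior
      else if (pe == 1) then
        ! forward: receive into the halo (the mpp_recv in mpp_do_update.fh)
        call MPI_Recv(halo, 3, MPI_DOUBLE_PRECISION, 0, 0, MPI_COMM_WORLD, stat, ierr)
        ad_halo = halo   ! seed the adjoint with something nonzero
        ! adjoint: send the halo adjoint back to the producer
        call MPI_Send(ad_halo, 3, MPI_DOUBLE_PRECISION, 0, 1, MPI_COMM_WORLD, ierr)
      end if
      call MPI_Finalize(ierr)
    end program adjoint_duality_sketch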
diff --git a/mpp/include/mpp_get_boundary_ad.fh b/mpp/include/mpp_get_boundary_ad.fh
index 56a18120e6..6701d375dd 100644
--- a/mpp/include/mpp_get_boundary_ad.fh
+++ b/mpp/include/mpp_get_boundary_ad.fh
@@ -21,7 +21,7 @@
!> @addtogroup mpp_domains_mod
!> @{
-!> This routine is used to retrieve scalar boundary data for symmetric domain.
+!> This routine is used to retrieve scalar ad boundary data for symmetric domain.
subroutine MPP_GET_BOUNDARY_AD_2D_(field, domain, ebuffer, sbuffer, wbuffer, nbuffer, flags, &
position, complete, tile_count)
type(domain2D), intent(in) :: domain
diff --git a/mpp/include/mpp_global_field_ad.fh b/mpp/include/mpp_global_field_ad.fh
index 7d948f9366..712d12e48e 100644
--- a/mpp/include/mpp_global_field_ad.fh
+++ b/mpp/include/mpp_global_field_ad.fh
@@ -21,8 +21,8 @@
!***********************************************************************
!> @addtogroup mpp_domains_mod
!> @{
- !> Get a global field from a local field
- !! local field may be on compute OR data domain
+ !> Get a local ad field from a global ad field
+ !! global field may be on compute OR data domain
subroutine MPP_GLOBAL_FIELD_2D_AD_( domain, local, global, flags, position,tile_count, default_data)
type(domain2D), intent(in) :: domain
MPP_TYPE_, intent(out) :: local(:,:)
diff --git a/mpp/include/mpp_sum_mpi_ad.fh b/mpp/include/mpp_sum_mpi_ad.fh
index 9b61b9457b..ee28d6c4bf 100644
--- a/mpp/include/mpp_sum_mpi_ad.fh
+++ b/mpp/include/mpp_sum_mpi_ad.fh
@@ -20,7 +20,7 @@
!* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
!***********************************************************************
!> Sums array a over the PEs in pelist (all PEs if this argument is omitted)
- !! result is also automatically broadcast: all PEs have the sum in a at the end
+ !! forward array is already summed and broadcasted: all PEs already have the ad sum
!! we are using f77-style call: array passed by address and not descriptor; further,
!! the f90 conformance check is avoided.
!> @ingroup mpp_mod
diff --git a/mpp/include/mpp_sum_nocomm_ad.fh b/mpp/include/mpp_sum_nocomm_ad.fh
index 9a427aa9d0..263bfde8d6 100644
--- a/mpp/include/mpp_sum_nocomm_ad.fh
+++ b/mpp/include/mpp_sum_nocomm_ad.fh
@@ -21,7 +21,7 @@
!***********************************************************************
!> Sums array a over the PEs in pelist (all PEs if this argument is omitted)
- !! result is also automatically broadcast: all PEs have the sum in a at the end
+ !! forward array is already summed and broadcasted: all PEs already have the ad sum
!! we are using f77-style call: array passed by address and not descriptor; further,
!! the f90 conformance check is avoided.
subroutine MPP_SUM_AD_( a, length, pelist )
diff --git a/mpp/include/mpp_update_domains2D_ad.fh b/mpp/include/mpp_update_domains2D_ad.fh
index e5fc6e7af3..8a876fdba5 100644
--- a/mpp/include/mpp_update_domains2D_ad.fh
+++ b/mpp/include/mpp_update_domains2D_ad.fh
@@ -19,7 +19,7 @@
!***********************************************************************
!> @addtogroup mpp_domains_mod
!> @{
- !> Updates data domain of 2D field whose computational domains have been computed
+ !> Updates data domain of 2D ad field whose computational domains have been computed
subroutine MPP_UPDATE_DOMAINS_AD_2D_( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
MPP_TYPE_, intent(inout) :: field(:,:)
@@ -39,7 +39,7 @@
return
end subroutine MPP_UPDATE_DOMAINS_AD_2D_
- !> Updates data domain of 3D field whose computational domains have been computed
+ !> Updates data domain of 3D ad field whose computational domains have been computed
subroutine MPP_UPDATE_DOMAINS_AD_3D_( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count)
MPP_TYPE_, intent(inout) :: field(:,:,:)
@@ -176,7 +176,7 @@
end subroutine MPP_UPDATE_DOMAINS_AD_3D_
- !> Updates data domain of 4D field whose computational domains have been computed
+ !> Updates data domain of 4D ad field whose computational domains have been computed
subroutine MPP_UPDATE_DOMAINS_AD_4D_( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
MPP_TYPE_, intent(inout) :: field(:,:,:,:)
@@ -196,7 +196,7 @@
return
end subroutine MPP_UPDATE_DOMAINS_AD_4D_
- !> Updates data domain of 5D field whose computational domains have been computed
+ !> Updates data domain of 5D ad field whose computational domains have been computed
subroutine MPP_UPDATE_DOMAINS_AD_5D_( field, domain, flags, complete, position, &
whalo, ehalo, shalo, nhalo, name, tile_count )
MPP_TYPE_, intent(inout) :: field(:,:,:,:,:)
@@ -224,7 +224,7 @@
!vector fields
subroutine MPP_UPDATE_DOMAINS_AD_2D_V_( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
-!updates data domain of 2D field whose computational domains have been computed
+!updates data domain of 2D ad field whose computational domains have been computed
MPP_TYPE_, intent(inout) :: fieldx(:,:), fieldy(:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
@@ -247,7 +247,7 @@
subroutine MPP_UPDATE_DOMAINS_AD_3D_V_( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count)
-!updates data domain of 3D field whose computational domains have been computed
+!updates data domain of 3D ad field whose computational domains have been computed
MPP_TYPE_, intent(inout) :: fieldx(:,:,:), fieldy(:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
@@ -422,7 +422,7 @@
subroutine MPP_UPDATE_DOMAINS_AD_4D_V_( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
-!updates data domain of 4D field whose computational domains have been computed
+!updates data domain of 4D ad field whose computational domains have been computed
MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:), fieldy(:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
@@ -445,7 +445,7 @@
subroutine MPP_UPDATE_DOMAINS_AD_5D_V_( fieldx, fieldy, domain, flags, gridtype, complete, &
whalo, ehalo, shalo, nhalo, name, tile_count )
-!updates data domain of 5D field whose computational domains have been computed
+!updates data domain of 5D ad field whose computational domains have been computed
MPP_TYPE_, intent(inout) :: fieldx(:,:,:,:,:), fieldy(:,:,:,:,:)
type(domain2D), intent(inout) :: domain
integer, intent(in), optional :: flags, gridtype
From 2be8aa452ad3e5f43e92c38a64f12d1ae6c43fb8 Mon Sep 17 00:00:00 2001
From: Ryan Mulhall <35538242+rem1776@users.noreply.github.com>
Date: Thu, 13 Jul 2023 14:57:40 -0400
Subject: [PATCH 07/11] chore: add prefixed aliases for libfms routines (#1262)
BREAKING CHANGE: Any code using the global fms module (libFMS.F90) will break as this adds prefixes to all names in that module.
---
libFMS.F90 | 947 +++++++++++++++-------
test_fms/mpp/test_domains_utility_mod.F90 | 4 +-
test_fms/mpp/test_mpp_chksum.F90 | 5 +-
test_fms/mpp/test_mpp_domains.F90 | 2 +-
test_fms/mpp/test_mpp_nesting.F90 | 4 +-
5 files changed, 682 insertions(+), 280 deletions(-)
diff --git a/libFMS.F90 b/libFMS.F90
index 872c587a8c..02b54df82a 100644
--- a/libFMS.F90
+++ b/libFMS.F90
@@ -28,11 +28,18 @@
!! and routines. Overloaded type operators/assignments cannot be imported individually
!! (ie. `use fms, only: OPERATOR(*)` includes any defined '*' operators within FMS).
!!
-!! Remappings due to conflicts:
+!! Renaming scheme:
+!! Routines and variables: fms_<module_name>_routine_name
+!! Types: Fms<ModuleName><TypeName>
!!
-!! get_mosaic_tile_grid from mosaic2(fms2_io) => mosaic2_get_mosaic_tile_grid
+!! Exceptions (mainly for repeated names):
+!! - Parameter values keep their original names
+!! - If the module name is already included (as in init routines) only the fms prefix is added.
+!! - Similarly, if a redundant module name is already included it is not repeated
+!! (ie. mpp_update_domains => fms_mpp_domains_update_domains)
+!! - Override interfaces for operators and assignment are provided
!!
-!! read_data from interpolator_mod(fms2_io) => interpolator_read_data
+!! Remappings due to name conflicts:
!!
!! ZERO from interpolator_mod(mpp_parameter) => INTERPOLATOR_ZERO
!!
@@ -41,7 +48,7 @@
!! Not in this module:
!!
!! axis_utils_mod, fms_io_mod, time_interp_external_mod
-!! get_grid_version_mpp_mod, mpp_io_mod, mosaic_mod,
+!! get_grid_version_mpp_mod, mpp_io_mod, mosaic_mod, &
!! fms_mod(partial, old io excluded), drifters modules
!! constants_mod (FMSconstants should be used externally)
!! grid_mod, mosaic_mod
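For reference, a short sketch of what the renaming means for user code. fms_amip_interp_init and FmsAmipInterp_type are actual aliases from the diff below; treat the program body, including the argument-free init call, as illustrative only.

    program prefixed_alias_sketch
      use fms, only: fms_amip_interp_init, FmsAmipInterp_type
      implicit none
      type(FmsAmipInterp_type) :: interp   ! was: type(amip_interp_type)
      call fms_amip_interp_init()          ! was: call amip_interp_init()
    end program prefixed_alias_sketch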
@@ -65,215 +72,458 @@ module fms
fms_affinity_set
!> amip_interp
- use amip_interp_mod, only: amip_interp_init, get_amip_sst, get_amip_ice, &
- amip_interp_new,amip_interp_del, amip_interp_type, &
- assignment(=), i_sst, j_sst, sst_ncep, sst_anom, &
- forecast_mode, use_ncep_sst
+ use amip_interp_mod, only: fms_amip_interp_init => amip_interp_init, &
+ fms_amip_interp_get_amip_sst => get_amip_sst, &
+ fms_amip_interp_get_amip_ice => get_amip_ice, &
+ fms_amip_interp_new => amip_interp_new, &
+ fms_amip_interp_del => amip_interp_del, &
+ FmsAmipInterp_type => amip_interp_type, &
+ assignment(=), &
+ fms_amip_interp_i_sst => i_sst, &
+ fms_amip_interp_j_sst => j_sst, &
+ fms_amip_interp_sst_ncep => sst_ncep, &
+ fms_amip_interp_sst_anom => sst_anom, &
+ fms_amip_interp_forecast_mode=> forecast_mode, &
+ fms_amip_interp_use_ncep_sst => use_ncep_sst
!> astronomy
- use astronomy_mod, only: astronomy_init, get_period, set_period, &
- set_orbital_parameters, get_orbital_parameters, &
- set_ref_date_of_ae, get_ref_date_of_ae, &
- diurnal_solar, daily_mean_solar, annual_mean_solar, &
- astronomy_end, universal_time, orbital_time
+ use astronomy_mod, only: fms_astronomy_init => astronomy_init, &
+ fms_astronomy_get_period => get_period, &
+ fms_astronomy_set_period => set_period, &
+ fms_astronomy_set_orbital_parameters => set_orbital_parameters, &
+ fms_astronomy_get_orbital_parameters => get_orbital_parameters, &
+ fms_astronomy_set_ref_date_of_ae => set_ref_date_of_ae, &
+ fms_astronomy_get_ref_date_of_ae => get_ref_date_of_ae, &
+ fms_astronomy_diurnal_solar => diurnal_solar, &
+ fms_astronomy_daily_mean_solar => daily_mean_solar, &
+ fms_astronomy_annual_mean_solar => annual_mean_solar, &
+ fms_astronomy_end => astronomy_end, &
+ fms_astronomy_universal_time => universal_time, &
+ fms_astronomy_orbital_time => orbital_time
!> axis_utils
- use axis_utils2_mod, only: get_axis_cart, get_axis_modulo, lon_in_range, &
- tranlon, frac_index, nearest_index, interp_1d, &
- get_axis_modulo_times, axis_edges
+ use axis_utils2_mod, only: fms_axis_utils2_get_axis_cart => get_axis_cart, &
+ fms_axis_utils2_get_axis_modulo => get_axis_modulo, &
+ fms_axis_utils2_lon_in_range => lon_in_range, &
+ fms_axis_utils2_tranlon => tranlon, &
+ fms_axis_utils2_frac_index => frac_index, &
+ fms_axis_utils2_nearest_index => nearest_index, &
+ fms_axis_utils2_interp_1d => interp_1d, &
+ fms_axis_utils2_get_axis_modulo_times => get_axis_modulo_times, &
+ fms_axis_utils2_axis_edges => axis_edges
!>block_control
- use block_control_mod, only: block_control_type, define_blocks, &
- define_blocks_packed
+ use block_control_mod, only: FmsBlockControl_type => block_control_type, &
+ fms_block_control_define_blocks => define_blocks, &
+ fms_block_control_define_blocks_packed => define_blocks_packed
!> column_diagnostics
- use column_diagnostics_mod, only: column_diagnostics_init, &
- initialize_diagnostic_columns, &
- column_diagnostics_header, &
- close_column_diagnostics_units
+ use column_diagnostics_mod, only: fms_column_diagnostics_init => column_diagnostics_init, &
+ fms_column_diagnostics_initialize_diagnostic_columns => &
+ initialize_diagnostic_columns, &
+ fms_column_diagnostics_header => column_diagnostics_header, &
+ fms_column_diagnostics_close_units => close_column_diagnostics_units
!> coupler
- use coupler_types_mod, only: coupler_types_init, coupler_type_copy, &
- coupler_type_spawn, coupler_type_set_diags, &
- coupler_type_write_chksums, coupler_type_send_data, &
- coupler_type_data_override, coupler_type_register_restarts, &
- coupler_type_restore_state, coupler_type_increment_data, &
- coupler_type_rescale_data, coupler_type_copy_data, &
- coupler_type_redistribute_data, coupler_type_destructor, &
- coupler_type_initialized, coupler_type_extract_data, &
- coupler_type_set_data,coupler_type_copy_1d_2d, &
- coupler_type_copy_1d_3d, coupler_3d_values_type, &
- coupler_3d_field_type, coupler_3d_bc_type, &
- coupler_2d_values_type, coupler_2d_field_type, &
- coupler_2d_bc_type, coupler_1d_values_type, &
- coupler_1d_field_type, coupler_1d_bc_type, &
- ind_pcair, ind_u10, ind_psurf, ind_alpha, ind_csurf, &
- ind_sc_no, ind_flux, ind_deltap, ind_kw, ind_flux0, &
- ind_deposition, ind_runoff
- use ensemble_manager_mod, only: ensemble_manager_init, get_ensemble_id, get_ensemble_size, &
- get_ensemble_pelist, ensemble_pelist_setup, &
- get_ensemble_filter_pelist
- use atmos_ocean_fluxes_mod, only: atmos_ocean_fluxes_init, atmos_ocean_type_fluxes_init, &
- aof_set_coupler_flux
+ use coupler_types_mod, only: fms_coupler_types_init => coupler_types_init, &
+ fms_coupler_type_copy => coupler_type_copy, &
+ fms_coupler_type_spawn => coupler_type_spawn, &
+ fms_coupler_type_set_diags => coupler_type_set_diags, &
+ fms_coupler_type_write_chksums => coupler_type_write_chksums, &
+ fms_coupler_type_send_data => coupler_type_send_data, &
+ fms_coupler_type_data_override => coupler_type_data_override, &
+ fms_coupler_type_register_restarts => coupler_type_register_restarts, &
+ fms_coupler_type_restore_state => coupler_type_restore_state, &
+ fms_coupler_type_increment_data => coupler_type_increment_data, &
+ fms_coupler_type_rescale_data => coupler_type_rescale_data, &
+ fms_coupler_type_copy_data => coupler_type_copy_data, &
+ fms_coupler_type_redistribute_data => coupler_type_redistribute_data, &
+ fms_coupler_type_destructor => coupler_type_destructor, &
+ fms_coupler_type_initialized => coupler_type_initialized, &
+ fms_coupler_type_extract_data => coupler_type_extract_data, &
+ fms_coupler_type_set_data => coupler_type_set_data, &
+ fms_coupler_type_copy_1d_2d => coupler_type_copy_1d_2d, &
+ fms_coupler_type_copy_1d_3d => coupler_type_copy_1d_3d, &
+ FmsCoupler3dValues_type => coupler_3d_values_type, &
+ FmsCoupler3dField_type => coupler_3d_field_type, &
+ FmsCoupler3dBC_type => coupler_3d_bc_type, &
+ FmsCoupler2dValues_type => coupler_2d_values_type, &
+ FmsCoupler2dField_type => coupler_2d_field_type, &
+ FmsCoupler2dBC_type => coupler_2d_bc_type, &
+ FmsCoupler1dValues_type => coupler_1d_values_type, &
+ FmsCoupler1dField_type => coupler_1d_field_type, &
+ FmsCoupler1dBC_type => coupler_1d_bc_type, &
+ fms_coupler_ind_pcair => ind_pcair, &
+ fms_coupler_ind_u10 => ind_u10, &
+ fms_coupler_ind_psurf => ind_psurf, &
+ fms_coupler_ind_alpha => ind_alpha, &
+ fms_coupler_ind_csurf => ind_csurf, &
+ fms_coupler_ind_sc_no => ind_sc_no, &
+ fms_coupler_ind_flux => ind_flux, &
+ fms_coupler_ind_deltap => ind_deltap, &
+ fms_coupler_ind_kw => ind_kw, &
+ fms_coupler_ind_flux0 => ind_flux0, &
+ fms_coupler_ind_deposition => ind_deposition,&
+ fms_coupler_ind_runoff => ind_runoff
+ use ensemble_manager_mod, only: fms_ensemble_manager_init => ensemble_manager_init, &
+ fms_ensemble_manager_get_ensemble_id => get_ensemble_id, &
+ fms_ensemble_manager_get_ensemble_size => get_ensemble_size, &
+ fms_ensemble_manager_get_ensemble_pelist => get_ensemble_pelist, &
+ fms_ensemble_manager_ensemble_pelist_setup => ensemble_pelist_setup, &
+ fms_ensemble_manager_get_ensemble_filter_pelist => get_ensemble_filter_pelist
+ use atmos_ocean_fluxes_mod, only: fms_atmos_ocean_fluxes_init => atmos_ocean_fluxes_init, &
+ fms_atmos_ocean_type_fluxes_init => atmos_ocean_type_fluxes_init, &
+ fms_atmos_ocean_fluxes_set_coupler_flux => aof_set_coupler_flux
!> data_override
- use data_override_mod, only: data_override_init, data_override, &
- data_override_unset_domains, data_override_UG
+ use data_override_mod, only: fms_data_override_init => data_override_init, &
+ fms_data_override => data_override, &
+ fms_data_override_unset_domains => data_override_unset_domains, &
+ fms_data_override_UG => data_override_UG
!> diag_integral
- use diag_integral_mod, only: diag_integral_init, diag_integral_field_init, &
- sum_diag_integral_field, diag_integral_output, &
- diag_integral_end
+ use diag_integral_mod, only: fms_diag_integral_init => diag_integral_init, &
+ fms_diag_integral_field_init => diag_integral_field_init, &
+ fms_sum_diag_integral_field => sum_diag_integral_field, &
+ fms_diag_integral_output => diag_integral_output, &
+ fms_diag_integral_end => diag_integral_end
!> diag_manager
!! includes imports from submodules made public
- use diag_manager_mod, only: diag_manager_init, send_data, send_tile_averaged_data, &
- diag_manager_end, register_diag_field, register_static_field, &
- diag_axis_init, get_base_time, get_base_date, need_data, &
- DIAG_ALL, DIAG_OCEAN, DIAG_OTHER, get_date_dif, DIAG_SECONDS,&
- DIAG_MINUTES, DIAG_HOURS, DIAG_DAYS, DIAG_MONTHS, DIAG_YEARS, &
- get_diag_global_att, set_diag_global_att, diag_field_add_attribute, &
- diag_field_add_cell_measures, get_diag_field_id, &
- diag_axis_add_attribute, diag_grid_init, diag_grid_end, &
- diag_manager_set_time_end, diag_send_complete, &
- diag_send_complete_instant, DIAG_FIELD_NOT_FOUND, &
- CMOR_MISSING_VALUE, null_axis_id
+ use diag_manager_mod, only: fms_diag_init => diag_manager_init, &
+ fms_diag_send_data => send_data, &
+ fms_diag_send_tile_averaged_data => send_tile_averaged_data, &
+ fms_diag_end => diag_manager_end, &
+ fms_diag_register_diag_field => register_diag_field, &
+ fms_diag_register_static_field => register_static_field, &
+ fms_diag_axis_init => diag_axis_init, &
+ fms_diag_get_base_time => get_base_time, &
+ fms_diag_get_base_date => get_base_date, &
+ fms_diag_need_data => need_data, &
+ DIAG_ALL, &
+ DIAG_OCEAN, &
+ DIAG_OTHER, &
+ fms_get_date_dif => get_date_dif, &
+ DIAG_SECONDS,&
+ DIAG_MINUTES, &
+ DIAG_HOURS, &
+ DIAG_DAYS, &
+ DIAG_MONTHS, &
+ DIAG_YEARS, &
+ fms_diag_get_global_att => get_diag_global_att, &
+ fms_diag_set_global_att => set_diag_global_att, &
+ fms_diag_field_add_attribute => diag_field_add_attribute, &
+ fms_diag_field_add_cell_measures => diag_field_add_cell_measures, &
+ fms_diag_get_field_id => get_diag_field_id, &
+ fms_diag_axis_add_attribute => diag_axis_add_attribute, &
+ fms_diag_grid_init => diag_grid_init, &
+ fms_diag_grid_end => diag_grid_end, &
+ fms_diag_set_time_end => diag_manager_set_time_end, &
+ fms_diag_send_complete => diag_send_complete, &
+ fms_diag_send_complete_instant => diag_send_complete_instant, &
+ DIAG_FIELD_NOT_FOUND, &
+ CMOR_MISSING_VALUE, &
+ null_axis_id
!> exchange
- use xgrid_mod, only: xmap_type, setup_xmap, set_frac_area, put_to_xgrid, &
- get_from_xgrid, xgrid_count, some, conservation_check, &
- xgrid_init, AREA_ATM_SPHERE, AREA_OCN_SPHERE, AREA_ATM_MODEL, &
- AREA_OCN_MODEL, get_ocean_model_area_elements, grid_box_type, &
- get_xmap_grid_area, put_to_xgrid_ug, get_from_xgrid_ug, &
- set_frac_area_ug, FIRST_ORDER, SECOND_ORDER, stock_move_ug, &
- stock_move, stock_type, stock_print, get_index_range, &
- stock_integrate_2d
+ use xgrid_mod, only: FmsXgridXmap_type => xmap_type, &
+ fms_xgrid_setup_xmap => setup_xmap, &
+ fms_xgrid_set_frac_area => set_frac_area, &
+ fms_xgrid_put_to_xgrid => put_to_xgrid, &
+ fms_xgrid_get_from_xgrid => get_from_xgrid, &
+ fms_xgrid_count => xgrid_count, &
+ fms_xgrid_some => some, &
+ fms_xgrid_conservation_check => conservation_check, &
+ fms_xgrid_init => xgrid_init, &
+ AREA_ATM_SPHERE, AREA_OCN_SPHERE, AREA_ATM_MODEL, AREA_OCN_MODEL, &
+ fms_xgrid_get_ocean_model_area_elements => get_ocean_model_area_elements, &
+ FmsXgridGridBox_type => grid_box_type, &
+ fms_xgrid_get_xmap_grid_area => get_xmap_grid_area, &
+ fms_xgrid_put_to_xgrid_ug => put_to_xgrid_ug, &
+ fms_xgrid_get_from_xgrid_ug => get_from_xgrid_ug, &
+ fms_xgrid_set_frac_area_ug => set_frac_area_ug, &
+ FIRST_ORDER, SECOND_ORDER, &
+ fms_xgrid_stock_move_ug => stock_move_ug, &
+ fms_xgrid_stock_move => stock_move, &
+ FmsXgridStock_type => stock_type, &
+ fms_xgrid_stock_print => stock_print, &
+ fms_xgrid_get_index_range => get_index_range, &
+ fms_xgrid_stock_integrate_2d => stock_integrate_2d
use stock_constants_mod, only: NELEMS, ISTOCK_WATER, ISTOCK_HEAT, ISTOCK_SALT, &
- ISTOCK_TOP, ISTOCK_BOTTOM, ISTOCK_SIDE, stocks_file, &
- stocks_report, stocks_report_init, stocks_set_init_time, &
- atm_stock, ocn_stock, lnd_stock, ice_stock
+ ISTOCK_TOP, ISTOCK_BOTTOM, ISTOCK_SIDE, &
+ fms_stock_constants_stocks_file => stocks_file, &
+ fms_stock_constants_stocks_report => stocks_report, &
+ fms_stocks_report_init => stocks_report_init, &
+ fms_stocks_set_init_time => stocks_set_init_time, &
+ fms_stock_constants_atm_stock => atm_stock, &
+ fms_stock_constants_ocn_stock => ocn_stock, &
+ fms_stock_constants_lnd_stock => lnd_stock, &
+ fms_stock_constants_ice_stock => ice_stock
!> field manager
- use field_manager_mod, only: field_manager_init, field_manager_end, find_field_index, &
- get_field_info, &
- get_field_method, get_field_methods, parse, fm_change_list, &
- fm_change_root, fm_dump_list, fm_exists, fm_get_index, &
- fm_get_current_list, fm_get_length, fm_get_type, fm_get_value, &
- fm_init_loop, &
- fm_loop_over_list, fm_new_list, fm_new_value, &
- fm_reset_loop, fm_return_root, &
- fm_modify_name, fm_query_method, fm_find_methods, fm_copy_list, &
- fm_field_name_len, fm_path_name_len, &
- fm_string_len, fm_type_name_len, NUM_MODELS, NO_FIELD, &
- MODEL_ATMOS, MODEL_OCEAN, MODEL_LAND, MODEL_ICE, MODEL_COUPLER, &
- method_type, method_type_short, &
- method_type_very_short, fm_list_iter_type, default_method
- use fm_util_mod, only: fm_util_start_namelist, fm_util_end_namelist, &
- fm_util_check_for_bad_fields, fm_util_set_caller, &
- fm_util_reset_caller, fm_util_set_no_overwrite, &
- fm_util_reset_no_overwrite, fm_util_set_good_name_list, &
- fm_util_reset_good_name_list, fm_util_get_length, &
- fm_util_get_integer, fm_util_get_logical, fm_util_get_real, &
- fm_util_get_string, fm_util_get_integer_array, &
- fm_util_get_logical_array, fm_util_get_real_array, &
- fm_util_get_string_array, fm_util_set_value, &
- fm_util_set_value_integer_array, fm_util_set_value_logical_array, &
- fm_util_set_value_real_array, fm_util_set_value_string_array, &
- fm_util_set_value_integer, fm_util_set_value_logical, &
- fm_util_set_value_real, fm_util_set_value_string, &
- fm_util_get_index_list, fm_util_get_index_string, &
- fm_util_default_caller
+ use field_manager_mod, only: fms_field_manager_init => field_manager_init, &
+ fms_field_manager_end => field_manager_end, &
+ fms_field_manager_find_field_index => find_field_index, &
+ fms_field_manager_get_field_info => get_field_info, &
+ fms_field_manager_get_field_method => get_field_method, &
+ fms_field_manager_get_field_methods => get_field_methods, &
+ fms_field_manager_parse => parse, &
+ fms_field_manager_fm_change_list => fm_change_list, &
+ fms_field_manager_fm_change_root => fm_change_root, &
+ fms_field_manager_fm_dump_list => fm_dump_list, &
+ fms_field_manager_fm_exists => fm_exists, &
+ fms_field_manager_fm_get_index => fm_get_index, &
+ fms_field_manager_fm_get_current_list => fm_get_current_list, &
+ fms_field_manager_fm_get_length => fm_get_length, &
+ fms_field_manager_fm_get_type => fm_get_type, &
+ fms_field_manager_fm_get_value => fm_get_value, &
+ fms_field_manager_fm_init_loop => fm_init_loop, &
+ fms_field_manager_fm_loop_over_list => fm_loop_over_list, &
+ fms_field_manager_fm_new_list => fm_new_list, &
+ fms_field_manager_fm_new_value => fm_new_value, &
+ fms_field_manager_fm_reset_loop => fm_reset_loop, &
+ fms_field_manager_fm_return_root => fm_return_root, &
+ fms_field_manager_fm_modify_name => fm_modify_name, &
+ fms_field_manager_fm_query_method => fm_query_method, &
+ fms_field_manager_fm_find_methods => fm_find_methods, &
+ fms_field_manager_fm_copy_list => fm_copy_list, &
+ fms_field_manager_fm_field_name_len => fm_field_name_len, &
+ fms_field_manager_fm_path_name_len => fm_path_name_len, &
+ fms_field_manager_fm_string_len => fm_string_len, &
+ fms_field_manager_fm_type_name_len => fm_type_name_len, &
+ NUM_MODELS, NO_FIELD, MODEL_ATMOS, MODEL_OCEAN, MODEL_LAND, MODEL_ICE, MODEL_COUPLER, &
+ FmsFieldManagerMethod_type => method_type, &
+ FmsFieldManagerMethodShort_type => method_type_short, &
+ FmsFieldManagerMethodVeryShort_type => method_type_very_short, &
+ FmsFieldManagerListIterator_type => fm_list_iter_type, &
+ fms_field_manager_default_method => default_method
+ use fm_util_mod, only: fms_fm_util_start_namelist => fm_util_start_namelist, &
+ fms_fm_util_end_namelist => fm_util_end_namelist, &
+ fms_fm_util_check_for_bad_fields => fm_util_check_for_bad_fields, &
+ fms_fm_util_set_caller => fm_util_set_caller, &
+ fms_fm_util_reset_caller => fm_util_reset_caller, &
+ fms_fm_util_set_no_overwrite => fm_util_set_no_overwrite, &
+ fms_fm_util_reset_no_overwrite => fm_util_reset_no_overwrite, &
+ fms_fm_util_set_good_name_list => fm_util_set_good_name_list, &
+ fms_fm_util_reset_good_name_list => fm_util_reset_good_name_list, &
+ fms_fm_util_get_length => fm_util_get_length, &
+ fms_fm_util_get_integer => fm_util_get_integer, &
+ fms_fm_util_get_logical => fm_util_get_logical, &
+ fms_fm_util_get_real => fm_util_get_real, &
+ fms_fm_util_get_string => fm_util_get_string, &
+ fms_fm_util_get_integer_array => fm_util_get_integer_array, &
+ fms_fm_util_get_logical_array => fm_util_get_logical_array, &
+ fms_fm_util_get_real_array => fm_util_get_real_array, &
+ fms_fm_util_get_string_array => fm_util_get_string_array, &
+ fms_fm_util_set_value => fm_util_set_value, &
+ fms_fm_util_set_value_integer_array => fm_util_set_value_integer_array, &
+ fms_fm_util_set_value_logical_array => fm_util_set_value_logical_array, &
+ fms_fm_util_set_value_real_array => fm_util_set_value_real_array, &
+ fms_fm_util_set_value_string_array => fm_util_set_value_string_array, &
+ fms_fm_util_set_value_integer => fm_util_set_value_integer, &
+ fms_fm_util_set_value_logical => fm_util_set_value_logical, &
+ fms_fm_util_set_value_real => fm_util_set_value_real, &
+ fms_fm_util_set_value_string => fm_util_set_value_string, &
+ fms_fm_util_get_index_list => fm_util_get_index_list, &
+ fms_fm_util_get_index_string => fm_util_get_index_string, &
+ fms_fm_util_default_caller => fm_util_default_caller
!> fms2_io
+ !! TODO: need to see opinions on these
+ !! not sure if we need the fms_ prefix for routines
+ !! types do not follow our typical naming convention (no _type suffix; camel case instead of underscore spacing)
use fms2_io_mod, only: unlimited, FmsNetcdfFile_t, FmsNetcdfDomainFile_t, &
- FmsNetcdfUnstructuredDomainFile_t, open_file, open_virtual_file, &
- close_file, register_axis, register_field, register_restart_field, &
- write_data, read_data, write_restart, write_new_restart, &
- read_restart, read_new_restart, global_att_exists, &
- variable_att_exists, register_global_attribute, &
- register_variable_attribute, get_global_attribute, &
- get_variable_attribute, get_num_dimensions, &
- get_dimension_names, dimension_exists, is_dimension_unlimited, &
- get_dimension_size, get_num_variables, get_variable_names, &
- variable_exists, get_variable_num_dimensions, &
- get_variable_dimension_names, get_variable_size, &
- get_compute_domain_dimension_indices, &
- get_global_io_domain_indices, Valid_t, get_valid, is_valid, &
- get_unlimited_dimension_name, get_variable_unlimited_dimension_index, &
- file_exists, compressed_start_and_count, get_variable_sense, &
- get_variable_missing, get_variable_units, get_time_calendar, &
- open_check, is_registered_to_restart, check_if_open, &
- set_fileobj_time_name, is_dimension_registered, &
- fms2_io_init, get_mosaic_tile_grid, &
- write_restart_bc, read_restart_bc, get_filename_appendix, & !> 2021.02-a1
- set_filename_appendix, get_instance_filename, &
- nullify_filename_appendix, ascii_read, get_mosaic_tile_file, &
- parse_mask_table
+ FmsNetcdfUnstructuredDomainFile_t, &
+ Valid_t, &
+ fms2_io_open_file => open_file, &
+ fms2_io_open_virtual_file => open_virtual_file, &
+ fms2_io_close_file => close_file, &
+ fms2_io_register_axis => register_axis, &
+ fms2_io_register_field => register_field, &
+ fms2_io_register_restart_field => register_restart_field, &
+ fms2_io_write_data => write_data, &
+ fms2_io_read_data => read_data, &
+ fms2_io_write_restart => write_restart, &
+ fms2_io_write_new_restart => write_new_restart, &
+ fms2_io_read_restart => read_restart, &
+ fms2_io_read_new_restart => read_new_restart, &
+ fms2_io_global_att_exists => global_att_exists, &
+ fms2_io_variable_att_exists => variable_att_exists, &
+ fms2_io_register_global_attribute => register_global_attribute, &
+ fms2_io_register_variable_attribute => register_variable_attribute, &
+ fms2_io_get_global_attribute => get_global_attribute, &
+ fms2_io_get_variable_attribute => get_variable_attribute, &
+ fms2_io_get_num_dimensions => get_num_dimensions, &
+ fms2_io_get_dimension_names => get_dimension_names, &
+ fms2_io_dimension_exists => dimension_exists, &
+ fms2_io_is_dimension_unlimited => is_dimension_unlimited, &
+ fms2_io_get_dimension_size => get_dimension_size, &
+ fms2_io_get_num_variables => get_num_variables, &
+ fms2_io_get_variable_names => get_variable_names, &
+ fms2_io_variable_exists => variable_exists, &
+ fms2_io_get_variable_num_dimensions => get_variable_num_dimensions, &
+ fms2_io_get_variable_dimension_names => get_variable_dimension_names, &
+ fms2_io_get_variable_size => get_variable_size, &
+ fms2_io_get_compute_domain_dimension_indices => get_compute_domain_dimension_indices, &
+ fms2_io_get_global_io_domain_indices => get_global_io_domain_indices, &
+ fms2_io_get_valid => get_valid, &
+ fms2_io_is_valid => is_valid, &
+ fms2_io_get_unlimited_dimension_name => get_unlimited_dimension_name, &
+ fms2_io_get_variable_unlimited_dimension_index => get_variable_unlimited_dimension_index, &
+ fms2_io_file_exists => file_exists, &
+ fms2_io_compressed_start_and_count => compressed_start_and_count, &
+ fms2_io_get_variable_sense => get_variable_sense, &
+ fms2_io_get_variable_missing => get_variable_missing, &
+ fms2_io_get_variable_units => get_variable_units, &
+ fms2_io_get_time_calendar => get_time_calendar, &
+ fms2_io_open_check => open_check, &
+ fms2_io_is_registered_to_restart => is_registered_to_restart, &
+ fms2_io_check_if_open => check_if_open, &
+ fms2_io_set_fileobj_time_name => set_fileobj_time_name, &
+ fms2_io_is_dimension_registered => is_dimension_registered, &
+ fms2_io_fms2_io_init => fms2_io_init, &
+ fms2_io_get_mosaic_tile_grid => get_mosaic_tile_grid, &
+ fms2_io_write_restart_bc => write_restart_bc, &
+ fms2_io_read_restart_bc => read_restart_bc, &
+ fms2_io_get_filename_appendix => get_filename_appendix, &
+ fms2_io_set_filename_appendix => set_filename_appendix, &
+ fms2_io_get_instance_filename => get_instance_filename, &
+ fms2_io_nullify_filename_appendix => nullify_filename_appendix, &
+ fms2_io_ascii_read => ascii_read, &
+ fms2_io_get_mosaic_tile_file => get_mosaic_tile_file, &
+ fms2_io_parse_mask_table => parse_mask_table
! used via fms2_io
- ! fms_io_utils_mod, fms_netcdf_domain_io_mod, netcdf_io_mod,
+ ! fms_io_utils_mod, fms_netcdf_domain_io_mod, netcdf_io_mod, &
! fms_netcdf_unstructured_domain_io_mod, blackboxio
!> fms
!! routines that don't conflict with fms2_io
- use fms_mod, only: fms_init, fms_end, error_mesg, fms_error_handler, check_nml_error, &
- monotonic_array, string_array_index, clock_flag_default, &
- print_memory_usage, write_version_number
+ use fms_mod, only: fms_init, fms_end, error_mesg, fms_error_handler, &
+ check_nml_error, &
+ fms_monotonic_array => monotonic_array, fms_string_array_index => string_array_index, &
+ fms_clock_flag_default => clock_flag_default, fms_print_memory_usage => print_memory_usage, &
+ fms_write_version_number => write_version_number
!> horiz_interp
- use horiz_interp_mod, only: horiz_interp, horiz_interp_new, horiz_interp_del, &
- horiz_interp_init, horiz_interp_end
- use horiz_interp_type_mod, only: horiz_interp_type, assignment(=), CONSERVE, &
- BILINEAR, SPHERICA, BICUBIC, stats
+ use horiz_interp_mod, only: fms_horiz_interp => horiz_interp, fms_horiz_interp_new => horiz_interp_new, &
+ fms_horiz_interp_del => horiz_interp_del, fms_horiz_interp_init => horiz_interp_init, &
+ fms_horiz_interp_end => horiz_interp_end
+ use horiz_interp_type_mod, only: FmsHorizInterp_type => horiz_interp_type, &
+ assignment(=), CONSERVE, BILINEAR, SPHERICA, BICUBIC, &
+ fms_horiz_interp_type_stats => stats
!! used via horiz_interp
! horiz_interp_bicubic_mod, horiz_interp_bilinear_mod
! horiz_interp_conserve_mod, horiz_interp_spherical_mod
!> interpolator
- use interpolator_mod, only: interpolator_init, interpolator, interpolate_type_eq, &
- obtain_interpolator_time_slices, unset_interpolator_time_flag, &
- interpolator_end, init_clim_diag, query_interpolator, &
- interpolate_type, CONSTANT, &
- INTERP_WEIGHTED_P, INTERP_LINEAR_P, INTERP_LOG_P, &
- INTERPOLATOR_ZERO=>ZERO, & !! conflicts with mpp_parameter's ZERO
- interpolator_read_data=>read_data !! conflicts with fms2_io interface
+ use interpolator_mod, only: fms_interpolator_init => interpolator_init, &
+ fms_interpolator => interpolator, &
+ fms_interpolate_type_eq => interpolate_type_eq, &
+ fms_interpolator_obtain_interpolator_time_slices => obtain_interpolator_time_slices, &
+ fms_interpolator_unset_interpolator_time_flag => unset_interpolator_time_flag, &
+ fms_interpolator_end => interpolator_end, &
+ fms_interpolator_init_clim_diag => init_clim_diag, &
+ fms_interpolator_query_interpolator => query_interpolator, &
+ FmsInterpolate_type => interpolate_type, &
+ CONSTANT, INTERP_WEIGHTED_P, INTERP_LINEAR_P, INTERP_LOG_P, &
+ FMS_INTERPOLATOR_ZERO=>ZERO, & !! conflicts with mpp_parameter's ZERO
+ fms_interpolator_read_data=>read_data
!> memutils
- use memutils_mod, only: memutils_init, print_memuse_stats
+ use memutils_mod, only: fms_memutils_init => memutils_init, &
+ fms_memutils_print_memuse_stats => print_memuse_stats
!> monin_obukhov
- use monin_obukhov_mod, only: monin_obukhov_init, monin_obukhov_end, &
- mo_drag, mo_profile, mo_diff, stable_mix
- use monin_obukhov_inter, only: monin_obukhov_diff, monin_obukhov_drag_1d, &
- monin_obukhov_solve_zeta, monin_obukhov_derivative_t, &
- monin_obukhov_derivative_m, monin_obukhov_profile_1d, &
- monin_obukhov_integral_m, monin_obukhov_integral_tq, &
- monin_obukhov_stable_mix
+ use monin_obukhov_mod, only: fms_monin_obukhov_init => monin_obukhov_init, &
+ fms_monin_obukhov_end => monin_obukhov_end, &
+ fms_monin_obukhov_mo_drag => mo_drag, &
+ fms_monin_obukhov_mo_profile => mo_profile, &
+ fms_monin_obukhov_mo_diff => mo_diff, &
+ fms_monin_obukhov_stable_mix => stable_mix
+ use monin_obukhov_inter, only: fms_monin_obukhov_inter_diff => monin_obukhov_diff, &
+ fms_monin_obukhov_inter_drag_1d => monin_obukhov_drag_1d, &
+ fms_monin_obukhov_inter_solve_zeta => monin_obukhov_solve_zeta, &
+ fms_monin_obukhov_inter_derivative_t => monin_obukhov_derivative_t, &
+ fms_monin_obukhov_inter_derivative_m => monin_obukhov_derivative_m, &
+ fms_monin_obukhov_inter_profile_1d => monin_obukhov_profile_1d, &
+ fms_monin_obukhov_inter_integral_m => monin_obukhov_integral_m, &
+ fms_monin_obukhov_inter_integral_tq => monin_obukhov_integral_tq, &
+ fms_monin_obukhov_inter_stable_mix => monin_obukhov_stable_mix
!> mosaic
- use mosaic2_mod, only: get_mosaic_ntiles, get_mosaic_ncontacts, &
- get_mosaic_grid_sizes, get_mosaic_contact, &
- get_mosaic_xgrid_size, get_mosaic_xgrid, &
- calc_mosaic_grid_area, calc_mosaic_grid_great_circle_area, &
- is_inside_polygon, &
- mosaic2_get_mosaic_tile_grid => get_mosaic_tile_grid !overloaded in fms2_io
- use grid2_mod, only: get_grid_ntiles, get_grid_size, get_grid_cell_centers, &
- get_grid_cell_vertices, get_grid_cell_Area, get_grid_comp_area, &
- define_cube_mosaic, get_great_circle_algorithm, grid_init, grid_end
- use gradient_mod, only: gradient_cubic, calc_cubic_grid_info
+ use mosaic2_mod, only: fms_mosaic2_get_mosaic_ntiles => get_mosaic_ntiles, &
+ fms_mosaic2_get_mosaic_ncontacts => get_mosaic_ncontacts, &
+ fms_mosaic2_get_mosaic_grid_sizes => get_mosaic_grid_sizes, &
+ fms_mosaic2_get_mosaic_contact => get_mosaic_contact, &
+ fms_mosaic2_get_mosaic_xgrid_size => get_mosaic_xgrid_size, &
+ fms_mosaic2_get_mosaic_xgrid => get_mosaic_xgrid, &
+ fms_mosaic2_calc_mosaic_grid_area => calc_mosaic_grid_area, &
+ fms_mosaic2_calc_mosaic_grid_great_circle_area => calc_mosaic_grid_great_circle_area, &
+ fms_mosaic2_is_inside_polygon => is_inside_polygon, &
+ fms_mosaic2_get_mosaic_tile_grid => get_mosaic_tile_grid !overloaded in fms2_io
+ use grid2_mod, only: fms_grid2_get_grid_ntiles => get_grid_ntiles, &
+ fms_grid2_get_grid_size => get_grid_size, &
+ fms_grid2_get_grid_cell_centers => get_grid_cell_centers, &
+ fms_grid2_get_grid_cell_vertices => get_grid_cell_vertices, &
+ fms_grid2_get_grid_cell_Area => get_grid_cell_Area, &
+ fms_grid2_get_grid_comp_area => get_grid_comp_area, &
+ fms_grid2_define_cube_mosaic => define_cube_mosaic, &
+ fms_grid2_get_great_circle_algorithm => get_great_circle_algorithm, &
+ fms_grid2_grid_init => grid_init, &
+ fms_grid2_end => grid_end
+ use gradient_mod, only: fms_gradient_cubic => gradient_cubic, &
+ fms_gradient_calc_cubic_grid_info => calc_cubic_grid_info
!> mpp
- use mpp_mod, only: stdin, stdout, stderr, &
- stdlog, lowercase, uppercase, mpp_error, mpp_error_state, &
- mpp_set_warn_level, mpp_sync, mpp_sync_self, mpp_set_stack_size, &
- mpp_pe, mpp_npes, mpp_root_pe, mpp_set_root_pe, mpp_declare_pelist, &
- mpp_get_current_pelist, mpp_set_current_pelist, &
- mpp_get_current_pelist_name, mpp_clock_id, mpp_clock_set_grain, &
- mpp_record_timing_data, get_unit, read_ascii_file, read_input_nml, &
- mpp_clock_begin, mpp_clock_end, get_ascii_file_num_lines, &
- mpp_record_time_start, mpp_record_time_end, mpp_chksum, &
- mpp_max, mpp_min, mpp_sum, mpp_transmit, mpp_send, mpp_recv, &
- mpp_sum_ad, mpp_broadcast, mpp_init, mpp_exit, mpp_gather, &
- mpp_scatter, mpp_alltoall, mpp_type, mpp_byte, mpp_type_create, &
- mpp_type_free, input_nml_file
+ use mpp_mod, only: fms_mpp_stdin => stdin, &
+ fms_mpp_stdout => stdout, &
+ fms_mpp_stderr => stderr, &
+ fms_mpp_stdlog => stdlog, &
+ fms_mpp_lowercase => lowercase, &
+ fms_mpp_uppercase => uppercase, &
+ fms_mpp_error => mpp_error, &
+ fms_mpp_error_state => mpp_error_state, &
+ fms_mpp_set_warn_level => mpp_set_warn_level, &
+ fms_mpp_sync => mpp_sync, &
+ fms_mpp_sync_self => mpp_sync_self, &
+ fms_mpp_set_stack_size => mpp_set_stack_size, &
+ fms_mpp_pe => mpp_pe, &
+ fms_mpp_npes => mpp_npes, &
+ fms_mpp_root_pe => mpp_root_pe, &
+ fms_mpp_set_root_pe => mpp_set_root_pe, &
+ fms_mpp_declare_pelist => mpp_declare_pelist, &
+ fms_mpp_get_current_pelist => mpp_get_current_pelist, &
+ fms_mpp_set_current_pelist => mpp_set_current_pelist, &
+ fms_mpp_get_current_pelist_name => mpp_get_current_pelist_name, &
+ fms_mpp_clock_id => mpp_clock_id, &
+ fms_mpp_clock_set_grain => mpp_clock_set_grain, &
+ fms_mpp_record_timing_data => mpp_record_timing_data, &
+ fms_mpp_get_unit => get_unit, &
+ fms_mpp_read_ascii_file => read_ascii_file, &
+ fms_mpp_read_input_nml => read_input_nml, &
+ fms_mpp_clock_begin => mpp_clock_begin, &
+ fms_mpp_clock_end => mpp_clock_end, &
+ fms_mpp_get_ascii_file_num_lines => get_ascii_file_num_lines, &
+ fms_mpp_record_time_start => mpp_record_time_start, &
+ fms_mpp_record_time_end => mpp_record_time_end, &
+ fms_mpp_chksum => mpp_chksum, &
+ fms_mpp_max => mpp_max, &
+ fms_mpp_min => mpp_min, &
+ fms_mpp_sum => mpp_sum, &
+ fms_mpp_transmit => mpp_transmit, &
+ fms_mpp_send => mpp_send, &
+ fms_mpp_recv => mpp_recv, &
+ fms_mpp_sum_ad => mpp_sum_ad, &
+ fms_mpp_broadcast => mpp_broadcast, &
+ fms_mpp_init => mpp_init, &
+ fms_mpp_exit => mpp_exit, &
+ fms_mpp_gather => mpp_gather, &
+ fms_mpp_scatter => mpp_scatter, &
+ fms_mpp_alltoall => mpp_alltoall, &
+ FmsMpp_type => mpp_type, &
+ FmsMpp_byte => mpp_byte, &
+ fms_mpp_type_create => mpp_type_create, &
+ fms_mpp_type_free => mpp_type_free, &
+ fms_mpp_input_nml_file => input_nml_file
use mpp_parameter_mod,only:MAXPES, MPP_VERBOSE, MPP_DEBUG, ALL_PES, ANY_PE, NULL_PE, &
NOTE, WARNING, FATAL, MPP_WAIT, MPP_READY, MAX_CLOCKS, &
MAX_EVENT_TYPES, MAX_EVENTS, MPP_CLOCK_SYNC, MPP_CLOCK_DETAILED, &
@@ -298,74 +548,161 @@ module fms
MAX_DOMAIN_FIELDS, MAX_TILES, ZERO, NINETY, MINUS_NINETY, &
ONE_HUNDRED_EIGHTY, NONBLOCK_UPDATE_TAG, EDGEUPDATE, EDGEONLY, &
NONSYMEDGEUPDATE, NONSYMEDGE
- use mpp_data_mod, only: stat, mpp_stack, ptr_stack, status, ptr_status, sync, &
- ptr_sync, mpp_from_pe, ptr_from, remote_Data_loc, &
- ptr_remote, mpp_domains_stack, ptr_domains_stack, &
- mpp_domains_stack_nonblock, ptr_domains_stack_nonblock
- use mpp_utilities_mod, only: mpp_array_global_min_max
- use mpp_memutils_mod, only: mpp_print_memuse_stats, mpp_mem_dump, &
- mpp_memuse_begin, mpp_memuse_end
- use mpp_efp_mod, only: mpp_reproducing_sum, mpp_efp_list_sum_across_PEs, &
- mpp_efp_plus, mpp_efp_minus, mpp_efp_to_real, &
- mpp_real_to_efp, mpp_efp_real_diff, operator(+), &
- operator(-), assignment(=), mpp_query_efp_overflow_error, &
- mpp_reset_efp_overflow_error, mpp_efp_type
- use mpp_domains_mod, only: domain_axis_spec, domain1D, domain2D, DomainCommunicator2D, &
- nest_domain_type, mpp_group_update_type, &
- mpp_domains_set_stack_size, mpp_get_compute_domain, &
- mpp_get_compute_domains, mpp_get_data_domain, &
- mpp_get_global_domain, mpp_get_domain_components, &
- mpp_get_layout, mpp_get_pelist, operator(.EQ.), operator(.NE.), &
- mpp_domain_is_symmetry, mpp_domain_is_initialized, &
- mpp_get_neighbor_pe, mpp_nullify_domain_list, &
- mpp_set_compute_domain, mpp_set_data_domain, mpp_set_global_domain, &
- mpp_get_memory_domain, mpp_get_domain_shift, &
- mpp_domain_is_tile_root_pe, mpp_get_tile_id, &
- mpp_get_domain_extents, mpp_get_current_ntile, &
- mpp_get_ntile_count, mpp_get_tile_list, mpp_get_tile_npes, &
- mpp_get_domain_root_pe, mpp_get_tile_pelist, &
- mpp_get_tile_compute_domains, mpp_get_num_overlap, &
- mpp_get_overlap, mpp_get_io_domain, mpp_get_domain_pe, &
- mpp_get_domain_tile_root_pe, mpp_get_domain_name, &
- mpp_get_io_domain_layout, mpp_copy_domain, mpp_set_domain_symmetry, &
- mpp_get_update_pelist, mpp_get_update_size, &
- mpp_get_domain_npes, mpp_get_domain_pelist, &
- mpp_clear_group_update, mpp_group_update_initialized, &
- mpp_group_update_is_set, mpp_get_global_domains, &
- mpp_global_field, mpp_global_max, mpp_global_min, mpp_global_sum, &
- mpp_global_sum_tl, mpp_global_sum_ad, mpp_broadcast_domain, &
- mpp_domains_init, mpp_domains_exit, mpp_redistribute, &
- mpp_update_domains, mpp_check_field, mpp_start_update_domains, &
- mpp_complete_update_domains, mpp_create_group_update, &
- mpp_do_group_update, mpp_start_group_update, &
- mpp_complete_group_update, mpp_reset_group_update_field, &
- mpp_update_nest_fine, mpp_update_nest_coarse, mpp_get_boundary, &
- mpp_update_domains_ad, mpp_get_boundary_ad, mpp_pass_SG_to_UG, &
- mpp_pass_UG_to_SG, mpp_define_layout, mpp_define_domains, &
- mpp_modify_domain, mpp_define_mosaic, mpp_define_mosaic_pelist, &
- mpp_define_null_domain, mpp_mosaic_defined, &
- mpp_define_io_domain, mpp_deallocate_domain, &
- mpp_compute_extent, mpp_compute_block_extent, &
- mpp_define_unstruct_domain, domainUG, mpp_get_UG_io_domain, &
- mpp_get_UG_domain_npes, mpp_get_UG_compute_domain, &
- mpp_get_UG_domain_tile_id, mpp_get_UG_domain_pelist, &
- mpp_get_ug_domain_grid_index, mpp_get_UG_domain_ntiles, &
- mpp_get_UG_global_domain, mpp_global_field_ug, &
- mpp_get_ug_domain_tile_list, mpp_get_UG_compute_domains, &
- mpp_define_null_UG_domain, NULL_DOMAINUG, mpp_get_UG_domains_index, &
- mpp_get_UG_SG_domain, mpp_get_UG_domain_tile_pe_inf, &
- mpp_define_nest_domains, mpp_get_C2F_index, mpp_get_F2C_index, &
- mpp_get_nest_coarse_domain, mpp_get_nest_fine_domain, &
- mpp_is_nest_coarse, mpp_is_nest_fine, &
- mpp_get_nest_pelist, mpp_get_nest_npes, &
- mpp_get_nest_fine_pelist, mpp_get_nest_fine_npes, &
- mpp_domain_UG_is_tile_root_pe, mpp_deallocate_domainUG, &
- mpp_get_io_domain_UG_layout, NULL_DOMAIN1D, NULL_DOMAIN2D, &
- mpp_create_super_grid_domain, mpp_shift_nest_domains
+ ! this should really only be used internally
+ !use mpp_data_mod, only: stat, mpp_stack, ptr_stack, status, ptr_status, sync, &
+ ! ptr_sync, mpp_from_pe, ptr_from, remote_Data_loc, &
+ ! ptr_remote, mpp_domains_stack, ptr_domains_stack, &
+ ! mpp_domains_stack_nonblock, ptr_domains_stack_nonblock
+ use mpp_utilities_mod, only: fms_mpp_utilities_array_global_min_max => mpp_array_global_min_max
+ use mpp_memutils_mod, only: fms_mpp_memutils_print_memuse_stats => mpp_print_memuse_stats, &
+ fms_mpp_memutils_mem_dump => mpp_mem_dump, &
+ fms_mpp_memutils_memuse_begin => mpp_memuse_begin, &
+ fms_mpp_memutils_memuse_end => mpp_memuse_end
+ use mpp_efp_mod, only: fms_mpp_efp_reproducing_sum => mpp_reproducing_sum, &
+ fms_mpp_efp_list_sum_across_PEs => mpp_efp_list_sum_across_PEs, &
+ fms_mpp_efp_plus => mpp_efp_plus, &
+ fms_mpp_efp_minus => mpp_efp_minus, &
+ fms_mpp_efp_to_real => mpp_efp_to_real, &
+ fms_mpp_efp_real_to_efp => mpp_real_to_efp, &
+ fms_mpp_efp_real_diff => mpp_efp_real_diff, &
+ operator(+), operator(-), assignment(=), &
+ fms_mpp_efp_query_overflow_error => mpp_query_efp_overflow_error, &
+ fms_mpp_efp_reset_overflow_error => mpp_reset_efp_overflow_error, &
+ FmsMppEfp_type => mpp_efp_type
+ use mpp_domains_mod, only: FmsMppDomains_axis_spec => domain_axis_spec, &
+ FmsMppDomain1D => domain1D, &
+ FmsMppDomain2D => domain2D, &
+ FmsMppDomainCommunicator2D => DomainCommunicator2D, &
+ FmsMppDomainsNestDomain_type => nest_domain_type, &
+ FmsMppDomainsGroupUpdate_type => mpp_group_update_type, &
+ fms_mpp_domains_domains_set_stack_size => mpp_domains_set_stack_size, &
+ fms_mpp_domains_get_compute_domain => mpp_get_compute_domain, &
+ fms_mpp_domains_get_compute_domains => mpp_get_compute_domains, &
+ fms_mpp_domains_get_data_domain => mpp_get_data_domain, &
+ fms_mpp_domains_get_global_domain => mpp_get_global_domain, &
+ fms_mpp_domains_get_domain_components => mpp_get_domain_components, &
+ fms_mpp_domains_get_layout => mpp_get_layout, &
+ fms_mpp_domains_get_pelist => mpp_get_pelist, &
+ operator(.EQ.), operator(.NE.), &
+ fms_mpp_domains_domain_is_symmetry => mpp_domain_is_symmetry, &
+ fms_mpp_domains_domain_is_initialized => mpp_domain_is_initialized, &
+ fms_mpp_domains_get_neighbor_pe => mpp_get_neighbor_pe, &
+ fms_mpp_domains_nullify_domain_list => mpp_nullify_domain_list, &
+ fms_mpp_domains_set_compute_domain => mpp_set_compute_domain, &
+ fms_mpp_domains_set_data_domain => mpp_set_data_domain, &
+ fms_mpp_domains_set_global_domain => mpp_set_global_domain, &
+ fms_mpp_domains_get_memory_domain => mpp_get_memory_domain, &
+ fms_mpp_domains_get_domain_shift => mpp_get_domain_shift, &
+ fms_mpp_domains_domain_is_tile_root_pe => mpp_domain_is_tile_root_pe, &
+ fms_mpp_domains_get_tile_id => mpp_get_tile_id, &
+ fms_mpp_domains_get_domain_extents => mpp_get_domain_extents, &
+ fms_mpp_domains_get_current_ntile => mpp_get_current_ntile, &
+ fms_mpp_domains_get_ntile_count => mpp_get_ntile_count, &
+ fms_mpp_domains_get_tile_list => mpp_get_tile_list, &
+ fms_mpp_domains_get_tile_npes => mpp_get_tile_npes, &
+ fms_mpp_domains_get_domain_root_pe => mpp_get_domain_root_pe, &
+ fms_mpp_domains_get_tile_pelist => mpp_get_tile_pelist, &
+ fms_mpp_domains_get_tile_compute_domains => mpp_get_tile_compute_domains, &
+ fms_mpp_domains_get_num_overlap => mpp_get_num_overlap, &
+ fms_mpp_domains_get_overlap => mpp_get_overlap, &
+ fms_mpp_domains_get_io_domain => mpp_get_io_domain, &
+ fms_mpp_domains_get_domain_pe => mpp_get_domain_pe, &
+ fms_mpp_domains_get_domain_tile_root_pe => mpp_get_domain_tile_root_pe, &
+ fms_mpp_domains_get_domain_name => mpp_get_domain_name, &
+ fms_mpp_domains_get_io_domain_layout => mpp_get_io_domain_layout, &
+ fms_mpp_domains_copy_domain => mpp_copy_domain, &
+ fms_mpp_domains_set_domain_symmetry => mpp_set_domain_symmetry, &
+ fms_mpp_domains_get_update_pelist => mpp_get_update_pelist, &
+ fms_mpp_domains_get_update_size => mpp_get_update_size, &
+ fms_mpp_domains_get_domain_npes => mpp_get_domain_npes, &
+ fms_mpp_domains_get_domain_pelist => mpp_get_domain_pelist, &
+ fms_mpp_domains_clear_group_update => mpp_clear_group_update, &
+ fms_mpp_domains_group_update_initialized => mpp_group_update_initialized, &
+ fms_mpp_domains_group_update_is_set => mpp_group_update_is_set, &
+ fms_mpp_domains_get_global_domains => mpp_get_global_domains, &
+ fms_mpp_domains_global_field => mpp_global_field, &
+ fms_mpp_domains_global_max => mpp_global_max, &
+ fms_mpp_domains_global_min => mpp_global_min, &
+ fms_mpp_domains_global_sum => mpp_global_sum, &
+ fms_mpp_domains_global_sum_tl => mpp_global_sum_tl, &
+ fms_mpp_domains_global_sum_ad => mpp_global_sum_ad, &
+ fms_mpp_domains_broadcast_domain => mpp_broadcast_domain, &
+ fms_mpp_domains_init => mpp_domains_init, &
+ fms_mpp_domains_exit => mpp_domains_exit, &
+ fms_mpp_domains_redistribute => mpp_redistribute, &
+ fms_mpp_domains_update_domains => mpp_update_domains, &
+ fms_mpp_domains_check_field => mpp_check_field, &
+ fms_mpp_domains_start_update_domains => mpp_start_update_domains, &
+ fms_mpp_domains_complete_update_domains => mpp_complete_update_domains, &
+ fms_mpp_domains_create_group_update => mpp_create_group_update, &
+ fms_mpp_domains_do_group_update => mpp_do_group_update, &
+ fms_mpp_domains_start_group_update => mpp_start_group_update, &
+ fms_mpp_domains_complete_group_update => mpp_complete_group_update, &
+ fms_mpp_domains_reset_group_update_field => mpp_reset_group_update_field, &
+ fms_mpp_domains_update_nest_fine => mpp_update_nest_fine, &
+ fms_mpp_domains_update_nest_coarse => mpp_update_nest_coarse, &
+ fms_mpp_domains_get_boundary => mpp_get_boundary, &
+ fms_mpp_domains_update_domains_ad => mpp_update_domains_ad, &
+ fms_mpp_domains_get_boundary_ad => mpp_get_boundary_ad, &
+ fms_mpp_domains_pass_SG_to_UG => mpp_pass_SG_to_UG, &
+ fms_mpp_domains_pass_UG_to_SG => mpp_pass_UG_to_SG, &
+ fms_mpp_domains_define_layout => mpp_define_layout, &
+ fms_mpp_domains_define_domains => mpp_define_domains, &
+ fms_mpp_domains_modify_domain => mpp_modify_domain, &
+ fms_mpp_domains_define_mosaic => mpp_define_mosaic, &
+ fms_mpp_domains_define_mosaic_pelist => mpp_define_mosaic_pelist, &
+ fms_mpp_domains_define_null_domain => mpp_define_null_domain, &
+ fms_mpp_domains_mosaic_defined => mpp_mosaic_defined, &
+ fms_mpp_domains_define_io_domain => mpp_define_io_domain, &
+ fms_mpp_domains_deallocate_domain => mpp_deallocate_domain, &
+ fms_mpp_domains_compute_extent => mpp_compute_extent, &
+ fms_mpp_domains_compute_block_extent => mpp_compute_block_extent, &
+ fms_mpp_domains_define_unstruct_domain => mpp_define_unstruct_domain, &
+ FmsMppDomainUG => domainUG, &
+ fms_mpp_domains_get_UG_io_domain => mpp_get_UG_io_domain, &
+ fms_mpp_domains_get_UG_domain_npes => mpp_get_UG_domain_npes, &
+ fms_mpp_domains_get_UG_compute_domain => mpp_get_UG_compute_domain, &
+ fms_mpp_domains_get_UG_domain_tile_id => mpp_get_UG_domain_tile_id, &
+ fms_mpp_domains_get_UG_domain_pelist => mpp_get_UG_domain_pelist, &
+ fms_mpp_domains_get_ug_domain_grid_index => mpp_get_ug_domain_grid_index, &
+ fms_mpp_domains_get_UG_domain_ntiles => mpp_get_UG_domain_ntiles, &
+ fms_mpp_domains_get_UG_global_domain => mpp_get_UG_global_domain, &
+ fms_mpp_domains_global_field_ug => mpp_global_field_ug, &
+ fms_mpp_domains_get_ug_domain_tile_list => mpp_get_ug_domain_tile_list, &
+ fms_mpp_domains_get_UG_compute_domains => mpp_get_UG_compute_domains, &
+ fms_mpp_domains_define_null_UG_domain => mpp_define_null_UG_domain, &
+ fms_mpp_domains_NULL_DOMAINUG => NULL_DOMAINUG, &
+ fms_mpp_domains_get_UG_domains_index => mpp_get_UG_domains_index, &
+ fms_mpp_domains_get_UG_SG_domain => mpp_get_UG_SG_domain, &
+ fms_mpp_domains_get_UG_domain_tile_pe_inf => mpp_get_UG_domain_tile_pe_inf, &
+ fms_mpp_domains_define_nest_domains => mpp_define_nest_domains, &
+ fms_mpp_domains_get_C2F_index => mpp_get_C2F_index, &
+ fms_mpp_domains_get_F2C_index => mpp_get_F2C_index, &
+ fms_mpp_domains_get_nest_coarse_domain => mpp_get_nest_coarse_domain, &
+ fms_mpp_domains_get_nest_fine_domain => mpp_get_nest_fine_domain, &
+ fms_mpp_domains_is_nest_coarse => mpp_is_nest_coarse, &
+ fms_mpp_domains_is_nest_fine => mpp_is_nest_fine, &
+ fms_mpp_domains_get_nest_pelist => mpp_get_nest_pelist, &
+ fms_mpp_domains_get_nest_npes => mpp_get_nest_npes, &
+ fms_mpp_domains_get_nest_fine_pelist => mpp_get_nest_fine_pelist, &
+ fms_mpp_domains_get_nest_fine_npes => mpp_get_nest_fine_npes, &
+ fms_mpp_domains_domain_UG_is_tile_root_pe => mpp_domain_UG_is_tile_root_pe, &
+ fms_mpp_domains_deallocate_domainUG => mpp_deallocate_domainUG, &
+ fms_mpp_domains_get_io_domain_UG_layout => mpp_get_io_domain_UG_layout, &
+ NULL_DOMAIN1D, &
+ NULL_DOMAIN2D, &
+ fms_mpp_domains_create_super_grid_domain => mpp_create_super_grid_domain, &
+ fms_mpp_domains_shift_nest_domains => mpp_shift_nest_domains
!> parser
#ifdef use_yaml
- use yaml_parser_mod, only: open_and_parse_file, get_num_blocks, get_block_ids, get_value_from_key, &
- get_nkeys, get_key_ids, get_key_name, get_key_value
+ use yaml_parser_mod, only: fms_yaml_parser_open_and_parse_file => open_and_parse_file, &
+ fms_yaml_parser_get_num_blocks => get_num_blocks, &
+ fms_yaml_parser_get_block_ids => get_block_ids, &
+ fms_yaml_parser_get_value_from_key => get_value_from_key, &
+ fms_yaml_parser_get_nkeys => get_nkeys, &
+ fms_yaml_parser_get_key_ids => get_key_ids, &
+ fms_yaml_parser_get_key_name => get_key_name, &
+ fms_yaml_parser_get_key_value => get_key_value
#endif
!> platform
@@ -373,64 +710,124 @@ module fms
l8_kind, l4_kind, i2_kind, ptr_kind
!> random_numbers
- use random_numbers_mod, only: randomNumberStream, initializeRandomNumberStream, &
- getRandomNumbers, constructSeed
+ use random_numbers_mod, only: fms_random_numbers_randomNumberStream => randomNumberStream, &
+ fms_random_numbers_initializeRandomNumberStream => initializeRandomNumberStream, &
+ fms_random_numbers_getRandomNumbers => getRandomNumbers, &
+ fms_random_numbers_constructSeed => constructSeed
!> sat_vapor_pres
- use sat_vapor_pres_mod, only: lookup_es, lookup_des, sat_vapor_pres_init, &
- lookup_es2, lookup_des2, lookup_es2_des2, &
- lookup_es3, lookup_des3, lookup_es3_des3, &
- lookup_es_des, compute_qs, compute_mrs, &
- escomp, descomp
+ use sat_vapor_pres_mod, only: fms_sat_vapor_pres_lookup_es => lookup_es, &
+ fms_sat_vapor_pres_lookup_des => lookup_des, &
+ fms_sat_vapor_pres_init => sat_vapor_pres_init, &
+ fms_sat_vapor_pres_lookup_es2 => lookup_es2, &
+ fms_sat_vapor_pres_lookup_des2 => lookup_des2, &
+ fms_sat_vapor_pres_lookup_es2_des2 => lookup_es2_des2, &
+ fms_sat_vapor_pres_lookup_es3 => lookup_es3, &
+ fms_sat_vapor_pres_lookup_des3 => lookup_des3, &
+ fms_sat_vapor_pres_lookup_es3_des3 => lookup_es3_des3, &
+ fms_sat_vapor_pres_lookup_es_des => lookup_es_des, &
+ fms_sat_vapor_pres_compute_qs => compute_qs, &
+ fms_sat_vapor_pres_compute_mrs => compute_mrs, &
+ fms_sat_vapor_pres_escomp => escomp, &
+ fms_sat_vapor_pres_descomp => descomp
!> string_utils
- use fms_string_utils_mod, only: string, fms_array_to_pointer, fms_pointer_to_array, fms_sort_this, &
- fms_find_my_string, fms_find_unique, fms_c2f_string, fms_cstring2cpointer, &
- string_copy
+ use fms_string_utils_mod, only: fms_string_utils_string => string, &
+ fms_string_utils_array_to_pointer => fms_array_to_pointer, &
+ fms_string_utils_fms_pointer_to_array => fms_pointer_to_array, &
+ fms_string_utils_sort_this => fms_sort_this, &
+ fms_string_utils_find_my_string => fms_find_my_string, &
+ fms_string_utils_find_unique => fms_find_unique, &
+ fms_string_utils_c2f_string => fms_c2f_string, &
+ fms_string_utils_cstring2cpointer => fms_cstring2cpointer, &
+ fms_string_utils_copy => string_copy
!> time_interp
- use time_interp_mod, only: time_interp_init, time_interp, fraction_of_year, &
+ use time_interp_mod, only: fms_time_interp_init => time_interp_init, &
+ fms_time_interp => time_interp, fms_fraction_of_year=> fraction_of_year, &
NONE, YEAR, MONTH, DAY
- use time_interp_external2_mod, only: init_external_field, time_interp_external, &
- time_interp_external_init, time_interp_external_exit, &
- get_external_field_size, get_time_axis, &
- get_external_field_missing, set_override_region, &
- reset_src_data_region, get_external_fileobj, &
+ use time_interp_external2_mod, only: fms_time_interp_external_init_external_field => init_external_field, &
+ fms_time_interp_external => time_interp_external, &
+ fms_time_interp_external_init => time_interp_external_init, &
+ fms_time_interp_external_exit => time_interp_external_exit, &
+ fms_time_interp_external_get_external_field_size => get_external_field_size, &
+ fms_time_interp_external_get_time_axis => get_time_axis, &
+ fms_time_interp_external_get_external_field_missing => get_external_field_missing, &
+ fms_time_interp_external_set_override_region => set_override_region, &
+ fms_time_interp_external_reset_src_data_region => reset_src_data_region, &
+ fms_time_interp_external_get_external_fileobj => get_external_fileobj, &
NO_REGION, INSIDE_REGION, OUTSIDE_REGION, &
SUCCESS, ERR_FIELD_NOT_FOUND
!> time_manager
- use time_manager_mod, only: time_type, operator(+), operator(-), operator(*), &
+ use time_manager_mod, only: FmsTime_type => time_type, &
+ operator(+), operator(-), operator(*), assignment(=),&
operator(/), operator(>), operator(>=), operator(==), &
operator(/=), operator(<), operator(<=), operator(//), &
- assignment(=), set_time, increment_time, decrement_time, &
- get_time, interval_alarm, repeat_alarm, time_type_to_real, &
- real_to_time_type, time_list_error, THIRTY_DAY_MONTHS, &
- JULIAN, GREGORIAN, NOLEAP, NO_CALENDAR, INVALID_CALENDAR, &
- set_calendar_type, get_calendar_type, set_ticks_per_second, &
- get_ticks_per_second, set_date, get_date, increment_date, &
- decrement_date, days_in_month, leap_year, length_of_year, &
- days_in_year, day_of_year, month_name, valid_calendar_types, &
- time_manager_init, print_time, print_date, set_date_julian, &
- get_date_julian, get_date_no_leap, date_to_string
- use get_cal_time_mod, only: get_cal_time
+ fms_time_manager_set_time => set_time, &
+ fms_time_manager_increment_time => increment_time, &
+ fms_time_manager_decrement_time => decrement_time, &
+ fms_time_manager_get_time => get_time, &
+ fms_time_manager_interval_alarm => interval_alarm, &
+ fms_time_manager_repeat_alarm => repeat_alarm, &
+ fms_time_manager_time_type_to_real => time_type_to_real, &
+ fms_time_manager_real_to_time_type => real_to_time_type, &
+ fms_time_manager_time_list_error => time_list_error, &
+ THIRTY_DAY_MONTHS, JULIAN, GREGORIAN, NOLEAP, NO_CALENDAR, INVALID_CALENDAR, &
+ fms_time_manager_set_calendar_type => set_calendar_type, &
+ fms_time_manager_get_calendar_type => get_calendar_type, &
+ fms_time_manager_set_ticks_per_second => set_ticks_per_second, &
+ fms_time_manager_get_ticks_per_second => get_ticks_per_second, &
+ fms_time_manager_set_date => set_date, &
+ fms_time_manager_get_date => get_date, &
+ fms_time_manager_increment_date => increment_date, &
+ fms_time_manager_decrement_date => decrement_date, &
+ fms_time_manager_days_in_month => days_in_month, &
+ fms_time_manager_leap_year => leap_year, &
+ fms_time_manager_length_of_year => length_of_year, &
+ fms_time_manager_days_in_year => days_in_year, &
+ fms_time_manager_day_of_year => day_of_year, &
+ fms_time_manager_month_name => month_name, &
+ fms_time_manager_valid_calendar_types => valid_calendar_types, &
+ fms_time_manager_init => time_manager_init, &
+ fms_time_manager_print_time => print_time, &
+ fms_time_manager_print_date => print_date, &
+ fms_time_manager_set_date_julian => set_date_julian, &
+ fms_time_manager_get_date_julian => get_date_julian, &
+ fms_time_manager_get_date_no_leap => get_date_no_leap, &
+ fms_time_manager_date_to_string => date_to_string
+ use get_cal_time_mod, only: fms_get_cal_time => get_cal_time
!> topography
- use gaussian_topog_mod, only: gaussian_topog_init, get_gaussian_topog
- use topography_mod, only: topography_init, get_topog_mean, get_topog_stdev, &
- get_ocean_frac, get_ocean_mask, get_water_frac, &
- get_water_mask
+ use gaussian_topog_mod, only: fms_gaussian_topog_init => gaussian_topog_init, &
+ fms_get_gaussian_topog => get_gaussian_topog
+ use topography_mod, only: fms_topography_init => topography_init, &
+ fms_topography_get_topog_mean => get_topog_mean, &
+ fms_topography_get_topog_stdev => get_topog_stdev, &
+ fms_topography_get_ocean_frac => get_ocean_frac, &
+ fms_topography_get_ocean_mask => get_ocean_mask, &
+ fms_topography_get_water_frac => get_water_frac, &
+ fms_topography_get_water_mask => get_water_mask
!> tracer_manager
- use tracer_manager_mod, only: tracer_manager_init, tracer_manager_end, &
- check_if_prognostic, get_tracer_indices, &
- get_tracer_index, get_tracer_names, &
- get_tracer_name, query_method, &
- set_tracer_atts, set_tracer_profile, &
- register_tracers, get_number_tracers, &
- adjust_mass, adjust_positive_def, NO_TRACER, MAX_TRACER_FIELDS
+ use tracer_manager_mod, only: fms_tracer_manager_init => tracer_manager_init, &
+ fms_tracer_manager_end => tracer_manager_end, &
+ fms_tracer_manager_check_if_prognostic => check_if_prognostic, &
+ fms_tracer_manager_get_tracer_indices => get_tracer_indices, &
+ fms_tracer_manager_get_tracer_index => get_tracer_index, &
+ fms_tracer_manager_get_tracer_names => get_tracer_names, &
+ fms_tracer_manager_get_tracer_name => get_tracer_name, &
+ fms_tracer_manager_query_method => query_method, &
+ fms_tracer_manager_set_tracer_atts => set_tracer_atts, &
+ fms_tracer_manager_set_tracer_profile => set_tracer_profile, &
+ fms_tracer_manager_register_tracers => register_tracers, &
+ fms_tracer_manager_get_number_tracers => get_number_tracers, &
+ fms_tracer_manager_adjust_mass => adjust_mass, &
+ fms_tracer_manager_adjust_positive_def => adjust_positive_def, &
+ NO_TRACER, MAX_TRACER_FIELDS
!> tridiagonal
- use tridiagonal_mod, only: tri_invert, close_tridiagonal
+ use tridiagonal_mod, only: fms_tridiagonal_tri_invert => tri_invert, &
+ fms_tridiagonal_close_tridiagonal => close_tridiagonal
implicit none
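As the module's header comments note, operator and assignment interfaces are re-exported wholesale, so an only-list naming an operator pulls in every FMS definition of it. A hypothetical sketch using the renamed time type:

    program demo_time_ops
      ! Hypothetical sketch: operator(+) and assignment(=) cannot be narrowed
      ! to a single type, so this import exposes all FMS '+' definitions.
      use fms, only: fms_init, fms_end, FmsTime_type, &
                     fms_time_manager_set_time, operator(+), assignment(=)
      implicit none
      type(FmsTime_type) :: step, total
      call fms_init()
      step  = fms_time_manager_set_time(seconds=30, days=0)
      total = step + step   ! resolves to time_manager's operator(+)
      call fms_end()
    end program demo_time_ops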
diff --git a/test_fms/mpp/test_domains_utility_mod.F90 b/test_fms/mpp/test_domains_utility_mod.F90
index 43271e053f..f88054b9f5 100644
--- a/test_fms/mpp/test_domains_utility_mod.F90
+++ b/test_fms/mpp/test_domains_utility_mod.F90
@@ -22,9 +22,9 @@
module test_domains_utility_mod
use mpp_mod, only : FATAL, WARNING, MPP_DEBUG, NOTE
use mpp_mod, only : mpp_error
- use mpp_domains_mod, only : ZERO, NINETY, MINUS_NINETY
+ use mpp_domains_mod, only : ZERO, NINETY, MINUS_NINETY, &
+ domain2d, mpp_define_mosaic
use platform_mod, only: r4_kind, r8_kind
- use fms
interface fill_coarse_data
module procedure fill_coarse_data_r8
diff --git a/test_fms/mpp/test_mpp_chksum.F90 b/test_fms/mpp/test_mpp_chksum.F90
index a63ee7d22e..5810e42cab 100644
--- a/test_fms/mpp/test_mpp_chksum.F90
+++ b/test_fms/mpp/test_mpp_chksum.F90
@@ -23,7 +23,10 @@
!> single pe and distributed checksums
program test_mpp_chksum
- use fms
+ use mpp_mod
+ use mpp_domains_mod
+ use fms_mod
+ use platform_mod
implicit none
diff --git a/test_fms/mpp/test_mpp_domains.F90 b/test_fms/mpp/test_mpp_domains.F90
index 1ae1d904da..3ca557788f 100644
--- a/test_fms/mpp/test_mpp_domains.F90
+++ b/test_fms/mpp/test_mpp_domains.F90
@@ -54,7 +54,7 @@ program test_mpp_domains
NONSYMEDGEUPDATE
use mpp_domains_mod, only : domainUG, mpp_define_unstruct_domain, mpp_get_UG_domain_tile_id
use mpp_domains_mod, only : mpp_get_UG_compute_domain, mpp_pass_SG_to_UG, mpp_pass_UG_to_SG
- use mpp_domains_mod, only : mpp_global_field_ug
+ use mpp_domains_mod, only : mpp_global_field_ug, mpp_get_ug_global_domain
use compare_data_checksums
use test_domains_utility_mod
diff --git a/test_fms/mpp/test_mpp_nesting.F90 b/test_fms/mpp/test_mpp_nesting.F90
index 201fd217f0..833c580bf5 100644
--- a/test_fms/mpp/test_mpp_nesting.F90
+++ b/test_fms/mpp/test_mpp_nesting.F90
@@ -19,7 +19,9 @@
!> Tests nested domain operations and routines in mpp_domains
program test_mpp_nesting
- use fms
+ use fms_mod
+ use mpp_domains_mod
+ use mpp_mod
use compare_data_checksums
use test_domains_utility_mod
use platform_mod
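Because the global `fms` module now exports only prefixed names, the test updates above drop the blanket `use fms` in favor of direct imports from the owning modules. A minimal sketch of that pattern (hypothetical program; module and entity names as used in the hunks above):

    program demo_direct_imports
      ! Import unprefixed names straight from the owning modules instead of
      ! relying on the global fms module.
      use mpp_mod,         only: mpp_init, mpp_exit, mpp_pe
      use mpp_domains_mod, only: domain2D
      implicit none
      type(domain2D) :: domain   ! declared only to show the direct import
      call mpp_init()
      print *, 'PE', mpp_pe()
      call mpp_exit()
    end program demo_direct_imports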
From 0e4a6754db0f3bbbcf8c76f003d0df974928c30f Mon Sep 17 00:00:00 2001
From: Jesse Lentz <42011922+J-Lentz@users.noreply.github.com>
Date: Thu, 27 Jul 2023 13:21:57 -0400
Subject: [PATCH 08/11] fix: add fms_stacksize.c to fms_c_src_files in
CMakeLists.txt (#1285)
---
CMakeLists.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 270539cd4d..b2db09eace 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -196,6 +196,7 @@ list(APPEND fms_fortran_src_files
# Collect FMS C source files
list(APPEND fms_c_src_files
affinity/affinity.c
+ fms/fms_stacksize.c
mosaic/create_xgrid.c
mosaic/gradient_c2l.c
mosaic/interp.c
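The helper added to the build here backs the fms_init change described in the changelog below (a system call setting the stack size to unlimited). A rough sketch of what a Fortran-side binding to such a C helper could look like; the bound symbol name is illustrative only, not the actual fms_stacksize.c API:

    module demo_stacksize_mod
      ! Illustrative iso_c_binding interface; "set_stacksize_unlimited" is a
      ! hypothetical symbol name, not the real one exported by fms_stacksize.c.
      implicit none
      interface
        subroutine set_stacksize_unlimited() bind(C, name="set_stacksize_unlimited")
        end subroutine set_stacksize_unlimited
      end interface
    end module demo_stacksize_mod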
From c7a70e7de670962a6cc2ee1d0b667966c32bab8d Mon Sep 17 00:00:00 2001
From: Ryan Mulhall <35538242+rem1776@users.noreply.github.com>
Date: Fri, 28 Jul 2023 08:35:41 -0400
Subject: [PATCH 09/11] chore: update changelog for 2023.02 release (#1306)
---
CHANGELOG.md | 39 +++++++++++++++++++++++++++++++++++++++
CMakeLists.txt | 2 +-
configure.ac | 2 +-
libFMS/Makefile.am | 2 +-
4 files changed, 42 insertions(+), 3 deletions(-)
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 0cc9802f8f..32cc50cd27 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -6,6 +6,45 @@ and this project uses `yyyy.rr[.pp]`, where `yyyy` is the year a patch is releas
`rr` is a sequential release number (starting from `01`), and an optional two-digit
sequential patch number (starting from `01`).
+## [2023.02] - 2023-07-27
+### Known Issues
+- GCC 11.1.0 is unsupported due to compilation issues with select type. The issue is resolved in later GCC releases.
+- When outputting sub-region diagnostics, the current diag_manager does not add "tileX" to the filename when using a cube sphere. This leads to trouble when trying to combine the files and regrid them (if the region is in two different tiles).
+- GCC 10 and greater cause io issues when compiled using O2 optimization flags
+- GNU compilers prior to the GCC 9.0 release are unsupported for this release due to lack of support for the findloc intrinsic function. This will result in an error saying 'findloc' has no IMPLICIT type, which can be resolved by compiling with gcc version 9.0 or greater.
+
+### Added
+- MPP/EXCHANGE: Adds association checks before pointer deallocations in mpp includes and xgrid
+
+### Changed
+- LIBFMS: The libFMS.F90 file (module name `fms`) meant to provide global access has been updated to include `fms` and its module/subdirectory name as prefixes for all names. This only affects external codes that use the global module (via `use fms`), not the individual modules.
+- MIXED PRECISION: Updates the axis_utils2, horiz_interp, sat_vapor_pressure, and axis_utils subdirectories to support mixed precision real values.
+- FMS2_IO: Brought in the mpp_scatter and mpp_gather performance changes from the 2023.01.01 patch. See below for more details.
+- FMS2_IO: Improved error messages to give more debugging information
+- FMS_MOD: Changed fms_init to include a system call to set the stack size to unlimited, and removed previously added stack size fixes
+- MONIN_OBUKHOV: Restructures the subroutines in the `stable_mix` interface so that the 1d version calls the underlying implementation, and the 2d and 3d versions call it on 1d slices of the data, as opposed to passing in mismatched arrays.
+- MPP: Updates from JEDI for the adjoint version of the mpp halo filling (mpp_do_update_ad.fh); adds a checkpoint for forward buffer information.
+
+### Fixed
+- MPP: mpp_broadcast causing an unintended error message due to checking the wrong pe value
+- MPP: Added workaround for GCC 12 issues causing errors with string lengths in fms2_io
+- FMS2_IO: Fixed support for 'packed' data when using NF_SHORT variables. The scale_factor and add_offset attributes will now be applied if present.
+- DOCS: Improved Doxygen comments for tranlon, updated deployment action for the site
+- TESTS: Workaround added for an ICE (internal compiler error) coming from the mpp_alltoall test with Intel 2022.3, and fixes for any test scripts missing input.nml creation. Fixes for mpp/test_global_array failures.
+- TIME_INTERP: Fixes crashes when calling with a non-existent field
+- DIAG_MANAGER: Fixes a module dependency issue causing failures during parallel builds
+- AXIS_UTILS2: Fixes an out-of-bounds memory index
+
+### Removed
+- FMS_IO/MPP_IO: The two older io modules, fms_io_mod and mpp_io_mod, have been deprecated and will not be compiled by default. If you wish to compile these modules, you must use the -Duse_deprecated_io CPP flag or the --enable-deprecated-io configure option if building with autotools.
+
+### Tag Commit Hashes
+- 2023.02-beta1 2be8aa452ad3e5f43e92c38a64f12d1ae6c43fb8
+- 2023.02-alpha3 8c73bd18dc1d580f2ee524c37cf903ff54d40501
+- 2023.02-alpha2 783019fdec89a8db2b26247c2f63d4782e1495c0
+- 2023.02-alpha1 419c66be31f82ebb13a91ea5e837c707eb54473b
+
+
## [2023.01.01] - 2023-06-06
### Changed
- FMS2_IO: Performance changes for domain_reads_2d and domain_reads_3d:
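The FMS2_IO packed-data fix noted above follows the standard NetCDF packing convention: when scale_factor and add_offset attributes are present, stored NF_SHORT values are expanded on read. A minimal sketch of the arithmetic (attribute values illustrative):

    program demo_unpack
      use iso_fortran_env, only: int16
      implicit none
      integer(int16) :: packed_vals(4) = [0_int16, 100_int16, 200_int16, 300_int16]
      real :: scale_factor = 0.01, add_offset = 273.15  ! illustrative attribute values
      real :: unpacked(4)
      ! NetCDF convention: unpacked = packed * scale_factor + add_offset
      unpacked = real(packed_vals) * scale_factor + add_offset
      print *, unpacked
    end program demo_unpack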
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b2db09eace..ff75d3cf90 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -26,7 +26,7 @@ set(CMAKE_Fortran_FLAGS_DEBUG)
# Define the CMake project
project(FMS
- VERSION 2023.01.0
+ VERSION 2023.02.0
DESCRIPTION "GFDL FMS Library"
HOMEPAGE_URL "https://www.gfdl.noaa.gov/fms"
LANGUAGES C Fortran)
diff --git a/configure.ac b/configure.ac
index 82588b6c84..f7f0fea4aa 100644
--- a/configure.ac
+++ b/configure.ac
@@ -25,7 +25,7 @@ AC_PREREQ([2.69])
# Initialize with name, version, and support email address.
AC_INIT([GFDL FMS Library],
- [2023.01.00-dev],
+ [2023.02.00],
[gfdl.climate.model.info@noaa.gov],
[FMS],
[https://www.github.com/NOAA-GFDL/FMS])
diff --git a/libFMS/Makefile.am b/libFMS/Makefile.am
index e56820e701..db57f86562 100644
--- a/libFMS/Makefile.am
+++ b/libFMS/Makefile.am
@@ -28,7 +28,7 @@ lib_LTLIBRARIES = libFMS.la
# These linker flags specify libtool version info.
# See http://www.gnu.org/software/libtool/manual/libtool.html#Libtool-versioning
# for information regarding incrementing `-version-info`.
-libFMS_la_LDFLAGS = -version-info 15:0:0
+libFMS_la_LDFLAGS = -version-info 16:0:0
# Add the convenience libraries to the FMS library.
libFMS_la_LIBADD = $(top_builddir)/platform/libplatform.la
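The -version-info bump reflects libtool's current:revision:age scheme: dropping the deprecated io modules from the default build changes the library interface, so current is incremented and revision and age are reset. Roughly:

    # libtool -version-info is current:revision:age
    # 15:0:0 -> 16:0:0 : interfaces changed or removed, so no binary
    # compatibility with the previous release is claimed (age stays 0)
    libFMS_la_LDFLAGS = -version-info 16:0:0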
From c9824565cef3dfa298fb12305f0f317ed60b5169 Mon Sep 17 00:00:00 2001
From: "github-actions[bot]"
<41898282+github-actions[bot]@users.noreply.github.com>
Date: Fri, 28 Jul 2023 10:51:08 -0400
Subject: [PATCH 10/11] chore: append dev to version number post-release
(#1308)
---
configure.ac | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/configure.ac b/configure.ac
index f7f0fea4aa..74aac31cde 100644
--- a/configure.ac
+++ b/configure.ac
@@ -25,7 +25,7 @@ AC_PREREQ([2.69])
# Initialize with name, version, and support email address.
AC_INIT([GFDL FMS Library],
- [2023.02.00],
+ [2023.02.00-dev],
[gfdl.climate.model.info@noaa.gov],
[FMS],
[https://www.github.com/NOAA-GFDL/FMS])
From 0efc2e5c57d080d16fb3e076e9edc21a860992fa Mon Sep 17 00:00:00 2001
From: Ryan Mulhall <35538242+rem1776@users.noreply.github.com>
Date: Fri, 28 Jul 2023 11:58:26 -0400
Subject: [PATCH 11/11] CI: update gnu image and add dockerfile (#1206)
---
.github/workflows/Dockerfile.gnu | 68 +++++++++++++++++++
.github/workflows/build_ubuntu_gnu.yml | 37 ----------
.github/workflows/github_autotools_gnu.yml | 39 +++++++++++
...ntel_pr.yml => github_autotools_intel.yml} | 0
...ild_cmake_gnu.yml => github_cmake_gnu.yml} | 0
.../{coupler.yml => github_coupler_gnu.yml} | 0
.../{update_docs.yml => github_doc_site.yml} | 0
.../{lint_fms.yml => github_linter.yml} | 0
...el_tag.yml => parallelworks_am4_intel.yml} | 0
.github/workflows/spack.env | 17 +++++
.github/workflows/version.yml | 4 +-
CI.md | 22 +++---
12 files changed, 141 insertions(+), 46 deletions(-)
create mode 100644 .github/workflows/Dockerfile.gnu
delete mode 100644 .github/workflows/build_ubuntu_gnu.yml
create mode 100644 .github/workflows/github_autotools_gnu.yml
rename .github/workflows/{intel_pr.yml => github_autotools_intel.yml} (100%)
rename .github/workflows/{build_cmake_gnu.yml => github_cmake_gnu.yml} (100%)
rename .github/workflows/{coupler.yml => github_coupler_gnu.yml} (100%)
rename .github/workflows/{update_docs.yml => github_doc_site.yml} (100%)
rename .github/workflows/{lint_fms.yml => github_linter.yml} (100%)
rename .github/workflows/{am4_regression_parallelWorks_intel_tag.yml => parallelworks_am4_intel.yml} (100%)
create mode 100644 .github/workflows/spack.env
diff --git a/.github/workflows/Dockerfile.gnu b/.github/workflows/Dockerfile.gnu
new file mode 100644
index 0000000000..3506c2b9ee
--- /dev/null
+++ b/.github/workflows/Dockerfile.gnu
@@ -0,0 +1,68 @@
+#***********************************************************************
+#* GNU Lesser General Public License
+#*
+#* This file is part of the GFDL Flexible Modeling System (FMS).
+#*
+#* FMS is free software: you can redistribute it and/or modify it under
+#* the terms of the GNU Lesser General Public License as published by
+#* the Free Software Foundation, either version 3 of the License, or (at
+#* your option) any later version.
+#*
+#* FMS is distributed in the hope that it will be useful, but WITHOUT
+#* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+#* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+#* for more details.
+#*
+#* You should have received a copy of the GNU Lesser General Public
+#* License along with FMS. If not, see <http://www.gnu.org/licenses/>.
+#***********************************************************************
+# FMS CI image recipe file for GNU
+# Runs on Rocky Linux 9 (the builder stage uses the same base via the spack/rockylinux9 image)
+#
+# Versions to build can be passed as docker build arguments (--build-arg name=val) or changed here
+FROM spack/rockylinux9:latest as builder
+
+ARG gcc_version=12.3.0
+ARG netcdfc_version=4.9.0
+ARG netcdff_version=4.6.0
+ARG libyaml_version=0.2.5
+ARG mpich_version=4.0.2
+
+COPY spack.env /opt/deps/spack.env
+
+# perl's download kept timing out
+RUN sed -i 's/connect_timeout: 10/connect_timeout: 600/' /opt/spack/etc/spack/defaults/config.yaml && \
+ spack install gcc@${gcc_version} && \
+ source /opt/spack/share/spack/setup-env.sh && \
+ spack load gcc@${gcc_version} && \
+ spack compiler find && \
+ sed "s/COMPILER/gcc@$gcc_version/" /opt/deps/spack.env > spack.yaml && \
+ sed -i "s/NETCDF_C_VERSION/$netcdfc_version/" spack.yaml && \
+ sed -i "s/NETCDF_F_VERSION/$netcdff_version/" spack.yaml && \
+ sed -i "s/LIBYAML_VERSION/$libyaml_version/" spack.yaml && \
+ sed -i "s/MPI_LIB/mpich@$mpich_version/" spack.yaml && \
+ spack env activate -d . && \
+ spack -e . concretize -f > /opt/deps/deps.log && \
+ spack install --fail-fast
+
+# copy the built software from the builder stage onto the runtime base image
+FROM rockylinux:9
+
+COPY --from=builder /opt/view/ /opt/view/
+COPY --from=builder /opt/deps/ /opt/deps/
+
+# input files used with --enable-test-input
+# need to be present on the dev box if building the image locally
+COPY ./fms_test_input /home/unit_tests_input
+
+RUN dnf install -y autoconf make automake m4 libtool pkg-config zip
+
+ENV FC="mpifort"
+ENV CC="mpicc"
+ENV MPICH_FC="/opt/view/bin/gfortran"
+ENV MPICH_CC="/opt/view/bin/gcc"
+ENV FCFLAGS="-I/opt/view/include"
+ENV CFLAGS="-I/opt/view/include"
+ENV LDFLAGS="-L/opt/view/lib"
+ENV LD_LIBRARY_PATH="/opt/view/lib:/opt/view/lib64:/usr/local/lib:/usr/local/lib64"
+ENV PATH="/opt/view/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin"
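As the header comment notes, the pinned versions above can be overridden at build time; a hedged usage example (the build context must provide spack.env and fms_test_input, and the tag matches the image name used by the workflow below):

    docker build -f Dockerfile.gnu \
      --build-arg gcc_version=12.3.0 --build-arg mpich_version=4.0.2 \
      -t noaagfdl/fms-ci-rocky-gnu:12.3.0 .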
diff --git a/.github/workflows/build_ubuntu_gnu.yml b/.github/workflows/build_ubuntu_gnu.yml
deleted file mode 100644
index 7c53895b15..0000000000
--- a/.github/workflows/build_ubuntu_gnu.yml
+++ /dev/null
@@ -1,37 +0,0 @@
-name: Build libFMS test with autotools
-
-on: [push, pull_request]
-
-jobs:
- build:
- runs-on: ubuntu-latest
- defaults:
- run:
- shell: bash
- strategy:
- matrix:
- conf-flags: [--disable-openmp, --enable-mixed-mode, --disable-setting-flags, --with-mpi=no]
- input-flag: [--with-yaml, --enable-test-input=/home/unit_tests_input]
- io-flag: [ --enable-deprecated-io, --disable-deprecated-io]
- container:
- image: noaagfdl/hpc-me.ubuntu-minimal:gnu-input
- env:
- TEST_VERBOSE: 1
- DISTCHECK_CONFIGURE_FLAGS: "${{ matrix.conf-flags }} ${{ matrix.input-flag }} ${{ matrix.io-flag }}"
- steps:
- - name: Checkout code
- uses: actions/checkout@v2
- - name: Prepare GNU autoconf for build
- run: autoreconf -if
- - name: Configure the build
- if: ${{ matrix.conf-flags != '--disable-setting-flags' }}
- run: ./configure ${DISTCHECK_CONFIGURE_FLAGS} || cat config.log
- - name: Configure the build with compiler flags
- if: ${{ matrix.conf-flags == '--disable-setting-flags' }}
- run: ./configure ${DISTCHECK_CONFIGURE_FLAGS} FCFLAGS="-fdefault-real-8 -fdefault-double-8 -fcray-pointer -ffree-line-length-none -I/usr/include $FCFLAGS" || cat config.log
- - name: Build the library
- run: make distcheck
- if: ${{ matrix.conf-flags != '--with-mpi=no' }}
- - name: Build the library (without test suite for serial build)
- run: make
- if: ${{ matrix.conf-flags == '--with-mpi=no' }}
diff --git a/.github/workflows/github_autotools_gnu.yml b/.github/workflows/github_autotools_gnu.yml
new file mode 100644
index 0000000000..d25f440959
--- /dev/null
+++ b/.github/workflows/github_autotools_gnu.yml
@@ -0,0 +1,39 @@
+# 'main' required ci, does a distcheck (builds, tests, check install)
+# image created off dockerfile in repo, compile/link flags are set there
+name: Build libFMS test with autotools
+
+on: [push, pull_request]
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ conf-flag: [ --disable-openmp, --enable-mixed-mode, --disable-setting-flags, --with-mpi=no]
+ input-flag: [--with-yaml, --enable-test-input=/home/unit_tests_input]
+ exclude:
+ - conf-flag: --with-mpi=no
+ input-flag: --enable-test-input=/home/unit_tests_input
+ container:
+ image: noaagfdl/fms-ci-rocky-gnu:12.3.0
+ env:
+ TEST_VERBOSE: 1
+      DISTCHECK_CONFIGURE_FLAGS: "${{ matrix.conf-flag }} ${{ matrix.input-flag }}"
+      SKIP_TESTS: "test_mpp_domains2.14 test_horiz_interp2.9 test_horiz_interp2.10 test_yaml_parser.5" # temporary until fixes are in
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v2
+ - name: Prepare GNU autoconf for build
+ run: autoreconf -if
+ - name: Configure the build
+ if: ${{ matrix.conf-flag != '--disable-setting-flags' }}
+ run: ./configure ${DISTCHECK_CONFIGURE_FLAGS} || cat config.log
+ - name: Configure the build with compiler flags
+ if: ${{ matrix.conf-flag == '--disable-setting-flags' }}
+ run: ./configure ${DISTCHECK_CONFIGURE_FLAGS} FCFLAGS="-fdefault-real-8 -fdefault-double-8 -fcray-pointer -ffree-line-length-none -I/usr/include $FCFLAGS" || cat config.log
+ - name: Build the library
+ run: make distcheck
+ if: ${{ matrix.conf-flag != '--with-mpi=no' }}
+ - name: Build the library (without test suite for serial build)
+ run: make
+ if: ${{ matrix.conf-flag == '--with-mpi=no' }}
diff --git a/.github/workflows/intel_pr.yml b/.github/workflows/github_autotools_intel.yml
similarity index 100%
rename from .github/workflows/intel_pr.yml
rename to .github/workflows/github_autotools_intel.yml
diff --git a/.github/workflows/build_cmake_gnu.yml b/.github/workflows/github_cmake_gnu.yml
similarity index 100%
rename from .github/workflows/build_cmake_gnu.yml
rename to .github/workflows/github_cmake_gnu.yml
diff --git a/.github/workflows/coupler.yml b/.github/workflows/github_coupler_gnu.yml
similarity index 100%
rename from .github/workflows/coupler.yml
rename to .github/workflows/github_coupler_gnu.yml
diff --git a/.github/workflows/update_docs.yml b/.github/workflows/github_doc_site.yml
similarity index 100%
rename from .github/workflows/update_docs.yml
rename to .github/workflows/github_doc_site.yml
diff --git a/.github/workflows/lint_fms.yml b/.github/workflows/github_linter.yml
similarity index 100%
rename from .github/workflows/lint_fms.yml
rename to .github/workflows/github_linter.yml
diff --git a/.github/workflows/am4_regression_parallelWorks_intel_tag.yml b/.github/workflows/parallelworks_am4_intel.yml
similarity index 100%
rename from .github/workflows/am4_regression_parallelWorks_intel_tag.yml
rename to .github/workflows/parallelworks_am4_intel.yml
diff --git a/.github/workflows/spack.env b/.github/workflows/spack.env
new file mode 100644
index 0000000000..69a3bdcbd0
--- /dev/null
+++ b/.github/workflows/spack.env
@@ -0,0 +1,17 @@
+# template for spack environment yaml
+# uppercase words get replaced before activating
+spack:
+ specs:
+ - COMPILER
+ - MPI_LIB
+ - netcdf-c@NETCDF_C_VERSION ^MPI_LIB
+ - netcdf-fortran@NETCDF_F_VERSION
+ - libyaml@LIBYAML_VERSION
+ concretizer:
+ unify: true
+ packages:
+ all:
+ compiler: [ COMPILER ]
+ config:
+ install_tree: /opt/deps
+ view: /opt/view
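For concreteness, after Dockerfile.gnu's sed substitutions run with the default build arguments, the activated environment resolves to roughly:

    spack:
      specs:
      - gcc@12.3.0
      - mpich@4.0.2
      - netcdf-c@4.9.0 ^mpich@4.0.2
      - netcdf-fortran@4.6.0
      - libyaml@0.2.5
      concretizer:
        unify: true
      packages:
        all:
          compiler: [ gcc@12.3.0 ]
      config:
        install_tree: /opt/deps
        view: /opt/view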
diff --git a/.github/workflows/version.yml b/.github/workflows/version.yml
index 1e71236386..31641aea69 100644
--- a/.github/workflows/version.yml
+++ b/.github/workflows/version.yml
@@ -1,3 +1,5 @@
+# appends -dev to the version upon release and opens a PR
+# CI won't run on the generated PR; the easiest workaround is to close and reopen it
on:
release:
types: [published]
@@ -16,4 +18,4 @@ jobs:
branch-suffix: timestamp # add a timestamp to branch name
delete-branch: true # delete after merge
title: Append dev to version number post-release
- body: automated change, adds '-dev' to the version number upon releases
+      body: automated change, adds '-dev' to the version number upon release. This PR will need to be closed and reopened to run CI testing.
diff --git a/CI.md b/CI.md
index 225b25129b..89b4db256e 100644
--- a/CI.md
+++ b/CI.md
@@ -8,24 +8,30 @@ Required CI for pull requests are listed first.
## Pull Request CI and checks
### Build libFMS with autotools
+
Required GNU build test for all pull requests/pushes.
Runs `make distcheck` after configuring via GNU autotools.
+Runs on a container image with Spack-installed dependencies, on top of a Rocky Linux base.
+
+The Dockerfile for the image is stored at .github/workflows/Dockerfile.gnu; see it for more specific information on the CI environment.
+
Container environment:
-gcc v7.3.0
-mpich v3.3a2
-netcdf v4.6.0
-netcdf-fortran v4.4.4
+gcc v12.3.0
+mpich v4.0.2
+netcdf v4.9.0
+netcdf-fortran v4.6.0
autoconf v2.69
+libyaml v0.2.5
-container hosted at [noaagfdl/ubuntu_libfms_gnu:latest](https://hub.docker.com/r/noaagfdl/ubuntu_libfms_gnu)
-
-`./configure` flags:
-- `--enable-openmp`
+`./configure` flags tested:
- `--disable-openmp`
- `--enable-mixed-mode`
+- `--with-mpi=no` (disables unit testing)
- `--disable-setting-flags`
- `--with-yaml`
+- `--enable-test-input=/home/unit_tests_input`
+
### Build libfms with cmake
Required GNU build test for all pull requests/pushes.